From 39fe8330bb591c1c17b9bd9a30dd69fa4135ff06 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Wed, 24 Apr 2024 10:54:30 -0300 Subject: [PATCH 001/419] Upgraded and added new packages to requirements.txt --- framework/requirements.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/framework/requirements.txt b/framework/requirements.txt index a2e8917aaac..9aa726591a6 100644 --- a/framework/requirements.txt +++ b/framework/requirements.txt @@ -1,4 +1,4 @@ -aiohttp==3.9.3 +aiohttp==3.9.5 aiohttp-cache==2.2.0 aiohttp-cors==0.7.0 aiohttp-jinja2==1.5.1 @@ -42,7 +42,7 @@ greenlet==2.0.2 grpc-google-iam-v1==0.12.3 grpcio==1.58.0 hiredis==2.2.3 -idna==2.9 +idna==3.7 importlib-metadata==3.10.1 inflection==0.3.1 itsdangerous==2.0.0 @@ -56,6 +56,7 @@ multidict==5.2.0 mypy-extensions==0.4.3 numpy==1.26.0 openapi-spec-validator==0.2.6 +opensearch-py==2.5.0 packaging==20.9 pathlib==1.0.1 protobuf==3.19.6 From 076b8d988ccb7e30dcf2e804d86fdb4357b77212 Mon Sep 17 00:00:00 2001 From: pereyra-m Date: Tue, 23 Apr 2024 04:40:40 +0000 Subject: [PATCH 002/419] Using vendor to filter candidates --- .../schemas/vulnerabilityCandidate.fbs | 1 + .../databaseFeedManager.hpp | 22 +- .../updateCVECandidates.hpp | 4 +- .../src/scanOrchestrator/osScanner.hpp | 9 +- .../src/scanOrchestrator/packageScanner.hpp | 122 ++++-- .../tests/mocks/MockDatabaseFeedManager.hpp | 3 +- .../tests/unit/databaseFeedManager_test.cpp | 22 +- .../tests/unit/packageScanner_test.cpp | 381 +++++++++++++++++- 8 files changed, 506 insertions(+), 58 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/schemas/vulnerabilityCandidate.fbs b/src/wazuh_modules/vulnerability_scanner/schemas/vulnerabilityCandidate.fbs index 4f302bd2641..2669717115b 100644 --- a/src/wazuh_modules/vulnerability_scanner/schemas/vulnerabilityCandidate.fbs +++ b/src/wazuh_modules/vulnerability_scanner/schemas/vulnerabilityCandidate.fbs @@ -19,6 +19,7 @@ table ScanVulnerabilityCandidate { defaultStatus: Status = 2; platforms: [string]; versions: [Version]; + vendor: string; } table ScanVulnerabilityCandidateArray { diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp index 5ad7afb70d7..6e6b53825f1 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp @@ -43,6 +43,17 @@ constexpr auto DATABASE_PATH {"queue/vd/feed"}; constexpr auto OFFSET_TRANSACTION_SIZE {1000}; constexpr auto EMPTY_KEY {""}; +/** + * @brief Scanning package data struct. + */ +struct packageData final +{ + std::string name; ///< Package name. + std::string vendor; ///< Package vendor. + std::string format; ///< Package format. + std::string version; ///< Package version. +}; + /** * @brief A struct for storing a pair of FlatBuffers data. * @@ -494,22 +505,23 @@ class TDatabaseFeedManager final : public Observer * @brief Get the Vulnerabilities Candidates information. * * @param cnaName RocksDB table identifier. - * @param packageName Package name. + * @param package Struct with package data. * @param callback Store vulnerability data. 
*/ void getVulnerabilitiesCandidates( const std::string& cnaName, - std::string_view packageName, + const packageData& package, const std::function& callback) { - if (packageName.empty() || cnaName.empty()) + if (package.name.empty() || cnaName.empty()) { throw std::runtime_error("Invalid package/cna name."); } std::string packageNameWithSeparator; - packageNameWithSeparator.append(packageName); + packageNameWithSeparator.append(package.name); packageNameWithSeparator.append("_CVE"); for (const auto& [key, value] : m_feedDatabase->seek(packageNameWithSeparator, cnaName)) @@ -527,7 +539,7 @@ class TDatabaseFeedManager final : public Observer { for (const auto& candidate : *candidatesArray->candidates()) { - if (callback(cnaName, *candidate)) + if (callback(cnaName, package, *candidate)) { // If the candidate is vulnerable, we stop looking for. break; diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateCVECandidates.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateCVECandidates.hpp index 9d31b5a532f..e13148be2e8 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateCVECandidates.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateCVECandidates.hpp @@ -136,12 +136,14 @@ class UpdateCVECandidates final defaultStatus = VERSION_STATUS_MAP.at(affected->defaultStatus()->str()); } + // The vendor field will only be stored for the NVD auto candidate = NSVulnerabilityScanner::CreateScanVulnerabilityCandidateDirect( candidateBuilderRef, cveId->c_str(), defaultStatus, affected->platforms() && affected->platforms()->size() ? &platformsVec : nullptr, - &versionFBArray); + &versionFBArray, + affected->vendor() && shortName == "nvd" ? affected->vendor()->c_str() : nullptr); candidatesArraysMap.at(productName).first.push_back(candidate); } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp index 98f76890d30..024c30ad095 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp @@ -74,8 +74,9 @@ class TOsScanner final : public AbstractHandler> const auto osCPE = ScannerHelper::parseCPE(data->osCPEName().data()); - auto vulnerabilityScan = - [&](const std::string& cnaName, const NSVulnerabilityScanner::ScanVulnerabilityCandidate& callbackData) + auto vulnerabilityScan = [&](const std::string& cnaName, + const packageData& package, + const NSVulnerabilityScanner::ScanVulnerabilityCandidate& callbackData) { try { @@ -269,7 +270,9 @@ class TOsScanner final : public AbstractHandler> } else { - m_databaseFeedManager->getVulnerabilitiesCandidates("nvd", osCPE.product, vulnerabilityScan); + packageData package = {.name = osCPE.product}; + + m_databaseFeedManager->getVulnerabilitiesCandidates("nvd", package, vulnerabilityScan); if (data->osPlatform() == "windows") { diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp index c8fd9e4ce7f..fa153c500bb 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp @@ -81,6 +81,7 @@ class TPackageScanner final : public AbstractHandler data, const std::function& vulnerabilityScan) { @@ -108,13 +109,17 @@ 
class TPackageScanner final : public AbstractHandlerpackageFormat().data(), - data->packageVendor().data(), + translation.translatedVendor.c_str(), cnaName.c_str(), data->agentName().data(), data->agentId().data(), data->agentVersion().data()); - m_databaseFeedManager->getVulnerabilitiesCandidates( - cnaName, translation.translatedProduct, vulnerabilityScan); + + packageData translatedPackage = {.name = translation.translatedProduct, + .vendor = translation.translatedVendor, + .format = data->packageFormat().data(), + .version = data->packageVersion().data()}; + m_databaseFeedManager->getVulnerabilitiesCandidates(cnaName, translatedPackage, vulnerabilityScan); } return true; } @@ -137,14 +142,18 @@ class TPackageScanner final : public AbstractHandlerpackageFormat().data(), - data->packageVendor().data(), + translation.translatedVendor.c_str(), cnaName.c_str(), data->agentName().data(), data->agentId().data(), data->agentVersion().data()); - m_databaseFeedManager->getVulnerabilitiesCandidates( - cnaName, translation.translatedProduct, vulnerabilityScan); + packageData translatedPackage = {.name = translation.translatedProduct, + .vendor = translation.translatedVendor, + .format = data->packageFormat().data(), + .version = data->packageVersion().data()}; + + m_databaseFeedManager->getVulnerabilitiesCandidates(cnaName, translatedPackage, vulnerabilityScan); } // Store translations in Level 1 cache m_translationL1Cache.insertKey(cacheKey, std::move(L2Translations)); @@ -256,10 +265,9 @@ class TPackageScanner final : public AbstractHandler handleRequest(std::shared_ptr data) override { - const std::string packageName {Utils::toLowerCase(std::string(data->packageName()))}; - - auto vulnerabilityScan = - [&](const std::string& cnaName, const NSVulnerabilityScanner::ScanVulnerabilityCandidate& callbackData) + auto vulnerabilityScan = [&](const std::string& cnaName, + const packageData& package, + const NSVulnerabilityScanner::ScanVulnerabilityCandidate& callbackData) { try { @@ -282,8 +290,8 @@ class TPackageScanner final : public AbstractHandlerpackageVersion().data(), + package.name.c_str(), + package.version.c_str(), callbackData.cveId()->str().c_str(), platformValue.c_str(), data->osCPEName().data()); @@ -298,6 +306,15 @@ class TPackageScanner final : public AbstractHandlerosCodeName()) == 0) { + logDebug2( + WM_VULNSCAN_LOGTAG, + "The platform is in the list based on OS code name comparison for " + "Package: %s, Version: %s, CVE: %s, Content OS code name: %s, OS code name: %s", + package.name.c_str(), + package.version.c_str(), + callbackData.cveId()->str().c_str(), + platformValue.c_str(), + data->osCodeName().data()); matchPlatform = true; break; } @@ -306,20 +323,68 @@ class TPackageScanner final : public AbstractHandlerstr().c_str(), + data->osCPEName().data(), + data->osCodeName().data()); return false; } } + if (callbackData.vendor()) + { + if (package.vendor.empty() || " " == package.vendor) + { + logDebug2(WM_VULNSCAN_LOGTAG, + "The vendor information is not available for Package: %s, Version: %s, " + "CVE: %s, Content vendor: %s", + package.name.c_str(), + package.version.c_str(), + callbackData.cveId()->str().c_str(), + callbackData.vendor()->str().c_str()); + return false; + } + else + { + if (package.vendor.compare(callbackData.vendor()->str()) != 0) + { + logDebug2(WM_VULNSCAN_LOGTAG, + "The vendor is not the same for Package: %s, Version: %s, " + "CVE: %s, Content vendor: %s, Package vendor: %s", + package.name.c_str(), + package.version.c_str(), + 
callbackData.cveId()->str().c_str(), + callbackData.vendor()->str().c_str(), + package.vendor.c_str()); + return false; + } + else + { + logDebug2(WM_VULNSCAN_LOGTAG, + "Vendor match for Package: %s, Version: %s, " + "CVE: %s, Vendor: %s", + package.name.c_str(), + package.version.c_str(), + callbackData.cveId()->str().c_str(), + package.vendor.c_str()); + } + } + } + std::variant objectType = VersionMatcherStrategy::Unspecified; - if (const auto it = m_packageMap.find(data->packageFormat()); it != m_packageMap.end()) + if (const auto it = m_packageMap.find(package.format); it != m_packageMap.end()) { objectType = it->second; } for (const auto& version : *callbackData.versions()) { - const std::string packageVersion {data->packageVersion()}; + const std::string packageVersion {package.version}; std::string versionString {version->version() ? version->version()->str() : ""}; std::string versionStringLessThan {version->lessThan() ? version->lessThan()->str() : ""}; std::string versionStringLessThanOrEqual { @@ -329,7 +394,7 @@ class TPackageScanner final : public AbstractHandlerstr().c_str(), versionString.c_str(), @@ -349,7 +414,7 @@ class TPackageScanner final : public AbstractHandlerstr().c_str(), packageVersion.c_str(), versionString.c_str(), @@ -415,7 +480,7 @@ class TPackageScanner final : public AbstractHandlerstr().c_str(), packageVersion.c_str(), versionStringLessThan.c_str(), @@ -447,7 +512,7 @@ class TPackageScanner final : public AbstractHandlerstr().c_str(), versionString.c_str(), @@ -467,7 +532,7 @@ class TPackageScanner final : public AbstractHandlerstr().c_str(), data->agentName().data(), data->agentId().data(), @@ -481,8 +546,8 @@ class TPackageScanner final : public AbstractHandlerpackageVersion().data(), + package.name.c_str(), + package.version.c_str(), callbackData.cveId()->str().c_str()); return false; @@ -492,7 +557,7 @@ class TPackageScanner final : public AbstractHandlerpackageName()))}; + logDebug1(WM_VULNSCAN_LOGTAG, "Initiating a vulnerability scan for package '%s' (%s) (%s) with CVE Numbering " "Authorities (CNA) " @@ -519,14 +586,19 @@ class TPackageScanner final : public AbstractHandleragentId().data(), data->agentVersion().data()); - m_databaseFeedManager->getVulnerabilitiesCandidates(CNAValue, packageName, vulnerabilityScan); + packageData package = {.name = packageName, + .vendor = data->packageVendor().data(), + .format = data->packageFormat().data(), + .version = data->packageVersion().data()}; + + m_databaseFeedManager->getVulnerabilitiesCandidates(CNAValue, package, vulnerabilityScan); } } catch (const std::exception& e) { logWarn(WM_VULNSCAN_LOGTAG, "Failed to scan package: '%s', CVE Numbering Authorities (CNA): '%s', Error: '%s'", - packageName.c_str(), + data->packageName().data(), CNAValue.c_str(), e.what()); } @@ -534,7 +606,7 @@ class TPackageScanner final : public AbstractHandlerpackageName().data(), data->agentId().data()); if (data->m_elements.empty()) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp index 9494098714e..e42eb97248e 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp @@ -70,8 +70,9 @@ class MockDatabaseFeedManager MOCK_METHOD(void, getVulnerabilitiesCandidates, (const std::string& cnaName, - std::string_view packageName, + const packageData& package, const std::function& callback), 
()); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp index 2a68261697a..3142b8ab525 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp @@ -477,10 +477,14 @@ TEST_F(DatabaseFeedManagerTest, DISABLED_GetVulnerabilityCandidatesSuccess) std::vector cves; + packageData package = {.name = PACKAGE_NAME}; + pDatabaseFeedManager->getVulnerabilitiesCandidates( CNA_NAME, - PACKAGE_NAME, - [&](const std::string& cnaName, const NSVulnerabilityScanner::ScanVulnerabilityCandidate& candidate) -> bool + package, + [&](const std::string& cnaName, + const packageData& package, + const NSVulnerabilityScanner::ScanVulnerabilityCandidate& candidate) -> bool { auto cveId = candidate.cveId()->str(); cves.push_back(cveId); @@ -528,12 +532,15 @@ TEST_F(DatabaseFeedManagerTest, GetVulnerabilityCandidatesCorrupted) TrampolineContentRegister, TrampolineRouterSubscriber>>(pIndexerConnectorTrap, shouldStop, mutex)}; + packageData package = {.name = CORRUPTED_PACKAGE_NAME}; + EXPECT_THROW( { pDatabaseFeedManager->getVulnerabilitiesCandidates( CNA_NAME, - CORRUPTED_PACKAGE_NAME, + package, [&](const std::string& cnaName, + const packageData& package, const NSVulnerabilityScanner::ScanVulnerabilityCandidate& candidate) -> bool { return true; }); }, std::runtime_error); @@ -565,12 +572,15 @@ TEST_F(DatabaseFeedManagerTest, GetVulnerabilityCandidatesNoPackageName) TrampolineContentRegister, TrampolineRouterSubscriber>>(pIndexerConnectorTrap, shouldStop, mutex)}; + packageData package = {.name = ""}; + EXPECT_ANY_THROW({ pDatabaseFeedManager->getVulnerabilitiesCandidates( CNA_NAME, - "", - [&](const std::string& cnaName, const NSVulnerabilityScanner::ScanVulnerabilityCandidate& candidate) -> bool - { return true; }); + package, + [&](const std::string& cnaName, + const packageData& package, + const NSVulnerabilityScanner::ScanVulnerabilityCandidate& candidate) -> bool { return true; }); }); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp index 26cf59df180..28c5fdedd62 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp @@ -58,6 +58,37 @@ namespace NSPackageScannerTest } )"; + const std::string DELTA_PACKAGES_INSERTED_MSG_WITHOUT_VENDOR = + R"( + { + "agent_info": { + "agent_id": "001", + "agent_ip": "192.168.33.20", + "agent_name": "focal", + "node_name": "node01" + }, + "data_type": "dbsync_packages", + "data": { + "architecture": "amd64", + "checksum": "1e6ce14f97f57d1bbd46ff8e5d3e133171a1bbce", + "description": "library for GIF images library", + "format": "deb", + "groups": "libs", + "item_id": "ec465b7eb5fa011a336e95614072e4c7f1a65a53", + "multiarch": "same", + "name": "libgif7", + "priority": "optional", + "scan_time": "2023/08/04 19:56:11", + "size": 72, + "source": "giflib", + "vendor": " ", + "version": "5.1.9", + "install_time": "1577890801" + }, + "operation": "INSERTED" + } + )"; + const std::string CANDIDATES_AFFECTED_LESS_THAN_INPUT = R"( { @@ -81,6 +112,54 @@ namespace NSPackageScannerTest } )"; + const std::string CANDIDATES_AFFECTED_LESS_THAN_INPUT_WITH_GENERIC_VENDOR = + R"( + { + "candidates": [ + { + "cveId": 
"CVE-2024-1234", + "defaultStatus": 0, + "platforms": [ + "upstream" + ], + "versions": [ + { + "lessThan": "5.2.0", + "status": "affected", + "version": "0", + "versionType": "custom" + } + ], + "vendor" : "testVendor" + } + ] + } + )"; + + const std::string CANDIDATES_AFFECTED_LESS_THAN_INPUT_WITH_UBUNTU_VENDOR = + R"( + { + "candidates": [ + { + "cveId": "CVE-2024-1234", + "defaultStatus": 0, + "platforms": [ + "upstream" + ], + "versions": [ + { + "lessThan": "5.2.0", + "status": "affected", + "version": "0", + "versionType": "custom" + } + ], + "vendor" : "Ubuntu Developers " + } + ] + } + )"; + const std::string CANDIDATES_AFFECTED_LESS_THAN_OR_EQUAL_INPUT = R"( { @@ -281,8 +360,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualTo) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - std::string_view packageName, + const packageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -305,7 +385,7 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualTo) { for (const auto& candidate : *candidatesArray->candidates()) { - if (callback(cnaName, *candidate)) + if (callback(cnaName, package, *candidate)) { // If the candidate is vulnerable, we stop looking for. break; @@ -373,8 +453,9 @@ TEST_F(PackageScannerTest, TestPackageUnaffectedEqualTo) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - std::string_view packageName, + const packageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -397,7 +478,7 @@ TEST_F(PackageScannerTest, TestPackageUnaffectedEqualTo) { for (const auto& candidate : *candidatesArray->candidates()) { - if (callback(cnaName, *candidate)) + if (callback(cnaName, package, *candidate)) { // If the candidate is vulnerable, we stop looking for. break; @@ -455,8 +536,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThan) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - std::string_view packageName, + const packageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -479,7 +561,266 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThan) { for (const auto& candidate : *candidatesArray->candidates()) { - if (callback(cnaName, *candidate)) + if (callback(cnaName, package, *candidate)) + { + // If the candidate is vulnerable, we stop looking for. 
+ break; + } + } + } + }; + + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "upstream", + .majorVersion = "osdata_majorVersion", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "osdata_platform", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + auto spDatabaseFeedManagerMock = std::make_shared(); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) + .WillOnce(testing::Invoke(mockGetVulnerabilitiesCandidates)); + EXPECT_CALL(*spDatabaseFeedManagerMock, getTranslationFromL2(_, _)); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); + ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); + auto scanContextOriginal = std::make_shared>(syscollectorDelta); + + spGlobalDataMock = std::make_shared(); + EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); + + TPackageScanner, TrampolineGlobalData> packageScanner( + spDatabaseFeedManagerMock); + + std::shared_ptr> scanContextResult; + EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); + + ASSERT_TRUE(scanContextResult != nullptr); + + EXPECT_EQ(scanContextResult->m_elements.size(), 1); + EXPECT_NE(scanContextResult->m_elements.find(CVEID), scanContextResult->m_elements.end()); + + EXPECT_EQ(scanContextResult->m_matchConditions.size(), 1); + EXPECT_NE(scanContextResult->m_matchConditions.find(CVEID), scanContextResult->m_matchConditions.end()); + + auto& matchCondition = scanContextResult->m_matchConditions[CVEID]; + EXPECT_EQ(matchCondition.condition, MatchRuleCondition::LessThan); + EXPECT_STREQ(matchCondition.version.c_str(), "5.2.0"); +} + +TEST_F(PackageScannerTest, TestPackageAffectedLessThanVendorMissing) +{ + auto mockGetVulnerabilitiesCandidates = + [&](const std::string& cnaName, + const packageData& package, + const std::function& callback) + { + std::string candidatesFlatbufferSchemaStr; + + // Read schemas from filesystem. + bool valid = + flatbuffers::LoadFile(CANDIDATES_FLATBUFFER_SCHEMA_PATH.c_str(), false, &candidatesFlatbufferSchemaStr); + ASSERT_EQ(valid, true); + + // Parse schemas and JSON example. + flatbuffers::Parser fbParser; + valid = (fbParser.Parse(candidatesFlatbufferSchemaStr.c_str(), INCLUDE_DIRECTORIES) && + fbParser.Parse(CANDIDATES_AFFECTED_LESS_THAN_INPUT_WITH_UBUNTU_VENDOR.c_str())); + ASSERT_EQ(valid, true); + + auto candidatesArray = + GetScanVulnerabilityCandidateArray(reinterpret_cast(fbParser.builder_.GetBufferPointer())); + + if (candidatesArray) + { + for (const auto& candidate : *candidatesArray->candidates()) + { + if (callback(cnaName, package, *candidate)) + { + // If the candidate is vulnerable, we stop looking for. 
+ break; + } + } + } + }; + + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "upstream", + .majorVersion = "osdata_majorVersion", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "osdata_platform", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + auto spDatabaseFeedManagerMock = std::make_shared(); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) + .WillOnce(testing::Invoke(mockGetVulnerabilitiesCandidates)); + EXPECT_CALL(*spDatabaseFeedManagerMock, getTranslationFromL2(_, _)); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); + ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG_WITHOUT_VENDOR.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); + auto scanContextOriginal = std::make_shared>(syscollectorDelta); + + spGlobalDataMock = std::make_shared(); + EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); + + TPackageScanner, TrampolineGlobalData> packageScanner( + spDatabaseFeedManagerMock); + + std::shared_ptr> scanContextResult; + EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); + + ASSERT_TRUE(scanContextResult == nullptr); +} + +TEST_F(PackageScannerTest, TestPackageAffectedLessThanVendorMismatch) +{ + auto mockGetVulnerabilitiesCandidates = + [&](const std::string& cnaName, + const packageData& package, + const std::function& callback) + { + std::string candidatesFlatbufferSchemaStr; + + // Read schemas from filesystem. + bool valid = + flatbuffers::LoadFile(CANDIDATES_FLATBUFFER_SCHEMA_PATH.c_str(), false, &candidatesFlatbufferSchemaStr); + ASSERT_EQ(valid, true); + + // Parse schemas and JSON example. + flatbuffers::Parser fbParser; + valid = (fbParser.Parse(candidatesFlatbufferSchemaStr.c_str(), INCLUDE_DIRECTORIES) && + fbParser.Parse(CANDIDATES_AFFECTED_LESS_THAN_INPUT_WITH_GENERIC_VENDOR.c_str())); + ASSERT_EQ(valid, true); + + auto candidatesArray = + GetScanVulnerabilityCandidateArray(reinterpret_cast(fbParser.builder_.GetBufferPointer())); + + if (candidatesArray) + { + for (const auto& candidate : *candidatesArray->candidates()) + { + if (callback(cnaName, package, *candidate)) + { + // If the candidate is vulnerable, we stop looking for. 
+ break; + } + } + } + }; + + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "upstream", + .majorVersion = "osdata_majorVersion", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "osdata_platform", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + auto spDatabaseFeedManagerMock = std::make_shared(); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) + .WillOnce(testing::Invoke(mockGetVulnerabilitiesCandidates)); + EXPECT_CALL(*spDatabaseFeedManagerMock, getTranslationFromL2(_, _)); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); + ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); + auto scanContextOriginal = std::make_shared>(syscollectorDelta); + + spGlobalDataMock = std::make_shared(); + EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); + + TPackageScanner, TrampolineGlobalData> packageScanner( + spDatabaseFeedManagerMock); + + std::shared_ptr> scanContextResult; + EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); + + ASSERT_TRUE(scanContextResult == nullptr); +} + +TEST_F(PackageScannerTest, TestPackageAffectedLessThanVendorMatch) +{ + auto mockGetVulnerabilitiesCandidates = + [&](const std::string& cnaName, + const packageData& package, + const std::function& callback) + { + std::string candidatesFlatbufferSchemaStr; + + // Read schemas from filesystem. + bool valid = + flatbuffers::LoadFile(CANDIDATES_FLATBUFFER_SCHEMA_PATH.c_str(), false, &candidatesFlatbufferSchemaStr); + ASSERT_EQ(valid, true); + + // Parse schemas and JSON example. + flatbuffers::Parser fbParser; + valid = (fbParser.Parse(candidatesFlatbufferSchemaStr.c_str(), INCLUDE_DIRECTORIES) && + fbParser.Parse(CANDIDATES_AFFECTED_LESS_THAN_INPUT_WITH_UBUNTU_VENDOR.c_str())); + ASSERT_EQ(valid, true); + + auto candidatesArray = + GetScanVulnerabilityCandidateArray(reinterpret_cast(fbParser.builder_.GetBufferPointer())); + + if (candidatesArray) + { + for (const auto& candidate : *candidatesArray->candidates()) + { + if (callback(cnaName, package, *candidate)) { // If the candidate is vulnerable, we stop looking for. break; @@ -547,8 +888,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanOrEqual) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - std::string_view packageName, + const packageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -571,7 +913,7 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanOrEqual) { for (const auto& candidate : *candidatesArray->candidates()) { - if (callback(cnaName, *candidate)) + if (callback(cnaName, package, *candidate)) { // If the candidate is vulnerable, we stop looking for. 
break; @@ -639,8 +981,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanWithVersionNotZero) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - std::string_view packageName, + const packageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -663,7 +1006,7 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanWithVersionNotZero) { for (const auto& candidate : *candidatesArray->candidates()) { - if (callback(cnaName, *candidate)) + if (callback(cnaName, package, *candidate)) { // If the candidate is vulnerable, we stop looking for. break; @@ -731,8 +1074,9 @@ TEST_F(PackageScannerTest, TestPackageUnaffectedLessThan) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - std::string_view packageName, + const packageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -755,7 +1099,7 @@ TEST_F(PackageScannerTest, TestPackageUnaffectedLessThan) { for (const auto& candidate : *candidatesArray->candidates()) { - if (callback(cnaName, *candidate)) + if (callback(cnaName, package, *candidate)) { // If the candidate is vulnerable, we stop looking for. break; @@ -813,8 +1157,9 @@ TEST_F(PackageScannerTest, TestPackageDefaultStatusAffected) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - std::string_view packageName, + const packageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -837,7 +1182,7 @@ TEST_F(PackageScannerTest, TestPackageDefaultStatusAffected) { for (const auto& candidate : *candidatesArray->candidates()) { - if (callback(cnaName, *candidate)) + if (callback(cnaName, package, *candidate)) { // If the candidate is vulnerable, we stop looking for. break; @@ -904,8 +1249,9 @@ TEST_F(PackageScannerTest, TestPackageDefaultStatusUnaffected) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - std::string_view packageName, + const packageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -928,7 +1274,7 @@ TEST_F(PackageScannerTest, TestPackageDefaultStatusUnaffected) { for (const auto& candidate : *candidatesArray->candidates()) { - if (callback(cnaName, *candidate)) + if (callback(cnaName, package, *candidate)) { // If the candidate is vulnerable, we stop looking for. break; @@ -986,8 +1332,9 @@ TEST_F(PackageScannerTest, TestPackageGetVulnerabilitiesCandidatesGeneratesExcep { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - std::string_view packageName, + const packageData& package, const std::function& callback) { throw std::runtime_error("Invalid package/cna name."); From d5f6bfc50e8d6cc30b9345497acd7857fce22cd2 Mon Sep 17 00:00:00 2001 From: pereyra-m Date: Fri, 26 Apr 2024 02:15:21 +0000 Subject: [PATCH 003/419] Including efficacy tests for vendor changes. 
Using vendor in lower case as default --- .../qa/test_data/010/expected_002.out | 3 +-- .../qa/test_data/010/expected_003.out | 3 +-- .../qa/test_data/011/expected_001.out | 1 + .../qa/test_data/011/expected_002.out | 6 +++++ .../qa/test_data/011/expected_003.out | 1 + .../qa/test_data/011/expected_004.out | 4 +++ .../qa/test_data/011/expected_005.out | 8 ++++++ .../qa/test_data/011/input_001.json | 23 +++++++++++++++++ .../qa/test_data/011/input_002.json | 25 +++++++++++++++++++ .../qa/test_data/011/input_003.json | 24 ++++++++++++++++++ .../qa/test_data/011/input_004.json | 23 +++++++++++++++++ .../qa/test_data/011/input_005.json | 22 ++++++++++++++++ .../qa/test_data_policy/001/expected_003.out | 4 +-- .../qa/test_data_policy/002/expected_003.out | 2 +- .../qa/test_efficacy_log.py | 7 ++++-- .../src/scanOrchestrator/packageScanner.hpp | 5 ++-- .../tests/unit/packageScanner_test.cpp | 2 +- 17 files changed, 151 insertions(+), 12 deletions(-) create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_001.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_002.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_003.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_004.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_005.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_001.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_002.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_003.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_004.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_005.json diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_002.out index 117a97e3889..f744bee5ed1 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_002.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_002.out @@ -1,5 +1,4 @@ [ "Translation for package 'Skype version 8.109' in platform 'windows' found in Level 2 cache.", - "Match found, the package 'skype version 8.109' is vulnerable to 'CVE-2007-4429' due to default status. - Agent '' (ID: '010', Version: '').", - "Match found, the package 'skype version 8.109' is vulnerable to 'CVE-2016-5720' due to default status. - Agent '' (ID: '010', Version: '')." + "Match found, the package 'skype' is vulnerable to 'CVE-2016-5720' due to default status. - Agent '' (ID: '010', Version: '')." ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_003.out index c031a43e13c..b4b6416d286 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_003.out @@ -1,5 +1,4 @@ [ "Translation for package 'Skype version 8.109' in platform 'windows' found in Level 1 cache.", - "Match found, the package 'skype version 8.109' is vulnerable to 'CVE-2007-4429' due to default status. - Agent '' (ID: '010', Version: '').", - "Match found, the package 'skype version 8.109' is vulnerable to 'CVE-2016-5720' due to default status. - Agent '' (ID: '010', Version: '')." 
+ "Match found, the package 'skype' is vulnerable to 'CVE-2016-5720' due to default status. - Agent '' (ID: '010', Version: '')." ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_001.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_001.out new file mode 100644 index 00000000000..fe51488c706 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_001.out @@ -0,0 +1 @@ +[] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_002.out new file mode 100644 index 00000000000..272352dc391 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_002.out @@ -0,0 +1,6 @@ +[ + "Translation for package 'Opera Stable 108.0.5067.29' in platform 'windows' found in Level 2 cache.", + "Initiating a vulnerability scan for package 'opera_browser' (win) (opera) with CVE Numbering Authorities (CNA) 'nvd' on Agent '' (ID: '001', Version: '').", + "Vendor match for Package: opera_browser, Version: 108.0.5067.29, CVE: CVE-2004-2659, Vendor: opera", + "Match found, the package 'opera_browser' is vulnerable to 'CVE-2004-2659' due to default status. - Agent '' (ID: '001', Version: '')." +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_003.out new file mode 100644 index 00000000000..fe51488c706 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_003.out @@ -0,0 +1 @@ +[] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_004.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_004.out new file mode 100644 index 00000000000..6455d06940a --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_004.out @@ -0,0 +1,4 @@ +[ + "Initiating a vulnerability scan for package 'brotli' (pkg) ( ) with CVE Numbering Authorities (CNA) 'nvd' on Agent '' (ID: '002', Version: '').", + "The vendor information is not available for Package: brotli, Version: 1.1.0, CVE: CVE-2020-8927, Content vendor: google" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_005.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_005.out new file mode 100644 index 00000000000..192356cd872 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_005.out @@ -0,0 +1,8 @@ +[ + "Initiating a vulnerability scan for package 'mail' (pkg) (apple) with CVE Numbering Authorities (CNA) 'nvd' on Agent '' (ID: '002', Version: '').", + "The vendor is not the same for Package: mail, Version: 16.0, CVE: CVE-2008-4584, Content vendor: chilkat_software, Package vendor: apple", + "The vendor is not the same for Package: mail, Version: 16.0, CVE: CVE-2015-9097, Content vendor: mail_project, Package vendor: apple", + "The vendor is not the same for Package: mail, Version: 16.0, CVE: CVE-2017-15806, Content vendor: zetacomponents, Package vendor: apple", + "Vendor match for Package: mail, Version: 16.0, CVE: CVE-2005-2512, Vendor: apple", + "Match found, the package 'mail' is vulnerable to 'CVE-2005-2512' due to default status. - Agent '' (ID: '002', Version: '')." 
+] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_001.json new file mode 100644 index 00000000000..353f220f46b --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_001.json @@ -0,0 +1,23 @@ +{ + "agent_info": { + "agent_id": "001", + "node_name": "test_node_name" + }, + "data_type": "state", + "data": { + "attributes_type": "syscollector_osinfo", + "attributes": { + "architecture": "x86_64", + "checksum": "1713967856394802400", + "hostname":"DESKTOP-EQ4F57D", + "os_major":"10", + "os_minor":"0", + "os_build":"19045.4291", + "os_name":"Microsoft Windows 10 Pro", + "os_display_version":"22H2", + "os_platform":"windows", + "os_version":"10.0.19045.4291", + "scan_time":"2024/04/24 14:10:57" + } + } + } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_002.json new file mode 100644 index 00000000000..45840ed1ae0 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_002.json @@ -0,0 +1,25 @@ +{ + "agent_info": { + "agent_id": "001", + "node_name": "test_node_name" + }, + "data_type": "dbsync_packages", + "data": { + "version": "108.0.5067.29", + "vendor": "Opera Software", + "architecture": " ", + "name": "Opera Stable 108.0.5067.29", + "size": 0, + "format": "win", + "checksum": "12c8e8d9df8f9a9f54d4aaf43568d10c79f3cc56", + "description": " ", + "install_time": "2024/04/03 22:53:13", + "item_id": "eff251a49a142accf85b170526462e13d3265f03", + "location": "C:\\Users\\Vagrant\\AppData\\Local\\Programs\\Opera", + "multiarch": null, + "priority": " ", + "scan_time": "2024/04/24 14:11:04", + "source": " " + }, + "operation": "INSERTED" + } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_003.json new file mode 100644 index 00000000000..467a334c6e0 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_003.json @@ -0,0 +1,24 @@ +{ + "agent_info": { + "agent_id": "002", + "node_name": "test_node_name" + }, + "data_type": "state", + "data": { + "attributes_type": "syscollector_osinfo", + "attributes": { + "architecture": "x86_64", + "checksum": "1713967856394802400", + "hostname":"vmwares-iMac.local", + "os_major":"14", + "os_minor":"1", + "os_patch":"1", + "os_build":"23B81", + "os_name":"macOS", + "os_display_version":" ", + "os_platform":"darwin", + "os_version":"14.1.1", + "scan_time":"2024/04/24 14:10:57" + } + } + } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_004.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_004.json new file mode 100644 index 00000000000..6ff0146c15e --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_004.json @@ -0,0 +1,23 @@ +{ + "agent_info": { + "agent_id": "002", + "node_name": "test_node_name" + }, + "data_type": "dbsync_packages", + "data": { + "scan_time": "2024/04/24 14:11:02", + "format": "pkg", + "name": "brotli", + "size": 0, + "vendor": " ", + "version": "1.1.0", + "architecture": "", + "multiarch": null, + "source": "homebrew", + "description": "Generic-purpose lossless compression algorithm by Google", + "location": "/usr/local/Cellar", + "checksum": "384accfb1b20f0efde0f09dedd8659c49098ff53", + "item_id": "3e8ff0d9234e6109de02bedcdb23c8f5a3e206a1" + }, + "operation": "INSERTED" +} diff --git 
a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_005.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_005.json new file mode 100644 index 00000000000..658402519e5 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_005.json @@ -0,0 +1,22 @@ +{ + "agent_info": { + "agent_id": "002", + "node_name": "test_node_name" + }, + "data_type": "dbsync_packages", + "data": { + "scan_time": "2024/04/24 14:11:02", + "format": "pkg", + "name": "Mail", + "size": 0, + "vendor": "apple", + "version": "16.0", + "multiarch": null, + "source": "utilities", + "description": "com.apple.mail", + "location": "/System/Applications/Mail.app/Contents/Info.plist", + "checksum": "a17eedf568b78ebecbc463ec4934b4d5c585ca3c", + "item_id": "1536bc19d4be6bc139966df6d25fd792983ed6c9" + }, + "operation": "INSERTED" +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out index f4f05747f0d..3e27ba4566a 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out @@ -4,7 +4,7 @@ "Event type: 9 processed", "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", "Translation for package 'gzip' in platform 'ubuntu' not found. Using provided packageName.", - "Initiating a vulnerability scan for package 'gzip' (deb) (Ubuntu Developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'agent_ubuntu_22' (ID: '000', Version: 'v4.7.1').", + "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'agent_ubuntu_22' (ID: '000', Version: 'v4.7.1').", "Scanning package - 'gzip' (Installed Version: 1.10-0ubuntu4.1, Security Vulnerability: CVE-2022-1271). Identified vulnerability: Version: 0. Required Version Threshold: 1.10-4ubuntu4. Required Version Threshold (or Equal): .", "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to ''). - Agent 'agent_ubuntu_22' (ID: '000', Version: 'v4.7.1').", "Vulnerability scan for package 'gzip' on Agent '000' has completed.", @@ -12,7 +12,7 @@ "Processing and publish key: CVE-2022-1271", "Vulnerability scan for OS 'Red Hat Enterprise Linux' on Agent '001' has completed.", "Translation for package 'lua-libs' in platform 'Linux' not found. Using provided packageName.", - "Initiating a vulnerability scan for package 'lua-libs' (rpm) (Red Hat, Inc.) with CVE Numbering Authorities (CNA) 'redhat_8' on Agent 'agent_redhat_8' (ID: '001', Version: 'v4.7.1').", + "Initiating a vulnerability scan for package 'lua-libs' (rpm) (red hat, inc.) 
with CVE Numbering Authorities (CNA) 'redhat_8' on Agent 'agent_redhat_8' (ID: '001', Version: 'v4.7.1').", "Vulnerability scan for package 'lua-libs' on Agent '001' has completed.", "Event type: 7 processed" ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out index 4cfd7cf9046..0f89e3ff493 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out @@ -2,7 +2,7 @@ "Vulnerability scanner module started", "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", "Translation for package 'gzip' in platform 'ubuntu' not found. Using provided packageName.", - "Initiating a vulnerability scan for package 'gzip' (deb) (Ubuntu Developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'agent_ubuntu_22' (ID: '000', Version", + "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'agent_ubuntu_22' (ID: '000', Version", "Scanning package - 'gzip' (Installed Version: 1.10-0ubuntu4.1, Security Vulnerability: CVE-2022-1271). Identified vulnerability: Version: 0. Required Version Threshold: 1.10-4ubuntu4. Required Version Threshold (or Equal): .", "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to ''). - Agent 'agent_ubuntu_22' (ID: '000', Version:", "Vulnerability scan for package 'gzip' on Agent '000' has completed.", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_efficacy_log.py b/src/wazuh_modules/vulnerability_scanner/qa/test_efficacy_log.py index 6ce5d6aab13..40276dc9fc3 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_efficacy_log.py +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_efficacy_log.py @@ -21,7 +21,7 @@ isDeltas = False isRsync = False -def find_regex_in_file(regex, file, times=1, max_timeout=10): +def find_regex_in_file(regex, file, times=1, max_timeout=50): pattern = re.compile(regex) start_time = time.time() @@ -238,9 +238,12 @@ def run_process_and_monitor_log(request, run_on_end): basetimeout = timeout for expected_line in expected_lines: while not found_lines[expected_line]: - if timeout < 4*basetimeout: + if timeout < 8*basetimeout: tail_log(log_file, expected_lines, found_lines, timeout) timeout = 1.5*timeout + else: + LOGGER.error(f"Timeout waiting for: {expected_line}") + break process.terminate() diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp index fa153c500bb..b153fd6f318 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp @@ -572,6 +572,7 @@ class TPackageScanner final : public AbstractHandlerpackageName()))}; + const std::string packageVendor {Utils::toLowerCase(std::string(data->packageVendor()))}; logDebug1(WM_VULNSCAN_LOGTAG, "Initiating a vulnerability scan for package '%s' (%s) (%s) with CVE Numbering " @@ -580,14 +581,14 @@ class TPackageScanner final : public AbstractHandlerpackageFormat().data(), - data->packageVendor().data(), + packageVendor.c_str(), CNAValue.c_str(), data->agentName().data(), data->agentId().data(), 
data->agentVersion().data()); packageData package = {.name = packageName, - .vendor = data->packageVendor().data(), + .vendor = packageVendor, .format = data->packageFormat().data(), .version = data->packageVersion().data()}; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp index 28c5fdedd62..861087188af 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp @@ -154,7 +154,7 @@ namespace NSPackageScannerTest "versionType": "custom" } ], - "vendor" : "Ubuntu Developers " + "vendor" : "ubuntu developers " } ] } From b686278de051b54131f486e596cacdb6bdb3dc02 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Tue, 30 Apr 2024 23:54:25 -0300 Subject: [PATCH 004/419] Remove worker data from vulnerability state information. --- .../testtool/input/example.json | 1 - .../indexer/template/index-template.json | 4 - .../qa/test_data/001/expected_003.out | 2 +- .../qa/test_data/007/expected_002.out | 196 +++++++++--------- .../qa/test_data/008/expected_002.out | 12 +- .../qa/test_data/008/expected_003.out | 2 +- .../qa/test_data_policy/001/expected_003.out | 2 +- .../qa/test_data_policy/002/expected_003.out | 2 +- .../qa/test_data_policy/002/expected_004.out | 2 +- .../scanOrchestrator/cleanAgentInventory.hpp | 2 - .../scanOrchestrator/eventDeleteInventory.hpp | 2 - .../scanOrchestrator/eventDetailsBuilder.hpp | 1 - .../scanOrchestrator/eventInsertInventory.hpp | 2 - .../scanOrchestrator/scanInventorySync.hpp | 2 - .../tests/unit/cleanAgentInventory_test.cpp | 43 ++-- .../tests/unit/cleanInventory_test.cpp | 17 +- .../tests/unit/eventDeleteInventory_test.cpp | 9 +- .../tests/unit/eventDetailsBuilder_test.cpp | 24 +-- .../tests/unit/eventInsertInventory_test.cpp | 8 +- .../eventPackageAlertDetailsBuilder_test.cpp | 4 +- 20 files changed, 154 insertions(+), 183 deletions(-) diff --git a/src/shared_modules/indexer_connector/testtool/input/example.json b/src/shared_modules/indexer_connector/testtool/input/example.json index fc68b97366e..a395ea4df68 100644 --- a/src/shared_modules/indexer_connector/testtool/input/example.json +++ b/src/shared_modules/indexer_connector/testtool/input/example.json @@ -6,7 +6,6 @@ "build": { "original": "sample_build_1" }, - "ephemeral_id": "eph_id_1", "id": "agent_id_1", "name": "agent_name_1", "type": "agent_type_1", diff --git a/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json b/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json index 77f2a3334f2..1f5545f1b8d 100644 --- a/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json +++ b/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json @@ -44,10 +44,6 @@ } } }, - "ephemeral_id": { - "ignore_above": 1024, - "type": "keyword" - }, "id": { "ignore_above": 1024, "type": "keyword" diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/expected_003.out index bea0f46dfe7..d758a6ef49a 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/expected_003.out @@ -1,3 +1,3 @@ [ -"Deleting package agent key: test_node_name_001_ec465b7eb5fa011a336e95614072e4c7f1a65a53" +"Deleting package agent key: 
001_ec465b7eb5fa011a336e95614072e4c7f1a65a53" ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/007/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/007/expected_002.out index bfa8c22a733..9be41557eb1 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/007/expected_002.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/007/expected_002.out @@ -1,100 +1,100 @@ [ -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-30774", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-38403", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-40404", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-40405", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-40408", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-40413", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-40416", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-40421", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-40423", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-40444", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-40447", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-40449", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-41072", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-41254", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-41975", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-41976", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-41977", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-41982", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-41983", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-41988", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-41989", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-41997", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42438", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42841", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42842", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42844", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42845", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42847", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42849", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42850", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42852", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42854", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42856", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42857", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42861", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42874", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42881", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42882", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42883", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42884", -"Removing element from inventory: 
test_node_name_001_macOS_CVE-2023-42886", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42887", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42888", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42890", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42891", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42894", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42898", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42899", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42900", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42901", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42902", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42903", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42904", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42905", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42906", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42907", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42908", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42909", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42910", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42911", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42912", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42914", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42916", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42917", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42919", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42922", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42924", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42926", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42932", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42935", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42937", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-42940", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-45866", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-4733", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-4734", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-4736", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-4738", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-4750", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-4751", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-4752", -"Removing element from inventory: test_node_name_001_macOS_CVE-2023-4781", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23203", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23204", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23206", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23207", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23208", -"Removing element from inventory: 
test_node_name_001_macOS_CVE-2024-23209", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23210", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23211", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23212", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23213", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23214", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23215", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23217", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23218", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23222", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23223", -"Removing element from inventory: test_node_name_001_macOS_CVE-2024-23224" +"Removing element from inventory: 001_macOS_CVE-2023-30774", +"Removing element from inventory: 001_macOS_CVE-2023-38403", +"Removing element from inventory: 001_macOS_CVE-2023-40404", +"Removing element from inventory: 001_macOS_CVE-2023-40405", +"Removing element from inventory: 001_macOS_CVE-2023-40408", +"Removing element from inventory: 001_macOS_CVE-2023-40413", +"Removing element from inventory: 001_macOS_CVE-2023-40416", +"Removing element from inventory: 001_macOS_CVE-2023-40421", +"Removing element from inventory: 001_macOS_CVE-2023-40423", +"Removing element from inventory: 001_macOS_CVE-2023-40444", +"Removing element from inventory: 001_macOS_CVE-2023-40447", +"Removing element from inventory: 001_macOS_CVE-2023-40449", +"Removing element from inventory: 001_macOS_CVE-2023-41072", +"Removing element from inventory: 001_macOS_CVE-2023-41254", +"Removing element from inventory: 001_macOS_CVE-2023-41975", +"Removing element from inventory: 001_macOS_CVE-2023-41976", +"Removing element from inventory: 001_macOS_CVE-2023-41977", +"Removing element from inventory: 001_macOS_CVE-2023-41982", +"Removing element from inventory: 001_macOS_CVE-2023-41983", +"Removing element from inventory: 001_macOS_CVE-2023-41988", +"Removing element from inventory: 001_macOS_CVE-2023-41989", +"Removing element from inventory: 001_macOS_CVE-2023-41997", +"Removing element from inventory: 001_macOS_CVE-2023-42438", +"Removing element from inventory: 001_macOS_CVE-2023-42841", +"Removing element from inventory: 001_macOS_CVE-2023-42842", +"Removing element from inventory: 001_macOS_CVE-2023-42844", +"Removing element from inventory: 001_macOS_CVE-2023-42845", +"Removing element from inventory: 001_macOS_CVE-2023-42847", +"Removing element from inventory: 001_macOS_CVE-2023-42849", +"Removing element from inventory: 001_macOS_CVE-2023-42850", +"Removing element from inventory: 001_macOS_CVE-2023-42852", +"Removing element from inventory: 001_macOS_CVE-2023-42854", +"Removing element from inventory: 001_macOS_CVE-2023-42856", +"Removing element from inventory: 001_macOS_CVE-2023-42857", +"Removing element from inventory: 001_macOS_CVE-2023-42861", +"Removing element from inventory: 001_macOS_CVE-2023-42874", +"Removing element from inventory: 001_macOS_CVE-2023-42881", +"Removing element from inventory: 001_macOS_CVE-2023-42882", +"Removing element from inventory: 001_macOS_CVE-2023-42883", +"Removing element from inventory: 001_macOS_CVE-2023-42884", +"Removing element from inventory: 001_macOS_CVE-2023-42886", +"Removing element from inventory: 001_macOS_CVE-2023-42887", +"Removing element from inventory: 001_macOS_CVE-2023-42888", 
+"Removing element from inventory: 001_macOS_CVE-2023-42890", +"Removing element from inventory: 001_macOS_CVE-2023-42891", +"Removing element from inventory: 001_macOS_CVE-2023-42894", +"Removing element from inventory: 001_macOS_CVE-2023-42898", +"Removing element from inventory: 001_macOS_CVE-2023-42899", +"Removing element from inventory: 001_macOS_CVE-2023-42900", +"Removing element from inventory: 001_macOS_CVE-2023-42901", +"Removing element from inventory: 001_macOS_CVE-2023-42902", +"Removing element from inventory: 001_macOS_CVE-2023-42903", +"Removing element from inventory: 001_macOS_CVE-2023-42904", +"Removing element from inventory: 001_macOS_CVE-2023-42905", +"Removing element from inventory: 001_macOS_CVE-2023-42906", +"Removing element from inventory: 001_macOS_CVE-2023-42907", +"Removing element from inventory: 001_macOS_CVE-2023-42908", +"Removing element from inventory: 001_macOS_CVE-2023-42909", +"Removing element from inventory: 001_macOS_CVE-2023-42910", +"Removing element from inventory: 001_macOS_CVE-2023-42911", +"Removing element from inventory: 001_macOS_CVE-2023-42912", +"Removing element from inventory: 001_macOS_CVE-2023-42914", +"Removing element from inventory: 001_macOS_CVE-2023-42916", +"Removing element from inventory: 001_macOS_CVE-2023-42917", +"Removing element from inventory: 001_macOS_CVE-2023-42919", +"Removing element from inventory: 001_macOS_CVE-2023-42922", +"Removing element from inventory: 001_macOS_CVE-2023-42924", +"Removing element from inventory: 001_macOS_CVE-2023-42926", +"Removing element from inventory: 001_macOS_CVE-2023-42932", +"Removing element from inventory: 001_macOS_CVE-2023-42935", +"Removing element from inventory: 001_macOS_CVE-2023-42937", +"Removing element from inventory: 001_macOS_CVE-2023-42940", +"Removing element from inventory: 001_macOS_CVE-2023-45866", +"Removing element from inventory: 001_macOS_CVE-2023-4733", +"Removing element from inventory: 001_macOS_CVE-2023-4734", +"Removing element from inventory: 001_macOS_CVE-2023-4736", +"Removing element from inventory: 001_macOS_CVE-2023-4738", +"Removing element from inventory: 001_macOS_CVE-2023-4750", +"Removing element from inventory: 001_macOS_CVE-2023-4751", +"Removing element from inventory: 001_macOS_CVE-2023-4752", +"Removing element from inventory: 001_macOS_CVE-2023-4781", +"Removing element from inventory: 001_macOS_CVE-2024-23203", +"Removing element from inventory: 001_macOS_CVE-2024-23204", +"Removing element from inventory: 001_macOS_CVE-2024-23206", +"Removing element from inventory: 001_macOS_CVE-2024-23207", +"Removing element from inventory: 001_macOS_CVE-2024-23208", +"Removing element from inventory: 001_macOS_CVE-2024-23209", +"Removing element from inventory: 001_macOS_CVE-2024-23210", +"Removing element from inventory: 001_macOS_CVE-2024-23211", +"Removing element from inventory: 001_macOS_CVE-2024-23212", +"Removing element from inventory: 001_macOS_CVE-2024-23213", +"Removing element from inventory: 001_macOS_CVE-2024-23214", +"Removing element from inventory: 001_macOS_CVE-2024-23215", +"Removing element from inventory: 001_macOS_CVE-2024-23217", +"Removing element from inventory: 001_macOS_CVE-2024-23218", +"Removing element from inventory: 001_macOS_CVE-2024-23222", +"Removing element from inventory: 001_macOS_CVE-2024-23223", +"Removing element from inventory: 001_macOS_CVE-2024-23224" ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_002.out 
b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_002.out index c565204faa5..02c67a94f2d 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_002.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_002.out @@ -1,10 +1,10 @@ [ -"Removing element from inventory: test_node_name_002_Microsoft Windows 10 Pro_CVE-2024-21338", -"Removing element from inventory: test_node_name_002_Microsoft Windows 10 Pro_CVE-2024-21340", -"Removing element from inventory: test_node_name_002_Microsoft Windows 10 Pro_CVE-2024-21341", -"Removing element from inventory: test_node_name_002_Microsoft Windows 10 Pro_CVE-2024-21371", -"Removing element from inventory: test_node_name_002_Microsoft Windows 10 Pro_CVE-2024-21372", -"Removing element from inventory: test_node_name_002_Microsoft Windows 10 Pro_CVE-2024-21405", +"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21338", +"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21340", +"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21341", +"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21371", +"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21372", +"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21405", "Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21341\",\"cvss\":{\"cvss3\":{\"base_score\":6.8}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046", "Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21338\",\"cvss\":{\"cvss3\":{\"base_score\":7.8}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", "Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21340\",\"cvss\":{\"cvss3\":{\"base_score\":4.6}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_003.out index e08b5e78590..6b374ce7627 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_003.out @@ -1,6 +1,6 @@ [ "Processing and publish key: CVE-2023-32040", -"Deleting os agent vulnerabilities key: test_node_name_002_Microsoft Windows 10 Pro", +"Deleting os agent vulnerabilities key: 002_Microsoft Windows 10 Pro", "Vulnerability report for agent ID 002, clean all OS alert.", "1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"status\":\"Clear\",\"title\":\"There is no information of installed packages. Vulnerabilities cleared.\",\"type\":\"Packages\"}}" ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out index f4f05747f0d..baa61bad79c 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out @@ -8,7 +8,7 @@ "Scanning package - 'gzip' (Installed Version: 1.10-0ubuntu4.1, Security Vulnerability: CVE-2022-1271). 
Identified vulnerability: Version: 0. Required Version Threshold: 1.10-4ubuntu4. Required Version Threshold (or Equal): .", "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to ''). - Agent 'agent_ubuntu_22' (ID: '000', Version: 'v4.7.1').", "Vulnerability scan for package 'gzip' on Agent '000' has completed.", - "Inserting agent package key: node01_000_040334345fd47ab6e72026cf3c45640456198fb4 -> CVE-2022-1271", + "Inserting agent package key: 000_040334345fd47ab6e72026cf3c45640456198fb4 -> CVE-2022-1271", "Processing and publish key: CVE-2022-1271", "Vulnerability scan for OS 'Red Hat Enterprise Linux' on Agent '001' has completed.", "Translation for package 'lua-libs' in platform 'Linux' not found. Using provided packageName.", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out index 4cfd7cf9046..c237e2562a0 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out @@ -6,7 +6,7 @@ "Scanning package - 'gzip' (Installed Version: 1.10-0ubuntu4.1, Security Vulnerability: CVE-2022-1271). Identified vulnerability: Version: 0. Required Version Threshold: 1.10-4ubuntu4. Required Version Threshold (or Equal): .", "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to ''). - Agent 'agent_ubuntu_22' (ID: '000', Version:", "Vulnerability scan for package 'gzip' on Agent '000' has completed.", - "Inserting agent package key: wazuh-manager_000_040334345fd47ab6e72026cf3c45640456198fb4 -> CVE-2022-1271", + "Inserting agent package key: 000_040334345fd47ab6e72026cf3c45640456198fb4 -> CVE-2022-1271", "Processing and publish key: CVE-2022-1271", "Event type: 8 processed" ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_004.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_004.out index 8ed32bd8aac..0ceff2193c1 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_004.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_004.out @@ -3,5 +3,5 @@ "Vulnerability scanner in manager deactivated. 
Performing clean-up.", "Event type: 10 processed", "Processing and publish key: CVE-2022-1271", - "Deleting package agent vulnerabilities key: wazuh-manager_000_040334345fd47ab6e72026cf3c45640456198fb4" + "Deleting package agent vulnerabilities key: 000_040334345fd47ab6e72026cf3c45640456198fb4" ] diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp index 785afb70723..3207f68aef0 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp @@ -62,8 +62,6 @@ class TCleanAgentInventory final std::shared_ptr handleRequest(std::shared_ptr data) override { std::string agentKey; - agentKey.append(data->agentNodeName()); - agentKey.append("_"); agentKey.append(data->agentId()); agentKey.append("_"); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDeleteInventory.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDeleteInventory.hpp index 0a8b00dd27d..19b01adce23 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDeleteInventory.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDeleteInventory.hpp @@ -53,8 +53,6 @@ class TEventDeleteInventory final std::shared_ptr handleRequest(std::shared_ptr data) override { std::string key; - key.append(data->agentNodeName()); - key.append("_"); key.append(data->agentId()); key.append("_"); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp index 67b7b65025b..f84426caf08 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp @@ -163,7 +163,6 @@ class TEventDetailsBuilder final : public AbstractHandleragentNodeName()); populateField(agent, "/id"_json_pointer, data->agentId()); populateField(agent, "/name"_json_pointer, data->agentName()); populateField(agent, "/type"_json_pointer, "wazuh"); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventInsertInventory.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventInsertInventory.hpp index a1057581955..5a450119d9c 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventInsertInventory.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventInsertInventory.hpp @@ -53,8 +53,6 @@ class TEventInsertInventory final std::shared_ptr handleRequest(std::shared_ptr data) override { std::string key; - key.append(data->agentNodeName()); - key.append("_"); key.append(data->agentId()); key.append("_"); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanInventorySync.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanInventorySync.hpp index db1137785a6..4986546445f 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanInventorySync.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanInventorySync.hpp @@ -50,8 +50,6 @@ class TScanInventorySync final std::shared_ptr handleRequest(std::shared_ptr data) override { std::string key; - key.append(data->agentNodeName()); - key.append("_"); key.append(data->agentId()); key.append("_"); diff 
--git a/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanAgentInventory_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanAgentInventory_test.cpp index 41613e4dab2..83eab414bd7 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanAgentInventory_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanAgentInventory_test.cpp @@ -71,9 +71,9 @@ void CleanAgentInventoryTest::SetUp() { m_inventoryDatabase->createColumn(OS_INITIAL_SCAN); } - m_inventoryDatabase->put("node01_001_fdbd3c83c04c74d0cc7ad2f0e04ed88adfd74ad5", + m_inventoryDatabase->put("001_fdbd3c83c04c74d0cc7ad2f0e04ed88adfd74ad5", "CVE-2021-33560,CVE-2019-13627,CVE-2021-40528"); - m_inventoryDatabase->put("node01_001_ec465b7eb5fa011a336e95614072e4c7f1a65a53", "CVE-2024-5678,CVE-2023-5362"); + m_inventoryDatabase->put("001_ec465b7eb5fa011a336e95614072e4c7f1a65a53", "CVE-2024-5678,CVE-2023-5362"); } void CleanAgentInventoryTest::TearDown() @@ -93,10 +93,9 @@ TEST_F(CleanAgentInventoryTest, TestInstantiationOfTheeventInsertInventoryClass) TEST_F(CleanAgentInventoryTest, CleanAgentDataSuccessfulPackage) { // Created dummy data. - m_inventoryDatabase->put( - "node01_001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "CVE-1234-2024,CVE-4321-2024", PACKAGE); - m_inventoryDatabase->put("node01_001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "CVE-1234-2024,CVE-4321-2024", OS); - m_inventoryDatabase->put("node01_002_cccccccccccccccccccccccccccccccccccccccc", "CVE-1234-2024,CVE-4321-2024", OS); + m_inventoryDatabase->put("001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "CVE-1234-2024,CVE-4321-2024", PACKAGE); + m_inventoryDatabase->put("001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "CVE-1234-2024,CVE-4321-2024", OS); + m_inventoryDatabase->put("002_cccccccccccccccccccccccccccccccccccccccc", "CVE-1234-2024,CVE-4321-2024", OS); m_inventoryDatabase->put("001", "1", OS_INITIAL_SCAN); m_inventoryDatabase->put("002", "2", OS_INITIAL_SCAN); @@ -121,9 +120,9 @@ TEST_F(CleanAgentInventoryTest, CleanAgentDataSuccessfulPackage) // Check if the inventory was deleted. std::string value; - EXPECT_FALSE(m_inventoryDatabase->get("node01_001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", value, PACKAGE)); - EXPECT_FALSE(m_inventoryDatabase->get("node01_001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", value, OS)); - EXPECT_TRUE(m_inventoryDatabase->get("node01_002_cccccccccccccccccccccccccccccccccccccccc", value, OS)); + EXPECT_FALSE(m_inventoryDatabase->get("001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", value, PACKAGE)); + EXPECT_FALSE(m_inventoryDatabase->get("001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", value, OS)); + EXPECT_TRUE(m_inventoryDatabase->get("002_cccccccccccccccccccccccccccccccccccccccc", value, OS)); EXPECT_FALSE(m_inventoryDatabase->get("001", value, OS_INITIAL_SCAN)); EXPECT_TRUE(m_inventoryDatabase->get("002", value, OS_INITIAL_SCAN)); } @@ -131,10 +130,9 @@ TEST_F(CleanAgentInventoryTest, CleanAgentDataSuccessfulPackage) TEST_F(CleanAgentInventoryTest, CleanAgentDataIntegrityClear) { // Created dummy data. 
- m_inventoryDatabase->put( - "node01_001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "CVE-1234-2024,CVE-4321-2024", PACKAGE); - m_inventoryDatabase->put("node01_001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "CVE-1234-2024,CVE-4321-2024", OS); - m_inventoryDatabase->put("node01_002_cccccccccccccccccccccccccccccccccccccccc", "CVE-1234-2024,CVE-4321-2024", OS); + m_inventoryDatabase->put("001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "CVE-1234-2024,CVE-4321-2024", PACKAGE); + m_inventoryDatabase->put("001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "CVE-1234-2024,CVE-4321-2024", OS); + m_inventoryDatabase->put("002_cccccccccccccccccccccccccccccccccccccccc", "CVE-1234-2024,CVE-4321-2024", OS); m_inventoryDatabase->put("001", "1", OS_INITIAL_SCAN); m_inventoryDatabase->put("002", "2", OS_INITIAL_SCAN); @@ -160,9 +158,9 @@ TEST_F(CleanAgentInventoryTest, CleanAgentDataIntegrityClear) // Check if the inventory was deleted. std::string value; - EXPECT_FALSE(m_inventoryDatabase->get("node01_001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", value, PACKAGE)); - EXPECT_TRUE(m_inventoryDatabase->get("node01_001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", value, OS)); - EXPECT_TRUE(m_inventoryDatabase->get("node01_002_cccccccccccccccccccccccccccccccccccccccc", value, OS)); + EXPECT_FALSE(m_inventoryDatabase->get("001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", value, PACKAGE)); + EXPECT_TRUE(m_inventoryDatabase->get("001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", value, OS)); + EXPECT_TRUE(m_inventoryDatabase->get("002_cccccccccccccccccccccccccccccccccccccccc", value, OS)); EXPECT_TRUE(m_inventoryDatabase->get("001", value, OS_INITIAL_SCAN)); EXPECT_TRUE(m_inventoryDatabase->get("002", value, OS_INITIAL_SCAN)); } @@ -170,10 +168,9 @@ TEST_F(CleanAgentInventoryTest, CleanAgentDataIntegrityClear) TEST_F(CleanAgentInventoryTest, CleanAgentDataIntegrityClearOs) { // Created dummy data. - m_inventoryDatabase->put( - "node01_001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "CVE-1234-2024,CVE-4321-2024", PACKAGE); - m_inventoryDatabase->put("node01_001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "CVE-1234-2024,CVE-4321-2024", OS); - m_inventoryDatabase->put("node01_002_cccccccccccccccccccccccccccccccccccccccc", "CVE-1234-2024,CVE-4321-2024", OS); + m_inventoryDatabase->put("001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "CVE-1234-2024,CVE-4321-2024", PACKAGE); + m_inventoryDatabase->put("001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "CVE-1234-2024,CVE-4321-2024", OS); + m_inventoryDatabase->put("002_cccccccccccccccccccccccccccccccccccccccc", "CVE-1234-2024,CVE-4321-2024", OS); m_inventoryDatabase->put("001", "1", OS_INITIAL_SCAN); m_inventoryDatabase->put("002", "2", OS_INITIAL_SCAN); @@ -199,9 +196,9 @@ TEST_F(CleanAgentInventoryTest, CleanAgentDataIntegrityClearOs) // Check if the inventory was deleted. 
std::string value; - EXPECT_TRUE(m_inventoryDatabase->get("node01_001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", value, PACKAGE)); - EXPECT_FALSE(m_inventoryDatabase->get("node01_001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", value, OS)); - EXPECT_TRUE(m_inventoryDatabase->get("node01_002_cccccccccccccccccccccccccccccccccccccccc", value, OS)); + EXPECT_TRUE(m_inventoryDatabase->get("001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", value, PACKAGE)); + EXPECT_FALSE(m_inventoryDatabase->get("001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", value, OS)); + EXPECT_TRUE(m_inventoryDatabase->get("002_cccccccccccccccccccccccccccccccccccccccc", value, OS)); EXPECT_FALSE(m_inventoryDatabase->get("001", value, OS_INITIAL_SCAN)); EXPECT_TRUE(m_inventoryDatabase->get("002", value, OS_INITIAL_SCAN)); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanInventory_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanInventory_test.cpp index 3c76a00545f..93b8514933b 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanInventory_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanInventory_test.cpp @@ -93,17 +93,15 @@ TEST_F(CleanInventoryTest, TestInstantiationOfTheeventInsertInventoryClass) TEST_F(CleanInventoryTest, TestCleanAllData) { // Created dummy data. - m_inventoryDatabase->put( - "node01_001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "CVE-1234-2024,CVE-4321-2024", PACKAGE); - m_inventoryDatabase->put("node01_001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "CVE-1234-2024,CVE-4321-2024", OS); - m_inventoryDatabase->put("node01_002_cccccccccccccccccccccccccccccccccccccccc", "CVE-1234-2024,CVE-4321-2024", OS); - m_inventoryDatabase->put("node02_002_cccccccccccccccccccccccccccccccccccccccc", "CVE-1234-2024,CVE-4321-2024", OS); + m_inventoryDatabase->put("001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "CVE-1234-2024,CVE-4321-2024", PACKAGE); + m_inventoryDatabase->put("001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "CVE-1234-2024,CVE-4321-2024", OS); + m_inventoryDatabase->put("002_cccccccccccccccccccccccccccccccccccccccc", "CVE-1234-2024,CVE-4321-2024", OS); m_inventoryDatabase->put("001", "1", OS_INITIAL_SCAN); m_inventoryDatabase->put("002", "2", OS_INITIAL_SCAN); auto spSubOrchestration = std::make_shared>>>(); - EXPECT_CALL(*spSubOrchestration, handleRequest(testing::_)).Times(8); + EXPECT_CALL(*spSubOrchestration, handleRequest(testing::_)).Times(6); auto cleanInventory = std::make_shared, @@ -122,10 +120,9 @@ TEST_F(CleanInventoryTest, TestCleanAllData) // Check if the inventory was deleted. 
std::string value; - EXPECT_FALSE(m_inventoryDatabase->get("node01_001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", value, PACKAGE)); - EXPECT_FALSE(m_inventoryDatabase->get("node01_001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", value, OS)); - EXPECT_FALSE(m_inventoryDatabase->get("node01_002_cccccccccccccccccccccccccccccccccccccccc", value, OS)); - EXPECT_FALSE(m_inventoryDatabase->get("node02_002_cccccccccccccccccccccccccccccccccccccccc", value, OS)); + EXPECT_FALSE(m_inventoryDatabase->get("001_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", value, PACKAGE)); + EXPECT_FALSE(m_inventoryDatabase->get("001_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", value, OS)); + EXPECT_FALSE(m_inventoryDatabase->get("002_cccccccccccccccccccccccccccccccccccccccc", value, OS)); EXPECT_FALSE(m_inventoryDatabase->get("001", value, OS_INITIAL_SCAN)); EXPECT_FALSE(m_inventoryDatabase->get("002", value, OS_INITIAL_SCAN)); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDeleteInventory_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDeleteInventory_test.cpp index 1f0564e7a08..4ee20255737 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDeleteInventory_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDeleteInventory_test.cpp @@ -105,7 +105,7 @@ TEST_F(EventDeleteInventoryTest, TestInstantiationOfTheEventInsertInventoryClass TEST_F(EventDeleteInventoryTest, TestHandleRequestPackageDelete) { // Instantiation of the eventDeleteInventory class. - m_inventoryDatabase->put("node01_001_ec465b7eb5fa011a336e95614072e4c7f1a65a53", CVEID1, PACKAGE); + m_inventoryDatabase->put("001_ec465b7eb5fa011a336e95614072e4c7f1a65a53", CVEID1, PACKAGE); auto eventDeleteInventory = std::make_shared>>(*m_inventoryDatabase); @@ -145,15 +145,14 @@ TEST_F(EventDeleteInventoryTest, TestHandleRequestPackageDelete) EXPECT_EQ(scanContext->m_elements.size(), 1); - std::string expectedKey = "node01_001_ec465b7eb5fa011a336e95614072e4c7f1a65a53_" + CVEID1; + std::string expectedKey = "001_ec465b7eb5fa011a336e95614072e4c7f1a65a53_" + CVEID1; std::unordered_map::const_iterator itElement; EXPECT_NE(itElement = scanContext->m_elements.find(CVEID1), scanContext->m_elements.end()); std::string expectedValue = - R"({"id":"node01_001_ec465b7eb5fa011a336e95614072e4c7f1a65a53_)" + CVEID1 + R"(","operation":"DELETED"})"; + R"({"id":"001_ec465b7eb5fa011a336e95614072e4c7f1a65a53_)" + CVEID1 + R"(","operation":"DELETED"})"; EXPECT_STREQ(itElement->second.dump().c_str(), expectedValue.c_str()); std::string inventoryEntry; - EXPECT_FALSE( - m_inventoryDatabase->get("node01_001_ec465b7eb5fa011a336e95614072e4c7f1a65a53", inventoryEntry, PACKAGE)); + EXPECT_FALSE(m_inventoryDatabase->get("001_ec465b7eb5fa011a336e95614072e4c7f1a65a53", inventoryEntry, PACKAGE)); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp index 73ffe2bbe27..4901e4e5dfe 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp @@ -226,7 +226,7 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS2) auto scanContext = std::make_shared>(syscollectorDelta); // Mock one vulnerability scanContext->m_elements[CVEID] = - R"({"operation":"INSERTED", "id":"node01_001_ec465b7eb5fa011a336e95614072e4c7f1a65a53_CVE-2024-1234"})"_json; + R"({"operation":"INSERTED", 
"id":"001_ec465b7eb5fa011a336e95614072e4c7f1a65a53_CVE-2024-1234"})"_json; TEventDetailsBuilder> eventDetailsBuilder( spDatabaseFeedManagerMock); @@ -239,14 +239,12 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS2) auto& element = scanContext->m_elements[CVEID]; EXPECT_STREQ(element.at("operation").get_ref().c_str(), "INSERTED"); - std::string elementId = std::string(scanContext->agentNodeName()) + "_" + std::string(scanContext->agentId()) + - "_" + std::string(scanContext->packageItemId()) + "_" + CVEID; + std::string elementId = + std::string(scanContext->agentId()) + "_" + std::string(scanContext->packageItemId()) + "_" + CVEID; EXPECT_STREQ(element.at("id").get_ref().c_str(), elementId.c_str()); auto& elementData = scanContext->m_elements[CVEID].at("data"); - EXPECT_STREQ(elementData.at("agent").at("ephemeral_id").get_ref().c_str(), - scanContext->agentNodeName().data()); EXPECT_STREQ(elementData.at("agent").at("id").get_ref().c_str(), scanContext->agentId().data()); EXPECT_STREQ(elementData.at("agent").at("name").get_ref().c_str(), scanContext->agentName().data()); @@ -414,7 +412,7 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS3) syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); auto scanContext = std::make_shared>(syscollectorDelta); scanContext->m_elements[CVEID] = - R"({"operation":"INSERTED", "id":"node01_001_ec465b7eb5fa011a336e95614072e4c7f1a65a53_CVE-2024-1234"})"_json; + R"({"operation":"INSERTED", "id":"001_ec465b7eb5fa011a336e95614072e4c7f1a65a53_CVE-2024-1234"})"_json; scanContext->m_alerts[CVEID] = nlohmann::json::object(); // Mock one alert TEventDetailsBuilder> eventDetailsBuilder( @@ -428,14 +426,12 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS3) auto& element = scanContext->m_elements[CVEID]; EXPECT_STREQ(element.at("operation").get_ref().c_str(), "INSERTED"); - std::string elementId = std::string(scanContext->agentNodeName()) + "_" + std::string(scanContext->agentId()) + - "_" + std::string(scanContext->packageItemId()) + "_" + CVEID; + std::string elementId = + std::string(scanContext->agentId()) + "_" + std::string(scanContext->packageItemId()) + "_" + CVEID; EXPECT_STREQ(element.at("id").get_ref().c_str(), elementId.c_str()); auto& elementData = scanContext->m_elements[CVEID].at("data"); - EXPECT_STREQ(elementData.at("agent").at("ephemeral_id").get_ref().c_str(), - scanContext->agentNodeName().data()); EXPECT_STREQ(elementData.at("agent").at("id").get_ref().c_str(), scanContext->agentId().data()); EXPECT_STREQ(elementData.at("agent").at("name").get_ref().c_str(), scanContext->agentName().data()); @@ -684,7 +680,7 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulOsInserted) syscollectorDelta = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); auto scanContext = std::make_shared>(syscollectorDelta); scanContext->m_elements[CVEID] = - R"({"operation":"INSERTED", "id":"test_node_name_002_Microsoft Windows 10 Pro_CVE-2024-1234"})"_json; + R"({"operation":"INSERTED", "id":"002_Microsoft Windows 10 Pro_CVE-2024-1234"})"_json; TEventDetailsBuilder> eventDetailsBuilder( spDatabaseFeedManagerMock); @@ -697,14 +693,12 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulOsInserted) auto& element = scanContext->m_elements[CVEID]; EXPECT_STREQ(element.at("operation").get_ref().c_str(), "INSERTED"); - std::string elementId = std::string(scanContext->agentNodeName()) + "_" + std::string(scanContext->agentId()) + - "_" + std::string(scanContext->osName()) + "_" + CVEID; + 
std::string elementId = + std::string(scanContext->agentId()) + "_" + std::string(scanContext->osName()) + "_" + CVEID; EXPECT_STREQ(element.at("id").get_ref().c_str(), elementId.c_str()); auto& elementData = scanContext->m_elements[CVEID].at("data"); - EXPECT_STREQ(elementData.at("agent").at("ephemeral_id").get_ref().c_str(), - scanContext->agentNodeName().data()); EXPECT_STREQ(elementData.at("agent").at("id").get_ref().c_str(), scanContext->agentId().data()); EXPECT_STREQ(elementData.at("agent").at("name").get_ref().c_str(), scanContext->agentName().data()); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventInsertInventory_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventInsertInventory_test.cpp index 8f9288e4ae6..5ee55307745 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventInsertInventory_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventInsertInventory_test.cpp @@ -147,8 +147,7 @@ TEST_F(EventInsertInventoryTest, TestHandleRequestPackageInsertNonExisting) EXPECT_STREQ(element.at("operation").get_ref().c_str(), "INSERTED"); std::string inventoryEntry; - EXPECT_TRUE( - m_inventoryDatabase->get("node01_001_ec465b7eb5fa011a336e95614072e4c7f1a65a53", inventoryEntry, PACKAGE)); + EXPECT_TRUE(m_inventoryDatabase->get("001_ec465b7eb5fa011a336e95614072e4c7f1a65a53", inventoryEntry, PACKAGE)); EXPECT_STREQ(inventoryEntry.c_str(), CVEID1.c_str()); } @@ -159,7 +158,7 @@ TEST_F(EventInsertInventoryTest, TestHandleRequestPackageInsertNonExisting) TEST_F(EventInsertInventoryTest, TestHandleRequestPackageInsertAlreadyExisting) { // Instantiation of the eventInsertInventory class. - m_inventoryDatabase->put("node01_001_ec465b7eb5fa011a336e95614072e4c7f1a65a53", CVEID2, PACKAGE); + m_inventoryDatabase->put("001_ec465b7eb5fa011a336e95614072e4c7f1a65a53", CVEID2, PACKAGE); auto eventInsertInventory = std::make_shared>>(*m_inventoryDatabase); @@ -202,8 +201,7 @@ TEST_F(EventInsertInventoryTest, TestHandleRequestPackageInsertAlreadyExisting) EXPECT_STREQ(element.at("operation").get_ref().c_str(), "INSERTED"); std::string inventoryEntry; - EXPECT_TRUE( - m_inventoryDatabase->get("node01_001_ec465b7eb5fa011a336e95614072e4c7f1a65a53", inventoryEntry, PACKAGE)); + EXPECT_TRUE(m_inventoryDatabase->get("001_ec465b7eb5fa011a336e95614072e4c7f1a65a53", inventoryEntry, PACKAGE)); auto listCve = Utils::split(inventoryEntry, ','); EXPECT_EQ(listCve.size(), 2); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp index 5d821e04b0a..be616e708f9 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp @@ -209,8 +209,8 @@ TEST_F(EventPackageAlertDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS2) auto& element = scanContext->m_elements[CVEID]; EXPECT_STREQ(element.at("operation").get_ref().c_str(), "INSERTED"); - std::string elementId = std::string(scanContext->agentNodeName()) + "_" + std::string(scanContext->agentId()) + - "_" + std::string(scanContext->packageItemId()) + "_" + CVEID; + std::string elementId = + std::string(scanContext->agentId()) + "_" + std::string(scanContext->packageItemId()) + "_" + CVEID; EXPECT_EQ(scanContext->m_alerts.size(), 1); EXPECT_NE(scanContext->m_alerts.find(CVEID), scanContext->m_alerts.end()); From 
5b62ee303632dd8a705df1507c2b88e093741201 Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Thu, 2 May 2024 11:44:07 +0200 Subject: [PATCH 005/419] feat: graceful process shutdown log in wazuh-db --- src/wazuh_db/main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/wazuh_db/main.c b/src/wazuh_db/main.c index 92a873f5265..a9e888f5df0 100644 --- a/src/wazuh_db/main.c +++ b/src/wazuh_db/main.c @@ -273,6 +273,7 @@ int main(int argc, char ** argv) snprintf(path_template, sizeof(path_template), "%s/%s", WDB2_DIR, WDB_PROF_NAME); unlink(path_template); mdebug1("Template file removed again: %s", path_template); + minfo("Graceful process shutdown."); return EXIT_SUCCESS; From d5087365ddc2ff5c15e922d34b0a197ad5c74ffd Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Thu, 2 May 2024 11:54:24 +0200 Subject: [PATCH 006/419] fix: create agent database files from template atomically --- src/wazuh_db/wdb.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/src/wazuh_db/wdb.c b/src/wazuh_db/wdb.c index f7726caadce..963697bdac0 100644 --- a/src/wazuh_db/wdb.c +++ b/src/wazuh_db/wdb.c @@ -471,6 +471,7 @@ wdb_t * wdb_open_tasks() { /* Create database for agent from profile. Returns 0 on success or -1 on error. */ int wdb_create_agent_db2(const char * agent_id) { char path[OS_FLSIZE + 1]; + char path_temp[OS_FLSIZE + 1]; char buffer[4096]; FILE *source; FILE *dest; @@ -494,8 +495,9 @@ int wdb_create_agent_db2(const char * agent_id) { } snprintf(path, OS_FLSIZE, "%s/%s.db", WDB2_DIR, agent_id); + snprintf(path_temp, OS_FLSIZE, "%s.new", path); - if (!(dest = wfopen(path, "w"))) { + if (!(dest = wfopen(path_temp, "w"))) { merror("Couldn't create database '%s': %s (%d)", path, strerror(errno), errno); fclose(source); return -1; @@ -503,7 +505,7 @@ int wdb_create_agent_db2(const char * agent_id) { while (nbytes = fread(buffer, 1, 4096, source), nbytes) { if (fwrite(buffer, 1, nbytes, dest) != nbytes) { - unlink(path); + unlink(path_temp); result = -1; break; } @@ -511,18 +513,24 @@ int wdb_create_agent_db2(const char * agent_id) { fclose(source); if (fclose(dest) == -1) { - merror("Couldn't create file %s completely ", path); + merror("Couldn't create file %s completely", path_temp); return -1; } if (result < 0) { - unlink(path); + unlink(path_temp); return -1; } - if (chmod(path, 0640) < 0) { - merror(CHMOD_ERROR, path, errno, strerror(errno)); - unlink(path); + if (chmod(path_temp, 0640) < 0) { + merror(CHMOD_ERROR, path_temp, errno, strerror(errno)); + unlink(path_temp); + return -1; + } + + if (OS_MoveFile(path_temp, path) < 0) { + merror(RENAME_ERROR, path_temp, path, errno, strerror(errno)); + unlink(path_temp); return -1; } From 14e9785494343cc1318eb977cb024565621070b4 Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Thu, 2 May 2024 13:00:11 +0200 Subject: [PATCH 007/419] test: add unit tests for wdb_create_agent_db2 --- src/unit_tests/wazuh_db/CMakeLists.txt | 3 + .../wazuh_db/test_create_agent_db.c | 132 ++++++++++++++++++ 2 files changed, 135 insertions(+) create mode 100644 src/unit_tests/wazuh_db/test_create_agent_db.c diff --git a/src/unit_tests/wazuh_db/CMakeLists.txt b/src/unit_tests/wazuh_db/CMakeLists.txt index 1f1f36f6a79..d4d5d051c75 100644 --- a/src/unit_tests/wazuh_db/CMakeLists.txt +++ b/src/unit_tests/wazuh_db/CMakeLists.txt @@ -194,6 +194,9 @@ list(APPEND wdb_tests_flags "-Wl,--wrap,wdb_create_state_json -Wl,--wrap,wdb_get list(APPEND wdb_tests_names "test_wdb_pool") list(APPEND wdb_tests_flags 
"-Wl,--wrap,pthread_mutex_lock -Wl,--wrap,pthread_mutex_unlock") +list(APPEND wdb_tests_names "test_create_agent_db") +list(APPEND wdb_tests_flags "-Wl,--wrap,wfopen,--wrap,fopen,--wrap,fclose,--wrap,fflush,--wrap,fgets,--wrap,fgetpos,--wrap,fopen,--wrap,fread,--wrap,fseek,--wrap,fwrite,--wrap,remove,--wrap,fgetc,--wrap,chmod,--wrap,stat,--wrap,OS_MoveFile ${DEBUG_OP_WRAPPERS}") + # Add extra compiling flags add_compile_options(-Wall) link_directories(${SRC_FOLDER}/build/shared_modules/router) diff --git a/src/unit_tests/wazuh_db/test_create_agent_db.c b/src/unit_tests/wazuh_db/test_create_agent_db.c new file mode 100644 index 00000000000..a3bb680b2ae --- /dev/null +++ b/src/unit_tests/wazuh_db/test_create_agent_db.c @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2015, Wazuh Inc. + * May 2, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#include +#include +#include +#include +#include +#include + +#include "../wazuh_db/wdb.h" +#include "../wrappers/common.h" +#include "../wrappers/wazuh/shared/debug_op_wrappers.h" +#include "../wrappers/wazuh/shared/file_op_wrappers.h" +#include "../wrappers/libc/stdio_wrappers.h" + +int setup(__attribute__((unused)) void ** state) { + test_mode = 1; + return 0; +} + +int teardown(__attribute__((unused)) void ** state) { + test_mode = 0; + return 0; +} + +void test_wdb_create_agent_db2_ok(void ** state) { + expect_wfopen(WDB2_DIR "/" WDB_PROF_NAME, "r", (void *)1); + expect_wfopen(WDB2_DIR "/000.db.new", "w", (void *)2); + expect_fread("", 0); + expect_fclose((void *)1, 0); + expect_fclose((void *)2, 0); + expect_string(__wrap_chmod, path, WDB2_DIR "/000.db.new"); + will_return(__wrap_chmod, 0); + expect_string(__wrap_OS_MoveFile, src, WDB2_DIR "/000.db.new"); + expect_string(__wrap_OS_MoveFile, dst, WDB2_DIR "/000.db"); + will_return(__wrap_OS_MoveFile, 0); + + int result = wdb_create_agent_db2("000"); + assert_int_equal(result, 0); +} + +void test_wdb_create_agent_db2_wfopen_error(void ** state) { + expect_wfopen(WDB2_DIR "/" WDB_PROF_NAME, "r", (void *)1); + expect_wfopen(WDB2_DIR "/000.db.new", "w", NULL); + expect_string(__wrap__merror, formatted_msg, "Couldn't create database 'queue/db/000.db': Success (0)"); + expect_fclose((void *)1, 0); + + errno = 0; + int result = wdb_create_agent_db2("000"); + assert_int_equal(result, -1); +} + +void test_wdb_create_agent_db2_fwrite_error(void ** state) { + expect_wfopen(WDB2_DIR "/" WDB_PROF_NAME, "r", (void *)1); + expect_wfopen(WDB2_DIR "/000.db.new", "w", (void *)2); + expect_fread("Hello", 5); + will_return(__wrap_fwrite, 0); + expect_fclose((void *)1, 0); + expect_fclose((void *)2, 0); + + int result = wdb_create_agent_db2("000"); + assert_int_equal(result, -1); +} + +void test_wdb_create_agent_db2_fclose_error(void ** state) { + expect_wfopen(WDB2_DIR "/" WDB_PROF_NAME, "r", (void *)1); + expect_wfopen(WDB2_DIR "/000.db.new", "w", (void *)2); + expect_fread("", 0); + expect_fclose((void *)1, 0); + expect_fclose((void *)2, -1); + expect_string(__wrap__merror, formatted_msg, "Couldn't create file queue/db/000.db.new completely"); + + int result = wdb_create_agent_db2("000"); + assert_int_equal(result, -1); +} + +void test_wdb_create_agent_db2_chmod_error(void ** state) { + expect_wfopen(WDB2_DIR "/" WDB_PROF_NAME, "r", (void *)1); + expect_wfopen(WDB2_DIR "/000.db.new", "w", (void *)2); + expect_fread("", 0); + expect_fclose((void *)1, 0); + 
expect_fclose((void *)2, 0); + expect_string(__wrap_chmod, path, WDB2_DIR "/000.db.new"); + will_return(__wrap_chmod, -1); + expect_string(__wrap__merror, formatted_msg, "(1127): Could not chmod object 'queue/db/000.db.new' due to [(0)-(Success)]."); + + errno = 0; + int result = wdb_create_agent_db2("000"); + assert_int_equal(result, -1); +} + +void test_wdb_create_agent_db2_rename_error(void ** state) { + expect_wfopen(WDB2_DIR "/" WDB_PROF_NAME, "r", (void *)1); + expect_wfopen(WDB2_DIR "/000.db.new", "w", (void *)2); + expect_fread("", 0); + expect_fclose((void *)1, 0); + expect_fclose((void *)2, 0); + expect_string(__wrap_chmod, path, WDB2_DIR "/000.db.new"); + will_return(__wrap_chmod, 0); + expect_string(__wrap_OS_MoveFile, src, WDB2_DIR "/000.db.new"); + expect_string(__wrap_OS_MoveFile, dst, WDB2_DIR "/000.db"); + will_return(__wrap_OS_MoveFile, -1); + + expect_string(__wrap__merror, formatted_msg, "(1124): Could not rename file 'queue/db/000.db.new' to 'queue/db/000.db' due to [(0)-(Success)]."); + + errno = 0; + int result = wdb_create_agent_db2("000"); + assert_int_equal(result, -1); +} + +int main() { + test_mode = 1; + + const struct CMUnitTest tests[] = { + cmocka_unit_test(test_wdb_create_agent_db2_ok), + cmocka_unit_test(test_wdb_create_agent_db2_wfopen_error), + cmocka_unit_test(test_wdb_create_agent_db2_fwrite_error), + cmocka_unit_test(test_wdb_create_agent_db2_fclose_error), + cmocka_unit_test(test_wdb_create_agent_db2_chmod_error), + cmocka_unit_test(test_wdb_create_agent_db2_rename_error), + }; + + return cmocka_run_group_tests(tests, setup, teardown); +} From fd5df06599311a20db9d40652171e4cc2181f320 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Thu, 2 May 2024 12:57:26 -0300 Subject: [PATCH 008/419] Deleted opensearch api. --- framework/requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/framework/requirements.txt b/framework/requirements.txt index 9aa726591a6..1bc043900b8 100644 --- a/framework/requirements.txt +++ b/framework/requirements.txt @@ -56,7 +56,6 @@ multidict==5.2.0 mypy-extensions==0.4.3 numpy==1.26.0 openapi-spec-validator==0.2.6 -opensearch-py==2.5.0 packaging==20.9 pathlib==1.0.1 protobuf==3.19.6 From fe7651b27b78fb732f6c6af38e27d8b7f113a300 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Thu, 2 May 2024 15:54:56 -0300 Subject: [PATCH 009/419] Updated DEPS_VERSION. --- src/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index 3693076cbdf..068398840eb 100644 --- a/src/Makefile +++ b/src/Makefile @@ -1338,7 +1338,7 @@ TAR := tar -xf GUNZIP := gunzip GZIP := gzip CURL := curl -so -DEPS_VERSION = 25 +DEPS_VERSION = 25-23112 RESOURCES_URL_BASE := https://packages.wazuh.com/deps/ RESOURCES_URL := $(RESOURCES_URL_BASE)$(DEPS_VERSION) CPYTHON := cpython From 71d5e640f0fdca81fddf3c35b09d0c7631481d54 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Thu, 2 May 2024 14:49:48 -0300 Subject: [PATCH 010/419] Fix cna detection based on the vendor and CNA.
--- .../databaseFeedManager.hpp | 21 ++++++---- .../src/scanOrchestrator/packageScanner.hpp | 5 ++- .../tests/mocks/MockDatabaseFeedManager.hpp | 4 +- .../tests/unit/databaseFeedManager_test.cpp | 42 +++++++++++-------- 4 files changed, 43 insertions(+), 29 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp index 6e6b53825f1..647bd66115b 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp @@ -628,17 +628,20 @@ class TDatabaseFeedManager final : public Observer /** * @brief Get CNA/ADP name based on the package vendor when it contains a specific word. * @param vendor Package vendor. + * @param platform Os platform. * @return CNA/ADP name. Empty string otherwise. */ - std::string getCnaNameByContains(std::string_view vendor) const + std::string getCnaNameByContains(std::string_view vendor, std::string_view platform) const { if (const auto& vendorMap = GlobalData::instance().vendorMaps(); vendorMap.contains("contains")) { for (const auto& item : vendorMap.at("contains")) { - if (vendor.find(item.begin().key()) != std::string::npos) + if (const auto& platforms = item.begin().value().at("platforms"); + vendor.find(item.begin().key()) != std::string::npos && + std::find(platforms.begin(), platforms.end(), platform) != platforms.end()) { - return item.begin().value(); + return item.begin().value().at("cna"); } } } @@ -649,17 +652,21 @@ class TDatabaseFeedManager final : public Observer /** * @brief Get CNA/ADP name based on the package vendor when it starts with a specific word. * @param vendor Package vendor. + * @param platform Os platform. + * * @return CNA/ADP name. Empty string otherwise. */ - std::string getCnaNameByPrefix(std::string_view vendor) const + std::string getCnaNameByPrefix(std::string_view vendor, std::string_view platform) const { if (const auto& vendorMap = GlobalData::instance().vendorMaps(); vendorMap.contains("prefix")) { for (const auto& item : vendorMap.at("prefix")) { - if (Utils::startsWith(vendor.data(), item.begin().key())) + if (const auto& platforms = item.begin().value().at("platforms"); + Utils::startsWith(vendor.data(), item.begin().key()) && + std::find(platforms.begin(), platforms.end(), platform) != platforms.end()) { - return item.begin().value(); + return item.begin().value().at("cna"); } } } @@ -675,7 +682,7 @@ class TDatabaseFeedManager final : public Observer * * @return The size of the translation cache as specified in the configuration settings. 
*/ - uint32_t getCacheSizeFromConfig() + uint32_t getCacheSizeFromConfig() const { return TPolicyManager::instance().getTranslationLRUSize(); } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp index b153fd6f318..45f745763c4 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp @@ -181,10 +181,11 @@ class TPackageScanner final : public AbstractHandlergetCnaNameByPrefix(ctx->packageVendor().data()); + cnaName = m_databaseFeedManager->getCnaNameByPrefix(ctx->packageVendor().data(), ctx->osPlatform().data()); if (cnaName.empty()) { - cnaName = m_databaseFeedManager->getCnaNameByContains(ctx->packageVendor().data()); + cnaName = + m_databaseFeedManager->getCnaNameByContains(ctx->packageVendor().data(), ctx->osPlatform().data()); if (cnaName.empty()) { return DEFAULT_CNA; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp index e42eb97248e..b34646f2e85 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp @@ -109,13 +109,13 @@ class MockDatabaseFeedManager * @brief Mock method for getCnaNameByContains. * */ - MOCK_METHOD(std::string, getCnaNameByContains, (std::string_view vendor), ()); + MOCK_METHOD(std::string, getCnaNameByContains, (std::string_view vendor, std::string_view platform), ()); /** * @brief Mock method for getCnaNameByPrefix. * */ - MOCK_METHOD(std::string, getCnaNameByPrefix, (std::string_view vendor), ()); + MOCK_METHOD(std::string, getCnaNameByPrefix, (std::string_view vendor, std::string_view platform), ()); }; #endif // _MOCK_DATABASEFEEDMANAGER_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp index 3142b8ab525..c89c0f0215d 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp @@ -1547,12 +1547,12 @@ void DatabaseFeedManagerVendorMapTest::SetUp() auto map = R"( { "prefix": [ - {"Canonical": "canonical"}, - {"Ubuntu": "canonical"}, - {"CentOS": "redhat"} + {"Canonical":{"cna":"canonical", "platforms":["ubuntu"]}}, + {"Ubuntu": {"cna":"canonical", "platforms":["ubuntu"]}}, + {"CentOS": {"cna":"redhat", "platforms":["centos"]}} ], "contains": [ - {"@ubuntu.com": "canonical"} + {"@ubuntu.com": {"cna":"canonical", "platforms":["ubuntu"]}} ], "format": [ {"pypi": "pypi"}, @@ -1636,49 +1636,55 @@ TEST_F(DatabaseFeedManagerVendorMapTest, TestGetCnaNameByFormat) TEST_F(DatabaseFeedManagerVendorMapTest, TestGetCnaNameByPrefix) { - auto cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("Canonical"); + auto cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("Canonical", "ubuntu"); EXPECT_EQ(cnaName, "canonical"); - cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("canonical"); + cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("canonical", "ubuntu"); EXPECT_EQ(cnaName, ""); - cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("Ubuntu"); + cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("Ubuntu", "ubuntu"); 
EXPECT_EQ(cnaName, "canonical"); - cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("ubuntu"); + cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("ubuntu", "ubuntu"); EXPECT_EQ(cnaName, ""); - cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("This is a Canonical package"); + cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("This is a Canonical package", "ubuntu"); EXPECT_EQ(cnaName, ""); - cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("CentOS"); + cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("CentOS", "centos"); EXPECT_EQ(cnaName, "redhat"); - cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("centos"); + cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("centos", "centos"); EXPECT_EQ(cnaName, ""); - cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("This is a CentOS package"); + cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("This is a CentOS package", "centos"); EXPECT_EQ(cnaName, ""); - cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("invalid"); + cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("invalid", "centos"); + EXPECT_EQ(cnaName, ""); + + cnaName = m_spDatabaseFeedManager->getCnaNameByPrefix("Ubuntu", "centos"); EXPECT_EQ(cnaName, ""); } TEST_F(DatabaseFeedManagerVendorMapTest, TestGetCnaNameByContains) { - auto cnaName = m_spDatabaseFeedManager->getCnaNameByContains("This is an Ubuntu package"); + auto cnaName = m_spDatabaseFeedManager->getCnaNameByContains("This is an Ubuntu package", "ubuntu"); EXPECT_EQ(cnaName, ""); - cnaName = m_spDatabaseFeedManager->getCnaNameByContains("John Doe "); + cnaName = m_spDatabaseFeedManager->getCnaNameByContains("John Doe ", "ubuntu"); EXPECT_EQ(cnaName, "canonical"); - cnaName = m_spDatabaseFeedManager->getCnaNameByContains("@ubuntu.com"); + cnaName = m_spDatabaseFeedManager->getCnaNameByContains("@ubuntu.com", "ubuntu"); EXPECT_EQ(cnaName, "canonical"); - cnaName = m_spDatabaseFeedManager->getCnaNameByContains("@Ubuntu"); + cnaName = m_spDatabaseFeedManager->getCnaNameByContains("@Ubuntu", "ubuntu"); + EXPECT_EQ(cnaName, ""); + + cnaName = m_spDatabaseFeedManager->getCnaNameByContains("invalid", "ubuntu"); EXPECT_EQ(cnaName, ""); - cnaName = m_spDatabaseFeedManager->getCnaNameByContains("invalid"); + cnaName = m_spDatabaseFeedManager->getCnaNameByContains("@ubuntu.com", "centos"); EXPECT_EQ(cnaName, ""); } From 860004364fe80e8c4d59ff29aa0af8a37dc1f1b9 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 3 May 2024 01:28:54 -0300 Subject: [PATCH 011/419] Add changes to fix some qa tests. 
--- .../qa/test_data/003/expected_002.out | 2 +- .../qa/test_data/003/expected_004.out | 2 +- .../qa/test_data/003/expected_006.out | 2 +- .../qa/test_data/003/expected_008.out | 2 +- .../qa/test_data/009/expected_002.out | 7 ------- .../qa/test_data/010/expected_002.out | 4 ++-- .../qa/test_data/010/expected_003.out | 4 ++-- .../vulnerability_scanner/qa/test_data/010/input_002.json | 4 ++-- .../vulnerability_scanner/qa/test_data/010/input_003.json | 4 ++-- .../qa/test_data/011/expected_002.out | 4 ++-- .../qa/test_data/011/expected_005.out | 1 - .../qa/test_data_policy/001/agentOsData.json | 2 +- .../qa/test_data_policy/001/config.json | 4 +--- .../qa/test_data_policy/001/configDisabled.json | 4 +--- .../qa/test_data_policy/001/expected_003.out | 4 ++-- .../qa/test_data_policy/001/globalData.json | 4 ++-- .../qa/test_data_policy/002/agentOsData.json | 2 +- .../qa/test_data_policy/002/config.json | 4 +--- .../qa/test_data_policy/002/configManagerDisabled.json | 4 +--- .../qa/test_data_policy/002/globalData.json | 4 ++-- .../tests/unit/policyManager_test.cpp | 4 +--- .../testtool/scanner/config.content_generation.json | 4 +--- .../vulnerability_scanner/testtool/scanner/config.json | 7 +++---- 23 files changed, 31 insertions(+), 52 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_002.out index 04e5a941a63..8c14f8a618d 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_002.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_002.out @@ -1,3 +1,3 @@ [ - "Match found, the package 'networkd-dispatcher', is vulnerable to 'CVE-2022-29799'. Current version: '1.6-0ubuntu3.5' (less than '1.7-0ubuntu3.5' or equal to ''). - Agent '' (ID: '001', Version: '')." + "Match found, the package 'networkd-dispatcher', is vulnerable to 'CVE-2022-29799'. Current version: '1.6-0ubuntu3.5' (less than '1.7-0ubuntu3.4' or equal to ''). - Agent '' (ID: '001', Version: '')." ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_004.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_004.out index 642aee1d8fa..771c72cb640 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_004.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_004.out @@ -1,4 +1,4 @@ [ - "Match found, the package 'networkd-dispatcher', is vulnerable to 'CVE-2022-29799'. Current version: '2.1-1~ubuntu20.04.3' (less than '2.1-2~ubuntu20.04.3' or equal to ''). - Agent '' (ID: '002', Version: '').", + "Match found, the package 'networkd-dispatcher', is vulnerable to 'CVE-2022-29799'. Current version: '2.1-1~ubuntu20.04.3' (less than '2.1-2~ubuntu20.04.2' or equal to ''). - Agent '' (ID: '002', Version: '').", "Match found, the package 'networkd-dispatcher', is vulnerable to 'CVE-2022-29800'. Current version: '2.1-1~ubuntu20.04.3' (less than '2.1-2~ubuntu20.04.2' or equal to ''). - Agent '' (ID: '002', Version: '')." ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_006.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_006.out index ea3f9b3061a..1921a299872 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_006.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_006.out @@ -1,4 +1,4 @@ [ - "Match found, the package 'networkd-dispatcher', is vulnerable to 'CVE-2022-29799'. 
Current version: '2.1-1ubuntu0.21.10.2' (less than '2.1-2ubuntu0.21.10.2' or equal to ''). - Agent '' (ID: '003', Version: '').", + "Match found, the package 'networkd-dispatcher', is vulnerable to 'CVE-2022-29799'. Current version: '2.1-1ubuntu0.21.10.2' (less than '2.1-2ubuntu0.21.10.1' or equal to ''). - Agent '' (ID: '003', Version: '').", "Match found, the package 'networkd-dispatcher', is vulnerable to 'CVE-2022-29800'. Current version: '2.1-1ubuntu0.21.10.2' (less than '2.1-2ubuntu0.21.10.1' or equal to ''). - Agent '' (ID: '003', Version: '')." ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_008.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_008.out index afd9a252a70..b57631e2708 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_008.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/expected_008.out @@ -1,4 +1,4 @@ [ - "Match found, the package 'networkd-dispatcher', is vulnerable to 'CVE-2022-29799'. Current version: '2.1-1ubuntu0.22.04.2' (less than '2.1-2ubuntu0.22.04.2' or equal to ''). - Agent '' (ID: '004', Version: '').", + "Match found, the package 'networkd-dispatcher', is vulnerable to 'CVE-2022-29799'. Current version: '2.1-1ubuntu0.22.04.2' (less than '2.1-2ubuntu0.22.04.1' or equal to ''). - Agent '' (ID: '004', Version: '').", "Match found, the package 'networkd-dispatcher', is vulnerable to 'CVE-2022-29800'. Current version: '2.1-1ubuntu0.22.04.2' (less than '2.1-2ubuntu0.22.04.1' or equal to ''). - Agent '' (ID: '004', Version: '')." ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/expected_002.out index 74e45f0060b..0c83c86aed0 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/expected_002.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/expected_002.out @@ -6,27 +6,20 @@ "Match found, the package 'firefox' is vulnerable to 'CVE-2007-3670' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2007-3827' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2007-4013' due to default status. - Agent '' (ID: '009', Version: '').", - "Match found, the package 'firefox' is vulnerable to 'CVE-2007-5967' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2007-6715' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2008-2399' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2008-4059' due to default status. - Agent '' (ID: '009', Version: '').", - "Match found, the package 'firefox' is vulnerable to 'CVE-2009-2409' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2009-2469' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2009-4102' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2009-4129' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2009-4130' due to default status. 
- Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2009-4630' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2011-0064' due to default status. - Agent '' (ID: '009', Version: '').", - "Match found, the package 'firefox' is vulnerable to 'CVE-2011-3389' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2012-4929' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2012-4930' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2014-6492' due to default status. - Agent '' (ID: '009', Version: '').", - "Match found, the package 'firefox' is vulnerable to 'CVE-2015-4000' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2016-7152' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2016-7153' due to default status. - Agent '' (ID: '009', Version: '').", - "Match found, the package 'firefox' is vulnerable to 'CVE-2018-10229' due to default status. - Agent '' (ID: '009', Version: '').", - "Match found, the package 'firefox' is vulnerable to 'CVE-2018-8024' due to default status. - Agent '' (ID: '009', Version: '').", - "Match found, the package 'firefox' is vulnerable to 'CVE-2022-4066' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox', is vulnerable to 'CVE-2023-4573'. Current version: '116.0.2-1' (less than '117.0' or equal to ''). - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox', is vulnerable to 'CVE-2023-4574'. Current version: '116.0.2-1' (less than '117.0' or equal to ''). - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox', is vulnerable to 'CVE-2023-4575'. Current version: '116.0.2-1' (less than '117.0' or equal to ''). - Agent '' (ID: '009', Version: '').", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_002.out index f744bee5ed1..04bcb4418a0 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_002.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_002.out @@ -1,4 +1,4 @@ [ - "Translation for package 'Skype version 8.109' in platform 'windows' found in Level 2 cache.", - "Match found, the package 'skype' is vulnerable to 'CVE-2016-5720' due to default status. - Agent '' (ID: '010', Version: '')." + "Translation for package 'Skype version 7.2' in platform 'windows' found in Level 2 cache.", + "Match found, the package 'skype', is vulnerable to 'CVE-2017-9948'. Current version: '7.2' is equal to '7.2'. - Agent '' (ID: '010', Version: '')." 
] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_003.out index b4b6416d286..e66b5f49ab1 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/expected_003.out @@ -1,4 +1,4 @@ [ - "Translation for package 'Skype version 8.109' in platform 'windows' found in Level 1 cache.", - "Match found, the package 'skype' is vulnerable to 'CVE-2016-5720' due to default status. - Agent '' (ID: '010', Version: '')." + "Translation for package 'Skype version 7.2' in platform 'windows' found in Level 1 cache.", + "Match found, the package 'skype', is vulnerable to 'CVE-2017-9948'. Current version: '7.2' is equal to '7.2'. - Agent '' (ID: '010', Version: '')." ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_002.json index c4d865f09d1..7a228d0f56e 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_002.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_002.json @@ -5,10 +5,10 @@ }, "data_type": "dbsync_packages", "data": { - "version": "8.109", + "version": "7.2", "vendor": "Microsoft Corporation", "architecture": "i686", - "name": "Skype version 8.109", + "name": "Skype version 7.2", "size": 0, "format": "win", "checksum": "ca80938eaaf2815edd6ce33b33a5f0b174f2e4ec", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_003.json index c4d865f09d1..7a228d0f56e 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_003.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_003.json @@ -5,10 +5,10 @@ }, "data_type": "dbsync_packages", "data": { - "version": "8.109", + "version": "7.2", "vendor": "Microsoft Corporation", "architecture": "i686", - "name": "Skype version 8.109", + "name": "Skype version 7.2", "size": 0, "format": "win", "checksum": "ca80938eaaf2815edd6ce33b33a5f0b174f2e4ec", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_002.out index 272352dc391..a6b6ed1ec1e 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_002.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_002.out @@ -1,6 +1,6 @@ [ "Translation for package 'Opera Stable 108.0.5067.29' in platform 'windows' found in Level 2 cache.", "Initiating a vulnerability scan for package 'opera_browser' (win) (opera) with CVE Numbering Authorities (CNA) 'nvd' on Agent '' (ID: '001', Version: '').", - "Vendor match for Package: opera_browser, Version: 108.0.5067.29, CVE: CVE-2004-2659, Vendor: opera", - "Match found, the package 'opera_browser' is vulnerable to 'CVE-2004-2659' due to default status. - Agent '' (ID: '001', Version: '')." + "Vendor match for Package: opera_browser, Version: 108.0.5067.29, CVE: CVE-2008-7297, Vendor: opera", + "Match found, the package 'opera_browser' is vulnerable to 'CVE-2008-7297' due to default status. - Agent '' (ID: '001', Version: '')." 
] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_005.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_005.out index 192356cd872..dc31512bfd0 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_005.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_005.out @@ -1,7 +1,6 @@ [ "Initiating a vulnerability scan for package 'mail' (pkg) (apple) with CVE Numbering Authorities (CNA) 'nvd' on Agent '' (ID: '002', Version: '').", "The vendor is not the same for Package: mail, Version: 16.0, CVE: CVE-2008-4584, Content vendor: chilkat_software, Package vendor: apple", - "The vendor is not the same for Package: mail, Version: 16.0, CVE: CVE-2015-9097, Content vendor: mail_project, Package vendor: apple", "The vendor is not the same for Package: mail, Version: 16.0, CVE: CVE-2017-15806, Content vendor: zetacomponents, Package vendor: apple", "Vendor match for Package: mail, Version: 16.0, CVE: CVE-2005-2512, Vendor: apple", "Match found, the package 'mail' is vulnerable to 'CVE-2005-2512' due to default status. - Agent '' (ID: '002', Version: '')." diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentOsData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentOsData.json index 71d0ed63ce9..0fe7b6af977 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentOsData.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentOsData.json @@ -26,7 +26,7 @@ "hostname": "vagrant", "os_major": "8", "os_name": "Red Hat Enterprise Linux", - "os_platform": "Linux", + "os_platform": "rhel", "os_version": "8.9", "reference": "e778c1fe83f2b15cdb013471a2c8223132c9e1ca", "release": "4.14.311-233.529.amzn2.x86_64", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/config.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/config.json index fe220eba116..28f77d8c0d4 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/config.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/config.json @@ -2,9 +2,7 @@ "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", - "managerNodeName": "wazuh-manager", - "managerDisabledScan": 1 + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" }, "indexer": { "enabled": "yes", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabled.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabled.json index 6a31a161867..04182759e39 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabled.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabled.json @@ -2,9 +2,7 @@ "vulnerability-detection": { "enabled": "no", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", - "managerNodeName": "wazuh-manager", - "managerDisabledScan": 1 + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" }, "indexer": { "enabled": "yes", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out index b23a0cfc42c..0c4462c45d0 100644 --- 
a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out @@ -10,8 +10,8 @@ "Vulnerability scan for package 'gzip' on Agent '000' has completed.", "Inserting agent package key: 000_040334345fd47ab6e72026cf3c45640456198fb4 -> CVE-2022-1271", "Processing and publish key: CVE-2022-1271", - "Vulnerability scan for OS 'Red Hat Enterprise Linux' on Agent '001' has completed.", - "Translation for package 'lua-libs' in platform 'Linux' not found. Using provided packageName.", + "Vulnerability scan for OS 'enterprise_linux' on Agent '001' has completed.", + "Translation for package 'lua-libs' in platform 'rhel' not found. Using provided packageName.", "Initiating a vulnerability scan for package 'lua-libs' (rpm) (red hat, inc.) with CVE Numbering Authorities (CNA) 'redhat_8' on Agent 'agent_redhat_8' (ID: '001', Version: 'v4.7.1').", "Vulnerability scan for package 'lua-libs' on Agent '001' has completed.", "Event type: 7 processed" diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/globalData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/globalData.json index 35392297dc1..5025d61115f 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/globalData.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/globalData.json @@ -11,7 +11,7 @@ "os_minor": "04", "os_codename": "jammy", "os_build": "ubuntu", - "os_platform": "Linux", + "os_platform": "ubuntu", "os_uname": "agent_ubuntu_22 | 6.5.13-7-MANJARO | #1 SMP PREEMPT_DYNAMIC Wed Dec 20 07:15:58 UTC 2023", "os_arch": "x86_64", "version": "Wazuh v4.7.1", @@ -39,7 +39,7 @@ "os_minor": "9", "os_codename": "Ootpa", "os_build": "rhel", - "os_platform": "Linux", + "os_platform": "rhel", "os_uname": "agent_redhat_8 | 6.5.13-7-MANJARO | #1 SMP PREEMPT_DYNAMIC Wed Dec 20 07:15:58 UTC 2023", "os_arch": "x86_64", "version": "Wazuh v4.7.1", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/agentOsData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/agentOsData.json index 71d0ed63ce9..0fe7b6af977 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/agentOsData.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/agentOsData.json @@ -26,7 +26,7 @@ "hostname": "vagrant", "os_major": "8", "os_name": "Red Hat Enterprise Linux", - "os_platform": "Linux", + "os_platform": "rhel", "os_version": "8.9", "reference": "e778c1fe83f2b15cdb013471a2c8223132c9e1ca", "release": "4.14.311-233.529.amzn2.x86_64", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/config.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/config.json index fe220eba116..28f77d8c0d4 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/config.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/config.json @@ -2,9 +2,7 @@ "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", - "managerNodeName": "wazuh-manager", - "managerDisabledScan": 1 + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" }, "indexer": { "enabled": "yes", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/configManagerDisabled.json 
b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/configManagerDisabled.json index 308e8893246..519d55ab6d2 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/configManagerDisabled.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/configManagerDisabled.json @@ -2,9 +2,7 @@ "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", - "managerNodeName": "wazuh-manager", - "managerDisabledScan": 1 + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" }, "indexer": { "enabled": "yes", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/globalData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/globalData.json index 35392297dc1..5025d61115f 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/globalData.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/globalData.json @@ -11,7 +11,7 @@ "os_minor": "04", "os_codename": "jammy", "os_build": "ubuntu", - "os_platform": "Linux", + "os_platform": "ubuntu", "os_uname": "agent_ubuntu_22 | 6.5.13-7-MANJARO | #1 SMP PREEMPT_DYNAMIC Wed Dec 20 07:15:58 UTC 2023", "os_arch": "x86_64", "version": "Wazuh v4.7.1", @@ -39,7 +39,7 @@ "os_minor": "9", "os_codename": "Ootpa", "os_build": "rhel", - "os_platform": "Linux", + "os_platform": "rhel", "os_uname": "agent_redhat_8 | 6.5.13-7-MANJARO | #1 SMP PREEMPT_DYNAMIC Wed Dec 20 07:15:58 UTC 2023", "os_arch": "x86_64", "version": "Wazuh v4.7.1", diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp index 20edd0fa467..f0e6ed3c575 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp @@ -381,9 +381,7 @@ TEST_F(PolicyManagerTest, validConfigurationCheckFeedUpdateIntervalGreaterThan60 const auto& UPDATER_BASIC_CONFIG {nlohmann::json::parse(R"({ "vulnerability-detection": { "enabled": "yes", - "index-status": "yes", - "managerNodeName": "wazuh-manager", - "managerDisabledScan": 1 + "index-status": "yes" }, "updater": { "interval": 3600, diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.content_generation.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.content_generation.json index edb884f047a..a5cbd4c0547 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.content_generation.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.content_generation.json @@ -1,9 +1,7 @@ { "vulnerability-detection": { "enabled": "yes", - "index-status": "yes", - "managerNodeName": "wazuh-manager", - "managerDisabledScan": 1 + "index-status": "yes" }, "indexer": { "enabled": "yes", diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.json index bc9fe2e5345..483f45e6514 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.json @@ -2,9 +2,7 @@ "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", - "managerNodeName": "wazuh-manager", - 
"managerDisabledScan": 1 + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" }, "indexer": { "enabled": "yes", @@ -20,5 +18,6 @@ "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" } - } + }, + "managerNodeName": "wazuh-manager" } From 7cd5dc73b9d655071e68f49010d3283d14cf3a15 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Wed, 24 Apr 2024 01:29:45 -0300 Subject: [PATCH 012/419] Add feature to synchronize the indexer inventory. --- .../include/indexerConnector.hpp | 48 +++- .../src/indexerConnector.cpp | 266 ++++++++++++++---- .../testtool/cmdArgParser.hpp | 14 + .../testtool/input/config.json | 8 +- .../testtool/input/example.json | 35 +-- .../indexer_connector/testtool/main.cpp | 6 + src/shared_modules/utils/promiseFactory.h | 29 +- .../scanOrchestrator/factoryOrchestrator.hpp | 8 +- .../scanOrchestrator/globalSyncInventory.hpp | 62 ++++ .../src/scanOrchestrator/scanContext.hpp | 12 + .../src/scanOrchestrator/scanOrchestrator.hpp | 6 + 11 files changed, 391 insertions(+), 103 deletions(-) create mode 100644 src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/globalSyncInventory.hpp diff --git a/src/shared_modules/indexer_connector/include/indexerConnector.hpp b/src/shared_modules/indexer_connector/include/indexerConnector.hpp index 0b35235e009..7c184fdaf64 100644 --- a/src/shared_modules/indexer_connector/include/indexerConnector.hpp +++ b/src/shared_modules/indexer_connector/include/indexerConnector.hpp @@ -12,6 +12,7 @@ #ifndef _INDEXER_CONNECTOR_HPP #define _INDEXER_CONNECTOR_HPP +#include "rocksDBWrapper.hpp" #if __GNUC__ >= 4 #define EXPORTED __attribute__((visibility("default"))) #else @@ -22,12 +23,13 @@ static constexpr auto DEFAULT_INTERVAL = 60u; class ServerSelector; class SecureCommunication; - +#include "threadDispatcher.h" #include "threadEventDispatcher.hpp" #include #include using ThreadDispatchQueue = ThreadEventDispatcher&)>>; +using ThreadSyncQueue = Utils::AsyncDispatcher>; /** * @brief IndexerConnector class. @@ -35,6 +37,17 @@ using ThreadDispatchQueue = ThreadEventDispatcher m_stopping {false}; + std::unique_ptr m_db; + std::unique_ptr m_syncQueue; + std::string m_indexName; + std::mutex m_syncMutex; + std::unique_ptr m_dispatcher; + /** * @brief Intialize method used to load template data and initialize the index. * @@ -53,10 +72,27 @@ class EXPORTED IndexerConnector final * @param secureCommunication Secure communication. */ void initialize(const nlohmann::json& templateData, - const std::string& indexName, const std::shared_ptr& selector, const SecureCommunication& secureCommunication); + /** + * @brief Save documents into the database. + * @param documents Documents to be saved. + */ + void saveDocuments(const std::vector& documents); + + /** + * @brief This method is used to calculate the diff between the inventory database and the indexer. + * @param responseJson Response JSON. + * @param agentId Agent ID. + * @param secureCommunication Secure communication. + * @param selector Server selector. + */ + void diff(nlohmann::json& responseJson, + const std::string& agentId, + const SecureCommunication& secureCommunication, + const std::shared_ptr& selector); + public: /** * @brief Class constructor that initializes the publisher. @@ -85,6 +121,14 @@ class EXPORTED IndexerConnector final * @param message Message to be published. */ void publish(const std::string& message); + + /** + * @brief Sync the inventory database with the indexer. 
+ * This method is used to synchronize the inventory database to the indexer. + * + * @param agentId Agent ID. + */ + void sync(const std::string& agentId); }; #endif // _INDEXER_CONNECTOR_HPP diff --git a/src/shared_modules/indexer_connector/src/indexerConnector.cpp b/src/shared_modules/indexer_connector/src/indexerConnector.cpp index 6ddfb92a266..ba65aac6af9 100644 --- a/src/shared_modules/indexer_connector/src/indexerConnector.cpp +++ b/src/shared_modules/indexer_connector/src/indexerConnector.cpp @@ -34,36 +34,20 @@ constexpr auto MAX_WAIT_TIME {60}; constexpr auto START_TIME {1}; constexpr auto DOUBLE_FACTOR {2}; -std::unordered_map> QUEUE_MAP; - // Single thread because the events needs to be processed in order. constexpr auto DATABASE_WORKERS = 1; constexpr auto DATABASE_BASE_PATH = "queue/indexer/"; +constexpr auto SYNC_WORKERS = 1; +constexpr auto SYNC_QUEUE_LIMIT = 4096; -IndexerConnector::IndexerConnector( - const nlohmann::json& config, - const std::string& templatePath, - const std::function& - logFunction, - const uint32_t& timeout) +static void initConfiguration(SecureCommunication& secureCommunication, const nlohmann::json& config) { - if (logFunction) - { - Log::assignLogFunction(logFunction); - } - - // Get index name. - const auto& indexName {config.at("name").get_ref()}; - std::string caRootCertificate; std::string sslCertificate; std::string sslKey; std::string username; std::string password; - auto secureCommunication = SecureCommunication::builder(); - if (config.contains("ssl")) { if (config.at("ssl").contains("certificate_authorities") && @@ -103,6 +87,65 @@ IndexerConnector::IndexerConnector( .sslCertificate(sslCertificate) .sslKey(sslKey) .caRootCertificate(caRootCertificate); +} + +void IndexerConnector::saveDocuments(const std::vector& documents) +{ + for (const auto& document : documents) + { + if (document.deleted) + { + m_db->delete_(document.id); + } + else + { + m_db->put(document.id, document.data); + } + } +} + +static void builderBulkDelete(std::string& bulkData, std::string_view id, std::string_view index) +{ + bulkData.append(R"({"delete":{"_index":")"); + bulkData.append(index); + bulkData.append(R"(","_id":")"); + bulkData.append(id); + bulkData.append(R"("}})"); + bulkData.append("\n"); +} + +static void builderBulkIndex(std::string& bulkData, std::string_view id, std::string_view index, std::string_view data) +{ + bulkData.append(R"({"index":{"_index":")"); + bulkData.append(index); + bulkData.append(R"(","_id":")"); + bulkData.append(id); + bulkData.append(R"("}})"); + bulkData.append("\n"); + bulkData.append(data); + bulkData.append("\n"); +} + +IndexerConnector::IndexerConnector( + const nlohmann::json& config, + const std::string& templatePath, + const std::function& + logFunction, + const uint32_t& timeout) +{ + if (logFunction) + { + Log::assignLogFunction(logFunction); + } + + // Get index name. + m_indexName = config.at("name").get_ref(); + + m_db = std::make_unique(std::string(DATABASE_BASE_PATH) + "db/" + m_indexName); + + auto secureCommunication = SecureCommunication::builder(); + initConfiguration(secureCommunication, config); // Read template file. std::ifstream templateFile(templatePath); @@ -115,9 +158,11 @@ IndexerConnector::IndexerConnector( // Initialize publisher. 
auto selector {std::make_shared(config.at("hosts"), timeout, secureCommunication)}; - QUEUE_MAP[this] = std::make_unique( + m_dispatcher = std::make_unique( [=](std::queue& dataQueue) { + std::scoped_lock lock(m_syncMutex); + if (!m_initialized && m_initializeThread.joinable()) { logDebug2(IC_NAME, "Waiting for initialization thread to process events."); @@ -134,6 +179,7 @@ IndexerConnector::IndexerConnector( std::string bulkData; url.append("/_bulk"); + std::vector documents; while (!dataQueue.empty()) { auto data = dataQueue.front(); @@ -143,49 +189,80 @@ IndexerConnector::IndexerConnector( if (parsedData.at("operation").get_ref().compare("DELETED") == 0) { - bulkData.append(R"({"delete":{"_index":")"); - bulkData.append(indexName); - bulkData.append(R"(","_id":")"); - bulkData.append(id); - bulkData.append(R"("}})"); - bulkData.append("\n"); + builderBulkDelete(bulkData, id, m_indexName); + documents.push_back({id, data, true}); } else { - bulkData.append(R"({"index":{"_index":")"); - bulkData.append(indexName); - bulkData.append(R"(","_id":")"); - bulkData.append(id); - bulkData.append(R"("}})"); - bulkData.append("\n"); - bulkData.append(parsedData.at("data").dump()); - bulkData.append("\n"); + const auto dataString = parsedData.at("data").dump(); + builderBulkIndex(bulkData, id, m_indexName, dataString); + documents.push_back({id, dataString, false}); } } // Process data. HTTPRequest::instance().post( HttpURL(url), bulkData, - [&](const std::string& response) { logDebug2(IC_NAME, "Response: %s", response.c_str()); }, - [&](const std::string& error, const long statusCode) + [](const std::string& response) { logDebug2(IC_NAME, "Response: %s", response.c_str()); }, + [](const std::string& error, const long statusCode) { - // TODO: Need to handle the case when the index is not created yet, to avoid losing data. logError(IC_NAME, "%s, status code: %ld", error.c_str(), statusCode); throw std::runtime_error(error); }, "", DEFAULT_HEADERS, secureCommunication); + + // Save documents to the database. + saveDocuments(documents); }, - DATABASE_BASE_PATH + indexName, + DATABASE_BASE_PATH + m_indexName, ELEMENTS_PER_BULK); + m_syncQueue = std::make_unique( + [=](const std::string& agentId) + { + try + { + std::scoped_lock lock(m_syncMutex); + nlohmann::json responseJson; + auto url = selector->getNext().append("/").append(m_indexName).append("/_search"); + + nlohmann::json postData; + + // TODO: Add scroll support. 
+ postData["query"]["match"]["agent.id"] = agentId; + postData["size"] = 10000; + postData["_source"] = nlohmann::json::array({"_id"}); + + logDebug2(IC_NAME, "Payload: %s", postData.dump().c_str()); + + HTTPRequest::instance().post( + HttpURL(url), + postData.dump(), + [&responseJson](const std::string& response) { responseJson = nlohmann::json::parse(response); }, + [](const std::string& error, const long) { throw std::runtime_error(error); }, + "", + DEFAULT_HEADERS, + secureCommunication); + logDebug2(IC_NAME, "Response: %s", responseJson.dump().c_str()); + diff(responseJson, agentId, secureCommunication, selector); + } + catch (const std::exception& e) + { + logError(IC_NAME, "Failed to sync agent '%s' with the indexer.", agentId.c_str()); + logDebug1(IC_NAME, "Error: %s", e.what()); + } + }, + SYNC_WORKERS, + SYNC_QUEUE_LIMIT); + m_initializeThread = std::thread( // coverity[copy_constructor_call] [=]() { auto sleepTime = std::chrono::seconds(START_TIME); - std::unique_lock lock(m_mutex); + std::unique_lock lock(m_mutex); auto warningPrinted {false}; do { @@ -197,14 +274,14 @@ IndexerConnector::IndexerConnector( sleepTime = std::chrono::seconds(MAX_WAIT_TIME); } - initialize(templateData, indexName, selector, secureCommunication); + initialize(templateData, selector, secureCommunication); } catch (const std::exception& e) { logDebug1(IC_NAME, "Unable to initialize IndexerConnector for index '%s': %s. Retrying in %ld " "seconds.", - indexName.c_str(), + m_indexName.c_str(), e.what(), sleepTime.count()); if (!warningPrinted) @@ -212,11 +289,11 @@ IndexerConnector::IndexerConnector( logWarn(IC_NAME, "IndexerConnector initialization failed for index '%s', retrying until the connection " "is successful.", - indexName.c_str()); + m_indexName.c_str()); warningPrinted = true; } } - } while (!m_initialized && !m_cv.wait_for(lock, sleepTime, [&]() { return m_stopping.load(); })); + } while (!m_initialized && !m_cv.wait_for(lock, sleepTime, [this]() { return m_stopping.load(); })); }); } @@ -225,8 +302,6 @@ IndexerConnector::~IndexerConnector() m_stopping.store(true); m_cv.notify_all(); - QUEUE_MAP.erase(this); - if (m_initializeThread.joinable()) { m_initializeThread.join(); @@ -235,11 +310,98 @@ IndexerConnector::~IndexerConnector() void IndexerConnector::publish(const std::string& message) { - QUEUE_MAP[this]->push(message); + m_dispatcher->push(message); +} + +void IndexerConnector::sync(const std::string& agentId) +{ + m_syncQueue->push(agentId); +} + +void IndexerConnector::diff(nlohmann::json& responseJson, + const std::string& agentId, + const SecureCommunication& secureCommunication, + const std::shared_ptr& selector) +{ + std::vector> status; + std::vector> actions; + + // Move elements to vector. 
+ for (const auto& hit : responseJson.at("hits").at("hits")) + { + if (hit.contains("_id")) + { + status.emplace_back(hit.at("_id").get_ref(), false); + } + } + + for (const auto& [key, value] : m_db->seek(agentId)) + { + bool found {false}; + for (auto& [id, data] : status) + { + if (key.compare(id) == 0) + { + data = true; + found = true; + break; + } + } + + if (!found) + { + actions.emplace_back(key, false); + } + } + + for (const auto& [id, data] : status) + { + if (!data) + { + actions.emplace_back(id, true); + } + } + + auto url = selector->getNext(); + url.append("/_bulk"); + + std::string bulkData; + for (const auto& [id, deleted] : actions) + { + if (deleted) + { + builderBulkDelete(bulkData, id, m_indexName); + } + else + { + std::string data; + if (!m_db->get(id, data)) + { + throw std::runtime_error("Failed to get data from the database."); + } + builderBulkIndex(bulkData, id, m_indexName, data); + } + } + + if (!bulkData.empty()) + { + logDebug2(IC_NAME, "Payload: %s", bulkData.c_str()); + HTTPRequest::instance().post( + HttpURL(url), + bulkData, + [](const std::string& response) { logDebug2(IC_NAME, "Response: %s", response.c_str()); }, + [](const std::string& error, const long statusCode) + { + logError(IC_NAME, "%s, status code: %ld", error.c_str(), statusCode); + throw std::runtime_error(error); + }, + "", + DEFAULT_HEADERS, + secureCommunication); + } } void IndexerConnector::initialize(const nlohmann::json& templateData, - const std::string& indexName, const std::shared_ptr& selector, const SecureCommunication& secureCommunication) { @@ -259,11 +421,13 @@ void IndexerConnector::initialize(const nlohmann::json& templateData, }; // Define the success callback - auto onSuccess = [&](const std::string& response) { + auto onSuccess = [](const std::string&) + { + // Not used }; // Initialize template. - HTTPRequest::instance().put(HttpURL(selector->getNext() + "/_index_template/" + indexName + "_template"), + HTTPRequest::instance().put(HttpURL(selector->getNext() + "/_index_template/" + m_indexName + "_template"), templateData, onSuccess, onError, @@ -272,7 +436,7 @@ void IndexerConnector::initialize(const nlohmann::json& templateData, secureCommunication); // Initialize Index. 
- HTTPRequest::instance().put(HttpURL(selector->getNext() + "/" + indexName), + HTTPRequest::instance().put(HttpURL(selector->getNext() + "/" + m_indexName), templateData.at("template"), onSuccess, onError, @@ -281,5 +445,5 @@ void IndexerConnector::initialize(const nlohmann::json& templateData, secureCommunication); m_initialized = true; - logInfo(IC_NAME, "IndexerConnector initialized successfully for index: %s.", indexName.c_str()); + logInfo(IC_NAME, "IndexerConnector initialized successfully for index: %s.", m_indexName.c_str()); } diff --git a/src/shared_modules/indexer_connector/testtool/cmdArgParser.hpp b/src/shared_modules/indexer_connector/testtool/cmdArgParser.hpp index 7976c9d9fc1..aad93d1836f 100644 --- a/src/shared_modules/indexer_connector/testtool/cmdArgParser.hpp +++ b/src/shared_modules/indexer_connector/testtool/cmdArgParser.hpp @@ -34,6 +34,7 @@ class CmdLineArgs , m_templateFilePath {paramValueOf(argc, argv, "-t")} , m_eventsFilePath {paramValueOf(argc, argv, "-e", std::make_pair(false, ""))} , m_autoGenerated {paramValueOf(argc, argv, "-a", std::make_pair(false, ""))} + , m_agentForSyncEvent {paramValueOf(argc, argv, "-s", std::make_pair(false, ""))} , m_numberOfEvents {paramValueOf(argc, argv, "-n", std::make_pair(false, ""))} { } @@ -84,6 +85,15 @@ class CmdLineArgs return std::stoull(m_numberOfEvents); } + /** + * @brief Gets the agent id to sync event. + * @return Agent to sync event. + */ + std::string getAgentIdSyncEvent() const + { + return m_agentForSyncEvent; + } + /** * @brief Shows the help to the user. */ @@ -97,10 +107,12 @@ class CmdLineArgs << "\t-e EVENTS_FILE\tSpecifies the events file.\n" << "\t-a AUTO_GENERATED\tSpecifies if the events are auto generated.\n" << "\t-n NUMBER_OF_EVENTS\tSpecifies the number of events to generate.\n" + << "\t-s SYNC_EVENT\tSend sync event before push event.\n" << "\nExample:" << "\n\t./indexer_connector_testtool -c config.json -t template.json\n" << "\n\t./indexer_connector_testtool -c config.json -t template.json -e events.json\n" << "\n\t./indexer_connector_testtool -c config.json -t template.json -a true -n 10000\n" + << "\n\t./indexer_connector_testtool -c config.json -t template.json -s 000\n" << std::endl; } @@ -133,6 +145,8 @@ class CmdLineArgs const std::string m_eventsFilePath; const std::string m_numberOfEvents; const std::string m_autoGenerated; + const std::string m_agentForSyncEvent; + ; }; #endif // _CMD_ARGS_PARSER_HPP_ diff --git a/src/shared_modules/indexer_connector/testtool/input/config.json b/src/shared_modules/indexer_connector/testtool/input/config.json index 1ac9d4305fe..fee4d3d0f31 100644 --- a/src/shared_modules/indexer_connector/testtool/input/config.json +++ b/src/shared_modules/indexer_connector/testtool/input/config.json @@ -1,10 +1,10 @@ { - "name": "wazuh-states-vulnerabilities", + "name": "wazuh-states-vulnerabilities-cluster", "enabled": "yes", "hosts": ["https://0.0.0.0:9200"], "ssl": { - "certificate_authorities": ["~/root-ca.pem"], - "certificate": "~/indexer.pem", - "key": "~/indexer-key.pem" + "certificate_authorities": ["/etc/filebeat/certs/root-ca.pem"], + "certificate": "/etc/filebeat/certs/filebeat.pem", + "key": "/etc/filebeat/certs/filebeat-key.pem" } } diff --git a/src/shared_modules/indexer_connector/testtool/input/example.json b/src/shared_modules/indexer_connector/testtool/input/example.json index a395ea4df68..5256b94d281 100644 --- a/src/shared_modules/indexer_connector/testtool/input/example.json +++ b/src/shared_modules/indexer_connector/testtool/input/example.json @@ 
-1,47 +1,16 @@ { - "id": "worker1_000_pkghash_CVE-2022-1234", + "id": "000_pkghash_CVE-2022-1234", "operation": "INSERT", "data":{ "agent": { "build": { "original": "sample_build_1" }, - "id": "agent_id_1", + "id": "000", "name": "agent_name_1", "type": "agent_type_1", "version": "1.0.0" }, - "event": { - "action": "sample_action", - "agent_id_status": "sample_status", - "category": "sample_category", - "code": "sample_code", - "created": "2023-09-18T12:00:00Z", - "dataset": "sample_dataset", - "duration": 12345, - "end": "2023-09-18T13:00:00Z", - "hash": "sample_hash", - "id": "event_id_1", - "ingested": "2023-09-18T12:05:00Z", - "kind": "sample_kind", - "module": "sample_module", - "original": "sample_original_content", - "outcome": "sample_outcome", - "provider": "sample_provider", - "reason": "sample_reason", - "reference": "sample_reference", - "risk_score": 5.5, - "risk_score_norm": 5.0, - "sequence": 1, - "severity": 3, - "start": "2023-09-18T11:00:00Z", - "timezone": "UTC", - "type": "sample_type", - "url": "http://example.com" - }, - "labels": { - "label1": "value1" - }, "message": "Sample message", "package": { "architecture": "x64", diff --git a/src/shared_modules/indexer_connector/testtool/main.cpp b/src/shared_modules/indexer_connector/testtool/main.cpp index dd11f386cc4..604bfa5b58a 100644 --- a/src/shared_modules/indexer_connector/testtool/main.cpp +++ b/src/shared_modules/indexer_connector/testtool/main.cpp @@ -135,6 +135,12 @@ int main(const int argc, const char* argv[]) << " : " << formattedStr << std::endl; } }); + + if (!cmdArgParser.getAgentIdSyncEvent().empty()) + { + indexerConnector.sync(cmdArgParser.getAgentIdSyncEvent()); + } + // Read events file. // If the events file path is empty, then the events are generated // automatically. 
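For reference, the synchronization entry point wired to the new -s flag above can also be driven directly from code. A minimal sketch, assuming the test tool's config.json and an index template are available on disk; the paths, the empty log callback, the 60-second timeout and the agent id are placeholders rather than values mandated by the module:

    #include "indexerConnector.hpp"

    #include <fstream>
    #include <nlohmann/json.hpp>

    int main()
    {
        // Same configuration the test tool consumes (hosts, ssl, index name).
        std::ifstream configFile("input/config.json");
        const auto config = nlohmann::json::parse(configFile);

        // Log callback left empty and the timeout passed explicitly.
        IndexerConnector connector(config, "input/template.json", nullptr, 60u);

        // Queues a sync for agent '000': the connector compares its local RocksDB copy
        // of that agent's documents with what the indexer returns and bulk-indexes or
        // bulk-deletes whatever differs.
        connector.sync("000");

        return 0;
    }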
diff --git a/src/shared_modules/utils/promiseFactory.h b/src/shared_modules/utils/promiseFactory.h index 917f520b089..2544f5ea7ac 100644 --- a/src/shared_modules/utils/promiseFactory.h +++ b/src/shared_modules/utils/promiseFactory.h @@ -10,28 +10,33 @@ */ #include "abstractWait.h" +#ifndef _PROMISE_FACTORY_HPP +#define _PROMISE_FACTORY_HPP + enum PromiseType { NORMAL, SLEEP }; -template +template class PromiseFactory final { - public: - static std::shared_ptr getPromiseObject() - { - return std::make_shared(); - } +public: + static std::shared_ptr getPromiseObject() + { + return std::make_shared(); + } }; -template <> +template<> class PromiseFactory final { - public: - static std::shared_ptr getPromiseObject() - { - return std::make_shared(); - } +public: + static std::shared_ptr getPromiseObject() + { + return std::make_shared(); + } }; + +#endif // _PROMISE_FACTORY_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp index 4464b4f1e40..3c222d721c0 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp @@ -24,6 +24,7 @@ #include "eventInsertInventory.hpp" #include "eventPackageAlertDetailsBuilder.hpp" #include "eventSendReport.hpp" +#include "globalSyncInventory.hpp" #include "osScanner.hpp" #include "packageScanner.hpp" #include "resultIndexer.hpp" @@ -55,7 +56,8 @@ template + typename TScanAgentList = ScanAgentList, + typename TGlobalSyncInventory = GlobalSyncInventory> class TFactoryOrchestrator final { private: @@ -153,6 +155,10 @@ class TFactoryOrchestrator final inventoryDatabase, std::make_shared(indexerConnector)); break; + case ScannerType::GlobalSyncInventory: + orchestration = std::make_shared(indexerConnector); + break; + default: throw std::runtime_error("Invalid scanner type"); } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/globalSyncInventory.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/globalSyncInventory.hpp new file mode 100644 index 00000000000..086a6d1b98c --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/globalSyncInventory.hpp @@ -0,0 +1,62 @@ +/* + * Wazuh Vulnerability scanner - Scan Orchestrator + * Copyright (C) 2015, Wazuh Inc. + * April 22, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#ifndef _GLOBAL_SYNC_INVENTORY_HPP +#define _GLOBAL_SYNC_INVENTORY_HPP + +#include "chainOfResponsability.hpp" +#include "indexerConnector.hpp" +#include "scanContext.hpp" + +/** + * @brief GlobalSyncInventory class. + * + * @tparam TIndexerConnector indexer connector type. + * @tparam TScanContext scan context type. + */ +template +class TGlobalSyncInventory final : public AbstractHandler> +{ +private: + std::shared_ptr m_indexerConnector; + +public: + // LCOV_EXCL_START + /** + * @brief GlobalSyncInventory constructor. + * + * @param indexerConnector Indexer connector. + */ + explicit TGlobalSyncInventory(std::shared_ptr indexerConnector) + : m_indexerConnector(std::move(indexerConnector)) + { + } + // LCOV_EXCL_STOP + + /** + * @brief Handles request and passes control to the next step of the chain. + * + * @param data Scan context. 
+ * @return std::shared_ptr Abstract handler. + */ + std::shared_ptr handleRequest(std::shared_ptr data) override + { + if (m_indexerConnector != nullptr) + { + m_indexerConnector->sync(data->agentId().data()); + } + return AbstractHandler>::handleRequest(std::move(data)); + } +}; + +using GlobalSyncInventory = TGlobalSyncInventory<>; + +#endif // _GLOBAL_SYNC_INVENTORY_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index ee79b2b3d37..ff94fad4371 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -47,6 +47,7 @@ enum class ScannerType ReScanSingleAgent = 8, CleanupAllAgentData = 9, CleanupSingleAgentData = 10, + GlobalSyncInventory = 11 }; // The affected component type is used to determine what type of data is affected in the system. @@ -463,6 +464,17 @@ struct TScanContext final // Integrity clear for othre components not affected by the scanner. } } + else if (syncMsg->data_type() == SyscollectorSynchronization::DataUnion_integrity_check_global) + { + if (syncMsg->data_as_integrity_check_global()->attributes_type()->str().compare( + "syscollector_packages") == 0 || + syncMsg->data_as_integrity_check_global()->attributes_type()->str().compare( + "syscollector_osinfo") == 0) + { + m_type = ScannerType::GlobalSyncInventory; + m_affectedComponentType = AffectedComponentType::Agent; + } + } } else if constexpr (std::is_same_v) { diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp index bfb2c659593..df4b8099696 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp @@ -96,6 +96,11 @@ class TScanOrchestrator final : public TOSPrimitives std::move(indexerConnector), inventoryDatabase, std::move(reportDispatcher)); + m_inventorySyncOrchestration = TFactoryOrchestrator::create(ScannerType::GlobalSyncInventory, + std::move(databaseFeedManager), + std::move(indexerConnector), + inventoryDatabase, + std::move(reportDispatcher)); nlohmann::json response; // JSON object to store the response from the database std::string managerName; // String to store the retrieved manager name @@ -292,6 +297,7 @@ class TScanOrchestrator final : public TOSPrimitives std::shared_ptr m_reScanOrchestration; std::shared_ptr m_cleanUpDataOrchestration; std::shared_ptr m_deleteAgentScanOrchestration; + std::shared_ptr m_inventorySyncOrchestration; std::shared_mutex& m_mutex; std::shared_ptr m_eventDelayedDispatcher; }; From 06606f6c320e9877fee917ea39032ffbecfe3ec7 Mon Sep 17 00:00:00 2001 From: pereyra-m Date: Mon, 22 Apr 2024 21:02:31 +0000 Subject: [PATCH 013/419] Each node publishes the vulnerabilities to its index --- .../indexer_connector/src/indexerConnector.cpp | 2 +- .../src/policyManager/policyManager.hpp | 12 +++++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/shared_modules/indexer_connector/src/indexerConnector.cpp b/src/shared_modules/indexer_connector/src/indexerConnector.cpp index ba65aac6af9..a37cf4bb4e5 100644 --- a/src/shared_modules/indexer_connector/src/indexerConnector.cpp +++ b/src/shared_modules/indexer_connector/src/indexerConnector.cpp @@ -177,7 +177,7 @@ 
IndexerConnector::IndexerConnector( auto url = selector->getNext(); std::string bulkData; - url.append("/_bulk"); + url.append("/_bulk?refresh=wait_for"); std::vector documents; while (!dataQueue.empty()) diff --git a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp index eb00d5f0a9d..1351e1cbf09 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp @@ -27,6 +27,8 @@ constexpr auto UNKNOWN_VALUE {" "}; constexpr auto STATES_VD_INDEX_NAME {"wazuh-states-vulnerabilities"}; +constexpr auto STATES_VD_INDEX_DEFAULT_CLUSTER_NAME {"wazuh"}; +constexpr auto STATES_VD_INDEX_DEFAULT_NODE_NAME {"node01"}; constexpr auto DEFAULT_TRANSLATION_LRU_SIZE {2048}; constexpr auto DEFAULT_OSDATA_LRU_SIZE {1000}; const static std::string UPDATER_PATH {"queue/vd_updater"}; @@ -115,7 +117,15 @@ class PolicyManager final : public Singleton newPolicy["indexer"]["ssl"]["certificate"] = ""; newPolicy["indexer"]["ssl"]["key"] = ""; } - newPolicy["indexer"]["name"] = STATES_VD_INDEX_NAME; + + newPolicy["indexer"]["name"] = + std::string {STATES_VD_INDEX_NAME} + '-' + + (newPolicy.at("vulnerability-detection").contains("clusterName") + ? newPolicy.at("vulnerability-detection").at("clusterName").get_ref() + : STATES_VD_INDEX_DEFAULT_CLUSTER_NAME) + + '-' + + (newPolicy.contains("managerNodeName") ? newPolicy.at("managerNodeName").get_ref() + : STATES_VD_INDEX_DEFAULT_NODE_NAME); if (!newPolicy.at("vulnerability-detection").contains("feed-update-interval")) { From d77d700a627a835463275ff6683b9d1a05792604 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Wed, 24 Apr 2024 01:37:49 -0300 Subject: [PATCH 014/419] Add wait for parameter. --- .../indexer_connector/src/indexerConnector.cpp | 2 +- .../src/policyManager/policyManager.hpp | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/src/shared_modules/indexer_connector/src/indexerConnector.cpp b/src/shared_modules/indexer_connector/src/indexerConnector.cpp index a37cf4bb4e5..77e41507422 100644 --- a/src/shared_modules/indexer_connector/src/indexerConnector.cpp +++ b/src/shared_modules/indexer_connector/src/indexerConnector.cpp @@ -363,7 +363,7 @@ void IndexerConnector::diff(nlohmann::json& responseJson, } auto url = selector->getNext(); - url.append("/_bulk"); + url.append("/_bulk?refresh=wait_for"); std::string bulkData; for (const auto& [id, deleted] : actions) diff --git a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp index 1351e1cbf09..41ff8fdd17d 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp @@ -28,7 +28,6 @@ constexpr auto UNKNOWN_VALUE {" "}; constexpr auto STATES_VD_INDEX_NAME {"wazuh-states-vulnerabilities"}; constexpr auto STATES_VD_INDEX_DEFAULT_CLUSTER_NAME {"wazuh"}; -constexpr auto STATES_VD_INDEX_DEFAULT_NODE_NAME {"node01"}; constexpr auto DEFAULT_TRANSLATION_LRU_SIZE {2048}; constexpr auto DEFAULT_OSDATA_LRU_SIZE {1000}; const static std::string UPDATER_PATH {"queue/vd_updater"}; @@ -122,10 +121,7 @@ class PolicyManager final : public Singleton std::string {STATES_VD_INDEX_NAME} + '-' + (newPolicy.at("vulnerability-detection").contains("clusterName") ? 
newPolicy.at("vulnerability-detection").at("clusterName").get_ref() - : STATES_VD_INDEX_DEFAULT_CLUSTER_NAME) + - '-' + - (newPolicy.contains("managerNodeName") ? newPolicy.at("managerNodeName").get_ref() - : STATES_VD_INDEX_DEFAULT_NODE_NAME); + : STATES_VD_INDEX_DEFAULT_CLUSTER_NAME); if (!newPolicy.at("vulnerability-detection").contains("feed-update-interval")) { From 62926c2d8afc105414e95acb0038e97df2d53a88 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Thu, 25 Apr 2024 15:57:47 -0300 Subject: [PATCH 015/419] Add abuse control and logs. --- .../include/indexerConnector.hpp | 27 +++- .../src/indexerConnector.cpp | 133 +++++++++++------- .../src/scanOrchestrator/scanContext.hpp | 1 - .../src/scanOrchestrator/scanOrchestrator.hpp | 9 +- 4 files changed, 112 insertions(+), 58 deletions(-) diff --git a/src/shared_modules/indexer_connector/include/indexerConnector.hpp b/src/shared_modules/indexer_connector/include/indexerConnector.hpp index 7c184fdaf64..892b5a0e846 100644 --- a/src/shared_modules/indexer_connector/include/indexerConnector.hpp +++ b/src/shared_modules/indexer_connector/include/indexerConnector.hpp @@ -62,6 +62,7 @@ class EXPORTED IndexerConnector final std::string m_indexName; std::mutex m_syncMutex; std::unique_ptr m_dispatcher; + std::unordered_map m_lastSync; /** * @brief Intialize method used to load template data and initialize the index. @@ -75,12 +76,6 @@ class EXPORTED IndexerConnector final const std::shared_ptr& selector, const SecureCommunication& secureCommunication); - /** - * @brief Save documents into the database. - * @param documents Documents to be saved. - */ - void saveDocuments(const std::vector& documents); - /** * @brief This method is used to calculate the diff between the inventory database and the indexer. * @param responseJson Response JSON. @@ -88,11 +83,29 @@ class EXPORTED IndexerConnector final * @param secureCommunication Secure communication. * @param selector Server selector. */ - void diff(nlohmann::json& responseJson, + void diff(const nlohmann::json& responseJson, const std::string& agentId, const SecureCommunication& secureCommunication, const std::shared_ptr& selector); + /** + * @brief Get agent ids of documents from the indexer. + * @param url Indexer URL. + * @param agentId Agent ID. + * @param secureCommunication Secure communication. + * @return Agent documents. + */ + nlohmann::json getAgentDocumentsIds(const std::string& url, + const std::string& agentId, + const SecureCommunication& secureCommunication) const; + + /** + * @brief Abuse control. + * @param agentId Agent ID. + * @return True if the agent is abusing the indexer, false otherwise. + */ + bool abuseControl(const std::string& agentId); + public: /** * @brief Class constructor that initializes the publisher. diff --git a/src/shared_modules/indexer_connector/src/indexerConnector.cpp b/src/shared_modules/indexer_connector/src/indexerConnector.cpp index 77e41507422..a83f7e333bb 100644 --- a/src/shared_modules/indexer_connector/src/indexerConnector.cpp +++ b/src/shared_modules/indexer_connector/src/indexerConnector.cpp @@ -37,9 +37,14 @@ constexpr auto DOUBLE_FACTOR {2}; // Single thread because the events needs to be processed in order. 
constexpr auto DATABASE_WORKERS = 1; constexpr auto DATABASE_BASE_PATH = "queue/indexer/"; + +// Sync configuration constexpr auto SYNC_WORKERS = 1; constexpr auto SYNC_QUEUE_LIMIT = 4096; +// Abuse control +constexpr auto MINIMAL_SYNC_TIME {30}; // In minutes + static void initConfiguration(SecureCommunication& secureCommunication, const nlohmann::json& config) { std::string caRootCertificate; @@ -89,21 +94,6 @@ static void initConfiguration(SecureCommunication& secureCommunication, const nl .caRootCertificate(caRootCertificate); } -void IndexerConnector::saveDocuments(const std::vector& documents) -{ - for (const auto& document : documents) - { - if (document.deleted) - { - m_db->delete_(document.id); - } - else - { - m_db->put(document.id, document.data); - } - } -} - static void builderBulkDelete(std::string& bulkData, std::string_view id, std::string_view index) { bulkData.append(R"({"delete":{"_index":")"); @@ -159,7 +149,7 @@ IndexerConnector::IndexerConnector( auto selector {std::make_shared(config.at("hosts"), timeout, secureCommunication)}; m_dispatcher = std::make_unique( - [=](std::queue& dataQueue) + [this, selector, secureCommunication](std::queue& dataQueue) { std::scoped_lock lock(m_syncMutex); @@ -190,13 +180,13 @@ IndexerConnector::IndexerConnector( if (parsedData.at("operation").get_ref().compare("DELETED") == 0) { builderBulkDelete(bulkData, id, m_indexName); - documents.push_back({id, data, true}); + m_db->delete_(id); } else { const auto dataString = parsedData.at("data").dump(); builderBulkIndex(bulkData, id, m_indexName, dataString); - documents.push_back({id, dataString, false}); + m_db->put(id, dataString); } } // Process data. @@ -212,41 +202,24 @@ IndexerConnector::IndexerConnector( "", DEFAULT_HEADERS, secureCommunication); - - // Save documents to the database. - saveDocuments(documents); }, DATABASE_BASE_PATH + m_indexName, ELEMENTS_PER_BULK); m_syncQueue = std::make_unique( - [=](const std::string& agentId) + [this, selector, secureCommunication](const std::string& agentId) { try { std::scoped_lock lock(m_syncMutex); - nlohmann::json responseJson; - auto url = selector->getNext().append("/").append(m_indexName).append("/_search"); - - nlohmann::json postData; - - // TODO: Add scroll support. 
- postData["query"]["match"]["agent.id"] = agentId; - postData["size"] = 10000; - postData["_source"] = nlohmann::json::array({"_id"}); - - logDebug2(IC_NAME, "Payload: %s", postData.dump().c_str()); - - HTTPRequest::instance().post( - HttpURL(url), - postData.dump(), - [&responseJson](const std::string& response) { responseJson = nlohmann::json::parse(response); }, - [](const std::string& error, const long) { throw std::runtime_error(error); }, - "", - DEFAULT_HEADERS, - secureCommunication); - logDebug2(IC_NAME, "Response: %s", responseJson.dump().c_str()); - diff(responseJson, agentId, secureCommunication, selector); + if (!abuseControl(agentId)) + { + logDebug2(IC_NAME, "Syncing agent '%s' with the indexer.", agentId.c_str()); + diff(getAgentDocumentsIds(selector->getNext(), agentId, secureCommunication), + agentId, + secureCommunication, + selector); + } } catch (const std::exception& e) { @@ -259,7 +232,7 @@ IndexerConnector::IndexerConnector( m_initializeThread = std::thread( // coverity[copy_constructor_call] - [=]() + [this, templateData, selector, secureCommunication]() { auto sleepTime = std::chrono::seconds(START_TIME); std::unique_lock lock(m_mutex); @@ -297,6 +270,73 @@ IndexerConnector::IndexerConnector( }); } +bool IndexerConnector::abuseControl(const std::string& agentId) +{ + const auto currentTime = std::chrono::system_clock::now(); + if (const auto lastSync = m_lastSync.find(agentId); lastSync != m_lastSync.end()) + { + const auto diff = std::chrono::duration_cast(currentTime - lastSync->second); + if (diff.count() < MINIMAL_SYNC_TIME) + { + logDebug2(IC_NAME, "Agent '%s' ommited due to abuse control.", agentId.c_str()); + return true; + } + } + m_lastSync[agentId] = currentTime; + return false; +} + +nlohmann::json IndexerConnector::getAgentDocumentsIds(const std::string& url, + const std::string& agentId, + const SecureCommunication& secureCommunication) const +{ + nlohmann::json postData; + nlohmann::json responseJson; + constexpr auto ELEMENTS_PER_QUERY {10000}; // The max value for queries is 10000 in the wazuh-indexer. + + postData["query"]["match"]["agent.id"] = agentId; + postData["size"] = ELEMENTS_PER_QUERY; + postData["_source"] = nlohmann::json::array({"_id"}); + + HTTPRequest::instance().post( + HttpURL(url + "/" + m_indexName + "/_search?scroll=1m"), + postData.dump(), + [&responseJson](const std::string& response) { responseJson = nlohmann::json::parse(response); }, + [](const std::string& error, const long) { throw std::runtime_error(error); }, + "", + DEFAULT_HEADERS, + secureCommunication); + + // If the response have more than ELEMENTS_PER_QUERY elements, we need to scroll. 
+ if (responseJson.at("hits").at("total").at("value").get() > ELEMENTS_PER_QUERY) + { + const auto scrollId = responseJson.at("_scroll_id").get_ref(); + const auto scrollUrl = url + "/_search/scroll"; + const auto scrollData = R"({"scroll":"1m","scroll_id":")" + scrollId + "\"}"; + + while (responseJson.at("hits").at("hits").size() < responseJson.at("hits").at("total").at("value").get()) + { + HTTPRequest::instance().post( + HttpURL(scrollUrl), + scrollData, + [&responseJson](const std::string& response) + { + auto newResponse = nlohmann::json::parse(response); + for (const auto& hit : newResponse.at("hits").at("hits")) + { + responseJson.at("hits").at("hits").push_back(hit); + } + }, + [](const std::string& error, const long) { throw std::runtime_error(error); }, + "", + DEFAULT_HEADERS, + secureCommunication); + } + } + + return responseJson; +} + IndexerConnector::~IndexerConnector() { m_stopping.store(true); @@ -318,7 +358,7 @@ void IndexerConnector::sync(const std::string& agentId) m_syncQueue->push(agentId); } -void IndexerConnector::diff(nlohmann::json& responseJson, +void IndexerConnector::diff(const nlohmann::json& responseJson, const std::string& agentId, const SecureCommunication& secureCommunication, const std::shared_ptr& selector) @@ -385,7 +425,6 @@ void IndexerConnector::diff(nlohmann::json& responseJson, if (!bulkData.empty()) { - logDebug2(IC_NAME, "Payload: %s", bulkData.c_str()); HTTPRequest::instance().post( HttpURL(url), bulkData, diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index ff94fad4371..83e23723876 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -336,7 +336,6 @@ struct TScanContext final m_data = arg; m_messageType = MessageType::Sync; auto syncMsg = std::get(m_data); - if (syncMsg->data_type() == SyscollectorSynchronization::DataUnion_state) { if (syncMsg->data_as_state()->attributes_type() == diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp index df4b8099696..cee32003565 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp @@ -92,10 +92,10 @@ class TScanOrchestrator final : public TOSPrimitives inventoryDatabase, reportDispatcher); m_cleanUpDataOrchestration = TFactoryOrchestrator::create(ScannerType::CleanupAllAgentData, - std::move(databaseFeedManager), - std::move(indexerConnector), + databaseFeedManager, + indexerConnector, inventoryDatabase, - std::move(reportDispatcher)); + reportDispatcher); m_inventorySyncOrchestration = TFactoryOrchestrator::create(ScannerType::GlobalSyncInventory, std::move(databaseFeedManager), std::move(indexerConnector), @@ -273,6 +273,9 @@ class TScanOrchestrator final : public TOSPrimitives case ScannerType::CleanupSingleAgentData: m_deleteAgentScanOrchestration->handleRequest(std::move(context)); break; + case ScannerType::GlobalSyncInventory: + m_inventorySyncOrchestration->handleRequest(std::move(context)); + break; // LCOV_EXCL_STOP default: return; } From 998479f701803041bc45bd494c9d73f059f7bac2 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 26 Apr 2024 01:29:09 -0300 Subject: [PATCH 016/419] Rename index name. 
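The getAgentDocumentsIds() hunk above pages through an agent's documents with the indexer's scroll API. Below is a minimal standalone sketch of that pagination pattern in Python, assuming an indexer reachable at http://localhost:9200 and the wazuh-states-vulnerabilities-default index used elsewhere in this series; host, index and agent id are illustrative and the code is not taken from the patch itself.

import requests

INDEXER = "http://localhost:9200"                 # assumed local indexer
INDEX = "wazuh-states-vulnerabilities-default"    # index name used in this series
AGENT_ID = "000"                                  # illustrative agent id

def agent_document_ids():
    # First page: ask the indexer to keep a scroll context open for one minute.
    body = {"query": {"match": {"agent.id": AGENT_ID}}, "size": 10000, "_source": False}
    page = requests.post(f"{INDEXER}/{INDEX}/_search", params={"scroll": "1m"}, json=body).json()
    scroll_id = page.get("_scroll_id")
    ids = [hit["_id"] for hit in page["hits"]["hits"]]
    # Standard scroll loop: request the next page until one comes back empty.
    while scroll_id and page["hits"]["hits"]:
        page = requests.post(f"{INDEXER}/_search/scroll",
                             json={"scroll": "1m", "scroll_id": scroll_id}).json()
        scroll_id = page.get("_scroll_id", scroll_id)
        ids.extend(hit["_id"] for hit in page["hits"]["hits"])
    # Release the scroll context so it does not linger on the indexer.
    if scroll_id:
        requests.delete(f"{INDEXER}/_search/scroll", json={"scroll_id": scroll_id})
    return ids

if __name__ == "__main__":
    print(agent_document_ids())

The sketch clears the scroll context once the last page is empty, which is the usual way to avoid leaking search contexts on the indexer side.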
--- .../src/policyManager/policyManager.hpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp index 41ff8fdd17d..ce3c7a82868 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp @@ -117,11 +117,7 @@ class PolicyManager final : public Singleton newPolicy["indexer"]["ssl"]["key"] = ""; } - newPolicy["indexer"]["name"] = - std::string {STATES_VD_INDEX_NAME} + '-' + - (newPolicy.at("vulnerability-detection").contains("clusterName") - ? newPolicy.at("vulnerability-detection").at("clusterName").get_ref() - : STATES_VD_INDEX_DEFAULT_CLUSTER_NAME); + newPolicy["indexer"]["name"] = STATES_VD_INDEX_NAME; if (!newPolicy.at("vulnerability-detection").contains("feed-update-interval")) { From 720df07f99c8d1e258bf4ca75aa429c4fe92a671 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 26 Apr 2024 13:31:10 -0300 Subject: [PATCH 017/419] Fix tests. --- .../include/indexerConnector.hpp | 11 - .../src/indexerConnector.cpp | 3 +- .../tests/unit/factoryOrchestrator_test.cpp | 215 +++++++++++------- .../tests/unit/scanOrchestrator_test.cpp | 49 +++- 4 files changed, 174 insertions(+), 104 deletions(-) diff --git a/src/shared_modules/indexer_connector/include/indexerConnector.hpp b/src/shared_modules/indexer_connector/include/indexerConnector.hpp index 892b5a0e846..f6cf432bbe6 100644 --- a/src/shared_modules/indexer_connector/include/indexerConnector.hpp +++ b/src/shared_modules/indexer_connector/include/indexerConnector.hpp @@ -37,17 +37,6 @@ using ThreadSyncQueue = Utils::AsyncDispatcher documents; while (!dataQueue.empty()) { auto data = dataQueue.front(); @@ -342,6 +341,8 @@ IndexerConnector::~IndexerConnector() m_stopping.store(true); m_cv.notify_all(); + m_dispatcher->cancel(); + if (m_initializeThread.joinable()) { m_initializeThread.join(); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp index 8de00afd1a4..286ec1d3ff3 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp @@ -31,7 +31,8 @@ enum class ScannerMockID : int BUILD_ALL_AGENT_LIST_CONTEXT = 13, BUILD_SINGLE_AGENT_LIST_CONTEXT = 14, CLEAN_SINGLE_AGENT_INVENTORY = 15, - SCAN_AGENT_LIST = 16 + SCAN_AGENT_LIST = 16, + GLOBAL_INVENTORY_SYNC = 17 }; /** @@ -126,11 +127,12 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypePackageInsert) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::PackageInsert, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::PackageInsert, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -170,11 +172,12 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypePackageDelete) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::PackageDelete, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::PackageDelete, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -212,11 +215,12 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypeIntegrityClear) TFakeClass, TFakeClass, 
TFakeClass, - TFakeClass>::create(ScannerType::IntegrityClear, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::IntegrityClear, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -233,30 +237,32 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypeIntegrityClear) TEST_F(FactoryOrchestratorTest, TestScannerTypeOs) { // Create the orchestrator for Os. - auto orchestration = TFactoryOrchestrator, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - MockDatabaseFeedManager, - MockIndexerConnector, - std::vector, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass>::create(ScannerType::Os, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + auto orchestration = + TFactoryOrchestrator, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + MockDatabaseFeedManager, + MockIndexerConnector, + std::vector, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass>::create(ScannerType::Os, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -296,11 +302,12 @@ TEST_F(FactoryOrchestratorTest, TestCreationCleanUpAllData) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::CleanupAllAgentData, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::CleanupAllAgentData, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -335,11 +342,12 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanAllAgents) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::ReScanAllAgents, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::ReScanAllAgents, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -375,11 +383,12 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanSingleAgent) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::ReScanSingleAgent, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::ReScanSingleAgent, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -393,31 +402,28 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanSingleAgent) TEST_F(FactoryOrchestratorTest, TestCreationCleanUpAgentData) { // Create the orchestrator for CleanupSingleAgentData. 
- auto orchestration = - TFactoryOrchestrator, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - MockDatabaseFeedManager, - MockIndexerConnector, - std::vector, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass>::create(ScannerType::CleanupSingleAgentData, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + auto orchestration = TFactoryOrchestrator, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + MockDatabaseFeedManager, + MockIndexerConnector, + std::vector, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass>:: + create(ScannerType::CleanupSingleAgentData, nullptr, nullptr, *m_inventoryDatabase, nullptr); auto context = std::make_shared>(); @@ -455,11 +461,12 @@ TEST_F(FactoryOrchestratorTest, TestCreationInvalidScannerType) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(invalidScannerType, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(invalidScannerType, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); } catch (const std::runtime_error& e) { @@ -470,3 +477,41 @@ TEST_F(FactoryOrchestratorTest, TestCreationInvalidScannerType) FAIL() << "Expected std::runtime_error"; } } + +TEST_F(FactoryOrchestratorTest, TestCreationGlobalSyncInventory) +{ + // Create the orchestrator for CleanupSingleAgentData. + auto orchestration = + TFactoryOrchestrator, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + MockDatabaseFeedManager, + MockIndexerConnector, + std::vector, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass>::create(ScannerType::GlobalSyncInventory, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); + + auto context = std::make_shared>(); + + EXPECT_NO_THROW(orchestration->handleRequest(context)); + EXPECT_EQ(context->size(), 1); + EXPECT_EQ(context->at(0), ScannerMockID::GLOBAL_INVENTORY_SYNC); +} + diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp index cebe09c495b..f122665a3b9 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp @@ -263,6 +263,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsert) std::make_shared>>(); EXPECT_CALL(*spQueryAllPkgsOrchestrationMock, handleRequest(_)).Times(0); + auto spGlobalInventorySyncOrchestrationMock = + std::make_shared>>(); + EXPECT_CALL(*spGlobalInventorySyncOrchestrationMock, handleRequest(_)).Times(0); + spFactoryOrchestratorMock = std::make_shared(); EXPECT_CALL(*spFactoryOrchestratorMock, create()) .WillOnce(testing::Return(spOsOrchestrationMock)) @@ -272,7 +276,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsert) .WillOnce(testing::Return(spFetchAllGlobalDb)) .WillOnce(testing::Return(spCleanUpAllOrchestrationMock)) .WillOnce(testing::Return(spDeleteAgentScanOrchestration)) - .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)); + .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)) + .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)); auto spIndexerConnectorMock = 
std::make_shared(); @@ -374,6 +379,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageDelete) std::make_shared>>(); EXPECT_CALL(*spDeleteAgentScanOrchestration, handleRequest(_)).Times(0); + auto spGlobalInventorySyncOrchestrationMock = + std::make_shared>>(); + EXPECT_CALL(*spGlobalInventorySyncOrchestrationMock, handleRequest(_)).Times(0); + spFactoryOrchestratorMock = std::make_shared(); EXPECT_CALL(*spFactoryOrchestratorMock, create()) .WillOnce(testing::Return(spOsOrchestrationMock)) @@ -383,7 +392,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageDelete) .WillOnce(testing::Return(spFetchAllGlobalDb)) .WillOnce(testing::Return(spCleanUpAllOrchestrationMock)) .WillOnce(testing::Return(spDeleteAgentScanOrchestration)) - .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)); + .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)) + .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)); auto spIndexerConnectorMock = std::make_shared(); @@ -485,6 +495,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixInsert) std::make_shared>>(); EXPECT_CALL(*spDeleteAgentScanOrchestration, handleRequest(_)).Times(0); + auto spGlobalInventorySyncOrchestrationMock = + std::make_shared>>(); + EXPECT_CALL(*spGlobalInventorySyncOrchestrationMock, handleRequest(_)).Times(0); + spFactoryOrchestratorMock = std::make_shared(); EXPECT_CALL(*spFactoryOrchestratorMock, create()) .WillOnce(testing::Return(spOsOrchestrationMock)) @@ -494,7 +508,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixInsert) .WillOnce(testing::Return(spFetchAllGlobalDb)) .WillOnce(testing::Return(spCleanUpAllOrchestrationMock)) .WillOnce(testing::Return(spDeleteAgentScanOrchestration)) - .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)); + .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)) + .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)); auto spIndexerConnectorMock = std::make_shared(); @@ -596,6 +611,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixDelete) std::make_shared>>(); EXPECT_CALL(*spDeleteAgentScanOrchestration, handleRequest(_)).Times(0); + auto spGlobalInventorySyncOrchestrationMock = + std::make_shared>>(); + EXPECT_CALL(*spGlobalInventorySyncOrchestrationMock, handleRequest(_)).Times(0); + spFactoryOrchestratorMock = std::make_shared(); EXPECT_CALL(*spFactoryOrchestratorMock, create()) .WillOnce(testing::Return(spOsOrchestrationMock)) @@ -605,7 +624,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixDelete) .WillOnce(testing::Return(spFetchAllGlobalDb)) .WillOnce(testing::Return(spCleanUpAllOrchestrationMock)) .WillOnce(testing::Return(spDeleteAgentScanOrchestration)) - .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)); + .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)) + .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)); auto spIndexerConnectorMock = std::make_shared(); @@ -691,6 +711,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeOs) std::make_shared>>(); EXPECT_CALL(*spDeleteAgentScanOrchestration, handleRequest(_)).Times(0); + auto spGlobalInventorySyncOrchestrationMock = + std::make_shared>>(); + EXPECT_CALL(*spGlobalInventorySyncOrchestrationMock, handleRequest(_)).Times(0); + spFactoryOrchestratorMock = std::make_shared(); EXPECT_CALL(*spFactoryOrchestratorMock, create()) .WillOnce(testing::Return(spOsOrchestrationMock)) @@ -700,7 +724,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeOs) .WillOnce(testing::Return(spFetchAllGlobalDb)) 
.WillOnce(testing::Return(spCleanUpAllOrchestrationMock)) .WillOnce(testing::Return(spDeleteAgentScanOrchestration)) - .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)); + .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)) + .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)); auto spIndexerConnectorMock = std::make_shared(); @@ -803,6 +828,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeIntegrityClear) std::make_shared>>(); EXPECT_CALL(*spDeleteAgentScanOrchestration, handleRequest(_)).Times(0); + auto spGlobalInventorySyncOrchestrationMock = + std::make_shared>>(); + EXPECT_CALL(*spGlobalInventorySyncOrchestrationMock, handleRequest(_)).Times(0); + spFactoryOrchestratorMock = std::make_shared(); EXPECT_CALL(*spFactoryOrchestratorMock, create()) .WillOnce(testing::Return(spOsOrchestrationMock)) @@ -812,7 +841,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeIntegrityClear) .WillOnce(testing::Return(spFetchAllGlobalDb)) .WillOnce(testing::Return(spCleanUpAllOrchestrationMock)) .WillOnce(testing::Return(spDeleteAgentScanOrchestration)) - .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)); + .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)) + .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)); auto spIndexerConnectorMock = std::make_shared(); @@ -916,6 +946,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsertInDelayed) std::make_shared>>(); EXPECT_CALL(*spQueryAllPkgsOrchestrationMock, handleRequest(_)).Times(0); + auto spGlobalInventorySyncOrchestrationMock = + std::make_shared>>(); + EXPECT_CALL(*spGlobalInventorySyncOrchestrationMock, handleRequest(_)).Times(0); + spFactoryOrchestratorMock = std::make_shared(); EXPECT_CALL(*spFactoryOrchestratorMock, create()) .WillOnce(testing::Return(spOsOrchestrationMock)) @@ -925,7 +959,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsertInDelayed) .WillOnce(testing::Return(spFetchAllGlobalDb)) .WillOnce(testing::Return(spCleanUpAllOrchestrationMock)) .WillOnce(testing::Return(spDeleteAgentScanOrchestration)) - .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)); + .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)) + .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)); auto spIndexerConnectorMock = std::make_shared(); From f71fa8bf6fdfca1d2a4e1b6051ecbde426746432 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 26 Apr 2024 14:19:41 -0300 Subject: [PATCH 018/419] Fix CSF. --- .../tests/unit/factoryOrchestrator_test.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp index 286ec1d3ff3..58db0f86638 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp @@ -514,4 +514,3 @@ TEST_F(FactoryOrchestratorTest, TestCreationGlobalSyncInventory) EXPECT_EQ(context->size(), 1); EXPECT_EQ(context->at(0), ScannerMockID::GLOBAL_INVENTORY_SYNC); } - From dd5ea60a25ee6784eab1cfbeae02ea2c851e2e71 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 26 Apr 2024 14:22:58 -0300 Subject: [PATCH 019/419] Change error to warning for sync message when the wazuh-indexer is off. 
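The sync path touched by this warning change is also rate limited: abuseControl(), added earlier in this series, skips a resync when the same agent was already synced less than MINIMAL_SYNC_TIME (30) minutes ago. Below is a minimal standalone sketch of that throttle in Python; class and function names are illustrative and not taken from the patch itself.

from datetime import datetime, timedelta

MINIMAL_SYNC_TIME = timedelta(minutes=30)   # mirrors the constant added in this series

class SyncThrottle:
    """Illustrative per-agent throttle; not the C++ class itself."""

    def __init__(self):
        self._last_sync = {}                # agent id -> time of the last accepted sync

    def skip(self, agent_id: str) -> bool:
        now = datetime.now()
        last = self._last_sync.get(agent_id)
        # Inside the window: report the sync as skipped and keep the old timestamp.
        if last is not None and now - last < MINIMAL_SYNC_TIME:
            return True
        # Outside the window (or first sync): record it and let it through.
        self._last_sync[agent_id] = now
        return False

throttle = SyncThrottle()
print(throttle.skip("000"))   # False: first sync for agent 000 is allowed
print(throttle.skip("000"))   # True: a second request within 30 minutes is skipped

As in the C++ version, the timestamp is only refreshed when a sync is allowed through, so repeated requests inside the window keep being skipped until the original 30-minute mark has passed.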
--- src/shared_modules/indexer_connector/src/indexerConnector.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/shared_modules/indexer_connector/src/indexerConnector.cpp b/src/shared_modules/indexer_connector/src/indexerConnector.cpp index da351341566..5d3245e6d1c 100644 --- a/src/shared_modules/indexer_connector/src/indexerConnector.cpp +++ b/src/shared_modules/indexer_connector/src/indexerConnector.cpp @@ -222,7 +222,7 @@ IndexerConnector::IndexerConnector( } catch (const std::exception& e) { - logError(IC_NAME, "Failed to sync agent '%s' with the indexer.", agentId.c_str()); + logWarn(IC_NAME, "Failed to sync agent '%s' with the indexer.", agentId.c_str()); logDebug1(IC_NAME, "Error: %s", e.what()); } }, From 467593cf8c5592d884bb92f3a95895f111f76287 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Mon, 29 Apr 2024 01:42:04 -0300 Subject: [PATCH 020/419] Add qa test for basic functionality of the indexer connector. --- .../actions/indexer_connector_deps/action.yml | 44 +++ .../vulnerability_scanner_deps/action.yml | 2 +- .github/workflows/indexer-connector-tests.yml | 57 +++ .../indexer_connector/qa/requirements.txt | 4 + .../config.json | 5 + .../event_delete.json | 54 +++ .../event_insert.json | 54 +++ .../template.json | 283 +++++++++++++ .../config.json | 5 + .../template.json | 283 +++++++++++++ .../indexer_connector/qa/test_efficacy_log.py | 321 +++++++++++++++ .../src/indexerConnector.cpp | 371 +++++++++--------- .../testtool/cmdArgParser.hpp | 29 +- .../testtool/input/config.json | 7 +- .../indexer_connector/testtool/main.cpp | 170 ++++---- 15 files changed, 1425 insertions(+), 264 deletions(-) create mode 100644 .github/actions/indexer_connector_deps/action.yml create mode 100644 .github/workflows/indexer-connector-tests.yml create mode 100644 src/shared_modules/indexer_connector/qa/requirements.txt create mode 100644 src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/config.json create mode 100644 src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/event_delete.json create mode 100644 src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/event_insert.json create mode 100644 src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/template.json create mode 100644 src/shared_modules/indexer_connector/qa/test_data/test_initialize_indexer_connector/config.json create mode 100644 src/shared_modules/indexer_connector/qa/test_data/test_initialize_indexer_connector/template.json create mode 100644 src/shared_modules/indexer_connector/qa/test_efficacy_log.py diff --git a/.github/actions/indexer_connector_deps/action.yml b/.github/actions/indexer_connector_deps/action.yml new file mode 100644 index 00000000000..87e81800b61 --- /dev/null +++ b/.github/actions/indexer_connector_deps/action.yml @@ -0,0 +1,44 @@ +name: "Indexer connector dependencies" +description: "Download and compiles the dependencies for the indexer connector" + +runs: + using: "composite" + steps: + - name: Dependencies for local execution + if: env.ACT # Only run for local execution + shell: bash + run: | + + # Obtain a copy of the signing key + wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | sudo tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null + + # Add the repository to your sources list + echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ jammy main' | sudo tee 
/etc/apt/sources.list.d/kitware.list >/dev/null + + # Update packages + sudo apt-get update + sudo apt-get install -y cmake + + - name: General dependencies + shell: bash + run: | + sudo apt-get update + sudo apt-get install -y libc6-dbg + + - name: Build external deps + run: | + cd src + make deps TARGET=server -j2 + make libwazuhext.so TARGET=server -j2 + shell: bash + + - name: Build http-request + run: | + cd src + SRC_FOLDER=$(pwd) + + cd shared_modules/http-request + mkdir -p build && cd build + cmake .. -DCMAKE_PROJECT_NAME=http-request -DSRC_FOLDER=${SRC_FOLDER} && make -j2 + shell: bash + diff --git a/.github/actions/vulnerability_scanner_deps/action.yml b/.github/actions/vulnerability_scanner_deps/action.yml index 30367a30f57..934cb5cfeca 100644 --- a/.github/actions/vulnerability_scanner_deps/action.yml +++ b/.github/actions/vulnerability_scanner_deps/action.yml @@ -18,7 +18,7 @@ runs: # Update packages sudo apt-get update sudo apt-get install -y cmake - + - name: General dependencies shell: bash run: | diff --git a/.github/workflows/indexer-connector-tests.yml b/.github/workflows/indexer-connector-tests.yml new file mode 100644 index 00000000000..6752c327cae --- /dev/null +++ b/.github/workflows/indexer-connector-tests.yml @@ -0,0 +1,57 @@ +name: Indexer connector + +on: + workflow_dispatch: + pull_request: + # Pull request events + types: [synchronize, opened, reopened, ready_for_review] + # Path filtering + paths: + - ".github/workflows/-tests.yml" + - ".github/actions/compile_and_test/action.yml" + - ".github/actions/indexer_connector_deps/action.yml" + - "src/shared_modules/indexer_connector/**" + - "src/wazuh_modules/http-request/**" + +jobs: + indexer_connector-qa: + runs-on: ubuntu-22.04 + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + submodules: recursive + + - name: Project dependencies + uses: ./.github/actions/indexer_connector_deps + + # indexer connector + - name: Indexer connector + uses: ./.github/actions/compile + with: + path: src/shared_modules/indexer_connector + + # Install python dependencies + - name: Install dependencies + run: | + pip install -r src/shared_modules/indexer_connector/qa/requirements.txt + + # Create folder for test logs + - name: Create folder for test logs + run: | + mkdir -p ${{ github.workspace }}/qa_logs + + # Run indexer connector tests. 
+ - name: Run tests + run: | + cd src + python -m pytest -vv shared_modules/indexer_connector/qa/ --log-cli-level=DEBUG + rm -rf tmp + + # Upload log files of the tests + - name: Upload log files + if: always() + uses: actions/upload-artifact@v3 + with: + name: QA log files + path: ${{ github.workspace }}/qa_logs diff --git a/src/shared_modules/indexer_connector/qa/requirements.txt b/src/shared_modules/indexer_connector/qa/requirements.txt new file mode 100644 index 00000000000..c7e01e10e6d --- /dev/null +++ b/src/shared_modules/indexer_connector/qa/requirements.txt @@ -0,0 +1,4 @@ +pytest==7.2.2 +jsonschema==4.17.3 +docker + diff --git a/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/config.json b/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/config.json new file mode 100644 index 00000000000..58452acbd2d --- /dev/null +++ b/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/config.json @@ -0,0 +1,5 @@ +{ + "name": "wazuh-states-vulnerabilities-default", + "enabled": "yes", + "hosts": ["http://localhost:9200"] +} diff --git a/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/event_delete.json b/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/event_delete.json new file mode 100644 index 00000000000..04ec960c6a3 --- /dev/null +++ b/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/event_delete.json @@ -0,0 +1,54 @@ +{ + "id": "000_pkghash_CVE-2022-1234", + "operation": "DELETED", + "data":{ + "agent": { + "build": { + "original": "sample_build_1" + }, + "ephemeral_id": "eph_id_1", + "id": "000", + "name": "agent_name_1", + "type": "agent_type_1", + "version": "1.0.0" + }, + "message": "Sample message", + "package": { + "architecture": "x64", + "build_version": "1.0.0", + "checksum": "checksum_value", + "description": "Sample package description", + "install_scope": "global", + "installed": "2023-09-17T12:00:00Z", + "license": "MIT", + "name": "sample_package", + "path": "/path/to/package", + "reference": "sample_reference", + "size": 12345, + "type": "sample_package_type", + "version": "1.0.0" + }, + "tags": ["sample", "tag1"], + "vulnerability": { + "detected_at": "2023-09-18T12:00:00Z", + "published_at": "2023-01-18T12:00:00Z", + "category": "sample_category", + "classification": "sample_classification", + "description": "Sample vulnerability description", + "enumeration": "sample_enumeration", + "id": "vuln_id_1", + "reference": "sample_reference", + "report_id": "report_id_1", + "scanner": { + "vendor": "sample_vendor" + }, + "score": { + "base": 5.0, + "environmental": 5.5, + "temporal": 4.5, + "version": "1.0.0" + }, + "severity": "medium" + } + } +} diff --git a/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/event_insert.json b/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/event_insert.json new file mode 100644 index 00000000000..d8050fe1a67 --- /dev/null +++ b/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/event_insert.json @@ -0,0 +1,54 @@ +{ + "id": "000_pkghash_CVE-2022-1234", + "operation": "INSERT", + "data":{ + "agent": { + "build": { + "original": "sample_build_1" + }, + "ephemeral_id": "eph_id_1", + "id": "000", + "name": "agent_name_1", + "type": "agent_type_1", + "version": "1.0.0" + }, + "message": "Sample message", + "package": { + "architecture": "x64", + 
"build_version": "1.0.0", + "checksum": "checksum_value", + "description": "Sample package description", + "install_scope": "global", + "installed": "2023-09-17T12:00:00Z", + "license": "MIT", + "name": "sample_package", + "path": "/path/to/package", + "reference": "sample_reference", + "size": 12345, + "type": "sample_package_type", + "version": "1.0.0" + }, + "tags": ["sample", "tag1"], + "vulnerability": { + "detected_at": "2023-09-18T12:00:00Z", + "published_at": "2023-01-18T12:00:00Z", + "category": "sample_category", + "classification": "sample_classification", + "description": "Sample vulnerability description", + "enumeration": "sample_enumeration", + "id": "vuln_id_1", + "reference": "sample_reference", + "report_id": "report_id_1", + "scanner": { + "vendor": "sample_vendor" + }, + "score": { + "base": 5.0, + "environmental": 5.5, + "temporal": 4.5, + "version": "1.0.0" + }, + "severity": "medium" + } + } +} diff --git a/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/template.json b/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/template.json new file mode 100644 index 00000000000..77f2a3334f2 --- /dev/null +++ b/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/template.json @@ -0,0 +1,283 @@ +{ + "index_patterns": [ + "wazuh-states-vulnerabilities" + ], + "priority": 1, + "template": { + "settings": { + "index": { + "codec": "best_compression", + "mapping": { + "total_fields": { + "limit": 1000 + } + }, + "number_of_replicas": "0", + "number_of_shards": "1", + "query.default_field": [ + "base.tags", + "agent.id", + "host.os.family", + "host.os.full.text", + "host.os.version", + "package.name", + "package.version", + "vulnerability.id", + "vulnerability.description.text", + "vulnerability.severity", + "wazuh.cluster.name" + ], + "refresh_interval": "2s" + } + }, + "mappings": { + "date_detection": false, + "dynamic": "strict", + "properties": { + "agent": { + "properties": { + "build": { + "properties": { + "original": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "ephemeral_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "host": { + "properties": { + "os": { + "properties": { + "family": { + "ignore_above": 1024, + "type": "keyword" + }, + "full": { + "fields": { + "text": { + "type": "text" + } + }, + "ignore_above": 1024, + "type": "keyword" + }, + "kernel": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "fields": { + "text": { + "type": "text" + } + }, + "ignore_above": 1024, + "type": "keyword" + }, + "platform": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "message": { + "type": "text" + }, + "package": { + "properties": { + "architecture": { + "ignore_above": 1024, + "type": "keyword" + }, + "build_version": { + "ignore_above": 1024, + "type": "keyword" + }, + "checksum": { + "ignore_above": 1024, + "type": "keyword" + }, + "description": { + "ignore_above": 1024, + "type": "keyword" + }, + "install_scope": { + "ignore_above": 1024, + "type": "keyword" + }, + "installed": { + "type": "date" + }, + "license": { + "ignore_above": 1024, + 
"type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "path": { + "ignore_above": 1024, + "type": "keyword" + }, + "reference": { + "ignore_above": 1024, + "type": "keyword" + }, + "size": { + "type": "long" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "tags": { + "ignore_above": 1024, + "type": "keyword" + }, + "vulnerability": { + "properties": { + "category": { + "ignore_above": 1024, + "type": "keyword" + }, + "classification": { + "ignore_above": 1024, + "type": "keyword" + }, + "description": { + "fields": { + "text": { + "type": "text" + } + }, + "ignore_above": 1024, + "type": "keyword" + }, + "detected_at": { + "type": "date" + }, + "enumeration": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "published_at": { + "type": "date" + }, + "reference": { + "ignore_above": 1024, + "type": "keyword" + }, + "report_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "scanner": { + "properties": { + "vendor": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "score": { + "properties": { + "base": { + "type": "float" + }, + "environmental": { + "type": "float" + }, + "temporal": { + "type": "float" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "severity": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "wazuh": { + "properties": { + "cluster": { + "properties": { + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "node": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "manager": { + "properties": { + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "schema": { + "properties": { + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + } + } + } + } +} diff --git a/src/shared_modules/indexer_connector/qa/test_data/test_initialize_indexer_connector/config.json b/src/shared_modules/indexer_connector/qa/test_data/test_initialize_indexer_connector/config.json new file mode 100644 index 00000000000..58452acbd2d --- /dev/null +++ b/src/shared_modules/indexer_connector/qa/test_data/test_initialize_indexer_connector/config.json @@ -0,0 +1,5 @@ +{ + "name": "wazuh-states-vulnerabilities-default", + "enabled": "yes", + "hosts": ["http://localhost:9200"] +} diff --git a/src/shared_modules/indexer_connector/qa/test_data/test_initialize_indexer_connector/template.json b/src/shared_modules/indexer_connector/qa/test_data/test_initialize_indexer_connector/template.json new file mode 100644 index 00000000000..77f2a3334f2 --- /dev/null +++ b/src/shared_modules/indexer_connector/qa/test_data/test_initialize_indexer_connector/template.json @@ -0,0 +1,283 @@ +{ + "index_patterns": [ + "wazuh-states-vulnerabilities" + ], + "priority": 1, + "template": { + "settings": { + "index": { + "codec": "best_compression", + "mapping": { + "total_fields": { + "limit": 1000 + } + }, + "number_of_replicas": "0", + "number_of_shards": "1", + "query.default_field": [ + "base.tags", + "agent.id", + "host.os.family", + "host.os.full.text", + "host.os.version", + "package.name", + "package.version", + "vulnerability.id", + "vulnerability.description.text", + "vulnerability.severity", + "wazuh.cluster.name" + ], + "refresh_interval": "2s" + } + }, + "mappings": { + "date_detection": false, + "dynamic": "strict", + "properties": { + "agent": { + "properties": { + "build": { + "properties": { + "original": { + 
"ignore_above": 1024, + "type": "keyword" + } + } + }, + "ephemeral_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "host": { + "properties": { + "os": { + "properties": { + "family": { + "ignore_above": 1024, + "type": "keyword" + }, + "full": { + "fields": { + "text": { + "type": "text" + } + }, + "ignore_above": 1024, + "type": "keyword" + }, + "kernel": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "fields": { + "text": { + "type": "text" + } + }, + "ignore_above": 1024, + "type": "keyword" + }, + "platform": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "message": { + "type": "text" + }, + "package": { + "properties": { + "architecture": { + "ignore_above": 1024, + "type": "keyword" + }, + "build_version": { + "ignore_above": 1024, + "type": "keyword" + }, + "checksum": { + "ignore_above": 1024, + "type": "keyword" + }, + "description": { + "ignore_above": 1024, + "type": "keyword" + }, + "install_scope": { + "ignore_above": 1024, + "type": "keyword" + }, + "installed": { + "type": "date" + }, + "license": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "path": { + "ignore_above": 1024, + "type": "keyword" + }, + "reference": { + "ignore_above": 1024, + "type": "keyword" + }, + "size": { + "type": "long" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "tags": { + "ignore_above": 1024, + "type": "keyword" + }, + "vulnerability": { + "properties": { + "category": { + "ignore_above": 1024, + "type": "keyword" + }, + "classification": { + "ignore_above": 1024, + "type": "keyword" + }, + "description": { + "fields": { + "text": { + "type": "text" + } + }, + "ignore_above": 1024, + "type": "keyword" + }, + "detected_at": { + "type": "date" + }, + "enumeration": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "published_at": { + "type": "date" + }, + "reference": { + "ignore_above": 1024, + "type": "keyword" + }, + "report_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "scanner": { + "properties": { + "vendor": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "score": { + "properties": { + "base": { + "type": "float" + }, + "environmental": { + "type": "float" + }, + "temporal": { + "type": "float" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "severity": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "wazuh": { + "properties": { + "cluster": { + "properties": { + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "node": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "manager": { + "properties": { + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "schema": { + "properties": { + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + } + } + } + } +} diff --git a/src/shared_modules/indexer_connector/qa/test_efficacy_log.py b/src/shared_modules/indexer_connector/qa/test_efficacy_log.py new file mode 100644 index 
00000000000..a8c1537b93d --- /dev/null +++ b/src/shared_modules/indexer_connector/qa/test_efficacy_log.py @@ -0,0 +1,321 @@ +import pytest +import docker +import time +import requests +import logging +import os +import subprocess +import inspect +from pathlib import Path + +LOGGER = logging.getLogger(__name__) + +def init_opensearch(): + client = docker.from_env() + env_vars = { + 'discovery.type': 'single-node', + 'plugins.security.disabled': 'true', + 'OPENSEARCH_INITIAL_ADMIN_PASSWORD': 'WazuhTest99$', + } + client.containers.run("opensearchproject/opensearch", detach=True, ports={'9200/tcp': 9200}, + environment=env_vars, name='opensearch', stdout=True, stderr=True) + ## Wait for the container is running and opensearch is ready + while True: + try: + response = requests.get('http://localhost:9200') + if response.status_code == 200: + break + except requests.exceptions.ConnectionError: + pass + time.sleep(1) + return client + + +@pytest.fixture(scope='session') +def opensearch(): + client = init_opensearch() + yield client + # Stop all containers + for container in client.containers.list(): + container.stop() + client.containers.prune() + +def test_opensearch_health(opensearch): + url = 'http://localhost:9200/_cluster/health' + response = requests.get(url) + assert response.status_code == 200 + assert response.json()['status'] == 'green' + +def test_initialize_indexer_connector(opensearch): + os.chdir(Path(__file__).parent.parent.parent.parent) + LOGGER.debug(f"Current directory: {os.getcwd()}") + + ## Remove folder queue/indexer/db/wazuh-states-vulnerabilities-cluster + if Path("queue/indexer/db/wazuh-states-vulnerabilities-default").exists(): + for file in Path("queue/indexer/db/wazuh-states-vulnerabilities-default").glob("*"): + file.unlink() + Path("queue/indexer/db/wazuh-states-vulnerabilities-default").rmdir() + + # Run indexer connector testtool out of the container + cmd = Path("build/shared_modules/indexer_connector/testtool/", "indexer_connector_tool") + cmdAlt = Path("shared_modules/indexer_connector/build/testtool/", "indexer_connector_tool") + + # Ensure the binary exists + if not cmd.exists(): + cmd = cmdAlt + assert cmd.exists(), "The binary does not exists" + + # Remove previous log file if exists + if Path("log.out").exists(): + Path("log.out").unlink() + + test_name = inspect.currentframe().f_code.co_name + + LOGGER.debug(f"Running test {test_name}") + + args = ["-c", "shared_modules/indexer_connector/qa/test_data/" + test_name + "/config.json", + "-t", "shared_modules/indexer_connector/qa/test_data/" + test_name + "/template.json", + "-w", "120"] + + command = [cmd] + args + + LOGGER.debug(f"Running command: {command}") + process = subprocess.Popen(command) + # if the process is not running fail the test + assert process.poll() is None, "The process is not running" + + # Query to check if the index is created and template is applied + counter = 0 + while counter < 10: + url = 'http://localhost:9200/_cat/indices' + response = requests.get(url) + if response.status_code == 200 and 'wazuh-states-vulnerabilities-default' in response.text: + LOGGER.debug(f"Index created {response.text}") + break + time.sleep(1) + counter += 1 + + process.terminate() + assert counter < 10, "The index was not created" + +def test_add_bulk_indexer_connector(opensearch): + os.chdir(Path(__file__).parent.parent.parent.parent) + LOGGER.debug(f"Current directory: {os.getcwd()}") + + ## Remove folder queue/indexer/db/wazuh-states-vulnerabilities-cluster + if 
Path("queue/indexer/db/wazuh-states-vulnerabilities-default").exists(): + for file in Path("queue/indexer/db/wazuh-states-vulnerabilities-default").glob("*"): + file.unlink() + Path("queue/indexer/db/wazuh-states-vulnerabilities-default").rmdir() + + # Run indexer connector testtool out of the container + cmd = Path("build/shared_modules/indexer_connector/testtool/", "indexer_connector_tool") + cmdAlt = Path("shared_modules/indexer_connector/build/testtool/", "indexer_connector_tool") + + # Ensure the binary exists + if not cmd.exists(): + cmd = cmdAlt + assert cmd.exists(), "The binary does not exists" + + # Remove previous log file if exists + if Path("log.out").exists(): + Path("log.out").unlink() + + test_name = inspect.currentframe().f_code.co_name + + LOGGER.debug(f"Running test {test_name}") + + args = ["-c", "shared_modules/indexer_connector/qa/test_data/" + test_name + "/config.json", + "-t", "shared_modules/indexer_connector/qa/test_data/" + test_name + "/template.json", + "-e", "shared_modules/indexer_connector/qa/test_data/" + test_name + "/event_insert.json", + "-w", "120", + "-l", "log.out"] + + command = [cmd] + args + process = subprocess.Popen(command) + # if the process is not running fail the test + assert process.poll() is None, "The process is not running" + + # Query to check if the index is created and template is applied + counter = 0 + while counter < 10: + url = 'http://localhost:9200/wazuh-states-vulnerabilities-default/_search' + query = { + "query": { + "match_all": {} + } + } + response = requests.get(url, json=query) + LOGGER.debug(f"Info {response.text}") + if response.status_code == 200 and response.json()['hits']['total']['value'] == 1: + LOGGER.debug(f"Document created {response.text}") + break + time.sleep(1) + counter += 1 + assert counter < 10, "The document was not created" + process.terminate() + + # Delete the document to test the resync. 
+ url = 'http://localhost:9200/wazuh-states-vulnerabilities-default/_delete_by_query?refresh=true' + query = { + "query": { + "match_all": {} + } + } + response = requests.post(url, json=query) + assert response.status_code == 200 + + url = 'http://localhost:9200/wazuh-states-vulnerabilities-default/_search' + query = { + "query": { + "match_all": {} + } + } + response = requests.get(url, json=query) + + # Run the process again to check the resync + args = ["-c", "shared_modules/indexer_connector/qa/test_data/" + test_name + "/config.json", + "-t", "shared_modules/indexer_connector/qa/test_data/" + test_name + "/template.json", + "-s", "000", + "-w", "120"] + command = [cmd] + args + process = subprocess.Popen(command) + + # Query to check if the element is resynced + counter = 0 + while counter < 10: + url = 'http://localhost:9200/wazuh-states-vulnerabilities-default/_search' + query = { + "query": { + "match_all": {} + } + } + response = requests.get(url, json=query) + LOGGER.debug(f"Info {response.text}") + if response.status_code == 200 and response.json()['hits']['total']['value'] == 1: + LOGGER.debug(f"Document created in sync {response.text}") + break + time.sleep(1) + counter += 1 + + assert counter < 10, "The document was not resynced" + process.terminate() + + # Delete element + args = ["-c", "shared_modules/indexer_connector/qa/test_data/" + test_name + "/config.json", + "-t", "shared_modules/indexer_connector/qa/test_data/" + test_name + "/template.json", + "-e", "shared_modules/indexer_connector/qa/test_data/" + test_name + "/event_delete.json", + "-w", "120", + "-l", "log.out"] + + command = [cmd] + args + process = subprocess.Popen(command) + + # if the process is not running fail the test + assert process.poll() is None, "The process is not running" + + # Query to check if the element is deleted + counter = 0 + while counter < 10: + url = 'http://localhost:9200/wazuh-states-vulnerabilities-default/_search' + query = { + "query": { + "match_all": {} + } + } + response = requests.get(url, json=query) + LOGGER.debug(f"Info {response.text}") + if response.status_code == 200 and response.json()['hits']['total']['value'] == 0: + LOGGER.debug(f"Document deleted {response.text}") + break + time.sleep(1) + counter += 1 + + assert counter < 10, "The document was not deleted" + + process.terminate() + + # Manual insert and check if resync clean the element. 
+ url = 'http://localhost:9200/wazuh-states-vulnerabilities-cluster/_doc/000_pkghash_CVE-2022-123456?refresh=true' + query = """{ + "agent": { + "build": { + "original": "sample_build_1" + }, + "ephemeral_id": "eph_id_1", + "id": "000", + "name": "agent_name_1", + "type": "agent_type_1", + "version": "1.0.0" + }, + "message": "Sample message", + "package": { + "architecture": "x64", + "build_version": "1.0.0", + "checksum": "checksum_value", + "description": "Sample package description", + "install_scope": "global", + "installed": "2023-09-17T12:00:00Z", + "license": "MIT", + "name": "sample_package", + "path": "/path/to/package", + "reference": "sample_reference", + "size": 12345, + "type": "sample_package_type", + "version": "1.0.0" + }, + "tags": ["sample", "tag1"], + "vulnerability": { + "detected_at": "2023-09-18T12:00:00Z", + "published_at": "2023-01-18T12:00:00Z", + "category": "sample_category", + "classification": "sample_classification", + "description": "Sample vulnerability description", + "enumeration": "sample_enumeration", + "id": "vuln_id_1", + "reference": "sample_reference", + "report_id": "report_id_1", + "scanner": { + "vendor": "sample_vendor" + }, + "score": { + "base": 5.0, + "environmental": 5.5, + "temporal": 4.5, + "version": "1.0.0" + }, + "severity": "medium" + } + }""" + response = requests.put(url, data=query) + LOGGER.debug(f"Manual insert info {response.text}") + + # Run the process again to check the resync + args = ["-c", "shared_modules/indexer_connector/qa/test_data/" + test_name + "/config.json", + "-t", "shared_modules/indexer_connector/qa/test_data/" + test_name + "/template.json", + "-s", "000", + "-w", "120"] + command = [cmd] + args + process = subprocess.Popen(command) + + # Query to check if the element is resynced + counter = 0 + while counter < 10: + url = 'http://localhost:9200/wazuh-states-vulnerabilities-default/_search' + query = { + "query": { + "match_all": {} + } + } + response = requests.get(url, json=query) + LOGGER.debug(f"Info {response.text}") + if response.status_code == 200 and response.json()['hits']['total']['value'] == 0: + LOGGER.debug(f"Document deleted in sync {response.text}") + break + time.sleep(1) + counter += 1 + + assert counter < 10, "The document was not resynced" + process.terminate() + + diff --git a/src/shared_modules/indexer_connector/src/indexerConnector.cpp b/src/shared_modules/indexer_connector/src/indexerConnector.cpp index 5d3245e6d1c..7e0c488ae87 100644 --- a/src/shared_modules/indexer_connector/src/indexerConnector.cpp +++ b/src/shared_modules/indexer_connector/src/indexerConnector.cpp @@ -116,171 +116,21 @@ static void builderBulkIndex(std::string& bulkData, std::string_view id, std::st bulkData.append("\n"); } -IndexerConnector::IndexerConnector( - const nlohmann::json& config, - const std::string& templatePath, - const std::function& - logFunction, - const uint32_t& timeout) -{ - if (logFunction) - { - Log::assignLogFunction(logFunction); - } - - // Get index name. - m_indexName = config.at("name").get_ref(); - - m_db = std::make_unique(std::string(DATABASE_BASE_PATH) + "db/" + m_indexName); - - auto secureCommunication = SecureCommunication::builder(); - initConfiguration(secureCommunication, config); - - // Read template file. - std::ifstream templateFile(templatePath); - if (!templateFile.is_open()) - { - throw std::runtime_error("Could not open template file: " + templatePath); - } - nlohmann::json templateData = nlohmann::json::parse(templateFile); - - // Initialize publisher. 
- auto selector {std::make_shared(config.at("hosts"), timeout, secureCommunication)}; - - m_dispatcher = std::make_unique( - [this, selector, secureCommunication](std::queue& dataQueue) - { - std::scoped_lock lock(m_syncMutex); - - if (!m_initialized && m_initializeThread.joinable()) - { - logDebug2(IC_NAME, "Waiting for initialization thread to process events."); - m_initializeThread.join(); - } - - if (m_stopping.load()) - { - logDebug2(IC_NAME, "IndexerConnector is stopping, event processing will be skipped."); - throw std::runtime_error("IndexerConnector is stopping, event processing will be skipped."); - } - - auto url = selector->getNext(); - std::string bulkData; - url.append("/_bulk?refresh=wait_for"); - - while (!dataQueue.empty()) - { - auto data = dataQueue.front(); - dataQueue.pop(); - auto parsedData = nlohmann::json::parse(data); - const auto& id = parsedData.at("id").get_ref(); - - if (parsedData.at("operation").get_ref().compare("DELETED") == 0) - { - builderBulkDelete(bulkData, id, m_indexName); - m_db->delete_(id); - } - else - { - const auto dataString = parsedData.at("data").dump(); - builderBulkIndex(bulkData, id, m_indexName, dataString); - m_db->put(id, dataString); - } - } - // Process data. - HTTPRequest::instance().post( - HttpURL(url), - bulkData, - [](const std::string& response) { logDebug2(IC_NAME, "Response: %s", response.c_str()); }, - [](const std::string& error, const long statusCode) - { - logError(IC_NAME, "%s, status code: %ld", error.c_str(), statusCode); - throw std::runtime_error(error); - }, - "", - DEFAULT_HEADERS, - secureCommunication); - }, - DATABASE_BASE_PATH + m_indexName, - ELEMENTS_PER_BULK); - - m_syncQueue = std::make_unique( - [this, selector, secureCommunication](const std::string& agentId) - { - try - { - std::scoped_lock lock(m_syncMutex); - if (!abuseControl(agentId)) - { - logDebug2(IC_NAME, "Syncing agent '%s' with the indexer.", agentId.c_str()); - diff(getAgentDocumentsIds(selector->getNext(), agentId, secureCommunication), - agentId, - secureCommunication, - selector); - } - } - catch (const std::exception& e) - { - logWarn(IC_NAME, "Failed to sync agent '%s' with the indexer.", agentId.c_str()); - logDebug1(IC_NAME, "Error: %s", e.what()); - } - }, - SYNC_WORKERS, - SYNC_QUEUE_LIMIT); - - m_initializeThread = std::thread( - // coverity[copy_constructor_call] - [this, templateData, selector, secureCommunication]() - { - auto sleepTime = std::chrono::seconds(START_TIME); - std::unique_lock lock(m_mutex); - auto warningPrinted {false}; - do - { - try - { - sleepTime *= DOUBLE_FACTOR; - if (sleepTime.count() > MAX_WAIT_TIME) - { - sleepTime = std::chrono::seconds(MAX_WAIT_TIME); - } - - initialize(templateData, selector, secureCommunication); - } - catch (const std::exception& e) - { - logDebug1(IC_NAME, - "Unable to initialize IndexerConnector for index '%s': %s. Retrying in %ld " - "seconds.", - m_indexName.c_str(), - e.what(), - sleepTime.count()); - if (!warningPrinted) - { - logWarn(IC_NAME, - "IndexerConnector initialization failed for index '%s', retrying until the connection " - "is successful.", - m_indexName.c_str()); - warningPrinted = true; - } - } - } while (!m_initialized && !m_cv.wait_for(lock, sleepTime, [this]() { return m_stopping.load(); })); - }); -} - bool IndexerConnector::abuseControl(const std::string& agentId) { const auto currentTime = std::chrono::system_clock::now(); + // If the agent is in the map, check if the last sync was less than MINIMAL_SYNC_TIME minutes ago. 
if (const auto lastSync = m_lastSync.find(agentId); lastSync != m_lastSync.end()) { const auto diff = std::chrono::duration_cast(currentTime - lastSync->second); + // If the last sync was less than MINIMAL_SYNC_TIME minutes ago, return true. if (diff.count() < MINIMAL_SYNC_TIME) { - logDebug2(IC_NAME, "Agent '%s' ommited due to abuse control.", agentId.c_str()); + logDebug2(IC_NAME, "Agent '%s' sync ommited due to abuse control.", agentId.c_str()); return true; } } + // If the agent is not in the map, add it to the map with the current time. m_lastSync[agentId] = currentTime; return false; } @@ -336,29 +186,6 @@ nlohmann::json IndexerConnector::getAgentDocumentsIds(const std::string& url, return responseJson; } -IndexerConnector::~IndexerConnector() -{ - m_stopping.store(true); - m_cv.notify_all(); - - m_dispatcher->cancel(); - - if (m_initializeThread.joinable()) - { - m_initializeThread.join(); - } -} - -void IndexerConnector::publish(const std::string& message) -{ - m_dispatcher->push(message); -} - -void IndexerConnector::sync(const std::string& agentId) -{ - m_syncQueue->push(agentId); -} - void IndexerConnector::diff(const nlohmann::json& responseJson, const std::string& agentId, const SecureCommunication& secureCommunication, @@ -376,11 +203,13 @@ void IndexerConnector::diff(const nlohmann::json& responseJson, } } + // Iterate over the database and check if the element is in the status vector. for (const auto& [key, value] : m_db->seek(agentId)) { bool found {false}; for (auto& [id, data] : status) { + // If the element is found, mark it as found. if (key.compare(id) == 0) { data = true; @@ -389,12 +218,15 @@ void IndexerConnector::diff(const nlohmann::json& responseJson, } } + // If the element is not found, add it to the actions vector. This element will be added to the indexer. if (!found) { actions.emplace_back(key, false); } } + // Iterate over the status vector and check if the element is marked as not found. + // This means that the element is in the indexer but not in the database. To solve this, the element will be deleted for (const auto& [id, data] : status) { if (!data) @@ -407,6 +239,9 @@ void IndexerConnector::diff(const nlohmann::json& responseJson, url.append("/_bulk?refresh=wait_for"); std::string bulkData; + // Iterate over the actions vector and build the bulk data. + // If the element is marked as deleted, the element will be deleted from the indexer. + // If the element is not marked as deleted, the element will be added to the indexer. for (const auto& [id, deleted] : actions) { if (deleted) @@ -430,11 +265,7 @@ void IndexerConnector::diff(const nlohmann::json& responseJson, HttpURL(url), bulkData, [](const std::string& response) { logDebug2(IC_NAME, "Response: %s", response.c_str()); }, - [](const std::string& error, const long statusCode) - { - logError(IC_NAME, "%s, status code: %ld", error.c_str(), statusCode); - throw std::runtime_error(error); - }, + [](const std::string& error, const long statusCode) { throw std::runtime_error(error); }, "", DEFAULT_HEADERS, secureCommunication); @@ -487,3 +318,179 @@ void IndexerConnector::initialize(const nlohmann::json& templateData, m_initialized = true; logInfo(IC_NAME, "IndexerConnector initialized successfully for index: %s.", m_indexName.c_str()); } + +IndexerConnector::IndexerConnector( + const nlohmann::json& config, + const std::string& templatePath, + const std::function& + logFunction, + const uint32_t& timeout) +{ + if (logFunction) + { + Log::assignLogFunction(logFunction); + } + + // Get index name. 
+ m_indexName = config.at("name").get_ref(); + + m_db = std::make_unique(std::string(DATABASE_BASE_PATH) + "db/" + m_indexName); + + auto secureCommunication = SecureCommunication::builder(); + initConfiguration(secureCommunication, config); + + // Read template file. + std::ifstream templateFile(templatePath); + if (!templateFile.is_open()) + { + throw std::runtime_error("Could not open template file: " + templatePath); + } + nlohmann::json templateData = nlohmann::json::parse(templateFile); + + // Initialize publisher. + auto selector {std::make_shared(config.at("hosts"), timeout, secureCommunication)}; + + m_dispatcher = std::make_unique( + [this, selector, secureCommunication](std::queue& dataQueue) + { + std::scoped_lock lock(m_syncMutex); + + if (!m_initialized && m_initializeThread.joinable()) + { + logDebug2(IC_NAME, "Waiting for initialization thread to process events."); + m_initializeThread.join(); + } + + if (m_stopping.load()) + { + logDebug2(IC_NAME, "IndexerConnector is stopping, event processing will be skipped."); + throw std::runtime_error("IndexerConnector is stopping, event processing will be skipped."); + } + + auto url = selector->getNext(); + std::string bulkData; + url.append("/_bulk?refresh=wait_for"); + + while (!dataQueue.empty()) + { + auto data = dataQueue.front(); + dataQueue.pop(); + auto parsedData = nlohmann::json::parse(data); + const auto& id = parsedData.at("id").get_ref(); + + if (parsedData.at("operation").get_ref().compare("DELETED") == 0) + { + builderBulkDelete(bulkData, id, m_indexName); + m_db->delete_(id); + } + else + { + const auto dataString = parsedData.at("data").dump(); + builderBulkIndex(bulkData, id, m_indexName, dataString); + m_db->put(id, dataString); + } + } + // Process data. + HTTPRequest::instance().post( + HttpURL(url), + bulkData, + [](const std::string& response) { logDebug2(IC_NAME, "Response: %s", response.c_str()); }, + [](const std::string& error, const long statusCode) + { + logError(IC_NAME, "%s, status code: %ld", error.c_str(), statusCode); + throw std::runtime_error(error); + }, + "", + DEFAULT_HEADERS, + secureCommunication); + }, + DATABASE_BASE_PATH + m_indexName, + ELEMENTS_PER_BULK); + + m_syncQueue = std::make_unique( + [this, selector, secureCommunication](const std::string& agentId) + { + try + { + std::scoped_lock lock(m_syncMutex); + if (!abuseControl(agentId)) + { + logDebug2(IC_NAME, "Syncing agent '%s' with the indexer.", agentId.c_str()); + diff(getAgentDocumentsIds(selector->getNext(), agentId, secureCommunication), + agentId, + secureCommunication, + selector); + } + } + catch (const std::exception& e) + { + logWarn(IC_NAME, "Failed to sync agent '%s' with the indexer.", agentId.c_str()); + logDebug1(IC_NAME, "Error: %s", e.what()); + } + }, + SYNC_WORKERS, + SYNC_QUEUE_LIMIT); + + m_initializeThread = std::thread( + // coverity[copy_constructor_call] + [this, templateData, selector, secureCommunication]() + { + auto sleepTime = std::chrono::seconds(START_TIME); + std::unique_lock lock(m_mutex); + auto warningPrinted {false}; + do + { + try + { + sleepTime *= DOUBLE_FACTOR; + if (sleepTime.count() > MAX_WAIT_TIME) + { + sleepTime = std::chrono::seconds(MAX_WAIT_TIME); + } + + initialize(templateData, selector, secureCommunication); + } + catch (const std::exception& e) + { + logDebug1(IC_NAME, + "Unable to initialize IndexerConnector for index '%s': %s. 
Retrying in %ld " + "seconds.", + m_indexName.c_str(), + e.what(), + sleepTime.count()); + if (!warningPrinted) + { + logWarn(IC_NAME, + "IndexerConnector initialization failed for index '%s', retrying until the connection " + "is successful.", + m_indexName.c_str()); + warningPrinted = true; + } + } + } while (!m_initialized && !m_cv.wait_for(lock, sleepTime, [this]() { return m_stopping.load(); })); + }); +} + +IndexerConnector::~IndexerConnector() +{ + m_stopping.store(true); + m_cv.notify_all(); + + m_dispatcher->cancel(); + + if (m_initializeThread.joinable()) + { + m_initializeThread.join(); + } +} + +void IndexerConnector::publish(const std::string& message) +{ + m_dispatcher->push(message); +} + +void IndexerConnector::sync(const std::string& agentId) +{ + m_syncQueue->push(agentId); +} diff --git a/src/shared_modules/indexer_connector/testtool/cmdArgParser.hpp b/src/shared_modules/indexer_connector/testtool/cmdArgParser.hpp index aad93d1836f..42b49f9fde7 100644 --- a/src/shared_modules/indexer_connector/testtool/cmdArgParser.hpp +++ b/src/shared_modules/indexer_connector/testtool/cmdArgParser.hpp @@ -36,6 +36,8 @@ class CmdLineArgs , m_autoGenerated {paramValueOf(argc, argv, "-a", std::make_pair(false, ""))} , m_agentForSyncEvent {paramValueOf(argc, argv, "-s", std::make_pair(false, ""))} , m_numberOfEvents {paramValueOf(argc, argv, "-n", std::make_pair(false, ""))} + , m_waitTime {paramValueOf(argc, argv, "-w", std::make_pair(false, "0"))} + , m_logFilePath {paramValueOf(argc, argv, "-l", std::make_pair(false, ""))} { } @@ -89,11 +91,29 @@ class CmdLineArgs * @brief Gets the agent id to sync event. * @return Agent to sync event. */ - std::string getAgentIdSyncEvent() const + const std::string& getAgentIdSyncEvent() const { return m_agentForSyncEvent; } + /** + * @brief Gets the wait time. + * @return Wait time. + */ + uint64_t getWaitTime() const + { + return std::stoull(m_waitTime); + } + + /** + * @brief Gets the log file path. + * @return Log file path. + */ + const std::string& getLogFilePath() const + { + return m_logFilePath; + } + /** * @brief Shows the help to the user. 
*/ @@ -108,12 +128,12 @@ class CmdLineArgs << "\t-a AUTO_GENERATED\tSpecifies if the events are auto generated.\n" << "\t-n NUMBER_OF_EVENTS\tSpecifies the number of events to generate.\n" << "\t-s SYNC_EVENT\tSend sync event before push event.\n" + << "\t-w WAIT_TIME\tSpecifies the wait time before close.\n" << "\nExample:" << "\n\t./indexer_connector_testtool -c config.json -t template.json\n" << "\n\t./indexer_connector_testtool -c config.json -t template.json -e events.json\n" << "\n\t./indexer_connector_testtool -c config.json -t template.json -a true -n 10000\n" - << "\n\t./indexer_connector_testtool -c config.json -t template.json -s 000\n" - << std::endl; + << "\n\t./indexer_connector_testtool -c config.json -t template.json -s 000 -w 5\n\n"; } private: @@ -146,7 +166,8 @@ class CmdLineArgs const std::string m_numberOfEvents; const std::string m_autoGenerated; const std::string m_agentForSyncEvent; - ; + const std::string m_waitTime; + const std::string m_logFilePath; }; #endif // _CMD_ARGS_PARSER_HPP_ diff --git a/src/shared_modules/indexer_connector/testtool/input/config.json b/src/shared_modules/indexer_connector/testtool/input/config.json index fee4d3d0f31..bc26ab79a8c 100644 --- a/src/shared_modules/indexer_connector/testtool/input/config.json +++ b/src/shared_modules/indexer_connector/testtool/input/config.json @@ -1,10 +1,5 @@ { "name": "wazuh-states-vulnerabilities-cluster", "enabled": "yes", - "hosts": ["https://0.0.0.0:9200"], - "ssl": { - "certificate_authorities": ["/etc/filebeat/certs/root-ca.pem"], - "certificate": "/etc/filebeat/certs/filebeat.pem", - "key": "/etc/filebeat/certs/filebeat-key.pem" - } + "hosts": ["http://0.0.0.0:9200"] } diff --git a/src/shared_modules/indexer_connector/testtool/main.cpp b/src/shared_modules/indexer_connector/testtool/main.cpp index 604bfa5b58a..09eeb0291a4 100644 --- a/src/shared_modules/indexer_connector/testtool/main.cpp +++ b/src/shared_modules/indexer_connector/testtool/main.cpp @@ -35,7 +35,7 @@ float generateRandomFloat(float min, float max) int generateRandomInt(int min, int max) { - std::uniform_int_distribution distr(min, max); + std::uniform_int_distribution distr(min, max); return distr(ENG); } @@ -104,92 +104,120 @@ int main(const int argc, const char* argv[]) } const auto configuration = nlohmann::json::parse(configurationFile); - // Create indexer connector. - IndexerConnector indexerConnector(configuration, - cmdArgParser.getTemplateFilePath(), - [](const int logLevel, - const std::string& tag, - const std::string& file, - const int line, - const std::string& func, - const std::string& message, - va_list args) - { - auto pos = file.find_last_of('/'); - if (pos != std::string::npos) - { - pos++; - } - std::string fileName = file.substr(pos, file.size() - pos); - char formattedStr[MAXLEN] = {0}; - vsnprintf(formattedStr, MAXLEN, message.c_str(), args); - - if (logLevel != LOG_ERROR) - { - std::cout << tag << ":" << fileName << ":" << line << " " << func - << " : " << formattedStr << std::endl; - } - else - { - std::cerr << tag << ":" << fileName << ":" << line << " " << func - << " : " << formattedStr << std::endl; - } - }); - - if (!cmdArgParser.getAgentIdSyncEvent().empty()) + // Open file to write log. 
+ std::ofstream logFile; + if (!cmdArgParser.getLogFilePath().empty()) { - indexerConnector.sync(cmdArgParser.getAgentIdSyncEvent()); + logFile.open(cmdArgParser.getLogFilePath()); + if (!logFile.is_open()) + { + throw std::runtime_error("Failed to open log file: " + cmdArgParser.getLogFilePath()); + } } - // Read events file. - // If the events file path is empty, then the events are generated - // automatically. - if (!cmdArgParser.getEventsFilePath().empty()) - { - std::ifstream eventsFile(cmdArgParser.getEventsFilePath()); - if (!eventsFile.is_open()) + // Create indexer connector. + IndexerConnector indexerConnector( + configuration, + cmdArgParser.getTemplateFilePath(), + [&logFile](const int logLevel, + const std::string& tag, + const std::string& file, + const int line, + const std::string& func, + const std::string& message, + va_list args) { - throw std::runtime_error("Could not open events file."); - } - const auto events = nlohmann::json::parse(eventsFile); + auto pos = file.find_last_of('/'); + if (pos != std::string::npos) + { + pos++; + } + std::string fileName = file.substr(pos, file.size() - pos); + char formattedStr[MAXLEN] = {0}; + vsnprintf(formattedStr, MAXLEN, message.c_str(), args); + + if (logLevel != LOG_ERROR) + { + std::cout << tag << ":" << fileName << ":" << line << " " << func << " : " << formattedStr << "\n"; + } + else + { + std::cerr << tag << ":" << fileName << ":" << line << " " << func << " : " << formattedStr << "\n"; + } - indexerConnector.publish(events.dump()); + if (logFile.is_open()) + { + logFile << tag << ":" << fileName << ":" << line << " " << func << " : " << formattedStr << "\n"; + } + // Flush the log file every time a message is written. + logFile.flush(); + }); + + if (!cmdArgParser.getAgentIdSyncEvent().empty()) + { + indexerConnector.sync(cmdArgParser.getAgentIdSyncEvent()); } - else if (cmdArgParser.getAutoGenerated()) + else { - const auto n = cmdArgParser.getNumberOfEvents(); - // Read template file. - std::ifstream templateFile(cmdArgParser.getTemplateFilePath()); - if (!templateFile.is_open()) + // Read events file. + // If the events file path is empty, then the events are generated + // automatically. + if (!cmdArgParser.getEventsFilePath().empty()) { - throw std::runtime_error("Could not open template file."); - } - - nlohmann::json templateData; - templateFile >> templateData; + std::ifstream eventsFile(cmdArgParser.getEventsFilePath()); + if (!eventsFile.is_open()) + { + throw std::runtime_error("Could not open events file."); + } + const auto events = nlohmann::json::parse(eventsFile); - if (n == 0) - { - throw std::runtime_error("Number of events must be greater than 0."); + indexerConnector.publish(events.dump()); } - else + else if (cmdArgParser.getAutoGenerated()) { - for (size_t i = 0; i < n; ++i) + const auto n = cmdArgParser.getNumberOfEvents(); + // Read template file. 
+ + std::ifstream templateFile(cmdArgParser.getTemplateFilePath()); + if (!templateFile.is_open()) + { + throw std::runtime_error("Could not open template file."); + } + + nlohmann::json templateData; + templateFile >> templateData; + + if (n == 0) + { + throw std::runtime_error("Number of events must be greater than 0."); + } + else { - nlohmann::json randomData = - fillWithRandomData(templateData.at("template").at("mappings").at("properties")); - nlohmann::json event; - event["id"] = generateRandomString(20); - event["operation"] = "INSERT"; - event["data"] = std::move(randomData); - - indexerConnector.publish(event.dump()); + for (size_t i = 0; i < n; ++i) + { + nlohmann::json randomData = + fillWithRandomData(templateData.at("template").at("mappings").at("properties")); + nlohmann::json event; + event["id"] = generateRandomString(20); + event["operation"] = "INSERT"; + event["data"] = std::move(randomData); + + indexerConnector.publish(event.dump()); + } } } } - std::cout << "Press enter to stop the indexer connector tool..." << std::endl; - std::cin.get(); + + if (cmdArgParser.getWaitTime() > 0) + { + std::this_thread::sleep_for(std::chrono::seconds(cmdArgParser.getWaitTime())); + } + else + { + std::cout << "Press enter to stop the indexer connector tool... \n"; + std::cin.get(); + } } catch (const std::exception& e) { From 23c4499acf8785fd50e22d7a72e42794cca5baa6 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Mon, 29 Apr 2024 01:45:22 -0300 Subject: [PATCH 021/419] Add missing dependency --- .github/actions/indexer_connector_deps/action.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/actions/indexer_connector_deps/action.yml b/.github/actions/indexer_connector_deps/action.yml index 87e81800b61..101a7a3ffe6 100644 --- a/.github/actions/indexer_connector_deps/action.yml +++ b/.github/actions/indexer_connector_deps/action.yml @@ -42,3 +42,12 @@ runs: cmake .. -DCMAKE_PROJECT_NAME=http-request -DSRC_FOLDER=${SRC_FOLDER} && make -j2 shell: bash + - name: Build keystore + run: | + cd src + SRC_FOLDER=$(pwd) + + cd shared_modules/keystore + mkdir -p build && cd build + cmake .. -DCMAKE_PROJECT_NAME=keystore -DSRC_FOLDER=${SRC_FOLDER} && make -j2 + shell: bash From 566b97d7e8a96cde20ca1e2f8f28196203e4558c Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Thu, 2 May 2024 22:52:17 -0300 Subject: [PATCH 022/419] Fixes based on PR comments. --- src/shared_modules/indexer_connector/testtool/main.cpp | 6 +++--- .../src/policyManager/policyManager.hpp | 2 -- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/shared_modules/indexer_connector/testtool/main.cpp b/src/shared_modules/indexer_connector/testtool/main.cpp index 09eeb0291a4..d705a9a4724 100644 --- a/src/shared_modules/indexer_connector/testtool/main.cpp +++ b/src/shared_modules/indexer_connector/testtool/main.cpp @@ -176,7 +176,7 @@ int main(const int argc, const char* argv[]) } else if (cmdArgParser.getAutoGenerated()) { - const auto n = cmdArgParser.getNumberOfEvents(); + const auto eventsNumber = cmdArgParser.getNumberOfEvents(); // Read template file. 
std::ifstream templateFile(cmdArgParser.getTemplateFilePath()); @@ -188,13 +188,13 @@ int main(const int argc, const char* argv[]) nlohmann::json templateData; templateFile >> templateData; - if (n == 0) + if (eventsNumber == 0) { throw std::runtime_error("Number of events must be greater than 0."); } else { - for (size_t i = 0; i < n; ++i) + for (size_t i = 0; i < eventsNumber; ++i) { nlohmann::json randomData = fillWithRandomData(templateData.at("template").at("mappings").at("properties")); diff --git a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp index ce3c7a82868..eb00d5f0a9d 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp @@ -27,7 +27,6 @@ constexpr auto UNKNOWN_VALUE {" "}; constexpr auto STATES_VD_INDEX_NAME {"wazuh-states-vulnerabilities"}; -constexpr auto STATES_VD_INDEX_DEFAULT_CLUSTER_NAME {"wazuh"}; constexpr auto DEFAULT_TRANSLATION_LRU_SIZE {2048}; constexpr auto DEFAULT_OSDATA_LRU_SIZE {1000}; const static std::string UPDATER_PATH {"queue/vd_updater"}; @@ -116,7 +115,6 @@ class PolicyManager final : public Singleton newPolicy["indexer"]["ssl"]["certificate"] = ""; newPolicy["indexer"]["ssl"]["key"] = ""; } - newPolicy["indexer"]["name"] = STATES_VD_INDEX_NAME; if (!newPolicy.at("vulnerability-detection").contains("feed-update-interval")) From 3a85067c890677e6630b29804b7a685cdc74a2e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Luis=20L=C3=B3pez=20S=C3=A1nchez?= Date: Fri, 3 May 2024 09:15:08 +0200 Subject: [PATCH 023/419] Update deps_version in Makefile --- src/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index 068398840eb..3693076cbdf 100644 --- a/src/Makefile +++ b/src/Makefile @@ -1338,7 +1338,7 @@ TAR := tar -xf GUNZIP := gunzip GZIP := gzip CURL := curl -so -DEPS_VERSION = 25-23112 +DEPS_VERSION = 25 RESOURCES_URL_BASE := https://packages.wazuh.com/deps/ RESOURCES_URL := $(RESOURCES_URL_BASE)$(DEPS_VERSION) CPYTHON := cpython From 423941bffe3b22e421e986344a71e80194d34021 Mon Sep 17 00:00:00 2001 From: Marcel Kemp Date: Fri, 3 May 2024 10:04:15 +0200 Subject: [PATCH 024/419] build: bump revision to 40809 --- api/api/spec/spec.yaml | 2 +- framework/wazuh/core/cluster/__init__.py | 2 +- src/Doxyfile | 2 +- src/REVISION | 2 +- src/init/wazuh-client.sh | 2 +- src/init/wazuh-local.sh | 2 +- src/init/wazuh-server.sh | 2 +- src/win32/wazuh-installer.nsi | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index 5a9ddff744b..ce9be6cd4c5 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -41,7 +41,7 @@ info: version: '4.8.0' - x-revision: '40808' + x-revision: '40809' title: 'Wazuh API REST' license: name: 'GPL 2.0' diff --git a/framework/wazuh/core/cluster/__init__.py b/framework/wazuh/core/cluster/__init__.py index 459dbafa2dc..a87460ae2d7 100644 --- a/framework/wazuh/core/cluster/__init__.py +++ b/framework/wazuh/core/cluster/__init__.py @@ -5,7 +5,7 @@ # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 __version__ = '4.8.0' -__revision__ = '40808' +__revision__ = '40809' __author__ = "Wazuh Inc" __wazuh_name__ = "Wazuh" __licence__ = "\ diff --git a/src/Doxyfile b/src/Doxyfile index 7498a042693..8002130447c 100644 --- a/src/Doxyfile +++ b/src/Doxyfile @@ 
-38,7 +38,7 @@ PROJECT_NAME = "WAZUH" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = "v4.8.0-40808" +PROJECT_NUMBER = "v4.8.0-40809" # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/src/REVISION b/src/REVISION index d953f5f0cb7..7a374c6741e 100644 --- a/src/REVISION +++ b/src/REVISION @@ -1 +1 @@ -40808 +40809 diff --git a/src/init/wazuh-client.sh b/src/init/wazuh-client.sh index 02f2cfe6c71..aef11529fef 100755 --- a/src/init/wazuh-client.sh +++ b/src/init/wazuh-client.sh @@ -12,7 +12,7 @@ DIR=`dirname $PWD`; # Installation info VERSION="v4.8.0" -REVISION="40808" +REVISION="40809" TYPE="agent" ### Do not modify below here ### diff --git a/src/init/wazuh-local.sh b/src/init/wazuh-local.sh index 2eec7f25d0e..6a592296cad 100644 --- a/src/init/wazuh-local.sh +++ b/src/init/wazuh-local.sh @@ -14,7 +14,7 @@ PLIST=${DIR}/bin/.process_list; # Installation info VERSION="v4.8.0" -REVISION="40808" +REVISION="40809" TYPE="local" ### Do not modify below here ### diff --git a/src/init/wazuh-server.sh b/src/init/wazuh-server.sh index 8386bf2a690..68f366ced2d 100755 --- a/src/init/wazuh-server.sh +++ b/src/init/wazuh-server.sh @@ -14,7 +14,7 @@ PLIST=${DIR}/bin/.process_list; # Installation info VERSION="v4.8.0" -REVISION="40808" +REVISION="40809" TYPE="server" ### Do not modify below here ### diff --git a/src/win32/wazuh-installer.nsi b/src/win32/wazuh-installer.nsi index c06c490250d..d998c5c89b9 100644 --- a/src/win32/wazuh-installer.nsi +++ b/src/win32/wazuh-installer.nsi @@ -21,7 +21,7 @@ !define MUI_ICON install.ico !define MUI_UNICON uninstall.ico !define VERSION "4.8.0" -!define REVISION "40808" +!define REVISION "40809" !define NAME "Wazuh" !define SERVICE "WazuhSvc" From 892577e195b01db8b560f30ffaf9852c178aa512 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 3 May 2024 11:47:45 -0300 Subject: [PATCH 025/419] Fix coverity issues. --- .../indexer_connector/src/indexerConnector.cpp | 3 ++- src/shared_modules/utils/socketDBWrapper.hpp | 8 ++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/shared_modules/indexer_connector/src/indexerConnector.cpp b/src/shared_modules/indexer_connector/src/indexerConnector.cpp index 7e0c488ae87..f1341c743d4 100644 --- a/src/shared_modules/indexer_connector/src/indexerConnector.cpp +++ b/src/shared_modules/indexer_connector/src/indexerConnector.cpp @@ -159,7 +159,7 @@ nlohmann::json IndexerConnector::getAgentDocumentsIds(const std::string& url, // If the response have more than ELEMENTS_PER_QUERY elements, we need to scroll. 
if (responseJson.at("hits").at("total").at("value").get() > ELEMENTS_PER_QUERY) { - const auto scrollId = responseJson.at("_scroll_id").get_ref(); + const auto& scrollId = responseJson.at("_scroll_id").get_ref(); const auto scrollUrl = url + "/_search/scroll"; const auto scrollData = R"({"scroll":"1m","scroll_id":")" + scrollId + "\"}"; @@ -409,6 +409,7 @@ IndexerConnector::IndexerConnector( ELEMENTS_PER_BULK); m_syncQueue = std::make_unique( + // coverity[missing_lock] [this, selector, secureCommunication](const std::string& agentId) { try diff --git a/src/shared_modules/utils/socketDBWrapper.hpp b/src/shared_modules/utils/socketDBWrapper.hpp index f0c0968d365..c3bd31bc1e0 100644 --- a/src/shared_modules/utils/socketDBWrapper.hpp +++ b/src/shared_modules/utils/socketDBWrapper.hpp @@ -30,6 +30,7 @@ char constexpr DB_WRAPPER_DUE[] {"due"}; enum class DbQueryStatus : uint8_t { + UNKNOWN, JSON_PARSING, EMPTY_RESPONSE, QUERY_ERROR, @@ -46,7 +47,7 @@ class SocketDBWrapper final nlohmann::json m_response; nlohmann::json m_responsePartial; std::string m_exceptionStr; - DbQueryStatus m_queryStatus; + DbQueryStatus m_queryStatus {DbQueryStatus::UNKNOWN}; std::mutex m_mutexMessage; std::mutex m_mutexResponse; std::condition_variable m_conditionVariable; @@ -185,15 +186,14 @@ class SocketDBWrapper final if (!m_exceptionStr.empty()) { + // coverity[missing_lock] switch (m_queryStatus) { case DbQueryStatus::EMPTY_RESPONSE: case DbQueryStatus::QUERY_ERROR: case DbQueryStatus::QUERY_IGNORE: case DbQueryStatus::QUERY_UNKNOWN: - case DbQueryStatus::QUERY_NOT_SYNCED: throw SocketDbWrapperException(m_exceptionStr); - case DbQueryStatus::JSON_PARSING: - case DbQueryStatus::INVALID_RESPONSE: + case DbQueryStatus::QUERY_NOT_SYNCED: throw SocketDbWrapperException(m_exceptionStr); break; default: throw std::runtime_error(m_exceptionStr); } } From 255991727ee19e3585edd591e2a8a873f7e7c75e Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Sat, 4 May 2024 20:10:45 -0300 Subject: [PATCH 026/419] Change approach to manage multi queue, from multi column family to single column family + prefix --- src/shared_modules/utils/rocksDBQueueCF.hpp | 251 +++++------------- src/shared_modules/utils/tests/CMakeLists.txt | 1 + .../tests/rocksDBSafeQueuePrefix_test.cpp | 153 +++++++++++ .../tests/rocksDBSafeQueuePrefix_test.hpp | 29 ++ .../utils/threadSafeMultiQueue.hpp | 5 +- 5 files changed, 254 insertions(+), 185 deletions(-) create mode 100644 src/shared_modules/utils/tests/rocksDBSafeQueuePrefix_test.cpp create mode 100644 src/shared_modules/utils/tests/rocksDBSafeQueuePrefix_test.hpp diff --git a/src/shared_modules/utils/rocksDBQueueCF.hpp b/src/shared_modules/utils/rocksDBQueueCF.hpp index c5d1001dcb0..97df7b5f6f0 100644 --- a/src/shared_modules/utils/rocksDBQueueCF.hpp +++ b/src/shared_modules/utils/rocksDBQueueCF.hpp @@ -12,11 +12,11 @@ #ifndef _ROCKSDB_QUEUE_CF_HPP #define _ROCKSDB_QUEUE_CF_HPP -#include "rocksDBColumnFamily.hpp" #include "rocksDBOptions.hpp" #include "rocksdb/db.h" #include "rocksdb/filter_policy.h" #include "rocksdb/table.h" +#include "stringHelper.h" #include #include #include @@ -26,7 +26,7 @@ template class RocksDBQueueCF final { private: - struct ColumnFamilyQueue : public Utils::ColumnFamilyRAII + struct QueueMetadata final { uint64_t head = 0; uint64_t tail = 0; @@ -34,89 +34,36 @@ class RocksDBQueueCF final // Time from epoch + postpone time. 
std::chrono::time_point postponeTime; - - ColumnFamilyQueue(const std::shared_ptr& db, rocksdb::ColumnFamilyHandle* rawHandle) - : Utils::ColumnFamilyRAII(db, rawHandle) - { - } }; - void dropColumn(std::string_view columnFamily) + void initializeQueueData() { - if (const auto it = std::find_if(m_columnsInstances.begin(), - m_columnsInstances.end(), - [&columnFamily](const ColumnFamilyQueue& handle) - { return columnFamily == handle->GetName(); }); - it != m_columnsInstances.end()) - { - it->drop(); - m_columnsInstances.erase(it); - } - } + constexpr auto ID_QUEUE = 0; + constexpr auto QUEUE_NUMBER = 1; - void createColumn(std::string_view columnName) - { - if (columnName.empty()) - { - throw std::invalid_argument {"Column name is empty"}; - } - - rocksdb::ColumnFamilyHandle* pColumnFamily; - - if (const auto status {m_db->CreateColumnFamily( - Utils::RocksDBOptions::buildColumnFamilyOptions(m_readCache), columnName.data(), &pColumnFamily)}; - !status.ok()) - { - throw std::runtime_error {"Couldn't create column family: " + std::string {status.getState()}}; - } - auto& element = m_columnsInstances.emplace_back(m_db, pColumnFamily); - element.head = 1; - element.tail = 0; - } - - bool columnExists(std::string_view columnName) const - { - if (columnName.empty()) + auto it = std::unique_ptr(m_db->NewIterator(rocksdb::ReadOptions())); + while (it->Valid()) { - throw std::invalid_argument {"Column name is empty"}; - } + // Split key to get the ID and queue number. + const auto data = Utils::split(it->key().ToString(), '_'); + const auto& id = data.at(ID_QUEUE); + const auto queueNumber = std::stoull(data.at(QUEUE_NUMBER)); - return std::find_if(m_columnsInstances.begin(), - m_columnsInstances.end(), - [&columnName](const ColumnFamilyQueue& handle) - { return columnName == handle->GetName(); }) != m_columnsInstances.end(); - } + if (m_queueMetadata.find(id.data()) == m_queueMetadata.end()) + { + m_queueMetadata.emplace(id, QueueMetadata {1, 0, 0, std::chrono::system_clock::now()}); + } - void initializeQueueData(ColumnFamilyQueue& element) - { - // RocksDB counter initialization. - element.size = 0; + auto& element = m_queueMetadata[id]; - auto it = std::unique_ptr(m_db->NewIterator(rocksdb::ReadOptions(), element.handle())); - it->SeekToFirst(); - if (it->Valid()) - { - const auto key = std::stoull(it->key().ToString()); - element.head = key; - element.tail = key; - } - else - { - element.head = 1; - element.tail = 0; - } - - while (it->Valid()) - { - const auto key = std::stoull(it->key().ToString()); - if (key > element.tail) + if (queueNumber > element.tail) { - element.tail = key; + element.tail = queueNumber; } - if (key < element.head) + if (queueNumber < element.head) { - element.head = key; + element.head = queueNumber; } ++element.size; @@ -145,37 +92,7 @@ class RocksDBQueueCF final // Create directories recursively if they do not exist std::filesystem::create_directories(databasePath); - // Get a list of the existing columns descriptors. - if (const auto databaseFile {databasePath / "CURRENT"}; std::filesystem::exists(databaseFile)) - { - // Read columns names. - std::vector columnsNames; - if (const auto listStatus {rocksdb::DB::ListColumnFamilies(options, path, &columnsNames)}; !listStatus.ok()) - { - throw std::runtime_error("Failed to list columns: " + std::string {listStatus.getState()}); - } - - // Create a set of column descriptors. This includes the default column. 
- for (auto& columnName : columnsNames) - { - columnsDescriptors.emplace_back(columnName, columnFamilyOptions); - } - } - else - { - // Database doesn't exist: Set just the default column descriptor. - columnsDescriptors.emplace_back(rocksdb::kDefaultColumnFamilyName, columnFamilyOptions); - } - - // Create a vector of column handles. - // This vector will be used to store the column handles created by the Open method and based on the - // columnsDescriptors. - std::vector columnHandles; - columnHandles.reserve(columnsDescriptors.size()); - - // Open database with a list of columns descriptors. - if (const auto status {rocksdb::DB::Open(options, path, columnsDescriptors, &columnHandles, &dbRawPtr)}; - !status.ok()) + if (const auto status = rocksdb::DB::Open(options, path, &dbRawPtr); !status.ok()) { throw std::runtime_error("Failed to open RocksDB database. Reason: " + std::string {status.getState()}); } @@ -184,88 +101,59 @@ class RocksDBQueueCF final // allocated RocksDB instance. m_db.reset(dbRawPtr); - // Create a RAII wrapper for each column handle. - for (const auto& handle : columnHandles) - { - if (handle->GetName() != rocksdb::kDefaultColumnFamilyName) - { - auto& element = m_columnsInstances.emplace_back(m_db, handle); - initializeQueueData(element); - } - else - { - // Close the default column handle. - // The default column handle is not used in this class. - if (const auto status {m_db->DestroyColumnFamilyHandle(handle)}; !status.ok()) - { - throw std::runtime_error("Failed to free RocksDB column family: " + - std::string {status.getState()}); - } - } - } + // Initialize queue data. + initializeQueueData(); } - void push(std::string_view columnFamily, const T& data) + void push(std::string_view id, const T& data) { - if (!columnExists(columnFamily)) + if (m_queueMetadata.find(id.data()) == m_queueMetadata.end()) { - createColumn(columnFamily); + m_queueMetadata.emplace(id, QueueMetadata {1, 0, 0, std::chrono::system_clock::now()}); } - const auto it {std::find_if(m_columnsInstances.begin(), - m_columnsInstances.end(), - [&columnFamily](const ColumnFamilyQueue& handle) - { return columnFamily == handle.handle()->GetName(); })}; - - if (it != m_columnsInstances.end()) + if (const auto it {m_queueMetadata.find(id.data())}; it != m_queueMetadata.end()) { - ++it->tail; - if (const auto status = m_db->Put(rocksdb::WriteOptions(), it->handle(), std::to_string(it->tail), data); + ++it->second.tail; + if (const auto status = + m_db->Put(rocksdb::WriteOptions(), std::string(id) + "_" + std::to_string(it->second.tail), data); !status.ok()) { throw std::runtime_error("Failed to enqueue element"); } - ++it->size; + ++it->second.size; } } - void pop(std::string_view columnFamily) + void pop(std::string_view id) { - if (const auto it {std::find_if(m_columnsInstances.begin(), - m_columnsInstances.end(), - [&columnFamily](const ColumnFamilyQueue& handle) - { return columnFamily == handle.handle()->GetName(); })}; - it != m_columnsInstances.end()) + if (const auto it {m_queueMetadata.find(id.data())}; it != m_queueMetadata.end()) { // RocksDB dequeue element. 
- if (!m_db->Delete(rocksdb::WriteOptions(), it->handle(), std::to_string(it->head)).ok()) + if (!m_db->Delete(rocksdb::WriteOptions(), std::string(id) + "_" + std::to_string(it->second.head)).ok()) { throw std::runtime_error("Failed to dequeue element, can't delete it"); } - ++it->head; - --it->size; + ++it->second.head; + --it->second.size; - if (it->size == 0) + if (it->second.size == 0) { - dropColumn(columnFamily); + m_queueMetadata.erase(it); } } else { - throw std::runtime_error("Couldn't find column family: " + std::string {columnFamily}); + throw std::runtime_error("Couldn't find ID: " + std::string {id}); } } - uint64_t size(std::string_view columnName) const + uint64_t size(std::string_view id) const { - if (const auto it {std::find_if(m_columnsInstances.begin(), - m_columnsInstances.end(), - [&columnName](const ColumnFamilyQueue& handle) - { return columnName == handle.handle()->GetName(); })}; - it != m_columnsInstances.end()) + if (const auto it = m_queueMetadata.find(id.data()); it != m_queueMetadata.end()) { - return it->size; + return it->second.size; } return 0; @@ -277,64 +165,59 @@ class RocksDBQueueCF final // Count if there is any column with elements. const auto currentSystemTime = std::chrono::system_clock::now(); auto count = - std::count_if(m_columnsInstances.begin(), - m_columnsInstances.end(), - [&](const ColumnFamilyQueue& handle) { return handle.postponeTime < currentSystemTime; }); + std::count_if(m_queueMetadata.begin(), + m_queueMetadata.end(), + [&](const auto& metadata) { return metadata.second.postponeTime < currentSystemTime; }); return count == 0; } const std::string& getAvailableColumn() { - if (m_columnsInstances.empty()) + if (m_queueMetadata.empty()) { - throw std::runtime_error("No column family available"); + throw std::runtime_error("No queue ids available"); } // Only consider the columns that are not postponed. 
const auto currentSystemTime = std::chrono::system_clock::now(); - auto it = - std::find_if(m_columnsInstances.begin(), - m_columnsInstances.end(), - [&](const ColumnFamilyQueue& handle) { return handle.postponeTime < currentSystemTime; }); + auto it = std::find_if(m_queueMetadata.begin(), + m_queueMetadata.end(), + [&](const auto& metadata) { return metadata.second.postponeTime < currentSystemTime; }); - if (it == m_columnsInstances.end()) + if (it == m_queueMetadata.end()) { - throw std::runtime_error("Probably race condition, no column family available"); + throw std::runtime_error("Probably race condition, no queue id available"); } - return it->handle()->GetName(); + return it->first; } - void postpone(std::string_view columnName, const std::chrono::seconds& time) noexcept + void postpone(std::string_view id, const std::chrono::seconds& time) noexcept { - if (const auto it {std::find_if(m_columnsInstances.begin(), - m_columnsInstances.end(), - [&columnName](const ColumnFamilyQueue& handle) - { return columnName == handle.handle()->GetName(); })}; - it != m_columnsInstances.end()) + if (const auto it {m_queueMetadata.find(id.data())}; it != m_queueMetadata.end()) { - it->postponeTime = std::chrono::system_clock::now() + time; + it->second.postponeTime = std::chrono::system_clock::now() + time; } } - U front(std::string_view columnFamily) const + U front(std::string_view id) const { U value; - if (const auto it {std::find_if(m_columnsInstances.begin(), - m_columnsInstances.end(), - [&columnFamily](const ColumnFamilyQueue& handle) - { return columnFamily == handle.handle()->GetName(); })}; - it != m_columnsInstances.end()) + if (const auto it {m_queueMetadata.find(id.data())}; it != m_queueMetadata.end()) { - if (!m_db->Get(rocksdb::ReadOptions(), it->handle(), std::to_string(it->head), &value).ok()) + if (!m_db->Get(rocksdb::ReadOptions(), + m_db->DefaultColumnFamily(), + std::string(id) + "_" + std::to_string(it->second.head), + &value) + .ok()) { - throw std::runtime_error("Failed to get front element, column family: " + std::string {columnFamily} + - " key: " + std::to_string(it->head)); + throw std::runtime_error("Failed to get front element, id: " + std::string {id} + + " key: " + std::to_string(it->second.head)); } } else { - throw std::runtime_error("Couldn't find column family: " + std::string {columnFamily}); + throw std::runtime_error("Couldn't find id: " + std::string {id}); } return value; @@ -344,7 +227,7 @@ class RocksDBQueueCF final std::shared_ptr m_db; std::shared_ptr m_readCache; std::shared_ptr m_writeManager; - std::vector m_columnsInstances; ///< List of column family. + std::map m_queueMetadata; ///< Map queue. 
}; #endif // _ROCKSDB_QUEUE_CF_HPP diff --git a/src/shared_modules/utils/tests/CMakeLists.txt b/src/shared_modules/utils/tests/CMakeLists.txt index f9e1ea8ca4a..609cc7f661f 100644 --- a/src/shared_modules/utils/tests/CMakeLists.txt +++ b/src/shared_modules/utils/tests/CMakeLists.txt @@ -38,6 +38,7 @@ file(GLOB UTIL_CXX_UNITTEST_LINUX_SRC "socketWrapper_test.cpp" "socket_test.cpp" "rocksDBSafeQueue_test.cpp" + "rocksDBSafeQueuePrefix_test.cpp" "rocksDBWrapper_test.cpp" "threadEventDispatcher_test.cpp" "xzHelper_test.cpp" diff --git a/src/shared_modules/utils/tests/rocksDBSafeQueuePrefix_test.cpp b/src/shared_modules/utils/tests/rocksDBSafeQueuePrefix_test.cpp new file mode 100644 index 00000000000..5ad312ab323 --- /dev/null +++ b/src/shared_modules/utils/tests/rocksDBSafeQueuePrefix_test.cpp @@ -0,0 +1,153 @@ +/* + * Wazuh shared modules utils + * Copyright (C) 2015, Wazuh Inc. + * Jun 4, 2023. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#include "rocksDBSafeQueuePrefix_test.hpp" +#include +#include + +void RocksDBSafeQueuePrefixTest::SetUp() +{ + std::error_code ec; + std::filesystem::remove_all("test.db", ec); + queue = std::make_unique>>( + RocksDBQueueCF("test.db")); +}; + +void RocksDBSafeQueuePrefixTest::TearDown() {}; + +TEST_F(RocksDBSafeQueuePrefixTest, PopInCancelledQueue) +{ + queue->cancel(); + EXPECT_TRUE(queue->cancelled()); + EXPECT_TRUE(queue->empty()); + auto queueSize {queue->size("000")}; + EXPECT_NO_THROW(queue->pop("000")); + EXPECT_EQ(queueSize, queue->size("000")); +} + +TEST_F(RocksDBSafeQueuePrefixTest, PopEmptyQueue) +{ + EXPECT_TRUE(queue->empty()); + EXPECT_FALSE(queue->cancelled()); + const auto front {queue->front()}; + EXPECT_STREQ("", front.first.c_str()); + EXPECT_STREQ("", front.second.c_str()); +} + +TEST_F(RocksDBSafeQueuePrefixTest, PopWithData) +{ + queue->push("000", "test"); + EXPECT_EQ(1, queue->size("000")); + auto front {queue->front()}; + EXPECT_NO_THROW(queue->pop(front.second)); + EXPECT_EQ(0, queue->size("000")); + EXPECT_STREQ("000", front.second.c_str()); + EXPECT_STREQ("test", front.first.c_str()); + + EXPECT_ANY_THROW(queue->pop("000")); + + queue->push("000", "test2"); + EXPECT_EQ(1, queue->size("000")); + front = queue->front(); + EXPECT_NO_THROW(queue->pop(front.second)); + EXPECT_EQ(0, queue->size("000")); + EXPECT_STREQ("000", front.second.c_str()); + EXPECT_STREQ("test2", front.first.c_str()); + + queue->cancel(); + EXPECT_TRUE(queue->cancelled()); + EXPECT_TRUE(queue->empty()); + queue->push("000", "test3"); + EXPECT_TRUE(queue->empty()); +} + +TEST_F(RocksDBSafeQueuePrefixTest, PopWithMultipleData) +{ + const int ITERATION_COUNT = 10000; + std::string data = "test"; + + for (int i = 0; i < ITERATION_COUNT; i++) + { + queue->push("000", data + std::to_string(i)); + } + + EXPECT_EQ(ITERATION_COUNT, queue->size("000")); + + for (int i = 0; i < ITERATION_COUNT; i++) + { + auto front {queue->front()}; + EXPECT_NO_THROW(queue->pop(front.second)); + EXPECT_EQ(data + std::to_string(i), front.first); + } + + EXPECT_TRUE(queue->empty()); +} + +TEST_F(RocksDBSafeQueuePrefixTest, PopWithMultipleIDData) +{ + const int ITERATION_COUNT = 10000; + std::string data = "test"; + + for (int i = 0; i < ITERATION_COUNT; i++) + { + queue->push(std::to_string(i), data); + } + + for (int i = 0; i < ITERATION_COUNT; i++) + { + auto front {queue->front()}; + EXPECT_EQ(1, 
queue->size(front.second)); + EXPECT_NO_THROW(queue->pop(front.second)); + EXPECT_EQ(0, queue->size(front.second)); + EXPECT_EQ(data, front.first); + } + + EXPECT_TRUE(queue->empty()); +} + +TEST_F(RocksDBSafeQueuePrefixTest, BlockingPopByRef) +{ + std::thread t1 {[this]() + { + auto front {queue->front()}; + EXPECT_NO_THROW(queue->pop(front.second)); + EXPECT_EQ("0", front.first); + }}; + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + queue->push("000", "0"); + t1.join(); +} + +TEST_F(RocksDBSafeQueuePrefixTest, CancelBlockingPop) +{ + std::thread t {[this]() + { + auto front {queue->front()}; + EXPECT_STREQ("", front.first.c_str()); + EXPECT_STREQ("", front.second.c_str()); + EXPECT_TRUE(queue->cancelled()); + }}; + queue->cancel(); + t.join(); +} + +TEST_F(RocksDBSafeQueuePrefixTest, CreateFolderRecursively) +{ + const std::string DATABASE_NAME {"folder1/folder2/test.db"}; + + EXPECT_NO_THROW({ + (std::make_unique>>( + RocksDBQueueCF(DATABASE_NAME))); + }); + + std::error_code ec; + std::filesystem::remove_all(DATABASE_NAME, ec); +} diff --git a/src/shared_modules/utils/tests/rocksDBSafeQueuePrefix_test.hpp b/src/shared_modules/utils/tests/rocksDBSafeQueuePrefix_test.hpp new file mode 100644 index 00000000000..b14ceac2b79 --- /dev/null +++ b/src/shared_modules/utils/tests/rocksDBSafeQueuePrefix_test.hpp @@ -0,0 +1,29 @@ +/* + * Wazuh shared modules utils + * Copyright (C) 2015, Wazuh Inc. + * May 4, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#ifndef _ROCKSDB_SAFEQUEUE_PREFIX_TEST_HPP +#define _ROCKSDB_SAFEQUEUE_PREFIX_TEST_HPP + +#include "rocksDBQueueCF.hpp" +#include "threadSafeMultiQueue.hpp" +#include +#include + +class RocksDBSafeQueuePrefixTest : public ::testing::Test +{ +protected: + RocksDBSafeQueuePrefixTest() = default; + ~RocksDBSafeQueuePrefixTest() override = default; + std::unique_ptr>> queue; + void SetUp() override; + void TearDown() override; +}; +#endif //_ROCKSDB_SAFEQUEUE_PREFIX_TEST_HPP diff --git a/src/shared_modules/utils/threadSafeMultiQueue.hpp b/src/shared_modules/utils/threadSafeMultiQueue.hpp index cd920af2d2e..b1ebb15fcf6 100644 --- a/src/shared_modules/utils/threadSafeMultiQueue.hpp +++ b/src/shared_modules/utils/threadSafeMultiQueue.hpp @@ -87,7 +87,10 @@ namespace Utils void pop(std::string_view columnName) { std::scoped_lock lock {m_mutex}; - m_queue.pop(columnName); + if (!m_canceled) + { + m_queue.pop(columnName); + } } bool empty() const From 051927beaa73a795cf69786bb5cb1dd16daa75f0 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Sat, 4 May 2024 23:50:37 -0300 Subject: [PATCH 027/419] Renaming of the index name, now we use the cluster name as postfix. 
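
The states index is now named per cluster: the value of
"vulnerability-detection.clusterName" is appended to the
"wazuh-states-vulnerabilities-" prefix, with a fallback to "default" when no
cluster name is configured, and the index template pattern is widened to
"wazuh-states-vulnerabilities-*" to match. A minimal standalone sketch of the
resulting naming (illustration only, not part of the patched sources):

    #include <iostream>
    #include <string>

    constexpr auto STATES_VD_INDEX_NAME_PREFIX {"wazuh-states-vulnerabilities-"};

    int main()
    {
        // Configured cluster name taken from vulnerability-detection.clusterName.
        const std::string clusterName {"cluster01"};
        std::cout << STATES_VD_INDEX_NAME_PREFIX + clusterName << '\n';              // wazuh-states-vulnerabilities-cluster01
        // When no cluster name is configured, the policy manager falls back to "default".
        std::cout << STATES_VD_INDEX_NAME_PREFIX + std::string {"default"} << '\n';  // wazuh-states-vulnerabilities-default
    }
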
--- .../indexer/template/index-template.json | 2 +- .../src/policyManager/policyManager.hpp | 8 +++- .../tests/unit/policyManager_test.cpp | 37 +++++++++++++++++++ 3 files changed, 44 insertions(+), 3 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json b/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json index 1f5545f1b8d..e28984128cb 100644 --- a/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json +++ b/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json @@ -1,6 +1,6 @@ { "index_patterns": [ - "wazuh-states-vulnerabilities" + "wazuh-states-vulnerabilities-*" ], "priority": 1, "template": { diff --git a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp index eb00d5f0a9d..33fb086774f 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp @@ -26,7 +26,7 @@ #include constexpr auto UNKNOWN_VALUE {" "}; -constexpr auto STATES_VD_INDEX_NAME {"wazuh-states-vulnerabilities"}; +constexpr auto STATES_VD_INDEX_NAME_PREFIX {"wazuh-states-vulnerabilities-"}; constexpr auto DEFAULT_TRANSLATION_LRU_SIZE {2048}; constexpr auto DEFAULT_OSDATA_LRU_SIZE {1000}; const static std::string UPDATER_PATH {"queue/vd_updater"}; @@ -115,7 +115,11 @@ class PolicyManager final : public Singleton newPolicy["indexer"]["ssl"]["certificate"] = ""; newPolicy["indexer"]["ssl"]["key"] = ""; } - newPolicy["indexer"]["name"] = STATES_VD_INDEX_NAME; + newPolicy["indexer"]["name"] = + STATES_VD_INDEX_NAME_PREFIX + + (newPolicy.at("vulnerability-detection").contains("clusterName") + ? 
newPolicy.at("vulnerability-detection").at("clusterName").get_ref() + : "default"); if (!newPolicy.at("vulnerability-detection").contains("feed-update-interval")) { diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp index f0e6ed3c575..e96daf8dcda 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp @@ -263,6 +263,43 @@ TEST_F(PolicyManagerTest, validConfigurationDefaultValues) EXPECT_EQ(m_policyManager->getCTIUrl(), "cti-url.com"); EXPECT_EQ(m_policyManager->getTranslationLRUSize(), 2048); EXPECT_EQ(m_policyManager->getOsdataLRUSize(), 1000); + EXPECT_STREQ("wazuh-states-vulnerabilities-default", + m_policyManager->getIndexerConfiguration().at("name").get_ref().c_str()); +} + +TEST_F(PolicyManagerTest, validConfigurationDefaultValuesWithClusterName) +{ + const auto& configJson {nlohmann::json::parse(R"({ + "vulnerability-detection": { + "enabled": "yes", + "index-status": "yes", + "cti-url": "cti-url.com", + "clusterName":"cluster01" + }, + "indexer": { + "enabled": "yes" + } + })")}; + + m_policyManager->initialize(configJson); + + EXPECT_TRUE(m_policyManager->isVulnerabilityDetectionEnabled()); + EXPECT_TRUE(m_policyManager->isIndexerEnabled()); + + EXPECT_EQ(m_policyManager->getFeedUpdateTime(), 3600); + + EXPECT_EQ(m_policyManager->getHostList().count("http://localhost:9200"), 1); + + EXPECT_STREQ(m_policyManager->getUsername().c_str(), ""); + EXPECT_STREQ(m_policyManager->getPassword().c_str(), ""); + EXPECT_STREQ(m_policyManager->getCertificate().c_str(), ""); + EXPECT_STREQ(m_policyManager->getKey().c_str(), ""); + EXPECT_EQ(m_policyManager->getCAList().size(), 0); + EXPECT_EQ(m_policyManager->getCTIUrl(), "cti-url.com"); + EXPECT_EQ(m_policyManager->getTranslationLRUSize(), 2048); + EXPECT_EQ(m_policyManager->getOsdataLRUSize(), 1000); + EXPECT_STREQ("wazuh-states-vulnerabilities-cluster01", + m_policyManager->getIndexerConfiguration().at("name").get_ref().c_str()); } TEST_F(PolicyManagerTest, invalidConfigurationNoCTIUrl) From fa151abde0c6af4a9fd1bca37a56319a0427df74 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Sun, 5 May 2024 22:15:30 -0300 Subject: [PATCH 028/419] Clear delayed queue when an agent or all agents. --- src/shared_modules/utils/rocksDBQueueCF.hpp | 38 ++++++++++++++ .../tests/rocksDBSafeQueuePrefix_test.cpp | 33 ++++++++++++ .../utils/tests/threadSafeMultiQueue_test.cpp | 51 +++++++++++++++++++ .../utils/threadEventDispatcher.hpp | 28 +++++++--- .../utils/threadSafeMultiQueue.hpp | 22 +++++--- .../src/scanOrchestrator/scanOrchestrator.hpp | 12 ++++- 6 files changed, 167 insertions(+), 17 deletions(-) diff --git a/src/shared_modules/utils/rocksDBQueueCF.hpp b/src/shared_modules/utils/rocksDBQueueCF.hpp index 97df7b5f6f0..4bb6bac70ea 100644 --- a/src/shared_modules/utils/rocksDBQueueCF.hpp +++ b/src/shared_modules/utils/rocksDBQueueCF.hpp @@ -223,6 +223,44 @@ class RocksDBQueueCF final return value; } + void clear(std::string_view id) + { + auto deleteElement = [this](const std::string& key) + { + if (!m_db->Delete(rocksdb::WriteOptions(), key).ok()) + { + throw std::runtime_error("Failed to clear element, can't delete it"); + } + }; + + if (id.empty()) + { + // Clear all elements from the queue. 
+ for (const auto& metadata : m_queueMetadata) + { + for (auto i = metadata.second.head; i <= metadata.second.tail; ++i) + { + deleteElement(std::string(metadata.first) + "_" + std::to_string(i)); + } + } + m_queueMetadata.clear(); + } + else + { + if (const auto it {m_queueMetadata.find(id.data())}; it != m_queueMetadata.end()) + { + // Clear all elements from the queue. + for (auto i = it->second.head; i <= it->second.tail; ++i) + { + deleteElement(std::string(id) + "_" + std::to_string(i)); + ++it->second.head; + --it->second.size; + } + m_queueMetadata.erase(it); + } + } + } + private: std::shared_ptr m_db; std::shared_ptr m_readCache; diff --git a/src/shared_modules/utils/tests/rocksDBSafeQueuePrefix_test.cpp b/src/shared_modules/utils/tests/rocksDBSafeQueuePrefix_test.cpp index 5ad312ab323..35e0b816804 100644 --- a/src/shared_modules/utils/tests/rocksDBSafeQueuePrefix_test.cpp +++ b/src/shared_modules/utils/tests/rocksDBSafeQueuePrefix_test.cpp @@ -151,3 +151,36 @@ TEST_F(RocksDBSafeQueuePrefixTest, CreateFolderRecursively) std::error_code ec; std::filesystem::remove_all(DATABASE_NAME, ec); } + +TEST_F(RocksDBSafeQueuePrefixTest, ClearQueue) +{ + queue->push("000", "test"); + queue->push("000", "test2"); + queue->push("000", "test3"); + queue->push("001", "test4"); + queue->push("001", "test5"); + + EXPECT_EQ(3, queue->size("000")); + EXPECT_EQ(2, queue->size("001")); + + queue->clear("000"); + EXPECT_EQ(0, queue->size("000")); + EXPECT_EQ(2, queue->size("001")); + + queue->clear("001"); + EXPECT_EQ(0, queue->size("001")); +} + +TEST_F(RocksDBSafeQueuePrefixTest, ClearAllQueue) +{ + queue->push("000", "test"); + queue->push("000", "test2"); + queue->push("000", "test3"); + queue->push("001", "test4"); + queue->push("001", "test5"); + + queue->clear(""); + EXPECT_EQ(0, queue->size("000")); + EXPECT_EQ(0, queue->size("001")); + EXPECT_TRUE(queue->empty()); +} diff --git a/src/shared_modules/utils/tests/threadSafeMultiQueue_test.cpp b/src/shared_modules/utils/tests/threadSafeMultiQueue_test.cpp index 1d5f93261fe..576a50d935c 100644 --- a/src/shared_modules/utils/tests/threadSafeMultiQueue_test.cpp +++ b/src/shared_modules/utils/tests/threadSafeMultiQueue_test.cpp @@ -133,3 +133,54 @@ TEST_F(ThreadSafeMultiQueueTest, Postpone) EXPECT_TRUE(count == EXPECTED_COUNT_MSGS); } +TEST_F(ThreadSafeMultiQueueTest, Clear) +{ + Utils:: + TSafeMultiQueue> + queue(RocksDBQueueCF("test")); + + rocksdb::Slice slice("DATA"); + queue.push("000", slice); + queue.push("001", slice); + queue.push("002", slice); + EXPECT_FALSE(queue.empty()); + EXPECT_EQ(1, queue.size("000")); + EXPECT_EQ(1, queue.size("001")); + EXPECT_EQ(1, queue.size("002")); + queue.clear("000"); + EXPECT_EQ(0, queue.size("000")); + EXPECT_EQ(1, queue.size("001")); + EXPECT_EQ(1, queue.size("002")); + EXPECT_FALSE(queue.empty()); + queue.clear("001"); + EXPECT_EQ(0, queue.size("000")); + EXPECT_EQ(0, queue.size("001")); + EXPECT_EQ(1, queue.size("002")); + EXPECT_FALSE(queue.empty()); + queue.clear("002"); + EXPECT_EQ(0, queue.size("000")); + EXPECT_EQ(0, queue.size("001")); + EXPECT_EQ(0, queue.size("002")); + EXPECT_TRUE(queue.empty()); +} + +TEST_F(ThreadSafeMultiQueueTest, ClearAll) +{ + Utils:: + TSafeMultiQueue> + queue(RocksDBQueueCF("test")); + + rocksdb::Slice slice("DATA"); + queue.push("000", slice); + queue.push("001", slice); + queue.push("002", slice); + EXPECT_FALSE(queue.empty()); + EXPECT_EQ(1, queue.size("000")); + EXPECT_EQ(1, queue.size("001")); + EXPECT_EQ(1, queue.size("002")); + queue.clear(""); + EXPECT_EQ(0, 
queue.size("000")); + EXPECT_EQ(0, queue.size("001")); + EXPECT_EQ(0, queue.size("002")); + EXPECT_TRUE(queue.empty()); +} diff --git a/src/shared_modules/utils/threadEventDispatcher.hpp b/src/shared_modules/utils/threadEventDispatcher.hpp index ff55ee634e6..892ea2a51ed 100644 --- a/src/shared_modules/utils/threadEventDispatcher.hpp +++ b/src/shared_modules/utils/threadEventDispatcher.hpp @@ -81,13 +81,13 @@ class TThreadEventDispatcher } } - void push(std::string_view columnName, const T& value) + void push(std::string_view prefix, const T& value) { if constexpr (std::is_same_v>, TSafeQueueType>) { - if (m_running && (UNLIMITED_QUEUE_SIZE == m_maxQueueSize || m_queue->size(columnName) < m_maxQueueSize)) + if (m_running && (UNLIMITED_QUEUE_SIZE == m_maxQueueSize || m_queue->size(prefix) < m_maxQueueSize)) { - m_queue->push(columnName, value); + m_queue->push(prefix, value); } } else @@ -98,6 +98,20 @@ class TThreadEventDispatcher } } + void clear(std::string_view prefix = "") + { + if constexpr (std::is_same_v>, TSafeQueueType>) + { + m_queue->clear(prefix); + } + else + { + // static assert to avoid compilation + static_assert(std::is_same_v>, TSafeQueueType>, + "This method is not supported for this queue type"); + } + } + void cancel() { m_running = false; @@ -123,11 +137,11 @@ class TThreadEventDispatcher } } - size_t size(std::string_view columnName) const + size_t size(std::string_view prefix) const { if constexpr (std::is_same_v>, TSafeQueueType>) { - return m_queue->size(columnName); + return m_queue->size(prefix); } else { @@ -137,11 +151,11 @@ class TThreadEventDispatcher } } - void postpone(std::string_view columnName, const std::chrono::seconds& time) noexcept + void postpone(std::string_view prefix, const std::chrono::seconds& time) noexcept { if constexpr (std::is_same_v>, TSafeQueueType>) { - m_queue->postpone(columnName, time); + m_queue->postpone(prefix, time); } else { diff --git a/src/shared_modules/utils/threadSafeMultiQueue.hpp b/src/shared_modules/utils/threadSafeMultiQueue.hpp index b1ebb15fcf6..f6f42713179 100644 --- a/src/shared_modules/utils/threadSafeMultiQueue.hpp +++ b/src/shared_modules/utils/threadSafeMultiQueue.hpp @@ -46,12 +46,12 @@ namespace Utils cancel(); } - void push(std::string_view columnName, const T& value) + void push(std::string_view prefix, const T& value) { std::scoped_lock lock {m_mutex}; if (!m_canceled) { - m_queue.push(columnName, value); + m_queue.push(prefix, value); m_cv.notify_one(); } } @@ -84,12 +84,12 @@ namespace Utils return std::pair {}; } - void pop(std::string_view columnName) + void pop(std::string_view prefix) { std::scoped_lock lock {m_mutex}; if (!m_canceled) { - m_queue.pop(columnName); + m_queue.pop(prefix); } } @@ -99,10 +99,16 @@ namespace Utils return m_queue.empty(); } - size_t size(std::string_view columnName) const + void clear(std::string_view prefix) { std::scoped_lock lock {m_mutex}; - return m_queue.size(columnName); + m_queue.clear(prefix); + } + + size_t size(std::string_view prefix) const + { + std::scoped_lock lock {m_mutex}; + return m_queue.size(prefix); } void cancel() @@ -119,10 +125,10 @@ namespace Utils return m_canceled; } - void postpone(std::string_view columnName, const std::chrono::seconds& time) noexcept + void postpone(std::string_view prefix, const std::chrono::seconds& time) noexcept { std::scoped_lock lock {m_mutex}; - m_queue.postpone(columnName, time); + m_queue.postpone(prefix, time); } private: diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp 
b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp index cee32003565..a3e8aabe1de 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp @@ -265,12 +265,20 @@ class TScanOrchestrator final : public TOSPrimitives m_integrityClearOrchestration->handleRequest(std::move(context)); break; // LCOV_EXCL_START - case ScannerType::ReScanAllAgents: m_reScanAllOrchestration->handleRequest(std::move(context)); break; - case ScannerType::ReScanSingleAgent: m_reScanOrchestration->handleRequest(std::move(context)); break; + case ScannerType::ReScanAllAgents: + m_eventDelayedDispatcher->clear(); + m_reScanAllOrchestration->handleRequest(std::move(context)); + break; + case ScannerType::ReScanSingleAgent: + m_eventDelayedDispatcher->clear(context->agentId()); + m_reScanOrchestration->handleRequest(std::move(context)); + break; case ScannerType::CleanupAllAgentData: + m_eventDelayedDispatcher->clear(); m_cleanUpDataOrchestration->handleRequest(std::move(context)); break; case ScannerType::CleanupSingleAgentData: + m_eventDelayedDispatcher->clear(context->agentId()); m_deleteAgentScanOrchestration->handleRequest(std::move(context)); break; case ScannerType::GlobalSyncInventory: From ed3c9b4566cc1545b865dd9720c469f9a2dfb97a Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Sun, 5 May 2024 22:18:19 -0300 Subject: [PATCH 029/419] Decrease the write buffer to use less memory. --- src/shared_modules/utils/rocksDBOptions.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/shared_modules/utils/rocksDBOptions.hpp b/src/shared_modules/utils/rocksDBOptions.hpp index 1e41fc024dc..fb8d6fff616 100644 --- a/src/shared_modules/utils/rocksDBOptions.hpp +++ b/src/shared_modules/utils/rocksDBOptions.hpp @@ -18,8 +18,8 @@ namespace Utils { - constexpr auto ROCKSDB_WRITE_BUFFER_SIZE = 64 * 1024 * 1024; - constexpr auto ROCKSDB_WRITE_BUFFER_MANAGER_SIZE = 64 * 1024 * 1024; + constexpr auto ROCKSDB_WRITE_BUFFER_SIZE = 32 * 1024 * 1024; + constexpr auto ROCKSDB_WRITE_BUFFER_MANAGER_SIZE = 32 * 1024 * 1024; constexpr auto ROCKSDB_MAX_WRITE_BUFFER_NUMBER = 2; constexpr auto ROCKSDB_MAX_OPEN_FILES = 256; constexpr auto ROCKSDB_NUM_LEVELS = 4; From 181d3a55ec68765f789dfddd39d3bfd105d65585 Mon Sep 17 00:00:00 2001 From: GGP1 Date: Mon, 6 May 2024 11:03:45 -0300 Subject: [PATCH 030/419] Disable wazuh-modulesd debug mode --- api/test/integration/env/base/manager/entrypoint.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/api/test/integration/env/base/manager/entrypoint.sh b/api/test/integration/env/base/manager/entrypoint.sh index 19eba338018..4b0f40b4e47 100755 --- a/api/test/integration/env/base/manager/entrypoint.sh +++ b/api/test/integration/env/base/manager/entrypoint.sh @@ -1,8 +1,5 @@ #!/usr/bin/env bash -# Enable debug mode for the modulesd daemon -echo 'wazuh_modules.debug=2' >> /var/ossec/etc/local_internal_options.conf - # Apply API configuration cp -rf /tmp_volume/config/* /var/ossec/ && chown -R wazuh:wazuh /var/ossec/api From 28d25e82a8f93f3ba26e9af19da648b8b9053387 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Tue, 7 May 2024 23:00:58 -0300 Subject: [PATCH 031/419] Fix full scan, to exclude the manager scan if the setting is disabled. 
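
The full-scan agent list now honors the manager scan setting: when scanning
the manager is disabled, the manager is left out of the list built for a
re-scan of all agents. The new QA cases drive this through
"managerDisabledScan": 1 in configManagerDisabled.json and
configDisabledAndManagerDisabled.json. A minimal sketch of the intended
filtering, with illustrative names only (the real logic lives in
buildAllAgentListContext.hpp):

    #include <string>
    #include <vector>

    // Illustrative helper: drop the manager from the candidate list when its scan is disabled.
    std::vector<std::string> buildScanList(const std::vector<std::string>& agentIds,
                                           const std::string& managerId,
                                           bool managerScanDisabled)
    {
        std::vector<std::string> list;
        for (const auto& id : agentIds)
        {
            if (managerScanDisabled && id == managerId)
            {
                continue; // Manager excluded from the full scan.
            }
            list.push_back(id);
        }
        return list;
    }
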
--- .../qa/test_data_policy/001/args_002.json | 4 ++-- .../qa/test_data_policy/001/args_004.json | 19 +++++++++++++++ .../qa/test_data_policy/001/args_005.json | 19 +++++++++++++++ .../001/configDisabledAndManagerDisabled.json | 24 +++++++++++++++++++ .../001/configManagerDisabled.json | 24 +++++++++++++++++++ .../qa/test_data_policy/001/expected_003.out | 1 + .../qa/test_data_policy/001/expected_004.out | 3 +++ .../qa/test_data_policy/001/expected_005.out | 11 +++++++++ .../buildAllAgentListContext.hpp | 21 ++++++++++++---- .../src/scanOrchestrator/osScanner.hpp | 3 ++- .../src/scanOrchestrator/scanAgentList.hpp | 2 +- 11 files changed, 122 insertions(+), 9 deletions(-) create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/args_004.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/args_005.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabledAndManagerDisabled.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configManagerDisabled.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_004.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_005.out diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/args_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/args_002.json index 1f5a0f3acba..93dd697b58a 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/args_002.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/args_002.json @@ -7,11 +7,11 @@ "log.out", "-s", "120", - "-b", + "-h", "wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentHotfixesData.json", "-p", "wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentPackagesData.json", - "-a", + "-v", "wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentOsData.json", "-g", "wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/globalData.json", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/args_004.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/args_004.json new file mode 100644 index 00000000000..28bb3f1c299 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/args_004.json @@ -0,0 +1,19 @@ +[ + "-c", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabledAndManagerDisabled.json", + "-t", + "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", + "-l", + "log.out", + "-s", + "120", + "-h", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentHotfixesData.json", + "-p", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentPackagesData.json", + "-b", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentOsData.json", + "-g", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/globalData.json", + "-u" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/args_005.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/args_005.json new file mode 100644 index 00000000000..522f0ba641e --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/args_005.json @@ -0,0 +1,19 @@ +[ + "-c", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configManagerDisabled.json", + "-t", + 
"wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", + "-l", + "log.out", + "-s", + "120", + "-h", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentHotfixesData.json", + "-p", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentPackagesData.json", + "-b", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentOsData.json", + "-g", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/globalData.json", + "-u" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabledAndManagerDisabled.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabledAndManagerDisabled.json new file mode 100644 index 00000000000..94db8c61344 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabledAndManagerDisabled.json @@ -0,0 +1,24 @@ +{ + "vulnerability-detection": { + "enabled": "no", + "index-status": "yes", + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + }, + "indexer": { + "enabled": "yes", + "hosts": [ + "https://0.0.0.0:9200" + ], + "username": "admin", + "password": "admin", + "ssl": { + "certificate_authorities": [ + "/home/dwordcito/Development/wazuh/src/root-ca.pem" + ], + "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", + "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" + } + }, + "managerNodeName": "wazuh-manager", + "managerDisabledScan": 1 +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configManagerDisabled.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configManagerDisabled.json new file mode 100644 index 00000000000..519d55ab6d2 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configManagerDisabled.json @@ -0,0 +1,24 @@ +{ + "vulnerability-detection": { + "enabled": "yes", + "index-status": "yes", + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + }, + "indexer": { + "enabled": "yes", + "hosts": [ + "https://0.0.0.0:9200" + ], + "username": "admin", + "password": "admin", + "ssl": { + "certificate_authorities": [ + "/home/dwordcito/Development/wazuh/src/root-ca.pem" + ], + "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", + "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" + } + }, + "managerNodeName": "wazuh-manager", + "managerDisabledScan": 1 +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out index 0c4462c45d0..119850d6250 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out @@ -3,6 +3,7 @@ "Vulnerability scanner module is enabled. Re-scanning all agents.", "Event type: 9 processed", "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", + "Fetched 2 agents from Wazuh-DB.", "Translation for package 'gzip' in platform 'ubuntu' not found. Using provided packageName.", "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'agent_ubuntu_22' (ID: '000', Version: 'v4.7.1').", "Scanning package - 'gzip' (Installed Version: 1.10-0ubuntu4.1, Security Vulnerability: CVE-2022-1271). Identified vulnerability: Version: 0. 
Required Version Threshold: 1.10-4ubuntu4. Required Version Threshold (or Equal): .", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_004.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_004.out new file mode 100644 index 00000000000..e2416840b32 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_004.out @@ -0,0 +1,3 @@ +[ + "Vulnerability scanner module is disabled" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_005.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_005.out new file mode 100644 index 00000000000..33cee0b94de --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_005.out @@ -0,0 +1,11 @@ +[ + "Vulnerability scanner module started", + "Vulnerability scanner module is enabled. Re-scanning all agents.", + "Fetched 1 agents from Wazuh-DB.", + "Skipping manager agent with id 0.", + "Vulnerability scan for OS 'enterprise_linux' on Agent '001' has completed.", + "Translation for package 'lua-libs' in platform 'rhel' not found. Using provided packageName.", + "Initiating a vulnerability scan for package 'lua-libs' (rpm) (red hat, inc.) with CVE Numbering Authorities (CNA) 'redhat_8' on Agent 'agent_redhat_8' (ID: '001', Version: 'v4.7.1').", + "Vulnerability scan for package 'lua-libs' on Agent '001' has completed.", + "Event type: 7 processed" +] diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp index 3cb88ed25a6..a44b3401d5f 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp @@ -73,21 +73,32 @@ class TBuildAllAgentListContext final : public AbstractHandlerm_agents.push_back({Utils::padString(std::to_string(agent.at("id").get()), '0', 3), - agent.at("name"), - Utils::leftTrim(agent.at("version"), "Wazuh "), - agent.at("ip"), - agent.at("node_name")}); + // If the agent is the manager and the manager scan is disabled, skip it + if (!(isManagerScanDisabled == DisableManagerScanStatus::DISABLE_MANAGER_SCAN && + agent.at("id").get() == 0)) + { + data->m_agents.push_back({Utils::padString(std::to_string(agent.at("id").get()), '0', 3), + agent.at("name"), + Utils::leftTrim(agent.at("version"), "Wazuh "), + agent.at("ip"), + agent.at("node_name")}); + } + else + { + logDebug2(WM_VULNSCAN_LOGTAG, "Skipping manager agent with id 0."); + } } catch (const std::exception& e) { logDebug2(WM_VULNSCAN_LOGTAG, "Error reading global agent data: %s", e.what()); } } + logDebug2(WM_VULNSCAN_LOGTAG, "Fetched %d agents from Wazuh-DB.", data->m_agents.size()); return AbstractHandler>::handleRequest(std::move(data)); } }; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp index 024c30ad095..10344468c02 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp @@ -304,7 +304,8 @@ class TOsScanner final : public AbstractHandler> if (std::find_if(responseHotfixes.begin(), responseHotfixes.end(), [&](const auto& element) { - return element.at("hotfix") == remediation->str(); + return 
element.contains("hotfix") && + element.at("hotfix") == remediation->str(); }) != responseHotfixes.end()) { logDebug2(WM_VULNSCAN_LOGTAG, diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp index 6e54f395867..6ff042b3ba5 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp @@ -244,7 +244,7 @@ class TScanAgentList final : public AbstractHandlerm_agentsWithIncompletedScan); } } From 53e111bf182f4c827d68a087ffbbf827c7debe00 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Wed, 8 May 2024 01:32:13 -0300 Subject: [PATCH 032/419] Fix unit tests. --- .../tests/unit/buildAllAgentListContext_test.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp index 8743f74a396..ed42bc83f12 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp @@ -33,8 +33,8 @@ TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContext) auto allAgentContext = std::make_shared>>(); - // Context is not used - allAgentContext->handleRequest(nullptr); + auto scanContext = std::make_shared>(); + allAgentContext->handleRequest(scanContext); } TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContextWithElements) From 5fc4d13d44db8eef2a2fdf3f3d8717a4ba2a12d5 Mon Sep 17 00:00:00 2001 From: GGP1 Date: Wed, 8 May 2024 12:43:07 -0300 Subject: [PATCH 033/419] Add error message --- framework/wazuh/core/configuration.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/framework/wazuh/core/configuration.py b/framework/wazuh/core/configuration.py index 85592c1c07e..aee10f54f92 100755 --- a/framework/wazuh/core/configuration.py +++ b/framework/wazuh/core/configuration.py @@ -1263,8 +1263,8 @@ def write_ossec_conf(new_conf: str): try: with open(common.OSSEC_CONF, 'w') as f: f.writelines(new_conf) - except Exception: - raise WazuhError(1126) + except Exception as e: + raise WazuhError(1126, extra_message=str(e)) def update_check_is_enabled() -> bool: From cbb2cbdbd590dcf6710963239888eb8cb97e49eb Mon Sep 17 00:00:00 2001 From: MiguelazoDS Cazajous-Miguel Date: Tue, 7 May 2024 17:27:53 -0300 Subject: [PATCH 034/419] Fix disparity between scan by events and re-scans --- src/wazuh_db/wdb.c | 2 +- .../src/scanOrchestrator/scanAgentList.hpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/wazuh_db/wdb.c b/src/wazuh_db/wdb.c index 963697bdac0..2d444e4dcec 100644 --- a/src/wazuh_db/wdb.c +++ b/src/wazuh_db/wdb.c @@ -270,7 +270,7 @@ static const char *SQL_STMT[] = { [WDB_STMT_SYSCOLLECTOR_OSINFO_DELETE_BY_PK] = "DELETE FROM sys_osinfo WHERE os_name = ?;", [WDB_STMT_SYSCOLLECTOR_OSINFO_CLEAR] = "DELETE FROM sys_osinfo;", [WDB_STMT_SYS_HOTFIXES_GET] = "SELECT HOTFIX FROM SYS_HOTFIXES;", - [WDB_STMT_SYS_PROGRAMS_GET] = "SELECT DISTINCT NAME, VERSION, ARCHITECTURE, VENDOR, FORMAT, SOURCE, CPE, MSU_NAME, ITEM_ID FROM SYS_PROGRAMS;", + [WDB_STMT_SYS_PROGRAMS_GET] = "SELECT DISTINCT NAME, VERSION, ARCHITECTURE, VENDOR, FORMAT, SOURCE, CPE, MSU_NAME, ITEM_ID, DESCRIPTION, LOCATION, SIZE, INSTALL_TIME FROM SYS_PROGRAMS;", }; /** diff --git 
a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp index 6e54f395867..7e8802b865b 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp @@ -70,7 +70,7 @@ class TScanAgentList final : public AbstractHandler Date: Wed, 8 May 2024 14:47:52 -0300 Subject: [PATCH 035/419] Scan manager is disabled by default. --- etc/internal_options.conf | 2 +- src/wazuh_modules/wm_syscollector.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/etc/internal_options.conf b/etc/internal_options.conf index 246da1d5359..7818716154a 100755 --- a/etc/internal_options.conf +++ b/etc/internal_options.conf @@ -430,7 +430,7 @@ vulnerability-detection.osdata_lru_size=1000 # Vulnerability detector - Enable or disable the scan manager # 0. Enabled # 1. Disabled -vulnerability-detection.disable_scan_manager=0 +vulnerability-detection.disable_scan_manager=1 # Debug options. # Debug 0 -> no debug diff --git a/src/wazuh_modules/wm_syscollector.c b/src/wazuh_modules/wm_syscollector.c index aea75f6a385..d09290f080e 100644 --- a/src/wazuh_modules/wm_syscollector.c +++ b/src/wazuh_modules/wm_syscollector.c @@ -64,7 +64,7 @@ router_provider_send_fb_func router_provider_send_fb_func_ptr = NULL; ROUTER_PROVIDER_HANDLE rsync_handle = NULL; ROUTER_PROVIDER_HANDLE syscollector_handle = NULL; char *manager_node_name = NULL; -int disable_manager_scan = 0; +int disable_manager_scan = 1; #endif // CLIENT long syscollector_sync_max_eps = 10; // Database synchronization number of events per second (default value) @@ -185,7 +185,7 @@ void* wm_sys_main(wm_sys_t *sys) { } #ifndef CLIENT // Load router module only for manager if is enabled - disable_manager_scan = getDefine_Int("vulnerability-detection", "disable_scan_manager",0 ,1); + disable_manager_scan = getDefine_Int("vulnerability-detection", "disable_scan_manager", 0, 1); if (router_module_ptr = so_get_module_handle("router"), router_module_ptr) { router_provider_create_func_ptr = so_get_function_sym(router_module_ptr, "router_provider_create"); router_provider_send_fb_func_ptr = so_get_function_sym(router_module_ptr, "router_provider_send_fb"); From 0a922f50935b51fb251f0dcf38885bf2b66ee6c9 Mon Sep 17 00:00:00 2001 From: Gabriel Valenzuela Date: Wed, 8 May 2024 23:40:44 -0300 Subject: [PATCH 036/419] CL: - Use reverse iterator. 
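
Walk osCpeMaps with a reverse iterator and check for an exact OS name match before the
prefix match, presumably so that longer, more specific keys (which sort after their
shorter prefixes) win over generic entries when both would match.

A minimal sketch of the lookup under that assumption, using a std::map stand-in for
the JSON object that the real code in scanContext.hpp iterates with rbegin()/rend();
the helper name findOsCpe is illustrative:

    #include <map>
    #include <string>

    // Hypothetical helper mirroring the candidate CPE selection.
    std::string findOsCpe(const std::map<std::string, std::string>& osCpeMaps,
                          const std::string& osName, const std::string& platform)
    {
        // Reverse order: lexicographically later (more specific) keys are tried first.
        for (auto it = osCpeMaps.rbegin(); it != osCpeMaps.rend(); ++it)
        {
            if (osName == it->first ||             // exact name match
                osName.rfind(it->first, 0) == 0 || // name starts with the key
                platform == it->first)             // platform match
            {
                return it->second;
            }
        }
        return {};
    }
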
--- .../src/scanOrchestrator/scanContext.hpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index 83e23723876..581d5cb1afc 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -173,11 +173,12 @@ struct TScanContext final m_osData.cpeName = "cpe:/o:"; std::string cpe; - for (auto& [key, value] : osCpeMaps.items()) + for (auto it = osCpeMaps.rbegin(); it != osCpeMaps.rend(); ++it) { - if (Utils::startsWith(m_osData.name, key) || m_osData.platform.compare(key) == 0) + if (m_osData.name.compare(it.key()) == 0 || Utils::startsWith(m_osData.name, it.key()) || + m_osData.platform.compare(it.key()) == 0) { - cpe = value; + cpe = it.value(); break; } } From 21f58b4b9347a6776ab537695e3ecd066b1596a4 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Fri, 10 May 2024 12:20:20 +0200 Subject: [PATCH 037/419] Ignore wdb error on foreign key constraint --- framework/wazuh/core/cluster/common.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/framework/wazuh/core/cluster/common.py b/framework/wazuh/core/cluster/common.py index 91cc193fd13..52e64c20486 100644 --- a/framework/wazuh/core/cluster/common.py +++ b/framework/wazuh/core/cluster/common.py @@ -1685,6 +1685,7 @@ def send_data_to_wdb(data, timeout, info_type='agent-info'): result : dict Dict containing number of updated chunks, error messages (if any) and time spent. """ + ignored_wdb_exceptions = ['Cannot execute Global database query; FOREIGN KEY constraint failed'] result = {'updated_chunks': 0, 'error_messages': {'chunks': [], 'others': []}, 'time_spent': 0} wdb_conn = WazuhDBConnection() before = time.perf_counter() @@ -1705,6 +1706,9 @@ def send_data_to_wdb(data, timeout, info_type='agent-info'): except TimeoutError as e: raise e except Exception as e: + if any(ignored_exception in str(e) for ignored_exception in ignored_wdb_exceptions): + continue + result['error_messages']['chunks'].append((i, str(e))) except TimeoutError: result['error_messages']['others'].append(f'Timeout while processing {info_type} chunks.') @@ -1715,7 +1719,6 @@ def send_data_to_wdb(data, timeout, info_type='agent-info'): wdb_conn.close() return result - def asyncio_exception_handler(loop, context: Dict): """Exception handler used in the protocol. 
From 3cadbd0d79896ce2302f464c4d003e70e720f002 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Fri, 10 May 2024 12:43:23 +0200 Subject: [PATCH 038/419] Update send_data_to_wdb test --- framework/wazuh/core/cluster/tests/test_common.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/framework/wazuh/core/cluster/tests/test_common.py b/framework/wazuh/core/cluster/tests/test_common.py index 93d6900f2f7..bdf3fd3d811 100644 --- a/framework/wazuh/core/cluster/tests/test_common.py +++ b/framework/wazuh/core/cluster/tests/test_common.py @@ -1539,7 +1539,7 @@ def test_error_receiving_agent_information(): @patch("wazuh.core.cluster.common.WazuhDBConnection") -def test_send_data_to_wdb_ko(WazuhDBConnection_mock): +def test_send_data_to_wdb(WazuhDBConnection_mock): """Check if the data chunks are being properly forward to the Wazuh-db socket.""" class MockWazuhDBConnection: @@ -1554,6 +1554,8 @@ def send(self, data, raw): raise TimeoutError elif self.exceptions == 1: return '' + elif self.exceptions == 2: + raise Exception('Cannot execute Global database query; FOREIGN KEY constraint failed') else: raise Exception @@ -1572,6 +1574,11 @@ def close(self): timeout=15) assert result['updated_chunks'] == 2 + WazuhDBConnection_mock.return_value.exceptions += 1 + result = cluster_common.send_data_to_wdb(data={'chunks': ['1chunk', '2chunk'], 'set_data_command': ''}, + timeout=15) + assert result['updated_chunks'] == 0 + WazuhDBConnection_mock.return_value.exceptions += 1 result = cluster_common.send_data_to_wdb(data={'chunks': ['1chunk', '2chunk'], 'set_data_command': ''}, timeout=15) From 480a144fdc50e4c959b570b3ed4370d60d95b653 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Fri, 10 May 2024 14:43:45 +0200 Subject: [PATCH 039/419] Apply suggested optimizations --- framework/wazuh/core/cluster/common.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/framework/wazuh/core/cluster/common.py b/framework/wazuh/core/cluster/common.py index 52e64c20486..dfefb1f23c8 100644 --- a/framework/wazuh/core/cluster/common.py +++ b/framework/wazuh/core/cluster/common.py @@ -27,6 +27,8 @@ from wazuh.core.cluster import cluster, utils as cluster_utils from wazuh.core.wdb import WazuhDBConnection +IGNORED_WDB_EXCEPTIONS = ['Cannot execute Global database query; FOREIGN KEY constraint failed'] + class Response: """ Define and store a response from a request. @@ -1685,7 +1687,6 @@ def send_data_to_wdb(data, timeout, info_type='agent-info'): result : dict Dict containing number of updated chunks, error messages (if any) and time spent. 
""" - ignored_wdb_exceptions = ['Cannot execute Global database query; FOREIGN KEY constraint failed'] result = {'updated_chunks': 0, 'error_messages': {'chunks': [], 'others': []}, 'time_spent': 0} wdb_conn = WazuhDBConnection() before = time.perf_counter() @@ -1706,10 +1707,10 @@ def send_data_to_wdb(data, timeout, info_type='agent-info'): except TimeoutError as e: raise e except Exception as e: - if any(ignored_exception in str(e) for ignored_exception in ignored_wdb_exceptions): + error = str(e) + if any(ignored_exception in error for ignored_exception in IGNORED_WDB_EXCEPTIONS): continue - - result['error_messages']['chunks'].append((i, str(e))) + result['error_messages']['chunks'].append((i, error)) except TimeoutError: result['error_messages']['others'].append(f'Timeout while processing {info_type} chunks.') except Exception as e: From 89dc86aadf5117f4999abc8d5cc6e399403cdd6d Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 10 May 2024 10:49:14 -0300 Subject: [PATCH 040/419] CL: - Fix: Categorization of OS and Packages events - Updated UTs --- .../scanOrchestrator/eventDetailsBuilder.hpp | 18 ++++++++++++++---- .../tests/unit/eventDetailsBuilder_test.cpp | 2 +- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp index f84426caf08..1a76209f44b 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp @@ -188,17 +188,27 @@ class TEventDetailsBuilder final : public AbstractHandleraffectedComponentType() == AffectedComponentType::Package || - data->affectedComponentType() == AffectedComponentType::Os) + switch (data->affectedComponentType()) { - ecsData["package"] = package; + case AffectedComponentType::Package: + ecsData["package"] = package; + ecsData["vulnerability"]["category"] = "Packages"; + break; + + case AffectedComponentType::Os: + ecsData["package"] = package; + ecsData["vulnerability"]["category"] = "OS"; + break; + + default: + // No fields required. + break; } // ECS os fields. ecsData["host"]["os"] = os; // ECS vulnerability fields. 
- ecsData["vulnerability"]["category"] = "Packages"; ecsData["vulnerability"]["classification"] = returnData.data->classification()->str(); ecsData["vulnerability"]["description"] = returnData.data->description()->str(); ecsData["vulnerability"]["detected_at"] = Utils::getCurrentISO8601(); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp index 4901e4e5dfe..f63534e3f74 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp @@ -743,7 +743,7 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulOsInserted) EXPECT_STREQ(elementData.at("host").at("os").at("version").get_ref().c_str(), elementOsVersion.c_str()); - EXPECT_STREQ(elementData.at("vulnerability").at("category").get_ref().c_str(), "Packages"); + EXPECT_STREQ(elementData.at("vulnerability").at("category").get_ref().c_str(), "OS"); EXPECT_STREQ(elementData.at("vulnerability").at("classification").get_ref().c_str(), GetVulnerabilityDescription(fbBuilder.GetBufferPointer())->classification()->c_str()); EXPECT_STREQ(elementData.at("vulnerability").at("description").get_ref().c_str(), From 024e62b2f102e98e20027fe11552dc19a6bc72db Mon Sep 17 00:00:00 2001 From: GGP1 Date: Fri, 10 May 2024 12:38:46 -0300 Subject: [PATCH 041/419] Revert tests reordering --- .../test_cluster_endpoints.tavern.yaml | 812 +++++----- .../test_manager_endpoints.tavern.yaml | 1331 +++++++---------- 2 files changed, 952 insertions(+), 1191 deletions(-) diff --git a/api/test/integration/test_cluster_endpoints.tavern.yaml b/api/test/integration/test_cluster_endpoints.tavern.yaml index 235e717d13c..a72d768372b 100644 --- a/api/test/integration/test_cluster_endpoints.tavern.yaml +++ b/api/test/integration/test_cluster_endpoints.tavern.yaml @@ -1580,7 +1580,7 @@ stages: total_failed_items: 0 --- -test_name: GET /cluster/{node_id}/daemons/stats +test_name: GET /cluster/{node_id}/logs marks: - cluster @@ -1593,11 +1593,10 @@ marks: stages: - # GET /cluster/{node_id}/daemons/stats - - name: Get all daemons' statistics from {node_id} - request: + - name: Read logs {node_id} + request: &get_cluster_logs verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/logs" method: GET headers: Authorization: "Bearer {test_login_token}" @@ -1606,358 +1605,273 @@ stages: json: error: 0 data: - affected_items: - - name: wazuh-remoted - - name: wazuh-analysisd - - name: wazuh-db - total_affected_items: 3 + affected_items: !anything failed_items: [] + total_affected_items: !anyint total_failed_items: 0 - # GET /cluster/{node_id}/daemons/stats?daemons_list=wazuh-remoted - - name: Get statistics from a single daemon from {node_id} + - name: Read logs with filters -> limit=3 {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs params: - daemons_list: wazuh-remoted + limit: 3 response: status_code: 200 json: error: 0 data: affected_items: - - name: wazuh-remoted - total_affected_items: 1 + - &cluster_log + description: !anystr + level: !anystr + tag: !anystr + timestamp: !anystr + - <<: *cluster_log + - <<: *cluster_log failed_items: [] + total_affected_items: !anyint total_failed_items: 0 - # GET 
/cluster/{node_id}/daemons/stats?daemons_list=wazuh-remoted,wazuh-db,wazuh-analysisd - - name: Get statistics from a list of daemons from {node_id} + - name: Read logs with filters -> limit=4 {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs params: - daemons_list: wazuh-remoted,wazuh-db,wazuh-analysisd + limit: 4 response: status_code: 200 json: error: 0 data: affected_items: - - name: wazuh-remoted - - name: wazuh-db - - name: wazuh-analysisd - total_affected_items: 3 + - <<: *cluster_log + - <<: *cluster_log + - <<: *cluster_log + - <<: *cluster_log failed_items: [] + total_affected_items: !anyint total_failed_items: 0 - # GET /cluster/{node_id}/daemons/stats?daemons_list=wrong-daemon-name - - name: Try to get statistics from a wrong daemon from {node_id} + - name: Read logs with filters -> limit=2, sort=tag {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs params: - daemons_list: wrong-daemon-name - response: - status_code: 400 - ---- -test_name: GET /cluster/{node_id}/stats - -marks: - - cluster - - parametrize: - key: node_id - vals: - - master-node - - worker1 - - worker2 - -stages: - - - name: Cluster stats {node_id} today - request: &get_cluster_stats - verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + limit: 2 + sort: tag response: - status_code: 400 - json: - error: 1308 + verify_response_with: + - function: tavern_utils:test_sort_response + extra_kwargs: + key: "tag" + status_code: 200 - - name: Cluster stats {node_id} 2019-08-27 + - name: Read logs with filters -> limit=1, sort=-level {node_id} request: verify: False - <<: *get_cluster_stats + <<: *get_cluster_logs params: - date: "2019-08-27" + limit: 1 + sort: -level response: + verify_response_with: + - function: tavern_utils:test_sort_response + extra_kwargs: + key: "level" + reverse: true status_code: 200 - json: - error: 0 - data: !anything - - name: Cluster stats {node_id} day without stats + - name: Read logs with filters -> offset=2, limit=3 {node_id} request: verify: False - <<: *get_cluster_stats + <<: *get_cluster_logs params: - date: "1970-01-01" + limit: 3 + offset: 2 response: - status_code: 400 + status_code: 200 json: - error: 1308 - ---- -test_name: GET /cluster/wrong_node/stats - -marks: - - cluster - -stages: + error: 0 + data: + affected_items: + - <<: *cluster_log + - <<: *cluster_log + - <<: *cluster_log + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - - name: Unexisting_node stats + - name: Read logs with filters -> offset=5, limit=2 {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/unexisting-node/stats" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs + params: + limit: 2 + offset: 5 response: status_code: 200 json: - error: 1 + error: 0 data: - affected_items: [] - total_affected_items: 0 - failed_items: - - error: - code: 1730 - id: - - "unexisting-node" - ---- -test_name: GET /cluster/{node_id}/stats/analysisd - -marks: - - cluster - - parametrize: - key: node_id - vals: - - master-node - - worker1 - - worker2 - -stages: + affected_items: + - <<: *cluster_log + - <<: *cluster_log + failed_items: [] 
+ total_affected_items: !anyint + total_failed_items: 0 - - name: Analysisd stats {node_id} + - name: Read logs with filters -> tag=wazuh-analysisd, limit=1 {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/analysisd" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs + params: + tag: wazuh-analysisd + limit: 1 response: status_code: 200 json: error: 0 data: affected_items: - - total_events_decoded: !anyfloat - syscheck_events_decoded: !anyfloat - syscollector_events_decoded: !anyfloat - rootcheck_events_decoded: !anyfloat - sca_events_decoded: !anyfloat - winevt_events_decoded: !anyfloat - other_events_decoded: !anyfloat - events_processed: !anyfloat - events_received: !anyfloat - events_dropped: !anyfloat - alerts_written: !anyfloat - firewall_written: !anyfloat - fts_written: !anyfloat - syscheck_queue_usage: !anyfloat - syscheck_queue_size: !anyfloat - syscollector_queue_usage: !anyfloat - syscollector_queue_size: !anyfloat - rootcheck_queue_usage: !anyfloat - rootcheck_queue_size: !anyfloat - sca_queue_usage: !anyfloat - sca_queue_size: !anyfloat - hostinfo_queue_usage: !anyfloat - hostinfo_queue_size: !anyfloat - winevt_queue_usage: !anyfloat - winevt_queue_size: !anyfloat - event_queue_usage: !anyfloat - event_queue_size: !anyfloat - rule_matching_queue_usage: !anyfloat - rule_matching_queue_size: !anyfloat - alerts_queue_usage: !anyfloat - alerts_queue_size: !anyfloat - firewall_queue_usage: !anyfloat - firewall_queue_size: !anyfloat - statistical_queue_usage: !anyfloat - statistical_queue_size: !anyfloat - archives_queue_usage: !anyfloat - archives_queue_size: !anyfloat - total_affected_items: 1 + - <<: *cluster_log failed_items: [] + total_affected_items: !anyint total_failed_items: 0 ---- -test_name: GET /cluster/{node_id}/stats/hourly - -marks: - - cluster - - parametrize: - key: node_id - vals: - - master-node - - worker1 - - worker2 - -stages: - - - name: Hourly node stats {node_id} + - name: Read logs with filters -> tag=wazuh-syscheckd, limit=2 {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/hourly" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs + params: + tag: wazuh-syscheckd + limit: 2 response: status_code: 200 json: error: 0 data: affected_items: - - averages: !anything - interactions: !anyint - total_affected_items: 1 + - <<: *cluster_log + tag: wazuh-syscheckd + - <<: *cluster_log + tag: wazuh-syscheckd failed_items: [] + total_affected_items: !anyint total_failed_items: 0 ---- -test_name: GET /cluster/{node_id}/stats/remoted - -marks: - - cluster - - parametrize: - key: node_id - vals: - - master-node - - worker1 - - worker2 - -stages: + - name: Read logs with filters -> tag=wazuh-unknown-daemon {node_id} + request: + verify: False + <<: *get_cluster_logs + params: + tag: wazuh-unknown-daemon + response: + status_code: 200 + json: + error: 0 + data: + affected_items: [] + failed_items: [] + total_affected_items: 0 + total_failed_items: 0 - - name: Remoted stats {node_id} + - name: Read logs with filters -> level=info, limit=1 {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/remoted" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs + params: + level: info + limit: 1 response: status_code: 200 json: error: 0 data: affected_items: - - queue_size: !anyfloat - total_queue_size: !anyfloat - 
tcp_sessions: !anyfloat - evt_count: !anyfloat - ctrl_msg_count: !anyfloat - discarded_count: !anyfloat - sent_bytes: !anyfloat - recv_bytes: !anyfloat - total_affected_items: 1 + - <<: *cluster_log + level: info failed_items: [] + total_affected_items: !anyint total_failed_items: 0 ---- -test_name: GET /cluster/{node_id}/stats/weekly - -marks: - - cluster - - parametrize: - key: node_id - vals: - - master-node - - worker1 - - worker2 - -stages: + - name: Read logs with filters by query (tag=wazuh-syscheckd, level=info) {node_id} + request: + verify: False + <<: *get_cluster_logs + params: + q: tag=wazuh-syscheckd;level=info + response: + status_code: 200 + verify_response_with: + - function: tavern_utils:test_expected_value + extra_kwargs: + key: "tag" + expected_values: "wazuh-syscheckd" + - function: tavern_utils:test_expected_value + extra_kwargs: + key: "level" + expected_values: "info" - - name: Weekly node stats {node_id} + - name: Read logs using valid select request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/weekly" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs + params: + select: 'timestamp,tag' response: + verify_response_with: + # Check response item keys are the selected keys + function: tavern_utils:test_select_key_affected_items + extra_kwargs: + select_key: 'timestamp,tag' status_code: 200 json: error: 0 data: - affected_items: - - Sun: - hours: !anything - interactions: !anyint - - Mon: - hours: !anything - interactions: !anyint - - Tue: - hours: !anything - interactions: !anyint - - Wed: - hours: !anything - interactions: !anyint - - Thu: - hours: !anything - interactions: !anyint - - Fri: - hours: !anything - interactions: !anyint - - Sat: - hours: !anything - interactions: !anyint - total_affected_items: 7 + total_affected_items: !anyint failed_items: [] total_failed_items: 0 + - name: Try to read logs using invalid select + request: + verify: False + <<: *get_cluster_logs + params: + select: 'noexists' + response: + status_code: 400 + json: &invalid_select + error: 1724 + + - name: Get distinct cluster node logs + request: + verify: False + <<: *get_cluster_logs + params: + distinct: true + response: + status_code: 200 + verify_response_with: + function: tavern_utils:test_distinct_key + --- -test_name: GET /cluster/{node_id}/status +test_name: GET /cluster/{node_id}/logs/summary marks: - cluster - parametrize: key: node_id vals: - - master-node - worker1 - worker2 stages: - - name: Read status {node_id} + - name: Read logs summary {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/status" + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/logs/summary" method: GET headers: Authorization: "Bearer {test_login_token}" @@ -1970,9 +1884,10 @@ stages: failed_items: [] total_affected_items: !anyint total_failed_items: 0 + message: !anystr --- -test_name: GET /cluster/{node_id}/logs +test_name: GET /cluster/{node_id}/daemons/stats marks: - cluster @@ -1985,10 +1900,11 @@ marks: stages: - - name: Read logs {node_id} - request: &get_cluster_logs + # GET /cluster/{node_id}/daemons/stats + - name: Get all daemons' statistics from {node_id} + request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/logs" + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" method: GET headers: Authorization: "Bearer {test_login_token}" @@ -1997,273 +1913,358 @@ stages: json: error: 0 data: - affected_items: !anything + 
affected_items: + - name: wazuh-remoted + - name: wazuh-analysisd + - name: wazuh-db + total_affected_items: 3 failed_items: [] - total_affected_items: !anyint total_failed_items: 0 - - name: Read logs with filters -> limit=3 {node_id} + # GET /cluster/{node_id}/daemons/stats?daemons_list=wazuh-remoted + - name: Get statistics from a single daemon from {node_id} request: verify: False - <<: *get_cluster_logs + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" + method: GET + headers: + Authorization: "Bearer {test_login_token}" params: - limit: 3 + daemons_list: wazuh-remoted response: status_code: 200 json: error: 0 data: affected_items: - - &cluster_log - description: !anystr - level: !anystr - tag: !anystr - timestamp: !anystr - - <<: *cluster_log - - <<: *cluster_log + - name: wazuh-remoted + total_affected_items: 1 failed_items: [] - total_affected_items: !anyint total_failed_items: 0 - - name: Read logs with filters -> limit=4 {node_id} + # GET /cluster/{node_id}/daemons/stats?daemons_list=wazuh-remoted,wazuh-db,wazuh-analysisd + - name: Get statistics from a list of daemons from {node_id} request: verify: False - <<: *get_cluster_logs + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" + method: GET + headers: + Authorization: "Bearer {test_login_token}" params: - limit: 4 + daemons_list: wazuh-remoted,wazuh-db,wazuh-analysisd response: status_code: 200 json: error: 0 data: affected_items: - - <<: *cluster_log - - <<: *cluster_log - - <<: *cluster_log - - <<: *cluster_log + - name: wazuh-remoted + - name: wazuh-db + - name: wazuh-analysisd + total_affected_items: 3 failed_items: [] - total_affected_items: !anyint total_failed_items: 0 - - name: Read logs with filters -> limit=2, sort=tag {node_id} + # GET /cluster/{node_id}/daemons/stats?daemons_list=wrong-daemon-name + - name: Try to get statistics from a wrong daemon from {node_id} request: verify: False - <<: *get_cluster_logs + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" + method: GET + headers: + Authorization: "Bearer {test_login_token}" params: - limit: 2 - sort: tag + daemons_list: wrong-daemon-name response: - verify_response_with: - - function: tavern_utils:test_sort_response - extra_kwargs: - key: "tag" - status_code: 200 + status_code: 400 - - name: Read logs with filters -> limit=1, sort=-level {node_id} - request: +--- +test_name: GET /cluster/{node_id}/stats + +marks: + - cluster + - parametrize: + key: node_id + vals: + - master-node + - worker1 + - worker2 + +stages: + + - name: Cluster stats {node_id} today + request: &get_cluster_stats verify: False - <<: *get_cluster_logs - params: - limit: 1 - sort: -level + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats" + method: GET + headers: + Authorization: "Bearer {test_login_token}" response: - verify_response_with: - - function: tavern_utils:test_sort_response - extra_kwargs: - key: "level" - reverse: true - status_code: 200 + status_code: 400 + json: + error: 1308 - - name: Read logs with filters -> offset=2, limit=3 {node_id} + - name: Cluster stats {node_id} 2019-08-27 request: verify: False - <<: *get_cluster_logs + <<: *get_cluster_stats params: - limit: 3 - offset: 2 + date: "2019-08-27" response: status_code: 200 json: error: 0 - data: - affected_items: - - <<: *cluster_log - - <<: *cluster_log - - <<: *cluster_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + data: !anything - - name: Read logs with filters -> offset=5, limit=2 {node_id} + - 
name: Cluster stats {node_id} day without stats request: verify: False - <<: *get_cluster_logs + <<: *get_cluster_stats params: - limit: 2 - offset: 5 + date: "1970-01-01" response: - status_code: 200 + status_code: 400 json: - error: 0 - data: - affected_items: - - <<: *cluster_log - - <<: *cluster_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + error: 1308 - - name: Read logs with filters -> tag=wazuh-analysisd, limit=1 {node_id} +--- +test_name: GET /cluster/wrong_node/stats + +marks: + - cluster + +stages: + + - name: Unexisting_node stats request: verify: False - <<: *get_cluster_logs - params: - tag: wazuh-analysisd - limit: 1 + url: "{protocol:s}://{host:s}:{port:d}/cluster/unexisting-node/stats" + method: GET + headers: + Authorization: "Bearer {test_login_token}" response: status_code: 200 json: - error: 0 + error: 1 data: - affected_items: - - <<: *cluster_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + affected_items: [] + total_affected_items: 0 + failed_items: + - error: + code: 1730 + id: + - "unexisting-node" - - name: Read logs with filters -> tag=wazuh-modulesd, limit=2 {node_id} +--- +test_name: GET /cluster/{node_id}/stats/analysisd + +marks: + - cluster + - parametrize: + key: node_id + vals: + - master-node + - worker1 + - worker2 + +stages: + + - name: Analysisd stats {node_id} request: verify: False - <<: *get_cluster_logs - params: - tag: wazuh-modulesd - limit: 2 + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/analysisd" + method: GET + headers: + Authorization: "Bearer {test_login_token}" response: status_code: 200 json: error: 0 data: affected_items: - - <<: *cluster_log - tag: wazuh-modulesd - - <<: *cluster_log - tag: wazuh-modulesd + - total_events_decoded: !anyfloat + syscheck_events_decoded: !anyfloat + syscollector_events_decoded: !anyfloat + rootcheck_events_decoded: !anyfloat + sca_events_decoded: !anyfloat + winevt_events_decoded: !anyfloat + other_events_decoded: !anyfloat + events_processed: !anyfloat + events_received: !anyfloat + events_dropped: !anyfloat + alerts_written: !anyfloat + firewall_written: !anyfloat + fts_written: !anyfloat + syscheck_queue_usage: !anyfloat + syscheck_queue_size: !anyfloat + syscollector_queue_usage: !anyfloat + syscollector_queue_size: !anyfloat + rootcheck_queue_usage: !anyfloat + rootcheck_queue_size: !anyfloat + sca_queue_usage: !anyfloat + sca_queue_size: !anyfloat + hostinfo_queue_usage: !anyfloat + hostinfo_queue_size: !anyfloat + winevt_queue_usage: !anyfloat + winevt_queue_size: !anyfloat + event_queue_usage: !anyfloat + event_queue_size: !anyfloat + rule_matching_queue_usage: !anyfloat + rule_matching_queue_size: !anyfloat + alerts_queue_usage: !anyfloat + alerts_queue_size: !anyfloat + firewall_queue_usage: !anyfloat + firewall_queue_size: !anyfloat + statistical_queue_usage: !anyfloat + statistical_queue_size: !anyfloat + archives_queue_usage: !anyfloat + archives_queue_size: !anyfloat + total_affected_items: 1 failed_items: [] - total_affected_items: !anyint total_failed_items: 0 - - name: Read logs with filters -> tag=wazuh-unknown-daemon {node_id} +--- +test_name: GET /cluster/{node_id}/stats/hourly + +marks: + - cluster + - parametrize: + key: node_id + vals: + - master-node + - worker1 + - worker2 + +stages: + + - name: Hourly node stats {node_id} request: verify: False - <<: *get_cluster_logs - params: - tag: wazuh-unknown-daemon + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/hourly" + method: GET + 
headers: + Authorization: "Bearer {test_login_token}" response: status_code: 200 json: error: 0 data: - affected_items: [] + affected_items: + - averages: !anything + interactions: !anyint + total_affected_items: 1 failed_items: [] - total_affected_items: 0 total_failed_items: 0 - - name: Read logs with filters -> level=info, limit=1 {node_id} +--- +test_name: GET /cluster/{node_id}/stats/remoted + +marks: + - cluster + - parametrize: + key: node_id + vals: + - master-node + - worker1 + - worker2 + +stages: + + - name: Remoted stats {node_id} request: verify: False - <<: *get_cluster_logs - params: - level: info - limit: 1 + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/remoted" + method: GET + headers: + Authorization: "Bearer {test_login_token}" response: status_code: 200 json: error: 0 data: affected_items: - - <<: *cluster_log - level: info + - queue_size: !anyfloat + total_queue_size: !anyfloat + tcp_sessions: !anyfloat + evt_count: !anyfloat + ctrl_msg_count: !anyfloat + discarded_count: !anyfloat + sent_bytes: !anyfloat + recv_bytes: !anyfloat + total_affected_items: 1 failed_items: [] - total_affected_items: !anyint total_failed_items: 0 - - name: Read logs with filters by query (tag=sca, level=info) {node_id} - request: - verify: False - <<: *get_cluster_logs - params: - q: tag=sca;level=info - response: - status_code: 200 - verify_response_with: - - function: tavern_utils:test_expected_value - extra_kwargs: - key: "tag" - expected_values: "sca" - - function: tavern_utils:test_expected_value - extra_kwargs: - key: "level" - expected_values: "info" +--- +test_name: GET /cluster/{node_id}/stats/weekly - - name: Read logs using valid select +marks: + - cluster + - parametrize: + key: node_id + vals: + - master-node + - worker1 + - worker2 + +stages: + + - name: Weekly node stats {node_id} request: verify: False - <<: *get_cluster_logs - params: - select: 'timestamp,tag' + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/weekly" + method: GET + headers: + Authorization: "Bearer {test_login_token}" response: - verify_response_with: - # Check response item keys are the selected keys - function: tavern_utils:test_select_key_affected_items - extra_kwargs: - select_key: 'timestamp,tag' status_code: 200 json: error: 0 data: - total_affected_items: !anyint + affected_items: + - Sun: + hours: !anything + interactions: !anyint + - Mon: + hours: !anything + interactions: !anyint + - Tue: + hours: !anything + interactions: !anyint + - Wed: + hours: !anything + interactions: !anyint + - Thu: + hours: !anything + interactions: !anyint + - Fri: + hours: !anything + interactions: !anyint + - Sat: + hours: !anything + interactions: !anyint + total_affected_items: 7 failed_items: [] total_failed_items: 0 - - name: Try to read logs using invalid select - request: - verify: False - <<: *get_cluster_logs - params: - select: 'noexists' - response: - status_code: 400 - json: &invalid_select - error: 1724 - - - name: Get distinct cluster node logs - request: - verify: False - <<: *get_cluster_logs - params: - distinct: true - response: - status_code: 200 - verify_response_with: - function: tavern_utils:test_distinct_key - --- -test_name: GET /cluster/{node_id}/logs/summary +test_name: GET /cluster/{node_id}/status marks: - cluster - parametrize: key: node_id vals: + - master-node - worker1 - worker2 stages: - - name: Read logs summary {node_id} + - name: Read status {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/logs/summary" + url: 
"{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/status" method: GET headers: Authorization: "Bearer {test_login_token}" @@ -2276,7 +2277,6 @@ stages: failed_items: [] total_affected_items: !anyint total_failed_items: 0 - message: !anystr --- test_name: PUT /cluster/restart diff --git a/api/test/integration/test_manager_endpoints.tavern.yaml b/api/test/integration/test_manager_endpoints.tavern.yaml index f3003a2df5c..43570f0888d 100644 --- a/api/test/integration/test_manager_endpoints.tavern.yaml +++ b/api/test/integration/test_manager_endpoints.tavern.yaml @@ -582,615 +582,451 @@ stages: total_failed_items: 0 --- -test_name: GET /manager/api/config - -stages: - - # GET /manager/api/config - - name: Get API configuration - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/api/config" - method: GET - headers: - Authorization: "Bearer {test_login_token}" - response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - node_name: !anystr - node_api_config: - host: !anystr - port: !anyint - - https: - enabled: !anybool - key: !anystr - cert: !anystr - use_ca: !anybool - ca: !anystr - ssl_protocol: !anystr - ssl_ciphers: !anystr - logs: - level: !anystr - max_size: - enabled: !anybool - size: !anystr - cors: - enabled: !anybool - source_route: !anystr - expose_headers: !anystr - allow_headers: !anystr - allow_credentials: !anybool - cache: - enabled: !anybool - time: !anything - access: - max_login_attempts: !anyint - block_time: !anyint - max_request_per_minute: !anyint - drop_privileges: !anybool - experimental_features: !anybool - upload_configuration: - remote_commands: - localfile: - allow: !anybool - exceptions: !anything - wodle_command: - allow: !anybool - exceptions: !anything - limits: - eps: - allow: !anybool - agents: - allow_higher_versions: - allow: !anybool - indexer: - allow: !anybool - total_affected_items: 1 - total_failed_items: 0 - failed_items: [] - ---- -test_name: GET /manager/configuration/validation (OK) +test_name: GET /manager/logs stages: - # GET /manager/configuration/validation - - name: Request validation + # GET /manager/logs + - name: Request request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/validation" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET headers: Authorization: "Bearer {test_login_token}" response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - name: !anystr - status: 'OK' - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 - ---- -test_name: GET /manager/validation (KO) - -stages: - - #### Upload corrupted rules file - # PUT /rules/files - - name: Upload corrupted - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/rules/files/new-rules_corrupted.xml" - method: PUT - data: "\n \n \n \n 5716\n 1.1.1.1\n sshd: authentication failed from IP 1.1.1.1.\n authentication_failed,pci_dss_10.2.4,pci_dss_10.2.5,\n \n \n" - headers: - Authorization: "Bearer {test_login_token}" - content-type: application/octet-stream - response: - status_code: 200 - json: - error: 0 + status_code: 200 + json: + error: 0 + data: + affected_items: !anything + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - # GET /manager/configuration/validation - - name: Request validation + # GET /manager/logs + - name: Filters -> limit=4 request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/validation" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET headers: 
Authorization: "Bearer {test_login_token}" + params: + limit: 4 response: - status_code: 200 - json: - error: 1 - data: - affected_items: [] - failed_items: - - error: - code: 1908 - id: - - !anystr - total_affected_items: 0 - total_failed_items: 1 - ---- -test_name: GET /manager/configuration/{component}/{configuration} - -stages: - - - name: Show the config of analysis/global in the manager - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/global" - headers: - Authorization: "Bearer {test_login_token}" - method: GET - response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - global: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 + status_code: 200 + json: + error: 0 + data: + affected_items: + - &manager_log + description: !anystr + level: !anystr + tag: !anystr + timestamp: !anystr + - <<: *manager_log + - <<: *manager_log + - <<: *manager_log + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - - name: Show the config of analysis/active_response in the manager + # GET /manager/logs + - name: Filters -> limit=2, sort=-level request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/active_response" - headers: - Authorization: "Bearer {test_login_token}" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET - response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - active-response: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 - message: !anystr - - - name: Show the config of analysis/alerts in the manager - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/alerts" headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + limit: 2 + sort: -level response: + verify_response_with: + - function: tavern_utils:test_sort_response + extra_kwargs: + key: "level" + reverse: true status_code: 200 - json: - error: 0 - data: - affected_items: - - alerts: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 - - name: Show the config of analysis/command in the manager + # GET /manager/logs + - name: Filters -> offset=3, limit=3 request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/command" - headers: - Authorization: "Bearer {test_login_token}" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET - response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - command: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 - - - name: Show the config of analysis/internal in the manager - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/internal" headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + limit: 3 + offset: 3 response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - internal: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 + status_code: 200 + json: + error: 0 + data: + affected_items: + - <<: *manager_log + - <<: *manager_log + - <<: *manager_log + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - - name: Show the config of auth in the manager + # GET /manager/logs + - name: Filters -> offset=3, level=info, limit=4 request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/auth/auth" - headers: - Authorization: 
"Bearer {test_login_token}" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET - response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - auth: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 - - - name: Show the config of com/internal in the manager - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/com/internal" headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + limit: 4 + offset: 3 + level: info response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - internal: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 + status_code: 200 + json: + error: 0 + data: + affected_items: + - <<: *manager_log + - <<: *manager_log + - <<: *manager_log + - <<: *manager_log + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - - name: Show the config of logcollector/localfile in the manager + # GET /manager/logs + - name: Filters -> tag=wazuh-analysisd, limit=1 request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/logcollector/localfile" - headers: - Authorization: "Bearer {test_login_token}" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET - response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - localfile: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 - - - name: Show the config of logcollector/socket in the manager - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/logcollector/socket" headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + tag: wazuh-analysisd + limit: 1 response: - status_code: 200 - json: - error: 0 - data: - affected_items: [] - failed_items: [] - total_affected_items: 0 - total_failed_items: 0 + status_code: 200 + json: + error: 0 + data: + affected_items: + - <<: *manager_log + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - - name: Show the config of logcollector/internal in the manager + # GET /manager/logs + - name: Filters -> tag=wazuh-syscheckd, limit=1 request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/logcollector/internal" - headers: - Authorization: "Bearer {test_login_token}" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET - response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - internal: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 - - - name: Show the config of monitor/internal in the manager - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/monitor/internal" headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + tag: wazuh-syscheckd + limit: 1 response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - monitord: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 + status_code: 200 + json: + error: 0 + data: + affected_items: + - <<: *manager_log + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - - name: Show the config of monitor/reports in the manager + - name: Filters by query (tag=wazuh-syscheckd, level=info) request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/monitor/reports" - headers: - Authorization: "Bearer {test_login_token}" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET 
- response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - reports: !anything - failed_items: [ ] - total_affected_items: 1 - total_failed_items: 0 - - - name: Show the config of request/remote in the manager - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/request/remote" headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + q: tag=wazuh-syscheckd;level=info response: status_code: 200 - json: - error: 0 - data: - affected_items: - - remote: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 + verify_response_with: + - function: tavern_utils:test_expected_value + extra_kwargs: + key: "tag" + expected_values: "wazuh-syscheckd" + - function: tavern_utils:test_expected_value + extra_kwargs: + key: "level" + expected_values: "info" - - name: Show the config of request/internal in the manager + - name: Filters by query (timestamp<2021-07-01) request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/request/internal" - headers: - Authorization: "Bearer {test_login_token}" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET - response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - internal: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 - - - name: Show the config of syscheck/syscheck in the manager - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/syscheck/syscheck" headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + q: timestamp<2021-07-01 response: status_code: 200 json: error: 0 data: - affected_items: - - syscheck: !anything + affected_items: [] failed_items: [] - total_affected_items: 1 + total_affected_items: 0 total_failed_items: 0 - - name: Show the config of syscheck/rootcheck in the manager + - name: Filter by non-existent tag request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/syscheck/rootcheck" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + tag: wazuh-unknown-daemon response: status_code: 200 json: error: 0 data: - affected_items: - - rootcheck: !anything + affected_items: [] failed_items: [] - total_affected_items: 1 + total_affected_items: 0 total_failed_items: 0 - - name: Show the config of syscheck/internal in the manager + - name: Read logs using valid select request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/syscheck/internal" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + select: 'timestamp,tag' response: + verify_response_with: + # Check response item keys are the selected keys + function: tavern_utils:test_select_key_affected_items + extra_kwargs: + select_key: 'timestamp,tag' status_code: 200 json: error: 0 data: - affected_items: - - internal: !anything + total_affected_items: !anyint failed_items: [] - total_affected_items: 1 total_failed_items: 0 - - name: Show the config of wazuh-db/internal in the manager + - name: Try to read logs using invalid select request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/wazuh-db/internal" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET headers: Authorization: "Bearer {test_login_token}" + params: + select: 'noexists' response: - status_code: 200 - 
json: - error: !anyint - data: - affected_items: - - wazuh_db: - commit_time_max: !anyint - commit_time_min: !anyint - open_db_limit: !anyint - worker_pool_size: !anyint - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 + status_code: 400 + json: &invalid_select + error: 1724 - - name: Show the config of wazuh-db/wdb in the manager + - name: Get distinct manager logs request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/wazuh-db/wdb" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET headers: Authorization: "Bearer {test_login_token}" + params: + distinct: true response: status_code: 200 - json: - error: !anyint - data: - affected_items: - - wdb: - backup: - - database: !anystr - enabled: !anybool - interval: !anyint - max_files: !anyint - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 + verify_response_with: + function: tavern_utils:test_distinct_key - - name: Try to show the config of wmodules/wmodules in the manager +--- +test_name: GET /manager/logs/summary + +stages: + + # GET /manager/logs/summary + - name: Request request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/wmodules/wmodules" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs/summary" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET response: status_code: 200 json: error: 0 data: affected_items: - - wmodules: !anything + - sca: !anything + - wazuh-agentlessd: !anything + - wazuh-analysisd: !anything + - wazuh-authd: !anything + - wazuh-csyslogd: !anything + - wazuh-db: !anything + - wazuh-dbd: !anything + - wazuh-execd: !anything + - wazuh-integratord: !anything + - wazuh-logcollector: !anything + - wazuh-modulesd: !anything + - wazuh-modulesd:agent-upgrade: !anything + - wazuh-modulesd:ciscat: !anything + - wazuh-modulesd:control: !anything + - wazuh-modulesd:database: !anything + - wazuh-modulesd:download: !anything + - wazuh-modulesd:osquery: !anything + - wazuh-modulesd:syscollector: !anything + - wazuh-modulesd:task-manager: !anything + - wazuh-monitord: !anything + - wazuh-remoted: !anything + - wazuh-rootcheck: !anything + - wazuh-syscheckd: !anything failed_items: [] - total_affected_items: 1 + total_affected_items: !anyint total_failed_items: 0 - - name: Try to show the invalid config of component in the manager - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/agent/wmodules" - headers: - Authorization: "Bearer {test_login_token}" - method: GET - response: - status_code: 400 - json: - error: 1128 - --- -test_name: PUT /manager/configuration +test_name: GET /manager/api/config stages: - # PUT /manager/configuration - - name: Upload a valid configuration + # GET /manager/api/config + - name: Get API configuration request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" - method: PUT - data: "{valid_ossec_conf:s}" + url: "{protocol:s}://{host:s}:{port:d}/manager/api/config" + method: GET headers: Authorization: "Bearer {test_login_token}" - content-type: application/octet-stream response: status_code: 200 json: + error: 0 data: affected_items: - - 'manager' - failed_items: [] + - node_name: !anystr + node_api_config: + host: !anystr + port: !anyint + + https: + enabled: !anybool + key: !anystr + cert: !anystr + use_ca: !anybool + ca: !anystr + ssl_protocol: !anystr + ssl_ciphers: !anystr + logs: + level: !anystr + max_size: + enabled: !anybool + size: !anystr + cors: + enabled: !anybool + 
source_route: !anystr + expose_headers: !anystr + allow_headers: !anystr + allow_credentials: !anybool + cache: + enabled: !anybool + time: !anything + access: + max_login_attempts: !anyint + block_time: !anyint + max_request_per_minute: !anyint + drop_privileges: !anybool + experimental_features: !anybool + upload_configuration: + remote_commands: + localfile: + allow: !anybool + exceptions: !anything + wodle_command: + allow: !anybool + exceptions: !anything + limits: + eps: + allow: !anybool + agents: + allow_higher_versions: + allow: !anybool + indexer: + allow: !anybool total_affected_items: 1 total_failed_items: 0 - error: 0 + failed_items: [] - # GET /manager/configuration/ - - name: Ensure the new config has been applied by checking a field +--- +test_name: GET /manager/configuration/validation (OK) + +stages: + + # GET /manager/configuration/validation + - name: Request validation request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/validation" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET - params: - section: "alerts" - field: "log_alert_level" response: status_code: 200 json: error: 0 data: affected_items: - - alerts: - log_alert_level: '300' + - name: !anystr + status: 'OK' failed_items: [] total_affected_items: 1 total_failed_items: 0 +--- +test_name: GET /manager/validation (KO) - # PUT /manager/configuration - - name: Try to upload an invalid configuration +stages: + + #### Upload corrupted rules file + # PUT /rules/files + - name: Upload corrupted request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + url: "{protocol:s}://{host:s}:{port:d}/rules/files/new-rules_corrupted.xml" method: PUT - data: "{invalid_ossec_conf:s}" + data: "\n \n \n \n 5716\n 1.1.1.1\n sshd: authentication failed from IP 1.1.1.1.\n authentication_failed,pci_dss_10.2.4,pci_dss_10.2.5,\n \n \n" headers: Authorization: "Bearer {test_login_token}" content-type: application/octet-stream response: status_code: 200 json: - error: 1 - data: - affected_items: [] - failed_items: - - error: - code: 1113 - id: - - 'manager' - total_affected_items: 0 - total_failed_items: 1 + error: 0 - # PUT /manager/configuration - - name: Try to upload an empty configuration + # GET /manager/configuration/validation + - name: Request validation request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" - method: PUT - data: "{invalid_ossec_conf:s}" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/validation" + method: GET headers: Authorization: "Bearer {test_login_token}" - content-type: application/octet-stream response: status_code: 200 json: @@ -1199,607 +1035,532 @@ stages: affected_items: [] failed_items: - error: - code: 1113 + code: 1908 id: - - 'manager' + - !anystr total_affected_items: 0 total_failed_items: 1 - # PUT /manager/configuration - - name: Try to upload an invalid configuration with an invalid content-type +--- +test_name: GET /manager/configuration/{component}/{configuration} + +stages: + + - name: Show the config of analysis/global in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" - method: PUT - data: "{invalid_ossec_conf:s}" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/global" headers: Authorization: "Bearer {test_login_token}" - content-type: application/json + method: GET response: - status_code: 406 + status_code: 200 
json: - error: 6002 + error: 0 + data: + affected_items: + - global: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - # GET /manager/configuration/ - - name: Ensure the config didn't change + - name: Show the config of analysis/active_response in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/active_response" headers: Authorization: "Bearer {test_login_token}" method: GET - params: - section: "alerts" - field: "log_alert_level" response: status_code: 200 json: error: 0 data: affected_items: - - alerts: - log_alert_level: '300' + - active-response: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 + message: !anystr ---- -test_name: GET /manager/version/check - -stages: - - name: Get wazuh version + - name: Show the config of analysis/alerts in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/alerts" headers: Authorization: "Bearer {test_login_token}" - response: - status_code: 200 - save: - json: - wazuh_version: data.api_version - - - name: Get available updates - request: - verify: false - url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" method: GET - headers: - Authorization: "Bearer {test_login_token}" response: status_code: 200 json: error: 0 data: - last_check_date: !anystr - current_version: "v{wazuh_version:s}" - update_check: true - uuid: !anystr - verify_response_with: - - function: tavern_utils:validate_update_check_response + affected_items: + - alerts: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - - name: Get available updates with force option + - name: Show the config of analysis/command in the manager request: - verify: false - url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" - method: GET + verify: False + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/command" headers: Authorization: "Bearer {test_login_token}" - params: - force_query: "true" + method: GET response: status_code: 200 json: error: 0 data: - last_check_date: !anystr - current_version: "v{wazuh_version:s}" - update_check: true - uuid: !anystr - verify_response_with: - - function: tavern_utils:validate_update_check_response - - ---- -test_name: GET /manager/version/check with update_check disabled - -stages: + affected_items: + - command: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - - name: Get wazuh version + - name: Show the config of analysis/internal in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/internal" headers: Authorization: "Bearer {test_login_token}" + method: GET response: status_code: 200 - save: - json: - wazuh_version: data.api_version + json: + error: 0 + data: + affected_items: + - internal: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - - name: Disable the update check + - name: Show the config of auth in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" - method: PUT - data: "{valid_ossec_conf_with_update_check_disabled:s}" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/auth/auth" headers: Authorization: "Bearer {test_login_token}" - content-type: 
application/octet-stream + method: GET response: status_code: 200 json: + error: 0 data: affected_items: - - 'manager' + - auth: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 - error: 0 - - name: Restart manager to apply the configuration + - name: Show the config of com/internal in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/restart" - method: PUT + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/com/internal" headers: Authorization: "Bearer {test_login_token}" + method: GET response: status_code: 200 json: error: 0 data: affected_items: - - !anystr + - internal: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 - delay_after: 50 - - name: Get available updates + - name: Show the config of logcollector/localfile in the manager request: - verify: false - url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" - method: GET + verify: False + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/logcollector/localfile" headers: Authorization: "Bearer {test_login_token}" + method: GET response: status_code: 200 json: error: 0 data: - last_check_date: '' - current_version: "v{wazuh_version:s}" - update_check: false - verify_response_with: - - function: tavern_utils:validate_update_check_response + affected_items: + - localfile: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - - name: Get available updates with force option + - name: Show the config of logcollector/socket in the manager request: - verify: false - url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" - method: GET + verify: False + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/logcollector/socket" headers: Authorization: "Bearer {test_login_token}" - params: - force_query: "true" + method: GET response: status_code: 200 json: error: 0 data: - last_check_date: '' - current_version: "v{wazuh_version:s}" - update_check: false - verify_response_with: - - function: tavern_utils:validate_update_check_response + affected_items: [] + failed_items: [] + total_affected_items: 0 + total_failed_items: 0 - - name: Enable the update check + - name: Show the config of logcollector/internal in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" - method: PUT - data: "{valid_ossec_conf:s}" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/logcollector/internal" headers: Authorization: "Bearer {test_login_token}" - content-type: application/octet-stream + method: GET response: status_code: 200 json: + error: 0 data: affected_items: - - 'manager' + - internal: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 - error: 0 - - name: Restart manager to apply the configuration + - name: Show the config of monitor/internal in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/restart" - method: PUT + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/monitor/internal" headers: Authorization: "Bearer {test_login_token}" + method: GET response: status_code: 200 json: error: 0 data: affected_items: - - !anystr + - monitord: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 - delay_after: 50 - ---- -test_name: GET /manager/version/check with update check service error - -stages: - - name: Set an invalid CTI url + - name: Show the config of monitor/reports in the manager request: verify: False - url: 
"{protocol:s}://{host:s}:{port:d}/manager/configuration" - method: PUT - data: "{ossec_conf_with_invalid_cti_url:s}" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/monitor/reports" headers: Authorization: "Bearer {test_login_token}" - content-type: application/octet-stream + method: GET response: status_code: 200 json: + error: 0 data: affected_items: - - 'manager' - failed_items: [] + - reports: !anything + failed_items: [ ] total_affected_items: 1 total_failed_items: 0 - error: 0 - - name: Restart manager to apply the configuration + - name: Show the config of request/remote in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/restart" - method: PUT + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/request/remote" headers: Authorization: "Bearer {test_login_token}" + method: GET response: status_code: 200 json: error: 0 data: affected_items: - - !anystr + - remote: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 - delay_after: 50 - - name: Try to get available updates + - name: Show the config of request/internal in the manager request: - verify: false - url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" - method: GET + verify: False + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/request/internal" headers: Authorization: "Bearer {test_login_token}" - response: - status_code: 500 - json: - error: 2100 - - - name: Try to get available updates with force option - request: - verify: false - url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" method: GET - headers: - Authorization: "Bearer {test_login_token}" - params: - force_query: "true" response: - status_code: 500 + status_code: 200 json: - error: 2100 - ---- -test_name: GET /manager/logs - -stages: - - # GET /manager/logs - - name: Request - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET - headers: - Authorization: "Bearer {test_login_token}" - response: - status_code: 200 - json: - error: 0 - data: - affected_items: !anything - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + error: 0 + data: + affected_items: + - internal: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - # GET /manager/logs - - name: Filters -> limit=4 + - name: Show the config of syscheck/syscheck in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/syscheck/syscheck" headers: Authorization: "Bearer {test_login_token}" - params: - limit: 4 - response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - &manager_log - description: !anystr - level: !anystr - tag: !anystr - timestamp: !anystr - - <<: *manager_log - - <<: *manager_log - - <<: *manager_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 - - # GET /manager/logs - - name: Filters -> limit=2, sort=-level - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET - headers: - Authorization: "Bearer {test_login_token}" - params: - limit: 2 - sort: -level response: - verify_response_with: - - function: tavern_utils:test_sort_response - extra_kwargs: - key: "level" - reverse: true status_code: 200 + json: + error: 0 + data: + affected_items: + - syscheck: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - # GET /manager/logs - - name: Filters -> offset=3, limit=3 - 
request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET - headers: - Authorization: "Bearer {test_login_token}" - params: - limit: 3 - offset: 3 - response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - <<: *manager_log - - <<: *manager_log - - <<: *manager_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 - - # GET /manager/logs - - name: Filters -> offset=3, level=debug, limit=4 + - name: Show the config of syscheck/rootcheck in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/syscheck/rootcheck" headers: Authorization: "Bearer {test_login_token}" - params: - limit: 4 - offset: 3 - level: debug + method: GET response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - <<: *manager_log - - <<: *manager_log - - <<: *manager_log - - <<: *manager_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + status_code: 200 + json: + error: 0 + data: + affected_items: + - rootcheck: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - # GET /manager/logs - - name: Filters -> tag=wazuh-modulesd, limit=1 + - name: Show the config of syscheck/internal in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/syscheck/internal" headers: Authorization: "Bearer {test_login_token}" - params: - tag: wazuh-modulesd - limit: 1 + method: GET response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - <<: *manager_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + status_code: 200 + json: + error: 0 + data: + affected_items: + - internal: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - # GET /manager/logs - - name: Filters -> tag=wazuh-analysisd, limit=1 + - name: Show the config of wazuh-db/internal in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/wazuh-db/internal" method: GET headers: Authorization: "Bearer {test_login_token}" - params: - tag: wazuh-analysisd - limit: 1 response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - <<: *manager_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + status_code: 200 + json: + error: !anyint + data: + affected_items: + - wazuh_db: + commit_time_max: !anyint + commit_time_min: !anyint + open_db_limit: !anyint + worker_pool_size: !anyint + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - - name: Filters by query (tag=wazuh-modulesd, level=debug) + - name: Show the config of wazuh-db/wdb in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/wazuh-db/wdb" method: GET headers: Authorization: "Bearer {test_login_token}" - params: - q: tag=wazuh-modulesd;level=debug response: status_code: 200 - verify_response_with: - - function: tavern_utils:test_expected_value - extra_kwargs: - key: "tag" - expected_values: "wazuh-modulesd" - - function: tavern_utils:test_expected_value - extra_kwargs: - key: "level" - expected_values: "debug" + json: + error: !anyint + data: + affected_items: + - wdb: + backup: + - database: !anystr + enabled: !anybool 
+ interval: !anyint + max_files: !anyint + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - - name: Filters by query (timestamp<2021-07-01) + - name: Try to show the config of wmodules/wmodules in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/wmodules/wmodules" headers: Authorization: "Bearer {test_login_token}" - params: - q: timestamp<2021-07-01 + method: GET response: status_code: 200 json: error: 0 data: - affected_items: [] + affected_items: + - wmodules: !anything failed_items: [] - total_affected_items: 0 + total_affected_items: 1 total_failed_items: 0 - - name: Filter by non-existent tag + - name: Try to show the invalid config of component in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/agent/wmodules" + headers: + Authorization: "Bearer {test_login_token}" method: GET + response: + status_code: 400 + json: + error: 1128 + +--- +test_name: PUT /manager/configuration + +stages: + + # PUT /manager/configuration + - name: Upload a valid configuration + request: + verify: False + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + method: PUT + data: "{valid_ossec_conf:s}" headers: Authorization: "Bearer {test_login_token}" - params: - tag: wazuh-unknown-daemon + content-type: application/octet-stream response: status_code: 200 json: - error: 0 data: - affected_items: [] + affected_items: + - 'manager' failed_items: [] - total_affected_items: 0 + total_affected_items: 1 total_failed_items: 0 + error: 0 - - name: Read logs using valid select + # GET /manager/configuration/ + - name: Ensure the new config has been applied by checking a field request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" headers: Authorization: "Bearer {test_login_token}" + method: GET params: - select: 'timestamp,tag' + section: "alerts" + field: "log_alert_level" response: - verify_response_with: - # Check response item keys are the selected keys - function: tavern_utils:test_select_key_affected_items - extra_kwargs: - select_key: 'timestamp,tag' status_code: 200 json: error: 0 data: - total_affected_items: !anyint + affected_items: + - alerts: + log_alert_level: '300' failed_items: [] + total_affected_items: 1 total_failed_items: 0 - - name: Try to read logs using invalid select + + # PUT /manager/configuration + - name: Try to upload an invalid configuration request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + method: PUT + data: "{invalid_ossec_conf:s}" headers: Authorization: "Bearer {test_login_token}" - params: - select: 'noexists' + content-type: application/octet-stream response: - status_code: 400 - json: &invalid_select - error: 1724 + status_code: 200 + json: + error: 1 + data: + affected_items: [] + failed_items: + - error: + code: 1113 + id: + - 'manager' + total_affected_items: 0 + total_failed_items: 1 - - name: Get distinct manager logs + # PUT /manager/configuration + - name: Try to upload an empty configuration request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + method: PUT + data: "{invalid_ossec_conf:s}" headers: Authorization: 
"Bearer {test_login_token}" - params: - distinct: true + content-type: application/octet-stream response: status_code: 200 - verify_response_with: - function: tavern_utils:test_distinct_key - ---- -test_name: GET /manager/logs/summary + json: + error: 1 + data: + affected_items: [] + failed_items: + - error: + code: 1113 + id: + - 'manager' + total_affected_items: 0 + total_failed_items: 1 -stages: + # PUT /manager/configuration + - name: Try to upload an invalid configuration with an invalid content-type + request: + verify: False + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + method: PUT + data: "{invalid_ossec_conf:s}" + headers: + Authorization: "Bearer {test_login_token}" + content-type: application/json + response: + status_code: 406 + json: + error: 6002 - # GET /manager/logs/summary - - name: Request + # GET /manager/configuration/ + - name: Ensure the config didn't change request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs/summary" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" headers: Authorization: "Bearer {test_login_token}" + method: GET + params: + section: "alerts" + field: "log_alert_level" response: status_code: 200 json: error: 0 data: - affected_items: !anything + affected_items: + - alerts: + log_alert_level: '300' failed_items: [] - total_affected_items: !anyint + total_affected_items: 1 total_failed_items: 0 --- From 73619138eeea676b0f949a47583a69d6ee244d1e Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Mon, 6 May 2024 15:24:19 -0300 Subject: [PATCH 042/419] Remove all manager name references. --- .../indexer/template/index-template.json | 8 ---- .../qa/test_data_policy/001/expected_003.out | 4 +- .../qa/test_data_policy/002/expected_003.out | 4 +- .../src/databaseFeedManager/globalData.hpp | 19 --------- .../scanOrchestrator/eventDetailsBuilder.hpp | 1 - .../src/scanOrchestrator/scanContext.hpp | 11 +---- .../src/scanOrchestrator/scanOrchestrator.hpp | 42 ------------------- .../tests/mocks/TrampolineScanContext.hpp | 9 ---- .../tests/unit/eventDetailsBuilder_test.cpp | 24 ----------- .../tests/unit/globalData_test.cpp | 8 ---- .../tests/unit/scanOrchestrator_test.cpp | 14 ------- src/wazuh_modules/wm_vulnerability_scanner.c | 2 +- 12 files changed, 6 insertions(+), 140 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json b/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json index e28984128cb..7468d2edad0 100644 --- a/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json +++ b/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json @@ -255,14 +255,6 @@ } } }, - "manager": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, "schema": { "properties": { "version": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out index 119850d6250..781bc68fc2c 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out @@ -5,9 +5,9 @@ "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", "Fetched 2 agents from Wazuh-DB.", "Translation for package 'gzip' in platform 'ubuntu' not found. 
Using provided packageName.", - "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'agent_ubuntu_22' (ID: '000', Version: 'v4.7.1').", + "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'manager' (ID: '000', Version: 'v4.7.1').", "Scanning package - 'gzip' (Installed Version: 1.10-0ubuntu4.1, Security Vulnerability: CVE-2022-1271). Identified vulnerability: Version: 0. Required Version Threshold: 1.10-4ubuntu4. Required Version Threshold (or Equal): .", - "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to ''). - Agent 'agent_ubuntu_22' (ID: '000', Version: 'v4.7.1').", + "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to ''). - Agent 'manager' (ID: '000', Version: 'v4.7.1').", "Vulnerability scan for package 'gzip' on Agent '000' has completed.", "Inserting agent package key: 000_040334345fd47ab6e72026cf3c45640456198fb4 -> CVE-2022-1271", "Processing and publish key: CVE-2022-1271", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out index 5753c1bf47c..966251c8b0b 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out @@ -2,9 +2,9 @@ "Vulnerability scanner module started", "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", "Translation for package 'gzip' in platform 'ubuntu' not found. Using provided packageName.", - "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'agent_ubuntu_22' (ID: '000', Version", + "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'manager' (ID: '000', Version", "Scanning package - 'gzip' (Installed Version: 1.10-0ubuntu4.1, Security Vulnerability: CVE-2022-1271). Identified vulnerability: Version: 0. Required Version Threshold: 1.10-4ubuntu4. Required Version Threshold (or Equal): .", - "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to ''). - Agent 'agent_ubuntu_22' (ID: '000', Version:", + "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to ''). 
- Agent 'manager' (ID: '000', Version:", "Vulnerability scan for package 'gzip' on Agent '000' has completed.", "Inserting agent package key: 000_040334345fd47ab6e72026cf3c45640456198fb4 -> CVE-2022-1271", "Processing and publish key: CVE-2022-1271", diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/globalData.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/globalData.hpp index 9ff962977d5..4396def3ead 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/globalData.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/globalData.hpp @@ -26,7 +26,6 @@ class GlobalData final : public Singleton nlohmann::json m_vendorMaps; nlohmann::json m_osCpeMaps; nlohmann::json m_cnaMappings; - std::string m_managerName; public: /** @@ -49,15 +48,6 @@ class GlobalData final : public Singleton m_osCpeMaps = osCpe; } - /** - * @brief Set manager name. - * @param managerName Manager name. - */ - void managerName(std::string_view managerName) - { - m_managerName = managerName; - } - /** * @brief Set CNA mappings. * @param cnaMappings CNA mappings. @@ -97,15 +87,6 @@ class GlobalData final : public Singleton std::shared_lock lock(m_mutex); return m_cnaMappings; } - - /** - * @brief Get manager name. - * @return Manager name. - */ - std::string_view managerName() const - { - return m_managerName; - } }; #endif // _GLOBALDATA_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp index 1a76209f44b..96bc33502b7 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp @@ -226,7 +226,6 @@ class TEventDetailsBuilder final : public AbstractHandlermanagerName(); ecsData["wazuh"]["schema"]["version"] = WAZUH_SCHEMA_VERSION; json["data"] = std::move(ecsData); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index 581d5cb1afc..c21773e3db4 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -1161,7 +1161,7 @@ struct TScanContext final { if (agentId() == "000") { - return managerName(); + return "manager"; } return extractData( @@ -1407,15 +1407,6 @@ struct TScanContext final return m_osData.cpeName; } - /** - * @brief Gets manager name. - * @return Manager name. - */ - std::string_view managerName() const - { - return TGlobalData::instance().managerName(); - } - /** * @brief Gets the message type. 
* diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp index a3e8aabe1de..09cdc55c7e2 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp @@ -102,47 +102,6 @@ class TScanOrchestrator final : public TOSPrimitives inventoryDatabase, std::move(reportDispatcher)); - nlohmann::json response; // JSON object to store the response from the database - std::string managerName; // String to store the retrieved manager name - - // Try to query the database to retrieve the manager name - try - { - // Query the database to get the manager name - static TSocketDBWrapper socketDBWrapper(WDB_SOCKET); - socketDBWrapper.query(WazuhDBQueryBuilder::builder().globalSelectCommand("agent-name 000").build(), - response); - - // If the response is not empty, retrieve the manager name - if (!response.empty()) - { - managerName = response.front().at("name"); - } - else - { - // If the response is empty, throw an exception - throw std::runtime_error( - "Failed to retrieve manager name. The response from the global database was empty."); - } - } - // Catch any exceptions that occur during the database query - catch (const std::exception& e) - { - // Log a warning indicating the failure to retrieve the manager name - logWarn(WM_VULNSCAN_LOGTAG, "%s, Using the hostname by fallback.", e.what()); - - // Define the maximum size for the hostname - constexpr auto MAX_HOSTNAME_SIZE = 256; - char managerNameRaw[MAX_HOSTNAME_SIZE] = {0}; - - // Get the hostname and store it in the managerName string - TOSPrimitives::gethostname(managerNameRaw, MAX_HOSTNAME_SIZE); - - managerName = managerNameRaw; - } - - // Set the retrieved manager name in the global data object - GlobalData::instance().managerName(managerName); initEventDelayedDispatcher(); } ~TScanOrchestrator() = default; @@ -295,7 +254,6 @@ class TScanOrchestrator final : public TOSPrimitives * @brief Indexer connector. * */ - std::string m_managerName; std::unique_ptr m_inventoryDatabase; std::shared_ptr m_osOrchestration; std::shared_ptr m_packageInsertOrchestration; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineScanContext.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineScanContext.hpp index cd9c55cee1e..9437488621a 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineScanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineScanContext.hpp @@ -59,15 +59,6 @@ struct TrampolineTScanContext final return spScanContext->getType(); } - /** - * @brief Gets manager name. - * @return Manager name. - */ - std::string_view managerName() const - { - return spScanContext->managerName(); - } - /** * @brief Gets agent id. * @return Agent id. 
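With managerName() removed, events produced for the manager itself (agent id "000") are reported under the fixed agent name "manager", as the scanContext change and the updated expected outputs above show; any other agent keeps the name carried in the event. The rule can be restated as a tiny standalone program (illustrative only; resolveAgentName() is a made-up helper, not part of the scanner code):

    // Illustrative sketch of the agent-name rule introduced above.
    #include <cassert>
    #include <string_view>

    std::string_view resolveAgentName(std::string_view agentId, std::string_view eventAgentName)
    {
        // Agent "000" is the manager itself; it is always reported as "manager".
        return agentId == "000" ? std::string_view {"manager"} : eventAgentName;
    }

    int main()
    {
        assert(resolveAgentName("000", "agent_ubuntu_22") == "manager");          // manager events
        assert(resolveAgentName("001", "agent_ubuntu_22") == "agent_ubuntu_22");  // regular agents
        return 0;
    }
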
diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp index f63534e3f74..b02ce2d09ff 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp @@ -332,14 +332,6 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS2) GetVulnerabilityDescription(fbBuilder.GetBufferPointer())->datePublished()->c_str()); EXPECT_TRUE(elementData.at("vulnerability").at("detected_at").get_ref() <= Utils::getCurrentISO8601()); - - auto managerName = scanContext->managerName(); - constexpr auto MAX_HOSTNAME_SIZE = 256; - std::string realManagerName; - realManagerName.reserve(MAX_HOSTNAME_SIZE); - ::gethostname(realManagerName.data(), realManagerName.size()); - EXPECT_STREQ(elementData.at("wazuh").at("manager").at("name").get_ref().c_str(), - managerName.empty() ? realManagerName.data() : managerName.data()); } TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS3) @@ -519,14 +511,6 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS3) GetVulnerabilityDescription(fbBuilder.GetBufferPointer())->datePublished()->c_str()); EXPECT_TRUE(elementData.at("vulnerability").at("detected_at").get_ref() <= Utils::getCurrentISO8601()); - - auto managerName = scanContext->managerName(); - constexpr auto MAX_HOSTNAME_SIZE = 256; - std::string realManagerName; - realManagerName.reserve(MAX_HOSTNAME_SIZE); - ::gethostname(realManagerName.data(), realManagerName.size()); - EXPECT_STREQ(elementData.at("wazuh").at("manager").at("name").get_ref().c_str(), - managerName.empty() ? realManagerName.data() : managerName.data()); } TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageDeleted) @@ -771,12 +755,4 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulOsInserted) GetVulnerabilityDescription(fbBuilder.GetBufferPointer())->datePublished()->c_str()); EXPECT_TRUE(elementData.at("vulnerability").at("detected_at").get_ref() <= Utils::getCurrentISO8601()); - - auto managerName = scanContext->managerName(); - constexpr auto MAX_HOSTNAME_SIZE = 256; - std::string realManagerName; - realManagerName.reserve(MAX_HOSTNAME_SIZE); - ::gethostname(realManagerName.data(), realManagerName.size()); - EXPECT_STREQ(elementData.at("wazuh").at("manager").at("name").get_ref().c_str(), - managerName.empty() ? 
realManagerName.data() : managerName.data()); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/globalData_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/globalData_test.cpp index 2dabe4dda41..ae41ce0abdc 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/globalData_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/globalData_test.cpp @@ -32,11 +32,3 @@ TEST(GlobalDataTest, StoreAndGetOsCpeMap) GlobalData::instance().osCpeMaps(osCpeRulesJson); EXPECT_STREQ(GlobalData::instance().osCpeMaps().dump().c_str(), osCpeRules.c_str()); } - -TEST(GlobalDataTest, StoreAndGetManagerName) -{ - const std::string managerName {"wazuh"}; - - GlobalData::instance().managerName(managerName); - EXPECT_STREQ(GlobalData::instance().managerName().data(), managerName.c_str()); -} diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp index f122665a3b9..d1bf31558f3 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp @@ -302,7 +302,6 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsert) spScanContext = std::make_shared>(syscollectorDelta); spSocketDBWrapperMock = std::make_shared(WDB_SOCKET); - EXPECT_CALL(*spSocketDBWrapperMock, query(_, _)).Times(1).WillOnce(SetArgReferee<1>(MANAGER_NAME)); TScanOrchestratormanagerName().data(), "test"); } TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageDelete) @@ -418,7 +416,6 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageDelete) spScanContext = std::make_shared>(syscollectorDelta); spSocketDBWrapperMock = std::make_shared(WDB_SOCKET); - EXPECT_CALL(*spSocketDBWrapperMock, query(_, _)).Times(1).WillOnce(SetArgReferee<1>(MANAGER_NAME)); TScanOrchestratormanagerName().data(), "test"); } TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixInsert) @@ -534,7 +530,6 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixInsert) spScanContext = std::make_shared>(syscollectorDelta); spSocketDBWrapperMock = std::make_shared(WDB_SOCKET); - EXPECT_CALL(*spSocketDBWrapperMock, query(_, _)).Times(1).WillOnce(SetArgReferee<1>(MANAGER_NAME)); TScanOrchestratormanagerName().data(), "test"); } TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixDelete) @@ -650,7 +644,6 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixDelete) spScanContext = std::make_shared>(syscollectorDelta); spSocketDBWrapperMock = std::make_shared(WDB_SOCKET); - EXPECT_CALL(*spSocketDBWrapperMock, query(_, _)).Times(1).WillOnce(SetArgReferee<1>(MANAGER_NAME)); TScanOrchestratormanagerName().data(), "test"); } TEST_F(ScanOrchestratorTest, TestRunScannerTypeOs) @@ -751,7 +743,6 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeOs) spScanContext = std::make_shared>(syscollectorDelta); spSocketDBWrapperMock = std::make_shared(WDB_SOCKET); - EXPECT_CALL(*spSocketDBWrapperMock, query(_, _)).Times(1).WillOnce(SetArgReferee<1>(MANAGER_NAME)); TScanOrchestratormanagerName().data(), "test"); } TEST_F(ScanOrchestratorTest, TestRunScannerTypeIntegrityClear) @@ -869,7 +859,6 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeIntegrityClear) spScanContext = std::make_shared>(syscollectorSynchronization); spSocketDBWrapperMock = std::make_shared(WDB_SOCKET); - EXPECT_CALL(*spSocketDBWrapperMock, query(_, _)).Times(1).WillOnce(SetArgReferee<1>(MANAGER_NAME)); TScanOrchestratormanagerName().data(), "test"); } 
TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsertInDelayed) @@ -985,7 +973,6 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsertInDelayed) spScanContext = std::make_shared>(syscollectorDelta); spSocketDBWrapperMock = std::make_shared(WDB_SOCKET); - EXPECT_CALL(*spSocketDBWrapperMock, query(_, _)).Times(1).WillOnce(SetArgReferee<1>(MANAGER_NAME)); TScanOrchestratormanagerName().data(), "test"); // The sleep is necessary to wait for the delayed event to be processed std::this_thread::sleep_for(std::chrono::seconds(10)); } diff --git a/src/wazuh_modules/wm_vulnerability_scanner.c b/src/wazuh_modules/wm_vulnerability_scanner.c index 051e7472370..64168e53369 100644 --- a/src/wazuh_modules/wm_vulnerability_scanner.c +++ b/src/wazuh_modules/wm_vulnerability_scanner.c @@ -139,7 +139,7 @@ cJSON* wm_vulnerability_scanner_dump(wm_vulnerability_scanner_t * data) { cJSON *root = cJSON_CreateObject(); cJSON_AddItemToObject(root, "vulnerability-detection", cJSON_Duplicate(data->vulnerability_detection, TRUE)); - cJSON_DeleteItemFromObject(cJSON_GetObjectItem(root, "vulnerability-detection"), "indexer-status"); + cJSON_DeleteItemFromObject(cJSON_GetObjectItem(root, "vulnerability-detection"), "index-status"); cJSON_DeleteItemFromObject(cJSON_GetObjectItem(root, "vulnerability-detection"), "cti-url"); cJSON_DeleteItemFromObject(cJSON_GetObjectItem(root, "vulnerability-detection"), "clusterName"); From 9e6377a4048b2424d63e902f368bc98c0dcfaddc Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Wed, 8 May 2024 22:43:04 -0300 Subject: [PATCH 043/419] Use hostname in cluster name field when cluster is disabled. --- src/shared/cluster_utils.c | 27 ++++++++ .../src/policyManager/policyManager.hpp | 35 +++++++++-- .../src/scanOrchestrator/scanContext.hpp | 3 +- src/wazuh_modules/wm_vulnerability_scanner.c | 61 +++++++++++-------- 4 files changed, 93 insertions(+), 33 deletions(-) diff --git a/src/shared/cluster_utils.c b/src/shared/cluster_utils.c index cbf6e1cf65f..5e455d52ebd 100644 --- a/src/shared/cluster_utils.c +++ b/src/shared/cluster_utils.c @@ -171,3 +171,30 @@ char *get_cluster_name(void) { return cluster_name; } + +/** + * Get the cluster status + * @return true if the cluster is enabled, false otherwise + */ +bool get_cluster_status(void) { + OS_XML xml; + const char * xmlf[] = {"ossec_config", "cluster", "disabled", NULL}; + const char *cfgfile = OSSECCONF; + bool cluster_status = false; + + if (OS_ReadXML(cfgfile, &xml) < 0) { + mdebug1(XML_ERROR, cfgfile, xml.err, xml.err_line); + } else { + char *status = OS_GetOneContentforElement(&xml, xmlf); + if (status) { + if (strcmp(status, "no") == 0) { + cluster_status = true; + } + free(status); + } + } + + OS_ClearXML(&xml); + + return cluster_status; +} diff --git a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp index 33fb086774f..65707a47a56 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp @@ -117,9 +117,7 @@ class PolicyManager final : public Singleton } newPolicy["indexer"]["name"] = STATES_VD_INDEX_NAME_PREFIX + - (newPolicy.at("vulnerability-detection").contains("clusterName") - ? 
newPolicy.at("vulnerability-detection").at("clusterName").get_ref() - : "default"); + newPolicy.at("vulnerability-detection").at("clusterName").get_ref(); if (!newPolicy.at("vulnerability-detection").contains("feed-update-interval")) { @@ -691,9 +689,36 @@ class PolicyManager final : public Singleton * * @return std::string_view The name of the manager node for vulnerability detection. */ - std::string_view getManagerNodeName() const + std::string_view getClusterNodeName() const { - return m_configuration.at("managerNodeName").get_ref(); + if (!m_configuration.contains("clusterNodeName")) + { + return UNKNOWN_VALUE; + } + else + { + return m_configuration.at("clusterNodeName").get_ref(); + } + } + + /** + * @brief Get status of the cluster. + * This function retrieves the cluster status from the configuration and returns it as a bool + * @return bool cluster status. + */ + bool getClusterStatus() const + { + return m_configuration.at("vulnerability-detection").at("clusterEnabled").get(); + } + + /** + * @brief Get cluster name. + * This function retrieves the cluster name from the configuration and returns it as a std::string + * @return std::string cluster name. + */ + std::string getClusterName() const + { + return m_configuration.at("vulnerability-detection").at("clusterName").get_ref(); } /** diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index c21773e3db4..4d973743606 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -1395,7 +1395,8 @@ struct TScanContext final */ std::string_view clusterName() const { - return ""; + static std::string clusterName = PolicyManager::instance().getClusterName(); + return clusterName; } /** diff --git a/src/wazuh_modules/wm_vulnerability_scanner.c b/src/wazuh_modules/wm_vulnerability_scanner.c index 64168e53369..e331a443fe0 100644 --- a/src/wazuh_modules/wm_vulnerability_scanner.c +++ b/src/wazuh_modules/wm_vulnerability_scanner.c @@ -45,38 +45,54 @@ static void wm_vulnerability_scanner_log_config(cJSON * config_json) } } -void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t * data) -{ +void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t * data) { mtinfo(WM_VULNERABILITY_SCANNER_LOGTAG, "Starting vulnerability_scanner module."); - if (vulnerability_scanner_module = so_get_module_handle("vulnerability_scanner"), vulnerability_scanner_module) - { + if (vulnerability_scanner_module = so_get_module_handle("vulnerability_scanner"), vulnerability_scanner_module) { vulnerability_scanner_start_ptr = so_get_function_sym(vulnerability_scanner_module, "vulnerability_scanner_start"); vulnerability_scanner_stop_ptr = so_get_function_sym(vulnerability_scanner_module, "vulnerability_scanner_stop"); // Check for missing configurations. These configurations may miss when using the old deprecated VD config. 
-        if (!cJSON_GetObjectItem(data->vulnerability_detection, "enabled"))
-        {
+        if (!cJSON_GetObjectItem(data->vulnerability_detection, "enabled")) {
             cJSON_AddStringToObject(data->vulnerability_detection, "enabled", "yes");
         }
-        if (!cJSON_GetObjectItem(data->vulnerability_detection, "index-status"))
-        {
+
+        if (!cJSON_GetObjectItem(data->vulnerability_detection, "index-status")) {
             cJSON_AddStringToObject(data->vulnerability_detection, "index-status", "yes");
         }
-        if (!cJSON_GetObjectItem(data->vulnerability_detection, "feed-update-interval"))
-        {
+
+        if (!cJSON_GetObjectItem(data->vulnerability_detection, "feed-update-interval")) {
             cJSON_AddStringToObject(data->vulnerability_detection, "feed-update-interval", "60m");
         }
-        if (!cJSON_GetObjectItem(data->vulnerability_detection, "clusterName"))
-        {
+
+        /* Add the cluster name to the vulnerability detection configuration.
+         * If the cluster is enabled, the cluster name is the one read from the configuration file.
+         * If the cluster is disabled, the hostname (i.e. the manager name) is used instead.
+         */
+        const bool cluster_status = get_cluster_status();
+        cJSON_AddBoolToObject(data->vulnerability_detection, "clusterEnabled", cluster_status);
+
+        if (cluster_status) {
             char* cluster_name = get_cluster_name();
             cJSON_AddStringToObject(data->vulnerability_detection, "clusterName", cluster_name);
             os_free(cluster_name);
+
+            char* manager_node_name = get_node_name();
+            cJSON_AddStringToObject(data->vulnerability_detection, "managerNodeName", manager_node_name);
+            os_free(manager_node_name);
+        }
+        else {
+            char hostname[HOST_NAME_MAX + 1];
+            if (gethostname(hostname, HOST_NAME_MAX) == 0) {
+                cJSON_AddStringToObject(data->vulnerability_detection, "clusterName", hostname);
+            }
+            else {
+                cJSON_AddStringToObject(data->vulnerability_detection, "clusterName", "undefined");
+            }
         }
-        if (vulnerability_scanner_start_ptr)
-        {
+        if (vulnerability_scanner_start_ptr) {
             cJSON *config_json = cJSON_CreateObject();
             cJSON_AddItemToObject(config_json, "vulnerability-detection", cJSON_Duplicate(data->vulnerability_detection, TRUE));
             cJSON_AddNumberToObject(config_json, "wmMaxEps", wm_max_eps);
@@ -84,32 +100,23 @@ void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t * data)
             cJSON_AddNumberToObject(config_json, "osdataLRUSize", getDefine_Int("vulnerability-detection", "osdata_lru_size", 1, 100000));
             cJSON_AddNumberToObject(config_json, "managerDisabledScan", getDefine_Int("vulnerability-detection", "disable_scan_manager", 0, 1));
-            char* manager_node_name = get_node_name();
-
-            cJSON_AddStringToObject(config_json, "managerNodeName", manager_node_name);
-            os_free(manager_node_name);
-
-            if(indexer_config == NULL)
-            {
+            if(indexer_config == NULL) {
                 cJSON_AddItemToObject(config_json, "indexer", cJSON_CreateObject());
             }
-            else
-            {
+            else {
                 cJSON_AddItemToObject(config_json, "indexer", cJSON_Duplicate(indexer_config, TRUE));
             }
             wm_vulnerability_scanner_log_config(config_json);
             vulnerability_scanner_start_ptr(mtLoggingFunctionsWrapper, config_json);
             cJSON_Delete(config_json);
         }
-        else
-        {
+        else {
             mtwarn(WM_VULNERABILITY_SCANNER_LOGTAG, "Unable to start vulnerability_scanner module.");
             return NULL;
         }
     }
-    else
-    {
+    else {
         mtwarn(WM_VULNERABILITY_SCANNER_LOGTAG, "Unable to load vulnerability_scanner module.");
         return NULL;
     }

From cd743350eae97b247cd939e00f937fceb384284b Mon Sep 17 00:00:00 2001
From: Dwordcito
Date: Thu, 9 May 2024 03:22:40 -0300
Subject: [PATCH 044/419] Change tests and remove the node_name from the events payload.
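
The node_name is dropped from the per-event payload (schemas, message adapters and test fixtures below) because the manager-side scanner now resolves cluster information from its own configuration: the previous commit reads <cluster><disabled> from ossec.conf and, when the cluster is disabled, falls back to the local hostname as the cluster name. A standalone sketch of that fallback (illustrative only; resolveClusterName() is a made-up helper, "wazuh-cluster" is an example value, and the fixed 256-byte buffer is an assumption, the module uses HOST_NAME_MAX):

    // Illustrative sketch of the cluster-name fallback, not the module code itself (POSIX gethostname).
    #include <iostream>
    #include <string>
    #include <unistd.h>

    std::string resolveClusterName(bool clusterEnabled, const std::string& configuredName)
    {
        if (clusterEnabled)
        {
            return configuredName; // name taken from the cluster configuration
        }

        constexpr size_t MAX_HOSTNAME_SIZE = 256; // assumed bound for this sketch
        char hostname[MAX_HOSTNAME_SIZE + 1] = {0};
        return ::gethostname(hostname, MAX_HOSTNAME_SIZE) == 0 ? std::string {hostname}
                                                               : std::string {"undefined"};
    }

    int main()
    {
        std::cout << resolveClusterName(true, "wazuh-cluster") << '\n';  // prints "wazuh-cluster"
        std::cout << resolveClusterName(false, "wazuh-cluster") << '\n'; // prints this host's name
        return 0;
    }
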
--- src/config/wmodules-vulnerability-detection.c | 3 +- src/headers/agent_messages_adapter.h | 6 +- src/headers/cluster_utils.h | 13 ++- src/remoted/secure.c | 4 +- src/shared/agent_messages_adapter.c | 6 +- .../syscollector_deltas.fbs | 1 - .../syscollector_synchronization.fbs | 1 - src/wazuh_db/wdb_integrity.c | 1 - src/wazuh_db/wdb_parser.c | 1 - src/wazuh_db/wdb_upgrade.c | 1 - .../syscollectorDeltas_test.cpp | 36 +++---- .../syscollectorFb_test.cpp | 30 +++--- .../qa/test_data/001/input_001.json | 3 +- .../qa/test_data/001/input_002.json | 3 +- .../qa/test_data/001/input_003.json | 3 +- .../qa/test_data/002/input_001.json | 3 +- .../qa/test_data/002/input_002.json | 3 +- .../qa/test_data/002/input_003.json | 3 +- .../qa/test_data/002/input_004.json | 3 +- .../qa/test_data/003/input_001.json | 3 +- .../qa/test_data/003/input_002.json | 3 +- .../qa/test_data/003/input_003.json | 3 +- .../qa/test_data/003/input_004.json | 3 +- .../qa/test_data/003/input_005.json | 3 +- .../qa/test_data/003/input_006.json | 3 +- .../qa/test_data/003/input_007.json | 3 +- .../qa/test_data/003/input_008.json | 3 +- .../qa/test_data/004/input_001.json | 3 +- .../qa/test_data/004/input_002.json | 3 +- .../qa/test_data/004/input_003.json | 3 +- .../qa/test_data/004/input_004.json | 3 +- .../qa/test_data/004/input_005.json | 3 +- .../qa/test_data/004/input_006.json | 3 +- .../qa/test_data/004/input_007.json | 3 +- .../qa/test_data/004/input_008.json | 3 +- .../qa/test_data/005/input_001.json | 3 +- .../qa/test_data/005/input_002.json | 3 +- .../qa/test_data/006/input_001.json | 3 +- .../qa/test_data/006/input_002.json | 3 +- .../qa/test_data/006/input_003.json | 3 +- .../qa/test_data/006/input_004.json | 3 +- .../qa/test_data/006/input_005.json | 3 +- .../qa/test_data/006/input_006.json | 3 +- .../qa/test_data/007/input_001.json | 3 +- .../qa/test_data/007/input_002.json | 3 +- .../qa/test_data/008/input_001.json | 3 +- .../qa/test_data/008/input_002.json | 3 +- .../qa/test_data/008/input_003.json | 3 +- .../qa/test_data/009/input_001.json | 3 +- .../qa/test_data/009/input_002.json | 3 +- .../qa/test_data/010/input_001.json | 3 +- .../qa/test_data/010/input_002.json | 3 +- .../qa/test_data/010/input_003.json | 3 +- .../qa/test_data/011/input_001.json | 3 +- .../qa/test_data/011/input_002.json | 3 +- .../qa/test_data/011/input_003.json | 3 +- .../qa/test_data/011/input_004.json | 3 +- .../qa/test_data/011/input_005.json | 3 +- .../qa/test_data_policy/001/config.json | 7 +- .../test_data_policy/001/configDisabled.json | 7 +- .../qa/test_data_policy/002/config.json | 4 +- .../002/configManagerDisabled.json | 4 +- .../src/policyManager/policyManager.hpp | 9 +- .../buildAllAgentListContext.hpp | 11 +-- .../buildSingleAgentListContext.hpp | 3 +- .../scanOrchestrator/cleanAgentInventory.hpp | 25 ++--- .../scanOrchestrator/eventDeleteInventory.hpp | 4 + .../scanOrchestrator/eventDetailsBuilder.hpp | 4 + .../scanOrchestrator/eventInsertInventory.hpp | 45 +++++---- .../src/scanOrchestrator/scanAgentList.hpp | 6 +- .../src/scanOrchestrator/scanContext.hpp | 52 ++++------ .../scanOrchestrator/scanInventorySync.hpp | 14 ++- .../src/vulnerabilityScannerFacade.cpp | 2 - .../tests/unit/alertClearBuilder_test.cpp | 7 +- .../tests/unit/cleanAgentInventory_test.cpp | 8 +- .../tests/unit/cleanInventory_test.cpp | 5 +- .../tests/unit/clearSendReport_test.cpp | 9 +- .../tests/unit/eventDeleteInventory_test.cpp | 3 +- .../tests/unit/eventDetailsBuilder_test.cpp | 13 ++- .../tests/unit/eventInsertInventory_test.cpp | 3 +- 
.../eventPackageAlertDetailsBuilder_test.cpp | 10 +- .../tests/unit/eventSendReport_test.cpp | 12 +-- .../tests/unit/eventSendReport_test.hpp | 4 +- .../tests/unit/osDataCache_test.hpp | 4 +- .../tests/unit/packageScanner_test.cpp | 6 +- .../tests/unit/policyManager_test.cpp | 53 +++++------ .../tests/unit/resultIndexer_test.cpp | 3 +- .../tests/unit/scanAgentList_test.cpp | 4 +- .../tests/unit/scanContext_test.cpp | 95 ++++--------------- .../tests/unit/scanOrchestrator_test.cpp | 18 ++-- .../unit/scanOsAlertDetailsBuilder_test.cpp | 7 +- .../testtool/scanner/TC-001/step_0.json | 3 +- .../testtool/scanner/TC-001/step_1.json | 3 +- .../testtool/scanner/TC-001/step_2.json | 3 +- .../testtool/scanner/TC-001/step_3.json | 3 +- .../testtool/scanner/TC-002/step_0.json | 3 +- .../testtool/scanner/TC-002/step_1.json | 3 +- .../testtool/scanner/TC-003/step_0.json | 3 +- .../testtool/scanner/TC-003/step_1.json | 3 +- .../testtool/scanner/TC-004/step_0.json | 3 +- .../testtool/scanner/TC-005/step_0.json | 3 +- .../testtool/scanner/TC-005/step_1.json | 3 +- .../testtool/scanner/TC-006/step_0.json | 3 +- .../testtool/scanner/TC-006/step_1.json | 3 +- .../testtool/scanner/TC-006/step_2.json | 5 +- .../testtool/scanner/TC-006/step_3.json | 3 +- .../testtool/scanner/TC-006/step_4.json | 5 +- .../testtool/scanner/TC-006/step_5.json | 3 +- .../testtool/scanner/TC-007/step_0.json | 3 +- .../testtool/scanner/TC-007/step_1.json | 3 +- .../testtool/scanner/TC-007/step_2.json | 3 +- .../testtool/scanner/TC-007/step_3.json | 3 +- .../testtool/scanner/TC-007/step_4.json | 3 +- .../testtool/scanner/TC-007/step_5.json | 3 +- .../testtool/scanner/TC-008/step_0.json | 3 +- .../testtool/scanner/TC-008/step_1.json | 3 +- .../testtool/scanner/TC-008/step_2.json | 3 +- .../testtool/scanner/config.json | 7 +- .../testtool/scanner/file1.json | 3 +- .../testtool/scanner/file2.json | 3 +- .../testtool/scanner/file3.json | 3 +- .../testtool/scanner/file4.json | 3 +- src/wazuh_modules/wm_syscollector.c | 7 +- src/wazuh_modules/wm_vulnerability_scanner.c | 95 ++++++++++++------- 124 files changed, 387 insertions(+), 518 deletions(-) diff --git a/src/config/wmodules-vulnerability-detection.c b/src/config/wmodules-vulnerability-detection.c index 61c8909466f..60aec94cfdc 100644 --- a/src/config/wmodules-vulnerability-detection.c +++ b/src/config/wmodules-vulnerability-detection.c @@ -23,6 +23,8 @@ static const char * valid_paths[] = { NULL }; +extern bool key_is_in_array(const char * keypath, const char ** psearch); + /** * @brief Function used to read, recursively, the configuration related to vulnerability detection. * @@ -34,7 +36,6 @@ void wm_vulnerability_detection_subnode_read(const OS_XML *xml, XML_NODE nodes, xml_node **children; cJSON * subnode; cJSON * existing_item; - cJSON * array_item; _Config global_config; char * subnode_keypath; size_t subnode_keypath_len; diff --git a/src/headers/agent_messages_adapter.h b/src/headers/agent_messages_adapter.h index cedf8398b10..62f51c15dea 100644 --- a/src/headers/agent_messages_adapter.h +++ b/src/headers/agent_messages_adapter.h @@ -29,10 +29,9 @@ void *agent_data_hash_duplicator(void* data); * @param name Name of the agent. * @param id Id of the agent. * @param ip Ip of the agent. - * @param node_name Node name which the agent is reporting to. * @return char* Returns a string representation of the JSON formatted message. Must be freed by the caller. 
*/ -char* adapt_delta_message(const char* data, const char* name, const char* id, const char* ip, const char* node_name, const OSHash *agent_data_hash); +char* adapt_delta_message(const char* data, const char* name, const char* id, const char* ip, const OSHash *agent_data_hash); /** * @brief Takes a syscollector synchronization message and adapts it to a format compatible with the defined flatbuffer schema. @@ -41,9 +40,8 @@ char* adapt_delta_message(const char* data, const char* name, const char* id, co * @param name Name of the agent. * @param id Id of the agent. * @param ip Ip of the agent. - * @param node_name Node name which the agent is reporting to. * @return char* Returns a string representation of the JSON formatted message. Must be freed by the caller. */ -char* adapt_sync_message(const char* data, const char* name, const char* id, const char* ip, const char* node_name, const OSHash *agent_data_hash); +char* adapt_sync_message(const char* data, const char* name, const char* id, const char* ip, const OSHash *agent_data_hash); #endif // _AGENT_MESSAGES_HELPER_HPP diff --git a/src/headers/cluster_utils.h b/src/headers/cluster_utils.h index 35855f9ec37..df9a20534b1 100644 --- a/src/headers/cluster_utils.h +++ b/src/headers/cluster_utils.h @@ -19,19 +19,22 @@ int w_is_worker(void); * @brief Method to read the configuration file and determine if the cluster is enabled or not. It's also possible * to know if the current node is a worker or the master. * - * @param [out] is_worker If the cluster is enabled, a 1 will be written in case it's a worker node and 0 if it's the master. - * OS_INVALID otherwise. + * @param [out] is_worker If the cluster is enabled, a 1 will be written in case it's a worker node and 0 if it's the + * master. OS_INVALID otherwise. * @return int It'll return 1 if the cluster is enabled or 0 if it isn't. OS_INVALID if the information isn't available. */ int w_is_single_node(int* is_worker); // Returns the master node or "undefined" if any node is specified. The memory should be freed by the caller. -char *get_master_node(void); +char* get_master_node(void); // Returns the node name of the manager in cluster. The memory should be freed by the caller. -char *get_node_name(void); +char* get_node_name(void); // Returns the name of the manager in cluster. The memory should be freed by the caller. -char *get_cluster_name(void); +char* get_cluster_name(void); + +// Returns the cluster status. 1 if the cluster is enabled, 0 if it isn't. 
+bool get_cluster_status(void); #endif diff --git a/src/remoted/secure.c b/src/remoted/secure.c index 87a0daa13f6..0e6c26ddb80 100644 --- a/src/remoted/secure.c +++ b/src/remoted/secure.c @@ -857,9 +857,9 @@ void router_message_forward(char* msg, const char* agent_id, const char* agent_i size_t msg_size = strnlen(msg_start, OS_MAXSTR - message_header_size); if ((msg_size + message_header_size) < OS_MAXSTR) { if (schema_type == MT_SYS_DELTAS) { - msg_to_send = adapt_delta_message(msg_start, agent_name, agent_id, agent_ip, node_name, agent_data_hash); + msg_to_send = adapt_delta_message(msg_start, agent_name, agent_id, agent_ip, agent_data_hash); } else if (schema_type == MT_SYS_SYNC) { - msg_to_send = adapt_sync_message(msg_start, agent_name, agent_id, agent_ip, node_name, agent_data_hash); + msg_to_send = adapt_sync_message(msg_start, agent_name, agent_id, agent_ip, agent_data_hash); } if (msg_to_send) { diff --git a/src/shared/agent_messages_adapter.c b/src/shared/agent_messages_adapter.c index 6d5fcbaf09d..a7ce273722c 100644 --- a/src/shared/agent_messages_adapter.c +++ b/src/shared/agent_messages_adapter.c @@ -18,7 +18,7 @@ void *agent_data_hash_duplicator(void* data) { return cJSON_Duplicate((cJSON*)data, true); } -char* adapt_delta_message(const char* data, const char* name, const char* id, const char* ip, const char* node_name, const OSHash *agent_data_hash) { +char* adapt_delta_message(const char* data, const char* name, const char* id, const char* ip, const OSHash *agent_data_hash) { cJSON* j_msg_to_send = NULL; cJSON* j_agent_info = NULL; cJSON* j_msg = NULL; @@ -56,7 +56,6 @@ char* adapt_delta_message(const char* data, const char* name, const char* id, co cJSON_AddItemToObject(j_agent_info, "agent_version", cJSON_CreateString(__ossec_version)); } - cJSON_AddStringToObject(j_agent_info, "node_name", node_name); cJSON_AddItemToObject(j_msg_to_send, "agent_info", j_agent_info); cJSON_AddItemToObject(j_msg_to_send, "data_type", cJSON_DetachItemFromObject(j_msg, "type")); @@ -72,7 +71,7 @@ char* adapt_delta_message(const char* data, const char* name, const char* id, co return msg_to_send; } -char* adapt_sync_message(const char* data, const char* name, const char* id, const char* ip, const char* node_name, const OSHash *agent_data_hash) { +char* adapt_sync_message(const char* data, const char* name, const char* id, const char* ip, const OSHash *agent_data_hash) { cJSON* j_msg_to_send = NULL; cJSON* j_agent_info = NULL; cJSON* j_msg = NULL; @@ -111,7 +110,6 @@ char* adapt_sync_message(const char* data, const char* name, const char* id, con cJSON_AddItemToObject(j_agent_info, "agent_version", cJSON_CreateString(__ossec_version)); } - cJSON_AddStringToObject(j_agent_info, "node_name", node_name); cJSON_AddItemToObject(j_msg_to_send, "agent_info", j_agent_info); cJSON_AddItemToObject(j_msg_to_send, "data_type", cJSON_DetachItemFromObject(j_msg, "type")); diff --git a/src/shared_modules/utils/flatbuffers/schemas/syscollectorDeltas/syscollector_deltas.fbs b/src/shared_modules/utils/flatbuffers/schemas/syscollectorDeltas/syscollector_deltas.fbs index a9988f913b9..882147bbafb 100644 --- a/src/shared_modules/utils/flatbuffers/schemas/syscollectorDeltas/syscollector_deltas.fbs +++ b/src/shared_modules/utils/flatbuffers/schemas/syscollectorDeltas/syscollector_deltas.fbs @@ -168,7 +168,6 @@ table AgentInfo { agent_ip:string; agent_name:string; agent_version:string; - node_name:string; } table Delta { diff --git 
a/src/shared_modules/utils/flatbuffers/schemas/syscollectorRsync/syscollector_synchronization.fbs b/src/shared_modules/utils/flatbuffers/schemas/syscollectorRsync/syscollector_synchronization.fbs index ad7e08ef8e3..a5fd8dfad15 100644 --- a/src/shared_modules/utils/flatbuffers/schemas/syscollectorRsync/syscollector_synchronization.fbs +++ b/src/shared_modules/utils/flatbuffers/schemas/syscollectorRsync/syscollector_synchronization.fbs @@ -5,7 +5,6 @@ table AgentInfo { agent_ip:string; agent_name:string; agent_version:string; - node_name:string; } table syscollector_hotfixes { diff --git a/src/wazuh_db/wdb_integrity.c b/src/wazuh_db/wdb_integrity.c index f73e76ed817..755cc2571bf 100644 --- a/src/wazuh_db/wdb_integrity.c +++ b/src/wazuh_db/wdb_integrity.c @@ -75,7 +75,6 @@ void wdbi_report_removed(const char* agent_id, wdb_component_t component, sqlite j_data = cJSON_CreateObject(); cJSON_AddStringToObject(j_agent_info, "agent_id", agent_id); - cJSON_AddStringToObject(j_agent_info, "node_name", gconfig.node_name ? gconfig.node_name : ""); cJSON_AddItemToObject(j_msg_to_send, "agent_info", j_agent_info); switch (component) diff --git a/src/wazuh_db/wdb_parser.c b/src/wazuh_db/wdb_parser.c index fa4856139e0..27e4d831a82 100644 --- a/src/wazuh_db/wdb_parser.c +++ b/src/wazuh_db/wdb_parser.c @@ -5521,7 +5521,6 @@ int wdb_parse_global_delete_agent(wdb_t * wdb, char * input, char * output) { j_agent_info = cJSON_CreateObject(); cJSON_AddStringToObject(j_agent_info, "agent_id", input); - cJSON_AddStringToObject(j_agent_info, "node_name", gconfig.node_name ? gconfig.node_name : ""); cJSON_AddItemToObject(j_msg_to_send, "agent_info", j_agent_info); cJSON_AddStringToObject(j_msg_to_send, "action", "deleteAgent"); diff --git a/src/wazuh_db/wdb_upgrade.c b/src/wazuh_db/wdb_upgrade.c index 38177f972d1..79384902eef 100644 --- a/src/wazuh_db/wdb_upgrade.c +++ b/src/wazuh_db/wdb_upgrade.c @@ -86,7 +86,6 @@ wdb_t * wdb_upgrade(wdb_t *wdb) { j_data = cJSON_CreateObject(); cJSON_AddStringToObject(j_agent_info, "agent_id", wdb->id); - cJSON_AddStringToObject(j_agent_info, "node_name", gconfig.node_name ? 
gconfig.node_name : ""); cJSON_AddItemToObject(j_msg_to_send, "agent_info", j_agent_info); cJSON_AddStringToObject(j_msg_to_send, "action", "upgradeAgentDB"); diff --git a/src/wazuh_modules/syscollector/tests/sysCollectorFlatbuffers/syscollectorDeltas_test.cpp b/src/wazuh_modules/syscollector/tests/sysCollectorFlatbuffers/syscollectorDeltas_test.cpp index 5b96034efd7..310d42b92ee 100644 --- a/src/wazuh_modules/syscollector/tests/sysCollectorFlatbuffers/syscollectorDeltas_test.cpp +++ b/src/wazuh_modules/syscollector/tests/sysCollectorFlatbuffers/syscollectorDeltas_test.cpp @@ -27,7 +27,7 @@ bool parseJSON(const std::string& schemaStr, const std::string& jsonStr) TEST_F(SyscollectorDeltasTest, NetIfaceParsingSuccess) { - const std::string deltaNetIface {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_network_iface\",\"data\":{\"adapter\":null,\"checksum\":\"078143285c1aff98e196c8fe7e01f5677f44bd44\"" + const std::string deltaNetIface {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_network_iface\",\"data\":{\"adapter\":null,\"checksum\":\"078143285c1aff98e196c8fe7e01f5677f44bd44\"" ",\"item_id\":\"7a60750dd3c25c53f21ff7f44b4743664ddbb66a\",\"mac\":\"02:bf:67:45:e4:dd\",\"mtu\":1500,\"name\":\"enp0s3\",\"rx_bytes\":972800985" ",\"rx_dropped\":0,\"rx_errors\":0,\"rx_packets\":670863,\"scan_time\":\"2023/08/04 19:56:11\",\"state\":\"up\",\"tx_bytes\":6151606,\"tx_dropped\":0" ",\"tx_errors\":0,\"tx_packets\":84746,\"type\":\"ethernet\"},\"operation\":\"MODIFIED\"}"}; @@ -42,7 +42,7 @@ TEST_F(SyscollectorDeltasTest, NetIfaceParsingSuccess) TEST_F(SyscollectorDeltasTest, NetIfaceParsingInvalid) { // mtu is a string that represents an invalid number. - const std::string deltaNetIface {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_network_iface\",\"data\":{\"adapter\":null,\"checksum\":\"078143285c1aff98e196c8fe7e01f5677f44bd44\"" + const std::string deltaNetIface {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_network_iface\",\"data\":{\"adapter\":null,\"checksum\":\"078143285c1aff98e196c8fe7e01f5677f44bd44\"" ",\"item_id\":\"7a60750dd3c25c53f21ff7f44b4743664ddbb66a\",\"mac\":\"02:bf:67:45:e4:dd\",\"mtu\":\"150x\",\"name\":\"enp0s3\",\"rx_bytes\":972800985" ",\"rx_dropped\":0,\"rx_errors\":0,\"rx_packets\":670863,\"scan_time\":\"2023/08/04 19:56:11\",\"state\":\"up\",\"tx_bytes\":6151606,\"tx_dropped\":0" ",\"tx_errors\":0,\"tx_packets\":84746,\"type\":\"ethernet\"},\"operation\":\"MODIFIED\"}"}; @@ -56,7 +56,7 @@ TEST_F(SyscollectorDeltasTest, NetIfaceParsingInvalid) TEST_F(SyscollectorDeltasTest, NetProtoParsingSuccess) { - const std::string deltaNetProto {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_network_protocol\",\"data\":{\"checksum\":\"ddd971d57316a79738a2cf93143966a4e51ede08\",\"dhcp\":\"unknown\"" + const std::string deltaNetProto {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_network_protocol\",\"data\":{\"checksum\":\"ddd971d57316a79738a2cf93143966a4e51ede08\",\"dhcp\":\"unknown\"" ",\"gateway\":\" \",\"iface\":\"enp0s9\",\"item_id\":\"33228317ee8778628d0f2f4fde53b75b92f15f1d\",\"metric\":\"0\",\"scan_time\":\"2023/08/07 15:02:36\"" ",\"type\":\"ipv4\"},\"operation\":\"DELETED\"}"}; @@ -70,7 +70,7 @@ TEST_F(SyscollectorDeltasTest, NetProtoParsingSuccess) TEST_F(SyscollectorDeltasTest, NetProtoParsingInvalid) { // metric is a number. 
- const std::string deltaNetProto {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_network_protocol\",\"data\":{\"checksum\":\"ddd971d57316a79738a2cf93143966a4e51ede08\",\"dhcp\":\"unknown\"" + const std::string deltaNetProto {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_network_protocol\",\"data\":{\"checksum\":\"ddd971d57316a79738a2cf93143966a4e51ede08\",\"dhcp\":\"unknown\"" ",\"gateway\":\" \",\"iface\":\"enp0s9\",\"item_id\":\"33228317ee8778628d0f2f4fde53b75b92f15f1d\",\"metric\":0,\"scan_time\":\"2023/08/07 15:02:36\"" ",\"type\":\"ipv4\"},\"operation\":\"DELETED\"}"}; @@ -84,7 +84,7 @@ TEST_F(SyscollectorDeltasTest, NetProtoParsingInvalid) TEST_F(SyscollectorDeltasTest, NetAddrParsingSuccess) { // For delta events, syscollector network address provider sends metric and dhcp information. - const std::string deltaNetAddr {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_network_address\",\"data\":{\"address\":\"192.168.0.80\",\"broadcast\":\"192.168.0.255\"" + const std::string deltaNetAddr {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_network_address\",\"data\":{\"address\":\"192.168.0.80\",\"broadcast\":\"192.168.0.255\"" ",\"checksum\":\"c1f9511fa37815d19cee496f21524725ba84ab10\",\"metric\":\"100\",\"dhcp\":\"unknown\",\"iface\":\"enp0s9\",\"item_id\":\"b333013c47d28eb3878068dd59c42e00178bd475\"" ",\"netmask\":\"255.255.255.0\",\"proto\":0,\"scan_time\":\"2023/08/07 15:02:36\"},\"operation\":\"DELETED\"}"}; @@ -98,7 +98,7 @@ TEST_F(SyscollectorDeltasTest, NetAddrParsingSuccess) TEST_F(SyscollectorDeltasTest, NetAddrParsingInvalid) { // Invalid field "oface". - const std::string deltaNetAddr {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_network_address\",\"data\":{\"address\":\"192.168.0.80\",\"broadcast\":\"192.168.0.255\"" + const std::string deltaNetAddr {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_network_address\",\"data\":{\"address\":\"192.168.0.80\",\"broadcast\":\"192.168.0.255\"" ",\"checksum\":\"c1f9511fa37815d19cee496f21524725ba84ab10\",\"oface\":\"enp0s9\",\"item_id\":\"b333013c47d28eb3878068dd59c42e00178bd475\"" ",\"netmask\":\"255.255.255.0\",\"proto\":0,\"scan_time\":\"2023/08/07 15:02:36\"},\"operation\":\"DELETED\"}"}; @@ -111,7 +111,7 @@ TEST_F(SyscollectorDeltasTest, NetAddrParsingInvalid) TEST_F(SyscollectorDeltasTest, OsInfoParsingSuccess) { - const std::string deltaOsInfo {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_osinfo\",\"data\":{\"architecture\":\"x86_64\",\"checksum\":\"1691178971959743855\",\"hostname\":\"focal\",\"os_codename\":\"focal\"" + const std::string deltaOsInfo {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_osinfo\",\"data\":{\"architecture\":\"x86_64\",\"checksum\":\"1691178971959743855\",\"hostname\":\"focal\",\"os_codename\":\"focal\"" ",\"os_major\":\"20\",\"os_minor\":\"04\",\"os_name\":\"Ubuntu\",\"os_patch\":\"6\",\"os_platform\":\"ubuntu\",\"os_version\":\"20.04.6 LTS (Focal Fossa)\"" ",\"release\":\"5.4.0-155-generic\",\"scan_time\":\"2023/08/04 19:56:11\",\"sysname\":\"Linux\",\"version\":\"#172-Ubuntu SMP Fri Jul 7 16:10:02 UTC 2023\"}" ",\"operation\":\"MODIFIED\"}"}; @@ -126,7 +126,7 @@ TEST_F(SyscollectorDeltasTest, OsInfoParsingSuccess) TEST_F(SyscollectorDeltasTest, OsInfoParsingInvalid) { // os_major is an integer. 
- const std::string deltaOsInfo {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_osinfo\",\"data\":{\"architecture\":\"x86_64\",\"checksum\":\"1691178971959743855\",\"hostname\":\"focal\",\"os_codename\":\"focal\"" + const std::string deltaOsInfo {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_osinfo\",\"data\":{\"architecture\":\"x86_64\",\"checksum\":\"1691178971959743855\",\"hostname\":\"focal\",\"os_codename\":\"focal\"" ",\"os_major\":20,\"os_minor\":\"04\",\"os_name\":\"Ubuntu\",\"os_patch\":\"6\",\"os_platform\":\"ubuntu\",\"os_version\":\"20.04.6 LTS (Focal Fossa)\"" ",\"release\":\"5.4.0-155-generic\",\"scan_time\":\"2023/08/04 19:56:11\",\"sysname\":\"Linux\",\"version\":\"#172-Ubuntu SMP Fri Jul 7 16:10:02 UTC 2023\"}" ",\"operation\":\"MODIFIED\"}"}; @@ -140,7 +140,7 @@ TEST_F(SyscollectorDeltasTest, OsInfoParsingInvalid) TEST_F(SyscollectorDeltasTest, HwInfoParsingSuccess) { - const std::string deltaHwInfo {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_hwinfo\",\"data\":{\"board_serial\":\"0\",\"checksum\":\"f6eea592bc11465ecacc92ddaea188ef3faf0a1f\",\"cpu_cores\":8" + const std::string deltaHwInfo {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_hwinfo\",\"data\":{\"board_serial\":\"0\",\"checksum\":\"f6eea592bc11465ecacc92ddaea188ef3faf0a1f\",\"cpu_cores\":8" ",\"cpu_mhz\":2592.0,\"cpu_name\":\"Intel(R) Core(TM) i7-10750H CPU @ 2.60GHz\",\"ram_free\":11547184,\"ram_total\":12251492,\"ram_usage\":6" ",\"scan_time\":\"2023/08/04 19:56:11\"},\"operation\":\"MODIFIED\"}"}; @@ -154,7 +154,7 @@ TEST_F(SyscollectorDeltasTest, HwInfoParsingSuccess) TEST_F(SyscollectorDeltasTest, HwInfoParsingInvalid) { // cpu_mhz is a string that represents an invalid number. - const std::string deltaHwInfo {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_hwinfo\",\"data\":{\"board_serial\":\"0\",\"checksum\":\"f6eea592bc11465ecacc92ddaea188ef3faf0a1f\",\"cpu_cores\":8" + const std::string deltaHwInfo {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_hwinfo\",\"data\":{\"board_serial\":\"0\",\"checksum\":\"f6eea592bc11465ecacc92ddaea188ef3faf0a1f\",\"cpu_cores\":8" ",\"cpu_mhz\":\"2592.x\",\"cpu_name\":\"Intel(R) Core(TM) i7-10750H CPU @ 2.60GHz\",\"ram_free\":11547184,\"ram_total\":12251492,\"ram_usage\":6" ",\"scan_time\":\"2023/08/04 19:56:11\"},\"operation\":\"MODIFIED\"}"}; @@ -167,7 +167,7 @@ TEST_F(SyscollectorDeltasTest, HwInfoParsingInvalid) TEST_F(SyscollectorDeltasTest, PortsParsingSuccess) { - const std::string deltaPorts {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_ports\",\"data\":{\"checksum\":\"03f522cdccc8dfbab964981db59b176b178b9dfd\",\"inode\":39968" + const std::string deltaPorts {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_ports\",\"data\":{\"checksum\":\"03f522cdccc8dfbab964981db59b176b178b9dfd\",\"inode\":39968" ",\"item_id\":\"7f98c21162b40ca7871a8292d177a1812ca97547\",\"local_ip\":\"10.0.2.15\",\"local_port\":68,\"pid\":0,\"process\":null,\"protocol\":\"udp\"" ",\"remote_ip\":\"0.0.0.0\",\"remote_port\":0,\"rx_queue\":0,\"scan_time\":\"2023/08/07 12:42:41\",\"state\":null,\"tx_queue\":0},\"operation\":\"INSERTED\"}"}; @@ -181,7 +181,7 @@ TEST_F(SyscollectorDeltasTest, PortsParsingSuccess) TEST_F(SyscollectorDeltasTest, PortsParsingInvalid) { // local_port is a string that represents an invalid number. 
- const std::string deltaPorts {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_ports\",\"data\":{\"checksum\":\"03f522cdccc8dfbab964981db59b176b178b9dfd\",\"inode\":39968" + const std::string deltaPorts {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_ports\",\"data\":{\"checksum\":\"03f522cdccc8dfbab964981db59b176b178b9dfd\",\"inode\":39968" ",\"item_id\":\"7f98c21162b40ca7871a8292d177a1812ca97547\",\"local_ip\":\"10.0.2.15\",\"local_port\":\"68x\",\"pid\":0,\"process\":null,\"protocol\":\"udp\"" ",\"remote_ip\":\"0.0.0.0\",\"remote_port\":0,\"rx_queue\":0,\"scan_time\":\"2023/08/07 12:42:41\",\"state\":null,\"tx_queue\":0},\"operation\":\"INSERTED\"}"}; @@ -194,7 +194,7 @@ TEST_F(SyscollectorDeltasTest, PortsParsingInvalid) TEST_F(SyscollectorDeltasTest, PackagesParsingSuccess) { - const std::string deltaPackages {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_packages\",\"data\":{\"architecture\":\"amd64\",\"checksum\":\"1e6ce14f97f57d1bbd46ff8e5d3e133171a1bbce\"" + const std::string deltaPackages {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_packages\",\"data\":{\"architecture\":\"amd64\",\"checksum\":\"1e6ce14f97f57d1bbd46ff8e5d3e133171a1bbce\"" ",\"description\":\"library for GIF images (library)\",\"format\":\"deb\",\"groups\":\"libs\",\"item_id\":\"ec465b7eb5fa011a336e95614072e4c7f1a65a53\"" ",\"multiarch\":\"same\",\"name\":\"libgif7\",\"priority\":\"optional\",\"scan_time\":\"2023/08/04 19:56:11\",\"size\":72,\"source\":\"giflib\"" ",\"vendor\":\"Ubuntu Developers \",\"version\":\"5.1.9-1\"},\"operation\":\"INSERTED\"}"}; @@ -209,7 +209,7 @@ TEST_F(SyscollectorDeltasTest, PackagesParsingSuccess) TEST_F(SyscollectorDeltasTest, PackagesParsingInvalid) { // Invalid field "arch" - const std::string deltaPackages {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_packages\",\"data\":{\"arch\":\"amd64\",\"checksum\":\"1e6ce14f97f57d1bbd46ff8e5d3e133171a1bbce\"" + const std::string deltaPackages {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_packages\",\"data\":{\"arch\":\"amd64\",\"checksum\":\"1e6ce14f97f57d1bbd46ff8e5d3e133171a1bbce\"" ",\"description\":\"library for GIF images (library)\",\"format\":\"deb\",\"groups\":\"libs\",\"item_id\":\"ec465b7eb5fa011a336e95614072e4c7f1a65a53\"" ",\"multiarch\":\"same\",\"name\":\"libgif7\",\"priority\":\"optional\",\"scan_time\":\"2023/08/04 19:56:11\",\"size\":72,\"source\":\"giflib\"" ",\"vendor\":\"Ubuntu Developers \",\"version\":\"5.1.9-1\"},\"operation\":\"INSERTED\"}"}; @@ -223,7 +223,7 @@ TEST_F(SyscollectorDeltasTest, PackagesParsingInvalid) TEST_F(SyscollectorDeltasTest, HotfixesParsingSuccess) { - const std::string deltaHotfixes {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_hotfixes\",\"data\":{\"checksum\":\"f6eea592bc11465ecacc92ddaea188ef3faf0a1f\",\"hotfix\":\"KB4502496\"" + const std::string deltaHotfixes {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_hotfixes\",\"data\":{\"checksum\":\"f6eea592bc11465ecacc92ddaea188ef3faf0a1f\",\"hotfix\":\"KB4502496\"" ",\"scan_time\":\"2023/08/0419:56:11\"},\"operation\":\"MODIFIED\"}"}; EXPECT_FALSE(flatbufferSchemaStr.empty()); @@ -236,7 +236,7 @@ TEST_F(SyscollectorDeltasTest, HotfixesParsingSuccess) TEST_F(SyscollectorDeltasTest, HotfixesParsingInvalid) { // Invalid field "hitfix". 
- const std::string deltaHotfixes {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_hotfixes\",\"data\":{\"checksum\":\"f6eea592bc11465ecacc92ddaea188ef3faf0a1f\",\"hitfix\":\"KB4502496\"" + const std::string deltaHotfixes {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_hotfixes\",\"data\":{\"checksum\":\"f6eea592bc11465ecacc92ddaea188ef3faf0a1f\",\"hitfix\":\"KB4502496\"" ",\"scan_time\":\"2023/08/0419:56:11\"},\"operation\":\"MODIFIED\"}"}; EXPECT_FALSE(flatbufferSchemaStr.empty()); @@ -249,7 +249,7 @@ TEST_F(SyscollectorDeltasTest, HotfixesParsingInvalid) TEST_F(SyscollectorDeltasTest, ProcessesParsingSuccess) { // We should escape double backslashes the message from agent. - const std::string deltaProcesses {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_processes\",\"data\":{\"checksum\":\"5ca21c17ae78a0ef7463b3b2454126848473cf5b\",\"cmd\":\"C:\\\\Windows\\\\System32\\\\winlogon.exe\"" + const std::string deltaProcesses {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_processes\",\"data\":{\"checksum\":\"5ca21c17ae78a0ef7463b3b2454126848473cf5b\",\"cmd\":\"C:\\\\Windows\\\\System32\\\\winlogon.exe\"" ",\"name\":\"winlogon.exe\",\"nlwp\":6,\"pid\":\"604\",\"ppid\":496,\"priority\":13,\"scan_time\":\"2023/08/07 15:01:57\",\"session\":1,\"size\":3387392" ",\"start_time\":1691420428,\"stime\":0,\"utime\":0,\"vm_size\":14348288},\"operation\":\"MODIFIED\"}"}; @@ -263,7 +263,7 @@ TEST_F(SyscollectorDeltasTest, ProcessesParsingSuccess) TEST_F(SyscollectorDeltasTest, ProcessesParsingInvalid) { // Double backslashes not escaped. - const std::string deltaProcesses {"{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"node01\"},\"data_type\":\"dbsync_processes\",\"data\":{\"checksum\":\"5ca21c17ae78a0ef7463b3b2454126848473cf5b\",\"cmd\":\"C:\\Windows\\\\System32\\\\winlogon.exe\"" + const std::string deltaProcesses {"{\"agent_info\":{\"agent_id\":\"001\"},\"data_type\":\"dbsync_processes\",\"data\":{\"checksum\":\"5ca21c17ae78a0ef7463b3b2454126848473cf5b\",\"cmd\":\"C:\\Windows\\\\System32\\\\winlogon.exe\"" ",\"name\":\"winlogon.exe\",\"nlwp\":6,\"pid\":\"604\",\"ppid\":496,\"priority\":13,\"scan_time\":\"2023/08/07 15:01:57\",\"session\":1,\"size\":3387392" ",\"start_time\":1691420428,\"stime\":0,\"utime\":0,\"vm_size\":14348288},\"operation\":\"MODIFIED\"}"}; diff --git a/src/wazuh_modules/syscollector/tests/sysCollectorFlatbuffers/syscollectorFb_test.cpp b/src/wazuh_modules/syscollector/tests/sysCollectorFlatbuffers/syscollectorFb_test.cpp index aeed33ad8c7..634c2b6e998 100644 --- a/src/wazuh_modules/syscollector/tests/sysCollectorFlatbuffers/syscollectorFb_test.cpp +++ b/src/wazuh_modules/syscollector/tests/sysCollectorFlatbuffers/syscollectorFb_test.cpp @@ -22,7 +22,7 @@ TEST(SyscollectorFbTest, JSONParsePackageUnix) { const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_packages\",\n attributes: {\n architecture: \"amd64\",\n checksum: \"409378153d05da4d49900316be982e575cb2586b\",\n description: \"GNU C++ compiler for MinGW-w64 targeting Win64\",\n format: \"deb\",\n groups: \"devel\",\n item_id: \"65a25b9b9fe7cb173aa5cc36dc437d9875af8a8e\",\n name: \"g++-mingw-w64-x86-64\",\n priority: \"optional\",\n scan_time: \"0000/00/00 00:00:00\",\n size: 155993,\n source: \"gcc-mingw-w64 (22~exp1ubuntu4)\",\n vendor: \"Stephen Kitt \",\n version: 
\"9.3.0-7ubuntu1+22~exp1ubuntu4\"\n },\n index: \"65a25b9b9fe7cb173aa5cc36dc437d9875af8a8e\",\n timestamp: \"\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_packages\",\n attributes: {\n architecture: \"amd64\",\n checksum: \"409378153d05da4d49900316be982e575cb2586b\",\n description: \"GNU C++ compiler for MinGW-w64 targeting Win64\",\n format: \"deb\",\n groups: \"devel\",\n item_id: \"65a25b9b9fe7cb173aa5cc36dc437d9875af8a8e\",\n name: \"g++-mingw-w64-x86-64\",\n priority: \"optional\",\n scan_time: \"0000/00/00 00:00:00\",\n size: 155993,\n source: \"gcc-mingw-w64 (22~exp1ubuntu4)\",\n vendor: \"Stephen Kitt \",\n version: \"9.3.0-7ubuntu1+22~exp1ubuntu4\"\n },\n index: \"65a25b9b9fe7cb173aa5cc36dc437d9875af8a8e\",\n timestamp: \"\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; @@ -48,7 +48,7 @@ TEST(SyscollectorFbTest, JSONParsePackageUnix) TEST(SyscollectorFbTest, JSONParsePackageWin) { const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_packages\",\n attributes: {\n checksum: \"9141d4744f95aad5db1cf8cf17c33c2f7dffed40\",\n format: \"win\",\n install_time: \"20230804\",\n item_id: \"e8cc756531b3adaae0e8a51c6800a681f4e903aa\",\n location: \"C:\\\\Users\\\\winuser\\\\AppData\\\\Local\\\\Microsoft\\\\Amazing\\\\Application\",\n name: \"Microsoft Application Amazing Runtime\",\n scan_time: \"0000/00/00 00:00:00\",\n vendor: \"Microsoft Application Amazing\",\n version: \"110.110.110.10.10\"\n },\n index: \"e8cc756531b3adaae0e8a51c6800a681f4e903aa\",\n timestamp: \"\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_packages\",\n attributes: {\n checksum: \"9141d4744f95aad5db1cf8cf17c33c2f7dffed40\",\n format: \"win\",\n install_time: \"20230804\",\n item_id: \"e8cc756531b3adaae0e8a51c6800a681f4e903aa\",\n location: \"C:\\\\Users\\\\winuser\\\\AppData\\\\Local\\\\Microsoft\\\\Amazing\\\\Application\",\n name: \"Microsoft Application Amazing Runtime\",\n scan_time: \"0000/00/00 00:00:00\",\n vendor: \"Microsoft Application Amazing\",\n version: \"110.110.110.10.10\"\n },\n index: \"e8cc756531b3adaae0e8a51c6800a681f4e903aa\",\n timestamp: \"\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; @@ -74,7 +74,7 @@ TEST(SyscollectorFbTest, JSONParsePackageWin) TEST(SyscollectorFbTest, JSONParseHotfix) { const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_hotfixes\",\n attributes: {\n checksum: \"5cfcee837ce896ef9229da1064b2844439ff3cc6\",\n hotfix: \"KB5026037\",\n scan_time: \"0000/00/00 00:00:00\"\n },\n index: \"KB5026037\",\n timestamp: \"\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_hotfixes\",\n attributes: {\n checksum: \"5cfcee837ce896ef9229da1064b2844439ff3cc6\",\n hotfix: \"KB5026037\",\n scan_time: \"0000/00/00 00:00:00\"\n },\n index: \"KB5026037\",\n timestamp: \"\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; @@ -100,7 +100,7 @@ TEST(SyscollectorFbTest, JSONParseHotfix) TEST(SyscollectorFbTest, JSONParseProcessUnix) { const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"state\",\n data: {\n attributes_type: 
\"syscollector_processes\",\n attributes: {\n checksum: \"bc425a0d5337df58bd60e54fdb889fbf370d425a\",\n egroup: \"root\",\n euser: \"root\",\n fgroup: \"root\",\n name: \"writeback\",\n nice: -20,\n nlwp: 1,\n pid: \"39\",\n ppid: 2,\n processor: 2,\n rgroup: \"root\",\n ruser: \"root\",\n scan_time: \"20000/00/00 00:00:00\",\n sgroup: \"root\",\n start_time: 1691513206,\n state: \"I\",\n suser: \"root\",\n tgid: 39\n },\n index: \"39\",\n timestamp: \"\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_processes\",\n attributes: {\n checksum: \"bc425a0d5337df58bd60e54fdb889fbf370d425a\",\n egroup: \"root\",\n euser: \"root\",\n fgroup: \"root\",\n name: \"writeback\",\n nice: -20,\n nlwp: 1,\n pid: \"39\",\n ppid: 2,\n processor: 2,\n rgroup: \"root\",\n ruser: \"root\",\n scan_time: \"20000/00/00 00:00:00\",\n sgroup: \"root\",\n start_time: 1691513206,\n state: \"I\",\n suser: \"root\",\n tgid: 39\n },\n index: \"39\",\n timestamp: \"\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; @@ -126,7 +126,7 @@ TEST(SyscollectorFbTest, JSONParseProcessUnix) TEST(SyscollectorFbTest, JSONParseProcessWin) { const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_processes\",\n attributes: {\n checksum: \"62abb948062c25a4065b35b17746ae2442e850d1\",\n cmd: \"C:\\\\Windows\\\\System32\\\\svchost.exe\",\n name: \"svchost.exe\",\n nlwp: 7,\n pid: \"1328\",\n ppid: 680,\n priority: 8,\n scan_time: \"0000/00/00 00:00:00\",\n size: 3534848,\n start_time: 1686590435,\n vm_size: 17723392\n },\n index: \"1328\",\n timestamp: \"\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_processes\",\n attributes: {\n checksum: \"62abb948062c25a4065b35b17746ae2442e850d1\",\n cmd: \"C:\\\\Windows\\\\System32\\\\svchost.exe\",\n name: \"svchost.exe\",\n nlwp: 7,\n pid: \"1328\",\n ppid: 680,\n priority: 8,\n scan_time: \"0000/00/00 00:00:00\",\n size: 3534848,\n start_time: 1686590435,\n vm_size: 17723392\n },\n index: \"1328\",\n timestamp: \"\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; @@ -152,7 +152,7 @@ TEST(SyscollectorFbTest, JSONParseProcessWin) TEST(SyscollectorFbTest, JSONParsePortsUnix) { const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_ports\",\n attributes: {\n checksum: \"02d4570c4cf94ba0f79c34e8a52216fddf73a39a\",\n inode: 42468,\n item_id: \"cb8f094adf3aeb9630f2f51d1beeb5472eb0a8fb\",\n local_ip: \"192.168.0.10\",\n local_port: 37990,\n protocol: \"udp\",\n remote_ip: \"192.168.0.30\",\n remote_port: 1514,\n scan_time: \"0000/00/00 00:00:00\"\n },\n index: \"cb8f094adf3aeb9630f2f51d1beeb5472eb0a8fb\",\n timestamp: \"\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_ports\",\n attributes: {\n checksum: \"02d4570c4cf94ba0f79c34e8a52216fddf73a39a\",\n inode: 42468,\n item_id: \"cb8f094adf3aeb9630f2f51d1beeb5472eb0a8fb\",\n local_ip: \"192.168.0.10\",\n local_port: 37990,\n protocol: \"udp\",\n remote_ip: \"192.168.0.30\",\n remote_port: 1514,\n scan_time: \"0000/00/00 00:00:00\"\n },\n index: \"cb8f094adf3aeb9630f2f51d1beeb5472eb0a8fb\",\n timestamp: \"\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; 
@@ -179,7 +179,7 @@ TEST(SyscollectorFbTest, JSONParsePortsWin) { const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_ports\",\n attributes: {\n checksum: \"02d4570c4cf94ba0f79c34e8a52216fddf73a39a\",\n inode: 42468,\n item_id: \"cb8f094adf3aeb9630f2f51d1beeb5472eb0a8fb\",\n local_ip: \"192.168.0.10\",\n local_port: 37990,\n protocol: \"udp\",\n remote_ip: \"192.168.0.30\",\n remote_port: 1514,\n scan_time: \"0000/00/00 00:00:00\",\n state: \"\"\n },\n index: \"cb8f094adf3aeb9630f2f51d1beeb5472eb0a8fb\",\n timestamp: \"\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_ports\",\n attributes: {\n checksum: \"02d4570c4cf94ba0f79c34e8a52216fddf73a39a\",\n inode: 42468,\n item_id: \"cb8f094adf3aeb9630f2f51d1beeb5472eb0a8fb\",\n local_ip: \"192.168.0.10\",\n local_port: 37990,\n protocol: \"udp\",\n remote_ip: \"192.168.0.30\",\n remote_port: 1514,\n scan_time: \"0000/00/00 00:00:00\",\n state: \"\"\n },\n index: \"cb8f094adf3aeb9630f2f51d1beeb5472eb0a8fb\",\n timestamp: \"\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; @@ -206,7 +206,7 @@ TEST(SyscollectorFbTest, JSONParseHwInfo) { const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_hwinfo\",\n attributes: {\n board_serial: \"0\",\n checksum: \"5675de235c09762beb0357a54024987ed0c70fd6\",\n cpu_cores: 4,\n cpu_mhz: 24970.0,\n cpu_name: \"Amazing(R) Core(TM) i45-10000H CPU @ 25.00GHz\",\n ram_free: 33603480,\n ram_total: 40133680,\n scan_time: \"0000/00/00 00:00:00\"\n },\n index: \"0\",\n timestamp: \"\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_hwinfo\",\n attributes: {\n board_serial: \"0\",\n checksum: \"5675de235c09762beb0357a54024987ed0c70fd6\",\n cpu_cores: 4,\n cpu_mhz: 24970.0,\n cpu_name: \"Amazing(R) Core(TM) i45-10000H CPU @ 25.00GHz\",\n ram_free: 33603480,\n ram_total: 40133680,\n scan_time: \"0000/00/00 00:00:00\"\n },\n index: \"0\",\n timestamp: \"\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; @@ -233,7 +233,7 @@ TEST(SyscollectorFbTest, JSONParseOsInfo) { const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_osinfo\",\n attributes: {\n checksum: \"1691513227478039559\",\n hostname: \"Supercomputer\",\n os_codename: \"focal\",\n os_major: \"200\",\n os_minor: \"000\",\n os_name: \"Wazuh OS\",\n os_patch: \"2\",\n os_platform: \"bsd\",\n os_version: \"200.000.2\",\n release: \"5.4.0-153-generic\",\n scan_time: \"0000/00/00 00:00:00\",\n sysname: \"Linux\",\n version: \"#170-WazuhOS SMP Fri Jun 16 13:43:31 UTC 2023\"\n },\n index: \"WazuhOS\",\n timestamp: \"\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_osinfo\",\n attributes: {\n checksum: \"1691513227478039559\",\n hostname: \"Supercomputer\",\n os_codename: \"focal\",\n os_major: \"200\",\n os_minor: \"000\",\n os_name: \"Wazuh OS\",\n os_patch: \"2\",\n os_platform: \"bsd\",\n os_version: \"200.000.2\",\n release: \"5.4.0-153-generic\",\n scan_time: \"0000/00/00 00:00:00\",\n sysname: \"Linux\",\n version: \"#170-WazuhOS SMP Fri Jun 16 13:43:31 UTC 2023\"\n 
},\n index: \"WazuhOS\",\n timestamp: \"\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; @@ -260,7 +260,7 @@ TEST(SyscollectorFbTest, JSONParseNetAddr) { const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_network_address\",\n attributes: {\n address: \"192.168.0.1\",\n broadcast: \"192.168.0.255\",\n checksum: \"c3794bf303c6229bcb40d4070b9820ac4902bd07\",\n iface: \"enp0s3\",\n item_id: \"b79437e85675afeeea2b4e141aca26b27cdcc959\",\n netmask: \"255.255.255.0\",\n scan_time: \"0000/00/00 00:00:00\"\n },\n index: \"b79437e85675afeeea2b4e141aca26b27cdcc959\",\n timestamp: \"\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_network_address\",\n attributes: {\n address: \"192.168.0.1\",\n broadcast: \"192.168.0.255\",\n checksum: \"c3794bf303c6229bcb40d4070b9820ac4902bd07\",\n iface: \"enp0s3\",\n item_id: \"b79437e85675afeeea2b4e141aca26b27cdcc959\",\n netmask: \"255.255.255.0\",\n scan_time: \"0000/00/00 00:00:00\"\n },\n index: \"b79437e85675afeeea2b4e141aca26b27cdcc959\",\n timestamp: \"\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; @@ -287,7 +287,7 @@ TEST(SyscollectorFbTest, JSONParseNetItf) { const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_network_iface\",\n attributes: {\n checksum: \"92a69b6285431e7d67da91e5006d23246628f13c\",\n item_id: \"7a60750dd3c25c53f21ff7f44b4743664ddbb66a\",\n mac: \"XX:XX:XX:XX:XX:XX\",\n mtu: 1500,\n name: \"enp0s3\",\n rx_bytes: 255555,\n rx_dropped: 255555,\n rx_errors: 255555,\n rx_packets: 255555,\n scan_time: \"0000/00/00 00:00:00\",\n state: \"up\",\n tx_bytes: 255555,\n tx_dropped: 255555,\n tx_errors: 255555,\n tx_packets: 255555,\n type: \"quantic_fiber\"\n },\n index: \"7a60750dd3c25c53f21ff7f44b4743664ddbb66a\",\n timestamp: \"\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_network_iface\",\n attributes: {\n checksum: \"92a69b6285431e7d67da91e5006d23246628f13c\",\n item_id: \"7a60750dd3c25c53f21ff7f44b4743664ddbb66a\",\n mac: \"XX:XX:XX:XX:XX:XX\",\n mtu: 1500,\n name: \"enp0s3\",\n rx_bytes: 255555,\n rx_dropped: 255555,\n rx_errors: 255555,\n rx_packets: 255555,\n scan_time: \"0000/00/00 00:00:00\",\n state: \"up\",\n tx_bytes: 255555,\n tx_dropped: 255555,\n tx_errors: 255555,\n tx_packets: 255555,\n type: \"quantic_fiber\"\n },\n index: \"7a60750dd3c25c53f21ff7f44b4743664ddbb66a\",\n timestamp: \"\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; @@ -314,7 +314,7 @@ TEST(SyscollectorFbTest, JSONParseNetItfNegativeValues) { // Syscollector network iface can send negative values for some fields. This test is to avoid reverting the changes in the flatbuffer schema. 
const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_network_iface\",\n attributes: {\n checksum: \"92a69b6285431e7d67da91e5006d23246628f13c\",\n item_id: \"7a60750dd3c25c53f21ff7f44b4743664ddbb66a\",\n mac: \"XX:XX:XX:XX:XX:XX\",\n mtu: -1,\n name: \"enp0s3\",\n rx_bytes: -255555,\n rx_dropped: -255555,\n rx_errors: -255555,\n rx_packets: -255555,\n scan_time: \"0000/00/00 00:00:00\",\n state: \"up\",\n tx_bytes: -255555,\n tx_dropped: -255555,\n tx_errors: -255555,\n tx_packets: -255555,\n type: \"quantic_fiber\"\n },\n index: \"7a60750dd3c25c53f21ff7f44b4743664ddbb66a\",\n timestamp: \"\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"state\",\n data: {\n attributes_type: \"syscollector_network_iface\",\n attributes: {\n checksum: \"92a69b6285431e7d67da91e5006d23246628f13c\",\n item_id: \"7a60750dd3c25c53f21ff7f44b4743664ddbb66a\",\n mac: \"XX:XX:XX:XX:XX:XX\",\n mtu: -1,\n name: \"enp0s3\",\n rx_bytes: -255555,\n rx_dropped: -255555,\n rx_errors: -255555,\n rx_packets: -255555,\n scan_time: \"0000/00/00 00:00:00\",\n state: \"up\",\n tx_bytes: -255555,\n tx_dropped: -255555,\n tx_errors: -255555,\n tx_packets: -255555,\n type: \"quantic_fiber\"\n },\n index: \"7a60750dd3c25c53f21ff7f44b4743664ddbb66a\",\n timestamp: \"\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; @@ -341,7 +341,7 @@ TEST(SyscollectorFbTest, JSONIntegrityGlobal) { const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"integrity_check_left\",\n data: {\n attributes_type: \"syscollector_network_iface\",\n id: 123456789,\n begin: \"73fe1533b96d4e81b56e13df4f25a0684b473de7\",\n end: \"73fe1533b96d4e81b56e13df4f25a0684b473de7\",\n checksum: \"92a69b6285431e7d67da91e5006d23246628f13c\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"integrity_check_left\",\n data: {\n attributes_type: \"syscollector_network_iface\",\n id: 123456789,\n begin: \"73fe1533b96d4e81b56e13df4f25a0684b473de7\",\n end: \"73fe1533b96d4e81b56e13df4f25a0684b473de7\",\n checksum: \"92a69b6285431e7d67da91e5006d23246628f13c\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; @@ -368,7 +368,7 @@ TEST(SyscollectorFbTest, JSONIntegrityRight) { const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"integrity_check_right\",\n data: {\n attributes_type: \"syscollector_network_iface\",\n id: 123456789,\n begin: \"73fe1533b96d4e81b56e13df4f25a0684b473de7\",\n end: \"73fe1533b96d4e81b56e13df4f25a0684b473de7\",\n checksum: \"92a69b6285431e7d67da91e5006d23246628f13c\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"integrity_check_right\",\n data: {\n attributes_type: \"syscollector_network_iface\",\n id: 123456789,\n begin: \"73fe1533b96d4e81b56e13df4f25a0684b473de7\",\n end: \"73fe1533b96d4e81b56e13df4f25a0684b473de7\",\n checksum: \"92a69b6285431e7d67da91e5006d23246628f13c\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; @@ -395,7 +395,7 @@ TEST(SyscollectorFbTest, JSONIntegrityLeft) { const std::string alert_json = - "{\n agent_info: {\n agent_id: \"001\",\n node_name: \"node01\"\n },\n data_type: \"integrity_check_left\",\n data: {\n attributes_type: \"syscollector_network_iface\",\n id: 123456789,\n begin: \"73fe1533b96d4e81b56e13df4f25a0684b473de7\",\n end: \"73fe1533b96d4e81b56e13df4f25a0684b473de7\",\n 
tail: \"92a69b6285431e7d67da91e5006d23246628f13c\",\n checksum: \"92a69b6285431e7d67da91e5006d23246628f13c\"\n }\n}\n"; + "{\n agent_info: {\n agent_id: \"001\"\n },\n data_type: \"integrity_check_left\",\n data: {\n attributes_type: \"syscollector_network_iface\",\n id: 123456789,\n begin: \"73fe1533b96d4e81b56e13df4f25a0684b473de7\",\n end: \"73fe1533b96d4e81b56e13df4f25a0684b473de7\",\n tail: \"92a69b6285431e7d67da91e5006d23246628f13c\",\n checksum: \"92a69b6285431e7d67da91e5006d23246628f13c\"\n }\n}\n"; flatbuffers::Parser parser; std::string schemaFile; diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/input_001.json index 45ba398c4b7..6b3a943d8b5 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/input_001.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/input_001.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/input_002.json index e47f11641ec..93bb5a47120 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/input_002.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/input_002.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/input_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/input_003.json index c6f253470a5..0e5b8a22e93 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/input_003.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/001/input_003.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_001.json index 3f71e784823..dc0a7ff1f8a 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_001.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_001.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_002.json index e28cc11fefe..b2973cf2424 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_002.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_002.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_003.json index 731c9ff4962..d1fff3c2706 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_003.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_003.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, 
"data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_004.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_004.json index 44cee783f67..1fc9b67fa4b 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_004.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/002/input_004.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_001.json index cf21e1e920c..fc364568f08 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_001.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_001.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_002.json index e631acee6bb..776ccd00c8d 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_002.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_002.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_003.json index 9289006e846..13edc172f8f 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_003.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_003.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_004.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_004.json index ad0eacdae25..d9bad1728cf 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_004.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_004.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_005.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_005.json index 151be6cd2ac..640813e4b56 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_005.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_005.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "003", - "node_name": "test_node_name" + "agent_id": "003" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_006.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_006.json index 7f56701c3df..738e4ff1360 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_006.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_006.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "003", - "node_name": "test_node_name" + "agent_id": "003" }, "data_type": 
"dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_007.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_007.json index 54caafa36a6..7aacd55c4c0 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_007.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_007.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "004", - "node_name": "test_node_name" + "agent_id": "004" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_008.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_008.json index 4e61aaedeed..a25ce8218ba 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_008.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/003/input_008.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "004", - "node_name": "test_node_name" + "agent_id": "004" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_001.json index cf21e1e920c..fc364568f08 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_001.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_001.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_002.json index e93fd1ad2d4..49d7c3da62e 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_002.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_002.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_003.json index 9289006e846..13edc172f8f 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_003.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_003.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_004.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_004.json index 5652b1ebdfb..8ab9166cffe 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_004.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_004.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_005.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_005.json index d79a0db6d92..2a60c492432 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_005.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_005.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "003", - "node_name": "test_node_name" + "agent_id": "003" }, "data_type": "dbsync_osinfo", "data": 
{ diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_006.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_006.json index a0e2ea5c1be..cf9c0b47c98 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_006.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_006.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "003", - "node_name": "test_node_name" + "agent_id": "003" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_007.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_007.json index 54caafa36a6..7aacd55c4c0 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_007.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_007.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "004", - "node_name": "test_node_name" + "agent_id": "004" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_008.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_008.json index f7215c9a725..5f0e0fea8fe 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_008.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/004/input_008.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "004", - "node_name": "test_node_name" + "agent_id": "004" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/005/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/005/input_001.json index 16fb84562e0..44ebb1639fa 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/005/input_001.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/005/input_001.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/005/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/005/input_002.json index fa5fb52a21a..58fc5c83c54 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/005/input_002.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/005/input_002.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_001.json index 16fb84562e0..44ebb1639fa 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_001.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_001.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_002.json index 8dbf1b1f506..329e0bcfca5 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_002.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_002.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git 
a/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_003.json index 3b1224f4fb7..2d39a39dc6f 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_003.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_003.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_004.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_004.json index b5e60d98d1d..756fc3cecf7 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_004.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_004.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_005.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_005.json index 3e8adb55a04..3c2e206eeb2 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_005.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_005.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_006.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_006.json index da64ee958da..770189bd53a 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_006.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/006/input_006.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/007/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/007/input_001.json index 37fc804f3e0..2d1374e600b 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/007/input_001.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/007/input_001.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "state", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/007/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/007/input_002.json index ba8a8398aa9..11e288c9df0 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/007/input_002.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/007/input_002.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "state", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/input_001.json index 8717f7e821c..2fa269c238a 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/input_001.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/input_001.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, "data_type": "state", "data": { diff --git 
a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/input_002.json index 7c1b716604d..56c7f626a27 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/input_002.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/input_002.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, "data_type": "state", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/input_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/input_003.json index 6dca342b127..f93977c4b22 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/input_003.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/input_003.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, "data_type": "integrity_clear", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/input_001.json index 5d49475e1de..a5706ab5e0e 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/input_001.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/input_001.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "009", - "node_name": "test_node_name" + "agent_id": "009" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/input_002.json index 0d3f1bb2611..5358060c87c 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/input_002.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/input_002.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "009", - "node_name": "test_node_name" + "agent_id": "009" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_001.json index d9821ab0203..0d16e1ef9ce 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_001.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_001.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "010", - "node_name": "test_node_name" + "agent_id": "010" }, "data_type": "state", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_002.json index 7a228d0f56e..6900f52a471 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_002.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_002.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "010", - "node_name": "test_node_name" + "agent_id": "010" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_003.json index 7a228d0f56e..6900f52a471 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_003.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/010/input_003.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "010", - "node_name": "test_node_name" + "agent_id": "010" }, "data_type": "dbsync_packages", "data": { diff --git 
a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_001.json index 353f220f46b..52f648a9a6b 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_001.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_001.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "state", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_002.json index 45840ed1ae0..cad46959e74 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_002.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_002.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_003.json index 467a334c6e0..8a17c2ebbce 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_003.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_003.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, "data_type": "state", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_004.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_004.json index 6ff0146c15e..9f78a79bb2f 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_004.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_004.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_005.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_005.json index 658402519e5..d57ef4ec2cb 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_005.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/input_005.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/config.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/config.json index 28f77d8c0d4..cf23bce0adf 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/config.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/config.json @@ -2,7 +2,9 @@ "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", + "clusterName":"cluster01", + "clusterEnabled":false }, "indexer": { "enabled": "yes", @@ -18,6 +20,5 @@ "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" } - }, - "managerNodeName": "wazuh-manager" + } } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabled.json 
b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabled.json index 04182759e39..2f21d6edb52 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabled.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabled.json @@ -2,7 +2,9 @@ "vulnerability-detection": { "enabled": "no", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", + "clusterName":"cluster01", + "clusterEnabled":false }, "indexer": { "enabled": "yes", @@ -18,6 +20,5 @@ "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" } - }, - "managerNodeName": "wazuh-manager" + } } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/config.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/config.json index 28f77d8c0d4..614e4726a93 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/config.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/config.json @@ -2,7 +2,9 @@ "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", + "clusterName":"cluster01", + "clusterEnabled":false }, "indexer": { "enabled": "yes", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/configManagerDisabled.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/configManagerDisabled.json index 519d55ab6d2..fd85e2c72f7 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/configManagerDisabled.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/configManagerDisabled.json @@ -2,7 +2,9 @@ "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", + "clusterName":"cluster01", + "clusterEnabled":false }, "indexer": { "enabled": "yes", diff --git a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp index 65707a47a56..e4af83d16d6 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp @@ -691,14 +691,7 @@ class PolicyManager final : public Singleton */ std::string_view getClusterNodeName() const { - if (!m_configuration.contains("clusterNodeName")) - { - return UNKNOWN_VALUE; - } - else - { - return m_configuration.at("clusterNodeName").get_ref(); - } + return m_configuration.at("clusterNodeName").get_ref(); } /** diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp index a44b3401d5f..ad467087a1b 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp @@ -82,12 +82,11 @@ class TBuildAllAgentListContext final : public 
AbstractHandler() == 0)) { - data->m_agents.push_back({Utils::padString(std::to_string(agent.at("id").get()), '0', 3), - agent.at("name"), - Utils::leftTrim(agent.at("version"), "Wazuh "), - agent.at("ip"), - agent.at("node_name")}); - } + data->m_agents.push_back({Utils::padString(std::to_string(agent.at("id").get()), '0', 3), + agent.at("name"), + Utils::leftTrim(agent.at("version"), "Wazuh "), + agent.at("ip")}); + } else { logDebug2(WM_VULNSCAN_LOGTAG, "Skipping manager agent with id 0."); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp index 67dc5a7342f..8d73fb3aa34 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp @@ -47,8 +47,7 @@ class TBuildSingleAgentListInfoContext final : public AbstractHandlerm_agents.push_back({data->agentId().data(), data->agentName().data(), data->agentVersion().data(), - data->agentIp().data(), - data->agentNodeName().data()}); + data->agentIp().data()}); return AbstractHandler>::handleRequest(std::move(data)); } }; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp index 3207f68aef0..1599da0302a 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp @@ -59,13 +59,16 @@ class TCleanAgentInventory final * @param data Scan context. * @return std::shared_ptr Abstract handler. */ - std::shared_ptr handleRequest(std::shared_ptr data) override + std::shared_ptr handleRequest(std::shared_ptr ctx) override { - std::string agentKey; - agentKey.append(data->agentId()); + const auto clusterManagerPrefix = + ctx->agentId().compare("000") == 0 && ctx->clusterStatus() ? std::string(ctx->clusterNodeName()) + "_" : ""; + + std::string agentKey = clusterManagerPrefix; + agentKey.append(ctx->agentId()); agentKey.append("_"); - data->m_isInventoryEmpty = true; + ctx->m_isInventoryEmpty = true; auto deleteAll = [this](std::shared_ptr data, const std::string& key, @@ -101,24 +104,24 @@ class TCleanAgentInventory final }; // If the affected type is Agent, delete all inventories for the agent. - if (data->affectedComponentType() == AffectedComponentType::Agent) + if (ctx->affectedComponentType() == AffectedComponentType::Agent) { - deleteAll(data, agentKey, {AffectedComponentType::Os, AffectedComponentType::Package}); + deleteAll(ctx, agentKey, {AffectedComponentType::Os, AffectedComponentType::Package}); } // Delete all entries for the affected type, used for the integrity clear. else { - deleteAll(data, agentKey, {data->affectedComponentType()}); + deleteAll(ctx, agentKey, {ctx->affectedComponentType()}); } // If the affected type is Os, delete the initial scan data. 
- if (data->affectedComponentType() == AffectedComponentType::Os || - data->affectedComponentType() == AffectedComponentType::Agent) + if (ctx->affectedComponentType() == AffectedComponentType::Os || + ctx->affectedComponentType() == AffectedComponentType::Agent) { - TInventorySync::m_inventoryDatabase.delete_(data->agentId().data(), OS_INITIAL_SCAN); + TInventorySync::m_inventoryDatabase.delete_(ctx->agentId().data(), OS_INITIAL_SCAN); } - return AbstractHandler>::handleRequest(std::move(data)); + return AbstractHandler>::handleRequest(std::move(ctx)); } }; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDeleteInventory.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDeleteInventory.hpp index 19b01adce23..1eba607b649 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDeleteInventory.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDeleteInventory.hpp @@ -53,6 +53,10 @@ class TEventDeleteInventory final std::shared_ptr handleRequest(std::shared_ptr data) override { std::string key; + const auto clusterManagerPrefix = data->agentId().compare("000") == 0 && data->clusterStatus() + ? std::string(data->clusterNodeName()) + "_" + : ""; + key.append(clusterManagerPrefix); key.append(data->agentId()); key.append("_"); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp index 96bc33502b7..8dae49b9315 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp @@ -163,6 +163,10 @@ class TEventDetailsBuilder final : public AbstractHandleragentId().compare("000") == 0 && data->clusterStatus()) + { + populateField(agent, "/ephemeral_id"_json_pointer, data->clusterNodeName()); + } populateField(agent, "/id"_json_pointer, data->agentId()); populateField(agent, "/name"_json_pointer, data->agentName()); populateField(agent, "/type"_json_pointer, "wazuh"); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventInsertInventory.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventInsertInventory.hpp index 5a450119d9c..96819218953 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventInsertInventory.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventInsertInventory.hpp @@ -52,31 +52,36 @@ class TEventInsertInventory final */ std::shared_ptr handleRequest(std::shared_ptr data) override { - std::string key; - key.append(data->agentId()); - key.append("_"); + const auto clusterManagerPrefix = data->agentId().compare("000") == 0 && data->clusterStatus() + ? std::string(data->clusterNodeName()) + "_" + : ""; + + std::string elementKey = clusterManagerPrefix; + elementKey.append(data->agentId()); + elementKey.append("_"); // Create the key for the inventory. - key.append(TInventorySync::affectedComponentKey(data)); + elementKey.append(TInventorySync::affectedComponentKey(data)); std::vector insertList; std::string insertListString; - std::string value; + std::string rawCveList; const auto& column = AFFECTED_COMPONENT_COLUMNS.at(data->affectedComponentType()); // Build json element for each CVE. 
for (auto& [cve, value] : data->m_elements) { - std::string elementKey = key; - elementKey.append("_"); - elementKey.append(cve); + std::string newElementKey; + newElementKey.append(elementKey); + newElementKey.append("_"); + newElementKey.append(cve); - value = TInventorySync::buildElement("INSERTED", elementKey); + value = TInventorySync::buildElement("INSERTED", newElementKey); } - if (TInventorySync::m_inventoryDatabase.get(key, value, column)) + if (TInventorySync::m_inventoryDatabase.get(elementKey, rawCveList, column)) { - auto listCve = Utils::split(value, ','); + auto listCve = Utils::split(rawCveList, ','); for (auto& [key, value] : data->m_elements) { if (std::find(listCve.begin(), listCve.end(), key) == listCve.end()) @@ -89,10 +94,12 @@ class TEventInsertInventory final if (!insertListString.empty()) { - insertListString.append(value); - logDebug2( - WM_VULNSCAN_LOGTAG, "Updating agent package key: %s -> %s", key.c_str(), insertListString.c_str()); - TInventorySync::m_inventoryDatabase.put(key, insertListString, column); + insertListString.append(rawCveList); + logDebug2(WM_VULNSCAN_LOGTAG, + "Updating agent package key: %s -> %s", + elementKey.c_str(), + insertListString.c_str()); + TInventorySync::m_inventoryDatabase.put(elementKey, insertListString, column); } } else @@ -107,9 +114,11 @@ class TEventInsertInventory final if (!insertListString.empty()) { insertListString.pop_back(); - logDebug2( - WM_VULNSCAN_LOGTAG, "Inserting agent package key: %s -> %s", key.c_str(), insertListString.c_str()); - TInventorySync::m_inventoryDatabase.put(key, insertListString, column); + logDebug2(WM_VULNSCAN_LOGTAG, + "Inserting agent package key: %s -> %s", + elementKey.c_str(), + insertListString.c_str()); + TInventorySync::m_inventoryDatabase.put(elementKey, insertListString, column); } } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp index f53d6f9c161..f9c18ab25e1 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp @@ -66,8 +66,7 @@ class TScanAgentList final : public AbstractHandler( - [](const SyscollectorDeltas::Delta* delta) - { - if (delta->agent_info() == nullptr) - { - return ""; - } - return delta->agent_info()->node_name() ? delta->agent_info()->node_name()->c_str() : ""; - }, - [](const SyscollectorSynchronization::SyncMsg* syncMsg) - { - if (syncMsg->agent_info() == nullptr) - { - return ""; - } - return syncMsg->agent_info()->node_name() ? syncMsg->agent_info()->node_name()->c_str() : ""; - }, - [](const nlohmann::json* jsonData) - { - return jsonData->contains("/agent_info/node_name"_json_pointer) - ? jsonData->at("/agent_info/node_name"_json_pointer).get_ref().c_str() - : ""; - }); - } - /** * @brief Gets os hostName. * @return Os hostName. @@ -1399,6 +1367,26 @@ struct TScanContext final return clusterName; } + /** + * @brief Gets cluster node name + * @return Cluster node name. + */ + std::string_view clusterNodeName() const + { + static std::string_view clusterNodeName = PolicyManager::instance().getClusterNodeName(); + return clusterNodeName; + } + + /** + * @brief Gets cluster status. + * @return Cluster node status. + */ + bool clusterStatus() const + { + static bool clusterStatus = PolicyManager::instance().getClusterStatus(); + return clusterStatus; + } + /** * @brief Gets OS CPE. * @return OS CPE. 
diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanInventorySync.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanInventorySync.hpp index 4986546445f..a652ba032ea 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanInventorySync.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanInventorySync.hpp @@ -49,7 +49,11 @@ class TScanInventorySync final */ std::shared_ptr handleRequest(std::shared_ptr data) override { - std::string key; + const auto clusterManagerPrefix = data->agentId().compare("000") == 0 && data->clusterStatus() + ? std::string(data->clusterNodeName()) + "_" + : ""; + + std::string key = clusterManagerPrefix; key.append(data->agentId()); key.append("_"); @@ -61,10 +65,10 @@ class TScanInventorySync final std::vector inventoryBase; bool isInventoryEmpty = true; - std::string value; - if (m_inventoryDatabase.get(key, value, column)) + std::string rawInventory; + if (m_inventoryDatabase.get(key, rawInventory, column)) { - inventory = Utils::split(value, ','); + inventory = Utils::split(rawInventory, ','); inventoryBase = inventory; isInventoryEmpty = false; } @@ -132,7 +136,7 @@ class TScanInventorySync final } // If the inventory is empty, set the initial scan to true. - if (!m_inventoryDatabase.get(key, value, OS_INITIAL_SCAN)) + if (!m_inventoryDatabase.get(key, rawInventory, OS_INITIAL_SCAN)) { // Put the initial value(utc time) for the initial scan. m_inventoryDatabase.put(key, Utils::getCurrentISO8601(), OS_INITIAL_SCAN); diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp index d88e050187e..e155abd52d8 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp @@ -248,7 +248,6 @@ void VulnerabilityScannerFacade::managerScanPolicyChange(Utils::RocksDBWrapper& nlohmann::json dataValueJson; dataValueJson["action"] = "deleteAgent"; dataValueJson["agent_info"]["agent_id"] = "000"; - dataValueJson["agent_info"]["node_name"] = policyManager.getManagerNodeName(); const auto& dataValue = dataValueJson.dump(); const std::vector message(dataValue.begin(), dataValue.end()); @@ -263,7 +262,6 @@ void VulnerabilityScannerFacade::managerScanPolicyChange(Utils::RocksDBWrapper& nlohmann::json dataValueJson; dataValueJson["action"] = "scanAgent"; dataValueJson["agent_info"]["agent_id"] = "000"; - dataValueJson["agent_info"]["node_name"] = policyManager.getManagerNodeName(); const auto& dataValue = dataValueJson.dump(); const std::vector message(dataValue.begin(), dataValue.end()); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/alertClearBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/alertClearBuilder_test.cpp index 4b488c92907..d616eec9ceb 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/alertClearBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/alertClearBuilder_test.cpp @@ -38,8 +38,7 @@ namespace NSAlertClearBuilderTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "integrity_clear", "data": { @@ -63,7 +62,9 @@ void AlertClearBuilderTest::SetUp() "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com" + "cti-url": "cti-url.com", + 
"clusterName":"clusterName", + "clusterEnabled":false }, "osdataLRUSize":1000 })")}; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanAgentInventory_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanAgentInventory_test.cpp index 83eab414bd7..5f21b504c06 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanAgentInventory_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanAgentInventory_test.cpp @@ -24,8 +24,7 @@ namespace NSCleanAgentInventoryTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "integrity_clear", "data": { @@ -41,8 +40,7 @@ namespace NSCleanAgentInventoryTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "integrity_clear", "data": { @@ -110,7 +108,7 @@ TEST_F(CleanAgentInventoryTest, CleanAgentDataSuccessfulPackage) nlohmann::json jsonData = nlohmann::json::parse( R"({"agent_info": {"agent_id":"001", "agent_version":"4.8.0", "agent_name":"test_agent_name", -"agent_ip":"10.0.0.1", "node_name":"node01"}, "action":"upgradeAgentDB"})"); +"agent_ip":"10.0.0.1"}, "action":"upgradeAgentDB"})"); std::variant data = &jsonData; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanInventory_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanInventory_test.cpp index 93b8514933b..063c7c95fcf 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanInventory_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanInventory_test.cpp @@ -35,8 +35,7 @@ namespace NSCleanInventoryTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "integrity_clear", "data": { @@ -110,7 +109,7 @@ TEST_F(CleanInventoryTest, TestCleanAllData) nlohmann::json jsonData = nlohmann::json::parse( R"({"agent_info": {"agent_id":"001", "agent_version":"4.8.0", "agent_name":"test_agent_name", -"agent_ip":"10.0.0.1", "node_name":"node01"}, "action":"upgradeAgentDB"})"); +"agent_ip":"10.0.0.1"}, "action":"upgradeAgentDB"})"); std::variant data = &jsonData; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.cpp index 53ec968b05c..85da0a6899c 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.cpp @@ -35,8 +35,7 @@ const std::string INTEGRITY_CLEAR_MSG_PACKAGES { "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "integrity_clear", "data": { @@ -52,8 +51,7 @@ const std::string INTEGRITY_CLEAR_MSG_OS { "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "integrity_clear", "data": { @@ -69,8 +67,7 @@ const std::string INTEGRITY_CLEAR_MSG_UNEXPECTED { "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "integrity_clear", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDeleteInventory_test.cpp 
b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDeleteInventory_test.cpp index 4ee20255737..66a2cce20de 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDeleteInventory_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDeleteInventory_test.cpp @@ -34,8 +34,7 @@ namespace NSEventDeleteInventoryTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp index b02ce2d09ff..9725d416932 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp @@ -40,8 +40,7 @@ namespace NSEventDetailsBuilderTest "agent_id": "001", "agent_ip": "192.168.33.20", "agent_name": "focal", - "agent_version": "4.7.1", - "node_name": "node01" + "agent_version": "4.7.1" }, "data_type": "dbsync_packages", "data": { @@ -73,8 +72,7 @@ namespace NSEventDetailsBuilderTest "agent_id": "001", "agent_ip": "192.168.33.20", "agent_version": "4.7.1", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_packages", "data": { @@ -105,8 +103,7 @@ namespace NSEventDetailsBuilderTest "agent_id": "002", "agent_ip": "192.168.33.30", "agent_version": "4.7.1", - "agent_name": "Microsoft-10", - "node_name": "test_node_name" + "agent_name": "Microsoft-10" }, "data_type": "state", "data": { @@ -141,7 +138,9 @@ void EventDetailsBuilderTest::SetUp() "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com" + "cti-url": "cti-url.com", + "clusterName":"cluster01", + "clusterEnabled":false }, "osdataLRUSize":1000 })")}; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventInsertInventory_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventInsertInventory_test.cpp index 5ee55307745..3ffadbdef23 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventInsertInventory_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventInsertInventory_test.cpp @@ -34,8 +34,7 @@ namespace NSEventInsertInventoryTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp index be616e708f9..517cc1d6e93 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp @@ -39,8 +39,7 @@ namespace NSEventPackageAlertDetailsBuilderTest "agent_id": "001", "agent_ip": "192.168.33.20", "agent_name": "focal", - "agent_version": "4.7.1", - "node_name": "node01" + "agent_version": "4.7.1" }, "data_type": "dbsync_packages", "data": { @@ -72,8 +71,7 @@ namespace NSEventPackageAlertDetailsBuilderTest "agent_id": "001", "agent_ip": "192.168.33.20", "agent_version": "4.7.1", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_packages", "data": { @@ -112,7 +110,9 @@ void 
EventPackageAlertDetailsBuilderTest::SetUp() "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com" + "cti-url": "cti-url.com", + "clusterName":"cluster01", + "clusterEnabled":false }, "osdataLRUSize":1000 })")}; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.cpp index c3f3654b9b0..3951ba41e87 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.cpp @@ -34,8 +34,7 @@ const std::string SYNC_STATE_MSG { "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "state", "data": { @@ -106,8 +105,7 @@ const std::string DELTA_DELETE_MSG { "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_packages", "data": { @@ -160,8 +158,7 @@ const std::string DELTA_INSERT_OS { "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_osinfo", "data": { @@ -185,8 +182,7 @@ const std::string DELTA_UPDATE_HOTFIX { "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type":"dbsync_hotfixes", "data": diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.hpp index 27d85a01c55..2ccc4c7852e 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.hpp @@ -72,7 +72,9 @@ class EventSendReportTest : public ::testing::Test "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com" + "cti-url": "cti-url.com", + "clusterName":"cluster01", + "clusterEnabled":false }, "osdataLRUSize":1000 })")}; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.hpp index dd500b60a68..8db1d0fe55d 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.hpp @@ -51,7 +51,9 @@ class OsDataCacheTest : public ::testing::Test "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com" + "cti-url": "cti-url.com", + "clusterName":"cluster01", + "clusterEnabled":false }, "osdataLRUSize":1000 })")}; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp index 861087188af..593059b6e17 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp @@ -33,8 +33,7 @@ namespace NSPackageScannerTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_packages", "data": { @@ -64,8 +63,7 @@ namespace NSPackageScannerTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - 
"agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp index e96daf8dcda..145d6d4f82f 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp @@ -168,7 +168,9 @@ TEST_F(PolicyManagerTest, validConfigurationCheckParameters) "enabled": "yes", "index-status": "yes", "feed-update-interval": "60m", - "cti-url": "https://cti-url.com" + "cti-url": "https://cti-url.com", + "clusterName":"clusterName", + "clusterEnabled":false }, "indexer": { "enabled": "yes", @@ -218,7 +220,9 @@ TEST_F(PolicyManagerTest, validConfigurationCheckParametersOffline) "enabled": "yes", "index-status": "no", "offline-url": "file:///var/algo.tar.gz", - "cti-url": "https://cti-url.com" + "cti-url": "https://cti-url.com", + "clusterName":"clusterName", + "clusterEnabled":false } })")}; EXPECT_NO_THROW(m_policyManager->initialize(configJson)); @@ -233,7 +237,7 @@ TEST_F(PolicyManagerTest, validConfigurationCheckParametersOffline) R"({"configData":{"compressionType":"raw","consumerName":"Wazuh VulnerabilityDetector","contentSource":"offline","databasePath":"queue/vd_updater/rocksdb","deleteDownloadedContent":true,"offset":0,"outputFolder":"queue/vd_updater/tmp","url":"file:///var/algo.tar.gz","versionedContent":"false"},"interval":3600,"ondemand":true,"topicName":"vulnerability_feed_manager"})"); } -TEST_F(PolicyManagerTest, validConfigurationDefaultValues) +TEST_F(PolicyManagerTest, invalidConfigurationNoCluster) { const auto& configJson {nlohmann::json::parse(R"({ "vulnerability-detection": { @@ -246,25 +250,7 @@ TEST_F(PolicyManagerTest, validConfigurationDefaultValues) } })")}; - m_policyManager->initialize(configJson); - - EXPECT_TRUE(m_policyManager->isVulnerabilityDetectionEnabled()); - EXPECT_TRUE(m_policyManager->isIndexerEnabled()); - - EXPECT_EQ(m_policyManager->getFeedUpdateTime(), 3600); - - EXPECT_EQ(m_policyManager->getHostList().count("http://localhost:9200"), 1); - - EXPECT_STREQ(m_policyManager->getUsername().c_str(), ""); - EXPECT_STREQ(m_policyManager->getPassword().c_str(), ""); - EXPECT_STREQ(m_policyManager->getCertificate().c_str(), ""); - EXPECT_STREQ(m_policyManager->getKey().c_str(), ""); - EXPECT_EQ(m_policyManager->getCAList().size(), 0); - EXPECT_EQ(m_policyManager->getCTIUrl(), "cti-url.com"); - EXPECT_EQ(m_policyManager->getTranslationLRUSize(), 2048); - EXPECT_EQ(m_policyManager->getOsdataLRUSize(), 1000); - EXPECT_STREQ("wazuh-states-vulnerabilities-default", - m_policyManager->getIndexerConfiguration().at("name").get_ref().c_str()); + EXPECT_ANY_THROW(m_policyManager->initialize(configJson)); } TEST_F(PolicyManagerTest, validConfigurationDefaultValuesWithClusterName) @@ -274,7 +260,8 @@ TEST_F(PolicyManagerTest, validConfigurationDefaultValuesWithClusterName) "enabled": "yes", "index-status": "yes", "cti-url": "cti-url.com", - "clusterName":"cluster01" + "clusterName":"cluster01", + "clusterEnabled":false }, "indexer": { "enabled": "yes" @@ -320,7 +307,9 @@ TEST_F(PolicyManagerTest, validConfigurationDefaultValuesNoIndexer) "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com" + "cti-url": "cti-url.com", + "clusterName":"cluster01", + "clusterEnabled":false } })")}; @@ -346,7 +335,9 @@ TEST_F(PolicyManagerTest, 
validConfigurationVulnerabilityScannerIgnoreIndexStatu "index-status": "yes", "feed-update-interval": "60m", "offline-url": "file:///var/algo.tar.gz", - "cti-url": "https://cti-url.com" + "cti-url": "https://cti-url.com", + "clusterName":"cluster01", + "clusterEnabled":false }, "indexer": { "enabled": "no", @@ -375,7 +366,9 @@ TEST_F(PolicyManagerTest, validConfigurationCheckFeedUpdateIntervalLessThan60m) "enabled": "yes", "index-status": "yes", "feed-update-interval": "10m", - "cti-url": "https://cti-url.com" + "cti-url": "https://cti-url.com", + "clusterName":"cluster01", + "clusterEnabled":false } })")}; EXPECT_NO_THROW(m_policyManager->initialize(configJson)); @@ -399,7 +392,9 @@ TEST_F(PolicyManagerTest, validConfigurationCheckFeedUpdateIntervalGreaterThan60 "enabled": "yes", "index-status": "yes", "feed-update-interval": "61m", - "cti-url": "https://cti-url.com" + "cti-url": "https://cti-url.com", + "clusterName":"cluster01", + "clusterEnabled":false } })")}; EXPECT_NO_THROW(m_policyManager->initialize(configJson)); @@ -418,7 +413,9 @@ TEST_F(PolicyManagerTest, validConfigurationCheckFeedUpdateIntervalGreaterThan60 const auto& UPDATER_BASIC_CONFIG {nlohmann::json::parse(R"({ "vulnerability-detection": { "enabled": "yes", - "index-status": "yes" + "index-status": "yes", + "clusterName":"cluster01", + "clusterEnabled":false }, "updater": { "interval": 3600, diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp index 45897026db8..19a529be857 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp @@ -33,8 +33,7 @@ namespace NSResultIndexerTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp index c384f1dc278..93400401d8a 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp @@ -91,7 +91,7 @@ TEST_F(ScanAgentListTest, SingleDeleteAndInsertTest) data = &jsonData; auto contextData = std::make_shared>(data); - contextData->m_agents.push_back({"001", "test_agent_name", "4.8.0", "192.168.0.1", "node01"}); + contextData->m_agents.push_back({"001", "test_agent_name", "4.8.0", "192.168.0.1"}); scanAgentList->handleRequest(contextData); } @@ -160,7 +160,7 @@ TEST_F(ScanAgentListTest, EmptyPackagesWDBResponseTest) data = &jsonData; auto contextData = std::make_shared>(data); - contextData->m_agents.push_back({"001", "test_agent_name", "4.8.0", "192.168.0.1", "node01"}); + contextData->m_agents.push_back({"001", "test_agent_name", "4.8.0", "192.168.0.1"}); scanAgentList->handleRequest(contextData); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanContext_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanContext_test.cpp index e9dc958c5e9..61577749891 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanContext_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanContext_test.cpp @@ -36,8 +36,7 @@ namespace NSScanContextTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - 
"node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_packages", "data": { @@ -66,8 +65,7 @@ namespace NSScanContextTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_packages", "data": { @@ -96,8 +94,7 @@ namespace NSScanContextTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_osinfo", "data": { @@ -126,8 +123,7 @@ namespace NSScanContextTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_hotfixes", "data": { @@ -145,8 +141,7 @@ namespace NSScanContextTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_hotfixes", "data": { @@ -164,8 +159,7 @@ namespace NSScanContextTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_hotfixes", "data": { @@ -181,8 +175,7 @@ namespace NSScanContextTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "state", "data": { @@ -214,8 +207,7 @@ namespace NSScanContextTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "state", "data": { @@ -249,8 +241,7 @@ namespace NSScanContextTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "state", "data": { @@ -272,8 +263,7 @@ namespace NSScanContextTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "integrity_clear", "data": { @@ -342,11 +332,7 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasPackagesInserted) EXPECT_STREQ( scanContext->agentVersion().data(), fbStringGetHelper( - SyscollectorDeltas::GetDelta(reinterpret_cast(buffer))->agent_info()->agent_version())); - EXPECT_STREQ(scanContext->agentNodeName().data(), - fbStringGetHelper( - SyscollectorDeltas::GetDelta(reinterpret_cast(buffer))->agent_info()->node_name())); - + SyscollectorDeltas::GetDelta(reinterpret_cast(buffer))->agent_info()->agent_version())); EXPECT_STREQ( scanContext->packageName().data(), fbStringGetHelper( @@ -465,10 +451,6 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasPackagesDeleted) scanContext->agentVersion().data(), fbStringGetHelper( SyscollectorDeltas::GetDelta(reinterpret_cast(buffer))->agent_info()->agent_version())); - EXPECT_STREQ(scanContext->agentNodeName().data(), - fbStringGetHelper( - SyscollectorDeltas::GetDelta(reinterpret_cast(buffer))->agent_info()->node_name())); - EXPECT_STREQ( scanContext->packageName().data(), fbStringGetHelper( @@ -571,10 +553,6 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasOsInfo) scanContext->agentVersion().data(), fbStringGetHelper( SyscollectorDeltas::GetDelta(reinterpret_cast(buffer))->agent_info()->agent_version())); - EXPECT_STREQ(scanContext->agentNodeName().data(), - fbStringGetHelper( - SyscollectorDeltas::GetDelta(reinterpret_cast(buffer))->agent_info()->node_name())); - EXPECT_STREQ( 
scanContext->osHostName().data(), fbStringGetHelper( @@ -683,9 +661,6 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasHotfixesInserted) scanContext->agentVersion().data(), fbStringGetHelper( SyscollectorDeltas::GetDelta(reinterpret_cast(buffer))->agent_info()->agent_version())); - EXPECT_STREQ(scanContext->agentNodeName().data(), - fbStringGetHelper( - SyscollectorDeltas::GetDelta(reinterpret_cast(buffer))->agent_info()->node_name())); EXPECT_STREQ(scanContext->osHostName().data(), osData.hostName.c_str()); EXPECT_STREQ(scanContext->osArchitecture().data(), osData.architecture.c_str()); @@ -750,9 +725,6 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasHotfixesDeleted) scanContext->agentVersion().data(), fbStringGetHelper( SyscollectorDeltas::GetDelta(reinterpret_cast(buffer))->agent_info()->agent_version())); - EXPECT_STREQ(scanContext->agentNodeName().data(), - fbStringGetHelper( - SyscollectorDeltas::GetDelta(reinterpret_cast(buffer))->agent_info()->node_name())); EXPECT_STREQ(scanContext->osHostName().data(), osData.hostName.c_str()); EXPECT_STREQ(scanContext->osArchitecture().data(), osData.architecture.c_str()); @@ -816,11 +788,6 @@ TEST_F(ScanContextTest, TestSyscollectorSynchronizationStateOsInfo) fbStringGetHelper(SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)) ->agent_info() ->agent_version())); - EXPECT_STREQ( - scanContext->agentNodeName().data(), - fbStringGetHelper( - SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer))->agent_info()->node_name())); - EXPECT_STREQ(scanContext->osHostName().data(), fbStringGetHelper(SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)) ->data_as_state() @@ -947,11 +914,6 @@ TEST_F(ScanContextTest, TestSyscollectorSynchronizationStatePackages) fbStringGetHelper(SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)) ->agent_info() ->agent_version())); - EXPECT_STREQ( - scanContext->agentNodeName().data(), - fbStringGetHelper( - SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer))->agent_info()->node_name())); - EXPECT_STREQ(scanContext->packageName().data(), fbStringGetHelper(SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)) ->data_as_state() @@ -1089,11 +1051,6 @@ TEST_F(ScanContextTest, TestSyscollectorSynchronizationStateHotfixes) fbStringGetHelper(SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)) ->agent_info() ->agent_version())); - EXPECT_STREQ( - scanContext->agentNodeName().data(), - fbStringGetHelper( - SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer))->agent_info()->node_name())); - EXPECT_STREQ(scanContext->osHostName().data(), osData.hostName.c_str()); EXPECT_STREQ(scanContext->osArchitecture().data(), osData.architecture.c_str()); EXPECT_STREQ(scanContext->osName().data(), osData.name.c_str()); @@ -1141,10 +1098,6 @@ TEST_F(ScanContextTest, TestSyscollectorSynchronizationIntegrityClear) fbStringGetHelper(SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)) ->agent_info() ->agent_version())); - EXPECT_STREQ( - scanContext->agentNodeName().data(), - fbStringGetHelper( - SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer))->agent_info()->node_name())); } TEST_F(ScanContextTest, TestBuildCPEWindows10) @@ -1155,8 +1108,7 @@ TEST_F(ScanContextTest, TestBuildCPEWindows10) "agent_info": { "agent_id": "001", "agent_ip": "any", - "agent_name": "DESKTOP-5RL9J34", - "node_name": "node01" + "agent_name": "DESKTOP-5RL9J34" }, "data": { "attributes": { @@ -1217,8 +1169,7 @@ TEST_F(ScanContextTest, 
TestBuildCPECentos) "agent_info": { "agent_id": "007", "agent_ip": "any", - "agent_name": "b256669deac2", - "node_name": "node01" + "agent_name": "b256669deac2" }, "data": { "attributes": { @@ -1281,8 +1232,7 @@ TEST_F(ScanContextTest, TestBuildCPERedHat) "agent_info": { "agent_id": "007", "agent_ip": "any", - "agent_name": "b256669deac2", - "node_name": "node01" + "agent_name": "b256669deac2" }, "data": { "attributes": { @@ -1345,8 +1295,7 @@ TEST_F(ScanContextTest, TestBuildCPEOpensuseTumbleweed) "agent_info": { "agent_id": "018", "agent_ip": "any", - "agent_name": "f8d5df70094a", - "node_name": "node01" + "agent_name": "f8d5df70094a" }, "data": { "attributes": { @@ -1404,8 +1353,7 @@ TEST_F(ScanContextTest, TestBuildCPEOpensuseLeap) "agent_info": { "agent_id": "018", "agent_ip": "any", - "agent_name": "f8d5df70094a", - "node_name": "node01" + "agent_name": "f8d5df70094a" }, "data": { "attributes": { @@ -1463,8 +1411,7 @@ TEST_F(ScanContextTest, TestBuildCPENameFedora) "agent_info": { "agent_id": "019", "agent_ip": "any", - "agent_name": "5624f3047059", - "node_name": "node01" + "agent_name": "5624f3047059" }, "data": { "attributes": { @@ -1524,8 +1471,7 @@ TEST_F(ScanContextTest, TestJSONMessagePackageDelete) "agent_id" : "001", "agent_ip" : "10.0.0.1", "agent_name" : "agent1", - "agent_version" : "v4.8.0", - "node_name" : "node01" + "agent_version" : "v4.8.0" }, "action": "deletePackage", "data": { @@ -1549,7 +1495,6 @@ TEST_F(ScanContextTest, TestJSONMessagePackageDelete) EXPECT_EQ(scanContext->agentIp(), "10.0.0.1"); EXPECT_EQ(scanContext->agentName(), "agent1"); EXPECT_EQ(scanContext->agentVersion(), "v4.8.0"); - EXPECT_EQ(scanContext->agentNodeName(), "node01"); EXPECT_EQ(scanContext->packageName(), "packageName"); EXPECT_EQ(scanContext->packageVersion(), "1.0.0"); EXPECT_EQ(scanContext->packageLocation(), "/usr/bin/packageName"); @@ -1574,8 +1519,7 @@ TEST_F(ScanContextTest, TestJSONMessageHotfixDelete) "agent_id" : "001", "agent_ip" : "10.0.0.1", "agent_name" : "agent1", - "agent_version" : "v4.8.0", - "node_name" : "node01" + "agent_version" : "v4.8.0" }, "action": "deleteHotfix", "data": { @@ -1594,7 +1538,6 @@ TEST_F(ScanContextTest, TestJSONMessageHotfixDelete) EXPECT_EQ(scanContext->agentIp(), "10.0.0.1"); EXPECT_EQ(scanContext->agentName(), "agent1"); EXPECT_EQ(scanContext->agentVersion(), "v4.8.0"); - EXPECT_EQ(scanContext->agentNodeName(), "node01"); EXPECT_EQ(scanContext->packageName(), ""); EXPECT_EQ(scanContext->packageVersion(), ""); EXPECT_EQ(scanContext->packageLocation(), ""); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp index d1bf31558f3..450fa5364d5 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp @@ -41,8 +41,7 @@ namespace NSScanOrchestratorTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_packages", "data": { @@ -72,8 +71,7 @@ namespace NSScanOrchestratorTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_packages", "data": { @@ -103,8 +101,7 @@ namespace NSScanOrchestratorTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + 
"agent_name": "focal" }, "data_type": "dbsync_hotfixes", "data": { @@ -122,8 +119,7 @@ namespace NSScanOrchestratorTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_hotfixes", "data": { @@ -141,8 +137,7 @@ namespace NSScanOrchestratorTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "dbsync_osinfo", "data": { @@ -171,8 +166,7 @@ namespace NSScanOrchestratorTest "agent_info": { "agent_id": "001", "agent_ip": "192.168.33.20", - "agent_name": "focal", - "node_name": "node01" + "agent_name": "focal" }, "data_type": "integrity_clear", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp index 05cd8c8f66d..cb7d6377875 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp @@ -35,8 +35,7 @@ namespace NSScanOsAlertDetailsBuilderTest const std::string OS_SCAN_MSG = R"({ "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, "data_type": "state", "data": { @@ -74,7 +73,9 @@ void ScanOsAlertDetailsBuilderTest::SetUp() "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com" + "cti-url": "cti-url.com", + "clusterName":"cluster01", + "clusterEnabled":false }, "osdataLRUSize":1000 })")}; diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_0.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_0.json index aab20c9415b..ffd4a2ddeda 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_0.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_0.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_1.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_1.json index e47f11641ec..93bb5a47120 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_1.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_1.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_2.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_2.json index 31e8b548d45..fbd6758023e 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_2.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_2.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_3.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_3.json index 6404a219757..38190129d66 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_3.json +++ 
b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-001/step_3.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-002/step_0.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-002/step_0.json index 292f36830f4..857ea78d044 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-002/step_0.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-002/step_0.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-002/step_1.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-002/step_1.json index 58229f63b13..82b0c6ac908 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-002/step_1.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-002/step_1.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-003/step_0.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-003/step_0.json index 71c9737606d..11d52924f36 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-003/step_0.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-003/step_0.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-003/step_1.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-003/step_1.json index 0795c0dbb65..f7d2df22e0f 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-003/step_1.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-003/step_1.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "integrity_clear", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-004/step_0.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-004/step_0.json index 2cb93aa68b9..d1363eb668c 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-004/step_0.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-004/step_0.json @@ -3,8 +3,7 @@ "agent_id": "001", "agent_version": "4.8.0", "agent_name": "test_agent_name", - "agent_ip": "10.0.0.1", - "node_name": "test_node_name" + "agent_ip": "10.0.0.1" }, "action": "upgradeAgentDB" } diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-005/step_0.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-005/step_0.json index 874185d1635..b18913ec627 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-005/step_0.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-005/step_0.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "000", - "node_name": "test_node_name" + "agent_id": "000" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-005/step_1.json 
b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-005/step_1.json index 523ada5c110..072ef1a2a4a 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-005/step_1.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-005/step_1.json @@ -3,8 +3,7 @@ "agent_id": "000", "agent_version": "4.8.0", "agent_name": "test_agent_name", - "agent_ip": "10.0.0.1", - "node_name": "test_node_name" + "agent_ip": "10.0.0.1" }, "action": "upgradeAgentDB" } diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_0.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_0.json index 7d46d32f3a6..e794a5ff264 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_0.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_0.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_1.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_1.json index 58229f63b13..82b0c6ac908 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_1.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_1.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_2.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_2.json index fa59d919121..c7ac591b21e 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_2.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_2.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, "data_type": "dbsync_osinfo", "data": { @@ -21,4 +20,4 @@ "version":"#172-Ubuntu SMP Fri Jul 7 16:10:02 UTC 2023" }, "operation": "INSERTED" - } + } diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_3.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_3.json index 204ba5e713f..6b1164293d3 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_3.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_3.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "002", - "node_name": "test_node_name" + "agent_id": "002" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_4.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_4.json index 9c63583474b..ae798b5ac7b 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_4.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_4.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "003", - "node_name": "test_node_name" + "agent_id": "003" }, "data_type": "dbsync_osinfo", "data": { @@ -21,4 +20,4 @@ "version":"#172-Ubuntu SMP Fri Jul 7 16:10:02 UTC 2023" }, "operation": "INSERTED" - } + } diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_5.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_5.json index d05e3fd5555..062c726bcfb 100644 --- 
a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_5.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-006/step_5.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "003", - "node_name": "test_node_name" + "agent_id": "003" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_0.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_0.json index cc7b9f8b429..b61de8b5b05 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_0.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_0.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "000", - "node_name": "test_node_name" + "agent_id": "000" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_1.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_1.json index 264f16857b6..b3951392de4 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_1.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_1.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "000", - "node_name": "test_node_name" + "agent_id": "000" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_2.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_2.json index e5dddf850f9..8d870b48ff4 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_2.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_2.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "000", - "node_name": "test_node_name" + "agent_id": "000" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_3.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_3.json index 5967dbbcd42..d723e68ebcb 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_3.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_3.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "000", - "node_name": "test_node_name" + "agent_id": "000" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_4.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_4.json index 0b6c0765b47..519d780a9b1 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_4.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_4.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_5.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_5.json index 601545ad0b7..96e267e38e9 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_5.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/step_5.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/step_0.json 
b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/step_0.json index c7f825cbc4f..37590f248d3 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/step_0.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/step_0.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "000", - "node_name": "test_node_name" + "agent_id": "000" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/step_1.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/step_1.json index 0b072893383..58f3846d75f 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/step_1.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/step_1.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "000", - "node_name": "test_node_name" + "agent_id": "000" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/step_2.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/step_2.json index 0b072893383..58f3846d75f 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/step_2.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/step_2.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "000", - "node_name": "test_node_name" + "agent_id": "000" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.json index 483f45e6514..96905161921 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.json @@ -2,7 +2,9 @@ "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", + "clusterName":"cluster01", + "clusterEnabled":false }, "indexer": { "enabled": "yes", @@ -18,6 +20,5 @@ "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" } - }, - "managerNodeName": "wazuh-manager" + } } diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file1.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file1.json index 71c9737606d..11d52924f36 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file1.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file1.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_packages", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file2.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file2.json index 389eabae6bd..624bfaa0d88 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file2.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file2.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "000", - "node_name": "test_node_name" + "agent_id": "000" }, "data_type": "state", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file3.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file3.json index a3c4ca4f2d9..780cbfcf450 100644 --- 
a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file3.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file3.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "dbsync_osinfo", "data": { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file4.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file4.json index 394af815f43..5dd48a42ac0 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file4.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/file4.json @@ -1,7 +1,6 @@ { "agent_info": { - "agent_id": "001", - "node_name": "test_node_name" + "agent_id": "001" }, "data_type": "state", "data": { diff --git a/src/wazuh_modules/wm_syscollector.c b/src/wazuh_modules/wm_syscollector.c index d09290f080e..b2f8fcfec34 100644 --- a/src/wazuh_modules/wm_syscollector.c +++ b/src/wazuh_modules/wm_syscollector.c @@ -63,7 +63,6 @@ router_provider_create_func router_provider_create_func_ptr = NULL; router_provider_send_fb_func router_provider_send_fb_func_ptr = NULL; ROUTER_PROVIDER_HANDLE rsync_handle = NULL; ROUTER_PROVIDER_HANDLE syscollector_handle = NULL; -char *manager_node_name = NULL; int disable_manager_scan = 1; #endif // CLIENT @@ -105,7 +104,7 @@ static void wm_sys_send_diff_message(const void* data) { #ifndef CLIENT if(!disable_manager_scan) { - char* msg_to_send = adapt_delta_message(data, "localhost", "000", "127.0.0.1", manager_node_name, NULL); + char* msg_to_send = adapt_delta_message(data, "localhost", "000", "127.0.0.1", NULL); if (msg_to_send && router_provider_send_fb_func_ptr) { router_provider_send_fb_func_ptr(syscollector_handle, msg_to_send, syscollector_deltas_SCHEMA); } @@ -119,7 +118,7 @@ static void wm_sys_send_dbsync_message(const void* data) { #ifndef CLIENT if(!disable_manager_scan) { - char* msg_to_send = adapt_sync_message(data, "localhost", "000", "127.0.0.1", manager_node_name, NULL); + char* msg_to_send = adapt_sync_message(data, "localhost", "000", "127.0.0.1", NULL); if (msg_to_send && router_provider_send_fb_func_ptr) { router_provider_send_fb_func_ptr(rsync_handle, msg_to_send, syscollector_synchronization_SCHEMA); } @@ -197,7 +196,6 @@ void* wm_sys_main(wm_sys_t *sys) { } else { mwarn("Failed to load router module."); } - manager_node_name = get_node_name(); #endif // CLIENT } else { #ifdef __hpux @@ -262,7 +260,6 @@ void* wm_sys_main(wm_sys_t *sys) { #ifndef CLIENT so_free_library(router_module_ptr); router_module_ptr = NULL; - os_free(manager_node_name); #endif // CLIENT syscollector_module = NULL; mtinfo(WM_SYS_LOGTAG, "Module finished."); diff --git a/src/wazuh_modules/wm_vulnerability_scanner.c b/src/wazuh_modules/wm_vulnerability_scanner.c index e331a443fe0..48020764103 100644 --- a/src/wazuh_modules/wm_vulnerability_scanner.c +++ b/src/wazuh_modules/wm_vulnerability_scanner.c @@ -10,15 +10,15 @@ */ #include "wm_vulnerability_scanner.h" +#include "config/indexer-config.h" #include "external/cJSON/cJSON.h" #include "sym_load.h" #include "vulnerability_scanner.h" -#include "config/indexer-config.h" -static void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t * data); -static void wm_vulnerability_scanner_destroy(wm_vulnerability_scanner_t * data); -static void wm_vulnerability_scanner_stop(wm_vulnerability_scanner_t * data); -cJSON* wm_vulnerability_scanner_dump(wm_vulnerability_scanner_t * data); +static void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data); 
+static void wm_vulnerability_scanner_destroy(wm_vulnerability_scanner_t* data); +static void wm_vulnerability_scanner_stop(wm_vulnerability_scanner_t* data); +cJSON* wm_vulnerability_scanner_dump(wm_vulnerability_scanner_t* data); void* vulnerability_scanner_module = NULL; vulnerability_scanner_start_func vulnerability_scanner_start_ptr = NULL; @@ -34,35 +34,42 @@ const wm_context WM_VULNERABILITY_SCANNER_CONTEXT = { .query = NULL, }; -static void wm_vulnerability_scanner_log_config(cJSON * config_json) +static void wm_vulnerability_scanner_log_config(cJSON* config_json) { - if (config_json) { - char * config_str = cJSON_PrintUnformatted(config_json); - if (config_str) { + if (config_json) + { + char* config_str = cJSON_PrintUnformatted(config_json); + if (config_str) + { mtdebug1(WM_VULNERABILITY_SCANNER_LOGTAG, "%s", config_str); cJSON_free(config_str); } } } -void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t * data) { +void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data) +{ mtinfo(WM_VULNERABILITY_SCANNER_LOGTAG, "Starting vulnerability_scanner module."); - if (vulnerability_scanner_module = so_get_module_handle("vulnerability_scanner"), vulnerability_scanner_module) { + if (vulnerability_scanner_module = so_get_module_handle("vulnerability_scanner"), vulnerability_scanner_module) + { vulnerability_scanner_start_ptr = so_get_function_sym(vulnerability_scanner_module, "vulnerability_scanner_start"); vulnerability_scanner_stop_ptr = so_get_function_sym(vulnerability_scanner_module, "vulnerability_scanner_stop"); // Check for missing configurations. These configurations may miss when using the old deprecated VD config. - if (!cJSON_GetObjectItem(data->vulnerability_detection, "enabled")) { + if (!cJSON_GetObjectItem(data->vulnerability_detection, "enabled")) + { cJSON_AddStringToObject(data->vulnerability_detection, "enabled", "yes"); } - if (!cJSON_GetObjectItem(data->vulnerability_detection, "index-status")) { + if (!cJSON_GetObjectItem(data->vulnerability_detection, "index-status")) + { cJSON_AddStringToObject(data->vulnerability_detection, "index-status", "yes"); } - if (!cJSON_GetObjectItem(data->vulnerability_detection, "feed-update-interval")) { + if (!cJSON_GetObjectItem(data->vulnerability_detection, "feed-update-interval")) + { cJSON_AddStringToObject(data->vulnerability_detection, "feed-update-interval", "60m"); } @@ -70,53 +77,69 @@ void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t * data) { * If the cluster is enabled, the cluster name is the cluster name read from the configuration file. * If the cluster is disabled, the cluster name is the hostname, known as the manager name. 
*/ - const bool cluster_status = get_cluster_status() + const bool cluster_status = get_cluster_status(); cJSON_AddBoolToObject(data->vulnerability_detection, "clusterEnabled", cluster_status); - if (cluster_status) { + if (cluster_status) + { char* cluster_name = get_cluster_name(); cJSON_AddStringToObject(data->vulnerability_detection, "clusterName", cluster_name); os_free(cluster_name); char* manager_node_name = get_node_name(); - cJSON_AddStringToObject(data->vulnerability_detection, "managerNodeName", manager_node_name); + cJSON_AddStringToObject(data->vulnerability_detection, "clusterNodeName", manager_node_name); os_free(manager_node_name); } - else { + else + { char hostname[HOST_NAME_MAX + 1]; - if (gethostname(hostname, HOST_NAME_MAX) == 0) { + if (gethostname(hostname, HOST_NAME_MAX) == 0) + { cJSON_AddStringToObject(data->vulnerability_detection, "clusterName", hostname); } - else { + else + { cJSON_AddStringToObject(data->vulnerability_detection, "clusterName", "undefined"); } + + cJSON_AddStringToObject(data->vulnerability_detection, "clusterNodeName", "undefined"); } - if (vulnerability_scanner_start_ptr) { - cJSON *config_json = cJSON_CreateObject(); - cJSON_AddItemToObject(config_json, "vulnerability-detection", cJSON_Duplicate(data->vulnerability_detection, TRUE)); + if (vulnerability_scanner_start_ptr) + { + cJSON* config_json = cJSON_CreateObject(); + cJSON_AddItemToObject( + config_json, "vulnerability-detection", cJSON_Duplicate(data->vulnerability_detection, TRUE)); cJSON_AddNumberToObject(config_json, "wmMaxEps", wm_max_eps); - cJSON_AddNumberToObject(config_json, "translationLRUSize", getDefine_Int("vulnerability-detection", "translation_lru_size", 1, 100000)); - cJSON_AddNumberToObject(config_json, "osdataLRUSize", getDefine_Int("vulnerability-detection", "osdata_lru_size", 1, 100000)); - cJSON_AddNumberToObject(config_json, "managerDisabledScan", getDefine_Int("vulnerability-detection", "disable_scan_manager", 0, 1)); - - - if(indexer_config == NULL) { + cJSON_AddNumberToObject(config_json, + "translationLRUSize", + getDefine_Int("vulnerability-detection", "translation_lru_size", 1, 100000)); + cJSON_AddNumberToObject( + config_json, "osdataLRUSize", getDefine_Int("vulnerability-detection", "osdata_lru_size", 1, 100000)); + cJSON_AddNumberToObject(config_json, + "managerDisabledScan", + getDefine_Int("vulnerability-detection", "disable_scan_manager", 0, 1)); + + if (indexer_config == NULL) + { cJSON_AddItemToObject(config_json, "indexer", cJSON_CreateObject()); } - else { + else + { cJSON_AddItemToObject(config_json, "indexer", cJSON_Duplicate(indexer_config, TRUE)); } wm_vulnerability_scanner_log_config(config_json); vulnerability_scanner_start_ptr(mtLoggingFunctionsWrapper, config_json); cJSON_Delete(config_json); } - else { + else + { mtwarn(WM_VULNERABILITY_SCANNER_LOGTAG, "Unable to start vulnerability_scanner module."); return NULL; } } - else { + else + { mtwarn(WM_VULNERABILITY_SCANNER_LOGTAG, "Unable to load vulnerability_scanner module."); return NULL; } @@ -124,12 +147,12 @@ void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t * data) { return NULL; } -void wm_vulnerability_scanner_destroy(wm_vulnerability_scanner_t * data) +void wm_vulnerability_scanner_destroy(wm_vulnerability_scanner_t* data) { free(data); } -void wm_vulnerability_scanner_stop(__attribute__((unused))wm_vulnerability_scanner_t *data) +void wm_vulnerability_scanner_stop(__attribute__((unused)) wm_vulnerability_scanner_t* data) { mtinfo(WM_VULNERABILITY_SCANNER_LOGTAG, 
"Stopping vulnerability_scanner module."); if (vulnerability_scanner_stop_ptr) @@ -142,9 +165,9 @@ void wm_vulnerability_scanner_stop(__attribute__((unused))wm_vulnerability_scann } } -cJSON* wm_vulnerability_scanner_dump(wm_vulnerability_scanner_t * data) +cJSON* wm_vulnerability_scanner_dump(wm_vulnerability_scanner_t* data) { - cJSON *root = cJSON_CreateObject(); + cJSON* root = cJSON_CreateObject(); cJSON_AddItemToObject(root, "vulnerability-detection", cJSON_Duplicate(data->vulnerability_detection, TRUE)); cJSON_DeleteItemFromObject(cJSON_GetObjectItem(root, "vulnerability-detection"), "index-status"); cJSON_DeleteItemFromObject(cJSON_GetObjectItem(root, "vulnerability-detection"), "cti-url"); From 374608d8db2c410c36488954d1ba83c503afbc57 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 10 May 2024 01:53:35 -0300 Subject: [PATCH 045/419] Add more qa tests. --- .../qa/test_data_policy/001/config.json | 8 +-- .../test_data_policy/001/configDisabled.json | 8 +-- .../001/configDisabledAndManagerDisabled.json | 5 +- .../001/configManagerDisabled.json | 5 +- .../qa/test_data_policy/002/config.json | 7 +- .../002/configManagerDisabled.json | 10 +-- .../src/policyManager/policyManager.hpp | 13 ++-- .../scanOrchestrator/eventDetailsBuilder.hpp | 4 +- .../src/scanOrchestrator/scanContext.hpp | 2 +- .../tests/unit/alertClearBuilder_test.cpp | 8 +-- .../tests/unit/eventDetailsBuilder_test.cpp | 26 +++---- .../eventPackageAlertDetailsBuilder_test.cpp | 8 +-- .../tests/unit/eventSendReport_test.hpp | 8 +-- .../tests/unit/osDataCache_test.hpp | 8 +-- .../tests/unit/policyManager_test.cpp | 64 +++++++++--------- .../unit/scanOsAlertDetailsBuilder_test.cpp | 8 +-- .../testtool/scanner/TC-007/config.json | 2 +- .../testtool/scanner/TC-008/config.json | 2 +- .../testtool/scanner/config.json | 24 +++---- src/wazuh_modules/wm_vulnerability_scanner.c | 67 ++++++++++--------- 20 files changed, 140 insertions(+), 147 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/config.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/config.json index cf23bce0adf..5759ee9f737 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/config.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/config.json @@ -2,9 +2,7 @@ "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", - "clusterName":"cluster01", - "clusterEnabled":false + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" }, "indexer": { "enabled": "yes", @@ -20,5 +18,7 @@ "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" } - } + }, + "clusterName":"cluster01", + "clusterEnabled":false } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabled.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabled.json index 2f21d6edb52..0713132c2af 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabled.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabled.json @@ -2,9 +2,7 @@ "vulnerability-detection": { "enabled": "no", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", - "clusterName":"cluster01", - "clusterEnabled":false + "cti-url": 
"https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" }, "indexer": { "enabled": "yes", @@ -20,5 +18,7 @@ "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" } - } + }, + "clusterName":"cluster01", + "clusterEnabled":false } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabledAndManagerDisabled.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabledAndManagerDisabled.json index 94db8c61344..cd8dafdafb9 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabledAndManagerDisabled.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configDisabledAndManagerDisabled.json @@ -19,6 +19,7 @@ "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" } }, - "managerNodeName": "wazuh-manager", - "managerDisabledScan": 1 + "managerDisabledScan": 1, + "clusterName":"cluster01", + "clusterEnabled":false } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configManagerDisabled.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configManagerDisabled.json index 519d55ab6d2..439c15b97e6 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configManagerDisabled.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/configManagerDisabled.json @@ -19,6 +19,7 @@ "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" } }, - "managerNodeName": "wazuh-manager", - "managerDisabledScan": 1 + "managerDisabledScan": 1, + "clusterName":"cluster01", + "clusterEnabled":false } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/config.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/config.json index 614e4726a93..5759ee9f737 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/config.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/config.json @@ -2,9 +2,7 @@ "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", - "clusterName":"cluster01", - "clusterEnabled":false + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" }, "indexer": { "enabled": "yes", @@ -21,5 +19,6 @@ "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" } }, - "managerNodeName": "wazuh-manager" + "clusterName":"cluster01", + "clusterEnabled":false } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/configManagerDisabled.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/configManagerDisabled.json index fd85e2c72f7..90a04ed750e 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/configManagerDisabled.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/configManagerDisabled.json @@ -2,9 +2,7 @@ "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", - "clusterName":"cluster01", - "clusterEnabled":false + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" }, "indexer": { "enabled": "yes", @@ -21,6 +19,8 @@ "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" } }, - "managerNodeName": "wazuh-manager", - "managerDisabledScan": 1 + "managerDisabledScan": 1, 
+ "clusterNodeName": "wazuh-manager", + "clusterName":"cluster01", + "clusterEnabled":false } diff --git a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp index e4af83d16d6..cf72d2c7135 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp @@ -116,8 +116,7 @@ class PolicyManager final : public Singleton newPolicy["indexer"]["ssl"]["key"] = ""; } newPolicy["indexer"]["name"] = - STATES_VD_INDEX_NAME_PREFIX + - newPolicy.at("vulnerability-detection").at("clusterName").get_ref(); + STATES_VD_INDEX_NAME_PREFIX + newPolicy.at("clusterName").get_ref(); if (!newPolicy.at("vulnerability-detection").contains("feed-update-interval")) { @@ -685,11 +684,11 @@ class PolicyManager final : public Singleton * @brief Retrieves the name of the manager node for vulnerability detection. * * This function retrieves the name of the manager node for vulnerability detection - * from the configuration and returns it as a std::string_view. + * from the configuration and returns it as a std::string. * - * @return std::string_view The name of the manager node for vulnerability detection. + * @return std::string The name of the manager node for vulnerability detection. */ - std::string_view getClusterNodeName() const + std::string getClusterNodeName() const { return m_configuration.at("clusterNodeName").get_ref(); } @@ -701,7 +700,7 @@ class PolicyManager final : public Singleton */ bool getClusterStatus() const { - return m_configuration.at("vulnerability-detection").at("clusterEnabled").get(); + return m_configuration.at("clusterEnabled").get(); } /** @@ -711,7 +710,7 @@ class PolicyManager final : public Singleton */ std::string getClusterName() const { - return m_configuration.at("vulnerability-detection").at("clusterName").get_ref(); + return m_configuration.at("clusterName").get_ref(); } /** diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp index 8dae49b9315..b5eb178393d 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDetailsBuilder.hpp @@ -227,9 +227,7 @@ class TEventDetailsBuilder final : public AbstractHandlerclusterName(); ecsData["wazuh"]["schema"]["version"] = WAZUH_SCHEMA_VERSION; json["data"] = std::move(ecsData); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index 444693adc4b..76026488efd 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -1373,7 +1373,7 @@ struct TScanContext final */ std::string_view clusterNodeName() const { - static std::string_view clusterNodeName = PolicyManager::instance().getClusterNodeName(); + static std::string clusterNodeName = PolicyManager::instance().getClusterNodeName(); return clusterNodeName; } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/alertClearBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/alertClearBuilder_test.cpp index d616eec9ceb..fd58b482499 100644 --- 
a/src/wazuh_modules/vulnerability_scanner/tests/unit/alertClearBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/alertClearBuilder_test.cpp @@ -62,11 +62,11 @@ void AlertClearBuilderTest::SetUp() "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com", - "clusterName":"clusterName", - "clusterEnabled":false + "cti-url": "cti-url.com" }, - "osdataLRUSize":1000 + "osdataLRUSize":1000, + "clusterName":"clusterName", + "clusterEnabled":false })")}; PolicyManager::instance().initialize(configJson); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp index 9725d416932..2a179105651 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp @@ -138,11 +138,11 @@ void EventDetailsBuilderTest::SetUp() "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com", - "clusterName":"cluster01", - "clusterEnabled":false + "cti-url": "cti-url.com" }, - "osdataLRUSize":1000 + "osdataLRUSize":1000, + "clusterName":"cluster01", + "clusterEnabled":false })")}; PolicyManager::instance().initialize(configJson); } @@ -320,11 +320,9 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS2) EXPECT_STREQ( elementData.at("vulnerability").at("severity").get_ref().c_str(), Utils::toSentenceCase(GetVulnerabilityDescription(fbBuilder.GetBufferPointer())->severity()->str()).c_str()); - auto vulnerabilityDetection = PolicyManager::instance().getVulnerabilityDetection(); + auto clusterName = PolicyManager::instance().getClusterName(); EXPECT_STREQ(elementData.at("wazuh").at("cluster").at("name").get_ref().c_str(), - vulnerabilityDetection.contains("clusterName") - ? vulnerabilityDetection.at("clusterName").get_ref().c_str() - : ""); + clusterName.c_str()); EXPECT_STREQ(elementData.at("wazuh").at("schema").at("version").get_ref().c_str(), WAZUH_SCHEMA_VERSION); EXPECT_STREQ(elementData.at("vulnerability").at("published_at").get_ref().c_str(), @@ -499,11 +497,9 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS3) EXPECT_STREQ( elementData.at("vulnerability").at("severity").get_ref().c_str(), Utils::toSentenceCase(GetVulnerabilityDescription(fbBuilder.GetBufferPointer())->severity()->str()).c_str()); - auto vulnerabilityDetection = PolicyManager::instance().getVulnerabilityDetection(); + auto clusterName = PolicyManager::instance().getClusterName(); EXPECT_STREQ(elementData.at("wazuh").at("cluster").at("name").get_ref().c_str(), - vulnerabilityDetection.contains("clusterName") - ? 
vulnerabilityDetection.at("clusterName").get_ref().c_str() - : ""); + clusterName.c_str()); EXPECT_STREQ(elementData.at("wazuh").at("schema").at("version").get_ref().c_str(), WAZUH_SCHEMA_VERSION); EXPECT_STREQ(elementData.at("vulnerability").at("published_at").get_ref().c_str(), @@ -745,11 +741,9 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulOsInserted) EXPECT_STREQ( elementData.at("vulnerability").at("severity").get_ref().c_str(), Utils::toSentenceCase(GetVulnerabilityDescription(fbBuilder.GetBufferPointer())->severity()->str()).c_str()); - auto vulnerabilityDetection = PolicyManager::instance().getVulnerabilityDetection(); + auto clusterName = PolicyManager::instance().getClusterName(); EXPECT_STREQ(elementData.at("wazuh").at("cluster").at("name").get_ref().c_str(), - vulnerabilityDetection.contains("clusterName") - ? vulnerabilityDetection.at("clusterName").get_ref().c_str() - : ""); + clusterName.c_str()); EXPECT_STREQ(elementData.at("vulnerability").at("published_at").get_ref().c_str(), GetVulnerabilityDescription(fbBuilder.GetBufferPointer())->datePublished()->c_str()); EXPECT_TRUE(elementData.at("vulnerability").at("detected_at").get_ref() <= diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp index 517cc1d6e93..f5e58aa25b7 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp @@ -110,11 +110,11 @@ void EventPackageAlertDetailsBuilderTest::SetUp() "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com", - "clusterName":"cluster01", - "clusterEnabled":false + "cti-url": "cti-url.com" }, - "osdataLRUSize":1000 + "osdataLRUSize":1000, + "clusterName":"cluster01", + "clusterEnabled":false })")}; PolicyManager::instance().initialize(configJson); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.hpp index 2ccc4c7852e..18061a2e4bb 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.hpp @@ -72,11 +72,11 @@ class EventSendReportTest : public ::testing::Test "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com", - "clusterName":"cluster01", - "clusterEnabled":false + "cti-url": "cti-url.com" }, - "osdataLRUSize":1000 + "osdataLRUSize":1000, + "clusterName":"cluster01", + "clusterEnabled":false })")}; PolicyManager::instance().initialize(configJson); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.hpp index 8db1d0fe55d..c70ea63b1a9 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.hpp @@ -51,11 +51,11 @@ class OsDataCacheTest : public ::testing::Test "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com", - "clusterName":"cluster01", - "clusterEnabled":false + "cti-url": "cti-url.com" }, - "osdataLRUSize":1000 + "osdataLRUSize":1000, + "clusterName":"cluster01", + "clusterEnabled":false })")}; 
PolicyManager::instance().initialize(configJson); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp index 145d6d4f82f..fde79e24a41 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp @@ -168,9 +168,7 @@ TEST_F(PolicyManagerTest, validConfigurationCheckParameters) "enabled": "yes", "index-status": "yes", "feed-update-interval": "60m", - "cti-url": "https://cti-url.com", - "clusterName":"clusterName", - "clusterEnabled":false + "cti-url": "https://cti-url.com" }, "indexer": { "enabled": "yes", @@ -184,7 +182,9 @@ TEST_F(PolicyManagerTest, validConfigurationCheckParameters) } }, "translationLRUSize": 5000, - "osdataLRUSize": 6000 + "osdataLRUSize": 6000, + "clusterName":"clusterName", + "clusterEnabled":false })")}; EXPECT_NO_THROW(m_policyManager->initialize(configJson)); @@ -220,10 +220,10 @@ TEST_F(PolicyManagerTest, validConfigurationCheckParametersOffline) "enabled": "yes", "index-status": "no", "offline-url": "file:///var/algo.tar.gz", - "cti-url": "https://cti-url.com", - "clusterName":"clusterName", - "clusterEnabled":false - } + "cti-url": "https://cti-url.com" + }, + "clusterName":"clusterName", + "clusterEnabled":false })")}; EXPECT_NO_THROW(m_policyManager->initialize(configJson)); @@ -259,13 +259,13 @@ TEST_F(PolicyManagerTest, validConfigurationDefaultValuesWithClusterName) "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com", - "clusterName":"cluster01", - "clusterEnabled":false + "cti-url": "cti-url.com" }, "indexer": { "enabled": "yes" - } + }, + "clusterName":"cluster01", + "clusterEnabled":false })")}; m_policyManager->initialize(configJson); @@ -307,10 +307,10 @@ TEST_F(PolicyManagerTest, validConfigurationDefaultValuesNoIndexer) "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com", - "clusterName":"cluster01", - "clusterEnabled":false - } + "cti-url": "cti-url.com" + }, + "clusterName":"cluster01", + "clusterEnabled":false })")}; m_policyManager->initialize(configJson); @@ -335,9 +335,7 @@ TEST_F(PolicyManagerTest, validConfigurationVulnerabilityScannerIgnoreIndexStatu "index-status": "yes", "feed-update-interval": "60m", "offline-url": "file:///var/algo.tar.gz", - "cti-url": "https://cti-url.com", - "clusterName":"cluster01", - "clusterEnabled":false + "cti-url": "https://cti-url.com" }, "indexer": { "enabled": "no", @@ -350,7 +348,9 @@ TEST_F(PolicyManagerTest, validConfigurationVulnerabilityScannerIgnoreIndexStatu "certificate": "cert", "key": "ItsASecret!" 
} - } + }, + "clusterName":"cluster01", + "clusterEnabled":false })")}; EXPECT_NO_THROW(m_policyManager->initialize(configJson)); @@ -366,10 +366,10 @@ TEST_F(PolicyManagerTest, validConfigurationCheckFeedUpdateIntervalLessThan60m) "enabled": "yes", "index-status": "yes", "feed-update-interval": "10m", - "cti-url": "https://cti-url.com", - "clusterName":"cluster01", - "clusterEnabled":false - } + "cti-url": "https://cti-url.com" + }, + "clusterName":"cluster01", + "clusterEnabled":false })")}; EXPECT_NO_THROW(m_policyManager->initialize(configJson)); @@ -392,10 +392,10 @@ TEST_F(PolicyManagerTest, validConfigurationCheckFeedUpdateIntervalGreaterThan60 "enabled": "yes", "index-status": "yes", "feed-update-interval": "61m", - "cti-url": "https://cti-url.com", - "clusterName":"cluster01", - "clusterEnabled":false - } + "cti-url": "https://cti-url.com" + }, + "clusterName":"cluster01", + "clusterEnabled":false })")}; EXPECT_NO_THROW(m_policyManager->initialize(configJson)); @@ -413,9 +413,7 @@ TEST_F(PolicyManagerTest, validConfigurationCheckFeedUpdateIntervalGreaterThan60 const auto& UPDATER_BASIC_CONFIG {nlohmann::json::parse(R"({ "vulnerability-detection": { "enabled": "yes", - "index-status": "yes", - "clusterName":"cluster01", - "clusterEnabled":false + "index-status": "yes" }, "updater": { "interval": 3600, @@ -433,7 +431,9 @@ const auto& UPDATER_BASIC_CONFIG {nlohmann::json::parse(R"({ "url": "https://updater-url.com", "offset": 0 } - } + }, + "clusterName":"cluster01", + "clusterEnabled":false })")}; TEST_F(PolicyManagerTest, updaterValidConfiguration) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp index cb7d6377875..7a156617562 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp @@ -73,11 +73,11 @@ void ScanOsAlertDetailsBuilderTest::SetUp() "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "cti-url.com", - "clusterName":"cluster01", - "clusterEnabled":false + "cti-url": "cti-url.com" }, - "osdataLRUSize":1000 + "osdataLRUSize":1000, + "clusterName":"cluster01", + "clusterEnabled":false })")}; PolicyManager::instance().initialize(configJson); } diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/config.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/config.json index a1470e1363e..d8958d812fc 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/config.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-007/config.json @@ -19,6 +19,6 @@ "key": "/home/dwordcito/wazuh-1-key.pem" } }, - "managerNodeName": "test_node_name", + "clusterNodeName": "test_node_name", "managerDisabledScan": 0 } diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/config.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/config.json index a1470e1363e..d8958d812fc 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/config.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/TC-008/config.json @@ -19,6 +19,6 @@ "key": "/home/dwordcito/wazuh-1-key.pem" } }, - "managerNodeName": "test_node_name", + "clusterNodeName": "test_node_name", "managerDisabledScan": 0 } diff --git 
a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.json index 96905161921..e15bae1da0b 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.json @@ -1,12 +1,10 @@ { - "vulnerability-detection": { + "vulnerability-detection": { "enabled": "yes", "index-status": "yes", - "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", - "clusterName":"cluster01", - "clusterEnabled":false - }, - "indexer": { + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + }, + "indexer": { "enabled": "yes", "hosts": [ "https://0.0.0.0:9200" @@ -14,11 +12,13 @@ "username": "admin", "password": "admin", "ssl": { - "certificate_authorities": [ - "/home/dwordcito/Development/wazuh/src/root-ca.pem" - ], - "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", - "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" + "certificate_authorities": [ + "/home/dwordcito/Development/wazuh/src/root-ca.pem" + ], + "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", + "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" } - } + }, + "clusterName":"cluster01", + "clusterEnabled":false } diff --git a/src/wazuh_modules/wm_vulnerability_scanner.c b/src/wazuh_modules/wm_vulnerability_scanner.c index 48020764103..69e7ee0df74 100644 --- a/src/wazuh_modules/wm_vulnerability_scanner.c +++ b/src/wazuh_modules/wm_vulnerability_scanner.c @@ -73,39 +73,7 @@ void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data) cJSON_AddStringToObject(data->vulnerability_detection, "feed-update-interval", "60m"); } - /* Add cluster name to vulnerability detection configurations - * If the cluster is enabled, the cluster name is the cluster name read from the configuration file. - * If the cluster is disabled, the cluster name is the hostname, known as the manager name. - */ - const bool cluster_status = get_cluster_status(); - cJSON_AddBoolToObject(data->vulnerability_detection, "clusterEnabled", cluster_status); - - if (cluster_status) - { - char* cluster_name = get_cluster_name(); - cJSON_AddStringToObject(data->vulnerability_detection, "clusterName", cluster_name); - os_free(cluster_name); - - char* manager_node_name = get_node_name(); - cJSON_AddStringToObject(data->vulnerability_detection, "clusterNodeName", manager_node_name); - os_free(manager_node_name); - } - else - { - char hostname[HOST_NAME_MAX + 1]; - if (gethostname(hostname, HOST_NAME_MAX) == 0) - { - cJSON_AddStringToObject(data->vulnerability_detection, "clusterName", hostname); - } - else - { - cJSON_AddStringToObject(data->vulnerability_detection, "clusterName", "undefined"); - } - - cJSON_AddStringToObject(data->vulnerability_detection, "clusterNodeName", "undefined"); - } - - if (vulnerability_scanner_start_ptr) + if (vulnerability_scanner_start_ptr) { cJSON* config_json = cJSON_CreateObject(); cJSON_AddItemToObject( @@ -128,6 +96,39 @@ void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data) { cJSON_AddItemToObject(config_json, "indexer", cJSON_Duplicate(indexer_config, TRUE)); } + + /* Add cluster name to vulnerability detection configurations + * If the cluster is enabled, the cluster name is the cluster name read from the configuration file. + * If the cluster is disabled, the cluster name is the hostname, known as the manager name. 
+ */ + const bool cluster_status = get_cluster_status(); + cJSON_AddBoolToObject(config_json, "clusterEnabled", cluster_status); + + if (cluster_status) + { + char* cluster_name = get_cluster_name(); + cJSON_AddStringToObject(config_json, "clusterName", cluster_name); + os_free(cluster_name); + + char* manager_node_name = get_node_name(); + cJSON_AddStringToObject(config_json, "clusterNodeName", manager_node_name); + os_free(manager_node_name); + } + else + { + char hostname[HOST_NAME_MAX + 1]; + if (gethostname(hostname, HOST_NAME_MAX) == 0) + { + cJSON_AddStringToObject(config_json, "clusterName", hostname); + } + else + { + cJSON_AddStringToObject(config_json, "clusterName", "undefined"); + } + + cJSON_AddStringToObject(config_json, "clusterNodeName", "undefined"); + } + wm_vulnerability_scanner_log_config(config_json); vulnerability_scanner_start_ptr(mtLoggingFunctionsWrapper, config_json); cJSON_Delete(config_json); From 6c122daa0e88883a22a9e28f29553419cbae98c3 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 10 May 2024 01:59:48 -0300 Subject: [PATCH 046/419] Fix CSF of modified files. --- .../scanOrchestrator/buildAllAgentListContext.hpp | 10 +++++----- .../buildSingleAgentListContext.hpp | 6 ++---- .../src/scanOrchestrator/scanAgentList.hpp | 15 +++++---------- .../tests/unit/scanContext_test.cpp | 2 +- 4 files changed, 13 insertions(+), 20 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp index ad467087a1b..6b3a35a7334 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp @@ -82,11 +82,11 @@ class TBuildAllAgentListContext final : public AbstractHandler() == 0)) { - data->m_agents.push_back({Utils::padString(std::to_string(agent.at("id").get()), '0', 3), - agent.at("name"), - Utils::leftTrim(agent.at("version"), "Wazuh "), - agent.at("ip")}); - } + data->m_agents.push_back({Utils::padString(std::to_string(agent.at("id").get()), '0', 3), + agent.at("name"), + Utils::leftTrim(agent.at("version"), "Wazuh "), + agent.at("ip")}); + } else { logDebug2(WM_VULNSCAN_LOGTAG, "Skipping manager agent with id 0."); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp index 8d73fb3aa34..94de673c44a 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp @@ -44,10 +44,8 @@ class TBuildSingleAgentListInfoContext final : public AbstractHandler handleRequest(std::shared_ptr data) override { - data->m_agents.push_back({data->agentId().data(), - data->agentName().data(), - data->agentVersion().data(), - data->agentIp().data()}); + data->m_agents.push_back( + {data->agentId().data(), data->agentName().data(), data->agentVersion().data(), data->agentIp().data()}); return AbstractHandler>::handleRequest(std::move(data)); } }; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp index f9c18ab25e1..ff79315c74e 100644 --- 
a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp @@ -62,11 +62,9 @@ class TScanAgentList final : public AbstractHandleragentVersion().data(), fbStringGetHelper( - SyscollectorDeltas::GetDelta(reinterpret_cast(buffer))->agent_info()->agent_version())); + SyscollectorDeltas::GetDelta(reinterpret_cast(buffer))->agent_info()->agent_version())); EXPECT_STREQ( scanContext->packageName().data(), fbStringGetHelper( From 508115afc59ad2fbca87747d7e85265aa0e22166 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 10 May 2024 02:03:27 -0300 Subject: [PATCH 047/419] Fix Doxygen warnings. --- .../src/scanOrchestrator/cleanAgentInventory.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp index 1599da0302a..18ebc42ea83 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp @@ -56,7 +56,7 @@ class TCleanAgentInventory final /** * @brief Handles request and passes control to the next step of the chain. * - * @param data Scan context. + * @param ctx Scan context. * @return std::shared_ptr Abstract handler. */ std::shared_ptr handleRequest(std::shared_ptr ctx) override From c0fb93b174b35a691ff6938e5dd2efdf6b029adb Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 10 May 2024 02:07:51 -0300 Subject: [PATCH 048/419] Add tests with cluster enabled. --- .../003/agentHotfixesData.json | 174 ++++++++++++++++++ .../qa/test_data_policy/003/agentOsData.json | 39 ++++ .../003/agentPackagesData.json | 50 +++++ .../qa/test_data_policy/003/args_001.json | 19 ++ .../qa/test_data_policy/003/args_002.json | 19 ++ .../qa/test_data_policy/003/args_003.json | 19 ++ .../qa/test_data_policy/003/args_004.json | 19 ++ .../qa/test_data_policy/003/args_005.json | 19 ++ .../qa/test_data_policy/003/config.json | 25 +++ .../test_data_policy/003/configDisabled.json | 25 +++ .../003/configDisabledAndManagerDisabled.json | 26 +++ .../003/configManagerDisabled.json | 26 +++ .../qa/test_data_policy/003/expected_001.out | 3 + .../qa/test_data_policy/003/expected_002.out | 3 + .../qa/test_data_policy/003/expected_003.out | 22 +++ .../qa/test_data_policy/003/expected_004.out | 3 + .../qa/test_data_policy/003/expected_005.out | 15 ++ .../qa/test_data_policy/003/globalData.json | 58 ++++++ .../004/agentHotfixesData.json | 170 +++++++++++++++++ .../qa/test_data_policy/004/agentOsData.json | 39 ++++ .../004/agentPackagesData.json | 50 +++++ .../qa/test_data_policy/004/args_001.json | 19 ++ .../qa/test_data_policy/004/args_002.json | 19 ++ .../qa/test_data_policy/004/args_003.json | 19 ++ .../qa/test_data_policy/004/args_004.json | 19 ++ .../qa/test_data_policy/004/config.json | 25 +++ .../004/configManagerDisabled.json | 26 +++ .../qa/test_data_policy/004/expected_001.out | 3 + .../qa/test_data_policy/004/expected_002.out | 5 + .../qa/test_data_policy/004/expected_003.out | 12 ++ .../qa/test_data_policy/004/expected_004.out | 7 + .../qa/test_data_policy/004/globalData.json | 58 ++++++ 32 files changed, 1035 insertions(+) create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json create mode 100644 
src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentOsData.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_001.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_002.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_003.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_004.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_005.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/config.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configDisabled.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configDisabledAndManagerDisabled.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configManagerDisabled.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_001.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_002.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_004.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/globalData.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentHotfixesData.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentOsData.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentPackagesData.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_001.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_002.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_003.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_004.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/config.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/configManagerDisabled.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_001.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_002.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_003.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_004.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/globalData.json diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json new file mode 100644 index 00000000000..f5c66184a1e --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json @@ -0,0 +1,174 @@ +{ + "1001": + [ + { + "hotfix": "KB2468871" + }, + { + "hotfix": "KB2478063" + }, + 
{ + "hotfix": "KB2533523" + }, + { + "hotfix": "KB2544514" + }, + { + "hotfix": "KB2600211" + }, + { + "hotfix": "KB2600217" + }, + { + "hotfix": "KB4502496" + }, + { + "hotfix": "KB4512577" + }, + { + "hotfix": "KB4512578" + }, + { + "hotfix": "KB4514366" + }, + { + "hotfix": "KB4535680" + }, + { + "hotfix": "KB4535684" + }, + { + "hotfix": "KB4535685" + }, + { + "hotfix": "KB4577586" + }, + { + "hotfix": "KB4580325" + }, + { + "hotfix": "KB4589208" + }, + { + "hotfix": "KB4601558" + }, + { + "hotfix": "KB5003171" + }, + { + "hotfix": "KB5003243" + }, + { + "hotfix": "KB5034619" + }, + { + "hotfix": "KB5034768" + }, + { + "hotfix": "KB5034863" + } + ], + "1002": + [ + { + "hotfix": "KB2468871" + }, + { + "hotfix": "KB2478063" + }, + { + "hotfix": "KB2533523" + }, + { + "hotfix": "KB2544514" + }, + { + "hotfix": "KB2600211" + }, + { + "hotfix": "KB2600217" + }, + { + "hotfix": "KB5008882" + }, + { + "hotfix": "KB5010523" + }, + { + "hotfix": "KB5011497" + } + ], + "1003": + [ + { + "hotfix": "KB2468871" + }, + { + "hotfix": "KB2478063" + }, + { + "hotfix": "KB2533523" + }, + { + "hotfix": "KB2544514" + }, + { + "hotfix": "KB2600211" + }, + { + "hotfix": "KB2600217" + }, + { + "hotfix": "KB4502496" + }, + { + "hotfix": "KB4512577" + }, + { + "hotfix": "KB4512578" + }, + { + "hotfix": "KB4514366" + }, + { + "hotfix": "KB4535680" + }, + { + "hotfix": "KB4535684" + }, + { + "hotfix": "KB4535685" + }, + { + "hotfix": "KB4577586" + }, + { + "hotfix": "KB4580325" + }, + { + "hotfix": "KB4589208" + }, + { + "hotfix": "KB4601558" + }, + { + "hotfix": "KB5003171" + }, + { + "hotfix": "KB5003243" + }, + { + "hotfix": "KB5034619" + }, + { + "hotfix": "KB5034768" + }, + { + "hotfix": "KB5034863" + } + ], + "1004": + { + "status": "NOT_SYNCED" + } +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentOsData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentOsData.json new file mode 100644 index 00000000000..0fe7b6af977 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentOsData.json @@ -0,0 +1,39 @@ +{ + "000": [ + { + "architecture": "x86_64", + "checksum": "1704514361693635656", + "hostname": "ubuntu-jammy", + "os_codename": "jammy", + "os_major": "22", + "os_minor": "04", + "os_name": "Ubuntu", + "os_patch": "3", + "os_platform": "ubuntu", + "os_version": "22.04.3 LTS (Jammy Jellyfish)", + "reference": "f22553c945b045bfc0d162cb890344d2f4fa8609", + "release": "5.15.0-91-generic", + "scan_id": 0, + "scan_time": "2024/01/06 04:12:44", + "sysname": "Linux", + "version": "#101-Ubuntu SMP Tue Nov 14 13:30:08 UTC 2023" + } + ], + "001": [ + { + "architecture": "x86_64", + "checksum": "1704514864922425008", + "hostname": "vagrant", + "os_major": "8", + "os_name": "Red Hat Enterprise Linux", + "os_platform": "rhel", + "os_version": "8.9", + "reference": "e778c1fe83f2b15cdb013471a2c8223132c9e1ca", + "release": "4.14.311-233.529.amzn2.x86_64", + "scan_id": 0, + "scan_time": "2024/01/06 04:21:05", + "sysname": "Linux", + "version": "#1 SMP Thu Mar 23 09:54:12 UTC 2023" + } + ] +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json new file mode 100644 index 00000000000..27bf67a941b --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json @@ -0,0 +1,50 @@ +{ + "000": [ + { + "scan_id": "0", + "scan_time": "2024/01/11 00:05:48", + 
"format": "deb", + "name": "gzip", + "priority": "required", + "section": "utils", + "size": 245, + "vendor": "Ubuntu Developers ", + "install_time": "", + "version": "1.10-0ubuntu4.1", + "architecture": "amd64", + "multiarch": "", + "source": "", + "description": "GNU compression utilities", + "location": "", + "triaged": "0", + "cpe": "", + "msu_name": "", + "checksum": "653552fc5b2cc4c4cc281ee1a2fdd55351cae8f4", + "item_id": "040334345fd47ab6e72026cf3c45640456198fb4" + } + ], + "001": [ + { + "scan_id": "0", + "scan_time": "2024/01/11 00:05:58", + "format": "rpm", + "name": "lua-libs", + "priority": "", + "section": "Development/Languages", + "size": 247936, + "vendor": "Red Hat, Inc.", + "install_time": "1698808920", + "version": "5.3.4-11.el8", + "architecture": "x86_64", + "multiarch": "", + "source": "", + "description": "Libraries for lua", + "location": "", + "triaged": "0", + "cpe": "", + "msu_name": "", + "checksum": "70901207054653e2ef475cad7b77d31c4757b16d", + "item_id": "6a15840a129f0021c18e7a09e88e1dc7f1ef84b0" + } + ] +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_001.json new file mode 100644 index 00000000000..7ea8dd1acd6 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_001.json @@ -0,0 +1,19 @@ +[ + "-c", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/config.json", + "-t", + "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", + "-l", + "log.out", + "-s", + "120", + "-b", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json", + "-p", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json", + "-a", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentOsData.json", + "-g", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/globalData.json", + "-u" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_002.json new file mode 100644 index 00000000000..fd93fa34a7f --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_002.json @@ -0,0 +1,19 @@ +[ + "-c", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configDisabled.json", + "-t", + "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", + "-l", + "log.out", + "-s", + "120", + "-h", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json", + "-p", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json", + "-v", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentOsData.json", + "-g", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/globalData.json", + "-u" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_003.json new file mode 100644 index 00000000000..5c5f8a40711 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_003.json @@ -0,0 +1,19 @@ +[ + "-c", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/config.json", + "-t", + "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", + "-l", + "log.out", + "-s", + "120", + "-h", + 
"wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json", + "-p", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json", + "-b", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentOsData.json", + "-g", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/globalData.json", + "-u" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_004.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_004.json new file mode 100644 index 00000000000..9582806d786 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_004.json @@ -0,0 +1,19 @@ +[ + "-c", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configDisabledAndManagerDisabled.json", + "-t", + "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", + "-l", + "log.out", + "-s", + "120", + "-h", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json", + "-p", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json", + "-b", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentOsData.json", + "-g", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/globalData.json", + "-u" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_005.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_005.json new file mode 100644 index 00000000000..ea9362b5e0c --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_005.json @@ -0,0 +1,19 @@ +[ + "-c", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configManagerDisabled.json", + "-t", + "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", + "-l", + "log.out", + "-s", + "120", + "-h", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json", + "-p", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json", + "-b", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentOsData.json", + "-g", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/globalData.json", + "-u" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/config.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/config.json new file mode 100644 index 00000000000..3e494d263f8 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/config.json @@ -0,0 +1,25 @@ +{ + "vulnerability-detection": { + "enabled": "yes", + "index-status": "yes", + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + }, + "indexer": { + "enabled": "yes", + "hosts": [ + "https://0.0.0.0:9200" + ], + "username": "admin", + "password": "admin", + "ssl": { + "certificate_authorities": [ + "/home/dwordcito/Development/wazuh/src/root-ca.pem" + ], + "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", + "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" + } + }, + "clusterName":"cluster01", + "clusterEnabled":true, + "clusterNodeName":"node01" +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configDisabled.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configDisabled.json new file mode 100644 index 00000000000..018dca9cb9c --- /dev/null +++ 
b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configDisabled.json @@ -0,0 +1,25 @@ +{ + "vulnerability-detection": { + "enabled": "no", + "index-status": "yes", + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + }, + "indexer": { + "enabled": "yes", + "hosts": [ + "https://0.0.0.0:9200" + ], + "username": "admin", + "password": "admin", + "ssl": { + "certificate_authorities": [ + "/home/dwordcito/Development/wazuh/src/root-ca.pem" + ], + "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", + "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" + } + }, + "clusterName":"cluster01", + "clusterEnabled":true, + "clusterNodeName":"node01" +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configDisabledAndManagerDisabled.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configDisabledAndManagerDisabled.json new file mode 100644 index 00000000000..159d521be2a --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configDisabledAndManagerDisabled.json @@ -0,0 +1,26 @@ +{ + "vulnerability-detection": { + "enabled": "no", + "index-status": "yes", + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + }, + "indexer": { + "enabled": "yes", + "hosts": [ + "https://0.0.0.0:9200" + ], + "username": "admin", + "password": "admin", + "ssl": { + "certificate_authorities": [ + "/home/dwordcito/Development/wazuh/src/root-ca.pem" + ], + "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", + "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" + } + }, + "managerDisabledScan": 1, + "clusterName":"cluster01", + "clusterEnabled":true, + "clusterNodeName":"node01" +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configManagerDisabled.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configManagerDisabled.json new file mode 100644 index 00000000000..939a73fe1fa --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configManagerDisabled.json @@ -0,0 +1,26 @@ +{ + "vulnerability-detection": { + "enabled": "yes", + "index-status": "yes", + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + }, + "indexer": { + "enabled": "yes", + "hosts": [ + "https://0.0.0.0:9200" + ], + "username": "admin", + "password": "admin", + "ssl": { + "certificate_authorities": [ + "/home/dwordcito/Development/wazuh/src/root-ca.pem" + ], + "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", + "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" + } + }, + "managerDisabledScan": 1, + "clusterName":"cluster01", + "clusterEnabled":true, + "clusterNodeName":"node01" +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_001.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_001.out new file mode 100644 index 00000000000..4b74d44d4e3 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_001.out @@ -0,0 +1,3 @@ +[ + "Vulnerability scanner module started" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_002.out new file mode 100644 index 00000000000..e2416840b32 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_002.out @@ -0,0 
+1,3 @@ +[ + "Vulnerability scanner module is disabled" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out new file mode 100644 index 00000000000..a8687fe8b19 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out @@ -0,0 +1,22 @@ +[ + "Vulnerability scanner module started", + "Vulnerability scanner module is enabled. Re-scanning all agents.", + "Event type: 9 processed", + "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", + "Fetched 2 agents from Wazuh-DB.", + "Translation for package 'gzip' in platform 'ubuntu' not found. Using provided packageName.", + "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'manager' (ID: '000', Version: 'v4.7.1').", + "Scanning package - 'gzip' (Installed Version: 1.10-0ubuntu4.1, Security Vulnerability: CVE-2022-1271). Identified vulnerability: Version: 0. Required Version Threshold: 1.10-4ubuntu4. Required Version Threshold (or Equal): .", + "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to ''). - Agent 'manager' (ID: '000', Version: 'v4.7.1').", + "Vulnerability scan for package 'gzip' on Agent '000' has completed.", + "Inserting agent package key: node01_000_040334345fd47ab6e72026cf3c45640456198fb4 -> CVE-2022-1271", + "Processing and publish key: CVE-2022-1271", + "Vulnerability scan for OS 'enterprise_linux' on Agent '001' has completed.", + "Translation for package 'lua-libs' in platform 'rhel' not found. Using provided packageName.", + "Initiating a vulnerability scan for package 'lua-libs' (rpm) (red hat, inc.) with CVE Numbering Authorities (CNA) 'redhat_8' on Agent 'agent_redhat_8' (ID: '001', Version: 'v4.7.1').", + "Vulnerability scan for package 'lua-libs' on Agent '001' has completed.", + "Event type: 7 processed", + "Inserting agent package key: 001_6a15840a129f0021c18e7a09e88e1dc7f1ef84b0 -> CVE-2020-24370", + "Building event details for component type: 1", + "Processing and publish key: CVE-2020-24370" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_004.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_004.out new file mode 100644 index 00000000000..e2416840b32 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_004.out @@ -0,0 +1,3 @@ +[ + "Vulnerability scanner module is disabled" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out new file mode 100644 index 00000000000..2c2795f52fe --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out @@ -0,0 +1,15 @@ +[ + "Vulnerability scanner module started", + "Vulnerability scanner module is enabled. Re-scanning all agents.", + "Fetched 1 agents from Wazuh-DB.", + "Skipping manager agent with id 0.", + "Vulnerability scan for OS 'enterprise_linux' on Agent '001' has completed.", + "Translation for package 'lua-libs' in platform 'rhel' not found. Using provided packageName.", + "Initiating a vulnerability scan for package 'lua-libs' (rpm) (red hat, inc.) 
with CVE Numbering Authorities (CNA) 'redhat_8' on Agent 'agent_redhat_8' (ID: '001', Version: 'v4.7.1').", + "Vulnerability scan for package 'lua-libs' on Agent '001' has completed.", + "Event type: 7 processed", + "Element key: 001_6a15840a129f0021c18e7a09e88e1dc7f1ef84b0", + "Inserting agent package key: 001_6a15840a129f0021c18e7a09e88e1dc7f1ef84b0 -> CVE-2020-24370", + "Building event details for component type: 1", + "Processing and publish key: CVE-2020-24370" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/globalData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/globalData.json new file mode 100644 index 00000000000..5025d61115f --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/globalData.json @@ -0,0 +1,58 @@ +[ + { + "id": 0, + "name": "agent_ubuntu_22", + "ip": "10.0.0.3", + "register_ip": "any", + "internal_key": "9e369606c6f3c20a114f399853abff716f07cdec0ebd30a1e2a59b6b4b439795", + "os_name": "Ubuntu", + "os_version": "22.04.6 LTS", + "os_major": "22", + "os_minor": "04", + "os_codename": "jammy", + "os_build": "ubuntu", + "os_platform": "ubuntu", + "os_uname": "agent_ubuntu_22 | 6.5.13-7-MANJARO | #1 SMP PREEMPT_DYNAMIC Wed Dec 20 07:15:58 UTC 2023", + "os_arch": "x86_64", + "version": "Wazuh v4.7.1", + "config_sum": "ab73af41699f13fdd81903b5f23d8d00", + "merged_sum": "4a8724b20dee0124ff9656783c490c4e", + "manager_host": "pr-test", + "node_name": "node01", + "date_add": "1704931524", + "last_keepalive": "1705096327", + "group": "default", + "group_hash": "37a8eec1", + "group_sync_status": "synced", + "sync_status": "synced", + "connection_status": "disconnected" + }, + { + "id": 1, + "name": "agent_redhat_8", + "ip": "10.0.0.18", + "register_ip": "any", + "internal_key": "3426434231c609dcebcab2676d732db376ceae26af88d13c864668fc3acac778", + "os_name": "Red Hat Enterprise Linux", + "os_version": "8.9", + "os_major": "8", + "os_minor": "9", + "os_codename": "Ootpa", + "os_build": "rhel", + "os_platform": "rhel", + "os_uname": "agent_redhat_8 | 6.5.13-7-MANJARO | #1 SMP PREEMPT_DYNAMIC Wed Dec 20 07:15:58 UTC 2023", + "os_arch": "x86_64", + "version": "Wazuh v4.7.1", + "config_sum": "ab73af41699f13fdd81903b5f23d8d00", + "merged_sum": "4a8724b20dee0124ff9656783c490c4e", + "manager_host": "pr-test", + "node_name": "node01", + "date_add": "1704931528", + "last_keepalive": "1705096327", + "group": "default", + "group_hash": "37a8eec1", + "group_sync_status": "synced", + "sync_status": "synced", + "connection_status": "disconnected" + } +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentHotfixesData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentHotfixesData.json new file mode 100644 index 00000000000..7bd280f89cc --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentHotfixesData.json @@ -0,0 +1,170 @@ +{ + "1001": [ + { + "hotfix": "KB2468871" + }, + { + "hotfix": "KB2478063" + }, + { + "hotfix": "KB2533523" + }, + { + "hotfix": "KB2544514" + }, + { + "hotfix": "KB2600211" + }, + { + "hotfix": "KB2600217" + }, + { + "hotfix": "KB4502496" + }, + { + "hotfix": "KB4512577" + }, + { + "hotfix": "KB4512578" + }, + { + "hotfix": "KB4514366" + }, + { + "hotfix": "KB4535680" + }, + { + "hotfix": "KB4535684" + }, + { + "hotfix": "KB4535685" + }, + { + "hotfix": "KB4577586" + }, + { + "hotfix": "KB4580325" + }, + { + "hotfix": "KB4589208" + }, + { + "hotfix": "KB4601558" + }, + { + "hotfix": "KB5003171" + }, + 
{ + "hotfix": "KB5003243" + }, + { + "hotfix": "KB5034619" + }, + { + "hotfix": "KB5034768" + }, + { + "hotfix": "KB5034863" + } + ], + "1002": [ + { + "hotfix": "KB2468871" + }, + { + "hotfix": "KB2478063" + }, + { + "hotfix": "KB2533523" + }, + { + "hotfix": "KB2544514" + }, + { + "hotfix": "KB2600211" + }, + { + "hotfix": "KB2600217" + }, + { + "hotfix": "KB5008882" + }, + { + "hotfix": "KB5010523" + }, + { + "hotfix": "KB5011497" + } + ], + "1003": [ + { + "hotfix": "KB2468871" + }, + { + "hotfix": "KB2478063" + }, + { + "hotfix": "KB2533523" + }, + { + "hotfix": "KB2544514" + }, + { + "hotfix": "KB2600211" + }, + { + "hotfix": "KB2600217" + }, + { + "hotfix": "KB4502496" + }, + { + "hotfix": "KB4512577" + }, + { + "hotfix": "KB4512578" + }, + { + "hotfix": "KB4514366" + }, + { + "hotfix": "KB4535680" + }, + { + "hotfix": "KB4535684" + }, + { + "hotfix": "KB4535685" + }, + { + "hotfix": "KB4577586" + }, + { + "hotfix": "KB4580325" + }, + { + "hotfix": "KB4589208" + }, + { + "hotfix": "KB4601558" + }, + { + "hotfix": "KB5003171" + }, + { + "hotfix": "KB5003243" + }, + { + "hotfix": "KB5034619" + }, + { + "hotfix": "KB5034768" + }, + { + "hotfix": "KB5034863" + } + ], + "1004": { + "status": "NOT_SYNCED" + } +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentOsData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentOsData.json new file mode 100644 index 00000000000..0fe7b6af977 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentOsData.json @@ -0,0 +1,39 @@ +{ + "000": [ + { + "architecture": "x86_64", + "checksum": "1704514361693635656", + "hostname": "ubuntu-jammy", + "os_codename": "jammy", + "os_major": "22", + "os_minor": "04", + "os_name": "Ubuntu", + "os_patch": "3", + "os_platform": "ubuntu", + "os_version": "22.04.3 LTS (Jammy Jellyfish)", + "reference": "f22553c945b045bfc0d162cb890344d2f4fa8609", + "release": "5.15.0-91-generic", + "scan_id": 0, + "scan_time": "2024/01/06 04:12:44", + "sysname": "Linux", + "version": "#101-Ubuntu SMP Tue Nov 14 13:30:08 UTC 2023" + } + ], + "001": [ + { + "architecture": "x86_64", + "checksum": "1704514864922425008", + "hostname": "vagrant", + "os_major": "8", + "os_name": "Red Hat Enterprise Linux", + "os_platform": "rhel", + "os_version": "8.9", + "reference": "e778c1fe83f2b15cdb013471a2c8223132c9e1ca", + "release": "4.14.311-233.529.amzn2.x86_64", + "scan_id": 0, + "scan_time": "2024/01/06 04:21:05", + "sysname": "Linux", + "version": "#1 SMP Thu Mar 23 09:54:12 UTC 2023" + } + ] +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentPackagesData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentPackagesData.json new file mode 100644 index 00000000000..d476d167cac --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentPackagesData.json @@ -0,0 +1,50 @@ +{ + "000": [ + { + "scan_id": "0", + "scan_time": "2024/01/11 00:05:48", + "format": "deb", + "name": "gzip", + "priority": "required", + "section": "utils", + "size": 245, + "vendor": "Ubuntu Developers ", + "install_time": "", + "version": "1.10-0ubuntu4.1", + "architecture": "amd64", + "multiarch": "", + "source": "", + "description": "GNU compression utilities", + "location": "", + "triaged": "0", + "cpe": "", + "msu_name": "", + "checksum": "653552fc5b2cc4c4cc281ee1a2fdd55351cae8f4", + "item_id": "040334345fd47ab6e72026cf3c45640456198fb4" + } + ], + "001": [ + { + "scan_id": "0", + "scan_time": 
"2024/01/11 00:05:58", + "format": "rpm", + "name": "lua-libs", + "priority": "", + "section": "Development/Languages", + "size": 247936, + "vendor": "Red Hat, Inc.", + "install_time": "1698808920", + "version": "5.3.4-12.el8", + "architecture": "x86_64", + "multiarch": "", + "source": "", + "description": "Libraries for lua", + "location": "", + "triaged": "0", + "cpe": "", + "msu_name": "", + "checksum": "70901207054653e2ef475cad7b77d31c4757b16d", + "item_id": "6a15840a129f0021c18e7a09e88e1dc7f1ef84b0" + } + ] +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_001.json new file mode 100644 index 00000000000..40dd738152b --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_001.json @@ -0,0 +1,19 @@ +[ + "-c", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/config.json", + "-t", + "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", + "-l", + "log.out", + "-s", + "120", + "-h", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentHotfixesData.json", + "-p", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentPackagesData.json", + "-b", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentOsData.json", + "-g", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/globalData.json", + "-u" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_002.json new file mode 100644 index 00000000000..6a7b9c28517 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_002.json @@ -0,0 +1,19 @@ +[ + "-c", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/configManagerDisabled.json", + "-t", + "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", + "-l", + "log.out", + "-s", + "120", + "-h", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentHotfixesData.json", + "-p", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentPackagesData.json", + "-b", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentOsData.json", + "-g", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/globalData.json", + "-u" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_003.json new file mode 100644 index 00000000000..40dd738152b --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_003.json @@ -0,0 +1,19 @@ +[ + "-c", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/config.json", + "-t", + "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", + "-l", + "log.out", + "-s", + "120", + "-h", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentHotfixesData.json", + "-p", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentPackagesData.json", + "-b", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentOsData.json", + "-g", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/globalData.json", + "-u" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_004.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_004.json new file mode 100644 index 00000000000..6a7b9c28517 
--- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/args_004.json @@ -0,0 +1,19 @@ +[ + "-c", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/configManagerDisabled.json", + "-t", + "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", + "-l", + "log.out", + "-s", + "120", + "-h", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentHotfixesData.json", + "-p", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentPackagesData.json", + "-b", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentOsData.json", + "-g", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/globalData.json", + "-u" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/config.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/config.json new file mode 100644 index 00000000000..3e494d263f8 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/config.json @@ -0,0 +1,25 @@ +{ + "vulnerability-detection": { + "enabled": "yes", + "index-status": "yes", + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + }, + "indexer": { + "enabled": "yes", + "hosts": [ + "https://0.0.0.0:9200" + ], + "username": "admin", + "password": "admin", + "ssl": { + "certificate_authorities": [ + "/home/dwordcito/Development/wazuh/src/root-ca.pem" + ], + "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", + "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" + } + }, + "clusterName":"cluster01", + "clusterEnabled":true, + "clusterNodeName":"node01" +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/configManagerDisabled.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/configManagerDisabled.json new file mode 100644 index 00000000000..939a73fe1fa --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/configManagerDisabled.json @@ -0,0 +1,26 @@ +{ + "vulnerability-detection": { + "enabled": "yes", + "index-status": "yes", + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + }, + "indexer": { + "enabled": "yes", + "hosts": [ + "https://0.0.0.0:9200" + ], + "username": "admin", + "password": "admin", + "ssl": { + "certificate_authorities": [ + "/home/dwordcito/Development/wazuh/src/root-ca.pem" + ], + "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", + "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" + } + }, + "managerDisabledScan": 1, + "clusterName":"cluster01", + "clusterEnabled":true, + "clusterNodeName":"node01" +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_001.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_001.out new file mode 100644 index 00000000000..4b74d44d4e3 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_001.out @@ -0,0 +1,3 @@ +[ + "Vulnerability scanner module started" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_002.out new file mode 100644 index 00000000000..c881207832f --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_002.out @@ -0,0 +1,5 @@ +[ + "Vulnerability scanner module started", + "Vulnerability scanner in manager deactivated. 
Performing clean-up.", + "Event type: 10 processed" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_003.out new file mode 100644 index 00000000000..a01813e1e16 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_003.out @@ -0,0 +1,12 @@ +[ + "Vulnerability scanner module started", + "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", + "Translation for package 'gzip' in platform 'ubuntu' not found. Using provided packageName.", + "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'manager' (ID: '000', Version", + "Scanning package - 'gzip' (Installed Version: 1.10-0ubuntu4.1, Security Vulnerability: CVE-2022-1271). Identified vulnerability: Version: 0. Required Version Threshold: 1.10-4ubuntu4. Required Version Threshold (or Equal): .", + "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to ''). - Agent 'manager' (ID: '000', Version:", + "Vulnerability scan for package 'gzip' on Agent '000' has completed.", + "Inserting agent package key: node01_000_040334345fd47ab6e72026cf3c45640456198fb4 -> CVE-2022-1271", + "Processing and publish key: CVE-2022-1271", + "Event type: 8 processed" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_004.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_004.out new file mode 100644 index 00000000000..20b4ea8ab5b --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_004.out @@ -0,0 +1,7 @@ +[ + "Vulnerability scanner module started", + "Vulnerability scanner in manager deactivated. 
Performing clean-up.", + "Event type: 10 processed", + "Processing and publish key: CVE-2022-1271", + "Deleting package agent vulnerabilities key: node01_000_040334345fd47ab6e72026cf3c45640456198fb4" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/globalData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/globalData.json new file mode 100644 index 00000000000..5025d61115f --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/globalData.json @@ -0,0 +1,58 @@ +[ + { + "id": 0, + "name": "agent_ubuntu_22", + "ip": "10.0.0.3", + "register_ip": "any", + "internal_key": "9e369606c6f3c20a114f399853abff716f07cdec0ebd30a1e2a59b6b4b439795", + "os_name": "Ubuntu", + "os_version": "22.04.6 LTS", + "os_major": "22", + "os_minor": "04", + "os_codename": "jammy", + "os_build": "ubuntu", + "os_platform": "ubuntu", + "os_uname": "agent_ubuntu_22 | 6.5.13-7-MANJARO | #1 SMP PREEMPT_DYNAMIC Wed Dec 20 07:15:58 UTC 2023", + "os_arch": "x86_64", + "version": "Wazuh v4.7.1", + "config_sum": "ab73af41699f13fdd81903b5f23d8d00", + "merged_sum": "4a8724b20dee0124ff9656783c490c4e", + "manager_host": "pr-test", + "node_name": "node01", + "date_add": "1704931524", + "last_keepalive": "1705096327", + "group": "default", + "group_hash": "37a8eec1", + "group_sync_status": "synced", + "sync_status": "synced", + "connection_status": "disconnected" + }, + { + "id": 1, + "name": "agent_redhat_8", + "ip": "10.0.0.18", + "register_ip": "any", + "internal_key": "3426434231c609dcebcab2676d732db376ceae26af88d13c864668fc3acac778", + "os_name": "Red Hat Enterprise Linux", + "os_version": "8.9", + "os_major": "8", + "os_minor": "9", + "os_codename": "Ootpa", + "os_build": "rhel", + "os_platform": "rhel", + "os_uname": "agent_redhat_8 | 6.5.13-7-MANJARO | #1 SMP PREEMPT_DYNAMIC Wed Dec 20 07:15:58 UTC 2023", + "os_arch": "x86_64", + "version": "Wazuh v4.7.1", + "config_sum": "ab73af41699f13fdd81903b5f23d8d00", + "merged_sum": "4a8724b20dee0124ff9656783c490c4e", + "manager_host": "pr-test", + "node_name": "node01", + "date_add": "1704931528", + "last_keepalive": "1705096327", + "group": "default", + "group_hash": "37a8eec1", + "group_sync_status": "synced", + "sync_status": "synced", + "connection_status": "disconnected" + } +] From 59b20d066e1019263f1490ddd9082e10654f0d87 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 10 May 2024 02:53:43 -0300 Subject: [PATCH 049/419] Add test coverage. --- .../qa/test_data_policy/003/expected_005.out | 1 - .../buildAllAgentListContext.hpp | 6 ++- .../tests/unit/eventDetailsBuilder_test.cpp | 40 +++++++++++++++++-- 3 files changed, 42 insertions(+), 5 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out index 2c2795f52fe..70664c51519 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out @@ -8,7 +8,6 @@ "Initiating a vulnerability scan for package 'lua-libs' (rpm) (red hat, inc.) 
with CVE Numbering Authorities (CNA) 'redhat_8' on Agent 'agent_redhat_8' (ID: '001', Version: 'v4.7.1').", "Vulnerability scan for package 'lua-libs' on Agent '001' has completed.", "Event type: 7 processed", - "Element key: 001_6a15840a129f0021c18e7a09e88e1dc7f1ef84b0", "Inserting agent package key: 001_6a15840a129f0021c18e7a09e88e1dc7f1ef84b0 -> CVE-2020-24370", "Building event details for component type: 1", "Processing and publish key: CVE-2020-24370" diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp index 6b3a35a7334..eb3798e4565 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp @@ -67,11 +67,13 @@ class TBuildAllAgentListContext final : public AbstractHandlerm_agents.size()); return AbstractHandler>::handleRequest(std::move(data)); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp index 2a179105651..d44b18d9d31 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp @@ -123,6 +123,37 @@ namespace NSEventDetailsBuilderTest } } })"; + const std::string DELTA_PACKAGES_INSERTED_MANAGER_MSG = + R"( + { + "agent_info": { + "agent_id": "000", + "agent_ip": "192.168.33.20", + "agent_name": "focal", + "agent_version": "4.7.1" + }, + "data_type": "dbsync_packages", + "data": { + "architecture": "amd64", + "checksum": "1e6ce14f97f57d1bbd46ff8e5d3e133171a1bbce", + "description": "library for GIF images library", + "format": "deb", + "groups": "libs", + "item_id": "ec465b7eb5fa011a336e95614072e4c7f1a65a53", + "multiarch": "same", + "name": "libgif7", + "priority": "optional", + "scan_time": "2023/08/04 19:56:11", + "size": 72, + "source": "giflib", + "vendor": "Ubuntu Developers ", + "version": "5.1.9-1", + "install_time": "1577890801", + "location":" " + }, + "operation": "INSERTED" + } + )"; const std::string CVEID {"CVE-2024-1234"}; } // namespace NSEventDetailsBuilderTest @@ -142,7 +173,8 @@ void EventDetailsBuilderTest::SetUp() }, "osdataLRUSize":1000, "clusterName":"cluster01", - "clusterEnabled":false + "clusterEnabled":true, + "clusterNodeName":"node01" })")}; PolicyManager::instance().initialize(configJson); } @@ -395,13 +427,13 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS3) flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); - ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); + ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MANAGER_MSG.c_str())); uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); auto scanContext = std::make_shared>(syscollectorDelta); scanContext->m_elements[CVEID] = - R"({"operation":"INSERTED", "id":"001_ec465b7eb5fa011a336e95614072e4c7f1a65a53_CVE-2024-1234"})"_json; + R"({"operation":"INSERTED", "id":"000_ec465b7eb5fa011a336e95614072e4c7f1a65a53_CVE-2024-1234"})"_json; scanContext->m_alerts[CVEID] = nlohmann::json::object(); // Mock one alert TEventDetailsBuilder> eventDetailsBuilder( @@ -421,6 +453,8 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS3) 
auto& elementData = scanContext->m_elements[CVEID].at("data"); + EXPECT_STREQ(elementData.at("agent").at("ephemeral_id").get_ref().c_str(), + scanContext->clusterNodeName().data()); EXPECT_STREQ(elementData.at("agent").at("id").get_ref().c_str(), scanContext->agentId().data()); EXPECT_STREQ(elementData.at("agent").at("name").get_ref().c_str(), scanContext->agentName().data()); From a0dbc3e3fa06253134cc6640798607baf3f979e3 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 10 May 2024 03:11:27 -0300 Subject: [PATCH 050/419] Fix C unit tests and add limits.h --- src/unit_tests/remoted/test_secure.c | 30 ++++++++++---------- src/unit_tests/wazuh_db/test_wdb_integrity.c | 8 +++--- src/wazuh_modules/wm_vulnerability_scanner.c | 3 +- 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/src/unit_tests/remoted/test_secure.c b/src/unit_tests/remoted/test_secure.c index 4bc20906f80..cbc7e421735 100644 --- a/src/unit_tests/remoted/test_secure.c +++ b/src/unit_tests/remoted/test_secure.c @@ -2051,7 +2051,7 @@ void test_router_message_forward_invalid_sync_json_message(void **state) { test_agent_info* data = (test_agent_info*)(*state); char* message = "5:syscollector:{\"message\":\"not_valid\"}"; - char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"}}"; + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"}}"; router_rsync_handle = (ROUTER_PROVIDER_HANDLE)(1); @@ -2073,7 +2073,7 @@ void test_router_message_forward_valid_integrity_check_global(void **state) test_agent_info* data = (test_agent_info*)(*state); char* message = "5:syscollector:{\"component\":\"syscollector_hwinfo\",\"data\":{\"begin\":\"0\",\"checksum\":\"b66d0703ee882571cd1865f393bd34f7d5940339\"," "\"end\":\"0\",\"id\":1691259777},\"type\":\"integrity_check_global\"}"; - char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"},\"data_type\":\"integrity_check_global\",\"data\":" + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"},\"data_type\":\"integrity_check_global\",\"data\":" "{\"attributes_type\":\"syscollector_hwinfo\",\"begin\":\"0\",\"checksum\":\"b66d0703ee882571cd1865f393bd34f7d5940339\",\"end\":\"0\",\"id\":1691259777}}"; router_rsync_handle = (ROUTER_PROVIDER_HANDLE)(1); @@ -2094,7 +2094,7 @@ void test_router_message_forward_valid_integrity_check_left(void **state) test_agent_info* data = (test_agent_info*)(*state); char* message = "5:syscollector:{\"component\":\"syscollector_packages\",\"data\":{\"begin\":\"01113a00fcdafa43d111ecb669202119c946ebe5\",\"checksum\":\"54c13892eb9ee18b0012086b76a89f41e73d64a1\"," "\"end\":\"40795337f16a208e4d0a2280fbd5c794c9877dcb\",\"id\":1693338981,\"tail\":\"408cb243d2d52ad6414ba602e375b3b6b5f5cd77\"},\"type\":\"integrity_check_global\"}"; - char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"},\"data_type\":\"integrity_check_global\",\"data\":" + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"},\"data_type\":\"integrity_check_global\",\"data\":" 
"{\"attributes_type\":\"syscollector_packages\",\"begin\":\"01113a00fcdafa43d111ecb669202119c946ebe5\",\"checksum\":\"54c13892eb9ee18b0012086b76a89f41e73d64a1\",\"end\":\"40795337f16a208e4d0a2280fbd5c794c9877dcb\",\"id\":1693338981,\"tail\":\"408cb243d2d52ad6414ba602e375b3b6b5f5cd77\"}}"; router_rsync_handle = (ROUTER_PROVIDER_HANDLE)(1); @@ -2115,7 +2115,7 @@ void test_router_message_forward_valid_integrity_check_right(void **state) test_agent_info* data = (test_agent_info*)(*state); char* message = "5:syscollector:{\"component\":\"syscollector_packages\",\"data\":{\"begin\":\"85c5676f6e5082ef99bba397b90559cd36fbbeca\",\"checksum\":\"d33c176f028188be38b394af5eed1e66bb8ad40e\"," "\"end\":\"ffee8da05f37fa760fc5eee75dd0ea9e71228d05\",\"id\":1693338981},\"type\":\"integrity_check_right\"}"; - char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"},\"data_type\":\"integrity_check_right\",\"data\":" + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"},\"data_type\":\"integrity_check_right\",\"data\":" "{\"attributes_type\":\"syscollector_packages\",\"begin\":\"85c5676f6e5082ef99bba397b90559cd36fbbeca\",\"checksum\":\"d33c176f028188be38b394af5eed1e66bb8ad40e\",\"end\":\"ffee8da05f37fa760fc5eee75dd0ea9e71228d05\",\"id\":1693338981}}"; router_rsync_handle = (ROUTER_PROVIDER_HANDLE)(1); @@ -2135,7 +2135,7 @@ void test_router_message_forward_valid_integrity_clear(void **state) { test_agent_info* data = (test_agent_info*)(*state); char* message = "5:syscollector:{\"component\":\"syscollector_hwinfo\",\"data\":{\"id\":1693338619},\"type\":\"integrity_check_clear\"}"; - char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"},\"data_type\":\"integrity_check_clear\",\"data\":" + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"},\"data_type\":\"integrity_check_clear\",\"data\":" "{\"attributes_type\":\"syscollector_hwinfo\",\"id\":1693338619}}"; router_rsync_handle = (ROUTER_PROVIDER_HANDLE)(1); @@ -2176,7 +2176,7 @@ void test_router_message_forward_invalid_delta_json_message(void **state) { test_agent_info* data = (test_agent_info*)(*state); char* message = "d:syscollector:{\"message\":\"not_valid\"}"; - char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"}}"; + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"}}"; router_syscollector_handle = (ROUTER_PROVIDER_HANDLE)(1); @@ -2200,7 +2200,7 @@ void test_router_message_forward_valid_delta_packages_json_message(void **state) ",\"description\":\"library for GIF images (library)\",\"format\":\"deb\",\"groups\":\"libs\",\"item_id\":\"ec465b7eb5fa011a336e95614072e4c7f1a65a53\"" ",\"multiarch\":\"same\",\"name\":\"libgif7\",\"priority\":\"optional\",\"scan_time\":\"2023/08/04 19:56:11\",\"size\":72,\"source\":\"giflib\"" ",\"vendor\":\"Ubuntu Developers \",\"version\":\"5.1.9-1\"},\"operation\":\"INSERTED\"}"; - char* expected_message = 
"{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"},\"data_type\":\"dbsync_packages\",\"data\":{\"architecture\":\"amd64\",\"checksum\":\"1e6ce14f97f57d1bbd46ff8e5d3e133171a1bbce\"" + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"},\"data_type\":\"dbsync_packages\",\"data\":{\"architecture\":\"amd64\",\"checksum\":\"1e6ce14f97f57d1bbd46ff8e5d3e133171a1bbce\"" ",\"description\":\"library for GIF images (library)\",\"format\":\"deb\",\"groups\":\"libs\",\"item_id\":\"ec465b7eb5fa011a336e95614072e4c7f1a65a53\"" ",\"multiarch\":\"same\",\"name\":\"libgif7\",\"priority\":\"optional\",\"scan_time\":\"2023/08/04 19:56:11\",\"size\":72,\"source\":\"giflib\"" ",\"vendor\":\"Ubuntu Developers \",\"version\":\"5.1.9-1\"},\"operation\":\"INSERTED\"}"; @@ -2225,7 +2225,7 @@ void test_router_message_forward_valid_delta_os_json_message(void **state) ",\"description\":\"library for GIF images (library)\",\"format\":\"deb\",\"groups\":\"libs\",\"item_id\":\"ec465b7eb5fa011a336e95614072e4c7f1a65a53\"" ",\"multiarch\":\"same\",\"name\":\"libgif7\",\"priority\":\"optional\",\"scan_time\":\"2023/08/04 19:56:11\",\"size\":72,\"source\":\"giflib\"" ",\"vendor\":\"Ubuntu Developers \",\"version\":\"5.1.9-1\"},\"operation\":\"INSERTED\"}"; - char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"},\"data_type\":\"dbsync_packages\",\"data\":{\"architecture\":\"amd64\",\"checksum\":\"1e6ce14f97f57d1bbd46ff8e5d3e133171a1bbce\"" + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"},\"data_type\":\"dbsync_packages\",\"data\":{\"architecture\":\"amd64\",\"checksum\":\"1e6ce14f97f57d1bbd46ff8e5d3e133171a1bbce\"" ",\"description\":\"library for GIF images (library)\",\"format\":\"deb\",\"groups\":\"libs\",\"item_id\":\"ec465b7eb5fa011a336e95614072e4c7f1a65a53\"" ",\"multiarch\":\"same\",\"name\":\"libgif7\",\"priority\":\"optional\",\"scan_time\":\"2023/08/04 19:56:11\",\"size\":72,\"source\":\"giflib\"" ",\"vendor\":\"Ubuntu Developers \",\"version\":\"5.1.9-1\"},\"operation\":\"INSERTED\"}"; @@ -2250,7 +2250,7 @@ void test_router_message_forward_valid_delta_netiface_json_message(void **state) ",\"item_id\":\"7a60750dd3c25c53f21ff7f44b4743664ddbb66a\",\"mac\":\"02:bf:67:45:e4:dd\",\"mtu\":1500,\"name\":\"enp0s3\",\"rx_bytes\":972800985" ",\"rx_dropped\":0,\"rx_errors\":0,\"rx_packets\":670863,\"scan_time\":\"2023/08/04 19:56:11\",\"state\":\"up\",\"tx_bytes\":6151606,\"tx_dropped\":0" ",\"tx_errors\":0,\"tx_packets\":84746,\"type\":\"ethernet\"},\"operation\":\"MODIFIED\"}"; - char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"},\"data_type\":\"dbsync_network_iface\",\"data\":{\"adapter\":null,\"checksum\":\"078143285c1aff98e196c8fe7e01f5677f44bd44\"" + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"},\"data_type\":\"dbsync_network_iface\",\"data\":{\"adapter\":null,\"checksum\":\"078143285c1aff98e196c8fe7e01f5677f44bd44\"" ",\"item_id\":\"7a60750dd3c25c53f21ff7f44b4743664ddbb66a\",\"mac\":\"02:bf:67:45:e4:dd\",\"mtu\":1500,\"name\":\"enp0s3\",\"rx_bytes\":972800985" 
",\"rx_dropped\":0,\"rx_errors\":0,\"rx_packets\":670863,\"scan_time\":\"2023/08/04 19:56:11\",\"state\":\"up\",\"tx_bytes\":6151606,\"tx_dropped\":0" ",\"tx_errors\":0,\"tx_packets\":84746,\"type\":\"ethernet\"},\"operation\":\"MODIFIED\"}"; @@ -2274,7 +2274,7 @@ void test_router_message_forward_valid_delta_netproto_json_message(void **state) char* message = "d:syscollector:{\"type\":\"dbsync_network_protocol\",\"data\":{\"checksum\":\"ddd971d57316a79738a2cf93143966a4e51ede08\",\"dhcp\":\"unknown\"" ",\"gateway\":\" \",\"iface\":\"enp0s9\",\"item_id\":\"33228317ee8778628d0f2f4fde53b75b92f15f1d\",\"metric\":\"0\",\"scan_time\":\"2023/08/07 15:02:36\"" ",\"type\":\"ipv4\"},\"operation\":\"DELETED\"}"; - char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"},\"data_type\":\"dbsync_network_protocol\",\"data\":{\"checksum\":\"ddd971d57316a79738a2cf93143966a4e51ede08\",\"dhcp\":\"unknown\"" + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"},\"data_type\":\"dbsync_network_protocol\",\"data\":{\"checksum\":\"ddd971d57316a79738a2cf93143966a4e51ede08\",\"dhcp\":\"unknown\"" ",\"gateway\":\" \",\"iface\":\"enp0s9\",\"item_id\":\"33228317ee8778628d0f2f4fde53b75b92f15f1d\",\"metric\":\"0\",\"scan_time\":\"2023/08/07 15:02:36\"" ",\"type\":\"ipv4\"},\"operation\":\"DELETED\"}"; @@ -2297,7 +2297,7 @@ void test_router_message_forward_valid_delta_netaddr_json_message(void **state) char* message = "d:syscollector:{\"type\":\"dbsync_network_address\",\"data\":{\"address\":\"192.168.0.80\",\"broadcast\":\"192.168.0.255\"" ",\"checksum\":\"c1f9511fa37815d19cee496f21524725ba84ab10\",\"iface\":\"enp0s9\",\"item_id\":\"b333013c47d28eb3878068dd59c42e00178bd475\"" ",\"netmask\":\"255.255.255.0\",\"proto\":0,\"scan_time\":\"2023/08/07 15:02:36\"},\"operation\":\"DELETED\"}"; - char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"},\"data_type\":\"dbsync_network_address\",\"data\":{\"address\":\"192.168.0.80\",\"broadcast\":\"192.168.0.255\"" + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"},\"data_type\":\"dbsync_network_address\",\"data\":{\"address\":\"192.168.0.80\",\"broadcast\":\"192.168.0.255\"" ",\"checksum\":\"c1f9511fa37815d19cee496f21524725ba84ab10\",\"iface\":\"enp0s9\",\"item_id\":\"b333013c47d28eb3878068dd59c42e00178bd475\"" ",\"netmask\":\"255.255.255.0\",\"proto\":0,\"scan_time\":\"2023/08/07 15:02:36\"},\"operation\":\"DELETED\"}"; @@ -2321,7 +2321,7 @@ void test_router_message_forward_valid_delta_hardware_json_message(void **state) ",\"cpu_mhz\":2592.0,\"cpu_name\":\"Intel(R) Core(TM) i7-10750H CPU @ 2.60GHz\",\"ram_free\":11547184,\"ram_total\":12251492,\"ram_usage\":6" ",\"scan_time\":\"2023/08/04 19:56:11\"},\"operation\":\"MODIFIED\"}"; // Trailing zeros are truncated. 
- char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"},\"data_type\":\"dbsync_hwinfo\",\"data\":{\"board_serial\":\"0\",\"checksum\":\"f6eea592bc11465ecacc92ddaea188ef3faf0a1f\",\"cpu_cores\":8" + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"},\"data_type\":\"dbsync_hwinfo\",\"data\":{\"board_serial\":\"0\",\"checksum\":\"f6eea592bc11465ecacc92ddaea188ef3faf0a1f\",\"cpu_cores\":8" ",\"cpu_mhz\":2592,\"cpu_name\":\"Intel(R) Core(TM) i7-10750H CPU @ 2.60GHz\",\"ram_free\":11547184,\"ram_total\":12251492,\"ram_usage\":6" ",\"scan_time\":\"2023/08/04 19:56:11\"},\"operation\":\"MODIFIED\"}"; @@ -2344,7 +2344,7 @@ void test_router_message_forward_valid_delta_ports_json_message(void **state) char* message = "d:syscollector:{\"type\":\"dbsync_ports\",\"data\":{\"checksum\":\"03f522cdccc8dfbab964981db59b176b178b9dfd\",\"inode\":39968" ",\"item_id\":\"7f98c21162b40ca7871a8292d177a1812ca97547\",\"local_ip\":\"10.0.2.15\",\"local_port\":68,\"pid\":0,\"process\":null,\"protocol\":\"udp\"" ",\"remote_ip\":\"0.0.0.0\",\"remote_port\":0,\"rx_queue\":0,\"scan_time\":\"2023/08/07 12:42:41\",\"state\":null,\"tx_queue\":0},\"operation\":\"INSERTED\"}"; - char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"},\"data_type\":\"dbsync_ports\",\"data\":{\"checksum\":\"03f522cdccc8dfbab964981db59b176b178b9dfd\",\"inode\":39968" + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"},\"data_type\":\"dbsync_ports\",\"data\":{\"checksum\":\"03f522cdccc8dfbab964981db59b176b178b9dfd\",\"inode\":39968" ",\"item_id\":\"7f98c21162b40ca7871a8292d177a1812ca97547\",\"local_ip\":\"10.0.2.15\",\"local_port\":68,\"pid\":0,\"process\":null,\"protocol\":\"udp\"" ",\"remote_ip\":\"0.0.0.0\",\"remote_port\":0,\"rx_queue\":0,\"scan_time\":\"2023/08/07 12:42:41\",\"state\":null,\"tx_queue\":0},\"operation\":\"INSERTED\"}"; @@ -2367,7 +2367,7 @@ void test_router_message_forward_valid_delta_processes_json_message(void **state char* message = "d:syscollector:{\"type\":\"dbsync_processes\",\"data\":{\"checksum\":\"5ca21c17ae78a0ef7463b3b2454126848473cf5b\",\"cmd\":\"C:\\\\Windows\\\\System32\\\\winlogon.exe\"" ",\"name\":\"winlogon.exe\",\"nlwp\":6,\"pid\":\"604\",\"ppid\":496,\"priority\":13,\"scan_time\":\"2023/08/07 15:01:57\",\"session\":1,\"size\":3387392" ",\"start_time\":1691420428,\"stime\":0,\"utime\":0,\"vm_size\":14348288},\"operation\":\"MODIFIED\"}"; - char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"},\"data_type\":\"dbsync_processes\",\"data\":{\"checksum\":\"5ca21c17ae78a0ef7463b3b2454126848473cf5b\",\"cmd\":\"C:\\\\Windows\\\\System32\\\\winlogon.exe\"" + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"},\"data_type\":\"dbsync_processes\",\"data\":{\"checksum\":\"5ca21c17ae78a0ef7463b3b2454126848473cf5b\",\"cmd\":\"C:\\\\Windows\\\\System32\\\\winlogon.exe\"" ",\"name\":\"winlogon.exe\",\"nlwp\":6,\"pid\":\"604\",\"ppid\":496,\"priority\":13,\"scan_time\":\"2023/08/07 15:01:57\",\"session\":1,\"size\":3387392" ",\"start_time\":1691420428,\"stime\":0,\"utime\":0,\"vm_size\":14348288},\"operation\":\"MODIFIED\"}"; @@ 
-2389,7 +2389,7 @@ void test_router_message_forward_valid_delta_hotfixes_json_message(void **state) test_agent_info* data = (test_agent_info*)(*state); char* message = "d:syscollector:{\"type\":\"dbsync_hotfixes\",\"data\":{\"checksum\":\"f6eea592bc11465ecacc92ddaea188ef3faf0a1f\",\"hotfix\":\"KB4502496\"" ",\"scan_time\":\"2023/08/0419:56:11\"},\"operation\":\"MODIFIED\"}"; - char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\",\"node_name\":\"test_node_name\"},\"data_type\":\"dbsync_hotfixes\",\"data\":{\"checksum\":\"f6eea592bc11465ecacc92ddaea188ef3faf0a1f\",\"hotfix\":\"KB4502496\"" + char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"agent_ip\":\"192.168.33.20\",\"agent_name\":\"focal\"},\"data_type\":\"dbsync_hotfixes\",\"data\":{\"checksum\":\"f6eea592bc11465ecacc92ddaea188ef3faf0a1f\",\"hotfix\":\"KB4502496\"" ",\"scan_time\":\"2023/08/0419:56:11\"},\"operation\":\"MODIFIED\"}"; router_syscollector_handle = (ROUTER_PROVIDER_HANDLE)(1); diff --git a/src/unit_tests/wazuh_db/test_wdb_integrity.c b/src/unit_tests/wazuh_db/test_wdb_integrity.c index 31aa8771585..90799a79c9d 100644 --- a/src/unit_tests/wazuh_db/test_wdb_integrity.c +++ b/src/unit_tests/wazuh_db/test_wdb_integrity.c @@ -1851,7 +1851,7 @@ void test_wdbi_report_removed_packages_success(void **state) { wdb_component_t component = WDB_SYSCOLLECTOR_PACKAGES; sqlite3_stmt* stmt = NULL; router_agent_events_handle = (ROUTER_PROVIDER_HANDLE)1; - const char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"\"},\"action\":\"deletePackage\"," + const char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\"},\"action\":\"deletePackage\"," "\"data\":{\"name\":\"name\",\"version\":\"version\",\"architecture\":\"architecture\",\"format\":\"format\",\"location\":\"location\",\"item_id\":\"item_id\"}}"; expect_value(__wrap_sqlite3_column_text, iCol, 0); @@ -1882,7 +1882,7 @@ void test_wdbi_report_removed_hotfixes_success(void **state) { wdb_component_t component = WDB_SYSCOLLECTOR_HOTFIXES; sqlite3_stmt* stmt = NULL; router_agent_events_handle = (ROUTER_PROVIDER_HANDLE)1; - const char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"\"},\"action\":\"deleteHotfix\"," + const char* expected_message = "{\"agent_info\":{\"agent_id\":\"001\"},\"action\":\"deleteHotfix\"," "\"data\":{\"hotfix\":\"hotfix\"}}"; expect_value(__wrap_sqlite3_column_text, iCol, 0); @@ -1903,10 +1903,10 @@ void test_wdbi_report_removed_hotfixes_success_multiple_steps(void **state) { wdb_component_t component = WDB_SYSCOLLECTOR_HOTFIXES; sqlite3_stmt* stmt = NULL; router_agent_events_handle = (ROUTER_PROVIDER_HANDLE)1; - const char* expected_message_1 = "{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"\"},\"action\":\"deleteHotfix\"," + const char* expected_message_1 = "{\"agent_info\":{\"agent_id\":\"001\"},\"action\":\"deleteHotfix\"," "\"data\":{\"hotfix\":\"hotfix1\"}}"; - const char* expected_message_2 = "{\"agent_info\":{\"agent_id\":\"001\",\"node_name\":\"\"},\"action\":\"deleteHotfix\"," + const char* expected_message_2 = "{\"agent_info\":{\"agent_id\":\"001\"},\"action\":\"deleteHotfix\"," "\"data\":{\"hotfix\":\"hotfix2\"}}"; // First hotfix diff --git a/src/wazuh_modules/wm_vulnerability_scanner.c b/src/wazuh_modules/wm_vulnerability_scanner.c index 69e7ee0df74..3af92530f98 100644 --- a/src/wazuh_modules/wm_vulnerability_scanner.c +++ b/src/wazuh_modules/wm_vulnerability_scanner.c @@ -14,6 +14,7 @@ #include 
"external/cJSON/cJSON.h" #include "sym_load.h" #include "vulnerability_scanner.h" +#include static void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data); static void wm_vulnerability_scanner_destroy(wm_vulnerability_scanner_t* data); @@ -73,7 +74,7 @@ void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data) cJSON_AddStringToObject(data->vulnerability_detection, "feed-update-interval", "60m"); } - if (vulnerability_scanner_start_ptr) + if (vulnerability_scanner_start_ptr) { cJSON* config_json = cJSON_CreateObject(); cJSON_AddItemToObject( From 31c79ffa5eb442b09ecaf7343becc1ca20141389 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 10 May 2024 09:41:14 -0300 Subject: [PATCH 051/419] Fix unit tests. --- src/wazuh_modules/wm_vulnerability_scanner.c | 71 +++++++------------- 1 file changed, 26 insertions(+), 45 deletions(-) diff --git a/src/wazuh_modules/wm_vulnerability_scanner.c b/src/wazuh_modules/wm_vulnerability_scanner.c index 3af92530f98..d526df0f24d 100644 --- a/src/wazuh_modules/wm_vulnerability_scanner.c +++ b/src/wazuh_modules/wm_vulnerability_scanner.c @@ -14,7 +14,10 @@ #include "external/cJSON/cJSON.h" #include "sym_load.h" #include "vulnerability_scanner.h" -#include + +#ifndef HOST_NAME_MAX +#define HOST_NAME_MAX 255 +#endif static void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data); static void wm_vulnerability_scanner_destroy(wm_vulnerability_scanner_t* data); @@ -35,47 +38,38 @@ const wm_context WM_VULNERABILITY_SCANNER_CONTEXT = { .query = NULL, }; -static void wm_vulnerability_scanner_log_config(cJSON* config_json) -{ - if (config_json) - { +static void wm_vulnerability_scanner_log_config(cJSON* config_json) { + if (config_json) { char* config_str = cJSON_PrintUnformatted(config_json); - if (config_str) - { + if (config_str) { mtdebug1(WM_VULNERABILITY_SCANNER_LOGTAG, "%s", config_str); cJSON_free(config_str); } } } -void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data) -{ +void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data) { mtinfo(WM_VULNERABILITY_SCANNER_LOGTAG, "Starting vulnerability_scanner module."); - if (vulnerability_scanner_module = so_get_module_handle("vulnerability_scanner"), vulnerability_scanner_module) - { + if (vulnerability_scanner_module = so_get_module_handle("vulnerability_scanner"), vulnerability_scanner_module) { vulnerability_scanner_start_ptr = so_get_function_sym(vulnerability_scanner_module, "vulnerability_scanner_start"); vulnerability_scanner_stop_ptr = so_get_function_sym(vulnerability_scanner_module, "vulnerability_scanner_stop"); // Check for missing configurations. These configurations may miss when using the old deprecated VD config. 
- if (!cJSON_GetObjectItem(data->vulnerability_detection, "enabled")) - { + if (!cJSON_GetObjectItem(data->vulnerability_detection, "enabled")) { cJSON_AddStringToObject(data->vulnerability_detection, "enabled", "yes"); } - if (!cJSON_GetObjectItem(data->vulnerability_detection, "index-status")) - { + if (!cJSON_GetObjectItem(data->vulnerability_detection, "index-status")) { cJSON_AddStringToObject(data->vulnerability_detection, "index-status", "yes"); } - if (!cJSON_GetObjectItem(data->vulnerability_detection, "feed-update-interval")) - { + if (!cJSON_GetObjectItem(data->vulnerability_detection, "feed-update-interval")) { cJSON_AddStringToObject(data->vulnerability_detection, "feed-update-interval", "60m"); } - if (vulnerability_scanner_start_ptr) - { + if (vulnerability_scanner_start_ptr) { cJSON* config_json = cJSON_CreateObject(); cJSON_AddItemToObject( config_json, "vulnerability-detection", cJSON_Duplicate(data->vulnerability_detection, TRUE)); @@ -89,12 +83,10 @@ void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data) "managerDisabledScan", getDefine_Int("vulnerability-detection", "disable_scan_manager", 0, 1)); - if (indexer_config == NULL) - { + if (indexer_config == NULL) { cJSON_AddItemToObject(config_json, "indexer", cJSON_CreateObject()); } - else - { + else { cJSON_AddItemToObject(config_json, "indexer", cJSON_Duplicate(indexer_config, TRUE)); } @@ -105,8 +97,7 @@ void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data) const bool cluster_status = get_cluster_status(); cJSON_AddBoolToObject(config_json, "clusterEnabled", cluster_status); - if (cluster_status) - { + if (cluster_status) { char* cluster_name = get_cluster_name(); cJSON_AddStringToObject(config_json, "clusterName", cluster_name); os_free(cluster_name); @@ -115,15 +106,12 @@ void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data) cJSON_AddStringToObject(config_json, "clusterNodeName", manager_node_name); os_free(manager_node_name); } - else - { + else { char hostname[HOST_NAME_MAX + 1]; - if (gethostname(hostname, HOST_NAME_MAX) == 0) - { + if (gethostname(hostname, HOST_NAME_MAX) == 0) { cJSON_AddStringToObject(config_json, "clusterName", hostname); } - else - { + else { cJSON_AddStringToObject(config_json, "clusterName", "undefined"); } @@ -134,14 +122,12 @@ void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data) vulnerability_scanner_start_ptr(mtLoggingFunctionsWrapper, config_json); cJSON_Delete(config_json); } - else - { + else { mtwarn(WM_VULNERABILITY_SCANNER_LOGTAG, "Unable to start vulnerability_scanner module."); return NULL; } } - else - { + else { mtwarn(WM_VULNERABILITY_SCANNER_LOGTAG, "Unable to load vulnerability_scanner module."); return NULL; } @@ -149,26 +135,21 @@ void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data) return NULL; } -void wm_vulnerability_scanner_destroy(wm_vulnerability_scanner_t* data) -{ +void wm_vulnerability_scanner_destroy(wm_vulnerability_scanner_t* data) { free(data); } -void wm_vulnerability_scanner_stop(__attribute__((unused)) wm_vulnerability_scanner_t* data) -{ +void wm_vulnerability_scanner_stop(__attribute__((unused)) wm_vulnerability_scanner_t* data) { mtinfo(WM_VULNERABILITY_SCANNER_LOGTAG, "Stopping vulnerability_scanner module."); - if (vulnerability_scanner_stop_ptr) - { + if (vulnerability_scanner_stop_ptr) { vulnerability_scanner_stop_ptr(); } - else - { + else { mtwarn(WM_VULNERABILITY_SCANNER_LOGTAG, "Unable to stop vulnerability_scanner module."); } } -cJSON* 
wm_vulnerability_scanner_dump(wm_vulnerability_scanner_t* data) -{ +cJSON* wm_vulnerability_scanner_dump(wm_vulnerability_scanner_t* data) { cJSON* root = cJSON_CreateObject(); cJSON_AddItemToObject(root, "vulnerability-detection", cJSON_Duplicate(data->vulnerability_detection, TRUE)); cJSON_DeleteItemFromObject(cJSON_GetObjectItem(root, "vulnerability-detection"), "index-status"); From 4ca5aabdb17574842f057b57e5b9eb06ce65934b Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 10 May 2024 15:54:43 -0300 Subject: [PATCH 052/419] Fix manager name --- .../template.json | 8 ------- .../template.json | 8 ------- .../indexer/template/index-template.json | 4 ++++ .../qa/test_data_policy/001/expected_003.out | 4 ++-- .../qa/test_data_policy/002/expected_003.out | 4 ++-- .../qa/test_data_policy/003/expected_003.out | 4 ++-- .../qa/test_data_policy/004/expected_003.out | 4 ++-- .../src/databaseFeedManager/globalData.hpp | 21 +++++++++++++++++++ .../src/scanOrchestrator/scanContext.hpp | 3 ++- .../src/scanOrchestrator/scanOrchestrator.hpp | 9 ++++++++ .../tests/unit/scanOrchestrator_test.cpp | 9 -------- 11 files changed, 44 insertions(+), 34 deletions(-) diff --git a/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/template.json b/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/template.json index 77f2a3334f2..9d853800e6e 100644 --- a/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/template.json +++ b/src/shared_modules/indexer_connector/qa/test_data/test_add_bulk_indexer_connector/template.json @@ -259,14 +259,6 @@ } } }, - "manager": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, "schema": { "properties": { "version": { diff --git a/src/shared_modules/indexer_connector/qa/test_data/test_initialize_indexer_connector/template.json b/src/shared_modules/indexer_connector/qa/test_data/test_initialize_indexer_connector/template.json index 77f2a3334f2..9d853800e6e 100644 --- a/src/shared_modules/indexer_connector/qa/test_data/test_initialize_indexer_connector/template.json +++ b/src/shared_modules/indexer_connector/qa/test_data/test_initialize_indexer_connector/template.json @@ -259,14 +259,6 @@ } } }, - "manager": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, "schema": { "properties": { "version": { diff --git a/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json b/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json index 7468d2edad0..cd3206113ae 100644 --- a/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json +++ b/src/wazuh_modules/vulnerability_scanner/indexer/template/index-template.json @@ -44,6 +44,10 @@ } } }, + "ephemeral_id": { + "ignore_above": 1024, + "type": "keyword" + }, "id": { "ignore_above": 1024, "type": "keyword" diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out index 781bc68fc2c..6f81452efcf 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out @@ -5,9 +5,9 @@ "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", "Fetched 2 agents from Wazuh-DB.", "Translation for package 'gzip' in platform 'ubuntu' not found. 
Using provided packageName.", - "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'manager' (ID: '000', Version: 'v4.7.1').", + "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent", "Scanning package - 'gzip' (Installed Version: 1.10-0ubuntu4.1, Security Vulnerability: CVE-2022-1271). Identified vulnerability: Version: 0. Required Version Threshold: 1.10-4ubuntu4. Required Version Threshold (or Equal): .", - "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to ''). - Agent 'manager' (ID: '000', Version: 'v4.7.1').", + "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to '').", "Vulnerability scan for package 'gzip' on Agent '000' has completed.", "Inserting agent package key: 000_040334345fd47ab6e72026cf3c45640456198fb4 -> CVE-2022-1271", "Processing and publish key: CVE-2022-1271", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out index 966251c8b0b..d3d09ce7e52 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out @@ -2,9 +2,9 @@ "Vulnerability scanner module started", "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", "Translation for package 'gzip' in platform 'ubuntu' not found. Using provided packageName.", - "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'manager' (ID: '000', Version", + "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical'", "Scanning package - 'gzip' (Installed Version: 1.10-0ubuntu4.1, Security Vulnerability: CVE-2022-1271). Identified vulnerability: Version: 0. Required Version Threshold: 1.10-4ubuntu4. Required Version Threshold (or Equal): .", - "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to ''). - Agent 'manager' (ID: '000', Version:", + "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to '').", "Vulnerability scan for package 'gzip' on Agent '000' has completed.", "Inserting agent package key: 000_040334345fd47ab6e72026cf3c45640456198fb4 -> CVE-2022-1271", "Processing and publish key: CVE-2022-1271", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out index a8687fe8b19..8668258ceb5 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out @@ -5,9 +5,9 @@ "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", "Fetched 2 agents from Wazuh-DB.", "Translation for package 'gzip' in platform 'ubuntu' not found. 
Using provided packageName.", - "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'manager' (ID: '000', Version: 'v4.7.1').", + "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical'", "Scanning package - 'gzip' (Installed Version: 1.10-0ubuntu4.1, Security Vulnerability: CVE-2022-1271). Identified vulnerability: Version: 0. Required Version Threshold: 1.10-4ubuntu4. Required Version Threshold (or Equal): .", - "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to ''). - Agent 'manager' (ID: '000', Version: 'v4.7.1').", + "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to '').", "Vulnerability scan for package 'gzip' on Agent '000' has completed.", "Inserting agent package key: node01_000_040334345fd47ab6e72026cf3c45640456198fb4 -> CVE-2022-1271", "Processing and publish key: CVE-2022-1271", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_003.out index a01813e1e16..b170087b388 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_003.out @@ -2,9 +2,9 @@ "Vulnerability scanner module started", "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", "Translation for package 'gzip' in platform 'ubuntu' not found. Using provided packageName.", - "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical' on Agent 'manager' (ID: '000', Version", + "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical'", "Scanning package - 'gzip' (Installed Version: 1.10-0ubuntu4.1, Security Vulnerability: CVE-2022-1271). Identified vulnerability: Version: 0. Required Version Threshold: 1.10-4ubuntu4. Required Version Threshold (or Equal): .", - "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to ''). - Agent 'manager' (ID: '000', Version:", + "Match found, the package 'gzip', is vulnerable to 'CVE-2022-1271'. Current version: '1.10-0ubuntu4.1' (less than '1.10-4ubuntu4' or equal to '').", "Vulnerability scan for package 'gzip' on Agent '000' has completed.", "Inserting agent package key: node01_000_040334345fd47ab6e72026cf3c45640456198fb4 -> CVE-2022-1271", "Processing and publish key: CVE-2022-1271", diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/globalData.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/globalData.hpp index 4396def3ead..f4e979e1444 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/globalData.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/globalData.hpp @@ -26,6 +26,7 @@ class GlobalData final : public Singleton nlohmann::json m_vendorMaps; nlohmann::json m_osCpeMaps; nlohmann::json m_cnaMappings; + std::string m_managerName; public: /** @@ -58,6 +59,16 @@ class GlobalData final : public Singleton m_cnaMappings = cnaMappings; } + /** + * @brief Set manager name. 
+ * @param name Manager name. + */ + void managerName(std::string_view name) + { + std::unique_lock lock(m_mutex); + m_managerName = name; + } + /** * @brief Get vendor map data. * @return vendor map data. @@ -87,6 +98,16 @@ class GlobalData final : public Singleton std::shared_lock lock(m_mutex); return m_cnaMappings; } + + /** + * @brief Get manager name. + * @return Manager name. + */ + std::string managerName() const + { + std::shared_lock lock(m_mutex); + return m_managerName.empty() ? "manager" : m_managerName; + } }; #endif // _GLOBALDATA_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index 76026488efd..e2608ce006a 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -1160,7 +1160,8 @@ struct TScanContext final { if (agentId() == "000") { - return "manager"; + static std::string managerName = GlobalData::instance().managerName(); + return managerName; } return extractData( diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp index 09cdc55c7e2..8d000d38e8c 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp @@ -102,6 +102,15 @@ class TScanOrchestrator final : public TOSPrimitives inventoryDatabase, std::move(reportDispatcher)); + // Define the maximum size for the hostname + constexpr auto MAX_HOSTNAME_SIZE = 256; + char managerNameRaw[MAX_HOSTNAME_SIZE] = {0}; + + // Get the hostname and store it in the managerName string + TOSPrimitives::gethostname(managerNameRaw, MAX_HOSTNAME_SIZE); + + GlobalData::instance().managerName(managerNameRaw); + initEventDelayedDispatcher(); } ~TScanOrchestrator() = default; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp index 450fa5364d5..4d868427732 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp @@ -176,15 +176,6 @@ namespace NSScanOrchestratorTest } )"; - const auto MANAGER_NAME = - R"( - { - "manager": { - "name": "test" - } - } - )"_json; - const std::string TEST_PATH {"/tmp/socket"}; } // namespace NSScanOrchestratorTest From 765a1d3827ff1f3171e4160e575acbf1a8c06d9b Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 10 May 2024 16:56:51 -0300 Subject: [PATCH 053/419] Add unit tests. --- .../src/databaseFeedManager/globalData.hpp | 16 ++++++++-------- .../tests/unit/globalData_test.cpp | 8 ++++++++ 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/globalData.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/globalData.hpp index f4e979e1444..6eaa6dc236b 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/globalData.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/globalData.hpp @@ -50,23 +50,23 @@ class GlobalData final : public Singleton } /** - * @brief Set CNA mappings. - * @param cnaMappings CNA mappings. + * @brief Set manager name. 
+ * @param name Manager name. */ - void cnaMappings(const nlohmann::json& cnaMappings) + void managerName(std::string_view name) { std::unique_lock lock(m_mutex); - m_cnaMappings = cnaMappings; + m_managerName = name; } /** - * @brief Set manager name. - * @param name Manager name. + * @brief Set CNA mappings. + * @param cnaMappings CNA mappings. */ - void managerName(std::string_view name) + void cnaMappings(const nlohmann::json& cnaMappings) { std::unique_lock lock(m_mutex); - m_managerName = name; + m_cnaMappings = cnaMappings; } /** diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/globalData_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/globalData_test.cpp index ae41ce0abdc..2dabe4dda41 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/globalData_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/globalData_test.cpp @@ -32,3 +32,11 @@ TEST(GlobalDataTest, StoreAndGetOsCpeMap) GlobalData::instance().osCpeMaps(osCpeRulesJson); EXPECT_STREQ(GlobalData::instance().osCpeMaps().dump().c_str(), osCpeRules.c_str()); } + +TEST(GlobalDataTest, StoreAndGetManagerName) +{ + const std::string managerName {"wazuh"}; + + GlobalData::instance().managerName(managerName); + EXPECT_STREQ(GlobalData::instance().managerName().data(), managerName.c_str()); +} From b31911cf5a59015d00d007c42e132aeb82d539dd Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Sat, 11 May 2024 11:28:55 -0300 Subject: [PATCH 054/419] Changes based on PR comments. --- .../indexer_connector/src/indexerConnector.cpp | 5 +++++ .../tests/component/indexerConnector_test.cpp | 14 ++++++++++++++ src/shared_modules/utils/stringHelper.h | 6 ++++++ .../utils/tests/stringHelper_test.cpp | 7 +++++++ .../src/policyManager/policyManager.hpp | 2 +- 5 files changed, 33 insertions(+), 1 deletion(-) diff --git a/src/shared_modules/indexer_connector/src/indexerConnector.cpp b/src/shared_modules/indexer_connector/src/indexerConnector.cpp index f1341c743d4..0e5fbc9b556 100644 --- a/src/shared_modules/indexer_connector/src/indexerConnector.cpp +++ b/src/shared_modules/indexer_connector/src/indexerConnector.cpp @@ -335,6 +335,11 @@ IndexerConnector::IndexerConnector( // Get index name. m_indexName = config.at("name").get_ref(); + if (Utils::haveUpperCaseCharacters(m_indexName)) + { + throw std::runtime_error("Index name must be lowercase."); + } + m_db = std::make_unique(std::string(DATABASE_BASE_PATH) + "db/" + m_indexName); auto secureCommunication = SecureCommunication::builder(); diff --git a/src/shared_modules/indexer_connector/tests/component/indexerConnector_test.cpp b/src/shared_modules/indexer_connector/tests/component/indexerConnector_test.cpp index 557414c2a8b..db369431f0c 100644 --- a/src/shared_modules/indexer_connector/tests/component/indexerConnector_test.cpp +++ b/src/shared_modules/indexer_connector/tests/component/indexerConnector_test.cpp @@ -576,3 +576,17 @@ TEST_F(IndexerConnectorTest, TemplateFileNotFoundThrows) EXPECT_THROW(IndexerConnector(indexerConfig, INVALID_TEMPLATE_FILE_PATH, logFunction, INDEXER_TIMEOUT), std::runtime_error); } + +/** + * @brief Test the initialization with upper case character in the index name. + * + */ +TEST_F(IndexerConnectorTest, UpperCaseCharactersIndexName) +{ + + // Create connector and wait until the connection is established. 
+ nlohmann::json indexerConfig; + indexerConfig["name"] = "UPPER_case_INDEX"; + indexerConfig["hosts"] = nlohmann::json::array({A_ADDRESS}); + EXPECT_THROW(IndexerConnector(indexerConfig, TEMPLATE_FILE_PATH, logFunction, INDEXER_TIMEOUT), std::runtime_error); +} diff --git a/src/shared_modules/utils/stringHelper.h b/src/shared_modules/utils/stringHelper.h index 864a2b879b2..28dfb807a99 100644 --- a/src/shared_modules/utils/stringHelper.h +++ b/src/shared_modules/utils/stringHelper.h @@ -261,6 +261,12 @@ namespace Utils return temp; } + static bool haveUpperCaseCharacters(const std::string& str) + { + return std::any_of( + std::begin(str), std::end(str), [](std::string::value_type character) { return std::isupper(character); }); + } + static std::string toSentenceCase(const std::string& str) { std::string temp; diff --git a/src/shared_modules/utils/tests/stringHelper_test.cpp b/src/shared_modules/utils/tests/stringHelper_test.cpp index a5be9c5e70d..8132d1270da 100644 --- a/src/shared_modules/utils/tests/stringHelper_test.cpp +++ b/src/shared_modules/utils/tests/stringHelper_test.cpp @@ -582,3 +582,10 @@ TEST_F(StringUtilsTest, padString) EXPECT_EQ(Utils::padString("", '0', 4), "0000"); } +TEST_F(StringUtilsTest, haveUpperCaseCharacters) +{ + EXPECT_TRUE(Utils::haveUpperCaseCharacters("Test")); + EXPECT_FALSE(Utils::haveUpperCaseCharacters("test")); + EXPECT_FALSE(Utils::haveUpperCaseCharacters("")); +} + diff --git a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp index cf72d2c7135..153587d4c66 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp @@ -116,7 +116,7 @@ class PolicyManager final : public Singleton newPolicy["indexer"]["ssl"]["key"] = ""; } newPolicy["indexer"]["name"] = - STATES_VD_INDEX_NAME_PREFIX + newPolicy.at("clusterName").get_ref(); + Utils::toLowerCase(STATES_VD_INDEX_NAME_PREFIX + newPolicy.at("clusterName").get_ref()); if (!newPolicy.at("vulnerability-detection").contains("feed-update-interval")) { From 09bb65bea949fb1c80c418e46ccce56ae3d1fc84 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Sun, 12 May 2024 00:45:26 -0300 Subject: [PATCH 055/419] Add prefix to synchronization algorithm, when the cluster is enabled. --- .../scanOrchestrator/globalSyncInventory.hpp | 8 +- .../tests/mocks/MockIndexerConnector.hpp | 7 + .../mocks/TrampolineIndexerConnector.hpp | 10 + .../tests/unit/globalSyncInventory_test.cpp | 227 ++++++++++++++++++ .../tests/unit/globalSyncInventory_test.hpp | 57 +++++ 5 files changed, 308 insertions(+), 1 deletion(-) create mode 100644 src/wazuh_modules/vulnerability_scanner/tests/unit/globalSyncInventory_test.cpp create mode 100644 src/wazuh_modules/vulnerability_scanner/tests/unit/globalSyncInventory_test.hpp diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/globalSyncInventory.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/globalSyncInventory.hpp index 086a6d1b98c..8ddef13253d 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/globalSyncInventory.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/globalSyncInventory.hpp @@ -51,7 +51,13 @@ class TGlobalSyncInventory final : public AbstractHandlersync(data->agentId().data()); + const auto clusterManagerPrefix = data->agentId().compare("000") == 0 && data->clusterStatus() + ? 
std::string(data->clusterNodeName()) + "_"
+                                                  : "";
+            std::string key = clusterManagerPrefix;
+            key.append(data->agentId());
+
+            m_indexerConnector->sync(key);
         }
         return AbstractHandler>::handleRequest(std::move(data));
     }
diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockIndexerConnector.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockIndexerConnector.hpp
index 11e86d369b8..3ead444f2f3 100644
--- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockIndexerConnector.hpp
+++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockIndexerConnector.hpp
@@ -38,6 +38,13 @@ class MockIndexerConnector
      * @note This method is intended for testing purposes and does not perform any real action.
      */
     MOCK_METHOD(void, publish, (const std::string& message), (const));
+
+    /**
+     * @brief Mock method for syncing.
+     *
+     * @note This method is intended for testing purposes and does not perform any real action.
+     */
+    MOCK_METHOD(void, sync, (const std::string& agentId), (const));
 };
 
 #endif // _MOCK_INDEXERCONNECTOR_HPP
diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineIndexerConnector.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineIndexerConnector.hpp
index 9d4ce9eafc4..3696357a66d 100644
--- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineIndexerConnector.hpp
+++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineIndexerConnector.hpp
@@ -41,6 +41,16 @@ class TrampolineIndexerConnector
     {
         spIndexerConnectorMock->publish(message);
     }
+
+    /**
+     * @brief Sync the agent with the indexer.
+     *
+     * @param agentId Agent identifier.
+     */
+    void sync(const std::string& agentId) const
+    {
+        spIndexerConnectorMock->sync(agentId);
+    }
 };
 
 #endif //_TRAMPOLINE_INDEXERCONNECTOR_HPP
diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/globalSyncInventory_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/globalSyncInventory_test.cpp
new file mode 100644
index 00000000000..40608fc0ba4
--- /dev/null
+++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/globalSyncInventory_test.cpp
@@ -0,0 +1,227 @@
+/*
+ * Wazuh Send report test
+ * Copyright (C) 2015, Wazuh Inc.
+ * May 12, 2024.
+ *
+ * This program is free software; you can redistribute it
+ * and/or modify it under the terms of the GNU General Public
+ * License (version 2) as published by the FSF - Free Software
+ * Foundation.
+ */
+
+#include "globalSyncInventory_test.hpp"
+#include "TrampolineIndexerConnector.hpp"
+#include "TrampolineOsDataCache.hpp"
+#include "flatbuffers/idl.h"
+#include "flatbuffers/include/syscollector_synchronization_schema.h"
+#include "globalSyncInventory.hpp"
+#include "scanContext.hpp"
+
+using ::testing::_;
+
+const std::string SYNCHRONIZATION_INTEGRITY_GLOBAL_000_MSG =
+    R"(
+    {
+        "agent_info": {
+            "agent_id": "000",
+            "agent_ip": "192.168.33.20",
+            "agent_name": "focal"
+        },
+        "data_type": "integrity_check_global",
+        "data": {
+            "id": 1700236640,
+            "attributes_type": "syscollector_packages"
+        }
+    }
+    )";
+
+const std::string SYNCHRONIZATION_INTEGRITY_GLOBAL_001_MSG =
+    R"(
+    {
+        "agent_info": {
+            "agent_id": "001",
+            "agent_ip": "192.168.33.20",
+            "agent_name": "focal"
+        },
+        "data_type": "integrity_check_global",
+        "data": {
+            "id": 1700236640,
+            "attributes_type": "syscollector_packages"
+        }
+    }
+    )";
+
+/**
+ * @brief Test the sync message with the agent 000 and cluster enabled.
+ * In this case it is expected to send the message to the indexer with the node name as a prefix of the agent id.
+ */
+TEST_F(GlobalSyncInventoryTest, SyncMessageWithClusterEnabled)
+{
+    // Policy manager initialization.
+    const auto& configJson {nlohmann::json::parse(R"({
+        "vulnerability-detection": {
+            "enabled": "yes",
+            "index-status": "yes",
+            "cti-url": "cti-url.com"
+        },
+        "osdataLRUSize":1000,
+        "clusterName":"cluster01",
+        "clusterNodeName":"node01",
+        "clusterEnabled":true
+    })")};
+
+    PolicyManager::instance().initialize(configJson);
+
+    spIndexerConnectorMock = std::make_shared();
+    EXPECT_CALL(*spIndexerConnectorMock, sync("node01_000")).Times(1);
+
+    auto pIndexerConnectorTrap = std::make_shared();
+
+    Os osData {.hostName = "osdata_hostname",
+               .architecture = "osdata_architecture",
+               .name = "osdata_name",
+               .codeName = "osdata_codeName",
+               .majorVersion = "osdata_majorVersion",
+               .minorVersion = "osdata_minorVersion",
+               .patch = "osdata_patch",
+               .build = "osdata_build",
+               .platform = "osdata_platform",
+               .version = "osdata_version",
+               .release = "osdata_release",
+               .displayVersion = "osdata_displayVersion",
+               .sysName = "osdata_sysName",
+               .kernelVersion = "osdata_kernelVersion",
+               .kernelRelease = "osdata_kernelRelease"};
+
+    spOsDataCacheMock = std::make_shared();
+    EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData));
+
+    flatbuffers::Parser parser;
+    ASSERT_TRUE(parser.Parse(syscollector_synchronization_SCHEMA));
+    ASSERT_TRUE(parser.Parse(SYNCHRONIZATION_INTEGRITY_GLOBAL_000_MSG.c_str()));
+    uint8_t* buffer = parser.builder_.GetBufferPointer();
+    std::variant
+        syscollectorSynchronization = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer));
+    auto scanContextOriginal = std::make_shared>(syscollectorSynchronization);
+
+    auto spGlobalSyncInventory =
+        std::make_shared>>(
+            pIndexerConnectorTrap);
+
+    EXPECT_NO_THROW(spGlobalSyncInventory->handleRequest(scanContextOriginal));
+    PolicyManager::instance().teardown();
+}
+
+/**
+ * @brief Test the sync message with the agent 001 and cluster enabled.
+ * In this case it is expected to send the message to the indexer without the node name as a prefix of the agent id.
+ */
+TEST_F(GlobalSyncInventoryTest, SyncMessageWithClusterEnabledWithoutNodeName)
+{
+    // Policy manager initialization.
+    const auto& configJson {nlohmann::json::parse(R"({
+        "vulnerability-detection": {
+            "enabled": "yes",
+            "index-status": "yes",
+            "cti-url": "cti-url.com"
+        },
+        "osdataLRUSize":1000,
+        "clusterName":"cluster01",
+        "clusterEnabled":true
+    })")};
+
+    PolicyManager::instance().initialize(configJson);
+    spIndexerConnectorMock = std::make_shared();
+    EXPECT_CALL(*spIndexerConnectorMock, sync("001")).Times(1);
+
+    auto pIndexerConnectorTrap = std::make_shared();
+
+    Os osData {.hostName = "osdata_hostname",
+               .architecture = "osdata_architecture",
+               .name = "osdata_name",
+               .codeName = "osdata_codeName",
+               .majorVersion = "osdata_majorVersion",
+               .minorVersion = "osdata_minorVersion",
+               .patch = "osdata_patch",
+               .build = "osdata_build",
+               .platform = "osdata_platform",
+               .version = "osdata_version",
+               .release = "osdata_release",
+               .displayVersion = "osdata_displayVersion",
+               .sysName = "osdata_sysName",
+               .kernelVersion = "osdata_kernelVersion",
+               .kernelRelease = "osdata_kernelRelease"};
+
+    spOsDataCacheMock = std::make_shared();
+    EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData));
+
+    flatbuffers::Parser parser;
+    ASSERT_TRUE(parser.Parse(syscollector_synchronization_SCHEMA));
+    ASSERT_TRUE(parser.Parse(SYNCHRONIZATION_INTEGRITY_GLOBAL_001_MSG.c_str()));
+    uint8_t* buffer = parser.builder_.GetBufferPointer();
+    std::variant
+        syscollectorSynchronization = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer));
+    auto scanContextOriginal = std::make_shared>(syscollectorSynchronization);
+
+    auto spGlobalSyncInventory =
+        std::make_shared>>(
+            pIndexerConnectorTrap);
+
+    EXPECT_NO_THROW(spGlobalSyncInventory->handleRequest(scanContextOriginal));
+    PolicyManager::instance().teardown();
+}
+
+/**
+ * @brief Test the globalSyncInventory class with indexer nullptr as class constructor parameter.
+ * In this case nothing is expected to be called.
+ */
+TEST_F(GlobalSyncInventoryTest, GlobalSyncInventoryWithIndexerNullptr)
+{
+    // Policy manager initialization.
+ const auto& configJson {nlohmann::json::parse(R"({ + "vulnerability-detection": { + "enabled": "yes", + "index-status": "yes", + "cti-url": "cti-url.com" + }, + "osdataLRUSize":1000, + "clusterName":"cluster01", + "clusterEnabled":false + })")}; + + PolicyManager::instance().initialize(configJson); + + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "osdata_codeName", + .majorVersion = "osdata_majorVersion", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "osdata_platform", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_synchronization_SCHEMA)); + ASSERT_TRUE(parser.Parse(SYNCHRONIZATION_INTEGRITY_GLOBAL_001_MSG.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorSynchronization = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); + auto scanContextOriginal = std::make_shared>(syscollectorSynchronization); + + auto spGlobalSyncInventory = + std::make_shared>>( + nullptr); + + EXPECT_NO_THROW(spGlobalSyncInventory->handleRequest(scanContextOriginal)); + PolicyManager::instance().teardown(); +} diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/globalSyncInventory_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/globalSyncInventory_test.hpp new file mode 100644 index 00000000000..9215d660ef0 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/globalSyncInventory_test.hpp @@ -0,0 +1,57 @@ +/* + * Wazuh Global sync inventory + * Copyright (C) 2015, Wazuh Inc. + * May 11, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#ifndef _GLOBAL_SYNC_INVENTORY_TEST_HPP +#define _GLOBAL_SYNC_INVENTORY_TEST_HPP + +#include "MockOsDataCache.hpp" +#include "policyManager.hpp" +#include "gtest/gtest.h" + +// External shared pointers definitions +extern std::shared_ptr spOsDataCacheMock; + +/** + * @brief GlobalSyncInventory test class. + * + */ +class GlobalSyncInventoryTest : public ::testing::Test +{ +protected: + /** + * @brief Construct a new GlobalSyncInventoryTest object + * + */ + GlobalSyncInventoryTest() = default; + + /** + * @brief Destroy the GlobalSyncInventoryTest object + * + */ + ~GlobalSyncInventoryTest() override = default; + + /** + * @brief Set the environment for testing. + * + */ + void SetUp() override {} + + /** + * @brief Clean the environment after testing. 
+ * + */ + void TearDown() override + { + spOsDataCacheMock.reset(); + } +}; + +#endif // _GLOBAL_SYNC_INVENTORY_TEST_HPP From 6b0c5acc1c08e1233a318251bf8ea9ecf0aa0400 Mon Sep 17 00:00:00 2001 From: Gabriel Valenzuela Date: Thu, 9 May 2024 03:21:08 -0300 Subject: [PATCH 056/419] CL: - Refactor wazuhDbWrapper --- src/shared_modules/utils/socketDBWrapper.hpp | 55 +++++++++++-------- src/shared_modules/utils/socketWrapper.hpp | 1 + .../utils/tests/socketDBWrapper_test.cpp | 43 ++++----------- .../utils/tests/socketDBWrapper_test.hpp | 11 +++- .../wazuh_db/test_wdb_global_helpers.c | 2 +- .../buildAllAgentListContext.hpp | 7 +-- .../src/scanOrchestrator/osDataCache.hpp | 18 +----- .../src/scanOrchestrator/osScanner.hpp | 7 +-- .../src/scanOrchestrator/scanAgentList.hpp | 10 ++-- .../src/scanOrchestrator/scanOrchestrator.hpp | 1 - .../src/vulnerabilityScannerFacade.cpp | 5 ++ .../tests/mocks/MockSocketDBWrapper.hpp | 5 +- .../tests/mocks/TrampolineSocketDBWrapper.hpp | 6 -- .../tests/unit/scanOrchestrator_test.cpp | 26 ++------- .../testtool/wazuhDBQuery/main.cpp | 5 +- 15 files changed, 79 insertions(+), 123 deletions(-) diff --git a/src/shared_modules/utils/socketDBWrapper.hpp b/src/shared_modules/utils/socketDBWrapper.hpp index c3bd31bc1e0..78fedff5311 100644 --- a/src/shared_modules/utils/socketDBWrapper.hpp +++ b/src/shared_modules/utils/socketDBWrapper.hpp @@ -15,12 +15,14 @@ #include "json.hpp" #include "socketClient.hpp" #include "socketDBWrapperException.hpp" +#include "singleton.hpp" #include #include #include #include auto constexpr DB_WRAPPER_QUERY_WAIT_TIME {5000}; +auto constexpr WDB_SOCKET {"queue/db/wdb"}; char constexpr DB_WRAPPER_OK[] = {"ok"}; char constexpr DB_WRAPPER_ERROR[] = {"err"}; @@ -40,7 +42,7 @@ enum class DbQueryStatus : uint8_t INVALID_RESPONSE }; -class SocketDBWrapper final +class SocketDBWrapper final : public Singleton { private: std::unique_ptr, EpollWrapper>> m_dbSocket; @@ -51,15 +53,17 @@ class SocketDBWrapper final std::mutex m_mutexMessage; std::mutex m_mutexResponse; std::condition_variable m_conditionVariable; - std::string m_socketPath; + std::atomic m_teardown {false}; +public: - void initializeSocket() + void init() { m_dbSocket = - std::make_unique, EpollWrapper>>(m_socketPath); + std::make_unique, EpollWrapper>>(WDB_SOCKET); m_dbSocket->connect( [&](const char* body, uint32_t bodySize, const char*, uint32_t) { + std::cerr << "Received (SOCKETDBWRAPPER) data: " << body << std::endl; std::scoped_lock lock {m_mutexResponse}; std::string responsePacket(body, bodySize); @@ -148,40 +152,33 @@ class SocketDBWrapper final }); } -public: - explicit SocketDBWrapper(std::string socketPath) - : m_socketPath(std::move(socketPath)) - { - initializeSocket(); - } - void query(const std::string& query, nlohmann::json& response) { // Acquire lock to avoid multiple threads sending queries at the same time std::scoped_lock lockMessage {m_mutexMessage}; - // Acquire lock before clearing the response - std::unique_lock lockResponse {m_mutexResponse}; - - if (!m_dbSocket) + if (m_teardown.load()) { - initializeSocket(); + return; } + // Acquire lock before clearing the response + std::unique_lock lockResponse {m_mutexResponse}; + m_response.clear(); m_responsePartial.clear(); // coverity[missing_lock] m_exceptionStr.clear(); m_dbSocket->send(query.c_str(), query.size()); - if (const auto res = - m_conditionVariable.wait_for(lockResponse, std::chrono::milliseconds(DB_WRAPPER_QUERY_WAIT_TIME)); - res == std::cv_status::timeout) + std::cerr << 
"m_conditionVariable.wait(lockResponse) - BEFORE" << std::endl; + m_conditionVariable.wait(lockResponse); + std::cerr << "m_conditionVariable.wait(lockResponse) - AFTER" << std::endl; + + // Check if the object was destroyed. If so, return and do not process the response + if(m_teardown.load()) { - // Restart the socket connection to avoid the reception of old messages - m_dbSocket->stop(); - initializeSocket(); - throw std::runtime_error("Timeout waiting for DB response"); + return; } if (!m_exceptionStr.empty()) @@ -200,6 +197,18 @@ class SocketDBWrapper final response = m_response; } + + /** + * @brief Teardown the Socket DB Wrapper object + * + */ + void teardown() + { + std::cerr<<"Teardown SocketDBWrapper"<stop(); + } }; #endif // _SOCKET_DB_WRAPPER_HPP diff --git a/src/shared_modules/utils/socketWrapper.hpp b/src/shared_modules/utils/socketWrapper.hpp index c168fcf535d..c6fa098364b 100644 --- a/src/shared_modules/utils/socketWrapper.hpp +++ b/src/shared_modules/utils/socketWrapper.hpp @@ -679,6 +679,7 @@ class Socket final : public T // Send the data. while (bufferSize != amountSent) { + std::cerr<<"Sending data to socket"< {""}; + m_responses = std::vector {" "}; nlohmann::json output; - SocketDBWrapper socketDBWrapper(TEST_SOCKET); // The exception captured here is the timeout - EXPECT_THROW(socketDBWrapper.query(m_query, output), std::exception); + EXPECT_THROW(SocketDBWrapper::instance().query(m_query, output), std::exception); } TEST_F(SocketDBWrapperTest, ErrorTest) @@ -35,8 +34,7 @@ TEST_F(SocketDBWrapperTest, ErrorTest) m_responses = std::vector {R"(err Things happened)"}; nlohmann::json output; - SocketDBWrapper socketDBWrapper(TEST_SOCKET); - EXPECT_THROW(socketDBWrapper.query(m_query, output), std::exception); + EXPECT_THROW(SocketDBWrapper::instance().query(m_query, output), std::exception); } TEST_F(SocketDBWrapperTest, UnknownTest) @@ -45,8 +43,7 @@ TEST_F(SocketDBWrapperTest, UnknownTest) m_responses = std::vector {R"(unk Things happened)"}; nlohmann::json output; - SocketDBWrapper socketDBWrapper(TEST_SOCKET); - EXPECT_THROW(socketDBWrapper.query(m_query, output), std::exception); + EXPECT_THROW(SocketDBWrapper::instance().query(m_query, output), std::exception); } TEST_F(SocketDBWrapperTest, IgnoreTest) @@ -55,8 +52,7 @@ TEST_F(SocketDBWrapperTest, IgnoreTest) m_responses = std::vector {R"(ign Things happened)"}; nlohmann::json output; - SocketDBWrapper socketDBWrapper(TEST_SOCKET); - EXPECT_THROW(socketDBWrapper.query(m_query, output), std::exception); + EXPECT_THROW(SocketDBWrapper::instance().query(m_query, output), std::exception); } TEST_F(SocketDBWrapperTest, DueTest) @@ -68,8 +64,7 @@ TEST_F(SocketDBWrapperTest, DueTest) R"(ok {"status":"SUCCESS"})"}; nlohmann::json output; - SocketDBWrapper socketDBWrapper(TEST_SOCKET); - EXPECT_NO_THROW(socketDBWrapper.query(m_query, output)); + EXPECT_NO_THROW(SocketDBWrapper::instance().query(m_query, output)); ASSERT_EQ(output[0].at("field"), "value1"); ASSERT_EQ(output[1].at("field"), "value2"); @@ -82,20 +77,9 @@ TEST_F(SocketDBWrapperTest, InvalidTest) m_responses = std::vector {R"(Invalid)"}; nlohmann::json output; - SocketDBWrapper socketDBWrapper(TEST_SOCKET); - EXPECT_THROW(socketDBWrapper.query(m_query, output), std::exception); + EXPECT_THROW(SocketDBWrapper::instance().query(m_query, output), std::exception); } -TEST_F(SocketDBWrapperTest, TimeoutTest) -{ - m_query = "SELECT * FROM test_table;"; - m_responses = std::vector {R"(ok [{"field": "value"}])"}; - m_sleepTime = DB_WRAPPER_QUERY_WAIT_TIME + 10; - - 
nlohmann::json output; - SocketDBWrapper socketDBWrapper(TEST_SOCKET); - EXPECT_THROW(socketDBWrapper.query(m_query, output), std::exception); -} TEST_F(SocketDBWrapperTest, OkTest) { @@ -103,23 +87,20 @@ TEST_F(SocketDBWrapperTest, OkTest) m_responses = std::vector {R"(ok [{"field": "value"}])"}; nlohmann::json output; - SocketDBWrapper socketDBWrapper(TEST_SOCKET); - EXPECT_NO_THROW(socketDBWrapper.query(m_query, output)); + EXPECT_NO_THROW(SocketDBWrapper::instance().query(m_query, output)); ASSERT_EQ(output[0].at("field"), "value"); } TEST_F(SocketDBWrapperTestNoSetUp, NoSocketTest) { - std::unique_ptr socketDBWrapper; - EXPECT_NO_THROW(socketDBWrapper = std::make_unique(TEST_SOCKET)); + SocketDBWrapper::instance(); std::this_thread::sleep_for(std::chrono::seconds(2)); - EXPECT_NO_THROW(socketDBWrapper.reset()); + EXPECT_NO_THROW(SocketDBWrapper::instance().teardown()); } TEST_F(SocketDBWrapperTestNoSetUp, NoSocketTestNoSleep) { - std::unique_ptr socketDBWrapper; - EXPECT_NO_THROW(socketDBWrapper = std::make_unique(TEST_SOCKET)); - EXPECT_NO_THROW(socketDBWrapper.reset()); + SocketDBWrapper::instance(); + EXPECT_NO_THROW(SocketDBWrapper::instance().teardown()); } diff --git a/src/shared_modules/utils/tests/socketDBWrapper_test.hpp b/src/shared_modules/utils/tests/socketDBWrapper_test.hpp index 5eb9aa2808a..28d51a54cb3 100644 --- a/src/shared_modules/utils/tests/socketDBWrapper_test.hpp +++ b/src/shared_modules/utils/tests/socketDBWrapper_test.hpp @@ -13,17 +13,21 @@ #define _SOCKET_DB_WRAPPER_TEST_HPP #include "socketServer.hpp" +#include "socketDBWrapper.hpp" #include "gtest/gtest.h" #include #include -auto constexpr TEST_SOCKET {"tmp/temp_sock"}; +auto constexpr TEST_SOCKET {"queue/db/wdb"}; + class SocketDBWrapperTest : public ::testing::Test { protected: SocketDBWrapperTest() - : m_sleepTime {0} {}; + : m_sleepTime {0} { + SocketDBWrapper::instance().init(); + }; ~SocketDBWrapperTest() override = default; void SetUp() override @@ -34,6 +38,7 @@ class SocketDBWrapperTest : public ::testing::Test m_socketServer->listen( [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) { + std::cerr<<"Received (SOCKET SERVER) data: "<send(fd, response.c_str(), response.size()); } }); }; void TearDown() override { + std::cerr<<"TearDown"<stop(); m_socketServer.reset(); m_query.clear(); diff --git a/src/unit_tests/wazuh_db/test_wdb_global_helpers.c b/src/unit_tests/wazuh_db/test_wdb_global_helpers.c index 025e732c049..4c50658c900 100644 --- a/src/unit_tests/wazuh_db/test_wdb_global_helpers.c +++ b/src/unit_tests/wazuh_db/test_wdb_global_helpers.c @@ -2205,7 +2205,7 @@ void test_wdb_get_all_agents_wdbc_parse_error(void **state) { void test_wdb_get_all_agents_success(void **state) { const char *query_str = "global get-all-agents last_id 0"; - // Setting the payload +// Setting the payload set_payload = 1; strcpy(test_payload, "ok [{\"id\":1},{\"id\":2},{\"id\":3}]"); cJSON* test_json = __real_cJSON_Parse(test_payload+3); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp index eb3798e4565..77177e19b0a 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp @@ -56,16 +56,13 @@ class TBuildAllAgentListContext final : public AbstractHandler handleRequest(std::shared_ptr data) 
override { - // Instance the socketWrapper for wazuhDb - static SocketDBWrapper wdbSocketWrapper(WDB_SOCKET); - nlohmann::json response; try { // Execute query - wdbSocketWrapper.query(WazuhDBQueryBuilder::builder().global().selectAll().fromTable("agent").build(), - response); + SocketDBWrapper::instance().query( + WazuhDBQueryBuilder::builder().global().selectAll().fromTable("agent").build(), response); } // LCOV_EXCL_START catch (const std::exception& e) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp index 27d545bb13f..310fa79b5a6 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp @@ -21,8 +21,6 @@ #include #include -auto constexpr WDB_SOCKET {"queue/db/wdb"}; - /** * @brief Os structure. */ @@ -54,14 +52,14 @@ class OsDataCache final : public Singleton private: LRUCache m_osData {PolicyManager::instance().getOsdataLRUSize()}; std::shared_mutex m_mutex; - std::optional m_wdbSocketWrapper {std::nullopt}; Os getOsDataFromWdb(const std::string& agentId) { nlohmann::json response; try { - m_wdbSocketWrapper->query(WazuhDBQueryBuilder::builder().agentGetOsInfoCommand(agentId).build(), response); + SocketDBWrapper::instance().query(WazuhDBQueryBuilder::builder().agentGetOsInfoCommand(agentId).build(), + response); } catch (const std::exception& e) { @@ -111,18 +109,6 @@ class OsDataCache final : public Singleton return *value; } - if (!m_wdbSocketWrapper) - { - try - { - m_wdbSocketWrapper.emplace(WDB_SOCKET); - } - catch (...) - { - throw WdbDataException("Error creating socketDBWrapper", agentId); - } - } - // This may throw an exception that will be captured by the caller method. auto osData = getOsDataFromWdb(agentId); m_osData.insertKey(agentId, osData); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp index 10344468c02..4e0b4bf0a71 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp @@ -28,9 +28,7 @@ * the vulnerabilities found. 
* */ -template +template class TOsScanner final : public AbstractHandler> { private: @@ -56,13 +54,12 @@ class TOsScanner final : public AbstractHandler> std::shared_ptr handleRequest(std::shared_ptr data) override { nlohmann::json responseHotfixes; - static TSocketDBWrapper wdbSocketWrapper(WDB_SOCKET); if (data->osPlatform() == "windows") { try { - wdbSocketWrapper.query( + SocketDBWrapper::instance().query( WazuhDBQueryBuilder::builder().agentGetHotfixesCommand(data->agentId().data()).build(), responseHotfixes); } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp index ff79315c74e..7c62a53ab54 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp @@ -45,12 +45,11 @@ class TScanAgentList final : public AbstractHandler>, typename TIndexerConnector = IndexerConnector, typename TDatabaseFeedManager = DatabaseFeedManager, - typename TSocketDBWrapper = SocketDBWrapper, typename TOSPrimitives = OSPrimitives, auto DelayedPostponeSeconds = DELAYED_POSTPONE_SECONDS> class TScanOrchestrator final : public TOSPrimitives diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp index e155abd52d8..270c1d10aec 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp @@ -339,6 +339,8 @@ void VulnerabilityScannerFacade::start( return; } + SocketDBWrapper::instance().init(); + // Indexer connector initialization. if (policyManager.isIndexerEnabled()) { @@ -441,4 +443,7 @@ void VulnerabilityScannerFacade::stop() PolicyManager::instance().teardown(); m_reportDispatcher.reset(); m_eventDispatcher.reset(); + + // Destroy socketDbWrapper + SocketDBWrapper::instance().teardown(); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockSocketDBWrapper.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockSocketDBWrapper.hpp index 99932c7b9c4..10b0691c16e 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockSocketDBWrapper.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockSocketDBWrapper.hpp @@ -39,12 +39,9 @@ class MockSocketDBWrapper MockSocketDBWrapper() = default; /** - * @brief Mock Constructor + * @brief Mock Destructor * - * @note This method is intended for testing purposes and does not perform any real action. */ - MockSocketDBWrapper(const std::string&) {}; - virtual ~MockSocketDBWrapper() = default; /** diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineSocketDBWrapper.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineSocketDBWrapper.hpp index 6b8c2d7c3a3..6806ce51b28 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineSocketDBWrapper.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineSocketDBWrapper.hpp @@ -27,12 +27,6 @@ class TrampolineSocketDBWrapper final */ TrampolineSocketDBWrapper() = default; - /** - * @brief Constructor. - * - */ - TrampolineSocketDBWrapper(const std::string&) {}; - /** * @brief Destructor. 
* diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp index 4d868427732..a45f3e4a94e 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp @@ -286,14 +286,12 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsert) std::shared_mutex mutexScanOrchestrator; spScanContext = std::make_shared>(syscollectorDelta); - spSocketDBWrapperMock = std::make_shared(WDB_SOCKET); TScanOrchestrator>, MockIndexerConnector, - MockDatabaseFeedManager, - TrampolineSocketDBWrapper> + MockDatabaseFeedManager> scanOrchestrator(spIndexerConnectorMock, spDatabaseFeedManagerMock, spReportDispatcher, mutexScanOrchestrator); flatbuffers::FlatBufferBuilder builder; @@ -400,14 +398,12 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageDelete) std::shared_mutex mutexScanOrchestrator; spScanContext = std::make_shared>(syscollectorDelta); - spSocketDBWrapperMock = std::make_shared(WDB_SOCKET); TScanOrchestrator>, MockIndexerConnector, - MockDatabaseFeedManager, - TrampolineSocketDBWrapper> + MockDatabaseFeedManager> scanOrchestrator(spIndexerConnectorMock, spDatabaseFeedManagerMock, spReportDispatcher, mutexScanOrchestrator); flatbuffers::FlatBufferBuilder builder; @@ -514,14 +510,12 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixInsert) std::shared_mutex mutexScanOrchestrator; spScanContext = std::make_shared>(syscollectorDelta); - spSocketDBWrapperMock = std::make_shared(WDB_SOCKET); TScanOrchestrator>, MockIndexerConnector, - MockDatabaseFeedManager, - TrampolineSocketDBWrapper> + MockDatabaseFeedManager> scanOrchestrator(spIndexerConnectorMock, spDatabaseFeedManagerMock, spReportDispatcher, mutexScanOrchestrator); flatbuffers::FlatBufferBuilder builder; @@ -628,14 +622,12 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixDelete) std::shared_mutex mutexScanOrchestrator; spScanContext = std::make_shared>(syscollectorDelta); - spSocketDBWrapperMock = std::make_shared(WDB_SOCKET); TScanOrchestrator>, MockIndexerConnector, - MockDatabaseFeedManager, - TrampolineSocketDBWrapper> + MockDatabaseFeedManager> scanOrchestrator(spIndexerConnectorMock, spDatabaseFeedManagerMock, spReportDispatcher, mutexScanOrchestrator); flatbuffers::FlatBufferBuilder builder; @@ -727,14 +719,12 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeOs) std::shared_mutex mutexScanOrchestrator; spScanContext = std::make_shared>(syscollectorDelta); - spSocketDBWrapperMock = std::make_shared(WDB_SOCKET); TScanOrchestrator>, MockIndexerConnector, - MockDatabaseFeedManager, - TrampolineSocketDBWrapper> + MockDatabaseFeedManager> scanOrchestrator(spIndexerConnectorMock, spDatabaseFeedManagerMock, spReportDispatcher, mutexScanOrchestrator); flatbuffers::FlatBufferBuilder builder; @@ -843,14 +833,12 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeIntegrityClear) std::shared_mutex mutexScanOrchestrator; spScanContext = std::make_shared>(syscollectorSynchronization); - spSocketDBWrapperMock = std::make_shared(WDB_SOCKET); TScanOrchestrator>, MockIndexerConnector, - MockDatabaseFeedManager, - TrampolineSocketDBWrapper> + MockDatabaseFeedManager> scanOrchestrator(spIndexerConnectorMock, spDatabaseFeedManagerMock, spReportDispatcher, mutexScanOrchestrator); flatbuffers::FlatBufferBuilder builder; @@ -957,14 +945,12 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsertInDelayed) 
std::shared_mutex mutexScanOrchestrator; spScanContext = std::make_shared>(syscollectorDelta); - spSocketDBWrapperMock = std::make_shared(WDB_SOCKET); TScanOrchestrator>, MockIndexerConnector, MockDatabaseFeedManager, - TrampolineSocketDBWrapper, OSPrimitives, 1> // Delayed time 1 second scanOrchestrator(spIndexerConnectorMock, spDatabaseFeedManagerMock, spReportDispatcher, mutexScanOrchestrator); diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/wazuhDBQuery/main.cpp b/src/wazuh_modules/vulnerability_scanner/testtool/wazuhDBQuery/main.cpp index bdaecca8d95..20ef880a1f3 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/wazuhDBQuery/main.cpp +++ b/src/wazuh_modules/vulnerability_scanner/testtool/wazuhDBQuery/main.cpp @@ -43,9 +43,8 @@ int main(const int argc, const char* argv[]) // Read json configuration file auto configuration = nlohmann::json::parse(std::ifstream(cmdLineArgs.getConfigurationFilePath())); - - SocketDBWrapper socketDBWrapper(WAZUH_DB_SOCK); nlohmann::json response; + SocketDBWrapper::instance().init(); for (const auto& query : configuration.at("queries")) { @@ -64,7 +63,7 @@ int main(const int argc, const char* argv[]) } auto queryStr = wazuhDBQueryBuilder.build(); - socketDBWrapper.query(queryStr, response); + SocketDBWrapper::instance().query(queryStr, response); std::cout << "Response to \"" << queryStr << "\":\n" << response.dump(4) << std::endl; } From 6cf2b8f0b2c89e6ba9a73adb20db83705799d83d Mon Sep 17 00:00:00 2001 From: Gabriel Valenzuela Date: Thu, 9 May 2024 21:46:45 -0300 Subject: [PATCH 057/419] CL: - Refactor UTs. --- .../src/scanOrchestrator/osDataCache.hpp | 7 ++- .../src/scanOrchestrator/scanContext.hpp | 2 +- .../src/scanOrchestrator/scanOrchestrator.hpp | 1 + .../tests/mocks/MockSocketDBWrapper.hpp | 9 ++- .../tests/mocks/TrampolineSocketDBWrapper.hpp | 11 +++- .../tests/unit/clearSendReport_test.cpp | 1 - .../tests/unit/eventSendReport_test.cpp | 1 - .../tests/unit/osDataCache_test.cpp | 47 +++++++++------ .../tests/unit/scanOrchestrator_test.cpp | 57 ++++++++++++++++--- .../tests/unit/scanOrchestrator_test.hpp | 5 ++ 10 files changed, 107 insertions(+), 34 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp index 310fa79b5a6..8fe8caf16b2 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp @@ -47,7 +47,8 @@ struct Os final /** * @brief OsDataCache class. 
*/ -class OsDataCache final : public Singleton +template +class OsDataCache final : public Singleton> { private: LRUCache m_osData {PolicyManager::instance().getOsdataLRUSize()}; @@ -58,8 +59,8 @@ class OsDataCache final : public Singleton nlohmann::json response; try { - SocketDBWrapper::instance().query(WazuhDBQueryBuilder::builder().agentGetOsInfoCommand(agentId).build(), - response); + TSocketDBWrapper::instance().query(WazuhDBQueryBuilder::builder().agentGetOsInfoCommand(agentId).build(), + response); } catch (const std::exception& e) { diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index e2608ce006a..3b27a5b8f4a 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -111,7 +111,7 @@ struct AgentData * @tparam TOsDataCache os data cache type. * @tparam TGlobalData global data type. */ -template +template, typename TGlobalData = GlobalData> struct TScanContext final { private: diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp index 2863f205e8a..6da8b0b06f5 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp @@ -51,6 +51,7 @@ template class TScanOrchestrator final : public TOSPrimitives { diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockSocketDBWrapper.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockSocketDBWrapper.hpp index 10b0691c16e..0ee8130746d 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockSocketDBWrapper.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockSocketDBWrapper.hpp @@ -28,7 +28,7 @@ * This class is used in unit tests only to verify interactions with a content * register without actually performing real operations on it. */ -class MockSocketDBWrapper +class MockSocketDBWrapper : public Singleton { public: /** @@ -50,6 +50,13 @@ class MockSocketDBWrapper * @note This method is intended for testing purposes and does not perform any real action. */ MOCK_METHOD(void, query, (const std::string& query, nlohmann::json& response), ()); + + /** + * @brief Mock method for init. + * + * @note This method is intended for testing purposes and does not perform any real action. + */ + MOCK_METHOD(void, init, (), ()); }; #endif // _MOCK_SOCKETDBWRAPPER_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineSocketDBWrapper.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineSocketDBWrapper.hpp index 6806ce51b28..9aaa18d607e 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineSocketDBWrapper.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineSocketDBWrapper.hpp @@ -18,7 +18,7 @@ extern std::shared_ptr spSocketDBWrapperMock; /** * @brief This class is a wrapper of the trampoline socket DB wrapper */ -class TrampolineSocketDBWrapper final +class TrampolineSocketDBWrapper final : public Singleton { public: /** @@ -43,6 +43,15 @@ class TrampolineSocketDBWrapper final { spSocketDBWrapperMock->query(query, response); } + + /** + * @brief Mock method for initializing the orchestrator. 
+ * + */ + void init() + { + spSocketDBWrapperMock->init(); + } }; #endif //_TRAMPOLINE_SOCKETDBWRAPPER_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.cpp index 85da0a6899c..ac9f3113faf 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.cpp @@ -24,7 +24,6 @@ using ::testing::_; -const std::string TEST_PATH {"/tmp/socket"}; const size_t MAX_RETRIES {10}; auto constexpr TEST_REPORTS_QUEUE_PATH {"queue/vd/reports"}; auto constexpr TEST_REPORTS_BULK_SIZE {1}; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.cpp index 3951ba41e87..f5aeacc3f34 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.cpp @@ -22,7 +22,6 @@ using ::testing::_; -const std::string TEST_PATH {"/tmp/socket"}; const size_t MAX_RETRIES {10}; auto constexpr TEST_REPORTS_QUEUE_PATH {"queue/vd/reports"}; auto constexpr TEST_REPORTS_BULK_SIZE {1}; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp index d57cc25d589..638bf58940e 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp @@ -10,6 +10,10 @@ */ #include "osDataCache_test.hpp" +#include "MockSocketDBWrapper.hpp" +#include "TrampolineSocketDBWrapper.hpp" + +auto constexpr EMPTY_RESPONSE = ""; TEST_F(OsDataCacheTest, TestSetAndGetSuccess) { @@ -26,8 +30,10 @@ TEST_F(OsDataCacheTest, TestSetAndGetSuccess) m_socketServer->send(fd, "err ", 4); }); - OsDataCache cache; + OsDataCache cache; std::string agentId {"1"}; + spSocketDBWrapperMock = std::make_shared(); + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)).Times(1); // Try to get value from empty cache EXPECT_THROW(cache.getOsData(agentId), WdbDataException); @@ -75,22 +81,23 @@ TEST_F(OsDataCacheTest, TestSetAndGetSuccess) TEST_F(OsDataCacheTest, TestDbQuery) { // Create fake response - nlohmann::json response; - response["hostname"] = "hostName"; - response["architecture"] = "architecture"; - response["os_name"] = "name"; - response["os_codename"] = "codeName"; - response["os_major"] = "majorVersion"; - response["os_minor"] = "minorVersion"; - response["os_patch"] = "patch"; - response["os_build"] = "build"; - response["os_platform"] = "platform"; - response["os_version"] = "version"; - response["os_release"] = "release"; - response["os_display_version"] = "displayVersion"; - response["sysname"] = "sysName"; - response["version"] = "kernelVersion"; - response["release"] = "kernelRelease"; + nlohmann::json response = nlohmann::json::array(); + response.push_back(nlohmann::json::object()); + response.at(0)["hostname"] = "hostName"; + response.at(0)["architecture"] = "architecture"; + response.at(0)["os_name"] = "name"; + response.at(0)["os_codename"] = "codeName"; + response.at(0)["os_major"] = "majorVersion"; + response.at(0)["os_minor"] = "minorVersion"; + response.at(0)["os_patch"] = "patch"; + response.at(0)["os_build"] = "build"; + response.at(0)["os_platform"] = "platform"; + response.at(0)["os_version"] = "version"; + 
response.at(0)["os_release"] = "release"; + response.at(0)["os_display_version"] = "displayVersion"; + response.at(0)["sysname"] = "sysName"; + response.at(0)["version"] = "kernelVersion"; + response.at(0)["release"] = "kernelRelease"; std::string responseString = response.dump(); std::string finalResponse = "ok "; @@ -108,8 +115,12 @@ TEST_F(OsDataCacheTest, TestDbQuery) m_socketServer->send(fd, finalResponse.c_str(), finalResponse.size()); }); - OsDataCache cache; + OsDataCache cache; std::string agentId {"1"}; + spSocketDBWrapperMock = std::make_shared(); + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillRepeatedly(testing::SetArgReferee<1>(response)); // Get value from cache auto osDataRetrieved = cache.getOsData(agentId); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp index a45f3e4a94e..2de9052c746 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp @@ -28,6 +28,8 @@ auto constexpr TEST_REPORTS_QUEUE_PATH {"queue/vd/reports"}; auto constexpr TEST_REPORTS_BULK_SIZE {1}; +auto constexpr TEST_SOCKET {"queue/db/wdb"}; +auto constexpr SLEEP_TIME {0}; using ::testing::_; using testing::Return; @@ -176,7 +178,6 @@ namespace NSScanOrchestratorTest } )"; - const std::string TEST_PATH {"/tmp/socket"}; } // namespace NSScanOrchestratorTest // Shared pointers definitions @@ -186,7 +187,30 @@ std::shared_ptr> spScanContext; using namespace NSScanOrchestratorTest; -void ScanOrchestratorTest::SetUp() {} +void ScanOrchestratorTest::SetUp() +{ + m_socketServer = + std::make_shared, EpollWrapper>>(TEST_SOCKET); + + m_socketServer->listen( + [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) + { + std::cerr << "Received (SOCKET SERVER) data: " << data << std::endl; + std::ignore = dataHeader; + std::ignore = sizeHeader; + + std::string receivedMsg(data, size); + ASSERT_EQ(receivedMsg, m_query); + + std::this_thread::sleep_for(std::chrono::milliseconds(SLEEP_TIME)); + + for (const auto& response : m_responses) + { + std::cerr << "send (SOCKET SERVER) data: " << response.c_str() << std::endl; + m_socketServer->send(fd, response.c_str(), response.size()); + } + }); +} void ScanOrchestratorTest::TearDown() { @@ -194,6 +218,8 @@ void ScanOrchestratorTest::TearDown() spOsDataCacheMock.reset(); spSocketDBWrapperMock.reset(); spScanContext.reset(); + m_socketServer->stop(); + m_socketServer.reset(); Log::deassignLogFunction(); } @@ -291,7 +317,9 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsert) TrampolineFactoryOrchestrator, MockAbstractHandler>, MockIndexerConnector, - MockDatabaseFeedManager> + MockDatabaseFeedManager, + OSPrimitives, + TrampolineSocketDBWrapper> scanOrchestrator(spIndexerConnectorMock, spDatabaseFeedManagerMock, spReportDispatcher, mutexScanOrchestrator); flatbuffers::FlatBufferBuilder builder; @@ -403,7 +431,9 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageDelete) TrampolineFactoryOrchestrator, MockAbstractHandler>, MockIndexerConnector, - MockDatabaseFeedManager> + MockDatabaseFeedManager, + OSPrimitives, + TrampolineSocketDBWrapper> scanOrchestrator(spIndexerConnectorMock, spDatabaseFeedManagerMock, spReportDispatcher, mutexScanOrchestrator); flatbuffers::FlatBufferBuilder builder; @@ -515,7 +545,9 @@ TEST_F(ScanOrchestratorTest, 
TestRunScannerTypeHotfixInsert) TrampolineFactoryOrchestrator, MockAbstractHandler>, MockIndexerConnector, - MockDatabaseFeedManager> + MockDatabaseFeedManager, + OSPrimitives, + TrampolineSocketDBWrapper> scanOrchestrator(spIndexerConnectorMock, spDatabaseFeedManagerMock, spReportDispatcher, mutexScanOrchestrator); flatbuffers::FlatBufferBuilder builder; @@ -627,7 +659,9 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixDelete) TrampolineFactoryOrchestrator, MockAbstractHandler>, MockIndexerConnector, - MockDatabaseFeedManager> + MockDatabaseFeedManager, + OSPrimitives, + TrampolineSocketDBWrapper> scanOrchestrator(spIndexerConnectorMock, spDatabaseFeedManagerMock, spReportDispatcher, mutexScanOrchestrator); flatbuffers::FlatBufferBuilder builder; @@ -724,7 +758,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeOs) TrampolineFactoryOrchestrator, MockAbstractHandler>, MockIndexerConnector, - MockDatabaseFeedManager> + MockDatabaseFeedManager, + OSPrimitives, + TrampolineSocketDBWrapper> + scanOrchestrator(spIndexerConnectorMock, spDatabaseFeedManagerMock, spReportDispatcher, mutexScanOrchestrator); flatbuffers::FlatBufferBuilder builder; @@ -838,7 +875,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeIntegrityClear) TrampolineFactoryOrchestrator, MockAbstractHandler>, MockIndexerConnector, - MockDatabaseFeedManager> + MockDatabaseFeedManager, + OSPrimitives, + TrampolineSocketDBWrapper> + scanOrchestrator(spIndexerConnectorMock, spDatabaseFeedManagerMock, spReportDispatcher, mutexScanOrchestrator); flatbuffers::FlatBufferBuilder builder; @@ -952,6 +992,7 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsertInDelayed) MockIndexerConnector, MockDatabaseFeedManager, OSPrimitives, + TrampolineSocketDBWrapper, 1> // Delayed time 1 second scanOrchestrator(spIndexerConnectorMock, spDatabaseFeedManagerMock, spReportDispatcher, mutexScanOrchestrator); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.hpp index 26ee26e0cd3..afe4cf2837f 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.hpp @@ -12,6 +12,7 @@ #ifndef _SCAN_ORCHESTRATOR_TEST_HPP #define _SCAN_ORCHESTRATOR_TEST_HPP +#include "socketServer.hpp" #include "gtest/gtest.h" /** @@ -35,6 +36,10 @@ class ScanOrchestratorTest : public ::testing::Test * */ void TearDown() override; + + std::shared_ptr, EpollWrapper>> m_socketServer; + std::string m_query; + std::vector m_responses; // LCOV_EXCL_STOP }; From 771f3cc97a8b0cc9df4dbb77a729f64ac42f2516 Mon Sep 17 00:00:00 2001 From: Gabriel Valenzuela Date: Fri, 10 May 2024 12:15:08 -0300 Subject: [PATCH 058/419] CL: - Refactor UTs. Fix errors. 
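Note (sketch, not verbatim test code): with SocketDBWrapper turned into a singleton, the
orchestrator unit tests no longer need the fake wazuh-db socket server wired up in the previous
patch; they stub the database through the MockSocketDBWrapper / TrampolineSocketDBWrapper pair
instead. A minimal sketch of that pattern, assuming the gmock types from tests/mocks (the
concrete query string and canned response differ per test):

    // Inject the gmock instance that the trampoline forwards to.
    spSocketDBWrapperMock = std::make_shared<MockSocketDBWrapper>();

    // Hand back a canned JSON payload for whatever query the code under test issues.
    EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_))
        .WillOnce(testing::SetArgReferee<1>(nlohmann::json::parse(R"([{"id":1}])")));

    // Code templated on TrampolineSocketDBWrapper resolves
    // TrampolineSocketDBWrapper::instance().query(...), which forwards to the mock above.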
--- src/shared_modules/utils/socketDBWrapper.hpp | 4 --- .../utils/tests/socketDBWrapper_test.hpp | 4 +-- .../tests/unit/scanOrchestrator_test.cpp | 27 +------------------ .../tests/unit/scanOrchestrator_test.hpp | 4 --- 4 files changed, 2 insertions(+), 37 deletions(-) diff --git a/src/shared_modules/utils/socketDBWrapper.hpp b/src/shared_modules/utils/socketDBWrapper.hpp index 78fedff5311..fbf00c62669 100644 --- a/src/shared_modules/utils/socketDBWrapper.hpp +++ b/src/shared_modules/utils/socketDBWrapper.hpp @@ -63,7 +63,6 @@ class SocketDBWrapper final : public Singleton m_dbSocket->connect( [&](const char* body, uint32_t bodySize, const char*, uint32_t) { - std::cerr << "Received (SOCKETDBWRAPPER) data: " << body << std::endl; std::scoped_lock lock {m_mutexResponse}; std::string responsePacket(body, bodySize); @@ -171,9 +170,7 @@ class SocketDBWrapper final : public Singleton m_exceptionStr.clear(); m_dbSocket->send(query.c_str(), query.size()); - std::cerr << "m_conditionVariable.wait(lockResponse) - BEFORE" << std::endl; m_conditionVariable.wait(lockResponse); - std::cerr << "m_conditionVariable.wait(lockResponse) - AFTER" << std::endl; // Check if the object was destroyed. If so, return and do not process the response if(m_teardown.load()) @@ -204,7 +201,6 @@ class SocketDBWrapper final : public Singleton */ void teardown() { - std::cerr<<"Teardown SocketDBWrapper"<stop(); diff --git a/src/shared_modules/utils/tests/socketDBWrapper_test.hpp b/src/shared_modules/utils/tests/socketDBWrapper_test.hpp index 28d51a54cb3..7d5d4cc37e4 100644 --- a/src/shared_modules/utils/tests/socketDBWrapper_test.hpp +++ b/src/shared_modules/utils/tests/socketDBWrapper_test.hpp @@ -38,7 +38,6 @@ class SocketDBWrapperTest : public ::testing::Test m_socketServer->listen( [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) { - std::cerr<<"Received (SOCKET SERVER) data: "<send(fd, response.c_str(), response.size()); } }); }; void TearDown() override { - std::cerr<<"TearDown"<stop(); m_socketServer.reset(); m_query.clear(); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp index 2de9052c746..5070fa143bd 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp @@ -187,30 +187,7 @@ std::shared_ptr> spScanContext; using namespace NSScanOrchestratorTest; -void ScanOrchestratorTest::SetUp() -{ - m_socketServer = - std::make_shared, EpollWrapper>>(TEST_SOCKET); - - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) - { - std::cerr << "Received (SOCKET SERVER) data: " << data << std::endl; - std::ignore = dataHeader; - std::ignore = sizeHeader; - - std::string receivedMsg(data, size); - ASSERT_EQ(receivedMsg, m_query); - - std::this_thread::sleep_for(std::chrono::milliseconds(SLEEP_TIME)); - - for (const auto& response : m_responses) - { - std::cerr << "send (SOCKET SERVER) data: " << response.c_str() << std::endl; - m_socketServer->send(fd, response.c_str(), response.size()); - } - }); -} +void ScanOrchestratorTest::SetUp() {} void ScanOrchestratorTest::TearDown() { @@ -218,8 +195,6 @@ void ScanOrchestratorTest::TearDown() spOsDataCacheMock.reset(); spSocketDBWrapperMock.reset(); spScanContext.reset(); - m_socketServer->stop(); - m_socketServer.reset(); 
Log::deassignLogFunction(); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.hpp index afe4cf2837f..7be6ccfd10d 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.hpp @@ -36,10 +36,6 @@ class ScanOrchestratorTest : public ::testing::Test * */ void TearDown() override; - - std::shared_ptr, EpollWrapper>> m_socketServer; - std::string m_query; - std::vector m_responses; // LCOV_EXCL_STOP }; From 155aa8ba4101706c7cb10708ffa0efad2b818aa8 Mon Sep 17 00:00:00 2001 From: Gabriel Valenzuela Date: Fri, 10 May 2024 14:42:22 -0300 Subject: [PATCH 059/419] CL: - Fix memleak. --- src/shared_modules/utils/socketDBWrapper.hpp | 21 +++++++++++-------- .../vulnerabilityScannerFacade_test.cpp | 12 ++++------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/src/shared_modules/utils/socketDBWrapper.hpp b/src/shared_modules/utils/socketDBWrapper.hpp index fbf00c62669..acc70255026 100644 --- a/src/shared_modules/utils/socketDBWrapper.hpp +++ b/src/shared_modules/utils/socketDBWrapper.hpp @@ -13,15 +13,14 @@ #define _SOCKET_DB_WRAPPER_HPP #include "json.hpp" +#include "singleton.hpp" #include "socketClient.hpp" #include "socketDBWrapperException.hpp" -#include "singleton.hpp" #include #include #include #include -auto constexpr DB_WRAPPER_QUERY_WAIT_TIME {5000}; auto constexpr WDB_SOCKET {"queue/db/wdb"}; char constexpr DB_WRAPPER_OK[] = {"ok"}; @@ -54,12 +53,11 @@ class SocketDBWrapper final : public Singleton std::mutex m_mutexResponse; std::condition_variable m_conditionVariable; std::atomic m_teardown {false}; -public: - void init() +public: + void init() { - m_dbSocket = - std::make_unique, EpollWrapper>>(WDB_SOCKET); + m_dbSocket = std::make_unique, EpollWrapper>>(WDB_SOCKET); m_dbSocket->connect( [&](const char* body, uint32_t bodySize, const char*, uint32_t) { @@ -169,11 +167,16 @@ class SocketDBWrapper final : public Singleton // coverity[missing_lock] m_exceptionStr.clear(); + if (!m_dbSocket) + { + throw std::runtime_error("Socket DB Wrapper not initialized"); + } + m_dbSocket->send(query.c_str(), query.size()); m_conditionVariable.wait(lockResponse); // Check if the object was destroyed. If so, return and do not process the response - if(m_teardown.load()) + if (m_teardown.load()) { return; } @@ -198,12 +201,12 @@ class SocketDBWrapper final : public Singleton /** * @brief Teardown the Socket DB Wrapper object * - */ + */ void teardown() { m_teardown.store(true); m_conditionVariable.notify_all(); - m_dbSocket->stop(); + m_dbSocket->stop(); } }; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/component/vulnerabilityScannerFacade_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/component/vulnerabilityScannerFacade_test.cpp index f81abd6dfa2..1cde13cce3e 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/component/vulnerabilityScannerFacade_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/component/vulnerabilityScannerFacade_test.cpp @@ -14,6 +14,8 @@ #include #include +auto constexpr DELAY {1}; + /* * @brief Log function used by VulnerabilityScannerFacade. */ @@ -44,7 +46,7 @@ TEST_F(VulnerabilityScannerFacadeTest, TestSingletonOfTheVulnerabilityScannerFac /* * @brief Test start method of the VulnerabilityScannerFacade class. 
*/ -TEST_F(VulnerabilityScannerFacadeTest, TestStartMethod) +TEST_F(VulnerabilityScannerFacadeTest, TestStartToEndMethod) { // TODO: Remove GTEST_SKIP and add EXPECTS once the implementation of the 'Indexer Connector' module is completed GTEST_SKIP(); @@ -53,14 +55,8 @@ TEST_F(VulnerabilityScannerFacadeTest, TestStartMethod) auto& vulnerabilityScannerFacade {VulnerabilityScannerFacade::instance()}; EXPECT_NO_THROW(vulnerabilityScannerFacade.start(logFunction, configuration)); -} -/* - * @brief Test stop method of the VulnerabilityScannerFacade class. - */ -TEST_F(VulnerabilityScannerFacadeTest, TestStopMethod) -{ - auto& vulnerabilityScannerFacade {VulnerabilityScannerFacade::instance()}; + std::this_thread::sleep_for(std::chrono::seconds(DELAY)); EXPECT_NO_THROW(vulnerabilityScannerFacade.stop()); } From 86e3ad36561c5420bc108ead9e50c90ad6291036 Mon Sep 17 00:00:00 2001 From: Gabriel Valenzuela Date: Fri, 10 May 2024 20:44:19 -0300 Subject: [PATCH 060/419] CL: - Refactor of code and UTs fix. --- .../buildAllAgentListContext.hpp | 4 +- .../src/scanOrchestrator/scanAgentList.hpp | 11 +-- .../tests/mocks/MockSocketDBWrapper.hpp | 1 + .../tests/mocks/TrampolineSocketDBWrapper.hpp | 1 + .../unit/buildAllAgentListContext_test.cpp | 52 +++++------ .../unit/buildAllAgentListContext_test.hpp | 14 +-- .../tests/unit/scanAgentList_test.cpp | 88 +++++++------------ .../tests/unit/scanAgentList_test.hpp | 24 +---- 8 files changed, 68 insertions(+), 127 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp index 77177e19b0a..2374682b21f 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp @@ -27,7 +27,7 @@ * * @tparam TScanContext scan context type. 
*/ -template +template class TBuildAllAgentListContext final : public AbstractHandler> { @@ -61,7 +61,7 @@ class TBuildAllAgentListContext final : public AbstractHandler>> + typename TAbstractHandler = AbstractHandler>, + typename TSocketDBWrapper = SocketDBWrapper> class TScanAgentList final : public AbstractHandler> { private: @@ -48,8 +49,8 @@ class TScanAgentList final : public AbstractHandler spSocketDBWrapperMock; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp index ed42bc83f12..7aca549855d 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp @@ -11,27 +11,16 @@ #include "buildAllAgentListContext_test.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineSocketDBWrapper.hpp" #include "buildAllAgentListContext.hpp" TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContext) { - std::filesystem::create_directories(TEST_DB_PATH); - m_socketServer = - std::make_shared, EpollWrapper>>(TEST_SOCKET_PATH); + spSocketDBWrapperMock = std::make_shared(); + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)).Times(1); - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) - { - std::ignore = dataHeader; - std::ignore = sizeHeader; - - std::string receivedMsg(data, size); - EXPECT_STREQ(receivedMsg.c_str(), EXPECTED_QUERY.c_str()); - - m_socketServer->send(fd, "ok []", 5); - }); - - auto allAgentContext = std::make_shared>>(); + auto allAgentContext = + std::make_shared, TrampolineSocketDBWrapper>>(); auto scanContext = std::make_shared>(); allAgentContext->handleRequest(scanContext); @@ -41,22 +30,25 @@ TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContextWithElements) { static const std::string MESSAGE { R"(ok [{"id":1, "name":"name", "version": "Wazuh 4.4.4", "ip":"192.168.0.1","node_name":"node_1"}])"}; - m_socketServer = - std::make_shared, EpollWrapper>>(TEST_SOCKET_PATH); - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) - { - std::ignore = dataHeader; - std::ignore = sizeHeader; - - std::string receivedMsg(data, size); - EXPECT_STREQ(receivedMsg.c_str(), EXPECTED_QUERY.c_str()); + spSocketDBWrapperMock = std::make_shared(); - m_socketServer->send(fd, MESSAGE.c_str(), MESSAGE.size()); - }); - - auto allAgentContext = std::make_shared>>(); + nlohmann::json queryResult = nlohmann::json::parse(R"([ + { + "id": 1, + "name": "name", + "version": "Wazuh 4.4.4", + "ip": "192.168.0.1", + "node_name": "node_1" + } + ])"); + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::SetArgReferee<1>(queryResult)); + + auto allAgentContext = + std::make_shared, TrampolineSocketDBWrapper>>(); auto scanContext = std::make_shared>(); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.hpp index 79ac26ec2bb..7499600a76d 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.hpp @@ -15,9 +15,6 @@ #include "socketServer.hpp" #include "gtest/gtest.h" -const 
std::filesystem::path TEST_DB_PATH {std::filesystem::current_path() / "queue/db"}; -const std::filesystem::path TEST_SOCKET_PATH {TEST_DB_PATH / "wdb"}; -const std::string EXPECTED_QUERY {"global sql SELECT * FROM agent "}; /** * @brief FetchFromGlobalDB test class. */ @@ -38,16 +35,7 @@ class BuildAllAgentListContextTest : public ::testing::Test * @brief Clean the environment after testing. * */ - void TearDown() override - { - std::filesystem::remove_all("queue"); - m_socketServer.reset(); - } - - /** - * @brief Fake server socket. - */ - std::shared_ptr, EpollWrapper>> m_socketServer; + void TearDown() override {} // LCOV_EXCL_STOP }; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp index 93400401d8a..11cc6850402 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp @@ -11,8 +11,10 @@ #include "scanAgentList_test.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineSocketDBWrapper.hpp" #include "scanAgentList.hpp" #include "shared_modules/utils/mocks/chainOfResponsabilityMock.h" +#include "socketDBWrapperException.hpp" using testing::_; @@ -20,33 +22,16 @@ const std::string EXPECTED_QUERY_PACKAGE {"agent 001 package get "}; const std::string EXPECTED_QUERY_OS {"agent 001 osinfo get "}; const std::string EXPECTED_QUERY_MANAGER {"agent 0 package get "}; const std::string PACKAGES_RESPONSE { - R"(ok [{"architecture":"x86_64","checksum":"qwerty","description":"Web Browser","format":"deb","groups":"","install_time":"02/22/2024 00:00:00","item_id":"ytrewq","location":"","multiarch":"","name":"Firefox","priority":"","scan_time":"02/21/2024 00:00:00","size":0,"source":"","vendor":"canonical","version":"122.0.1"},{"architecture":"x86_64","checksum":"asdfgh","description":"Text editor","format":"deb","groups":"","install_time":"02/22/2024 00:00:00","item_id":"hgfdsa","location":"","multiarch":"","name":"Neovim","priority":"","scan_time":"02/21/2024 00:00:00","size":0,"source":"","vendor":"canonical","version":"0.9.5"}])"}; + R"([{"architecture":"x86_64","checksum":"qwerty","description":"Web Browser","format":"deb","groups":"","install_time":"02/22/2024 00:00:00","item_id":"ytrewq","location":"","multiarch":"","name":"Firefox","priority":"","scan_time":"02/21/2024 00:00:00","size":0,"source":"","vendor":"canonical","version":"122.0.1"},{"architecture":"x86_64","checksum":"asdfgh","description":"Text editor","format":"deb","groups":"","install_time":"02/22/2024 00:00:00","item_id":"hgfdsa","location":"","multiarch":"","name":"Neovim","priority":"","scan_time":"02/21/2024 00:00:00","size":0,"source":"","vendor":"canonical","version":"0.9.5"}])"}; const std::string OS_RESPONSE { - R"(ok [{"checksum":"qwerty","hostname":"osdata_hostname","os_build":"osdata_build","os_codename":"upstream","os_display_version":"osdata_displayVersion","os_major":"osdata_majorVersion","os_minor":"osdata_minorVersion","os_name":"osdata_name","os_patch":"osdata_patch","os_platform":"osdata_platform","os_release":"osdata_release","os_version":"osdata_version","release":"osdata_release","scan_time":"02/21/2024 00:00:00","sysname":"osdata_sysName","version":"osdata_version"}])"}; + 
R"([{"checksum":"qwerty","hostname":"osdata_hostname","os_build":"osdata_build","os_codename":"upstream","os_display_version":"osdata_displayVersion","os_major":"osdata_majorVersion","os_minor":"osdata_minorVersion","os_name":"osdata_name","os_patch":"osdata_patch","os_platform":"osdata_platform","os_release":"osdata_release","os_version":"osdata_version","release":"osdata_release","scan_time":"02/21/2024 00:00:00","sysname":"osdata_sysName","version":"osdata_version"}])"}; -const std::string RESPONSE_EMPTY {R"(ok )"}; +const std::string RESPONSE_EMPTY {R"([])"}; TEST_F(ScanAgentListTest, SingleDeleteAndInsertTest) { - const std::vector EXPECTED_QUERIES {EXPECTED_QUERY_OS, EXPECTED_QUERY_PACKAGE}; - const std::vector RESPONSES {OS_RESPONSE, PACKAGES_RESPONSE}; - - auto index = 0; - - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) - { - std::ignore = dataHeader; - std::ignore = sizeHeader; - - std::string receivedMsg(data, size); - EXPECT_STREQ(receivedMsg.c_str(), EXPECTED_QUERIES.at(index).c_str()); - - m_socketServer->send(fd, RESPONSES.at(index).c_str(), RESPONSES.at(index).size()); - - ++index; - }); + spSocketDBWrapperMock = std::make_shared(); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -81,8 +66,17 @@ TEST_F(ScanAgentListTest, SingleDeleteAndInsertTest) auto scanAgentList = std::make_shared, - MockAbstractHandler>>>>( - spPackageInsertOrchestrationMock, spOsOrchestrationMock); + MockAbstractHandler>>, + TrampolineSocketDBWrapper>>(spPackageInsertOrchestrationMock, + spOsOrchestrationMock); + + EXPECT_CALL(*spSocketDBWrapperMock, query(EXPECTED_QUERY_OS, testing::_)) + .Times(1) + .WillOnce(testing::SetArgReferee<1>(nlohmann::json::parse(OS_RESPONSE))); + + EXPECT_CALL(*spSocketDBWrapperMock, query(EXPECTED_QUERY_PACKAGE, testing::_)) + .Times(1) + .WillOnce(testing::SetArgReferee<1>(nlohmann::json::parse(PACKAGES_RESPONSE))); nlohmann::json jsonData = nlohmann::json::parse( R"({"agent_info": {"agent_id":"001", "agent_version":"4.8.0", "agent_name":"test_agent_name", "agent_ip":"10.0.0.1", "node_name":"node01"}, "action":"upgradeAgentDB"})"); @@ -98,24 +92,8 @@ TEST_F(ScanAgentListTest, SingleDeleteAndInsertTest) TEST_F(ScanAgentListTest, EmptyPackagesWDBResponseTest) { - const std::vector EXPECTED_QUERIES {EXPECTED_QUERY_OS, EXPECTED_QUERY_PACKAGE}; - const std::vector RESPONSES {OS_RESPONSE, RESPONSE_EMPTY}; - - auto index = 0; - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) - { - std::ignore = dataHeader; - std::ignore = sizeHeader; - - std::string receivedMsg(data, size); - EXPECT_STREQ(receivedMsg.c_str(), EXPECTED_QUERIES.at(index).c_str()); - - m_socketServer->send(fd, RESPONSES.at(index).c_str(), RESPONSES.at(index).size()); - - ++index; - }); + spSocketDBWrapperMock = std::make_shared(); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -150,8 +128,17 @@ TEST_F(ScanAgentListTest, EmptyPackagesWDBResponseTest) auto scanAgentList = std::make_shared, - MockAbstractHandler>>>>( - spPackageInsertOrchestrationMock, spOsOrchestrationMock); + MockAbstractHandler>>, + TrampolineSocketDBWrapper>>(spPackageInsertOrchestrationMock, + spOsOrchestrationMock); + + EXPECT_CALL(*spSocketDBWrapperMock, query(EXPECTED_QUERY_OS, testing::_)) + .Times(1) + .WillOnce(testing::SetArgReferee<1>(nlohmann::json::parse(OS_RESPONSE))); + + EXPECT_CALL(*spSocketDBWrapperMock, 
query(EXPECTED_QUERY_PACKAGE, testing::_)) + .Times(1) + .WillOnce(testing::SetArgReferee<1>(nlohmann::json::parse(RESPONSE_EMPTY))); nlohmann::json jsonData = nlohmann::json::parse( R"({"agent_info": {"agent_id":"001", "agent_version":"4.8.0", "agent_name":"test_agent_name", "agent_ip":"10.0.0.1", "node_name":"node01"}, "action":"upgradeAgentDB"})"); @@ -167,17 +154,7 @@ TEST_F(ScanAgentListTest, EmptyPackagesWDBResponseTest) TEST_F(ScanAgentListTest, DISABLED_InsertAllTestNotSyncedResponse) { - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) - { - std::ignore = dataHeader; - std::ignore = sizeHeader; - - std::string receivedMsg(data, size); - EXPECT_STREQ(receivedMsg.c_str(), EXPECTED_QUERY_MANAGER.c_str()); - - m_socketServer->send(fd, PACKAGES_RESPONSE.c_str(), PACKAGES_RESPONSE.size()); - }); + spSocketDBWrapperMock = std::make_shared(); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -209,8 +186,9 @@ TEST_F(ScanAgentListTest, DISABLED_InsertAllTestNotSyncedResponse) auto scanAgentList = std::make_shared, - MockAbstractHandler>>>>( - spOsOrchestrationMock, spPackageInsertOrchestrationMock); + MockAbstractHandler>>, + TrampolineSocketDBWrapper>>(spOsOrchestrationMock, + spPackageInsertOrchestrationMock); nlohmann::json jsonData = nlohmann::json::parse(R"([{"status":"NOT_SYNCED"}])"); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.hpp index 76d3ebd36e7..18cee2e195e 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.hpp @@ -18,10 +18,6 @@ #include "socketServer.hpp" #include "gtest/gtest.h" -const std::filesystem::path TEST_DB_PATH {std::filesystem::current_path() / "queue/db"}; -const std::filesystem::path TEST_SOCKET_PATH {TEST_DB_PATH / "wdb"}; -const std::filesystem::path INVENTORY_DB_PATH {"queue/vd/inventory"}; - /** * @brief ScanAgentList test class. * @@ -37,29 +33,13 @@ class ScanAgentListTest : public ::testing::Test * @brief Set the environment for testing. * */ - void SetUp() override - { - m_inventoryDatabase = std::make_unique(INVENTORY_DB_PATH); - - std::filesystem::create_directories(TEST_DB_PATH); - m_socketServer = - std::make_shared, EpollWrapper>>(TEST_SOCKET_PATH); - } + void SetUp() override {} /** * @brief Clean the environment after testing. * */ - void TearDown() override - { - std::filesystem::remove_all("queue"); - m_socketServer.reset(); - } - - /** - * @brief Fake server socket. - */ - std::shared_ptr, EpollWrapper>> m_socketServer; + void TearDown() override {} /** * @brief RocksDB inventory database. From 5dde818e4dc3f9a9442c68057059705fdbb45030 Mon Sep 17 00:00:00 2001 From: Gabriel Valenzuela Date: Sat, 11 May 2024 16:15:58 -0300 Subject: [PATCH 061/419] CL: - Remove cerr line. --- src/shared_modules/utils/socketWrapper.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/shared_modules/utils/socketWrapper.hpp b/src/shared_modules/utils/socketWrapper.hpp index c6fa098364b..c168fcf535d 100644 --- a/src/shared_modules/utils/socketWrapper.hpp +++ b/src/shared_modules/utils/socketWrapper.hpp @@ -679,7 +679,6 @@ class Socket final : public T // Send the data. 
while (bufferSize != amountSent) { - std::cerr<<"Sending data to socket"< Date: Mon, 13 May 2024 00:14:10 -0300 Subject: [PATCH 062/419] Fix unit tests and add condition variable wait predicate --- src/shared_modules/utils/socketDBWrapper.hpp | 4 +++- src/shared_modules/utils/tests/socketDBWrapper_test.cpp | 2 -- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/shared_modules/utils/socketDBWrapper.hpp b/src/shared_modules/utils/socketDBWrapper.hpp index acc70255026..33f733587fc 100644 --- a/src/shared_modules/utils/socketDBWrapper.hpp +++ b/src/shared_modules/utils/socketDBWrapper.hpp @@ -57,6 +57,7 @@ class SocketDBWrapper final : public Singleton public: void init() { + m_teardown.store(false); m_dbSocket = std::make_unique, EpollWrapper>>(WDB_SOCKET); m_dbSocket->connect( [&](const char* body, uint32_t bodySize, const char*, uint32_t) @@ -173,7 +174,8 @@ class SocketDBWrapper final : public Singleton } m_dbSocket->send(query.c_str(), query.size()); - m_conditionVariable.wait(lockResponse); + m_conditionVariable.wait( + lockResponse, [this] { return !m_response.empty() || !m_exceptionStr.empty() || m_teardown.load(); }); // Check if the object was destroyed. If so, return and do not process the response if (m_teardown.load()) diff --git a/src/shared_modules/utils/tests/socketDBWrapper_test.cpp b/src/shared_modules/utils/tests/socketDBWrapper_test.cpp index 662a6866ba5..79415990d7d 100644 --- a/src/shared_modules/utils/tests/socketDBWrapper_test.cpp +++ b/src/shared_modules/utils/tests/socketDBWrapper_test.cpp @@ -14,7 +14,6 @@ #include "socketDBWrapper.hpp" // temp header -#include "wazuhDBQueryBuilder.hpp" #include #include @@ -80,7 +79,6 @@ TEST_F(SocketDBWrapperTest, InvalidTest) EXPECT_THROW(SocketDBWrapper::instance().query(m_query, output), std::exception); } - TEST_F(SocketDBWrapperTest, OkTest) { m_query = "SELECT * FROM test_table;"; From 0d68fa8d2fecf90f412b2ea7144e66fc70e1c98b Mon Sep 17 00:00:00 2001 From: Gabriel Valenzuela Date: Mon, 13 May 2024 10:50:09 -0300 Subject: [PATCH 063/419] CL: - Fix QA test output and format. 
--- .../qa/test_data/008/expected_001.out | 14 +++++------ .../qa/test_data/008/expected_002.out | 24 +++++++++---------- .../qa/test_data/008/expected_003.out | 8 +++---- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_001.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_001.out index 76d18c9e38b..2209228c58e 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_001.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_001.out @@ -1,9 +1,9 @@ [ -"wazuh-modulesd:vulnerability-scanner:resultIndexer.hpp:56 handleRequest : Processing and publish key: CVE-2024-21405", -"wazuh-modulesd:vulnerability-scanner:resultIndexer.hpp:56 handleRequest : Processing and publish key: CVE-2024-21372", -"wazuh-modulesd:vulnerability-scanner:resultIndexer.hpp:56 handleRequest : Processing and publish key: CVE-2024-21371", -"wazuh-modulesd:vulnerability-scanner:resultIndexer.hpp:56 handleRequest : Processing and publish key: CVE-2024-21340", -"wazuh-modulesd:vulnerability-scanner:resultIndexer.hpp:56 handleRequest : Processing and publish key: CVE-2024-21338", -"wazuh-modulesd:vulnerability-scanner:resultIndexer.hpp:56 handleRequest : Processing and publish key: CVE-2024-21341", -"wazuh-modulesd:vulnerability-scanner:resultIndexer.hpp:56 handleRequest : Processing and publish key: CVE-2023-32040" + "Processing and publish key: CVE-2024-21405", + "Processing and publish key: CVE-2024-21372", + "Processing and publish key: CVE-2024-21371", + "Processing and publish key: CVE-2024-21340", + "Processing and publish key: CVE-2024-21338", + "Processing and publish key: CVE-2024-21341", + "Processing and publish key: CVE-2023-32040" ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_002.out index 02c67a94f2d..7b24b6533b8 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_002.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_002.out @@ -1,15 +1,15 @@ [ -"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21338", -"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21340", -"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21341", -"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21371", -"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21372", -"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21405", -"Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21341\",\"cvss\":{\"cvss3\":{\"base_score\":6.8}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046", -"Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21338\",\"cvss\":{\"cvss3\":{\"base_score\":7.8}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", -"Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21340\",\"cvss\":{\"cvss3\":{\"base_score\":4.6}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", -"Report sent: 1:[002] () 
->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21371\",\"cvss\":{\"cvss3\":{\"base_score\":7.0}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", -"Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21372\",\"cvss\":{\"cvss3\":{\"base_score\":8.8}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", -"Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21405\",\"cvss\":{\"cvss3\":{\"base_score\":7.0}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}" + "Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21338", + "Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21340", + "Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21341", + "Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21371", + "Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21372", + "Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21405", + "Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21341\",\"cvss\":{\"cvss3\":{\"base_score\":6.8}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046", + "Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21338\",\"cvss\":{\"cvss3\":{\"base_score\":7.8}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", + "Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21340\",\"cvss\":{\"cvss3\":{\"base_score\":4.6}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", + "Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21371\",\"cvss\":{\"cvss3\":{\"base_score\":7.0}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", + "Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21372\",\"cvss\":{\"cvss3\":{\"base_score\":8.8}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", + "Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21405\",\"cvss\":{\"cvss3\":{\"base_score\":7.0}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}" ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_003.out index 6b374ce7627..d0b3beadcab 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_003.out @@ -1,6 +1,6 @@ [ -"Processing and publish key: CVE-2023-32040", -"Deleting os agent vulnerabilities key: 002_Microsoft Windows 10 Pro", -"Vulnerability report for agent ID 002, clean all OS alert.", -"1:[002] () 
->vulnerability-detector:{\"vulnerability\":{\"status\":\"Clear\",\"title\":\"There is no information of installed packages. Vulnerabilities cleared.\",\"type\":\"Packages\"}}" + "Processing and publish key: CVE-2023-32040", + "Deleting os agent vulnerabilities key: 002_Microsoft Windows 10 Pro", + "Vulnerability report for agent ID 002, clean all OS alert.", + "1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"status\":\"Clear\",\"title\":\"There is no information of installed packages. Vulnerabilities cleared.\",\"type\":\"Packages\"}}" ] From af6615862c03b56574d48a529f67342a21fdc91a Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Mon, 13 May 2024 16:16:47 -0300 Subject: [PATCH 064/419] CL: - Fix QA test output (removed white spaces) --- .../qa/test_data/008/expected_001.out | 14 +++++------ .../qa/test_data/008/expected_002.out | 24 +++++++++---------- .../qa/test_data/008/expected_003.out | 8 +++---- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_001.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_001.out index 2209228c58e..0febf0db927 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_001.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_001.out @@ -1,9 +1,9 @@ [ - "Processing and publish key: CVE-2024-21405", - "Processing and publish key: CVE-2024-21372", - "Processing and publish key: CVE-2024-21371", - "Processing and publish key: CVE-2024-21340", - "Processing and publish key: CVE-2024-21338", - "Processing and publish key: CVE-2024-21341", - "Processing and publish key: CVE-2023-32040" +"Processing and publish key: CVE-2024-21405", +"Processing and publish key: CVE-2024-21372", +"Processing and publish key: CVE-2024-21371", +"Processing and publish key: CVE-2024-21340", +"Processing and publish key: CVE-2024-21338", +"Processing and publish key: CVE-2024-21341", +"Processing and publish key: CVE-2023-32040" ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_002.out index 7b24b6533b8..02c67a94f2d 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_002.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_002.out @@ -1,15 +1,15 @@ [ - "Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21338", - "Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21340", - "Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21341", - "Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21371", - "Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21372", - "Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21405", - "Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21341\",\"cvss\":{\"cvss3\":{\"base_score\":6.8}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046", - "Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21338\",\"cvss\":{\"cvss3\":{\"base_score\":7.8}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", - "Report sent: 1:[002] () 
->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21340\",\"cvss\":{\"cvss3\":{\"base_score\":4.6}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", - "Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21371\",\"cvss\":{\"cvss3\":{\"base_score\":7.0}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", - "Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21372\",\"cvss\":{\"cvss3\":{\"base_score\":8.8}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", - "Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21405\",\"cvss\":{\"cvss3\":{\"base_score\":7.0}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}" +"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21338", +"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21340", +"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21341", +"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21371", +"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21372", +"Removing element from inventory: 002_Microsoft Windows 10 Pro_CVE-2024-21405", +"Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21341\",\"cvss\":{\"cvss3\":{\"base_score\":6.8}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046", +"Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21338\",\"cvss\":{\"cvss3\":{\"base_score\":7.8}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", +"Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21340\",\"cvss\":{\"cvss3\":{\"base_score\":4.6}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", +"Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21371\",\"cvss\":{\"cvss3\":{\"base_score\":7.0}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", +"Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21372\",\"cvss\":{\"cvss3\":{\"base_score\":8.8}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}", +"Report sent: 1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"cve\":\"CVE-2024-21405\",\"cvss\":{\"cvss3\":{\"base_score\":7.0}},\"enumeration\":\"CVE\",\"package\":{\"architecture\":\"x86_64\",\"name\":\"Microsoft Windows 10 Pro\",\"version\":\"10.0.19045.4046\"}" ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_003.out index d0b3beadcab..6b374ce7627 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_003.out +++ 
b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_003.out @@ -1,6 +1,6 @@ [ - "Processing and publish key: CVE-2023-32040", - "Deleting os agent vulnerabilities key: 002_Microsoft Windows 10 Pro", - "Vulnerability report for agent ID 002, clean all OS alert.", - "1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"status\":\"Clear\",\"title\":\"There is no information of installed packages. Vulnerabilities cleared.\",\"type\":\"Packages\"}}" +"Processing and publish key: CVE-2023-32040", +"Deleting os agent vulnerabilities key: 002_Microsoft Windows 10 Pro", +"Vulnerability report for agent ID 002, clean all OS alert.", +"1:[002] () ->vulnerability-detector:{\"vulnerability\":{\"status\":\"Clear\",\"title\":\"There is no information of installed packages. Vulnerabilities cleared.\",\"type\":\"Packages\"}}" ] From 305329737779bb2ee8fd2e711878e58abb784290 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Mon, 13 May 2024 17:07:11 -0300 Subject: [PATCH 065/419] Fix changes based on test output. --- src/shared_modules/utils/socketDBWrapper.hpp | 20 ++++++++----------- .../qa/test_data/008/expected_001.out | 14 ++++++------- 2 files changed, 15 insertions(+), 19 deletions(-) diff --git a/src/shared_modules/utils/socketDBWrapper.hpp b/src/shared_modules/utils/socketDBWrapper.hpp index 33f733587fc..c988dc62b3e 100644 --- a/src/shared_modules/utils/socketDBWrapper.hpp +++ b/src/shared_modules/utils/socketDBWrapper.hpp @@ -52,12 +52,13 @@ class SocketDBWrapper final : public Singleton std::mutex m_mutexMessage; std::mutex m_mutexResponse; std::condition_variable m_conditionVariable; - std::atomic m_teardown {false}; + bool m_teardown {false}; + bool m_dataReady {false}; public: void init() { - m_teardown.store(false); + m_teardown = false; m_dbSocket = std::make_unique, EpollWrapper>>(WDB_SOCKET); m_dbSocket->connect( [&](const char* body, uint32_t bodySize, const char*, uint32_t) @@ -145,6 +146,7 @@ class SocketDBWrapper final : public Singleton m_queryStatus = DbQueryStatus::INVALID_RESPONSE; m_exceptionStr = "DB query invalid response: " + responsePacket; } + m_dataReady = true; m_conditionVariable.notify_one(); } }); @@ -154,8 +156,9 @@ class SocketDBWrapper final : public Singleton { // Acquire lock to avoid multiple threads sending queries at the same time std::scoped_lock lockMessage {m_mutexMessage}; + m_dataReady = false; - if (m_teardown.load()) + if (m_teardown) { return; } @@ -174,14 +177,7 @@ class SocketDBWrapper final : public Singleton } m_dbSocket->send(query.c_str(), query.size()); - m_conditionVariable.wait( - lockResponse, [this] { return !m_response.empty() || !m_exceptionStr.empty() || m_teardown.load(); }); - - // Check if the object was destroyed. 
If so, return and do not process the response - if (m_teardown.load()) - { - return; - } + m_conditionVariable.wait(lockResponse, [this] { return m_dataReady || m_teardown; }); if (!m_exceptionStr.empty()) { @@ -206,7 +202,7 @@ class SocketDBWrapper final : public Singleton */ void teardown() { - m_teardown.store(true); + m_teardown = true; m_conditionVariable.notify_all(); m_dbSocket->stop(); } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_001.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_001.out index 76d18c9e38b..0febf0db927 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_001.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/008/expected_001.out @@ -1,9 +1,9 @@ [ -"wazuh-modulesd:vulnerability-scanner:resultIndexer.hpp:56 handleRequest : Processing and publish key: CVE-2024-21405", -"wazuh-modulesd:vulnerability-scanner:resultIndexer.hpp:56 handleRequest : Processing and publish key: CVE-2024-21372", -"wazuh-modulesd:vulnerability-scanner:resultIndexer.hpp:56 handleRequest : Processing and publish key: CVE-2024-21371", -"wazuh-modulesd:vulnerability-scanner:resultIndexer.hpp:56 handleRequest : Processing and publish key: CVE-2024-21340", -"wazuh-modulesd:vulnerability-scanner:resultIndexer.hpp:56 handleRequest : Processing and publish key: CVE-2024-21338", -"wazuh-modulesd:vulnerability-scanner:resultIndexer.hpp:56 handleRequest : Processing and publish key: CVE-2024-21341", -"wazuh-modulesd:vulnerability-scanner:resultIndexer.hpp:56 handleRequest : Processing and publish key: CVE-2023-32040" +"Processing and publish key: CVE-2024-21405", +"Processing and publish key: CVE-2024-21372", +"Processing and publish key: CVE-2024-21371", +"Processing and publish key: CVE-2024-21340", +"Processing and publish key: CVE-2024-21338", +"Processing and publish key: CVE-2024-21341", +"Processing and publish key: CVE-2023-32040" ] From 47f75e889720db3083314dcb90635e33d8a6491e Mon Sep 17 00:00:00 2001 From: Gabriel Valenzuela Date: Mon, 13 May 2024 19:31:21 -0300 Subject: [PATCH 066/419] CL: - Apply review comments. 
--- src/shared_modules/utils/tests/socketDBWrapper_test.cpp | 1 - .../vulnerability_scanner/tests/unit/osDataCache_test.cpp | 2 -- .../tests/unit/scanOrchestrator_test.cpp | 2 -- .../tests/unit/scanOrchestrator_test.hpp | 1 - .../vulnerability_scanner/testtool/scanner/main.cpp | 5 ++++- 5 files changed, 4 insertions(+), 7 deletions(-) diff --git a/src/shared_modules/utils/tests/socketDBWrapper_test.cpp b/src/shared_modules/utils/tests/socketDBWrapper_test.cpp index 79415990d7d..2cbbe649dbe 100644 --- a/src/shared_modules/utils/tests/socketDBWrapper_test.cpp +++ b/src/shared_modules/utils/tests/socketDBWrapper_test.cpp @@ -23,7 +23,6 @@ TEST_F(SocketDBWrapperTest, EmptyTest) m_responses = std::vector {" "}; nlohmann::json output; - // The exception captured here is the timeout EXPECT_THROW(SocketDBWrapper::instance().query(m_query, output), std::exception); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp index 638bf58940e..3d4b9b63e34 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp @@ -13,8 +13,6 @@ #include "MockSocketDBWrapper.hpp" #include "TrampolineSocketDBWrapper.hpp" -auto constexpr EMPTY_RESPONSE = ""; - TEST_F(OsDataCacheTest, TestSetAndGetSuccess) { // Start fake server diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp index 5070fa143bd..213f47a6d78 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp @@ -28,8 +28,6 @@ auto constexpr TEST_REPORTS_QUEUE_PATH {"queue/vd/reports"}; auto constexpr TEST_REPORTS_BULK_SIZE {1}; -auto constexpr TEST_SOCKET {"queue/db/wdb"}; -auto constexpr SLEEP_TIME {0}; using ::testing::_; using testing::Return; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.hpp index 7be6ccfd10d..26ee26e0cd3 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.hpp @@ -12,7 +12,6 @@ #ifndef _SCAN_ORCHESTRATOR_TEST_HPP #define _SCAN_ORCHESTRATOR_TEST_HPP -#include "socketServer.hpp" #include "gtest/gtest.h" /** diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp index a44d59d0e5c..ea05d13826f 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp @@ -230,6 +230,7 @@ int main(const int argc, const char* argv[]) std::string responseMessage = fakeAgentPackages.contains(agentId) ? "ok " + fakeAgentPackages[agentId].dump() : "ok []"; + std::cout << "Response message for sys_programs: " << responseMessage << std::endl; fakeDBServer->send(fd, responseMessage.c_str(), responseMessage.size()); } else if (tableName.find("sys_osinfo") != std::string::npos || tokens[2] == "osinfo") @@ -237,6 +238,7 @@ int main(const int argc, const char* argv[]) std::string responseMessage = fakeAgentOsData.contains(agentId) ? 
"ok " + fakeAgentOsData[agentId].dump() : "ok []"; + std::cout << "Response message for sys_osinfo: " << responseMessage << std::endl; fakeDBServer->send(fd, responseMessage.c_str(), responseMessage.size()); } else if (tableName.find("sys_hotfixes") != std::string::npos || tokens[2] == "hotfix") @@ -244,7 +246,7 @@ int main(const int argc, const char* argv[]) std::string responseMessage = fakeAgentHotfixes.contains(agentId) ? "ok " + fakeAgentHotfixes[agentId].dump() : "ok []"; - std::cout << "Response message: " << responseMessage << std::endl; + std::cout << "Response message for sys_hotfixes: " << responseMessage << std::endl; fakeDBServer->send(fd, responseMessage.c_str(), responseMessage.size()); } else @@ -368,6 +370,7 @@ int main(const int argc, const char* argv[]) if (parser.Parse(jsonInputFile.c_str())) { isDelta = false; + std::cout << "jsonInputFile: " << jsonInputFile.c_str() << "\n"; std::cout << "Syscollector synchronization parsed successfully" << std::endl; } else From a57af3effb54f088c57cb21c700ea7db7d6b6e83 Mon Sep 17 00:00:00 2001 From: Marcel Kemp Date: Tue, 14 May 2024 10:44:39 +0200 Subject: [PATCH 067/419] build: bump revision to 40810 --- api/api/spec/spec.yaml | 2 +- framework/wazuh/core/cluster/__init__.py | 2 +- src/Doxyfile | 2 +- src/REVISION | 2 +- src/init/wazuh-client.sh | 2 +- src/init/wazuh-local.sh | 2 +- src/init/wazuh-server.sh | 2 +- src/win32/wazuh-installer.nsi | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index ce9be6cd4c5..2ceb1477065 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -41,7 +41,7 @@ info: version: '4.8.0' - x-revision: '40809' + x-revision: '40810' title: 'Wazuh API REST' license: name: 'GPL 2.0' diff --git a/framework/wazuh/core/cluster/__init__.py b/framework/wazuh/core/cluster/__init__.py index a87460ae2d7..83ac0bd8c49 100644 --- a/framework/wazuh/core/cluster/__init__.py +++ b/framework/wazuh/core/cluster/__init__.py @@ -5,7 +5,7 @@ # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 __version__ = '4.8.0' -__revision__ = '40809' +__revision__ = '40810' __author__ = "Wazuh Inc" __wazuh_name__ = "Wazuh" __licence__ = "\ diff --git a/src/Doxyfile b/src/Doxyfile index 8002130447c..a3a6de3761c 100644 --- a/src/Doxyfile +++ b/src/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = "WAZUH" # could be handy for archiving the generated documentation or if some version # control system is used. 
-PROJECT_NUMBER = "v4.8.0-40809" +PROJECT_NUMBER = "v4.8.0-40810" # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/src/REVISION b/src/REVISION index 7a374c6741e..0a01e47db9f 100644 --- a/src/REVISION +++ b/src/REVISION @@ -1 +1 @@ -40809 +40810 diff --git a/src/init/wazuh-client.sh b/src/init/wazuh-client.sh index aef11529fef..b294d3006b7 100755 --- a/src/init/wazuh-client.sh +++ b/src/init/wazuh-client.sh @@ -12,7 +12,7 @@ DIR=`dirname $PWD`; # Installation info VERSION="v4.8.0" -REVISION="40809" +REVISION="40810" TYPE="agent" ### Do not modify below here ### diff --git a/src/init/wazuh-local.sh b/src/init/wazuh-local.sh index 6a592296cad..53c8b93d5ca 100644 --- a/src/init/wazuh-local.sh +++ b/src/init/wazuh-local.sh @@ -14,7 +14,7 @@ PLIST=${DIR}/bin/.process_list; # Installation info VERSION="v4.8.0" -REVISION="40809" +REVISION="40810" TYPE="local" ### Do not modify below here ### diff --git a/src/init/wazuh-server.sh b/src/init/wazuh-server.sh index 68f366ced2d..e54ccf92ed7 100755 --- a/src/init/wazuh-server.sh +++ b/src/init/wazuh-server.sh @@ -14,7 +14,7 @@ PLIST=${DIR}/bin/.process_list; # Installation info VERSION="v4.8.0" -REVISION="40809" +REVISION="40810" TYPE="server" ### Do not modify below here ### diff --git a/src/win32/wazuh-installer.nsi b/src/win32/wazuh-installer.nsi index d998c5c89b9..43596608308 100644 --- a/src/win32/wazuh-installer.nsi +++ b/src/win32/wazuh-installer.nsi @@ -21,7 +21,7 @@ !define MUI_ICON install.ico !define MUI_UNICON uninstall.ico !define VERSION "4.8.0" -!define REVISION "40809" +!define REVISION "40810" !define NAME "Wazuh" !define SERVICE "WazuhSvc" From d2bc8e7fa26621f888ca2c562e8014b3fbc18b6c Mon Sep 17 00:00:00 2001 From: jr0me Date: Mon, 13 May 2024 16:46:37 -0300 Subject: [PATCH 068/419] Set security level when creating SSL context for HP-UX agents. Due to the migration to OpenSSL 3.0, HP-UX agents where no longer able to register due to errors during the certificates verification. Setting the security level from the default (1) to 4 fixes the issue by incrementing security to 192 bits. --- src/os_auth/ssl.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/os_auth/ssl.c b/src/os_auth/ssl.c index 6b33569db39..ccaa8bc9577 100644 --- a/src/os_auth/ssl.c +++ b/src/os_auth/ssl.c @@ -26,6 +26,8 @@ #include "shared.h" #include "auth.h" +#define CTX_SECURITY_LEVEL_4 4 + /* Global variables */ BIO *bio_err; @@ -111,6 +113,9 @@ SSL_CTX *get_ssl_context(const char *ciphers, int auto_method) // If auto_method isn't set, allow TLSv1.2 only if (!auto_method) { +#ifdef HPUX + SSL_CTX_set_security_level(ctx, CTX_SECURITY_LEVEL_4); +#endif SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv3 | SSL_OP_NO_TLSv1 | SSL_OP_NO_TLSv1_1); } From 52e1c9926f9f9b063637b740da2c25243de263b4 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Wed, 15 May 2024 00:30:21 -0300 Subject: [PATCH 069/419] Get flobal agent information when an upgrade or re-scan single agent is triggered. 
--- .../buildSingleAgentListContext.hpp | 27 +++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp index 94de673c44a..bccc29bc380 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp @@ -15,6 +15,7 @@ #include "chainOfResponsability.hpp" #include "loggerHelper.h" #include "scanContext.hpp" +#include "socketDBWrapper.hpp" #include "vulnerabilityScanner.hpp" #include "wazuhDBQueryBuilder.hpp" @@ -24,8 +25,9 @@ * This class is responsible for managing the execution of queries within the global Wazuh environment. * * @tparam TScanContext scan context type. + * @tparam TSocketDBWrapper socket database wrapper type */ -template +template class TBuildSingleAgentListInfoContext final : public AbstractHandler> { @@ -44,8 +46,29 @@ class TBuildSingleAgentListInfoContext final : public AbstractHandler handleRequest(std::shared_ptr data) override { + nlohmann::json response; + + try + { + // Execute query + TSocketDBWrapper::instance().query( + WazuhDBQueryBuilder::builder() + .globalGetCommand(std::string("agent-info ") + data->agentId().data()) + .build(), + response); + } + // LCOV_EXCL_START + catch (const std::exception& e) + { + logDebug2(WM_VULNSCAN_LOGTAG, "Error executing query to fetch global agent data. Reason: %s.", e.what()); + throw WdbDataException("Error executing query to fetch global agent data", data->agentId().data()); + } + // LCOV_EXCL_STOP + // Agent in the first element of the array. + const auto& agent = response.back(); data->m_agents.push_back( - {data->agentId().data(), data->agentName().data(), data->agentVersion().data(), data->agentIp().data()}); + {data->agentId().data(), agent.at("name"), Utils::leftTrim(agent.at("version"), "Wazuh "), agent.at("ip")}); + return AbstractHandler>::handleRequest(std::move(data)); } }; From 5ce3ad2459cab73078f5be0b02e6b7a751816da4 Mon Sep 17 00:00:00 2001 From: Juan Cabrera Date: Wed, 15 May 2024 10:11:42 +0000 Subject: [PATCH 070/419] Bump version to 4.7.5 --- CHANGELOG.md | 3 +++ api/api/spec/spec.yaml | 8 ++++---- api/setup.py | 2 +- framework/setup.py | 2 +- framework/wazuh/__init__.py | 2 +- framework/wazuh/core/cluster/__init__.py | 4 ++-- src/Doxyfile | 2 +- src/REVISION | 2 +- src/VERSION | 2 +- src/headers/defs.h | 2 +- src/init/wazuh-client.sh | 4 ++-- src/init/wazuh-local.sh | 4 ++-- src/init/wazuh-server.sh | 4 ++-- src/win32/version.rc | 4 ++-- src/win32/wazuh-installer.nsi | 6 +++--- src/win32/wazuh-installer.wxs | 2 +- 16 files changed, 28 insertions(+), 25 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5059d06ad00..13bffa310c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,9 @@ # Change Log All notable changes to this project will be documented in this file. 
+## [v4.7.5] + + ## [v4.7.4] ### Manager diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index e9b6f11aa5d..fdf4093a297 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -40,12 +40,12 @@ info: - version: '4.7.4' - x-revision: '40717' + version: '4.7.5' + x-revision: '40718' title: 'Wazuh API REST' license: name: 'GPL 2.0' - url: 'https://github.com/wazuh/wazuh/blob/v4.7.4/LICENSE' + url: 'https://github.com/wazuh/wazuh/blob/v4.7.5/LICENSE' servers: - url: '{protocol}://{host}:{port}' @@ -7099,7 +7099,7 @@ paths: api_version: "v4.5.0" revision: '40100' license_name: "GPL 2.0" - license_url: "https://github.com/wazuh/wazuh/blob/v4.7.4/LICENSE" + license_url: "https://github.com/wazuh/wazuh/blob/v4.7.5/LICENSE" hostname: "wazuh" timestamp: "2019-04-02T08:08:11Z" diff --git a/api/setup.py b/api/setup.py index 3c4031bfefc..57c231afdcd 100755 --- a/api/setup.py +++ b/api/setup.py @@ -15,7 +15,7 @@ setup( name='api', - version='4.7.4', + version='4.7.5', description="Wazuh API", author_email="hello@wazuh.com", author="Wazuh", diff --git a/framework/setup.py b/framework/setup.py index c4999a39034..93f561e8570 100755 --- a/framework/setup.py +++ b/framework/setup.py @@ -41,7 +41,7 @@ def run(self): setup(name='wazuh', - version='4.7.4', + version='4.7.5', description='Wazuh control with Python', url='https://github.com/wazuh', author='Wazuh', diff --git a/framework/wazuh/__init__.py b/framework/wazuh/__init__.py index 5be6bb662f9..afd892b8ef8 100755 --- a/framework/wazuh/__init__.py +++ b/framework/wazuh/__init__.py @@ -18,7 +18,7 @@ """ -__version__ = '4.7.4' +__version__ = '4.7.5' msg = "\n\nPython 2.7 or newer not found." diff --git a/framework/wazuh/core/cluster/__init__.py b/framework/wazuh/core/cluster/__init__.py index a9cb10250fc..3754106f44c 100644 --- a/framework/wazuh/core/cluster/__init__.py +++ b/framework/wazuh/core/cluster/__init__.py @@ -4,8 +4,8 @@ # Created by Wazuh, Inc. . # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 -__version__ = '4.7.4' -__revision__ = '40717' +__version__ = '4.7.5' +__revision__ = '40718' __author__ = "Wazuh Inc" __wazuh_name__ = "Wazuh" __licence__ = "\ diff --git a/src/Doxyfile b/src/Doxyfile index 64ebf325d78..ff0d51ba7ec 100644 --- a/src/Doxyfile +++ b/src/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = "WAZUH" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = "v4.7.4-40717" +PROJECT_NUMBER = "v4.7.5-40718" # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/src/REVISION b/src/REVISION index f7bda4846ff..4119a90ff7a 100644 --- a/src/REVISION +++ b/src/REVISION @@ -1 +1 @@ -40717 +40718 diff --git a/src/VERSION b/src/VERSION index ece1b3d5fb2..8e042965dfc 100644 --- a/src/VERSION +++ b/src/VERSION @@ -1 +1 @@ -v4.7.4 +v4.7.5 diff --git a/src/headers/defs.h b/src/headers/defs.h index 61e112f3bd9..5dfb3d3427a 100644 --- a/src/headers/defs.h +++ b/src/headers/defs.h @@ -67,7 +67,7 @@ /* Some global names */ #define __ossec_name "Wazuh" -#define __ossec_version "v4.7.4" +#define __ossec_version "v4.7.5" #define __author "Wazuh Inc." 
#define __contact "info@wazuh.com" #define __site "http://www.wazuh.com" diff --git a/src/init/wazuh-client.sh b/src/init/wazuh-client.sh index 26f75e6b64f..bc50b5daebf 100755 --- a/src/init/wazuh-client.sh +++ b/src/init/wazuh-client.sh @@ -11,8 +11,8 @@ PWD=`pwd` DIR=`dirname $PWD`; # Installation info -VERSION="v4.7.4" -REVISION="40717" +VERSION="v4.7.5" +REVISION="40718" TYPE="agent" ### Do not modify below here ### diff --git a/src/init/wazuh-local.sh b/src/init/wazuh-local.sh index d61dffd2dfc..24bef291051 100644 --- a/src/init/wazuh-local.sh +++ b/src/init/wazuh-local.sh @@ -13,8 +13,8 @@ DIR=`dirname $PWD`; PLIST=${DIR}/bin/.process_list; # Installation info -VERSION="v4.7.4" -REVISION="40717" +VERSION="v4.7.5" +REVISION="40718" TYPE="local" ### Do not modify below here ### diff --git a/src/init/wazuh-server.sh b/src/init/wazuh-server.sh index 129663ca32d..12fb7d699e3 100755 --- a/src/init/wazuh-server.sh +++ b/src/init/wazuh-server.sh @@ -13,8 +13,8 @@ DIR=`dirname $PWD`; PLIST=${DIR}/bin/.process_list; # Installation info -VERSION="v4.7.4" -REVISION="40717" +VERSION="v4.7.5" +REVISION="40718" TYPE="server" ### Do not modify below here ### diff --git a/src/win32/version.rc b/src/win32/version.rc index d7ef47b5543..ea6efbca0e2 100644 --- a/src/win32/version.rc +++ b/src/win32/version.rc @@ -16,11 +16,11 @@ #define QUOTE(x) Q(x) #ifndef VER_PRODUCTVERSION -#define VER_PRODUCTVERSION 4,7,4,0 +#define VER_PRODUCTVERSION 4,7,5,0 #endif #ifndef VER_PRODUCTVERSION_STR -#define VER_PRODUCTVERSION_STR v4.7.4 +#define VER_PRODUCTVERSION_STR v4.7.5 #endif #ifndef VER_FILEFLAGS diff --git a/src/win32/wazuh-installer.nsi b/src/win32/wazuh-installer.nsi index d422c0c007a..99741ed3741 100644 --- a/src/win32/wazuh-installer.nsi +++ b/src/win32/wazuh-installer.nsi @@ -20,8 +20,8 @@ ; general !define MUI_ICON install.ico !define MUI_UNICON uninstall.ico -!define VERSION "4.7.4" -!define REVISION "40717" +!define VERSION "4.7.5" +!define REVISION "40718" !define NAME "Wazuh" !define SERVICE "WazuhSvc" @@ -36,7 +36,7 @@ Name "${NAME} Windows Agent v${VERSION}" BrandingText "Copyright (C) 2015, Wazuh Inc." OutFile "${OutFile}" -VIProductVersion "4.7.4.0" +VIProductVersion "4.7.5.0" VIAddVersionKey ProductName "${NAME}" VIAddVersionKey CompanyName "Wazuh Inc." VIAddVersionKey LegalCopyright "2023 - Wazuh Inc." diff --git a/src/win32/wazuh-installer.wxs b/src/win32/wazuh-installer.wxs index e9893568bd4..949ab85b780 100644 --- a/src/win32/wazuh-installer.wxs +++ b/src/win32/wazuh-installer.wxs @@ -1,6 +1,6 @@ - + From 343ff10cf8873f0c8650fb3047861ed020f77d79 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Wed, 15 May 2024 15:16:57 -0300 Subject: [PATCH 071/419] Add tests and requested changes in PR. 
--- .../buildSingleAgentListContext.hpp | 14 +- .../unit/buildSingleAgentListContext_test.cpp | 142 ++++++++++++++++++ .../unit/buildSingleAgentListContext_test.hpp | 42 ++++++ .../testtool/scanner/main.cpp | 21 ++- 4 files changed, 212 insertions(+), 7 deletions(-) create mode 100644 src/wazuh_modules/vulnerability_scanner/tests/unit/buildSingleAgentListContext_test.cpp create mode 100644 src/wazuh_modules/vulnerability_scanner/tests/unit/buildSingleAgentListContext_test.hpp diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp index bccc29bc380..13ab6622e9f 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp @@ -64,11 +64,15 @@ class TBuildSingleAgentListInfoContext final : public AbstractHandleragentId().data()); } // LCOV_EXCL_STOP - // Agent in the first element of the array. - const auto& agent = response.back(); - data->m_agents.push_back( - {data->agentId().data(), agent.at("name"), Utils::leftTrim(agent.at("version"), "Wazuh "), agent.at("ip")}); - + // Return elements should be one agent. + if (response.size() == 1) + { + const auto& agent = response.front(); + data->m_agents.push_back({data->agentId().data(), + agent.at("name"), + Utils::leftTrim(agent.at("version"), "Wazuh "), + agent.at("ip")}); + } return AbstractHandler>::handleRequest(std::move(data)); } }; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildSingleAgentListContext_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildSingleAgentListContext_test.cpp new file mode 100644 index 00000000000..b3caa8d5f46 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildSingleAgentListContext_test.cpp @@ -0,0 +1,142 @@ +/* + * Wazuh Vulnerability Scanner - Unit Tests + * Copyright (C) 2015, Wazuh Inc. + * February 21, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. 
+ */ + +#include "buildSingleAgentListContext_test.hpp" +#include "TrampolineOsDataCache.hpp" +#include "TrampolineSocketDBWrapper.hpp" +#include "buildSingleAgentListContext.hpp" + +TEST_F(BuildSingleAgentListContextTest, BuildSingleAgentListContext) +{ + spSocketDBWrapperMock = std::make_shared(); + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)).Times(1); + + const auto jsonData = + R"({ + "agent_info": { + "agent_id":"001" + }, + "action":"upgradeAgentDB"})"_json; + + std::variant + data = &jsonData; + + auto singleAgentContext = std::make_shared< + TBuildSingleAgentListInfoContext, TrampolineSocketDBWrapper>>(); + + auto scanContext = std::make_shared>(data); + singleAgentContext->handleRequest(scanContext); +} + +TEST_F(BuildSingleAgentListContextTest, BuildSingleAgentListContextEmpty) +{ + spSocketDBWrapperMock = std::make_shared(); + + nlohmann::json queryResult = R"([])"_json; + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::SetArgReferee<1>(queryResult)); + + const auto jsonData = + R"({ + "agent_info": { + "agent_id":"001" + }, + "action":"upgradeAgentDB"})"_json; + + std::variant + data = &jsonData; + + auto singleAgentContext = std::make_shared< + TBuildSingleAgentListInfoContext, TrampolineSocketDBWrapper>>(); + + auto scanContext = std::make_shared>(data); + + // Context is not used + singleAgentContext->handleRequest(scanContext); + + EXPECT_EQ(scanContext->m_agents.size(), 0); +} + +TEST_F(BuildSingleAgentListContextTest, BuildSingleAgentListContextWithElements) +{ + spSocketDBWrapperMock = std::make_shared(); + const auto jsonData = + R"({ + "agent_info": { + "agent_id":"001" + }, + "action":"upgradeAgentDB"})"_json; + + nlohmann::json queryResult = R"([{ + "id": 1, + "name": "name", + "version": "Wazuh v4.4.4", + "ip": "192.168.0.1", + "node_name": "node_1" + }])"_json; + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::SetArgReferee<1>(queryResult)); + + std::variant + data = &jsonData; + + auto singleAgentContext = std::make_shared< + TBuildSingleAgentListInfoContext, TrampolineSocketDBWrapper>>(); + + auto scanContext = std::make_shared>(data); + + // Context is not used + singleAgentContext->handleRequest(scanContext); + + EXPECT_EQ(scanContext->m_agents.size(), 1); + + auto agent = scanContext->m_agents[0]; + + EXPECT_EQ(agent.id, "001"); + EXPECT_EQ(agent.name, "name"); + EXPECT_EQ(agent.version, "v4.4.4"); + EXPECT_EQ(agent.ip, "192.168.0.1"); +} + +TEST_F(BuildSingleAgentListContextTest, BuildSingleAgentListContextMultiple) +{ + spSocketDBWrapperMock = std::make_shared(); + + nlohmann::json queryResult = R"([{"element":"element1"},{"element":"element2"}])"_json; + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::SetArgReferee<1>(queryResult)); + + const auto jsonData = + R"({ + "agent_info": { + "agent_id":"001" + }, + "action":"upgradeAgentDB"})"_json; + + std::variant + data = &jsonData; + + auto singleAgentContext = std::make_shared< + TBuildSingleAgentListInfoContext, TrampolineSocketDBWrapper>>(); + + auto scanContext = std::make_shared>(data); + + // Context is not used + singleAgentContext->handleRequest(scanContext); + + EXPECT_EQ(scanContext->m_agents.size(), 0); +} diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildSingleAgentListContext_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildSingleAgentListContext_test.hpp new file mode 100644 
index 00000000000..60e0a8ff616 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildSingleAgentListContext_test.hpp @@ -0,0 +1,42 @@ +/* + * Wazuh Vulnerability Scanner - Unit Tests + * Copyright (C) 2015, Wazuh Inc. + * May 15, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#ifndef _BUILD_SINGLE_AGENT_LIST_CONTEXT_HPP +#define _BUILD_SINGLE_AGENT_LIST_CONTEXT_HPP + +#include "socketServer.hpp" +#include "gtest/gtest.h" + +/** + * @brief FetchFromGlobalDB test class. + */ +class BuildSingleAgentListContextTest : public ::testing::Test +{ +protected: + // LCOV_EXCL_START + BuildSingleAgentListContextTest() = default; + ~BuildSingleAgentListContextTest() override = default; + + /** + * @brief Set the environment for testing. + * + */ + void SetUp() override {} + + /** + * @brief Clean the environment after testing. + * + */ + void TearDown() override {} + // LCOV_EXCL_STOP +}; + +#endif // _BUILD_SINGLE_AGENT_LIST_CONTEXT_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp index ea05d13826f..a816ade3289 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp @@ -219,8 +219,25 @@ int main(const int argc, const char* argv[]) { if (tokens[0] == "global") { - std::string successMessage = "ok " + fakeGlobalData.dump(); - fakeDBServer->send(fd, successMessage.c_str(), successMessage.size()); + if (tokens[1] == "get-agent-info") + { + const auto agentId = std::stoi(tokens[2]); + for (const auto& agent : fakeGlobalData) + { + if (agent["id"] == agentId) + { + std::string responseMessage = "ok " + agent.dump(); + std::cout << "Response message for global get-agent-info: " << responseMessage; + fakeDBServer->send(fd, responseMessage.c_str(), responseMessage.size()); + return; + } + } + } + else + { + std::string successMessage = "ok " + fakeGlobalData.dump(); + fakeDBServer->send(fd, successMessage.c_str(), successMessage.size()); + } } else if (tokens[0] == "agent" && Utils::isNumber(tokens[1])) { From b1edbdfe39970c06496a6f097e23f9037d1fd7bf Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Wed, 15 May 2024 16:05:28 -0300 Subject: [PATCH 072/419] Add test for more coverage. 
---
 .../unit/buildSingleAgentListContext_test.cpp | 30 +++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildSingleAgentListContext_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildSingleAgentListContext_test.cpp
index b3caa8d5f46..486a79c360f 100644
--- a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildSingleAgentListContext_test.cpp
+++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildSingleAgentListContext_test.cpp
@@ -140,3 +140,33 @@ TEST_F(BuildSingleAgentListContextTest, BuildSingleAgentListContextMultiple)
 
     EXPECT_EQ(scanContext->m_agents.size(), 0);
 }
+
+TEST_F(BuildSingleAgentListContextTest, ExceptionOnDB)
+{
+    spSocketDBWrapperMock = std::make_shared();
+
+    EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_))
+        .Times(1)
+        .WillOnce(testing::Throw(SocketDbWrapperException("Error on DB")));
+
+    const auto jsonData = R"(
+    {
+        "agent_info": {
+            "agent_id":"001"
+        },
+        "action":"upgradeAgentDB"
+    })"_json;
+
+    std::variant
+        data = &jsonData;
+
+    auto singleAgentContext = std::make_shared<
+        TBuildSingleAgentListInfoContext, TrampolineSocketDBWrapper>>();
+
+    auto scanContext = std::make_shared>(data);
+
+    // Context is not used
+    EXPECT_THROW(singleAgentContext->handleRequest(scanContext), WdbDataException);
+
+    spSocketDBWrapperMock.reset();
+}

From 4ab0721d1243b4b47af786e0c848f91c2b6b75cf Mon Sep 17 00:00:00 2001
From: Dwordcito Date: Wed, 15 May 2024 16:34:50 -0300
Subject: [PATCH 073/419] Remove coverage exclusion.

---
 .../src/scanOrchestrator/buildSingleAgentListContext.hpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp
index 13ab6622e9f..a01c10bd366 100644
--- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp
+++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildSingleAgentListContext.hpp
@@ -57,13 +57,11 @@ class TBuildSingleAgentListInfoContext final : public AbstractHandleragentId().data());
         }
-        // LCOV_EXCL_STOP
 
         // Return elements should be one agent.
if (response.size() == 1) { From 0a2927c2dbaee79c08af28257fab586ff6604f0a Mon Sep 17 00:00:00 2001 From: Tomas Turina Date: Wed, 15 May 2024 21:02:29 +0000 Subject: [PATCH 074/419] Remove rollback mechanism from update agent groups queries --- src/wazuh_db/wdb_parser.c | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/src/wazuh_db/wdb_parser.c b/src/wazuh_db/wdb_parser.c index 27e4d831a82..97b44d4e11d 100644 --- a/src/wazuh_db/wdb_parser.c +++ b/src/wazuh_db/wdb_parser.c @@ -1131,15 +1131,7 @@ int wdb_parse(char * input, char * output, int peer) { result = OS_INVALID; } else { gettimeofday(&begin, 0); - if (wdb_commit2(wdb) < 0) { - snprintf(output, OS_MAXSTR + 1, "err Cannot commit current transaction to continue"); - result = OS_INVALID; - } else { - result = wdb_parse_global_delete_group(wdb, next, output); - if (result == OS_INVALID && wdb_rollback2(wdb) < 0) { - mdebug1("Global DB Cannot rollback transaction."); - } - } + result = wdb_parse_global_delete_group(wdb, next, output); gettimeofday(&end, 0); timersub(&end, &begin, &diff); w_inc_global_group_delete_group_time(diff); @@ -1174,15 +1166,7 @@ int wdb_parse(char * input, char * output, int peer) { result = OS_INVALID; } else { gettimeofday(&begin, 0); - if (wdb_commit2(wdb) < 0) { - snprintf(output, OS_MAXSTR + 1, "err Cannot commit current transaction to continue"); - result = OS_INVALID; - } else { - result = wdb_parse_global_set_agent_groups(wdb, next, output); - if (result == OS_INVALID && wdb_rollback2(wdb) < 0) { - mdebug1("Global DB Cannot rollback transaction."); - } - } + result = wdb_parse_global_set_agent_groups(wdb, next, output); gettimeofday(&end, 0); timersub(&end, &begin, &diff); w_inc_global_agent_set_agent_groups_time(diff); From 126a915a0d938484a112b6f33461467a80c7279a Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Tue, 14 May 2024 19:22:16 -0300 Subject: [PATCH 075/419] wazuh-db: Add new endpoint to recalculate agent group hashes. --- src/wazuh_db/wdb.c | 1 + src/wazuh_db/wdb.h | 24 ++++++++++++++- src/wazuh_db/wdb_global.c | 61 ++++++++++++++++++++++++++++++++++----- src/wazuh_db/wdb_parser.c | 20 +++++++++++++ src/wazuh_db/wdb_state.c | 15 ++++++++++ src/wazuh_db/wdb_state.h | 15 ++++++++++ 6 files changed, 127 insertions(+), 9 deletions(-) diff --git a/src/wazuh_db/wdb.c b/src/wazuh_db/wdb.c index 7b89636c79a..0981a7d37ec 100644 --- a/src/wazuh_db/wdb.c +++ b/src/wazuh_db/wdb.c @@ -206,6 +206,7 @@ static const char *SQL_STMT[] = { [WDB_STMT_GLOBAL_GET_AGENTS] = "SELECT id FROM agent WHERE id > ?;", [WDB_STMT_GLOBAL_GET_AGENTS_BY_CONNECTION_STATUS] = "SELECT id FROM agent WHERE id > ? AND connection_status = ?;", [WDB_STMT_GLOBAL_GET_AGENTS_BY_CONNECTION_STATUS_AND_NODE] = "SELECT id FROM agent WHERE id > ? AND connection_status = ? AND node_name = ? ORDER BY id LIMIT ?;", + [WDB_STMT_GLOBAL_GET_AGENTS_TO_RECALCULATE_GROUP_HASH] = "SELECT id, group_hash FROM agent WHERE id > 0 AND `group` IS NOT NULL;", [WDB_STMT_GLOBAL_GET_AGENT_INFO] = "SELECT * FROM agent WHERE id = ?;", [WDB_STMT_GLOBAL_RESET_CONNECTION_STATUS] = "UPDATE agent SET connection_status = 'disconnected', status_code = ?, sync_status = ?, disconnection_time = STRFTIME('%s', 'NOW') where connection_status != 'disconnected' AND connection_status != 'never_connected' AND id != 0;", [WDB_STMT_GLOBAL_GET_AGENTS_TO_DISCONNECT] = "SELECT id FROM agent WHERE id > ? 
AND (connection_status = 'active' OR connection_status = 'pending') AND last_keepalive < ?;", diff --git a/src/wazuh_db/wdb.h b/src/wazuh_db/wdb.h index 20b1f862226..d725a35d571 100644 --- a/src/wazuh_db/wdb.h +++ b/src/wazuh_db/wdb.h @@ -265,6 +265,7 @@ typedef enum wdb_stmt { WDB_STMT_GLOBAL_GET_AGENTS, WDB_STMT_GLOBAL_GET_AGENTS_BY_CONNECTION_STATUS, WDB_STMT_GLOBAL_GET_AGENTS_BY_CONNECTION_STATUS_AND_NODE, + WDB_STMT_GLOBAL_GET_AGENTS_TO_RECALCULATE_GROUP_HASH, WDB_STMT_GLOBAL_GET_AGENT_INFO, WDB_STMT_GLOBAL_GET_AGENTS_TO_DISCONNECT, WDB_STMT_GLOBAL_RESET_CONNECTION_STATUS, @@ -1159,6 +1160,16 @@ int wdb_parse_global_get_agent_labels(wdb_t * wdb, char * input, char * output); */ int wdb_parse_get_groups_integrity(wdb_t * wdb, char * input, char* output); +/** + * @brief Function to recalculate the agent group hash in global.db. + * + * @param wdb The global struct database. + * @param output Response of the query. + * @return 0 Success: response contains "ok". + * -1 On error: response contains "err" and an error description. + */ +int wdb_parse_global_recalculate_agent_group_hashes(wdb_t* wdb, char* output); + /** * @brief Function to get all the agent information. * @@ -1332,14 +1343,25 @@ int wdb_parse_global_set_agent_groups(wdb_t* wdb, char* input, char* output); /** * @brief Function to recalculate the agent group hash. + * Compares the new hash with the old one, if they are different it updates it in the database. * * @param [in] wdb The global struct database. * @param [in] agent_id Int with the agent id. * @param [in] sync_status String with the sync_status to be set. + * @param [in] old_hash String with the old hash. * @return WDBC_OK Success. * WDBC_ERROR On error. */ -int wdb_global_recalculate_agent_groups_hash(wdb_t* wdb, int agent_id, char* sync_status); +int wdb_global_recalculate_agent_groups_hash(wdb_t* wdb, int agent_id, char* sync_status, const char* old_hash); + +/** + * @brief Function to recalculate the agent group hash for all agents. + * + * @param [in] wdb The global struct database. + * @return OS_SUCCESS Success. + * OS_INVALID On error. + */ +int wdb_global_recalculate_all_agent_groups_hash(wdb_t* wdb); /** * @brief Function to parse sync-agent-info-get params and set next ID to iterate on further calls. 
diff --git a/src/wazuh_db/wdb_global.c b/src/wazuh_db/wdb_global.c index f3597c00aca..4f3e993b3a3 100644 --- a/src/wazuh_db/wdb_global.c +++ b/src/wazuh_db/wdb_global.c @@ -927,7 +927,7 @@ int wdb_global_delete_group(wdb_t *wdb, char* group_name) { cJSON* agent_id = cJSON_GetObjectItem(agent_id_item, "id_agent"); if (cJSON_IsNumber(agent_id)) { if (WDBC_ERROR == wdb_global_if_empty_set_default_agent_group(wdb, agent_id->valueint) || - WDBC_ERROR == wdb_global_recalculate_agent_groups_hash(wdb, agent_id->valueint, sync_status)) { + WDBC_ERROR == wdb_global_recalculate_agent_groups_hash(wdb, agent_id->valueint, sync_status, NULL)) { merror("Couldn't recalculate hash group for agent: '%03d'", agent_id->valueint); } } @@ -1468,7 +1468,7 @@ wdbc_result wdb_global_set_agent_groups(wdb_t *wdb, wdb_groups_set_mode_t mode, } } if (OS_SUCCESS == valid_groups) { - if (WDBC_ERROR == wdb_global_recalculate_agent_groups_hash(wdb, agent_id, sync_status)) { + if (WDBC_ERROR == wdb_global_recalculate_agent_groups_hash(wdb, agent_id, sync_status, NULL)) { ret = WDBC_ERROR; merror("Couldn't recalculate hash group for agent: '%03d'", agent_id); } @@ -1482,7 +1482,7 @@ wdbc_result wdb_global_set_agent_groups(wdb_t *wdb, wdb_groups_set_mode_t mode, return ret; } -int wdb_global_recalculate_agent_groups_hash(wdb_t* wdb, int agent_id, char* sync_status) { +int wdb_global_recalculate_agent_groups_hash(wdb_t* wdb, int agent_id, char* sync_status, const char* old_hash) { int result = WDBC_OK; char* agent_groups_csv = wdb_global_calculate_agent_group_csv(wdb, agent_id); char groups_hash[WDB_GROUP_HASH_SIZE+1] = {0}; @@ -1491,17 +1491,62 @@ int wdb_global_recalculate_agent_groups_hash(wdb_t* wdb, int agent_id, char* syn } else { mwarn("The groups were empty right after the set for agent '%03d'", agent_id); } - if (WDBC_ERROR == wdb_global_set_agent_group_context(wdb, agent_id, agent_groups_csv, agent_groups_csv ? groups_hash : NULL, sync_status)) { - result = WDBC_ERROR; - merror("There was an error assigning the groups context to agent '%03d'", agent_id); + + if (old_hash != NULL && strcmp(old_hash, groups_hash) == 0) { + mdebug2("No need to update the group hash for agent id '%03d', the new hash '%s' matches the old hash '%s'", agent_id, groups_hash, old_hash); + } else { + if (WDBC_ERROR == wdb_global_set_agent_group_context(wdb, agent_id, agent_groups_csv, agent_groups_csv ? 
groups_hash : NULL, sync_status)) { + result = WDBC_ERROR; + merror("There was an error assigning the groups context to agent '%03d'", agent_id); + } + wdb_global_group_hash_cache(WDB_GLOBAL_GROUP_HASH_CLEAR, NULL); } os_free(agent_groups_csv); - wdb_global_group_hash_cache(WDB_GLOBAL_GROUP_HASH_CLEAR, NULL); - return result; } +int wdb_global_recalculate_all_agent_groups_hash(wdb_t* wdb) { + int is_worker = OS_INVALID; + char* sync_status = NULL; + + //Prepare SQL query + if (!wdb->transaction && wdb_begin2(wdb) < 0) { + mdebug1("Cannot begin transaction"); + return OS_INVALID; + } + + if (wdb_stmt_cache(wdb, WDB_STMT_GLOBAL_GET_AGENTS_TO_RECALCULATE_GROUP_HASH) < 0) { + mdebug1("Cannot cache statement"); + return OS_INVALID; + } + sqlite3_stmt* stmt = wdb->stmt[WDB_STMT_GLOBAL_GET_AGENTS_TO_RECALCULATE_GROUP_HASH]; + + //Get agents to recalculate hash + cJSON* j_stmt_result = wdb_exec_stmt(stmt); + cJSON* agent = NULL; + sync_status = (w_is_single_node(&is_worker) || is_worker)?"synced":"syncreq"; + cJSON_ArrayForEach(agent, j_stmt_result) { + cJSON* id = cJSON_GetObjectItem(agent, "id"); + cJSON* old_group_hash = cJSON_GetObjectItem(agent, "group_hash"); + if (cJSON_IsNumber(id)) { + if (WDBC_ERROR == wdb_global_recalculate_agent_groups_hash(wdb, id->valueint, sync_status, cJSON_GetStringValue(old_group_hash))) { + merror("Couldn't recalculate hash group for agent: '%03d'", id->valueint); + cJSON_Delete(j_stmt_result); + return OS_INVALID; + } + } + else { + merror("Invalid element returned by get all agents query"); + cJSON_Delete(j_stmt_result); + return OS_INVALID; + } + } + cJSON_Delete(j_stmt_result); + + return OS_SUCCESS; +} + int wdb_global_set_agent_groups_sync_status(wdb_t *wdb, int id, const char* sync_status) { sqlite3_stmt *stmt = wdb_init_stmt_in_cache(wdb, WDB_STMT_GLOBAL_GROUP_SYNC_SET); if (stmt == NULL) { diff --git a/src/wazuh_db/wdb_parser.c b/src/wazuh_db/wdb_parser.c index a3c7d2de5ff..63d0c0d3074 100644 --- a/src/wazuh_db/wdb_parser.c +++ b/src/wazuh_db/wdb_parser.c @@ -1215,6 +1215,13 @@ int wdb_parse(char * input, char * output, int peer) { timersub(&end, &begin, &diff); w_inc_global_agent_get_groups_integrity_time(diff); } + } else if (strcmp(query, "recalculate-agent-group-hashes") == 0) { + w_inc_global_agent_recalculate_agent_group_hashes(); + gettimeofday(&begin, 0); + result = wdb_parse_global_recalculate_agent_group_hashes(wdb, output); + gettimeofday(&end, 0); + timersub(&end, &begin, &diff); + w_inc_global_agent_recalculate_agent_group_hashes_time(diff); } else if (strcmp(query, "disconnect-agents") == 0) { w_inc_global_agent_disconnect_agents(); if (!next) { @@ -6020,6 +6027,19 @@ int wdb_parse_get_groups_integrity(wdb_t* wdb, char* input, char* output) { return OS_SUCCESS; } +int wdb_parse_global_recalculate_agent_group_hashes(wdb_t* wdb, char* output) { + + if (OS_SUCCESS != wdb_global_recalculate_all_agent_groups_hash(wdb)) { + mdebug1("Error recalculating group hash of agents in global.db."); + snprintf(output, OS_MAXSTR + 1, "err Error recalculating group hash of agents in global.db"); + return OS_INVALID; + } + + snprintf(output, OS_MAXSTR + 1, "ok"); + + return OS_SUCCESS; +} + int wdb_parse_global_get_agent_info(wdb_t* wdb, char* input, char* output) { int agent_id = 0; cJSON *agent_info = NULL; diff --git a/src/wazuh_db/wdb_state.c b/src/wazuh_db/wdb_state.c index b6b32726835..c63c770dbf0 100644 --- a/src/wazuh_db/wdb_state.c +++ b/src/wazuh_db/wdb_state.c @@ -778,6 +778,18 @@ void w_inc_global_agent_get_groups_integrity_time(struct timeval 
time) { w_mutex_unlock(&db_state_t_mutex); } +void w_inc_global_agent_recalculate_agent_group_hashes() { + w_mutex_lock(&db_state_t_mutex); + wdb_state.queries_breakdown.global_breakdown.agent.recalculate_agent_group_hashes_queries++; + w_mutex_unlock(&db_state_t_mutex); +} + +void w_inc_global_agent_recalculate_agent_group_hashes_time(struct timeval time) { + w_mutex_lock(&db_state_t_mutex); + timeradd(&wdb_state.queries_breakdown.global_breakdown.agent.recalculate_agent_group_hashes_time, &time, &wdb_state.queries_breakdown.global_breakdown.agent.recalculate_agent_group_hashes_time); + w_mutex_unlock(&db_state_t_mutex); +} + void w_inc_global_group_insert_agent_group() { w_mutex_lock(&db_state_t_mutex); wdb_state.queries_breakdown.global_breakdown.group.insert_agent_group_queries++; @@ -1165,6 +1177,7 @@ cJSON* wdb_create_state_json() { cJSON_AddNumberToObject(_global_tables_agent, "get-all-agents", wdb_state_cpy.queries_breakdown.global_breakdown.agent.get_all_agents_queries); cJSON_AddNumberToObject(_global_tables_agent, "get-distinct-groups", wdb_state_cpy.queries_breakdown.global_breakdown.agent.get_distinct_groups_queries); cJSON_AddNumberToObject(_global_tables_agent, "get-groups-integrity", wdb_state_cpy.queries_breakdown.global_breakdown.agent.get_groups_integrity_queries); + cJSON_AddNumberToObject(_global_tables_agent, "recalculate-agent-group-hashes", wdb_state_cpy.queries_breakdown.global_breakdown.agent.recalculate_agent_group_hashes_queries); cJSON_AddNumberToObject(_global_tables_agent, "insert-agent", wdb_state_cpy.queries_breakdown.global_breakdown.agent.insert_agent_queries); cJSON_AddNumberToObject(_global_tables_agent, "reset-agents-connection", wdb_state_cpy.queries_breakdown.global_breakdown.agent.reset_agents_connection_queries); cJSON_AddNumberToObject(_global_tables_agent, "select-agent-group", wdb_state_cpy.queries_breakdown.global_breakdown.agent.select_agent_group_queries); @@ -1361,6 +1374,7 @@ cJSON* wdb_create_state_json() { cJSON_AddNumberToObject(_global_tables_agent_t, "get-all-agents", timeval_to_milis(wdb_state_cpy.queries_breakdown.global_breakdown.agent.get_all_agents_time)); cJSON_AddNumberToObject(_global_tables_agent_t, "get-distinct-groups", timeval_to_milis(wdb_state_cpy.queries_breakdown.global_breakdown.agent.get_distinct_groups_time)); cJSON_AddNumberToObject(_global_tables_agent_t, "get-groups-integrity", timeval_to_milis(wdb_state_cpy.queries_breakdown.global_breakdown.agent.get_groups_integrity_time)); + cJSON_AddNumberToObject(_global_tables_agent_t, "recalculate-agent-group-hashes", timeval_to_milis(wdb_state_cpy.queries_breakdown.global_breakdown.agent.recalculate_agent_group_hashes_time)); cJSON_AddNumberToObject(_global_tables_agent_t, "insert-agent", timeval_to_milis(wdb_state_cpy.queries_breakdown.global_breakdown.agent.insert_agent_time)); cJSON_AddNumberToObject(_global_tables_agent_t, "reset-agents-connection", timeval_to_milis(wdb_state_cpy.queries_breakdown.global_breakdown.agent.reset_agents_connection_time)); cJSON_AddNumberToObject(_global_tables_agent_t, "select-agent-group", timeval_to_milis(wdb_state_cpy.queries_breakdown.global_breakdown.agent.select_agent_group_time)); @@ -1514,6 +1528,7 @@ STATIC uint64_t get_global_time(wdb_state_t *state){ timeradd(&task_time, &state->queries_breakdown.global_breakdown.agent.sync_agent_groups_get_time, &task_time); timeradd(&task_time, &state->queries_breakdown.global_breakdown.agent.set_agent_groups_time, &task_time); timeradd(&task_time, 
&state->queries_breakdown.global_breakdown.agent.get_groups_integrity_time, &task_time); + timeradd(&task_time, &state->queries_breakdown.global_breakdown.agent.recalculate_agent_group_hashes_time, &task_time); timeradd(&task_time, &state->queries_breakdown.global_breakdown.group.insert_agent_group_time, &task_time); timeradd(&task_time, &state->queries_breakdown.global_breakdown.group.delete_group_time, &task_time); timeradd(&task_time, &state->queries_breakdown.global_breakdown.group.select_groups_time, &task_time); diff --git a/src/wazuh_db/wdb_state.h b/src/wazuh_db/wdb_state.h index 8f8f37f56cb..fea3a55b031 100644 --- a/src/wazuh_db/wdb_state.h +++ b/src/wazuh_db/wdb_state.h @@ -134,6 +134,7 @@ typedef struct _global_agent_t { uint64_t get_all_agents_queries; uint64_t get_distinct_groups_queries; uint64_t get_groups_integrity_queries; + uint64_t recalculate_agent_group_hashes_queries; uint64_t insert_agent_queries; uint64_t reset_agents_connection_queries; uint64_t select_agent_group_queries; @@ -155,6 +156,7 @@ typedef struct _global_agent_t { struct timeval get_all_agents_time; struct timeval get_distinct_groups_time; struct timeval get_groups_integrity_time; + struct timeval recalculate_agent_group_hashes_time; struct timeval insert_agent_time; struct timeval reset_agents_connection_time; struct timeval select_agent_group_time; @@ -1030,6 +1032,19 @@ void w_inc_global_agent_get_groups_integrity(); */ void w_inc_global_agent_get_groups_integrity_time(struct timeval time); +/** + * @brief Increment recalculate-agent-group-hashes global agent queries counter + * + */ +void w_inc_global_agent_recalculate_agent_group_hashes(); + +/** + * @brief Increment recalculate-agent-group-hashes global agent time counter + * + * @param time Value to increment the counter. + */ +void w_inc_global_agent_recalculate_agent_group_hashes_time(struct timeval time); + /** * @brief Increment insert-agent-group global group queries counter * From dd22e61259cdcfabbc78369caadabacb2a995324 Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Tue, 14 May 2024 23:06:00 -0300 Subject: [PATCH 076/419] wazuh-db: Added tests in test_wdb_global_parser for endpoint recalculate-agent-group-hashes. 
--- src/unit_tests/wazuh_db/CMakeLists.txt | 3 +- .../wazuh_db/test_wdb_global_parser.c | 62 +++++++++++++++++++ .../wazuh/wazuh_db/wdb_global_wrappers.c | 4 ++ .../wazuh/wazuh_db/wdb_global_wrappers.h | 2 + .../wazuh/wazuh_db/wdb_state_wrappers.c | 8 +++ .../wazuh/wazuh_db/wdb_state_wrappers.h | 4 ++ 6 files changed, 82 insertions(+), 1 deletion(-) diff --git a/src/unit_tests/wazuh_db/CMakeLists.txt b/src/unit_tests/wazuh_db/CMakeLists.txt index 2402f8312cf..e6b97e82422 100644 --- a/src/unit_tests/wazuh_db/CMakeLists.txt +++ b/src/unit_tests/wazuh_db/CMakeLists.txt @@ -81,7 +81,8 @@ list(APPEND wdb_tests_flags "-Wl,--wrap,wdb_open_global -Wl,--wrap,wdb_leave -Wl -Wl,--wrap,wdb_commit2 -Wl,--wrap,wdb_vacuum -Wl,--wrap,wdb_get_db_state -Wl,--wrap,wdb_finalize_all_statements \ -Wl,--wrap,wdb_update_last_vacuum_data -Wl,--wrap,wdb_get_db_free_pages_percentage -Wl,--wrap,wdb_global_get_distinct_agent_groups \ -Wl,--wrap,w_inc_global_agent_get_distinct_groups -Wl,--wrap,w_inc_global_agent_get_distinct_groups_time \ - -Wl,--wrap,w_inc_global_open_time -Wl,--wrap,wdb_pool_leave ${DEBUG_OP_WRAPPERS}") + -Wl,--wrap,w_inc_global_open_time -Wl,--wrap,wdb_pool_leave -Wl,--wrap,w_inc_global_agent_recalculate_agent_group_hashes \ + -Wl,--wrap,w_inc_global_agent_recalculate_agent_group_hashes_time -Wl,--wrap,wdb_global_recalculate_all_agent_groups_hash ${DEBUG_OP_WRAPPERS}") list(APPEND wdb_tests_names "test_wdb_global") list(APPEND wdb_tests_flags "-Wl,--wrap,wdb_exec -Wl,--wrap,sqlite3_errmsg -Wl,--wrap,wdb_begin2 -Wl,--wrap,wdb_stmt_cache -Wl,--wrap,sqlite3_bind_int \ diff --git a/src/unit_tests/wazuh_db/test_wdb_global_parser.c b/src/unit_tests/wazuh_db/test_wdb_global_parser.c index 5afd98c2848..f4431bd54b5 100644 --- a/src/unit_tests/wazuh_db/test_wdb_global_parser.c +++ b/src/unit_tests/wazuh_db/test_wdb_global_parser.c @@ -4797,6 +4797,65 @@ void test_wdb_parse_global_get_distinct_agent_groups_result_null_with_last_hash( assert_int_equal(ret, OS_INVALID); } +/* Tests wdb_parse_global_recalculate_agent_group_hashes */ + +void test_wdb_parse_global_recalculate_agent_group_hashes_error(void **state) +{ + int ret = 0; + test_struct_t *data = (test_struct_t *)*state; + char query[OS_BUFFER_SIZE] = "global recalculate-agent-group-hashes"; + + will_return(__wrap_wdb_open_global, data->wdb); + expect_string(__wrap__mdebug2, formatted_msg, "Global query: recalculate-agent-group-hashes"); + will_return(__wrap_wdb_global_recalculate_all_agent_groups_hash, OS_INVALID); + expect_string(__wrap__mdebug1, formatted_msg, "Error recalculating group hash of agents in global.db."); + + expect_function_call(__wrap_w_inc_queries_total); + expect_function_call(__wrap_w_inc_global); + expect_function_call(__wrap_gettimeofday); + expect_function_call(__wrap_gettimeofday); + expect_function_call(__wrap_w_inc_global_open_time); + expect_function_call(__wrap_w_inc_global_agent_recalculate_agent_group_hashes); + expect_function_call(__wrap_gettimeofday); + expect_function_call(__wrap_gettimeofday); + expect_function_call(__wrap_w_inc_global_agent_recalculate_agent_group_hashes_time); + + expect_function_call(__wrap_wdb_pool_leave); + + ret = wdb_parse(query, data->output, 0); + + assert_string_equal(data->output, "err Error recalculating group hash of agents in global.db"); + assert_int_equal(ret, OS_INVALID); +} + +void test_wdb_parse_global_recalculate_agent_group_hashes_success(void **state) +{ + int ret = 0; + test_struct_t *data = (test_struct_t *)*state; + char query[OS_BUFFER_SIZE] = "global 
recalculate-agent-group-hashes"; + + will_return(__wrap_wdb_open_global, data->wdb); + expect_string(__wrap__mdebug2, formatted_msg, "Global query: recalculate-agent-group-hashes"); + will_return(__wrap_wdb_global_recalculate_all_agent_groups_hash, OS_SUCCESS); + + expect_function_call(__wrap_w_inc_queries_total); + expect_function_call(__wrap_w_inc_global); + expect_function_call(__wrap_gettimeofday); + expect_function_call(__wrap_gettimeofday); + expect_function_call(__wrap_w_inc_global_open_time); + expect_function_call(__wrap_w_inc_global_agent_recalculate_agent_group_hashes); + expect_function_call(__wrap_gettimeofday); + expect_function_call(__wrap_gettimeofday); + expect_function_call(__wrap_w_inc_global_agent_recalculate_agent_group_hashes_time); + + expect_function_call(__wrap_wdb_pool_leave); + + ret = wdb_parse(query, data->output, 0); + + assert_string_equal(data->output, "ok"); + assert_int_equal(ret, OS_SUCCESS); +} + int main() { const struct CMUnitTest tests[] = { @@ -4982,6 +5041,9 @@ int main() cmocka_unit_test_setup_teardown(test_wdb_parse_global_get_distinct_agent_groups_result_null, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_wdb_parse_global_get_distinct_agent_groups_success_with_last_hash, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_wdb_parse_global_get_distinct_agent_groups_result_null_with_last_hash, test_setup, test_teardown), + /* Tests wdb_parse_global_recalculate_agent_group_hashes */ + cmocka_unit_test_setup_teardown(test_wdb_parse_global_recalculate_agent_group_hashes_error, test_setup, test_teardown), + cmocka_unit_test_setup_teardown(test_wdb_parse_global_recalculate_agent_group_hashes_success, test_setup, test_teardown), }; return cmocka_run_group_tests(tests, NULL, NULL); diff --git a/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_global_wrappers.c b/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_global_wrappers.c index 490a7ec5c60..5d01452b31f 100644 --- a/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_global_wrappers.c +++ b/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_global_wrappers.c @@ -367,3 +367,7 @@ cJSON* __wrap_wdb_global_get_distinct_agent_groups( __attribute__((unused)) wd *status = mock(); return mock_ptr_type(cJSON*); } + +int __wrap_wdb_global_recalculate_all_agent_groups_hash(__attribute__((unused)) wdb_t *wdb) { + return mock(); +} diff --git a/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_global_wrappers.h b/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_global_wrappers.h index 9bf56f1ebde..c14fd409b33 100644 --- a/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_global_wrappers.h +++ b/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_global_wrappers.h @@ -110,4 +110,6 @@ int __wrap_wdb_remove_group_db(const char *name, int *sock); cJSON* __wrap_wdb_global_get_distinct_agent_groups( __attribute__((unused)) wdb_t *wdb, char *group_hash, wdbc_result* status); +int __wrap_wdb_global_recalculate_all_agent_groups_hash(__attribute__((unused)) wdb_t *wdb); + #endif diff --git a/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_state_wrappers.c b/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_state_wrappers.c index 0445e9af784..bbdc29ff348 100644 --- a/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_state_wrappers.c +++ b/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_state_wrappers.c @@ -236,6 +236,14 @@ void __wrap_w_inc_global_agent_get_distinct_groups_time(__attribute__((unused))s function_called(); } +void __wrap_w_inc_global_agent_recalculate_agent_group_hashes() { + function_called(); +} + +void 
__wrap_w_inc_global_agent_recalculate_agent_group_hashes_time(__attribute__((unused))struct timeval diff) { + function_called(); +} + // Global group counters void __wrap_w_inc_global_group_insert_agent_group() { diff --git a/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_state_wrappers.h b/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_state_wrappers.h index caa8177b5ff..6bc118b7c01 100644 --- a/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_state_wrappers.h +++ b/src/unit_tests/wrappers/wazuh/wazuh_db/wdb_state_wrappers.h @@ -128,6 +128,10 @@ void __wrap_w_inc_global_agent_get_distinct_groups(); void __wrap_w_inc_global_agent_get_distinct_groups_time(__attribute__((unused))struct timeval diff); +void __wrap_w_inc_global_agent_recalculate_agent_group_hashes(); + +void __wrap_w_inc_global_agent_recalculate_agent_group_hashes_time(__attribute__((unused))struct timeval diff); + // Global group counters void __wrap_w_inc_global_group_insert_agent_group(); From 1f2913b40da9085063137de0ab4635481ad2a631 Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Wed, 15 May 2024 00:22:59 -0300 Subject: [PATCH 077/419] wazuh-db: Added tests for wdb_global_recalculate_all_agent_groups_hash. --- src/unit_tests/wazuh_db/test_wdb_global.c | 116 ++++++++++++++++++++++ 1 file changed, 116 insertions(+) diff --git a/src/unit_tests/wazuh_db/test_wdb_global.c b/src/unit_tests/wazuh_db/test_wdb_global.c index 0286773b2c4..bc1bb28b0c9 100644 --- a/src/unit_tests/wazuh_db/test_wdb_global.c +++ b/src/unit_tests/wazuh_db/test_wdb_global.c @@ -9212,6 +9212,116 @@ void test_wdb_global_get_distinct_agent_groups_succes_ok(void **state) __real_cJSON_Delete(result); } +/* Tests wdb_global_recalculate_all_agent_groups_hash */ + +void test_wdb_global_recalculate_all_agent_groups_hash_transaction_fail(void **state) +{ + test_struct_t *data = (test_struct_t *)*state; + + will_return(__wrap_wdb_begin2, -1); + expect_string(__wrap__mdebug1, formatted_msg, "Cannot begin transaction"); + + int result = wdb_global_recalculate_all_agent_groups_hash(data->wdb); + + assert_int_equal(result, OS_INVALID); +} + +void test_wdb_global_recalculate_all_agent_groups_hash_cache_fail(void **state) +{ + test_struct_t *data = (test_struct_t *)*state; + + will_return(__wrap_wdb_begin2, 1); + will_return(__wrap_wdb_stmt_cache, -1); + expect_string(__wrap__mdebug1, formatted_msg, "Cannot cache statement"); + + int result = wdb_global_recalculate_all_agent_groups_hash(data->wdb); + + assert_int_equal(result, OS_INVALID); +} + +void test_wdb_global_recalculate_all_agent_groups_hash_exec_stmt_null(void **state) +{ + test_struct_t *data = (test_struct_t *)*state; + + will_return(__wrap_wdb_begin2, 1); + will_return(__wrap_wdb_stmt_cache, 1); + will_return(__wrap_wdb_exec_stmt, NULL); + + will_return(__wrap_w_is_single_node, 1); + will_return(__wrap_w_is_single_node, 1); + + expect_function_call(__wrap_cJSON_Delete); + + int result = wdb_global_recalculate_all_agent_groups_hash(data->wdb); + + assert_int_equal(result, OS_SUCCESS); +} + +void test_wdb_global_recalculate_all_agent_groups_hash_invalid_id(void **state) +{ + test_struct_t *data = (test_struct_t *)*state; + cJSON *json_agent = NULL; + + json_agent = cJSON_CreateObject(); + cJSON_AddStringToObject(json_agent, "id", "id_str"); + + will_return(__wrap_wdb_begin2, 1); + will_return(__wrap_wdb_stmt_cache, 1); + will_return(__wrap_wdb_exec_stmt, json_agent); + + will_return(__wrap_w_is_single_node, 1); + will_return(__wrap_w_is_single_node, 1); + + expect_string(__wrap__merror, formatted_msg, "Invalid 
element returned by get all agents query"); + + expect_function_call(__wrap_cJSON_Delete); + + int result = wdb_global_recalculate_all_agent_groups_hash(data->wdb); + + assert_int_equal(result, OS_INVALID); + __real_cJSON_Delete(json_agent); +} + +void test_wdb_global_recalculate_all_agent_groups_hash_recalculate_error(void **state) +{ + test_struct_t *data = (test_struct_t *)*state; + cJSON *json_agent = NULL; + int agent_id = 1; + + cJSON* j_stmt_result = __real_cJSON_CreateArray(); + json_agent = cJSON_CreateObject(); + cJSON_AddItemToObject(json_agent, "id", cJSON_CreateNumber(agent_id)); + cJSON_AddItemToArray(j_stmt_result, json_agent); + + will_return(__wrap_wdb_begin2, 1); + will_return(__wrap_wdb_stmt_cache, 1); + will_return(__wrap_wdb_exec_stmt, j_stmt_result); + + will_return(__wrap_w_is_single_node, 1); + will_return(__wrap_w_is_single_node, 1); + + /* wdb_global_calculate_agent_group_csv */ + will_return(__wrap_wdb_begin2, -1); + expect_string(__wrap__mdebug1, formatted_msg, "Cannot begin transaction"); + expect_string(__wrap__mdebug1, formatted_msg, "Unable to get groups of agent '001'"); + expect_string(__wrap__mwarn, formatted_msg, "The groups were empty right after the set for agent '001'"); + + /* wdb_global_set_agent_group_context */ + expect_value(__wrap_wdb_init_stmt_in_cache, statement_index, WDB_STMT_GLOBAL_GROUP_CTX_SET); + will_return(__wrap_wdb_init_stmt_in_cache, NULL); + expect_string(__wrap__merror, formatted_msg, "There was an error assigning the groups context to agent '001'"); + + expect_string(__wrap__merror, formatted_msg, "Couldn't recalculate hash group for agent: '001'"); + + expect_function_call(__wrap_cJSON_Delete); + + int result = wdb_global_recalculate_all_agent_groups_hash(data->wdb); + + assert_int_equal(result, OS_INVALID); + __real_cJSON_Delete(j_stmt_result); +} + + int main() { const struct CMUnitTest tests[] = { @@ -9680,6 +9790,12 @@ int main() cmocka_unit_test_setup_teardown(test_wdb_global_get_distinct_agent_groups_exec_fail, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_wdb_global_get_distinct_agent_groups_succes_due, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_wdb_global_get_distinct_agent_groups_succes_ok, test_setup, test_teardown), + /* Tests wdb_global_recalculate_all_agent_groups_hash */ + cmocka_unit_test_setup_teardown(test_wdb_global_recalculate_all_agent_groups_hash_transaction_fail, test_setup, test_teardown), + cmocka_unit_test_setup_teardown(test_wdb_global_recalculate_all_agent_groups_hash_cache_fail, test_setup, test_teardown), + cmocka_unit_test_setup_teardown(test_wdb_global_recalculate_all_agent_groups_hash_exec_stmt_null, test_setup, test_teardown), + cmocka_unit_test_setup_teardown(test_wdb_global_recalculate_all_agent_groups_hash_invalid_id, test_setup, test_teardown), + cmocka_unit_test_setup_teardown(test_wdb_global_recalculate_all_agent_groups_hash_recalculate_error, test_setup, test_teardown), }; return cmocka_run_group_tests(tests, NULL, NULL); From a18c5f95d619f8a1fd0920a0532bd55e25c3603b Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Wed, 15 May 2024 17:43:27 -0300 Subject: [PATCH 078/419] wazuh-db: + reverts changes to wdb_global_set_agent_group_context function. + adds new function wdb_global_recalculate_agent_groups_hash_without_sync_status. 
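In practice, the difference between the two recalculation helpers is which columns they rewrite. A
condensed view, taken from the prepared statements registered below in wdb.c:

    /* wdb_global_recalculate_agent_groups_hash(wdb, id, sync_status)
     *   -> wdb_global_set_agent_group_context() / WDB_STMT_GLOBAL_GROUP_CTX_SET:
     *      UPDATE agent SET 'group' = ?, group_hash = ?, group_sync_status = ? WHERE id = ?;
     *
     * wdb_global_recalculate_agent_groups_hash_without_sync_status(wdb, id)
     *   -> wdb_global_set_agent_group_hash() / WDB_STMT_GLOBAL_GROUP_HASH_SET:
     *      UPDATE agent SET 'group' = ?, group_hash = ? WHERE id = ?;
     */

The bulk path, wdb_global_recalculate_all_agent_groups_hash(), now uses the second variant, so
recomputing every agent's hash no longer rewrites group_sync_status.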
--- src/wazuh_db/wdb.c | 3 +- src/wazuh_db/wdb.h | 25 ++++++++++++++-- src/wazuh_db/wdb_global.c | 63 +++++++++++++++++++++++++++++---------- 3 files changed, 72 insertions(+), 19 deletions(-) diff --git a/src/wazuh_db/wdb.c b/src/wazuh_db/wdb.c index 0981a7d37ec..f10651ff60b 100644 --- a/src/wazuh_db/wdb.c +++ b/src/wazuh_db/wdb.c @@ -201,12 +201,13 @@ static const char *SQL_STMT[] = { [WDB_STMT_GLOBAL_GROUP_CSV_GET] = "SELECT `group` from agent where id = ?;", [WDB_STMT_GLOBAL_GROUP_CTX_SET] = "UPDATE agent SET 'group' = ?, group_hash = ?, group_sync_status = ? WHERE id = ?;", [WDB_STMT_GLOBAL_GROUP_HASH_GET] = "SELECT group_hash FROM agent WHERE id > 0 AND group_hash IS NOT NULL ORDER BY id;", + [WDB_STMT_GLOBAL_GROUP_HASH_SET] = "UPDATE agent SET 'group' = ?, group_hash = ? WHERE id = ?;", [WDB_STMT_GLOBAL_UPDATE_AGENT_INFO] = "UPDATE agent SET config_sum = :config_sum, ip = :ip, manager_host = :manager_host, merged_sum = :merged_sum, name = :name, node_name = :node_name, os_arch = :os_arch, os_build = :os_build, os_codename = :os_codename, os_major = :os_major, os_minor = :os_minor, os_name = :os_name, os_platform = :os_platform, os_uname = :os_uname, os_version = :os_version, version = :version, last_keepalive = :last_keepalive, connection_status = :connection_status, disconnection_time = :disconnection_time, group_config_status = :group_config_status, status_code= :status_code, sync_status = :sync_status WHERE id = :id;", [WDB_STMT_GLOBAL_GET_GROUPS] = "SELECT DISTINCT `group`, group_hash from agent WHERE id > 0 AND group_hash > ? ORDER BY group_hash;", [WDB_STMT_GLOBAL_GET_AGENTS] = "SELECT id FROM agent WHERE id > ?;", [WDB_STMT_GLOBAL_GET_AGENTS_BY_CONNECTION_STATUS] = "SELECT id FROM agent WHERE id > ? AND connection_status = ?;", [WDB_STMT_GLOBAL_GET_AGENTS_BY_CONNECTION_STATUS_AND_NODE] = "SELECT id FROM agent WHERE id > ? AND connection_status = ? AND node_name = ? ORDER BY id LIMIT ?;", - [WDB_STMT_GLOBAL_GET_AGENTS_TO_RECALCULATE_GROUP_HASH] = "SELECT id, group_hash FROM agent WHERE id > 0 AND `group` IS NOT NULL;", + [WDB_STMT_GLOBAL_GET_AGENTS_TO_RECALCULATE_GROUP_HASH] = "SELECT id FROM agent WHERE id > 0;", [WDB_STMT_GLOBAL_GET_AGENT_INFO] = "SELECT * FROM agent WHERE id = ?;", [WDB_STMT_GLOBAL_RESET_CONNECTION_STATUS] = "UPDATE agent SET connection_status = 'disconnected', status_code = ?, sync_status = ?, disconnection_time = STRFTIME('%s', 'NOW') where connection_status != 'disconnected' AND connection_status != 'never_connected' AND id != 0;", [WDB_STMT_GLOBAL_GET_AGENTS_TO_DISCONNECT] = "SELECT id FROM agent WHERE id > ? AND (connection_status = 'active' OR connection_status = 'pending') AND last_keepalive < ?;", diff --git a/src/wazuh_db/wdb.h b/src/wazuh_db/wdb.h index d725a35d571..9a7bfbf04a4 100644 --- a/src/wazuh_db/wdb.h +++ b/src/wazuh_db/wdb.h @@ -260,6 +260,7 @@ typedef enum wdb_stmt { WDB_STMT_GLOBAL_GROUP_CSV_GET, WDB_STMT_GLOBAL_GROUP_CTX_SET, WDB_STMT_GLOBAL_GROUP_HASH_GET, + WDB_STMT_GLOBAL_GROUP_HASH_SET, WDB_STMT_GLOBAL_UPDATE_AGENT_INFO, WDB_STMT_GLOBAL_GET_GROUPS, WDB_STMT_GLOBAL_GET_AGENTS, @@ -1343,16 +1344,24 @@ int wdb_parse_global_set_agent_groups(wdb_t* wdb, char* input, char* output); /** * @brief Function to recalculate the agent group hash. - * Compares the new hash with the old one, if they are different it updates it in the database. * * @param [in] wdb The global struct database. * @param [in] agent_id Int with the agent id. * @param [in] sync_status String with the sync_status to be set. 
- * @param [in] old_hash String with the old hash. * @return WDBC_OK Success. * WDBC_ERROR On error. */ -int wdb_global_recalculate_agent_groups_hash(wdb_t* wdb, int agent_id, char* sync_status, const char* old_hash); +int wdb_global_recalculate_agent_groups_hash(wdb_t* wdb, int agent_id, char* sync_status); + +/** + * @brief Function to recalculate the agent group hash whitout update sync_status field. + * + * @param [in] wdb The global struct database. + * @param [in] agent_id Int with the agent id. + * @return WDBC_OK Success. + * WDBC_ERROR On error. + */ +int wdb_global_recalculate_agent_groups_hash_without_sync_status(wdb_t* wdb, int agent_id); /** * @brief Function to recalculate the agent group hash for all agents. @@ -2122,6 +2131,16 @@ char* wdb_global_calculate_agent_group_csv(wdb_t *wdb, int id); */ wdbc_result wdb_global_set_agent_group_context(wdb_t *wdb, int id, char* csv, char* hash, char* sync_status); +/** + * @brief Sets the group information in the agent table. + * @param [in] wdb The Global struct database. + * @param [in] id ID of the agent to set the information. + * @param [in] csv String with all the groups sepparated by comma to be inserted in the group column. + * @param [in] hash Hash calculus from the csv string to be inserted in the group_hash column. + * @return wdbc_result representing the status of the command. + */ +wdbc_result wdb_global_set_agent_group_hash(wdb_t *wdb, int id, char* csv, char* hash); + /** * @brief Verifies if at least one entry in the Global DB has the group_sync_status as "syncreq". * If not, it compares a received hash that represents the group column against a calculated hash. diff --git a/src/wazuh_db/wdb_global.c b/src/wazuh_db/wdb_global.c index 4f3e993b3a3..38fab052f19 100644 --- a/src/wazuh_db/wdb_global.c +++ b/src/wazuh_db/wdb_global.c @@ -927,7 +927,7 @@ int wdb_global_delete_group(wdb_t *wdb, char* group_name) { cJSON* agent_id = cJSON_GetObjectItem(agent_id_item, "id_agent"); if (cJSON_IsNumber(agent_id)) { if (WDBC_ERROR == wdb_global_if_empty_set_default_agent_group(wdb, agent_id->valueint) || - WDBC_ERROR == wdb_global_recalculate_agent_groups_hash(wdb, agent_id->valueint, sync_status, NULL)) { + WDBC_ERROR == wdb_global_recalculate_agent_groups_hash(wdb, agent_id->valueint, sync_status)) { merror("Couldn't recalculate hash group for agent: '%03d'", agent_id->valueint); } } @@ -1206,6 +1206,25 @@ wdbc_result wdb_global_set_agent_group_context(wdb_t *wdb, int id, char* csv, ch } } +wdbc_result wdb_global_set_agent_group_hash(wdb_t *wdb, int id, char* csv, char* hash) { + sqlite3_stmt* stmt = wdb_init_stmt_in_cache(wdb, WDB_STMT_GLOBAL_GROUP_HASH_SET); + if (stmt == NULL) { + return WDBC_ERROR; + } + + sqlite3_bind_text(stmt, 1, csv, -1, NULL); + sqlite3_bind_text(stmt, 2, hash, -1, NULL); + sqlite3_bind_int(stmt, 3, id); + + if (OS_SUCCESS == wdb_exec_stmt_silent(stmt)) { + return WDBC_OK; + } + else { + mdebug1("Error executing setting the agent group hash: %s", sqlite3_errmsg(wdb->db)); + return WDBC_ERROR; + } +} + cJSON* wdb_global_get_groups_integrity(wdb_t* wdb, os_sha1 hash) { sqlite3_stmt* stmt = wdb_init_stmt_in_cache(wdb, WDB_STMT_GLOBAL_GROUP_SYNCREQ_FIND); if (stmt == NULL) { @@ -1468,7 +1487,7 @@ wdbc_result wdb_global_set_agent_groups(wdb_t *wdb, wdb_groups_set_mode_t mode, } } if (OS_SUCCESS == valid_groups) { - if (WDBC_ERROR == wdb_global_recalculate_agent_groups_hash(wdb, agent_id, sync_status, NULL)) { + if (WDBC_ERROR == wdb_global_recalculate_agent_groups_hash(wdb, agent_id, sync_status)) { ret = 
WDBC_ERROR; merror("Couldn't recalculate hash group for agent: '%03d'", agent_id); } @@ -1482,7 +1501,7 @@ wdbc_result wdb_global_set_agent_groups(wdb_t *wdb, wdb_groups_set_mode_t mode, return ret; } -int wdb_global_recalculate_agent_groups_hash(wdb_t* wdb, int agent_id, char* sync_status, const char* old_hash) { +int wdb_global_recalculate_agent_groups_hash(wdb_t* wdb, int agent_id, char* sync_status) { int result = WDBC_OK; char* agent_groups_csv = wdb_global_calculate_agent_group_csv(wdb, agent_id); char groups_hash[WDB_GROUP_HASH_SIZE+1] = {0}; @@ -1491,24 +1510,40 @@ int wdb_global_recalculate_agent_groups_hash(wdb_t* wdb, int agent_id, char* syn } else { mwarn("The groups were empty right after the set for agent '%03d'", agent_id); } + if (WDBC_ERROR == wdb_global_set_agent_group_context(wdb, agent_id, agent_groups_csv, agent_groups_csv ? groups_hash : NULL, sync_status)) { + result = WDBC_ERROR; + merror("There was an error assigning the groups context to agent '%03d'", agent_id); + } + os_free(agent_groups_csv); - if (old_hash != NULL && strcmp(old_hash, groups_hash) == 0) { - mdebug2("No need to update the group hash for agent id '%03d', the new hash '%s' matches the old hash '%s'", agent_id, groups_hash, old_hash); + wdb_global_group_hash_cache(WDB_GLOBAL_GROUP_HASH_CLEAR, NULL); + + return result; +} + +int wdb_global_recalculate_agent_groups_hash_without_sync_status(wdb_t* wdb, int agent_id) { + int result = WDBC_OK; + char* agent_groups_csv = wdb_global_calculate_agent_group_csv(wdb, agent_id); + char groups_hash[WDB_GROUP_HASH_SIZE+1] = {0}; + + if (agent_groups_csv) { + OS_SHA256_String_sized(agent_groups_csv, groups_hash, WDB_GROUP_HASH_SIZE); } else { - if (WDBC_ERROR == wdb_global_set_agent_group_context(wdb, agent_id, agent_groups_csv, agent_groups_csv ? groups_hash : NULL, sync_status)) { - result = WDBC_ERROR; - merror("There was an error assigning the groups context to agent '%03d'", agent_id); - } - wdb_global_group_hash_cache(WDB_GLOBAL_GROUP_HASH_CLEAR, NULL); + mdebug1("No groups in belongs table for agent '%03d'", agent_id); + } + + if (WDBC_ERROR == wdb_global_set_agent_group_hash(wdb, agent_id, agent_groups_csv, agent_groups_csv ? 
groups_hash : NULL)) { + result = WDBC_ERROR; + merror("There was an error assigning the groups hash to agent '%03d'", agent_id); } + os_free(agent_groups_csv); + wdb_global_group_hash_cache(WDB_GLOBAL_GROUP_HASH_CLEAR, NULL); return result; } int wdb_global_recalculate_all_agent_groups_hash(wdb_t* wdb) { - int is_worker = OS_INVALID; - char* sync_status = NULL; //Prepare SQL query if (!wdb->transaction && wdb_begin2(wdb) < 0) { @@ -1525,12 +1560,10 @@ int wdb_global_recalculate_all_agent_groups_hash(wdb_t* wdb) { //Get agents to recalculate hash cJSON* j_stmt_result = wdb_exec_stmt(stmt); cJSON* agent = NULL; - sync_status = (w_is_single_node(&is_worker) || is_worker)?"synced":"syncreq"; cJSON_ArrayForEach(agent, j_stmt_result) { cJSON* id = cJSON_GetObjectItem(agent, "id"); - cJSON* old_group_hash = cJSON_GetObjectItem(agent, "group_hash"); if (cJSON_IsNumber(id)) { - if (WDBC_ERROR == wdb_global_recalculate_agent_groups_hash(wdb, id->valueint, sync_status, cJSON_GetStringValue(old_group_hash))) { + if (WDBC_ERROR == wdb_global_recalculate_agent_groups_hash_without_sync_status(wdb, id->valueint)) { merror("Couldn't recalculate hash group for agent: '%03d'", id->valueint); cJSON_Delete(j_stmt_result); return OS_INVALID; From 4b994e0c4c2a7d272b6253ec54bda3a1e314a5dd Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Wed, 15 May 2024 18:43:42 -0300 Subject: [PATCH 079/419] wazuh-db: + Added ut for function wdb_global_set_agent_group_hash + Fixed ut for function wdb_global_recalculate_all_agent_groups_hash --- src/unit_tests/wazuh_db/test_wdb_global.c | 88 +++++++++++++++++++---- 1 file changed, 76 insertions(+), 12 deletions(-) diff --git a/src/unit_tests/wazuh_db/test_wdb_global.c b/src/unit_tests/wazuh_db/test_wdb_global.c index bc1bb28b0c9..c110e636a22 100644 --- a/src/unit_tests/wazuh_db/test_wdb_global.c +++ b/src/unit_tests/wazuh_db/test_wdb_global.c @@ -8347,6 +8347,75 @@ void test_wdb_global_set_agent_group_context_exec_stmt_error(void **state) assert_int_equal(result, WDBC_ERROR); } +/* wdb_global_set_agent_group_hash */ + +void test_wdb_global_set_agent_group_hash_success(void **state) +{ + test_struct_t *data = (test_struct_t *)*state; + int agent_id = 1; + char* csv = "GROUP1,GROUP2,GROUP3"; + char* hash = "DUMMYHASH"; + + expect_value(__wrap_wdb_init_stmt_in_cache, statement_index, WDB_STMT_GLOBAL_GROUP_HASH_SET); + will_return(__wrap_wdb_init_stmt_in_cache, (sqlite3_stmt*)1); + expect_value(__wrap_sqlite3_bind_text, pos, 1); + expect_string(__wrap_sqlite3_bind_text, buffer, csv); + will_return(__wrap_sqlite3_bind_text, SQLITE_OK); + expect_value(__wrap_sqlite3_bind_text, pos, 2); + expect_string(__wrap_sqlite3_bind_text, buffer, hash); + will_return(__wrap_sqlite3_bind_text, SQLITE_OK); + expect_value(__wrap_sqlite3_bind_int, index, 3); + expect_value(__wrap_sqlite3_bind_int, value, agent_id); + will_return(__wrap_sqlite3_bind_int, SQLITE_OK); + will_return(__wrap_wdb_exec_stmt_silent, OS_SUCCESS); + + wdbc_result result = wdb_global_set_agent_group_hash(data->wdb, agent_id, csv, hash); + + assert_int_equal(result, WDBC_OK); +} + +void test_wdb_global_set_agent_group_hash_init_stmt_error(void **state) +{ + test_struct_t *data = (test_struct_t *)*state; + int agent_id = 1; + char* csv = "GROUP1,GROUP2,GROUP3"; + char* hash = "DUMMYHASH"; + + expect_value(__wrap_wdb_init_stmt_in_cache, statement_index, WDB_STMT_GLOBAL_GROUP_HASH_SET); + will_return(__wrap_wdb_init_stmt_in_cache, NULL); + + wdbc_result result = wdb_global_set_agent_group_hash(data->wdb, agent_id, csv, 
hash); + + assert_int_equal(result, WDBC_ERROR); +} + +void test_wdb_global_set_agent_group_hash_exec_stmt_error(void **state) +{ + test_struct_t *data = (test_struct_t *)*state; + int agent_id = 1; + char* csv = "GROUP1,GROUP2,GROUP3"; + char* hash = "DUMMYHASH"; + + expect_value(__wrap_wdb_init_stmt_in_cache, statement_index, WDB_STMT_GLOBAL_GROUP_HASH_SET); + will_return(__wrap_wdb_init_stmt_in_cache, (sqlite3_stmt*)1); + expect_value(__wrap_sqlite3_bind_text, pos, 1); + expect_string(__wrap_sqlite3_bind_text, buffer, csv); + will_return(__wrap_sqlite3_bind_text, SQLITE_OK); + expect_value(__wrap_sqlite3_bind_text, pos, 2); + expect_string(__wrap_sqlite3_bind_text, buffer, hash); + will_return(__wrap_sqlite3_bind_text, SQLITE_OK); + expect_value(__wrap_sqlite3_bind_int, index, 3); + expect_value(__wrap_sqlite3_bind_int, value, agent_id); + will_return(__wrap_sqlite3_bind_int, SQLITE_OK); + will_return(__wrap_wdb_exec_stmt_silent, OS_INVALID); + will_return(__wrap_sqlite3_errmsg, "ERROR MESSAGE"); + expect_string(__wrap__mdebug1, formatted_msg, "Error executing setting the agent group hash: ERROR MESSAGE"); + + wdbc_result result = wdb_global_set_agent_group_hash(data->wdb, agent_id, csv, hash); + + assert_int_equal(result, WDBC_ERROR); +} + /* Tests wdb_global_groups_number_get */ void test_wdb_global_groups_number_get_stmt_error(void **state) @@ -9247,9 +9316,6 @@ void test_wdb_global_recalculate_all_agent_groups_hash_exec_stmt_null(void **sta will_return(__wrap_wdb_stmt_cache, 1); will_return(__wrap_wdb_exec_stmt, NULL); - will_return(__wrap_w_is_single_node, 1); - will_return(__wrap_w_is_single_node, 1); - expect_function_call(__wrap_cJSON_Delete); int result = wdb_global_recalculate_all_agent_groups_hash(data->wdb); @@ -9269,9 +9335,6 @@ void test_wdb_global_recalculate_all_agent_groups_hash_invalid_id(void **state) will_return(__wrap_wdb_stmt_cache, 1); will_return(__wrap_wdb_exec_stmt, json_agent); - will_return(__wrap_w_is_single_node, 1); - will_return(__wrap_w_is_single_node, 1); - expect_string(__wrap__merror, formatted_msg, "Invalid element returned by get all agents query"); expect_function_call(__wrap_cJSON_Delete); @@ -9297,19 +9360,16 @@ void test_wdb_global_recalculate_all_agent_groups_hash_recalculate_error(void ** will_return(__wrap_wdb_stmt_cache, 1); will_return(__wrap_wdb_exec_stmt, j_stmt_result); - will_return(__wrap_w_is_single_node, 1); - will_return(__wrap_w_is_single_node, 1); - /* wdb_global_calculate_agent_group_csv */ will_return(__wrap_wdb_begin2, -1); expect_string(__wrap__mdebug1, formatted_msg, "Cannot begin transaction"); expect_string(__wrap__mdebug1, formatted_msg, "Unable to get groups of agent '001'"); - expect_string(__wrap__mwarn, formatted_msg, "The groups were empty right after the set for agent '001'"); + expect_string(__wrap__mdebug1, formatted_msg, "No groups in belongs table for agent '001'"); /* wdb_global_set_agent_group_context */ - expect_value(__wrap_wdb_init_stmt_in_cache, statement_index, WDB_STMT_GLOBAL_GROUP_CTX_SET); + expect_value(__wrap_wdb_init_stmt_in_cache, statement_index, WDB_STMT_GLOBAL_GROUP_HASH_SET); will_return(__wrap_wdb_init_stmt_in_cache, NULL); - expect_string(__wrap__merror, formatted_msg, "There was an error assigning the groups context to agent '001'"); + expect_string(__wrap__merror, formatted_msg, "There was an error assigning the groups hash to agent '001'"); expect_string(__wrap__merror, formatted_msg, "Couldn't recalculate hash group for agent: '001'"); @@ -9751,6 +9811,10 @@ int main() 
cmocka_unit_test_setup_teardown(test_wdb_global_set_agent_group_context_success, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_wdb_global_set_agent_group_context_init_stmt_error, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_wdb_global_set_agent_group_context_exec_stmt_error, test_setup, test_teardown), + /* Tests wdb_global_set_agent_group_hash */ + cmocka_unit_test_setup_teardown(test_wdb_global_set_agent_group_hash_success, test_setup, test_teardown), + cmocka_unit_test_setup_teardown(test_wdb_global_set_agent_group_hash_init_stmt_error, test_setup, test_teardown), + cmocka_unit_test_setup_teardown(test_wdb_global_set_agent_group_hash_exec_stmt_error, test_setup, test_teardown), /* Tests wdb_global_groups_number_get */ cmocka_unit_test_setup_teardown(test_wdb_global_groups_number_get_stmt_error, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_wdb_global_groups_number_get_bind_fail, test_setup, test_teardown), From a21bb9aeba97feb59b985523665b39b773b5a654 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Wed, 15 May 2024 22:57:51 -0300 Subject: [PATCH 080/419] Fix content generation configuration. --- .../testtool/scanner/config.content_generation.json | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.content_generation.json b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.content_generation.json index a5cbd4c0547..766247ddb74 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.content_generation.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/config.content_generation.json @@ -34,5 +34,7 @@ "url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0", "offset": 0 } - } + }, + "clusterName":"cluster01", + "clusterEnabled":false } From 8fa5a2402a6a4319ed04332f134f82e5299de807 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 24 Apr 2024 15:30:17 +0200 Subject: [PATCH 081/419] CL: - Added remediation cache baseline --- .../include/vulnerabilityScanner.hpp | 1 + .../databaseFeedManager.hpp | 22 ++++++++++++++++--- .../src/policyManager/policyManager.hpp | 16 ++++++++++++++ .../tests/mocks/MockPolicyManager.hpp | 7 ++++++ .../tests/mocks/TrampolinePolicyManager.hpp | 10 +++++++++ .../tests/unit/policyManager_test.cpp | 14 +++++++----- .../testtool/databaseFeedManager/main.cpp | 9 ++++++++ src/wazuh_modules/wm_vulnerability_scanner.c | 3 +++ 8 files changed, 74 insertions(+), 8 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp b/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp index 554815f4f35..13b1c412d4a 100644 --- a/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp @@ -27,6 +27,7 @@ #include #include +constexpr auto AGENT_REMEDIATIONS_COLUMN {"agent_remediations"}; constexpr auto REMEDIATIONS_COLUMN {"remediations"}; constexpr auto TRANSLATIONS_COLUMN {"translation"}; constexpr auto DESCRIPTIONS_COLUMN {"descriptions"}; diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp index 647bd66115b..257ea62f0c6 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp +++ 
b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp @@ -107,6 +107,19 @@ struct Translation final std::vector target; ///< Vector of valid targets. }; +/** + * @brief Translations cache. + * @details Key: Translation ID, Value: Translation information. + */ +using TranslationLRUCache = LRUCache; + +/** + * @brief Remediations cache. + * @details Key: Agent ID, Value: List of remediations. + * + */ +using RemediationLRUCache = LRUCache; + /** * @brief DatabaseFeedManager class. * @@ -280,7 +293,9 @@ class TDatabaseFeedManager final : public Observer const std::string topicName = updaterPolicy.at("topicName"); m_translationL2Cache = - std::make_unique>(TPolicyManager::instance().getTranslationLRUSize()); + std::make_unique(TPolicyManager::instance().getTranslationLRUSize()); + + m_remediationCache = std::make_unique(TPolicyManager::instance().getRemediationLRUSize()); try { @@ -363,7 +378,7 @@ class TDatabaseFeedManager final : public Observer } /** - * @brief Retrieves vulnerability remediation information from the database. + * @brief Retrieves vulnerability remediation information from the database, for a given CVE ID. * * This function retrieves remediation information associated with a given CVE ID * from the underlying database and stores it in the provided `dtoVulnRemediation` @@ -696,7 +711,8 @@ class TDatabaseFeedManager final : public Observer std::shared_ptr m_indexerConnector; std::unique_ptr m_contentRegistration; std::unique_ptr m_feedDatabase; - std::unique_ptr> m_translationL2Cache; + std::unique_ptr m_translationL2Cache; + std::unique_ptr m_remediationCache; std::unique_ptr m_contentUpdateSubscription; const std::atomic& m_shouldStop; diff --git a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp index 153587d4c66..8d4100779b6 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp @@ -29,6 +29,7 @@ constexpr auto UNKNOWN_VALUE {" "}; constexpr auto STATES_VD_INDEX_NAME_PREFIX {"wazuh-states-vulnerabilities-"}; constexpr auto DEFAULT_TRANSLATION_LRU_SIZE {2048}; constexpr auto DEFAULT_OSDATA_LRU_SIZE {1000}; +constexpr auto DEFAULT_REMEDIATION_LRU_SIZE {2048}; const static std::string UPDATER_PATH {"queue/vd_updater"}; enum class DisableManagerScanStatus : uint32_t @@ -177,6 +178,11 @@ class PolicyManager final : public Singleton newPolicy["osdataLRUSize"] = DEFAULT_OSDATA_LRU_SIZE; } + if (!newPolicy.contains("remediationLRUSize")) + { + newPolicy["remediationLRUSize"] = DEFAULT_REMEDIATION_LRU_SIZE; + } + if (!newPolicy.contains("managerDisabledScan")) { newPolicy["managerDisabledScan"] = DisableManagerScanStatus::SCAN_MANAGER; @@ -663,6 +669,16 @@ class PolicyManager final : public Singleton return m_configuration.at("osdataLRUSize").get(); } + /** + * @brief Get remediation LRU size. + * + * @return uint32_t remediation LRU size. + */ + uint32_t getRemediationLRUSize() const + { + return m_configuration.at("remediationLRUSize").get(); + } + /** * @brief Retrieves the current status of the manager's scan. 
* diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockPolicyManager.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockPolicyManager.hpp index 5e0345d097d..049a1091808 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockPolicyManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockPolicyManager.hpp @@ -207,6 +207,13 @@ class MockPolicyManager * @note This method is intended for testing purposes and does not perform any real action. */ MOCK_METHOD(uint32_t, getOsdataLRUSize, (), (const)); + + /** + * @brief Mock method for getRemediationsLRUSize. + * + * @note This method is intended for testing purposes and does not perform any real action. + */ + MOCK_METHOD(uint32_t, getRemediationLRUSize, (), (const)); }; #endif // _MOCK_POLICYMANAGER_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolinePolicyManager.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolinePolicyManager.hpp index 12d7f57c290..46544be02e7 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolinePolicyManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolinePolicyManager.hpp @@ -299,6 +299,16 @@ class TrampolinePolicyManager : public Singleton { return spPolicyManagerMock->getOsdataLRUSize(); } + + /** + * @brief Get remediation LRU size. + * + * @return uint32_t remediation LRU size. + */ + uint32_t getRemediationLRUSize() const + { + return spPolicyManagerMock->getRemediationLRUSize(); + } }; #endif //_TRAMPOLINE_POLICYMANAGER_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp index fde79e24a41..bff40bbdf35 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/policyManager_test.cpp @@ -184,7 +184,8 @@ TEST_F(PolicyManagerTest, validConfigurationCheckParameters) "translationLRUSize": 5000, "osdataLRUSize": 6000, "clusterName":"clusterName", - "clusterEnabled":false + "clusterEnabled":false, + "remediationLRUSize": 7000 })")}; EXPECT_NO_THROW(m_policyManager->initialize(configJson)); @@ -210,6 +211,7 @@ TEST_F(PolicyManagerTest, validConfigurationCheckParameters) R"({"configData":{"compressionType":"raw","consumerName":"Wazuh VulnerabilityDetector","contentFileName":"api_file.json","contentSource":"cti-offset","databasePath":"queue/vd_updater/rocksdb","deleteDownloadedContent":true,"offset":0,"outputFolder":"queue/vd_updater/tmp","url":"https://cti-url.com","versionedContent":"false"},"interval":3600,"ondemand":true,"topicName":"vulnerability_feed_manager"})"); EXPECT_EQ(m_policyManager->getTranslationLRUSize(), 5000); EXPECT_EQ(m_policyManager->getOsdataLRUSize(), 6000); + EXPECT_EQ(m_policyManager->getRemediationLRUSize(), 7000); } TEST_F(PolicyManagerTest, validConfigurationCheckParametersOffline) @@ -283,8 +285,9 @@ TEST_F(PolicyManagerTest, validConfigurationDefaultValuesWithClusterName) EXPECT_STREQ(m_policyManager->getKey().c_str(), ""); EXPECT_EQ(m_policyManager->getCAList().size(), 0); EXPECT_EQ(m_policyManager->getCTIUrl(), "cti-url.com"); - EXPECT_EQ(m_policyManager->getTranslationLRUSize(), 2048); - EXPECT_EQ(m_policyManager->getOsdataLRUSize(), 1000); + EXPECT_EQ(m_policyManager->getTranslationLRUSize(), DEFAULT_TRANSLATION_LRU_SIZE); + EXPECT_EQ(m_policyManager->getOsdataLRUSize(), DEFAULT_OSDATA_LRU_SIZE); + EXPECT_EQ(m_policyManager->getRemediationLRUSize(), 
DEFAULT_REMEDIATION_LRU_SIZE); EXPECT_STREQ("wazuh-states-vulnerabilities-cluster01", m_policyManager->getIndexerConfiguration().at("name").get_ref().c_str()); } @@ -322,8 +325,9 @@ TEST_F(PolicyManagerTest, validConfigurationDefaultValuesNoIndexer) EXPECT_EQ(m_policyManager->getHostList().count("http://localhost:9200"), 0); - EXPECT_EQ(m_policyManager->getTranslationLRUSize(), 2048); - EXPECT_EQ(m_policyManager->getOsdataLRUSize(), 1000); + EXPECT_EQ(m_policyManager->getTranslationLRUSize(), DEFAULT_TRANSLATION_LRU_SIZE); + EXPECT_EQ(m_policyManager->getOsdataLRUSize(), DEFAULT_OSDATA_LRU_SIZE); + EXPECT_EQ(m_policyManager->getRemediationLRUSize(), DEFAULT_REMEDIATION_LRU_SIZE); } TEST_F(PolicyManagerTest, validConfigurationVulnerabilityScannerIgnoreIndexStatus) diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/databaseFeedManager/main.cpp b/src/wazuh_modules/vulnerability_scanner/testtool/databaseFeedManager/main.cpp index cf9f71542ea..29ddc5812d0 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/databaseFeedManager/main.cpp +++ b/src/wazuh_modules/vulnerability_scanner/testtool/databaseFeedManager/main.cpp @@ -83,6 +83,15 @@ class DummyPolicyManager : public Singleton return 1000; } + /** + * @brief Get remediation LRU size. + * + */ + uint32_t getRemediationLRUSize() const + { + return 2048; + } + private: std::string m_topic; }; diff --git a/src/wazuh_modules/wm_vulnerability_scanner.c b/src/wazuh_modules/wm_vulnerability_scanner.c index d526df0f24d..334dccddce2 100644 --- a/src/wazuh_modules/wm_vulnerability_scanner.c +++ b/src/wazuh_modules/wm_vulnerability_scanner.c @@ -79,6 +79,9 @@ void* wm_vulnerability_scanner_main(wm_vulnerability_scanner_t* data) { getDefine_Int("vulnerability-detection", "translation_lru_size", 1, 100000)); cJSON_AddNumberToObject( config_json, "osdataLRUSize", getDefine_Int("vulnerability-detection", "osdata_lru_size", 1, 100000)); + cJSON_AddNumberToObject(config_json, + "remediationLRUSize", + getDefine_Int("vulnerability-detection", "remediation_lru_size", 1, 100000)); cJSON_AddNumberToObject(config_json, "managerDisabledScan", getDefine_Int("vulnerability-detection", "disable_scan_manager", 0, 1)); From 5bc1e22042eb425516ffd7e5819197b7edc15533 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 1 May 2024 20:32:36 -0300 Subject: [PATCH 082/419] CL: - Start implementation of needed functions --- .../vulnerability_scanner/CMakeLists.txt | 1 + .../include/vulnerabilityScanner.hpp | 2 +- .../vulnerability_scanner/schemas/hotfix.fbs | 7 + .../databaseFeedManager.hpp | 39 +- .../databaseFeedManager/updateHotfixes.hpp | 174 +++++++ .../scanOrchestrator/factoryOrchestrator.hpp | 6 +- .../scanOrchestrator/remediationDataCache.hpp | 120 +++++ .../src/scanOrchestrator/scanContext.hpp | 4 +- .../tests/unit/eventDetailsBuilder_test.cpp | 1 - .../tests/unit/updateCVECandidates_test.cpp | 2 - .../tests/unit/updateCVERemediations_test.hpp | 10 +- .../tests/unit/updateHotfixes_test.cpp | 483 ++++++++++++++++++ .../tests/unit/updateHotfixes_test.hpp | 44 ++ .../testtool/wazuhDBQuery/config.json | 79 +-- 14 files changed, 880 insertions(+), 92 deletions(-) create mode 100644 src/wazuh_modules/vulnerability_scanner/schemas/hotfix.fbs create mode 100644 src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp create mode 100644 src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp create mode 100644 src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp create mode 
100644 src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp diff --git a/src/wazuh_modules/vulnerability_scanner/CMakeLists.txt b/src/wazuh_modules/vulnerability_scanner/CMakeLists.txt index 162cdf1f477..96450d64a74 100644 --- a/src/wazuh_modules/vulnerability_scanner/CMakeLists.txt +++ b/src/wazuh_modules/vulnerability_scanner/CMakeLists.txt @@ -62,6 +62,7 @@ list(APPEND Schemas vulnerabilityRemediations packageTranslation messageBuffer + hotfix ) message("Compiling schemas") diff --git a/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp b/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp index 13b1c412d4a..9031dc5863c 100644 --- a/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp @@ -27,7 +27,7 @@ #include #include -constexpr auto AGENT_REMEDIATIONS_COLUMN {"agent_remediations"}; +constexpr auto HOTFIXES_COLUMN {"hotfixes_to_cves"}; constexpr auto REMEDIATIONS_COLUMN {"remediations"}; constexpr auto TRANSLATIONS_COLUMN {"translation"}; constexpr auto DESCRIPTIONS_COLUMN {"descriptions"}; diff --git a/src/wazuh_modules/vulnerability_scanner/schemas/hotfix.fbs b/src/wazuh_modules/vulnerability_scanner/schemas/hotfix.fbs new file mode 100644 index 00000000000..130d0c7344e --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/schemas/hotfix.fbs @@ -0,0 +1,7 @@ +namespace NSVulnerabilityScanner; + +table hotfix { + CVEs:[string]; +} + +root_type hotfix; diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp index 257ea62f0c6..713561147ae 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp @@ -114,9 +114,9 @@ struct Translation final using TranslationLRUCache = LRUCache; /** - * @brief Remediations cache. - * @details Key: Agent ID, Value: List of remediations. - * + * @brief Agent remediations cache. + * @details Key: Hotfix, Value: List of CVEs fixed. + * */ using RemediationLRUCache = LRUCache; @@ -409,6 +409,39 @@ class TDatabaseFeedManager final : public Observer dtoVulnRemediation.data = GetRemediationInfo(reinterpret_cast(dtoVulnRemediation.slice.data())); } + /** + * @brief Retrieves vulnerability remediation information from the database, for a given agent ID. + * + * This function retrieves remediation information associated with a given CVE ID from the underlying database and + * stores it in the 'remediationInfo' object. + * + * @param agentID The agent ID for which remediation information is requested. + * @param remediationInfo A reference to a `FlatbufferDataPair` object where the retrieved remediation + * information will be stored. + * + * @throws std::runtime_error if the retrieved data from the database is invalid or not in the expected FlatBuffers + * format. + */ + void getAgentRemediation(const std::string& agentID, FlatbufferDataPair& remediationInfo) + { + // If the remediation information is not found in the database, we return because there is no remediation. 
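+        // The lookup key is the agent ID; when an entry exists, it is expected to hold a RemediationInfo
+        // flatbuffer, which is verified below before being exposed through remediationInfo.data.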
+ if (auto result = m_feedDatabase->get(agentID, remediationInfo.slice, REMEDIATIONS_COLUMN); !result) + { + return; + } + + const auto remediationSlice = reinterpret_cast(remediationInfo.slice.data()); + + // Verify the integrity of the FlatBuffers remediation data + if (flatbuffers::Verifier verifier(remediationSlice, remediationInfo.slice.size()); + !VerifyRemediationInfoBuffer(verifier)) + { + throw std::runtime_error("Error: Invalid FlatBuffers data in RocksDB."); + } + + remediationInfo.data = GetRemediationInfo(remediationSlice); + } + /** * @brief Fills the Level 2 cache with translations from the feed database. * diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp new file mode 100644 index 00000000000..363974e312a --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp @@ -0,0 +1,174 @@ +/* + * Wazuh storeRemediationsModel + * Copyright (C) 2015, Wazuh Inc. + * October 05, 2023. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#ifndef _UPDATE_HOTFIXES_HPP +#define _UPDATE_HOTFIXES_HPP + +#include "cve5_generated.h" +#include "hotfixes_generated.h" +#include "hotfixes_schema.h" +#include "loggerHelper.h" +#include "rocksDBWrapper.hpp" +#include "vulnerabilityScanner.hpp" +#include + +/** + * @brief UpdateHotfixes class. + * + */ +class UpdateHotfixes final +{ +private: + /** + * @brief Get the list of hotfixes from the remediation data. + * + * @param remediations Pointer to the 'Remediations' object containing remediation information. + * @return Set of strings containing the list of hotfixes. + * + * @see Remediations - The data structure containing remediation information. + */ + static std::unordered_set hotfixesOnRemediations(const cve_v5::Remediations* remediations) + { + std::unordered_set hotfixes; + if (!remediations) + { + return hotfixes; + } + + std::for_each(remediations->windows()->begin(), + remediations->windows()->end(), + [&hotfixes](const cve_v5::Remediation* remediation) + { + for (const auto& hotfix : *remediation->anyOf()) + { + hotfixes.insert(hotfix->str()); + } + }); + + return hotfixes; + } + +public: + /** + * @brief Update the hotfix information in the RocksDB Database + * + * This function updates the hotfix information for a given vulnerability (CVE) in the RocksDB database. + * It does so by inverting the relationship between the CVE and hotfix information, + * going from 'CVE -> Hotfixes' to 'Hotfixes -> CVE'. + * + * + * @param data Pointer to the 'Entry' object containing vulnerability and remediation information. + * @param feedDatabase Pointer to the 'RocksDB' object for interacting with the database. + * + * @note The 'Entry' object should conform to the specified cve5 schema, including nested structures. + * @note The 'RocksDBWrapper' object should be properly initialized and connected to the target database. + * + * @details The function performs the following steps: + * 1. Attempts to access remediation data for Windows from the 'Entry' object. + * 2. If remediation data is not available (empty), it logs an error message and returns. + * 3. Extracts the CVE identifier (CVE-ID) from the 'Entry' object. + * 4. 
Iterates through the available remediation data for Windows: + * - Extracts the list of updates (CVEs) associated with the remediation. + * - Creates a FlatBuffers object containing the list of CVEs. + * - Serializes the FlatBuffers object into binary data. + * - Stores the binary data in the RocksDB database, using the Remediation as the key. + * 5. If an exception occurs during this process, it logs an error message. + * + * @note This function assumes a specific data structure in the 'Entry' object, including nested objects. + * Ensure that the 'Entry' object conforms to the expected schema to avoid runtime errors. + * + * @see Entry - The data structure containing CVE and remediation information. + * @see RocksDBWrapper - The utility class for interacting with RocksDB databases. + */ + static void storeVulnerabilityHotfixes(const cve_v5::Entry* data, Utils::IRocksDBWrapper* feedDatabase) + { + if (!data->containers()->cna() && !data->containers()->cna()->x_remediations()) + { + return; + } + + const auto remediations = data->containers()->cna()->x_remediations(); + + if (!remediations) + { + logError(WM_VULNSCAN_LOGTAG, "No remediations available."); + return; + } + + flatbuffers::Parser parser; + if (!parser.Parse(vulnerabilityRemediations_SCHEMA)) + { + throw std::runtime_error("Unable to parse schema: " + parser.error_); + } + + const auto currentCVE {data->cveMetadata()->cveId()->str()}; // CVE associated with the current remediations + rocksdb::PinnableSlice cveList; + + // 1. Get the CVEs list associated with each remediation from the database + // 2. Convert the list back into a JSON object + // 3. Add the current CVE to the list + // 4. Convert the JSON object back into a FlatBuffer object + // 5. Update the remediation with the new CVE list + for (const auto& hotfix : hotfixesOnRemediations(remediations)) + { + nlohmann::json jsonData; + + if (feedDatabase->get(hotfix, cveList, REMEDIATIONS_COLUMN)) + { + // There is already a list of CVEs associated with the hotfix + std::string strData; + flatbuffers::GenText(parser, reinterpret_cast(cveList.data()), &strData); + jsonData = nlohmann::json::parse(strData); + } + + jsonData.push_back(currentCVE); + + if (!parser.Parse(jsonData.dump().c_str())) + { + throw std::runtime_error("Unable to parse patched data: " + parser.error_); + } + + flatbuffers::FlatBufferBuilder builder; + rocksdb::Slice flatbufferData(reinterpret_cast(parser.builder_.GetBufferPointer()), + parser.builder_.GetSize()); + + // Update the remediation with the new CVE list + if (!feedDatabase->columnExists(HOTFIXES_COLUMN)) + { + feedDatabase->createColumn(HOTFIXES_COLUMN); + } + feedDatabase->put(hotfix, flatbufferData, HOTFIXES_COLUMN); + } + } + + /** + * @brief Deletes a hotfix from the database + * + * @param hotfix Hotfix to be removed. + * @param feedDatabase rocksDB wrapper instance. 
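+     * @note The call is a no-op when the hotfix name is empty or the hotfixes column has not been created yet.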
+ */ + static void removeHotfix(const std::string& hotfix, Utils::IRocksDBWrapper* feedDatabase) + { + if (hotfix.empty()) + { + return; + } + + if (!feedDatabase->columnExists(HOTFIXES_COLUMN)) + { + return; + } + + feedDatabase->delete_(hotfix, HOTFIXES_COLUMN); + } +}; + +#endif // _UPDATE_HOTFIXES_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp index 3c222d721c0..2e4f88e1acf 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp @@ -101,6 +101,11 @@ class TFactoryOrchestrator final break; case ScannerType::HotfixInsert: + //TODO orchestration = std::make_shared(databaseFeedManager); + // orchestration->setLast(std::make_shared(inventoryDatabase)); + // orchestration->setLast(std::make_shared(reportDispatcher)); + // orchestration->setLast(std::make_shared(indexerConnector)); + break; case ScannerType::HotfixDelete: break; case ScannerType::Os: @@ -108,7 +113,6 @@ class TFactoryOrchestrator final orchestration->setLast(std::make_shared(inventoryDatabase)); orchestration->setLast(std::make_shared(databaseFeedManager)); orchestration->setLast(std::make_shared(databaseFeedManager)); - orchestration->setLast(std::make_shared(reportDispatcher)); orchestration->setLast(std::make_shared(indexerConnector)); break; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp new file mode 100644 index 00000000000..64e94857bc1 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp @@ -0,0 +1,120 @@ +/* + * Wazuh Vulnerability scanner - Scan Orchestrator + * Copyright (C) 2015, Wazuh Inc. + * Nov 23, 2023. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#ifndef _OS_DATA_CACHE_HPP +#define _OS_DATA_CACHE_HPP + +#include "../policyManager/policyManager.hpp" +#include "cacheLRU.hpp" +#include "singleton.hpp" +#include "socketDBWrapper.hpp" +#include "wazuhDBQueryBuilder.hpp" +#include "wdbDataException.hpp" +#include +#include + +auto constexpr WDB_SOCKET {"queue/db/wdb"}; + +/** + * @brief Remediation structure. + */ +struct Remediation final +{ + std::vector hotfixes; ///< Installed hotfixes. +}; + +/** + * @brief remediationDataCache class. + * + * @note This class queries the Wazuh-DB to get the remediation data for a given agent, and stores it in a LRU cache + */ +class remediationDataCache final : public Singleton +{ +private: + LRUCache m_remediationData {PolicyManager::instance().getRemediationLRUSize()}; + std::shared_mutex m_mutex; + std::optional m_wdbSocketWrapper {std::nullopt}; + + Remediation getRemediationDataFromWdb(const std::string& agentId) + { + nlohmann::json response; + try + { + m_wdbSocketWrapper->query(WazuhDBQueryBuilder::builder().agentGetHotfixesCommand(agentId).build(), + response); + } + catch (const std::exception& e) + { + throw WdbDataException(e.what(), agentId); + } + + if (response.empty()) + { + throw WdbDataException("Empty response from Wazuh-DB", agentId); + } + + Remediation remediationData; + + // Iterate over the response and store the hotfixes. 
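+        // Each element of the response is expected to be an object exposing a "hotfix" field,
+        // which is what the at("hotfix") access below relies on.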
+ for (auto& hotfix : response) + { + remediationData.hotfixes.push_back(std::move(hotfix.at("hotfix"))); + } + + return remediationData; + } + +public: + /** + * @brief This method returns the Remediation data. + * @param agentId agent id. + * + * @return Remediation + */ + Remediation getRemediationData(const std::string& agentId) + { + std::shared_lock lock(m_mutex); + if (auto value = m_remediationData.getValue(agentId); value) + { + return *value; + } + + if (!m_wdbSocketWrapper) + { + try + { + m_wdbSocketWrapper.emplace(WDB_SOCKET); + } + catch (...) + { + throw WdbDataException("Error creating socketDBWrapper", agentId); + } + } + + const auto remediationData = getRemediationDataFromWdb(agentId); + m_remediationData.insertKey(agentId, remediationData); // Update the cache with the queried data. + + return remediationData; + } + + /** + * @brief This method sets the remediation data. + * + * @param agentId agent id. + * @param remediationData data to be inserted. + */ + void setRemediationData(const std::string& agentId, const Remediation& remediationData) + { + std::scoped_lock lock(m_mutex); + m_remediationData.insertKey(agentId, remediationData); + } +}; +#endif // _OS_DATA_CACHE_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index 3b27a5b8f4a..da191c36b3f 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -110,6 +110,7 @@ struct AgentData * * @tparam TOsDataCache os data cache type. * @tparam TGlobalData global data type. + * @tparam TRemediationDataCache remediation data cache type. */ template, typename TGlobalData = GlobalData> struct TScanContext final @@ -322,7 +323,7 @@ struct TScanContext final { m_type = ScannerType::HotfixDelete; } - + // TODO: add hotfix data m_osData = TOsDataCache::instance().getOsData(agentId().data()); } } @@ -439,6 +440,7 @@ struct TScanContext final m_affectedComponentType = AffectedComponentType::Hotfix; m_osData = TOsDataCache::instance().getOsData(agentId().data()); + // TODO: add hotfix data } } else if (syncMsg->data_type() == SyscollectorSynchronization::DataUnion_integrity_clear) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp index d44b18d9d31..359852576a2 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp @@ -391,7 +391,6 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS3) auto dbWrapper = std::make_unique(TEST_DESCRIPTION_DATABASE_PATH); rocksdb::Slice dbValue(reinterpret_cast(fbBuilder.GetBufferPointer()), fbBuilder.GetSize()); - dbWrapper->put(CVEID, dbValue); auto mockGetVulnerabiltyDescriptiveInformation = [&](const std::string_view cveId, diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateCVECandidates_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateCVECandidates_test.cpp index 753cd1d9aea..0522483d85a 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateCVECandidates_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateCVECandidates_test.cpp @@ -1204,8 +1204,6 @@ namespace NSUpdateCVECandidatesTest } // namespace NSUpdateCVECandidatesTest 
-using namespace NSUpdateCVECandidatesTest; - TEST_F(UpdateCVECandidatesTest, UpdateCVECandidateSuccess) { std::string cve5FlatbufferSchemaStr; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateCVERemediations_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateCVERemediations_test.hpp index aa94f5ede20..7b14a3b8f02 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateCVERemediations_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateCVERemediations_test.hpp @@ -9,8 +9,8 @@ * Foundation. */ -#ifndef _STORE_REMEDIATIONS_MODEL_TEST_H -#define _STORE_REMEDIATIONS_MODEL_TEST_H +#ifndef _UPDATE_CVE_REMEDIATIONS_TEST_HPP +#define _UPDATE_CVE_REMEDIATIONS_TEST_HPP #include "../../src/databaseFeedManager/databaseFeedManager.hpp" #include "../../src/databaseFeedManager/updateCVERemediations.hpp" #include "gmock/gmock.h" @@ -23,13 +23,13 @@ class UpdateCVERemediationsTest : public ::testing::Test { protected: /** - * @brief Construct a new storeRemediationsModel Tests object + * @brief Construct a new UpdateCVERemediationsTest object * */ UpdateCVERemediationsTest() = default; /** - * @brief Destroy the storeRemediationsModel Tests object + * @brief Destroy the UpdateCVERemediationsTest object * */ virtual ~UpdateCVERemediationsTest() = default; @@ -47,4 +47,4 @@ class UpdateCVERemediationsTest : public ::testing::Test void TearDown() override; }; -#endif //_STORE_REMEDIATIONS_MODEL_TEST_H +#endif //_UPDATE_CVE_REMEDIATIONS_TEST_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp new file mode 100644 index 00000000000..186b0b15125 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp @@ -0,0 +1,483 @@ +/* + * Wazuh storeRemediationsModel + * Copyright (C) 2015, Wazuh Inc. + * October 05, 2023. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#include "updateHotfixes_test.hpp" +#include "flatbuffers/idl.h" +#include "flatbuffers/util.h" +#include + +constexpr auto COMMON_DATABASE_DIR {"queue/vd"}; //< rocksDBWrapper = std::make_unique(DATABASE_PATH); + + // Call the updateRemediation function with the test data + EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); + + // Validate the data was inserted + std::vector hotfixes { + "KBT-800", + "KBT-1000", + "KBT-3000", + "KBT-4000", + "KBT-5000", + "KBT-6000", + "KBT-7000", + "KBT-8000", + "KBT-9000", + }; + + FlatbufferDataPair fbHotfix; + + const std::string expectedCveId = "CVE-1337-1234"; + for (const auto& hotfixId : hotfixes) + { + EXPECT_TRUE(rocksDBWrapper->get(hotfixId, fbHotfix.slice, HOTFIXES_COLUMN)); + fbHotfix.data = + const_cast(NSVulnerabilityScanner::Gethotfix(fbHotfix.slice.data())); + EXPECT_EQ(fbHotfix.data->CVEs()->size(), 1); + EXPECT_STREQ(fbHotfix.data->CVEs()->Get(0)->str(), expectedCveId); + } +} + +TEST_F(UpdateHotfixesTest, DeleteHotfixes) +{ + // Define schema variable and parse JSON object. + std::string schemaStr; + + // Load file with schema. + flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); + + // Parse schema. 
+ flatbuffers::Parser parser; + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES) && parser.Parse(JSON_CVE5_VALID_ONE_BLOCK); + + // Create a test Entry object with Windows remediations + auto jbuf = parser.builder_.GetBufferPointer(); + flatbuffers::Verifier jverifier(jbuf, parser.builder_.GetSize()); + cve_v5::VerifyEntryBuffer(jverifier); + auto entry = cve_v5::GetEntry(jbuf); + + // Create a mock RocksDBWrapper object + std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); + + // Call the updateRemediation function with the test data + EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); + + // Make sure the entries where inserted + FlatbufferDataPair remediationInfo; + std::vector hotfixes {"KBT-800", "KBT-1000", "KBT-3000"}; + for (const auto& hotfix : hotfixes) + { + EXPECT_TRUE(rocksDBWrapper->get(hotfix, remediationInfo.slice, HOTFIXES_COLUMN)); + } + + // Remove all but one hotfix + EXPECT_NO_THROW(UpdateHotfixes::removeHotfix(hotfixes.at(1), rocksDBWrapper.get())); + EXPECT_NO_THROW(UpdateHotfixes::removeHotfix(hotfixes.at(2), rocksDBWrapper.get())); + + // Check that the hotfixes were removed + EXPECT_TRUE(rocksDBWrapper->get(hotfixes.at(0), remediationInfo.slice, HOTFIXES_COLUMN)); + EXPECT_FALSE(rocksDBWrapper->get(hotfixes.at(1), remediationInfo.slice, HOTFIXES_COLUMN)); + EXPECT_FALSE(rocksDBWrapper->get(hotfixes.at(2), remediationInfo.slice, HOTFIXES_COLUMN)); +} diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp new file mode 100644 index 00000000000..870b5963349 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp @@ -0,0 +1,44 @@ +/* + * Wazuh storeRemediationsModel + * Copyright (C) 2015, Wazuh Inc. + * October 05, 2023. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#ifndef _UPDATE_HOTFIXES_TEST_HPP +#define _UPDATE_HOTFIXES_TEST_HPP +#include "../../src/databaseFeedManager/databaseFeedManager.hpp" +#include "../../src/databaseFeedManager/updateHotfixes.hpp" +#include "hotfixes_generated.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +/** + * @brief This test class contains unit tests for the UpdateHotfixes module. + */ +class UpdateHotfixesTest : public ::testing::Test +{ +protected: + // LCOV_EXCL_START + UpdateHotfixesTest() = default; + ~UpdateHotfixesTest() override = default; + // LCOV_EXCL_STOP + + /** + * @brief SetUp. + * + */ + void SetUp() override; + + /** + * @brief TearDown. 
+ * + */ + void TearDown() override; +}; + +#endif //_UPDATE_HOTFIXES_TEST_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/wazuhDBQuery/config.json b/src/wazuh_modules/vulnerability_scanner/testtool/wazuhDBQuery/config.json index a780fb7b52a..d8bcca06b2a 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/wazuhDBQuery/config.json +++ b/src/wazuh_modules/vulnerability_scanner/testtool/wazuhDBQuery/config.json @@ -1,86 +1,9 @@ { "queries": [ - [ - { - "method": "global" - }, - { - "method": "selectAll" - }, - { - "method": "fromTable", - "arg": "metadata" - } - ], - [ - { - "method": "global" - }, - { - "method": "selectAll" - }, - { - "method": "fromTable", - "arg": "agent" - }, - { - "method": "whereColumn", - "arg": "id" - }, - { - "method": "equalsTo", - "arg": "0" - } - ], - [ - { - "method": "agent", - "arg": "0" - }, - { - "method": "selectAll" - }, - { - "method": "fromTable", - "arg": "sys_programs" - }, - { - "method": "whereColumn", - "arg": "name" - }, - { - "method": "equalsTo", - "arg": "bash" - } - ], - [ - { - "method": "globalGetCommand", - "arg": "agent-info 0" - } - ], - [ - { - "method": "globalSelectCommand", - "arg": "groups" - } - ], - [ - { - "method": "agentGetOsInfoCommand", - "arg": "0" - } - ], [ { "method": "agentGetHotfixesCommand", - "arg": "0" - } - ], - [ - { - "method": "agentGetPackagesCommand", - "arg": "0" + "arg": "1" } ] ] From 45b2ff92d63340ba89a5ece38cdb1d53c50a7fa9 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Thu, 2 May 2024 12:34:27 -0300 Subject: [PATCH 083/419] CL: - Restored unwanted changes --- .../tests/unit/eventDetailsBuilder_test.cpp | 1 + .../tests/unit/updateCVECandidates_test.cpp | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp index 359852576a2..d44b18d9d31 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp @@ -391,6 +391,7 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS3) auto dbWrapper = std::make_unique(TEST_DESCRIPTION_DATABASE_PATH); rocksdb::Slice dbValue(reinterpret_cast(fbBuilder.GetBufferPointer()), fbBuilder.GetSize()); + dbWrapper->put(CVEID, dbValue); auto mockGetVulnerabiltyDescriptiveInformation = [&](const std::string_view cveId, diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateCVECandidates_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateCVECandidates_test.cpp index 0522483d85a..753cd1d9aea 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateCVECandidates_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateCVECandidates_test.cpp @@ -1204,6 +1204,8 @@ namespace NSUpdateCVECandidatesTest } // namespace NSUpdateCVECandidatesTest +using namespace NSUpdateCVECandidatesTest; + TEST_F(UpdateCVECandidatesTest, UpdateCVECandidateSuccess) { std::string cve5FlatbufferSchemaStr; From 4a36d37f90154d2b35824a32cc2f0badd138b7a3 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Thu, 2 May 2024 18:57:12 -0300 Subject: [PATCH 084/419] CL: - FactoryOrchestrator: Added hotfix insert orchestration - DatabaseFeedManager: Added cves to hotfix retrieve function - ScanContext: Added remediationDataCache --- .../include/vulnerabilityScanner.hpp | 2 +- .../databaseFeedManager.hpp | 10 +- .../databaseFeedManager/updateHotfixes.hpp 
| 48 +++++----- .../scanOrchestrator/factoryOrchestrator.hpp | 12 ++- .../src/scanOrchestrator/hotfixInsert.hpp | 95 +++++++++++++++++++ .../scanOrchestrator/remediationDataCache.hpp | 15 +-- .../src/scanOrchestrator/scanContext.hpp | 15 ++- 7 files changed, 154 insertions(+), 43 deletions(-) create mode 100644 src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp diff --git a/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp b/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp index 9031dc5863c..4c7b6de727b 100644 --- a/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp @@ -27,7 +27,7 @@ #include #include -constexpr auto HOTFIXES_COLUMN {"hotfixes_to_cves"}; +constexpr auto HOTFIXES_COLUMN {"hotfixes"}; constexpr auto REMEDIATIONS_COLUMN {"remediations"}; constexpr auto TRANSLATIONS_COLUMN {"translation"}; constexpr auto DESCRIPTIONS_COLUMN {"descriptions"}; diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp index 713561147ae..54d897d103b 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp @@ -410,22 +410,22 @@ class TDatabaseFeedManager final : public Observer } /** - * @brief Retrieves vulnerability remediation information from the database, for a given agent ID. + * @brief Retrieves the vulnerabilities information from the database, for a given hotfix ID. * - * This function retrieves remediation information associated with a given CVE ID from the underlying database and + * This function retrieves remediation information associated with a given hotfix from the underlying database and * stores it in the 'remediationInfo' object. * - * @param agentID The agent ID for which remediation information is requested. + * @param hotfix hotfix id for which remediation information is requested. * @param remediationInfo A reference to a `FlatbufferDataPair` object where the retrieved remediation * information will be stored. * * @throws std::runtime_error if the retrieved data from the database is invalid or not in the expected FlatBuffers * format. */ - void getAgentRemediation(const std::string& agentID, FlatbufferDataPair& remediationInfo) + void getHotfixVulnerabilities(const std::string& hotfix, FlatbufferDataPair& remediationInfo) { // If the remediation information is not found in the database, we return because there is no remediation. 
- if (auto result = m_feedDatabase->get(agentID, remediationInfo.slice, REMEDIATIONS_COLUMN); !result) + if (auto result = m_feedDatabase->get(hotfix, remediationInfo.slice, REMEDIATIONS_COLUMN); !result) { return; } diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp index 363974e312a..7b1a72052eb 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp @@ -13,8 +13,8 @@ #define _UPDATE_HOTFIXES_HPP #include "cve5_generated.h" -#include "hotfixes_generated.h" -#include "hotfixes_schema.h" +#include "hotfix_generated.h" +#include "hotfix_schema.h" #include "loggerHelper.h" #include "rocksDBWrapper.hpp" #include "vulnerabilityScanner.hpp" @@ -38,7 +38,7 @@ class UpdateHotfixes final static std::unordered_set hotfixesOnRemediations(const cve_v5::Remediations* remediations) { std::unordered_set hotfixes; - if (!remediations) + if (!remediations || !remediations->windows()) { return hotfixes; } @@ -47,7 +47,12 @@ class UpdateHotfixes final remediations->windows()->end(), [&hotfixes](const cve_v5::Remediation* remediation) { - for (const auto& hotfix : *remediation->anyOf()) + if (!remediation->anyOf()) + { + return; + } + + for (const auto hotfix : *remediation->anyOf()) { hotfixes.insert(hotfix->str()); } @@ -76,10 +81,10 @@ class UpdateHotfixes final * 2. If remediation data is not available (empty), it logs an error message and returns. * 3. Extracts the CVE identifier (CVE-ID) from the 'Entry' object. * 4. Iterates through the available remediation data for Windows: - * - Extracts the list of updates (CVEs) associated with the remediation. + * - Extracts the list of updates (CVEs) associated with the hotfix. * - Creates a FlatBuffers object containing the list of CVEs. * - Serializes the FlatBuffers object into binary data. - * - Stores the binary data in the RocksDB database, using the Remediation as the key. + * - Stores the binary data in the RocksDB database, using the 'hotfix' as the key. * 5. If an exception occurs during this process, it logs an error message. * * @note This function assumes a specific data structure in the 'Entry' object, including nested objects. 
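A minimal, self-contained sketch of the accumulation scheme described above (one entry per hotfix whose value grows with every CVE that lists it), with plain std:: containers standing in for RocksDB, FlatBuffers and the hotfixes column; every identifier below is hypothetical and the sketch is not part of the patch:

// Sketch: mirrors the "read existing list, append the current CVE, write back" flow
// documented above, using an in-memory map instead of the hotfixes column.
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

using HotfixToCves = std::unordered_map<std::string, std::vector<std::string>>;

void storeHotfixesSketch(const std::string& cveId,
                         const std::vector<std::string>& hotfixes,
                         HotfixToCves& column)
{
    for (const auto& hotfix : hotfixes)
    {
        // Key: the hotfix identifier; value: every CVE that names it as a remediation.
        column[hotfix].push_back(cveId);
    }
}

int main()
{
    HotfixToCves column;
    storeHotfixesSketch("CVE-2024-0001", {"KB0000001", "KB0000002"}, column);
    storeHotfixesSketch("CVE-2024-0002", {"KB0000002"}, column);

    std::cout << column["KB0000002"].size() << '\n'; // Prints 2: both CVEs share this hotfix.
    return 0;
}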
@@ -90,25 +95,26 @@ class UpdateHotfixes final */ static void storeVulnerabilityHotfixes(const cve_v5::Entry* data, Utils::IRocksDBWrapper* feedDatabase) { - if (!data->containers()->cna() && !data->containers()->cna()->x_remediations()) - { - return; - } - const auto remediations = data->containers()->cna()->x_remediations(); - if (!remediations) { - logError(WM_VULNSCAN_LOGTAG, "No remediations available."); + logDebug2(WM_VULNSCAN_LOGTAG, "No remediations available."); return; } - flatbuffers::Parser parser; - if (!parser.Parse(vulnerabilityRemediations_SCHEMA)) + flatbuffers::IDLOptions options; + options.strict_json = true; + flatbuffers::Parser parser(options); + if (!parser.Parse(hotfix_SCHEMA)) { throw std::runtime_error("Unable to parse schema: " + parser.error_); } + if (!feedDatabase->columnExists(HOTFIXES_COLUMN)) + { + feedDatabase->createColumn(HOTFIXES_COLUMN); + } + const auto currentCVE {data->cveMetadata()->cveId()->str()}; // CVE associated with the current remediations rocksdb::PinnableSlice cveList; @@ -121,7 +127,7 @@ class UpdateHotfixes final { nlohmann::json jsonData; - if (feedDatabase->get(hotfix, cveList, REMEDIATIONS_COLUMN)) + if (feedDatabase->get(hotfix, cveList, HOTFIXES_COLUMN)) { // There is already a list of CVEs associated with the hotfix std::string strData; @@ -129,11 +135,11 @@ class UpdateHotfixes final jsonData = nlohmann::json::parse(strData); } - jsonData.push_back(currentCVE); - + // Add the current CVE to the list + jsonData["CVEs"].push_back(currentCVE); if (!parser.Parse(jsonData.dump().c_str())) { - throw std::runtime_error("Unable to parse patched data: " + parser.error_); + throw std::runtime_error("Unable to parse json data: " + parser.error_); } flatbuffers::FlatBufferBuilder builder; @@ -141,10 +147,6 @@ class UpdateHotfixes final parser.builder_.GetSize()); // Update the remediation with the new CVE list - if (!feedDatabase->columnExists(HOTFIXES_COLUMN)) - { - feedDatabase->createColumn(HOTFIXES_COLUMN); - } feedDatabase->put(hotfix, flatbufferData, HOTFIXES_COLUMN); } } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp index 2e4f88e1acf..12aff4e0c1a 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp @@ -25,6 +25,7 @@ #include "eventPackageAlertDetailsBuilder.hpp" #include "eventSendReport.hpp" #include "globalSyncInventory.hpp" +#include "hotfixInsert.hpp" #include "osScanner.hpp" #include "packageScanner.hpp" #include "resultIndexer.hpp" @@ -57,7 +58,8 @@ template + typename TGlobalSyncInventory = GlobalSyncInventory, + typename THotfixInsert = HotfixInsert> class TFactoryOrchestrator final { private: @@ -101,10 +103,10 @@ class TFactoryOrchestrator final break; case ScannerType::HotfixInsert: - //TODO orchestration = std::make_shared(databaseFeedManager); - // orchestration->setLast(std::make_shared(inventoryDatabase)); - // orchestration->setLast(std::make_shared(reportDispatcher)); - // orchestration->setLast(std::make_shared(indexerConnector)); + orchestration = std::make_shared(databaseFeedManager); + orchestration->setLast(std::make_shared(inventoryDatabase)); + orchestration->setLast(std::make_shared(reportDispatcher)); + orchestration->setLast(std::make_shared(indexerConnector)); break; case ScannerType::HotfixDelete: break; diff --git 
a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp new file mode 100644 index 00000000000..24ec4b43d1a --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp @@ -0,0 +1,95 @@ +/* + * Wazuh Vulnerability scanner - Scan Orchestrator + * Copyright (C) 2015, Wazuh Inc. + * May 2, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#ifndef _HOTFIX_INSERT_HPP +#define _HOTFIX_INSERT_HPP + +#include "chainOfResponsability.hpp" +#include "databaseFeedManager.hpp" +#include "inventorySync.hpp" +#include "remediationDataCache.hpp" +#include "scanContext.hpp" + +/** + * @brief TEventInsertInventory class. + * This class is in charge of inserting the inventory entries for a given agent and affected component type. + * It receives the scan context and the inventory database and returns the scan context with the inventory updated. + * The affected component type can be Os or Package. + * Os: Insert inventory of the OS for the agent. + * Package: Insert a package into the inventory for the agent. + * + * @tparam TScanContext scan context type. + */ +template +class THotfixInsert final : public AbstractHandler> +{ +private: + std::shared_ptr m_databaseFeedManager; + +public: + // LCOV_EXCL_START + /** + * @brief EventInsertInventory constructor. + * + * @param inventoryDatabase Inventory database. + */ + explicit THotfixInsert(std::shared_ptr& databaseFeedManager) + : m_databaseFeedManager(databaseFeedManager) + { + } + // LCOV_EXCL_STOP + + /** + * @brief Handles request and passes control to the next step of the chain. + * + * @param data Scan context. + * @return std::shared_ptr Abstract handler. + */ + std::shared_ptr handleRequest(std::shared_ptr data) override + { + // Get the list of CVEs remediated by the installed hotfix + + + // std::string key; + // key.append(data->agentNodeName()); + // key.append("_"); + // key.append(data->agentId()); + // key.append("_"); + + // // Create the key for the inventory. 
+ // key.append(TInventorySync::affectedComponentKey(data)); + + // const auto& column = AFFECTED_COMPONENT_COLUMNS.at(data->affectedComponentType()); + // std::string value; + + // if (TInventorySync::m_inventoryDatabase.get(key, value, column)) + // { + // auto listCve = Utils::split(value, ','); + // for (const auto& cve : listCve) + // { + // std::string elementKey; + // elementKey.append(key); + // elementKey.append("_"); + // elementKey.append(cve); + + // data->m_elements.emplace(cve, TInventorySync::buildElement("DELETED", elementKey)); + // } + // logDebug2(WM_VULNSCAN_LOGTAG, "Deleting %s agent key: %s", column.c_str(), key.c_str()); + // TInventorySync::m_inventoryDatabase.delete_(key, column); + // } + + return AbstractHandler>::handleRequest(std::move(data)); + } +}; + +using HotfixInsert = THotfixInsert<>; + +#endif // _HOTFIX_INSERT_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp index 64e94857bc1..cd45ee7524c 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp @@ -1,7 +1,7 @@ /* * Wazuh Vulnerability scanner - Scan Orchestrator * Copyright (C) 2015, Wazuh Inc. - * Nov 23, 2023. + * May 2, 2024. * * This program is free software; you can redistribute it * and/or modify it under the terms of the GNU General Public @@ -9,8 +9,8 @@ * Foundation. */ -#ifndef _OS_DATA_CACHE_HPP -#define _OS_DATA_CACHE_HPP +#ifndef _REMEDIATION_DATA_CACHE_HPP +#define _REMEDIATION_DATA_CACHE_HPP #include "../policyManager/policyManager.hpp" #include "cacheLRU.hpp" @@ -21,7 +21,10 @@ #include #include -auto constexpr WDB_SOCKET {"queue/db/wdb"}; +namespace RemediationDataCacheConstants +{ + auto constexpr WDB_SOCKET {"queue/db/wdb"}; +} /** * @brief Remediation structure. @@ -91,7 +94,7 @@ class remediationDataCache final : public Singleton { try { - m_wdbSocketWrapper.emplace(WDB_SOCKET); + m_wdbSocketWrapper.emplace(RemediationDataCacheConstants::WDB_SOCKET); } catch (...) { @@ -117,4 +120,4 @@ class remediationDataCache final : public Singleton m_remediationData.insertKey(agentId, remediationData); } }; -#endif // _OS_DATA_CACHE_HPP +#endif // _REMEDIATION_DATA_CACHE_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index da191c36b3f..af6f0d9f12d 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -18,6 +18,7 @@ #include "globalData.hpp" #include "logging_helper.h" #include "osDataCache.hpp" +#include "remediationDataCache.hpp" #include #include #include @@ -112,7 +113,9 @@ struct AgentData * @tparam TGlobalData global data type. * @tparam TRemediationDataCache remediation data cache type. 
*/ -template, typename TGlobalData = GlobalData> +template, + typename TGlobalData = GlobalData, + typename TRemediationDataCache = remediationDataCache> struct TScanContext final { private: @@ -323,7 +326,7 @@ struct TScanContext final { m_type = ScannerType::HotfixDelete; } - // TODO: add hotfix data + m_remediationData = TRemediationDataCache::instance().getRemediationData(agentId().data()); m_osData = TOsDataCache::instance().getOsData(agentId().data()); } } @@ -439,8 +442,8 @@ struct TScanContext final // Set the affected component type m_affectedComponentType = AffectedComponentType::Hotfix; + m_remediationData = TRemediationDataCache::instance().getRemediationData(agentId().data()); m_osData = TOsDataCache::instance().getOsData(agentId().data()); - // TODO: add hotfix data } } else if (syncMsg->data_type() == SyscollectorSynchronization::DataUnion_integrity_clear) @@ -1488,6 +1491,12 @@ struct TScanContext final */ Os m_osData {}; + /** + * @brief Remediation data. + * + */ + Remediation m_remediationData {}; + /** * @brief Agent id. * From 16a75810bd8998d61f1387c6e1a88e7d46e18c0d Mon Sep 17 00:00:00 2001 From: pereyra-m Date: Thu, 2 May 2024 22:19:16 +0000 Subject: [PATCH 085/419] Verify hotfixes for packages' scan --- .../src/scanOrchestrator/packageScanner.hpp | 616 ++++++++++-------- 1 file changed, 360 insertions(+), 256 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp index 45f745763c4..4e7b5b6aa12 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp @@ -14,6 +14,7 @@ #include "chainOfResponsability.hpp" #include "databaseFeedManager.hpp" +#include "remediationDataCache.hpp" #include "scanContext.hpp" #include "scannerHelper.hpp" #include "versionMatcher/versionMatcher.hpp" @@ -245,312 +246,415 @@ class TPackageScanner final : public AbstractHandler& databaseFeedManager) - : m_databaseFeedManager(databaseFeedManager) - { - } - // LCOV_EXCL_STOP - - /** - * @brief Handles request and passes control to the next step of the chain. - * - * @param data Scan context. - * @return std::shared_ptr Abstract handler. - */ - std::shared_ptr handleRequest(std::shared_ptr data) override + bool platformVerify(const std::string& cnaName, + const packageData& package, + const NSVulnerabilityScanner::ScanVulnerabilityCandidate& callbackData, + std::shared_ptr contextData) { - auto vulnerabilityScan = [&](const std::string& cnaName, - const packageData& package, - const NSVulnerabilityScanner::ScanVulnerabilityCandidate& callbackData) + // if the platforms are not empty, we need to check if the platform is in the list. + if (callbackData.platforms()) { - try + bool matchPlatform {false}; + for (const auto& platform : *callbackData.platforms()) { - // if the platforms are not empty, we need to check if the platform is in the list. - if (callbackData.platforms()) + const std::string platformValue {platform->str()}; + // if the platform is a CPE, we need to parse it and check if the product is the same as the os + // cpe. 
+ if (ScannerHelper::isCPE(platformValue)) { - bool matchPlatform {false}; - for (const auto& platform : *callbackData.platforms()) + const auto cpe {ScannerHelper::parseCPE(platformValue)}; + if (cpe.part.compare("o") == 0) { - const std::string platformValue {platform->str()}; - // if the platform is a CPE, we need to parse it and check if the product is the same as the os - // cpe. - if (ScannerHelper::isCPE(platformValue)) - { - const auto cpe {ScannerHelper::parseCPE(platformValue)}; - if (cpe.part.compare("o") == 0) - { - if (ScannerHelper::compareCPE(cpe, ScannerHelper::parseCPE(data->osCPEName().data()))) - { - logDebug2(WM_VULNSCAN_LOGTAG, - "The platform is in the list based on CPE comparison for " - "Package: %s, Version: %s, CVE: %s, Content platform CPE: %s OS CPE: %s", - package.name.c_str(), - package.version.c_str(), - callbackData.cveId()->str().c_str(), - platformValue.c_str(), - data->osCPEName().data()); - matchPlatform = true; - break; - } - } - } - // If the platform is not a CPE, it is a string, at the moment, we only support the os code - // name. This is used mainly for debian and ubuntu platforms. - else + if (ScannerHelper::compareCPE(cpe, ScannerHelper::parseCPE(contextData->osCPEName().data()))) { - if (platformValue.compare(data->osCodeName()) == 0) - { - logDebug2( - WM_VULNSCAN_LOGTAG, - "The platform is in the list based on OS code name comparison for " - "Package: %s, Version: %s, CVE: %s, Content OS code name: %s, OS code name: %s", - package.name.c_str(), - package.version.c_str(), - callbackData.cveId()->str().c_str(), - platformValue.c_str(), - data->osCodeName().data()); - matchPlatform = true; - break; - } + logDebug2(WM_VULNSCAN_LOGTAG, + "The platform is in the list based on CPE comparison for " + "Package: %s, Version: %s, CVE: %s, Content platform CPE: %s OS CPE: %s", + package.name.c_str(), + package.version.c_str(), + callbackData.cveId()->str().c_str(), + platformValue.c_str(), + contextData->osCPEName().data()); + matchPlatform = true; + break; } } - - if (!matchPlatform) + } + // If the platform is not a CPE, it is a string, at the moment, we only support the os code + // name. This is used mainly for debian and ubuntu platforms. 
+ else + { + if (platformValue.compare(contextData->osCodeName()) == 0) { logDebug2(WM_VULNSCAN_LOGTAG, - "The platform is not in the list for Package: %s, Version: %s, CVE: %s, OS CPE: %s, " - "OS code name: %s", + "The platform is in the list based on OS code name comparison for " + "Package: %s, Version: %s, CVE: %s, Content OS code name: %s, OS code name: %s", package.name.c_str(), package.version.c_str(), callbackData.cveId()->str().c_str(), - data->osCPEName().data(), - data->osCodeName().data()); - return false; + platformValue.c_str(), + contextData->osCodeName().data()); + matchPlatform = true; + break; } } + } + + if (!matchPlatform) + { + logDebug2(WM_VULNSCAN_LOGTAG, + "The platform is not in the list for Package: %s, Version: %s, CVE: %s, OS CPE: %s, " + "OS code name: %s", + package.name.c_str(), + package.version.c_str(), + callbackData.cveId()->str().c_str(), + contextData->osCPEName().data(), + contextData->osCodeName().data()); + return false; + } + } + + return true; + } - if (callbackData.vendor()) + bool vendorVerify(const std::string& cnaName, + const packageData& package, + const NSVulnerabilityScanner::ScanVulnerabilityCandidate& callbackData, + std::shared_ptr contextData) + { + if (callbackData.vendor()) + { + if (package.vendor.empty() || " " == package.vendor) + { + logDebug2(WM_VULNSCAN_LOGTAG, + "The vendor information is not available for Package: %s, Version: %s, " + "CVE: %s, Content vendor: %s", + package.name.c_str(), + package.version.c_str(), + callbackData.cveId()->str().c_str(), + callbackData.vendor()->str().c_str()); + return false; + } + else + { + if (package.vendor.compare(callbackData.vendor()->str()) != 0) + { + logDebug2(WM_VULNSCAN_LOGTAG, + "The vendor is not the same for Package: %s, Version: %s, " + "CVE: %s, Content vendor: %s, Package vendor: %s", + package.name.c_str(), + package.version.c_str(), + callbackData.cveId()->str().c_str(), + callbackData.vendor()->str().c_str(), + package.vendor.c_str()); + return false; + } + else + { + logDebug2(WM_VULNSCAN_LOGTAG, + "Vendor match for Package: %s, Version: %s, " + "CVE: %s, Vendor: %s", + package.name.c_str(), + package.version.c_str(), + callbackData.cveId()->str().c_str(), + package.vendor.c_str()); + } + } + } + + return true; + } + + bool versionMatch(const std::string& cnaName, + const packageData& package, + const NSVulnerabilityScanner::ScanVulnerabilityCandidate& callbackData, + std::shared_ptr contextData) + { + std::variant objectType = VersionMatcherStrategy::Unspecified; + if (const auto it = m_packageMap.find(package.format); it != m_packageMap.end()) + { + objectType = it->second; + } + + for (const auto& version : *callbackData.versions()) + { + const std::string packageVersion {package.version}; + std::string versionString {version->version() ? version->version()->str() : ""}; + std::string versionStringLessThan {version->lessThan() ? version->lessThan()->str() : ""}; + std::string versionStringLessThanOrEqual {version->lessThanOrEqual() ? version->lessThanOrEqual()->str() + : ""}; + + logDebug2(WM_VULNSCAN_LOGTAG, + "Scanning package - '%s' (Installed Version: %s, Security Vulnerability: %s). Identified " + "vulnerability: " + "Version: %s. Required Version Threshold: %s. 
Required Version Threshold (or Equal): %s.", + package.name.c_str(), + packageVersion.c_str(), + callbackData.cveId()->str().c_str(), + versionString.c_str(), + versionStringLessThan.c_str(), + versionStringLessThanOrEqual.c_str()); + + // No version range specified, check if the installed version is equal to the required version. + if (versionStringLessThan.empty() && versionStringLessThanOrEqual.empty()) + { + if (VersionMatcher::compare(packageVersion, versionString, objectType) == + VersionComparisonResult::A_EQUAL_B) { - if (package.vendor.empty() || " " == package.vendor) + // Version match found, the package status is defined by the vulnerability status. + if (version->status() == NSVulnerabilityScanner::Status::Status_affected) { - logDebug2(WM_VULNSCAN_LOGTAG, - "The vendor information is not available for Package: %s, Version: %s, " - "CVE: %s, Content vendor: %s", + logDebug1(WM_VULNSCAN_LOGTAG, + "Match found, the package '%s', is vulnerable to '%s'. Current version: '%s' is " + "equal to '%s'. - Agent '%s' (ID: '%s', Version: '%s').", package.name.c_str(), - package.version.c_str(), callbackData.cveId()->str().c_str(), - callbackData.vendor()->str().c_str()); - return false; + packageVersion.c_str(), + versionString.c_str(), + contextData->agentName().data(), + contextData->agentId().data(), + contextData->agentVersion().data()); + + contextData->m_elements[callbackData.cveId()->str()] = nlohmann::json::object(); + contextData->m_matchConditions[callbackData.cveId()->str()] = {std::move(versionString), + MatchRuleCondition::Equal}; + return true; + } + + return false; + } + } + else + { + // Version range specified + + // Check if the installed version satisfies the lower bound of the version range. + auto lowerBoundMatch = false; + if (versionString.compare("0") == 0) + { + lowerBoundMatch = true; + } + else + { + const auto matchResult = VersionMatcher::compare(packageVersion, versionString, objectType); + lowerBoundMatch = matchResult == VersionComparisonResult::A_GREATER_THAN_B || + matchResult == VersionComparisonResult::A_EQUAL_B; + } + + if (lowerBoundMatch) + { + // Check if the installed version satisfies the upper bound of the version range. + auto upperBoundMatch = false; + if (!versionStringLessThan.empty() && versionStringLessThan.compare("*") != 0) + { + const auto matchResult = + VersionMatcher::compare(packageVersion, versionStringLessThan, objectType); + upperBoundMatch = matchResult == VersionComparisonResult::A_LESS_THAN_B; + } + else if (!versionStringLessThanOrEqual.empty()) + { + const auto matchResult = + VersionMatcher::compare(packageVersion, versionStringLessThanOrEqual, objectType); + upperBoundMatch = matchResult == VersionComparisonResult::A_LESS_THAN_B || + matchResult == VersionComparisonResult::A_EQUAL_B; } else { - if (package.vendor.compare(callbackData.vendor()->str()) != 0) + upperBoundMatch = false; + } + + if (upperBoundMatch) + { + // Version match found, the package status is defined by the vulnerability status. + if (version->status() == NSVulnerabilityScanner::Status::Status_affected) { - logDebug2(WM_VULNSCAN_LOGTAG, - "The vendor is not the same for Package: %s, Version: %s, " - "CVE: %s, Content vendor: %s, Package vendor: %s", + logDebug1(WM_VULNSCAN_LOGTAG, + "Match found, the package '%s', is vulnerable to '%s'. Current version: " + "'%s' (" + "less than '%s' or equal to '%s'). 
- Agent '%s' (ID: '%s', Version: '%s').", package.name.c_str(), - package.version.c_str(), callbackData.cveId()->str().c_str(), - callbackData.vendor()->str().c_str(), - package.vendor.c_str()); - return false; + packageVersion.c_str(), + versionStringLessThan.c_str(), + versionStringLessThanOrEqual.c_str(), + contextData->agentName().data(), + contextData->agentId().data(), + contextData->agentVersion().data()); + + contextData->m_elements[callbackData.cveId()->str()] = nlohmann::json::object(); + + if (!versionStringLessThanOrEqual.empty()) + { + contextData->m_matchConditions[callbackData.cveId()->str()] = { + std::move(versionStringLessThanOrEqual), MatchRuleCondition::LessThanOrEqual}; + } + else + { + contextData->m_matchConditions[callbackData.cveId()->str()] = { + std::move(versionStringLessThan), MatchRuleCondition::LessThan}; + } + return true; } else { logDebug2(WM_VULNSCAN_LOGTAG, - "Vendor match for Package: %s, Version: %s, " - "CVE: %s, Vendor: %s", + "No match due to default status for Package: %s, Version: %s while scanning " + "for Vulnerability: %s, " + "Installed Version: %s, Required Version Threshold: %s, Required Version " + "Threshold (or Equal): %s", package.name.c_str(), - package.version.c_str(), + packageVersion.c_str(), callbackData.cveId()->str().c_str(), - package.vendor.c_str()); + versionString.c_str(), + versionStringLessThan.c_str(), + versionStringLessThanOrEqual.c_str()); + + return false; } } } + } + } - std::variant objectType = - VersionMatcherStrategy::Unspecified; - if (const auto it = m_packageMap.find(package.format); it != m_packageMap.end()) - { - objectType = it->second; - } + // No match found, the default status defines the package status. + if (callbackData.defaultStatus() == NSVulnerabilityScanner::Status::Status_affected) + { + logDebug1(WM_VULNSCAN_LOGTAG, + "Match found, the package '%s' is vulnerable to '%s' due to default status. - Agent " + "'%s' (ID: '%s', Version: '%s').", + package.name.c_str(), + callbackData.cveId()->str().c_str(), + contextData->agentName().data(), + contextData->agentId().data(), + contextData->agentVersion().data()); + + contextData->m_elements[callbackData.cveId()->str()] = nlohmann::json::object(); + contextData->m_matchConditions[callbackData.cveId()->str()] = {"", MatchRuleCondition::DefaultStatus}; + return true; + } - for (const auto& version : *callbackData.versions()) - { - const std::string packageVersion {package.version}; - std::string versionString {version->version() ? version->version()->str() : ""}; - std::string versionStringLessThan {version->lessThan() ? version->lessThan()->str() : ""}; - std::string versionStringLessThanOrEqual { - version->lessThanOrEqual() ? version->lessThanOrEqual()->str() : ""}; + logDebug2(WM_VULNSCAN_LOGTAG, + "No match due to default status for Package: %s, Version: %s while scanning for Vulnerability: %s", + package.name.c_str(), + package.version.c_str(), + callbackData.cveId()->str().c_str()); - logDebug2(WM_VULNSCAN_LOGTAG, - "Scanning package - '%s' (Installed Version: %s, Security Vulnerability: %s). Identified " - "vulnerability: " - "Version: %s. Required Version Threshold: %s. Required Version Threshold (or Equal): %s.", - package.name.c_str(), - packageVersion.c_str(), - callbackData.cveId()->str().c_str(), - versionString.c_str(), - versionStringLessThan.c_str(), - versionStringLessThanOrEqual.c_str()); + return false; + } - // No version range specified, check if the installed version is equal to the required version. 
- if (versionStringLessThan.empty() && versionStringLessThanOrEqual.empty()) - { - if (VersionMatcher::compare(packageVersion, versionString, objectType) == - VersionComparisonResult::A_EQUAL_B) - { - // Version match found, the package status is defined by the vulnerability status. - if (version->status() == NSVulnerabilityScanner::Status::Status_affected) - { - logDebug1( - WM_VULNSCAN_LOGTAG, - "Match found, the package '%s', is vulnerable to '%s'. Current version: '%s' is " - "equal to '%s'. - Agent '%s' (ID: '%s', Version: '%s').", - package.name.c_str(), - callbackData.cveId()->str().c_str(), - packageVersion.c_str(), - versionString.c_str(), - data->agentName().data(), - data->agentId().data(), - data->agentVersion().data()); - - data->m_elements[callbackData.cveId()->str()] = nlohmann::json::object(); - data->m_matchConditions[callbackData.cveId()->str()] = {std::move(versionString), - MatchRuleCondition::Equal}; - return true; - } + bool packageHotfixSolved(const std::string& cnaName, + const packageData& package, + const NSVulnerabilityScanner::ScanVulnerabilityCandidate& callbackData, + std::shared_ptr contextData) + { + FlatbufferDataPair remediations {}; + m_databaseFeedManager->getVulnerabilityRemediation(callbackData.cveId()->str(), remediations); - return false; - } - } - else - { - // Version range specified + if (remediations.data == nullptr || remediations.data->updates() == nullptr || + remediations.data->updates()->size() == 0) + { + return false; + } - // Check if the installed version satisfies the lower bound of the version range. - auto lowerBoundMatch = false; - if (versionString.compare("0") == 0) - { - lowerBoundMatch = true; - } - else - { - const auto matchResult = VersionMatcher::compare(packageVersion, versionString, objectType); - lowerBoundMatch = matchResult == VersionComparisonResult::A_GREATER_THAN_B || - matchResult == VersionComparisonResult::A_EQUAL_B; - } + // Replace this call + auto agentHotfixes = contextData->m_remediationData.getRemediationData(contextData->agentId()); - if (lowerBoundMatch) - { - // Check if the installed version satisfies the upper bound of the version range. - auto upperBoundMatch = false; - if (!versionStringLessThan.empty() && versionStringLessThan.compare("*") != 0) - { - const auto matchResult = - VersionMatcher::compare(packageVersion, versionStringLessThan, objectType); - upperBoundMatch = matchResult == VersionComparisonResult::A_LESS_THAN_B; - } - else if (!versionStringLessThanOrEqual.empty()) - { - const auto matchResult = - VersionMatcher::compare(packageVersion, versionStringLessThanOrEqual, objectType); - upperBoundMatch = matchResult == VersionComparisonResult::A_LESS_THAN_B || - matchResult == VersionComparisonResult::A_EQUAL_B; - } - else - { - upperBoundMatch = false; - } + if (agentHotfixes.size() == 0) + { + logDebug2( + WM_VULNSCAN_LOGTAG, "No remediations for agent '%s' have been found.", contextData->agentId().data()); - if (upperBoundMatch) - { - // Version match found, the package status is defined by the vulnerability status. - if (version->status() == NSVulnerabilityScanner::Status::Status_affected) - { - logDebug1( - WM_VULNSCAN_LOGTAG, - "Match found, the package '%s', is vulnerable to '%s'. Current version: " - "'%s' (" - "less than '%s' or equal to '%s'). 
- Agent '%s' (ID: '%s', Version: '%s').", - package.name.c_str(), - callbackData.cveId()->str().c_str(), - packageVersion.c_str(), - versionStringLessThan.c_str(), - versionStringLessThanOrEqual.c_str(), - data->agentName().data(), - data->agentId().data(), - data->agentVersion().data()); - - data->m_elements[callbackData.cveId()->str()] = nlohmann::json::object(); - - if (!versionStringLessThanOrEqual.empty()) - { - data->m_matchConditions[callbackData.cveId()->str()] = { - std::move(versionStringLessThanOrEqual), - MatchRuleCondition::LessThanOrEqual}; - } - else - { - data->m_matchConditions[callbackData.cveId()->str()] = { - std::move(versionStringLessThan), MatchRuleCondition::LessThan}; - } - return true; - } - else - { - logDebug2( - WM_VULNSCAN_LOGTAG, - "No match due to default status for Package: %s, Version: %s while scanning " - "for Vulnerability: %s, " - "Installed Version: %s, Required Version Threshold: %s, Required Version " - "Threshold (or Equal): %s", - package.name.c_str(), - packageVersion.c_str(), - callbackData.cveId()->str().c_str(), - versionString.c_str(), - versionStringLessThan.c_str(), - versionStringLessThanOrEqual.c_str()); - - return false; - } - } - } - } + return false; + } + + for (const auto& remediation : *(remediations.data->updates())) + { + + if (std::find_if(agentHotfixes.hotfixes.begin(), + agentHotfixes.hotfixes.end(), + [&](const auto& hotfix) + { return remediation->str() == hotfix->str(); }) != agentHotfixes.hotfixes.end()) + { + logDebug2(WM_VULNSCAN_LOGTAG, + "Remediation '%s' for package '%s' on agent '%s' that solves CVE '%s' has been found.", + remediation->str().c_str(), + package.name.c_str(), + contextData->agentId().data(), + callbackData.cveId()->str().c_str()); + + contextData->m_elements.erase(callbackData.cveId()->str()); + contextData->m_matchConditions.erase(callbackData.cveId()->str()); + return true; + } + } + + logDebug2(WM_VULNSCAN_LOGTAG, + "No remediation for package '%s' on agent '%s' that solves CVE '%s' has been found.", + package.name.c_str(), + contextData->agentId().data(), + callbackData.cveId()->str().c_str()); + + return false; + } + +public: + // LCOV_EXCL_START + /** + * @brief PackageScanner constructor. + * + * @param databaseFeedManager Database feed manager. + */ + explicit TPackageScanner(std::shared_ptr& databaseFeedManager) + : m_databaseFeedManager(databaseFeedManager) + { + } + // LCOV_EXCL_STOP + + /** + * @brief Handles request and passes control to the next step of the chain. + * + * @param data Scan context. + * @return std::shared_ptr Abstract handler. + */ + std::shared_ptr handleRequest(std::shared_ptr data) override + { + auto vulnerabilityScan = [&](const std::string& cnaName, + const packageData& package, + const NSVulnerabilityScanner::ScanVulnerabilityCandidate& callbackData) + { + try + { + /* Preliminary verifications before version matching. We return if the basic conditions are not met. */ + + // If the candidate contains platforms, verify if agent OS is in the list. + if (!platformVerify(cnaName, package, callbackData, data)) + { + return false; } - // No match found, the default status defines the package status. - if (callbackData.defaultStatus() == NSVulnerabilityScanner::Status::Status_affected) + // If the candidate contains a vendor, verify if package vendor matches. + if (!vendorVerify(cnaName, package, callbackData, data)) { - logDebug1(WM_VULNSCAN_LOGTAG, - "Match found, the package '%s' is vulnerable to '%s' due to default status. 
- Agent " - "'%s' (ID: '%s', Version: '%s').", - package.name.c_str(), - callbackData.cveId()->str().c_str(), - data->agentName().data(), - data->agentId().data(), - data->agentVersion().data()); + return false; + } + + /* Real version analysis of the candidate. */ + if (versionMatch(cnaName, package, callbackData, data)) + { + // The candidate version matches the package. Post-match filtering. + if (data->osPlatform().compare("windows") == 0) + { + if (packageHotfixSolved(cnaName, package, callbackData, data)) + { + // An installed hotfix solves the vulnerability. + return false; + } + } - data->m_elements[callbackData.cveId()->str()] = nlohmann::json::object(); - data->m_matchConditions[callbackData.cveId()->str()] = {"", MatchRuleCondition::DefaultStatus}; return true; } - logDebug2( - WM_VULNSCAN_LOGTAG, - "No match due to default status for Package: %s, Version: %s while scanning for Vulnerability: %s", - package.name.c_str(), - package.version.c_str(), - callbackData.cveId()->str().c_str()); - + /* The candidate for this CVE is discarded. */ return false; } catch (const std::exception& e) From ddae5198389adc292302c39fd19db158f389f37d Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Thu, 2 May 2024 21:27:23 -0300 Subject: [PATCH 086/419] CL: - internal_option: Added remediation LRU cache configuration - remediationDataCache: Change data type from vector to set - packageScanner: Aligned with newer changes --- etc/internal_options.conf | 1 + .../src/scanOrchestrator/packageScanner.hpp | 11 ++---- .../scanOrchestrator/remediationDataCache.hpp | 5 +-- .../src/scanOrchestrator/scanContext.hpp | 34 +++++++++++++++++++ 4 files changed, 40 insertions(+), 11 deletions(-) diff --git a/etc/internal_options.conf b/etc/internal_options.conf index 7818716154a..db2bc16f92e 100755 --- a/etc/internal_options.conf +++ b/etc/internal_options.conf @@ -426,6 +426,7 @@ auth.timeout_microseconds=0 # Vulnerability detector LRUs size vulnerability-detection.translation_lru_size=2048 vulnerability-detection.osdata_lru_size=1000 +vulnerability-detection.remediation_lru_size=2048 # Vulnerability detector - Enable or disable the scan manager # 0. 
Enabled diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp index 4e7b5b6aa12..5169a78979b 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp @@ -556,10 +556,7 @@ class TPackageScanner final : public AbstractHandlerm_remediationData.getRemediationData(contextData->agentId()); - - if (agentHotfixes.size() == 0) + if (!contextData->hasRemediations()) { logDebug2( WM_VULNSCAN_LOGTAG, "No remediations for agent '%s' have been found.", contextData->agentId().data()); @@ -569,11 +566,7 @@ class TPackageScanner final : public AbstractHandlerupdates())) { - - if (std::find_if(agentHotfixes.hotfixes.begin(), - agentHotfixes.hotfixes.end(), - [&](const auto& hotfix) - { return remediation->str() == hotfix->str(); }) != agentHotfixes.hotfixes.end()) + if (contextData->remediationIsInstalled(remediation->str())) { logDebug2(WM_VULNSCAN_LOGTAG, "Remediation '%s' for package '%s' on agent '%s' that solves CVE '%s' has been found.", diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp index cd45ee7524c..a5b95c392d5 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp @@ -20,6 +20,7 @@ #include "wdbDataException.hpp" #include #include +#include namespace RemediationDataCacheConstants { @@ -31,7 +32,7 @@ namespace RemediationDataCacheConstants */ struct Remediation final { - std::vector hotfixes; ///< Installed hotfixes. + std::unordered_set hotfixes; ///< Installed hotfixes. }; /** @@ -69,7 +70,7 @@ class remediationDataCache final : public Singleton // Iterate over the response and store the hotfixes. for (auto& hotfix : response) { - remediationData.hotfixes.push_back(std::move(hotfix.at("hotfix"))); + remediationData.hotfixes.insert(std::move(hotfix.at("hotfix"))); } return remediationData; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index af6f0d9f12d..0850d0dbf43 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -254,6 +254,7 @@ struct TScanContext final { m_type = ScannerType::PackageDelete; } + m_remediationData = TRemediationDataCache::instance().getRemediationData(agentId().data()); m_osData = TOsDataCache::instance().getOsData(agentId().data()); } else if (delta->data_type() == SyscollectorDeltas::Provider_dbsync_osinfo) @@ -1402,6 +1403,39 @@ struct TScanContext final return m_osData.cpeName; } + /** + * @brief Checks if there are remediation installed. + * + * @return true if there are remediation. + * @return false if there are not remediation. + */ + bool hasRemediations() const + { + return !m_remediationData.hotfixes.empty(); + } + + /** + * @brief Checks if a remediation is installed. + * + * @param remediation ID of the remediation to check. + * + * @return true if the remediation is installed. + * @return false if the remediation is not installed. 
+ */ + bool remediationIsInstalled(const std::string& remediation) const + { + return m_remediationData.hotfixes.count(remediation) == 1; + } + + /** + * @brief Gets manager name. + * @return Manager name. + */ + std::string_view managerName() const + { + return TGlobalData::instance().managerName(); + } + /** * @brief Gets the message type. * From 90dfd84a9f5cc396aacaab40032ae76501216076 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 3 May 2024 00:45:29 -0300 Subject: [PATCH 087/419] CL: - storeModel: Added remediation update on events - updateHotfixes: removeHotfix uses a full CVE entry now - remediationDataCache: Removed use of move - scanContext: added cache update on hotfix events --- .../src/databaseFeedManager/storeModel.hpp | 5 ++++ .../databaseFeedManager/updateHotfixes.hpp | 16 +++++++++---- .../scanOrchestrator/remediationDataCache.hpp | 2 +- .../src/scanOrchestrator/scanContext.hpp | 23 +++++++++++++++---- 4 files changed, 35 insertions(+), 11 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/storeModel.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/storeModel.hpp index ae38602e596..01e14a69452 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/storeModel.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/storeModel.hpp @@ -17,6 +17,8 @@ #include "updateCVECandidates.hpp" #include "updateCVEDescription.hpp" #include "updateCVERemediations.hpp" +#include "updateHotfixes.hpp" + /** * @brief StoreModel class. * @@ -52,12 +54,14 @@ class StoreModel final : public AbstractHandler> // We clean the candidates DBs to keep the data synced if ("PUBLISHED" == state) { + UpdateHotfixes::storeVulnerabilityHotfixes(cve5Entry, data->feedDatabase); UpdateCVERemediations::storeVulnerabilityRemediation(cve5Entry, data->feedDatabase); UpdateCVEDescription::storeVulnerabilityDescription(cve5Entry, data->feedDatabase); UpdateCVECandidates::storeVulnerabilityCandidate(cve5Entry, data->feedDatabase); } else if ("REJECTED" == state) { + UpdateHotfixes::removeHotfix(cve5Entry, data->feedDatabase); UpdateCVERemediations::removeRemediation(cve5Entry, data->feedDatabase); UpdateCVEDescription::removeVulnerabilityDescription(cve5Entry, data->feedDatabase); UpdateCVECandidates::removeVulnerabilityCandidate(cve5Entry, data->feedDatabase); @@ -71,6 +75,7 @@ class StoreModel final : public AbstractHandler> { if ("PUBLISHED" == state) { + UpdateHotfixes::storeVulnerabilityHotfixes(cve5Entry, data->feedDatabase); UpdateCVERemediations::storeVulnerabilityRemediation(cve5Entry, data->feedDatabase); UpdateCVEDescription::storeVulnerabilityDescription(cve5Entry, data->feedDatabase); UpdateCVECandidates::storeVulnerabilityCandidate(cve5Entry, data->feedDatabase); diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp index 7b1a72052eb..95c5ad1df8e 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp @@ -13,6 +13,7 @@ #define _UPDATE_HOTFIXES_HPP #include "cve5_generated.h" +#include "flatbuffers/idl.h" #include "hotfix_generated.h" #include "hotfix_schema.h" #include "loggerHelper.h" @@ -152,15 +153,17 @@ class UpdateHotfixes final } /** - * @brief Deletes a hotfix from the database + * @brief Deletes all hotfixes associated with a given 
vulnerability from the RocksDB database. * - * @param hotfix Hotfix to be removed. + * @param data Pointer to the 'Entry' object containing vulnerability and remediation information. * @param feedDatabase rocksDB wrapper instance. */ - static void removeHotfix(const std::string& hotfix, Utils::IRocksDBWrapper* feedDatabase) + static void removeHotfix(const cve_v5::Entry* data, Utils::IRocksDBWrapper* feedDatabase) { - if (hotfix.empty()) + const auto remediations = data->containers()->cna()->x_remediations(); + if (!remediations) { + logDebug2(WM_VULNSCAN_LOGTAG, "No remediations available."); return; } @@ -169,7 +172,10 @@ class UpdateHotfixes final return; } - feedDatabase->delete_(hotfix, HOTFIXES_COLUMN); + for (const auto& hotfix : hotfixesOnRemediations(remediations)) + { + feedDatabase->delete_(hotfix, HOTFIXES_COLUMN); + } } }; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp index a5b95c392d5..e788077305a 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp @@ -70,7 +70,7 @@ class remediationDataCache final : public Singleton // Iterate over the response and store the hotfixes. for (auto& hotfix : response) { - remediationData.hotfixes.insert(std::move(hotfix.at("hotfix"))); + remediationData.hotfixes.insert(hotfix.at("hotfix")); } return remediationData; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index 0850d0dbf43..a0df52ba947 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -322,12 +322,18 @@ struct TScanContext final if (delta->operation()->str().compare("INSERTED") == 0) { m_type = ScannerType::HotfixInsert; + + // Insert the new hotfix into the remediation data cache. + const std::string hotfix = delta->data_as_dbsync_hotfixes()->hotfix()->str(); + Remediation remediation = {.hotfixes = {hotfix}}; + TRemediationDataCache::instance().setRemediationData(agentId().data(), + std::move(remediation)); } else if (delta->operation()->str().compare("DELETED") == 0) { m_type = ScannerType::HotfixDelete; } - m_remediationData = TRemediationDataCache::instance().getRemediationData(agentId().data()); + m_osData = TOsDataCache::instance().getOsData(agentId().data()); } } @@ -434,6 +440,7 @@ struct TScanContext final // Set the affected component type m_affectedComponentType = AffectedComponentType::Package; + m_remediationData = TRemediationDataCache::instance().getRemediationData(agentId().data()); m_osData = TOsDataCache::instance().getOsData(agentId().data()); } else if (syncMsg->data_as_state()->attributes_type() == @@ -443,7 +450,13 @@ struct TScanContext final // Set the affected component type m_affectedComponentType = AffectedComponentType::Hotfix; - m_remediationData = TRemediationDataCache::instance().getRemediationData(agentId().data()); + // Insert the new hotfix into the remediation data cache. 
+ const std::string hotfix = + syncMsg->data_as_state()->attributes_as_syscollector_hotfixes()->hotfix()->str(); + Remediation remediation = {.hotfixes = {hotfix}}; + TRemediationDataCache::instance().setRemediationData(agentId().data(), + std::move(remediation)); + m_osData = TOsDataCache::instance().getOsData(agentId().data()); } } @@ -1405,7 +1418,7 @@ struct TScanContext final /** * @brief Checks if there are remediation installed. - * + * * @return true if there are remediation. * @return false if there are not remediation. */ @@ -1416,9 +1429,9 @@ struct TScanContext final /** * @brief Checks if a remediation is installed. - * + * * @param remediation ID of the remediation to check. - * + * * @return true if the remediation is installed. * @return false if the remediation is not installed. */ From bc3f40ed57b90413c9d6b5db1e504022e96199e5 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 3 May 2024 02:35:22 -0300 Subject: [PATCH 088/419] CL: - scanContext: Fixed bug (new data overwrites old data) - scanContext: Added getter for hotfix installed - Style changes (names, etc) --- .../databaseFeedManager.hpp | 13 ++++--- .../scanOrchestrator/remediationDataCache.hpp | 18 +++++++-- .../src/scanOrchestrator/scanContext.hpp | 39 ++++++++++++++++++- 3 files changed, 58 insertions(+), 12 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp index 54d897d103b..2c5d051a72b 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp @@ -416,30 +416,31 @@ class TDatabaseFeedManager final : public Observer * stores it in the 'remediationInfo' object. * * @param hotfix hotfix id for which remediation information is requested. - * @param remediationInfo A reference to a `FlatbufferDataPair` object where the retrieved remediation + * @param hotfixVulnerabilities A reference to a `FlatbufferDataPair` object where the retrieved hotfix to CVEs * information will be stored. * * @throws std::runtime_error if the retrieved data from the database is invalid or not in the expected FlatBuffers * format. */ - void getHotfixVulnerabilities(const std::string& hotfix, FlatbufferDataPair& remediationInfo) + void getHotfixVulnerabilities(const std::string& hotfix, + FlatbufferDataPair& hotfixVulnerabilities) { // If the remediation information is not found in the database, we return because there is no remediation. 
- if (auto result = m_feedDatabase->get(hotfix, remediationInfo.slice, REMEDIATIONS_COLUMN); !result) + if (auto result = m_feedDatabase->get(hotfix, hotfixVulnerabilities.slice, REMEDIATIONS_COLUMN); !result) { return; } - const auto remediationSlice = reinterpret_cast(remediationInfo.slice.data()); + const auto hotfixVulnerabilitieslice = reinterpret_cast(hotfixVulnerabilities.slice.data()); // Verify the integrity of the FlatBuffers remediation data - if (flatbuffers::Verifier verifier(remediationSlice, remediationInfo.slice.size()); + if (flatbuffers::Verifier verifier(hotfixVulnerabilitieslice, hotfixVulnerabilities.slice.size()); !VerifyRemediationInfoBuffer(verifier)) { throw std::runtime_error("Error: Invalid FlatBuffers data in RocksDB."); } - remediationInfo.data = GetRemediationInfo(remediationSlice); + hotfixVulnerabilities.data = Gethotfix(hotfixVulnerabilitieslice); } /** diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp index e788077305a..080275efac5 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp @@ -110,15 +110,25 @@ class remediationDataCache final : public Singleton } /** - * @brief This method sets the remediation data. + * @brief Add remediation data to the cache. + * + * @note If the agentId already exists in the cache, the new data is merged with the existing data. * * @param agentId agent id. - * @param remediationData data to be inserted. + * @param newRemediationData data to be inserted. */ - void setRemediationData(const std::string& agentId, const Remediation& remediationData) + void addRemediationData(const std::string& agentId, Remediation newRemediationData) { std::scoped_lock lock(m_mutex); - m_remediationData.insertKey(agentId, remediationData); + auto currentRemediationData = m_remediationData.getValue(agentId).value(); + + // Merge the new data with the current data. + if (auto currentData = m_remediationData.getValue(agentId); currentData.has_value()) + { + newRemediationData.hotfixes.insert(currentData->hotfixes.begin(), currentData->hotfixes.end()); + } + + m_remediationData.insertKey(agentId, newRemediationData); } }; #endif // _REMEDIATION_DATA_CACHE_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index a0df52ba947..4e8b6c62833 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -326,7 +326,7 @@ struct TScanContext final // Insert the new hotfix into the remediation data cache. 
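            // addRemediationData merges the new hotfix set with whatever is already
            // cached for this agent instead of overwriting it. A minimal sketch of the
            // intended behaviour, assuming hypothetical agent/hotfix identifiers:
            //
            //   cache.addRemediationData("001", Remediation {.hotfixes = {"KB111"}});
            //   cache.addRemediationData("001", Remediation {.hotfixes = {"KB222"}});
            //   // getRemediationData("001").hotfixes -> {"KB111", "KB222"}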
const std::string hotfix = delta->data_as_dbsync_hotfixes()->hotfix()->str(); Remediation remediation = {.hotfixes = {hotfix}}; - TRemediationDataCache::instance().setRemediationData(agentId().data(), + TRemediationDataCache::instance().addRemediationData(agentId().data(), std::move(remediation)); } else if (delta->operation()->str().compare("DELETED") == 0) @@ -454,7 +454,7 @@ struct TScanContext final const std::string hotfix = syncMsg->data_as_state()->attributes_as_syscollector_hotfixes()->hotfix()->str(); Remediation remediation = {.hotfixes = {hotfix}}; - TRemediationDataCache::instance().setRemediationData(agentId().data(), + TRemediationDataCache::instance().addRemediationData(agentId().data(), std::move(remediation)); m_osData = TOsDataCache::instance().getOsData(agentId().data()); @@ -1416,6 +1416,41 @@ struct TScanContext final return m_osData.cpeName; } + /** + * @brief get the hotfix being installed in the current scan. + * + */ + std::string_view installedHotfix() const + { + return extractData( + [](const SyscollectorDeltas::Delta* delta) + { + if (delta->data_as_dbsync_hotfixes() == nullptr) + { + return ""; + } + return delta->data_as_dbsync_hotfixes()->hotfix() ? delta->data_as_dbsync_hotfixes()->hotfix()->c_str() + : ""; + }, + [](const SyscollectorSynchronization::SyncMsg* syncMsg) + { + if (syncMsg->data_as_state() == nullptr || + syncMsg->data_as_state()->attributes_as_syscollector_hotfixes() == nullptr) + { + return ""; + } + return syncMsg->data_as_state()->attributes_as_syscollector_hotfixes()->hotfix() + ? syncMsg->data_as_state()->attributes_as_syscollector_hotfixes()->hotfix()->c_str() + : ""; + }, + [](const nlohmann::json* jsonData) + { + return jsonData->contains("/data/hotfix"_json_pointer) + ? jsonData->at("/data/hotfix"_json_pointer).get_ref().c_str() + : ""; + }); + } + /** * @brief Checks if there are remediation installed. 
* From 8c8019e4579747d2f456e2fa914d4e25c111601f Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 3 May 2024 03:03:26 -0300 Subject: [PATCH 089/419] CL: - hotfixInsert: Added orchestration - inventorySync: Added hotfix getter method - scanContext & main: Style changes --- .../src/scanOrchestrator/hotfixInsert.hpp | 50 ++++++++----------- .../src/scanOrchestrator/inventorySync.hpp | 17 +++++-- .../src/scanOrchestrator/scanContext.hpp | 2 +- .../testtool/databaseFeedManager/main.cpp | 2 +- 4 files changed, 36 insertions(+), 35 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp index 24ec4b43d1a..8c62880ef7c 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp @@ -14,6 +14,8 @@ #include "chainOfResponsability.hpp" #include "databaseFeedManager.hpp" +#include "hotfix_generated.h" +#include "hotfix_schema.h" #include "inventorySync.hpp" #include "remediationDataCache.hpp" #include "scanContext.hpp" @@ -55,36 +57,28 @@ class THotfixInsert final : public AbstractHandler */ std::shared_ptr handleRequest(std::shared_ptr data) override { - // Get the list of CVEs remediated by the installed hotfix + FlatbufferDataPair hotfixVulnerabilities; + // Get the list of CVEs remediated by the installed hotfixes + const std::string hotfixId {data->hotfixId()}; + m_databaseFeedManager->getHotfixVulnerabilities(hotfixId, hotfixVulnerabilities); - // std::string key; - // key.append(data->agentNodeName()); - // key.append("_"); - // key.append(data->agentId()); - // key.append("_"); - - // // Create the key for the inventory. 
- // key.append(TInventorySync::affectedComponentKey(data)); - - // const auto& column = AFFECTED_COMPONENT_COLUMNS.at(data->affectedComponentType()); - // std::string value; - - // if (TInventorySync::m_inventoryDatabase.get(key, value, column)) - // { - // auto listCve = Utils::split(value, ','); - // for (const auto& cve : listCve) - // { - // std::string elementKey; - // elementKey.append(key); - // elementKey.append("_"); - // elementKey.append(cve); - - // data->m_elements.emplace(cve, TInventorySync::buildElement("DELETED", elementKey)); - // } - // logDebug2(WM_VULNSCAN_LOGTAG, "Deleting %s agent key: %s", column.c_str(), key.c_str()); - // TInventorySync::m_inventoryDatabase.delete_(key, column); - // } + if (hotfixVulnerabilities.data == nullptr || hotfixVulnerabilities.data->CVEs() == nullptr || + hotfixVulnerabilities.data->CVEs()->size() == 0) + { + logDebug2(WM_VULNSCAN_LOGTAG, "No CVEs associated for the installed hotfix (%s)", hotfixId); + } + else + { + // For each CVE, insert the inventory entry + for (const auto& cve : *hotfixVulnerabilities.data->CVEs()) + { + // Add all CVEs to the deletetion list + logDebug2( + WM_VULNSCAN_LOGTAG, "CVE '%s' was remediated by hotfix '%s'", cve->str().c_str(), hotfixId); + data->m_elements[cve->str()] = nlohmann::json::object(); + } + } return AbstractHandler>::handleRequest(std::move(data)); } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/inventorySync.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/inventorySync.hpp index 26a10408fd3..2ce6c01b915 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/inventorySync.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/inventorySync.hpp @@ -21,9 +21,12 @@ constexpr auto OS = "os"; constexpr auto PACKAGE = "package"; constexpr auto OS_INITIAL_SCAN = "os_initial_scan"; +constexpr auto REMEDIATION = "remediation"; const std::map AFFECTED_COMPONENT_COLUMNS = { - {AffectedComponentType::Os, OS}, {AffectedComponentType::Package, PACKAGE}}; + {AffectedComponentType::Os, OS}, + {AffectedComponentType::Package, PACKAGE}, + {AffectedComponentType::Hotfix, REMEDIATION}}; /** * @brief TInventorySync class. @@ -57,7 +60,7 @@ class TInventorySync * @param data Scan context. * @return std::string_view Affected component key. 
*/ - std::string_view affectedComponentKey(const std::shared_ptr& data) + std::string_view affectedComponentKey(const std::shared_ptr& data) const { if (data->affectedComponentType() == AffectedComponentType::Os) { @@ -67,6 +70,10 @@ class TInventorySync { return data->packageItemId(); } + else if (data->affectedComponentType() == AffectedComponentType::Hotfix) + { + return data->hotfixId(); + } else { throw std::runtime_error("Invalid affected type for inventory sync."); @@ -83,11 +90,11 @@ class TInventorySync explicit TInventorySync(Utils::RocksDBWrapper& inventoryDatabase) : m_inventoryDatabase(inventoryDatabase) { - for (const auto& element : AFFECTED_COMPONENT_COLUMNS) + for (const auto& [componentType, columnName] : AFFECTED_COMPONENT_COLUMNS) { - if (!m_inventoryDatabase.columnExists(element.second)) + if (!m_inventoryDatabase.columnExists(columnName)) { - m_inventoryDatabase.createColumn(element.second); + m_inventoryDatabase.createColumn(columnName); } } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index 4e8b6c62833..da1e50311f7 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -1420,7 +1420,7 @@ struct TScanContext final * @brief get the hotfix being installed in the current scan. * */ - std::string_view installedHotfix() const + std::string_view hotfixId() const { return extractData( [](const SyscollectorDeltas::Delta* delta) diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/databaseFeedManager/main.cpp b/src/wazuh_modules/vulnerability_scanner/testtool/databaseFeedManager/main.cpp index 29ddc5812d0..f45d42213ec 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/databaseFeedManager/main.cpp +++ b/src/wazuh_modules/vulnerability_scanner/testtool/databaseFeedManager/main.cpp @@ -85,7 +85,7 @@ class DummyPolicyManager : public Singleton /** * @brief Get remediation LRU size. 
- * + * */ uint32_t getRemediationLRUSize() const { From 51c425ecbfc0b562e41497cfaa2ac0ea43402c6c Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 3 May 2024 03:05:45 -0300 Subject: [PATCH 090/419] CL: - Added UTs --- .../tests/unit/factoryOrchestrator_test.cpp | 157 +++++++------- .../tests/unit/remediationDataCache_test.cpp | 105 +++++++++ .../tests/unit/remediationDataCache_test.hpp | 75 +++++++ .../tests/unit/updateHotfixes_test.cpp | 200 +++++++++++++----- .../tests/unit/updateHotfixes_test.hpp | 3 +- 5 files changed, 413 insertions(+), 127 deletions(-) create mode 100644 src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp create mode 100644 src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp index 58db0f86638..423449e20aa 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp @@ -32,7 +32,8 @@ enum class ScannerMockID : int BUILD_SINGLE_AGENT_LIST_CONTEXT = 14, CLEAN_SINGLE_AGENT_INVENTORY = 15, SCAN_AGENT_LIST = 16, - GLOBAL_INVENTORY_SYNC = 17 + GLOBAL_INVENTORY_SYNC = 17, + HOTFIX_INSERT = 18 }; /** @@ -128,11 +129,12 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypePackageInsert) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::PackageInsert, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::PackageInsert, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -173,11 +175,12 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypePackageDelete) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::PackageDelete, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::PackageDelete, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -216,11 +219,12 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypeIntegrityClear) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::IntegrityClear, + TFakeClass, + TFakeClass>::create(ScannerType::IntegrityClear, nullptr, nullptr, *m_inventoryDatabase, - nullptr); +>>>>>>> 2802ba8fbc (CL:) auto context = std::make_shared>(); @@ -237,32 +241,32 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypeIntegrityClear) TEST_F(FactoryOrchestratorTest, TestScannerTypeOs) { // Create the orchestrator for Os. 
- auto orchestration = - TFactoryOrchestrator, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - MockDatabaseFeedManager, - MockIndexerConnector, - std::vector, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass>::create(ScannerType::Os, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + auto orchestration = TFactoryOrchestrator, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + MockDatabaseFeedManager, + MockIndexerConnector, + std::vector, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass>::create(ScannerType::Os, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -303,11 +307,12 @@ TEST_F(FactoryOrchestratorTest, TestCreationCleanUpAllData) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::CleanupAllAgentData, + TFakeClass, + TFakeClass>::create(ScannerType::CleanupAllAgentData, nullptr, nullptr, *m_inventoryDatabase, - nullptr); +>>>>>>> 2802ba8fbc (CL:) auto context = std::make_shared>(); @@ -343,11 +348,12 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanAllAgents) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::ReScanAllAgents, + TFakeClass, + TFakeClass>::create(ScannerType::ReScanAllAgents, nullptr, nullptr, *m_inventoryDatabase, - nullptr); +>>>>>>> 2802ba8fbc (CL:) auto context = std::make_shared>(); @@ -384,11 +390,12 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanSingleAgent) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::ReScanSingleAgent, + TFakeClass, + TFakeClass>::create(ScannerType::ReScanSingleAgent, nullptr, nullptr, *m_inventoryDatabase, - nullptr); +>>>>>>> 2802ba8fbc (CL:) auto context = std::make_shared>(); @@ -402,28 +409,33 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanSingleAgent) TEST_F(FactoryOrchestratorTest, TestCreationCleanUpAgentData) { // Create the orchestrator for CleanupSingleAgentData. 
- auto orchestration = TFactoryOrchestrator, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - MockDatabaseFeedManager, - MockIndexerConnector, - std::vector, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass>:: - create(ScannerType::CleanupSingleAgentData, nullptr, nullptr, *m_inventoryDatabase, nullptr); + auto orchestration = + TFactoryOrchestrator, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + MockDatabaseFeedManager, + MockIndexerConnector, + std::vector, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass>::create(ScannerType::CleanupSingleAgentData, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -462,11 +474,12 @@ TEST_F(FactoryOrchestratorTest, TestCreationInvalidScannerType) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(invalidScannerType, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(invalidScannerType, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); } catch (const std::runtime_error& e) { diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp new file mode 100644 index 00000000000..e8692352496 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp @@ -0,0 +1,105 @@ +/* + * Wazuh Vulnerability Scanner - Unit Tests + * Copyright (C) 2015, Wazuh Inc. + * May 2, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. 
+ */ + +#include "remediationDataCache_test.hpp" + +bool hotfixesAreEqual(const Remediation& remediation1, const Remediation& remediation2) +{ + if (remediation1.hotfixes.size() != remediation2.hotfixes.size()) + { + return false; + } + + for (const auto& hotfix : remediation1.hotfixes) + { + if (remediation2.hotfixes.find(hotfix) == remediation2.hotfixes.end()) + { + return false; + } + } + + return true; +} + +TEST_F(remediationDataCacheTest, InsertMultipleItems) +{ + remediationDataCache cache; + std::string agentId {"1"}; + + { + // Set value in cache + Remediation remediationData { + .hotfixes = {"hotfix1", "hotfix2"}, + }; + + cache.addRemediationData(agentId, remediationData); + + // Get value from cache + const auto retrievedData = cache.getRemediationData(agentId); + + // Verify that the returned value is the same as the one set + EXPECT_TRUE(hotfixesAreEqual(remediationData, remediationData)); + } + + { + // Set another value in cache + Remediation remediationData { + .hotfixes = {"hotfix3", "hotfix4"}, + }; + + cache.addRemediationData(agentId, remediationData); + + // Get value from cache + const auto retrievedData = cache.getRemediationData(agentId); + + // Verify that the returned value is equal to both the values set + Remediation expected {.hotfixes = {"hotfix1", "hotfix2", "hotfix3", "hotfix4"}}; + EXPECT_TRUE(hotfixesAreEqual(retrievedData, expected)); + } +} + +TEST_F(remediationDataCacheTest, SetAndGetSuccess) +{ + // Start fake server + m_socketServer->listen( + [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) + { + std::ignore = fd; + std::ignore = dataHeader; + std::ignore = sizeHeader; + std::ignore = size; + std::ignore = data; + + m_socketServer->send(fd, "err ", 4); + }); + + remediationDataCache cache; + std::string agentId {"1"}; + + // Try to get value from empty cache + EXPECT_THROW(cache.getRemediationData(agentId), WdbDataException); + + // Set value in cache + Remediation remediationData { + .hotfixes = {"hotfix1", "hotfix2"}, + }; + + cache.addRemediationData(agentId, remediationData); + + // Get value from cache + const auto retrievedData = cache.getRemediationData(agentId); + + // Verify that the returned value is the same as the one set + EXPECT_TRUE(hotfixesAreEqual(remediationData, retrievedData)); + + // Try to get from non existing agent + EXPECT_THROW(cache.getRemediationData("2"), WdbDataException); +} diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp new file mode 100644 index 00000000000..49dc018960a --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp @@ -0,0 +1,75 @@ +/* + * Wazuh Vulnerability Scanner - Unit Tests + * Copyright (C) 2015, Wazuh Inc. + * May 2, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. 
+ */ + +#ifndef _REMEDIAION_DATA_CACHE_TEST_HPP +#define _REMEDIAION_DATA_CACHE_TEST_HPP + +#include "policyManager.hpp" +#include "remediationDataCache.hpp" +#include "socketServer.hpp" +#include "wdbDataException.hpp" +#include "gtest/gtest.h" +#include + +/** + * @brief Runs unit tests for remediationDataCache + */ +class remediationDataCacheTest : public ::testing::Test +{ +protected: + // LCOV_EXCL_START + remediationDataCacheTest() = default; + ~remediationDataCacheTest() override = default; + // LCOV_EXCL_STOP + /** + * @brief Fake socket server to test the DB query. + * + */ + std::shared_ptr, EpollWrapper>> m_socketServer; + + /** + * @brief Set the Up every test case. + * + */ + void SetUp() override + { + std::filesystem::create_directories("queue/db"); + // Create the socket server + m_socketServer = std::make_shared, EpollWrapper>>( + RemediationDataCacheConstants::WDB_SOCKET); + + // Policy manager initialization. + const auto& configJson {nlohmann::json::parse(R"( + { + "vulnerability-detection": { + "enabled": "yes", + "index-status": "yes", + "cti-url": "cti-url.com" + }, + "remediationLRUSize":1000 + })")}; + PolicyManager::instance().initialize(configJson); + } + + /** + * @brief Clean up method after every test execution. + * + */ + void TearDown() override + { + PolicyManager::instance().teardown(); + // Stop the socket server + m_socketServer->stop(); + std::filesystem::remove_all("queue/db"); + } +}; + +#endif // _REMEDIAION_DATA_CACHE_TEST_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp index 186b0b15125..44d9238fb9d 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp @@ -14,16 +14,18 @@ #include "flatbuffers/util.h" #include -constexpr auto COMMON_DATABASE_DIR {"queue/vd"}; //< fbHotfix; - const std::string expectedCveId = "CVE-1337-1234"; - for (const auto& hotfixId : hotfixes) + const std::string expectedCveId = "CVE-2222-2222"; + for (const auto& hotfix : hotfixes) { - EXPECT_TRUE(rocksDBWrapper->get(hotfixId, fbHotfix.slice, HOTFIXES_COLUMN)); + EXPECT_TRUE(rocksDBWrapper->get(hotfix, fbHotfix.slice, HOTFIXES_COLUMN)); fbHotfix.data = const_cast(NSVulnerabilityScanner::Gethotfix(fbHotfix.slice.data())); EXPECT_EQ(fbHotfix.data->CVEs()->size(), 1); - EXPECT_STREQ(fbHotfix.data->CVEs()->Get(0)->str(), expectedCveId); + EXPECT_EQ(fbHotfix.data->CVEs()->Get(0)->str(), expectedCveId); + } +} + +TEST_F(UpdateHotfixesTest, SkipsEmptyUpdates) +{ + // Define schema variable and parse JSON object. + std::string schemaStr; + + // Load file with schema. + flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); + + // Parse schema. 
+ flatbuffers::Parser parser; + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); + parser.Parse(JSON_CVE5_EMPTY_UPDATES); + + // Create a test Entry object with Windows remediations + auto buffer = parser.builder_.GetBufferPointer(); + flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); + cve_v5::VerifyEntryBuffer(verifier); + auto entry = cve_v5::GetEntry(buffer); + + // Create a mock RocksDBWrapper object + std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); + + // Call the updateRemediation function with the test data + EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); +} + +TEST_F(UpdateHotfixesTest, MultipleVulnerabilities) +{ + // Create a mock RocksDBWrapper object + std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); + + // Define schema variable and parse JSON object. + std::string schemaStr; + + // Load file with schema. + flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); + + // Parse schema. + flatbuffers::Parser parser; + + { + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); + + // Parse the first entry + parser.Parse(JSON_CVE5_VALID_ONE_BLOCK); + + // Prepare the vulnerability + auto buffer = parser.builder_.GetBufferPointer(); + flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); + cve_v5::VerifyEntryBuffer(verifier); + auto entry = cve_v5::GetEntry(buffer); + + // Insert the vulnerability + EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); + } + + { + // Parse schema. + flatbuffers::Parser parser; + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); + + // Parse the first entry + parser.Parse(JSON_CVE5_VALID_MULTIPLE_BLOCKS); + + // Prepare the vulnerability + auto buffer = parser.builder_.GetBufferPointer(); + flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); + cve_v5::VerifyEntryBuffer(verifier); + auto entry = cve_v5::GetEntry(buffer); + + // Insert the vulnerability + EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); + } + + // Hotfix to CVE mapping + std::map> hotfixesToCVEs { + {"KBT-800", {"CVE-1111-1111", "CVE-2222-2222"}}, + {"KBT-3000", {"CVE-1111-1111", "CVE-2222-2222"}}, + {"KBT-4000", {"CVE-2222-2222"}}, + {"KBT-5000", {"CVE-2222-2222"}}, + {"KBT-6000", {"CVE-2222-2222"}}, + {"KBT-7000", {"CVE-2222-2222"}}, + {"KBT-8000", {"CVE-2222-2222"}}, + {"KBT-9000", {"CVE-2222-2222"}}, + }; + + // Make sure the entries where inserted + FlatbufferDataPair hotfixInfo; + for (const auto& [hotfixIds, cveIds] : hotfixesToCVEs) + { + EXPECT_TRUE(rocksDBWrapper->get(hotfixIds, hotfixInfo.slice, HOTFIXES_COLUMN)); + hotfixInfo.data = const_cast(Gethotfix(hotfixInfo.slice.data())); + EXPECT_EQ(hotfixInfo.data->CVEs()->size(), cveIds.size()); + for (const auto& cveId : cveIds) + { + // Check that the cve is in the list + EXPECT_TRUE(std::find_if(hotfixInfo.data->CVEs()->begin(), + hotfixInfo.data->CVEs()->end(), + [&cveId](const flatbuffers::String* str) + { return str->str() == cveId; }) != hotfixInfo.data->CVEs()->end()); + } } } -TEST_F(UpdateHotfixesTest, DeleteHotfixes) +TEST_F(UpdateHotfixesTest, StoreAndRemove) { // Define schema variable and parse JSON object. std::string schemaStr; @@ -450,13 +538,14 @@ TEST_F(UpdateHotfixesTest, DeleteHotfixes) // Parse schema. 
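    // The tests build CVE5 entries through the FlatBuffers JSON front end: the first
    // Parse() call loads the .fbs grammar, the second turns a JSON document into the
    // binary buffer held by parser.builder_. A minimal sketch of that round trip,
    // using the names defined in this file:
    //
    //   flatbuffers::Parser p;
    //   p.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); // schema
    //   p.Parse(JSON_CVE5_VALID_ONE_BLOCK);                 // JSON -> binary
    //   const auto* entry = cve_v5::GetEntry(p.builder_.GetBufferPointer());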
flatbuffers::Parser parser; - parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES) && parser.Parse(JSON_CVE5_VALID_ONE_BLOCK); + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); + parser.Parse(JSON_CVE5_VALID_ONE_BLOCK); // Create a test Entry object with Windows remediations - auto jbuf = parser.builder_.GetBufferPointer(); - flatbuffers::Verifier jverifier(jbuf, parser.builder_.GetSize()); - cve_v5::VerifyEntryBuffer(jverifier); - auto entry = cve_v5::GetEntry(jbuf); + auto buffer = parser.builder_.GetBufferPointer(); + flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); + cve_v5::VerifyEntryBuffer(verifier); + auto entry = cve_v5::GetEntry(buffer); // Create a mock RocksDBWrapper object std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); @@ -465,19 +554,22 @@ TEST_F(UpdateHotfixesTest, DeleteHotfixes) EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); // Make sure the entries where inserted - FlatbufferDataPair remediationInfo; + FlatbufferDataPair hotfixInfo; std::vector hotfixes {"KBT-800", "KBT-1000", "KBT-3000"}; for (const auto& hotfix : hotfixes) { - EXPECT_TRUE(rocksDBWrapper->get(hotfix, remediationInfo.slice, HOTFIXES_COLUMN)); + EXPECT_TRUE(rocksDBWrapper->get(hotfix, hotfixInfo.slice, HOTFIXES_COLUMN)); + hotfixInfo.data = + const_cast(NSVulnerabilityScanner::Gethotfix(hotfixInfo.slice.data())); + EXPECT_EQ(hotfixInfo.data->CVEs()->size(), 1); + EXPECT_EQ(hotfixInfo.data->CVEs()->Get(0)->str(), "CVE-1111-1111"); } - // Remove all but one hotfix - EXPECT_NO_THROW(UpdateHotfixes::removeHotfix(hotfixes.at(1), rocksDBWrapper.get())); - EXPECT_NO_THROW(UpdateHotfixes::removeHotfix(hotfixes.at(2), rocksDBWrapper.get())); + // Remove all hotfixes + EXPECT_NO_THROW(UpdateHotfixes::removeHotfix(entry, rocksDBWrapper.get())); // Check that the hotfixes were removed - EXPECT_TRUE(rocksDBWrapper->get(hotfixes.at(0), remediationInfo.slice, HOTFIXES_COLUMN)); - EXPECT_FALSE(rocksDBWrapper->get(hotfixes.at(1), remediationInfo.slice, HOTFIXES_COLUMN)); - EXPECT_FALSE(rocksDBWrapper->get(hotfixes.at(2), remediationInfo.slice, HOTFIXES_COLUMN)); + EXPECT_TRUE(rocksDBWrapper->get(hotfixes.at(0), hotfixInfo.slice, HOTFIXES_COLUMN)); + EXPECT_FALSE(rocksDBWrapper->get(hotfixes.at(1), hotfixInfo.slice, HOTFIXES_COLUMN)); + EXPECT_FALSE(rocksDBWrapper->get(hotfixes.at(2), hotfixInfo.slice, HOTFIXES_COLUMN)); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp index 870b5963349..45eaf6bacc4 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp @@ -13,7 +13,7 @@ #define _UPDATE_HOTFIXES_TEST_HPP #include "../../src/databaseFeedManager/databaseFeedManager.hpp" #include "../../src/databaseFeedManager/updateHotfixes.hpp" -#include "hotfixes_generated.h" +#include "hotfix_generated.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -39,6 +39,7 @@ class UpdateHotfixesTest : public ::testing::Test * */ void TearDown() override; + }; #endif //_UPDATE_HOTFIXES_TEST_HPP From a927fd67b18f76219bd42b458535eee50f27da37 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 3 May 2024 11:52:58 -0300 Subject: [PATCH 091/419] CL: - Fixed rebase errors --- .../tests/unit/factoryOrchestrator_test.cpp | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git 
a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp index 423449e20aa..964268e821b 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp @@ -221,10 +221,10 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypeIntegrityClear) TFakeClass, TFakeClass, TFakeClass>::create(ScannerType::IntegrityClear, - nullptr, - nullptr, - *m_inventoryDatabase, ->>>>>>> 2802ba8fbc (CL:) + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -312,7 +312,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationCleanUpAllData) nullptr, nullptr, *m_inventoryDatabase, ->>>>>>> 2802ba8fbc (CL:) + nullptr); auto context = std::make_shared>(); @@ -353,7 +353,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanAllAgents) nullptr, nullptr, *m_inventoryDatabase, ->>>>>>> 2802ba8fbc (CL:) + nullptr); auto context = std::make_shared>(); @@ -395,7 +395,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanSingleAgent) nullptr, nullptr, *m_inventoryDatabase, ->>>>>>> 2802ba8fbc (CL:) + nullptr); auto context = std::make_shared>(); @@ -515,11 +515,12 @@ TEST_F(FactoryOrchestratorTest, TestCreationGlobalSyncInventory) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::GlobalSyncInventory, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::GlobalSyncInventory, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); From a7be2fe21ba00aef9f4516f0137c3ed42b79d1d4 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 3 May 2024 11:56:41 -0300 Subject: [PATCH 092/419] CL: - Added hotfixInsert orchestration --- .../src/scanOrchestrator/hotfixInsert.hpp | 1 + .../src/scanOrchestrator/scanOrchestrator.hpp | 3 +++ 2 files changed, 4 insertions(+) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp index 8c62880ef7c..b2ce41dba30 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp @@ -57,6 +57,7 @@ class THotfixInsert final : public AbstractHandler */ std::shared_ptr handleRequest(std::shared_ptr data) override { + logDebug2(WM_VULNSCAN_LOGTAG, "Inserting inventory entries for the installed hotfixes"); FlatbufferDataPair hotfixVulnerabilities; // Get the list of CVEs remediated by the installed hotfixes diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp index 6da8b0b06f5..7b28c63cca3 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp @@ -101,6 +101,8 @@ class TScanOrchestrator final : public TOSPrimitives std::move(indexerConnector), inventoryDatabase, std::move(reportDispatcher)); + m_hotfixInsertOrchestration = TFactoryOrchestrator::create( + ScannerType::HotfixInsert, databaseFeedManager, indexerConnector, inventoryDatabase, reportDispatcher); // Define the maximum size for the hostname constexpr auto MAX_HOSTNAME_SIZE = 256; @@ -226,6 +228,7 @@ class 
TScanOrchestrator final : public TOSPrimitives { switch (type) { + case ScannerType::HotfixInsert: m_hotfixInsertOrchestration->handleRequest(std::move(context)); break; case ScannerType::PackageInsert: m_packageInsertOrchestration->handleRequest(std::move(context)); break; case ScannerType::PackageDelete: m_packageDeleteOrchestration->handleRequest(std::move(context)); break; case ScannerType::Os: m_osOrchestration->handleRequest(std::move(context)); break; From 893fb6defc509f109aec7fddbe0ef418195c9227 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 3 May 2024 13:18:25 -0300 Subject: [PATCH 093/419] CL: - Style changes (clang-format) - Fixead file creation dates - Added missing documentation --- .../databaseFeedManager/updateHotfixes.hpp | 2 +- .../src/scanOrchestrator/hotfixInsert.hpp | 7 +++--- .../scanOrchestrator/remediationDataCache.hpp | 1 - .../src/scanOrchestrator/scanContext.hpp | 9 ++++--- .../tests/unit/factoryOrchestrator_test.cpp | 24 +++++++++---------- .../tests/unit/remediationDataCache_test.cpp | 2 +- .../tests/unit/updateHotfixes_test.cpp | 2 +- .../tests/unit/updateHotfixes_test.hpp | 3 +-- .../testtool/databaseFeedManager/main.cpp | 1 + 9 files changed, 26 insertions(+), 25 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp index 95c5ad1df8e..e3c6185d6e4 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp @@ -1,7 +1,7 @@ /* * Wazuh storeRemediationsModel * Copyright (C) 2015, Wazuh Inc. - * October 05, 2023. + * May 2, 2024. * * This program is free software; you can redistribute it * and/or modify it under the terms of the GNU General Public diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp index b2ce41dba30..782330c76be 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp @@ -39,9 +39,9 @@ class THotfixInsert final : public AbstractHandler public: // LCOV_EXCL_START /** - * @brief EventInsertInventory constructor. + * @brief HotfixInsert event constructor. * - * @param inventoryDatabase Inventory database. + * @param databaseFeedManager Database feed manager. 
*/ explicit THotfixInsert(std::shared_ptr& databaseFeedManager) : m_databaseFeedManager(databaseFeedManager) @@ -75,8 +75,7 @@ class THotfixInsert final : public AbstractHandler for (const auto& cve : *hotfixVulnerabilities.data->CVEs()) { // Add all CVEs to the deletetion list - logDebug2( - WM_VULNSCAN_LOGTAG, "CVE '%s' was remediated by hotfix '%s'", cve->str().c_str(), hotfixId); + logDebug2(WM_VULNSCAN_LOGTAG, "CVE '%s' was remediated by hotfix '%s'", cve->str().c_str(), hotfixId); data->m_elements[cve->str()] = nlohmann::json::object(); } } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp index 080275efac5..ad144ae161e 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp @@ -120,7 +120,6 @@ class remediationDataCache final : public Singleton void addRemediationData(const std::string& agentId, Remediation newRemediationData) { std::scoped_lock lock(m_mutex); - auto currentRemediationData = m_remediationData.getValue(agentId).value(); // Merge the new data with the current data. if (auto currentData = m_remediationData.getValue(agentId); currentData.has_value()) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index da1e50311f7..a1b742bb917 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -1417,9 +1417,12 @@ struct TScanContext final } /** - * @brief get the hotfix being installed in the current scan. - * - */ + * @brief get the hotfix identifier being installed in the current scan. + * + * @details If no hotfix is being installed, an empty string is returned. + * + * @return std::string_view hotfix identifier. 
+ */ std::string_view hotfixId() const { return extractData( diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp index 964268e821b..bc478e2a2e3 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp @@ -309,10 +309,10 @@ TEST_F(FactoryOrchestratorTest, TestCreationCleanUpAllData) TFakeClass, TFakeClass, TFakeClass>::create(ScannerType::CleanupAllAgentData, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -350,10 +350,10 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanAllAgents) TFakeClass, TFakeClass, TFakeClass>::create(ScannerType::ReScanAllAgents, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -392,10 +392,10 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanSingleAgent) TFakeClass, TFakeClass, TFakeClass>::create(ScannerType::ReScanSingleAgent, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp index e8692352496..1c0279521ab 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp @@ -34,7 +34,7 @@ TEST_F(remediationDataCacheTest, InsertMultipleItems) remediationDataCache cache; std::string agentId {"1"}; - { + { // Set value in cache Remediation remediationData { .hotfixes = {"hotfix1", "hotfix2"}, diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp index 44d9238fb9d..607733ea133 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp @@ -1,7 +1,7 @@ /* * Wazuh storeRemediationsModel * Copyright (C) 2015, Wazuh Inc. - * October 05, 2023. + * May 03, 2024. * * This program is free software; you can redistribute it * and/or modify it under the terms of the GNU General Public diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp index 45eaf6bacc4..73c253d10f5 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp @@ -1,7 +1,7 @@ /* * Wazuh storeRemediationsModel * Copyright (C) 2015, Wazuh Inc. - * October 05, 2023. + * May 03, 2024. 
* * This program is free software; you can redistribute it * and/or modify it under the terms of the GNU General Public @@ -39,7 +39,6 @@ class UpdateHotfixesTest : public ::testing::Test * */ void TearDown() override; - }; #endif //_UPDATE_HOTFIXES_TEST_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/databaseFeedManager/main.cpp b/src/wazuh_modules/vulnerability_scanner/testtool/databaseFeedManager/main.cpp index f45d42213ec..b6fdc2d7590 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/databaseFeedManager/main.cpp +++ b/src/wazuh_modules/vulnerability_scanner/testtool/databaseFeedManager/main.cpp @@ -86,6 +86,7 @@ class DummyPolicyManager : public Singleton /** * @brief Get remediation LRU size. * + * @return uint32_t remediation LRU size. */ uint32_t getRemediationLRUSize() const { From 00688a740b5e7f40c11c71cd0ea03788fe030c68 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 3 May 2024 15:07:04 -0300 Subject: [PATCH 094/419] CL: - Fixed hotfix insert logic when no hotfix is installed --- .../src/scanOrchestrator/hotfixInsert.hpp | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp index 782330c76be..2600dd43dd5 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp @@ -57,27 +57,29 @@ class THotfixInsert final : public AbstractHandler */ std::shared_ptr handleRequest(std::shared_ptr data) override { - logDebug2(WM_VULNSCAN_LOGTAG, "Inserting inventory entries for the installed hotfixes"); FlatbufferDataPair hotfixVulnerabilities; - // Get the list of CVEs remediated by the installed hotfixes const std::string hotfixId {data->hotfixId()}; - m_databaseFeedManager->getHotfixVulnerabilities(hotfixId, hotfixVulnerabilities); + if (hotfixId.empty()) + { + logDebug2(WM_VULNSCAN_LOGTAG, "No hotfix installed"); + return nullptr; + } + m_databaseFeedManager->getHotfixVulnerabilities(hotfixId, hotfixVulnerabilities); if (hotfixVulnerabilities.data == nullptr || hotfixVulnerabilities.data->CVEs() == nullptr || hotfixVulnerabilities.data->CVEs()->size() == 0) { - logDebug2(WM_VULNSCAN_LOGTAG, "No CVEs associated for the installed hotfix (%s)", hotfixId); + logDebug2(WM_VULNSCAN_LOGTAG, "No CVEs associated for the installed hotfix '%s'", hotfixId); + return nullptr; } - else + + // For each CVE, insert the inventory entry + for (const auto& cve : *hotfixVulnerabilities.data->CVEs()) { - // For each CVE, insert the inventory entry - for (const auto& cve : *hotfixVulnerabilities.data->CVEs()) - { - // Add all CVEs to the deletetion list - logDebug2(WM_VULNSCAN_LOGTAG, "CVE '%s' was remediated by hotfix '%s'", cve->str().c_str(), hotfixId); - data->m_elements[cve->str()] = nlohmann::json::object(); - } + // Add all CVEs to the deletetion list + logDebug2(WM_VULNSCAN_LOGTAG, "CVE '%s' was remediated by hotfix '%s'", cve->str().c_str(), hotfixId); + data->m_elements[cve->str()] = nlohmann::json::object(); } return AbstractHandler>::handleRequest(std::move(data)); From c44eaf0ae897f0b21cece13c1f84a37da8308639 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 3 May 2024 17:47:36 -0300 Subject: [PATCH 095/419] CL: - Prevent exception when no hotfix data is found --- .../src/scanOrchestrator/hotfixInsert.hpp | 3 ++- 
.../src/scanOrchestrator/remediationDataCache.hpp | 5 ++++- .../src/scanOrchestrator/scanContext.hpp | 7 +------ 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp index 2600dd43dd5..ed7b2d89811 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp @@ -65,7 +65,8 @@ class THotfixInsert final : public AbstractHandler logDebug2(WM_VULNSCAN_LOGTAG, "No hotfix installed"); return nullptr; } - + + logDebug2(WM_VULNSCAN_LOGTAG, "Getting associated vulnerabilities for hotfix '%s'", hotfixId.c_str()); m_databaseFeedManager->getHotfixVulnerabilities(hotfixId, hotfixVulnerabilities); if (hotfixVulnerabilities.data == nullptr || hotfixVulnerabilities.data->CVEs() == nullptr || hotfixVulnerabilities.data->CVEs()->size() == 0) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp index ad144ae161e..6d10c0b35e2 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp @@ -70,7 +70,10 @@ class remediationDataCache final : public Singleton // Iterate over the response and store the hotfixes. for (auto& hotfix : response) { - remediationData.hotfixes.insert(hotfix.at("hotfix")); + if (hotfix.contains("hotfix")) + { + remediationData.hotfixes.insert(hotfix.at("hotfix")); + } } return remediationData; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index a1b742bb917..157e5a82dd0 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -1446,12 +1446,7 @@ struct TScanContext final ? syncMsg->data_as_state()->attributes_as_syscollector_hotfixes()->hotfix()->c_str() : ""; }, - [](const nlohmann::json* jsonData) - { - return jsonData->contains("/data/hotfix"_json_pointer) - ? 
jsonData->at("/data/hotfix"_json_pointer).get_ref().c_str() - : ""; - }); + []([[maybe_unused]] const nlohmann::json* jsonData) { return ""; }); } /** From b76b71f5219aa5b467c39899d55621b592a18c43 Mon Sep 17 00:00:00 2001 From: MiguelazoDS Cazajous-Miguel Date: Sat, 4 May 2024 11:10:21 -0300 Subject: [PATCH 096/419] Fix modulesd crash --- .../src/scanOrchestrator/hotfixInsert.hpp | 2 +- .../src/scanOrchestrator/scanOrchestrator.hpp | 13 ++++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp index ed7b2d89811..f5bb019bd62 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp @@ -65,7 +65,7 @@ class THotfixInsert final : public AbstractHandler logDebug2(WM_VULNSCAN_LOGTAG, "No hotfix installed"); return nullptr; } - + logDebug2(WM_VULNSCAN_LOGTAG, "Getting associated vulnerabilities for hotfix '%s'", hotfixId.c_str()); m_databaseFeedManager->getHotfixVulnerabilities(hotfixId, hotfixVulnerabilities); if (hotfixVulnerabilities.data == nullptr || hotfixVulnerabilities.data->CVEs() == nullptr || diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp index 7b28c63cca3..d096fe91d09 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp @@ -97,12 +97,15 @@ class TScanOrchestrator final : public TOSPrimitives inventoryDatabase, reportDispatcher); m_inventorySyncOrchestration = TFactoryOrchestrator::create(ScannerType::GlobalSyncInventory, - std::move(databaseFeedManager), - std::move(indexerConnector), + databaseFeedManager, + indexerConnector, inventoryDatabase, - std::move(reportDispatcher)); - m_hotfixInsertOrchestration = TFactoryOrchestrator::create( - ScannerType::HotfixInsert, databaseFeedManager, indexerConnector, inventoryDatabase, reportDispatcher); + reportDispatcher); + m_hotfixInsertOrchestration = TFactoryOrchestrator::create(ScannerType::HotfixInsert, + std::move(databaseFeedManager), + std::move(indexerConnector), + inventoryDatabase, + std::move(reportDispatcher)); // Define the maximum size for the hostname constexpr auto MAX_HOSTNAME_SIZE = 256; From 4679e9548b9e825a1fd4b6ec380809f1945782da Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Mon, 6 May 2024 22:52:41 -0300 Subject: [PATCH 097/419] CL: - Renamed schema into hotfixes_applications - Changed column name - Updated methods to improve performance (no json parsing) --- .../vulnerability_scanner/CMakeLists.txt | 2 +- .../include/vulnerabilityScanner.hpp | 2 +- .../vulnerability_scanner/schemas/hotfix.fbs | 7 - .../schemas/hotfixes_applications.fbs | 7 + .../databaseFeedManager.hpp | 24 ++-- .../databaseFeedManager/updateHotfixes.hpp | 123 +++++++++++------- .../src/scanOrchestrator/hotfixInsert.hpp | 6 +- .../src/scanOrchestrator/inventorySync.hpp | 6 +- .../src/scanOrchestrator/scanOrchestrator.hpp | 8 +- .../tests/unit/updateHotfixes_test.cpp | 106 ++++++++++----- .../tests/unit/updateHotfixes_test.hpp | 2 +- 11 files changed, 176 insertions(+), 117 deletions(-) delete mode 100644 src/wazuh_modules/vulnerability_scanner/schemas/hotfix.fbs create mode 100644 
src/wazuh_modules/vulnerability_scanner/schemas/hotfixes_applications.fbs diff --git a/src/wazuh_modules/vulnerability_scanner/CMakeLists.txt b/src/wazuh_modules/vulnerability_scanner/CMakeLists.txt index 96450d64a74..4c4b886923c 100644 --- a/src/wazuh_modules/vulnerability_scanner/CMakeLists.txt +++ b/src/wazuh_modules/vulnerability_scanner/CMakeLists.txt @@ -62,7 +62,7 @@ list(APPEND Schemas vulnerabilityRemediations packageTranslation messageBuffer - hotfix + hotfixes_applications ) message("Compiling schemas") diff --git a/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp b/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp index 4c7b6de727b..e7e090fa313 100644 --- a/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/include/vulnerabilityScanner.hpp @@ -27,7 +27,7 @@ #include #include -constexpr auto HOTFIXES_COLUMN {"hotfixes"}; +constexpr auto HOTFIXES_APPLICATIONS_COLUMN {"hotfixes_applications"}; constexpr auto REMEDIATIONS_COLUMN {"remediations"}; constexpr auto TRANSLATIONS_COLUMN {"translation"}; constexpr auto DESCRIPTIONS_COLUMN {"descriptions"}; diff --git a/src/wazuh_modules/vulnerability_scanner/schemas/hotfix.fbs b/src/wazuh_modules/vulnerability_scanner/schemas/hotfix.fbs deleted file mode 100644 index 130d0c7344e..00000000000 --- a/src/wazuh_modules/vulnerability_scanner/schemas/hotfix.fbs +++ /dev/null @@ -1,7 +0,0 @@ -namespace NSVulnerabilityScanner; - -table hotfix { - CVEs:[string]; -} - -root_type hotfix; diff --git a/src/wazuh_modules/vulnerability_scanner/schemas/hotfixes_applications.fbs b/src/wazuh_modules/vulnerability_scanner/schemas/hotfixes_applications.fbs new file mode 100644 index 00000000000..84a7c63abd2 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/schemas/hotfixes_applications.fbs @@ -0,0 +1,7 @@ +namespace NSVulnerabilityScanner; + +table HotfixesApplications { + CVEs:[string]; +} + +root_type HotfixesApplications; diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp index 2c5d051a72b..fad42c0d554 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp @@ -113,13 +113,6 @@ struct Translation final */ using TranslationLRUCache = LRUCache; -/** - * @brief Agent remediations cache. - * @details Key: Hotfix, Value: List of CVEs fixed. - * - */ -using RemediationLRUCache = LRUCache; - /** * @brief DatabaseFeedManager class. * @@ -295,8 +288,6 @@ class TDatabaseFeedManager final : public Observer m_translationL2Cache = std::make_unique(TPolicyManager::instance().getTranslationLRUSize()); - m_remediationCache = std::make_unique(TPolicyManager::instance().getRemediationLRUSize()); - try { m_feedDatabase = std::make_unique(DATABASE_PATH, false); @@ -422,25 +413,27 @@ class TDatabaseFeedManager final : public Observer * @throws std::runtime_error if the retrieved data from the database is invalid or not in the expected FlatBuffers * format. */ - void getHotfixVulnerabilities(const std::string& hotfix, - FlatbufferDataPair& hotfixVulnerabilities) + void + getHotfixVulnerabilities(const std::string& hotfix, + FlatbufferDataPair& hotfixVulnerabilities) { // If the remediation information is not found in the database, we return because there is no remediation. 
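        // The value stored under each hotfix key in the hotfixes_applications column is a
        // HotfixesApplications table listing the CVE ids that the hotfix remediates. A
        // minimal sketch of how such a value is built (identifiers borrowed from the unit
        // tests above, illustrative only):
        //
        //   flatbuffers::FlatBufferBuilder builder;
        //   auto cves = builder.CreateVectorOfStrings({"CVE-2222-2222"});
        //   builder.Finish(NSVulnerabilityScanner::CreateHotfixesApplications(builder, cves));
        //   // put under key "KBT-800", later read back here with GetHotfixesApplications().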
- if (auto result = m_feedDatabase->get(hotfix, hotfixVulnerabilities.slice, REMEDIATIONS_COLUMN); !result) + if (auto result = m_feedDatabase->get(hotfix, hotfixVulnerabilities.slice, HOTFIXES_APPLICATIONS_COLUMN); + !result) { return; } - const auto hotfixVulnerabilitieslice = reinterpret_cast(hotfixVulnerabilities.slice.data()); + const auto hotfixesApplicationsSlice = reinterpret_cast(hotfixVulnerabilities.slice.data()); // Verify the integrity of the FlatBuffers remediation data - if (flatbuffers::Verifier verifier(hotfixVulnerabilitieslice, hotfixVulnerabilities.slice.size()); + if (flatbuffers::Verifier verifier(hotfixesApplicationsSlice, hotfixVulnerabilities.slice.size()); !VerifyRemediationInfoBuffer(verifier)) { throw std::runtime_error("Error: Invalid FlatBuffers data in RocksDB."); } - hotfixVulnerabilities.data = Gethotfix(hotfixVulnerabilitieslice); + hotfixVulnerabilities.data = GetHotfixesApplications(hotfixesApplicationsSlice); } /** @@ -746,7 +739,6 @@ class TDatabaseFeedManager final : public Observer std::unique_ptr m_contentRegistration; std::unique_ptr m_feedDatabase; std::unique_ptr m_translationL2Cache; - std::unique_ptr m_remediationCache; std::unique_ptr m_contentUpdateSubscription; const std::atomic& m_shouldStop; diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp index e3c6185d6e4..d5ae3a706e3 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp @@ -14,8 +14,8 @@ #include "cve5_generated.h" #include "flatbuffers/idl.h" -#include "hotfix_generated.h" -#include "hotfix_schema.h" +#include "hotfixes_applications_generated.h" +#include "hotfixes_applications_schema.h" #include "loggerHelper.h" #include "rocksDBWrapper.hpp" #include "vulnerabilityScanner.hpp" @@ -99,56 +99,47 @@ class UpdateHotfixes final const auto remediations = data->containers()->cna()->x_remediations(); if (!remediations) { - logDebug2(WM_VULNSCAN_LOGTAG, "No remediations available."); return; } - flatbuffers::IDLOptions options; - options.strict_json = true; - flatbuffers::Parser parser(options); - if (!parser.Parse(hotfix_SCHEMA)) + if (!feedDatabase->columnExists(HOTFIXES_APPLICATIONS_COLUMN)) { - throw std::runtime_error("Unable to parse schema: " + parser.error_); + feedDatabase->createColumn(HOTFIXES_APPLICATIONS_COLUMN); } - if (!feedDatabase->columnExists(HOTFIXES_COLUMN)) - { - feedDatabase->createColumn(HOTFIXES_COLUMN); - } - - const auto currentCVE {data->cveMetadata()->cveId()->str()}; // CVE associated with the current remediations - rocksdb::PinnableSlice cveList; + flatbuffers::FlatBufferBuilder builder; + const auto hotfixes = hotfixesOnRemediations(remediations); - // 1. Get the CVEs list associated with each remediation from the database - // 2. Convert the list back into a JSON object - // 3. Add the current CVE to the list - // 4. Convert the JSON object back into a FlatBuffer object - // 5. 
Update the remediation with the new CVE list - for (const auto& hotfix : hotfixesOnRemediations(remediations)) + for (const auto& hotfix : hotfixes) { - nlohmann::json jsonData; + std::vector> cvesVector; - if (feedDatabase->get(hotfix, cveList, HOTFIXES_COLUMN)) + // Get the list of CVEs associated with the hotfix + if (rocksdb::PinnableSlice hotfixesApplicationsSlice; + feedDatabase->get(hotfix.c_str(), hotfixesApplicationsSlice, HOTFIXES_APPLICATIONS_COLUMN)) { - // There is already a list of CVEs associated with the hotfix - std::string strData; - flatbuffers::GenText(parser, reinterpret_cast(cveList.data()), &strData); - jsonData = nlohmann::json::parse(strData); + // Deserialize the binary data into a FlatBuffers object + const auto hotfixesApplicationsData = + NSVulnerabilityScanner::GetHotfixesApplications(hotfixesApplicationsSlice.data()); + + // Convert the binary data into a offset vector + std::for_each(hotfixesApplicationsData->CVEs()->begin(), + hotfixesApplicationsData->CVEs()->end(), + [&builder, &cvesVector](const flatbuffers::String* cve) + { cvesVector.emplace_back(builder.CreateString(cve->str())); }); } - // Add the current CVE to the list - jsonData["CVEs"].push_back(currentCVE); - if (!parser.Parse(jsonData.dump().c_str())) - { - throw std::runtime_error("Unable to parse json data: " + parser.error_); - } + // Add the current CVE to the list of CVEs associated with the hotfix + cvesVector.emplace_back(builder.CreateString(data->cveMetadata()->cveId()->str())); - flatbuffers::FlatBufferBuilder builder; - rocksdb::Slice flatbufferData(reinterpret_cast(parser.builder_.GetBufferPointer()), - parser.builder_.GetSize()); + // Create a FlatBuffers object containing the list of CVEs + const auto updates = builder.CreateVector(cvesVector); + builder.Finish(NSVulnerabilityScanner::CreateHotfixesApplications(builder, updates)); - // Update the remediation with the new CVE list - feedDatabase->put(hotfix, flatbufferData, HOTFIXES_COLUMN); + // Update the database with the new list of CVEs + rocksdb::Slice newHotfixesApplications(reinterpret_cast(builder.GetBufferPointer()), + builder.GetSize()); + feedDatabase->put(hotfix, newHotfixesApplications, HOTFIXES_APPLICATIONS_COLUMN); } } @@ -160,21 +151,65 @@ class UpdateHotfixes final */ static void removeHotfix(const cve_v5::Entry* data, Utils::IRocksDBWrapper* feedDatabase) { - const auto remediations = data->containers()->cna()->x_remediations(); - if (!remediations) + if (!feedDatabase->columnExists(HOTFIXES_APPLICATIONS_COLUMN)) { - logDebug2(WM_VULNSCAN_LOGTAG, "No remediations available."); return; } - if (!feedDatabase->columnExists(HOTFIXES_COLUMN)) + const auto remediations = data->containers()->cna()->x_remediations(); + if (!remediations) { return; } - for (const auto& hotfix : hotfixesOnRemediations(remediations)) + const auto currentCVE = data->cveMetadata()->cveId()->str(); + const auto hotfixes = hotfixesOnRemediations(remediations); + + flatbuffers::FlatBufferBuilder builder; + for (const auto& hotfix : hotfixes) { - feedDatabase->delete_(hotfix, HOTFIXES_COLUMN); + std::vector> cvesVector; + + // Get the list of CVEs associated with the hotfix + if (rocksdb::PinnableSlice hotfixesApplicationsSlice; + feedDatabase->get(hotfix.c_str(), hotfixesApplicationsSlice, HOTFIXES_APPLICATIONS_COLUMN)) + { + // Deserialize the binary data into a FlatBuffers object + const auto hotfixesApplicationsData = + NSVulnerabilityScanner::GetHotfixesApplications(hotfixesApplicationsSlice.data()); + + // Convert the binary data into a 
offset vector + std::for_each(hotfixesApplicationsData->CVEs()->begin(), + hotfixesApplicationsData->CVEs()->end(), + [&builder, &cvesVector, ¤tCVE](const flatbuffers::String* cve) + { + if (cve->str() != currentCVE) + { + cvesVector.emplace_back(builder.CreateString(cve->str())); + } + }); + } + else + { + // If the hotfix is not in the database, skip it + continue; + } + + // If the list of CVEs is empty, delete whole entry + if (cvesVector.empty()) + { + feedDatabase->delete_(hotfix.c_str(), HOTFIXES_APPLICATIONS_COLUMN); + continue; + } + + // Create a FlatBuffers object containing the remaining CVEs + const auto updates = builder.CreateVector(cvesVector); + builder.Finish(NSVulnerabilityScanner::CreateHotfixesApplications(builder, updates)); + + // Update the database with the new list of CVEs + rocksdb::Slice newHotfixesApplications(reinterpret_cast(builder.GetBufferPointer()), + builder.GetSize()); + feedDatabase->put(hotfix, newHotfixesApplications, HOTFIXES_APPLICATIONS_COLUMN); } } }; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp index f5bb019bd62..c2837f6c8ef 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp @@ -14,8 +14,8 @@ #include "chainOfResponsability.hpp" #include "databaseFeedManager.hpp" -#include "hotfix_generated.h" -#include "hotfix_schema.h" +#include "hotfixes_applications_generated.h" +#include "hotfixes_applications_schema.h" #include "inventorySync.hpp" #include "remediationDataCache.hpp" #include "scanContext.hpp" @@ -57,7 +57,7 @@ class THotfixInsert final : public AbstractHandler */ std::shared_ptr handleRequest(std::shared_ptr data) override { - FlatbufferDataPair hotfixVulnerabilities; + FlatbufferDataPair hotfixVulnerabilities; const std::string hotfixId {data->hotfixId()}; if (hotfixId.empty()) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/inventorySync.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/inventorySync.hpp index 2ce6c01b915..9bd5bfc7b56 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/inventorySync.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/inventorySync.hpp @@ -21,12 +21,8 @@ constexpr auto OS = "os"; constexpr auto PACKAGE = "package"; constexpr auto OS_INITIAL_SCAN = "os_initial_scan"; -constexpr auto REMEDIATION = "remediation"; - const std::map AFFECTED_COMPONENT_COLUMNS = { - {AffectedComponentType::Os, OS}, - {AffectedComponentType::Package, PACKAGE}, - {AffectedComponentType::Hotfix, REMEDIATION}}; + {AffectedComponentType::Os, OS}, {AffectedComponentType::Package, PACKAGE}}; /** * @brief TInventorySync class. 
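For reference, a minimal standalone sketch (not part of the patch) of how the HotfixesApplications table introduced by this commit is built and read with the generated FlatBuffers API, before commit 098 replaces this scheme. The generated names (hotfixes_applications_generated.h, NSVulnerabilityScanner::CreateHotfixesApplications, GetHotfixesApplications) and the "hotfixes_applications" column come from the diff above; the main() harness, sample hotfix and CVE values are hypothetical:

#include "hotfixes_applications_generated.h" // generated from hotfixes_applications.fbs
#include <iostream>
#include <string>
#include <vector>

int main()
{
    // Serialize: associate a hotfix with the CVEs it remediates.
    flatbuffers::FlatBufferBuilder builder;
    std::vector<flatbuffers::Offset<flatbuffers::String>> cves {
        builder.CreateString("CVE-1111-1111"), builder.CreateString("CVE-2222-2222")};
    const auto cvesVector = builder.CreateVector(cves);
    builder.Finish(NSVulnerabilityScanner::CreateHotfixesApplications(builder, cvesVector));
    // The resulting buffer would be stored under the hotfix id (e.g. "KBT-800")
    // in the "hotfixes_applications" RocksDB column family.

    // Deserialize: read the CVE list back without any JSON parsing.
    const auto* table = NSVulnerabilityScanner::GetHotfixesApplications(builder.GetBufferPointer());
    for (const auto* cve : *table->CVEs())
    {
        std::cout << cve->str() << '\n';
    }
    return 0;
}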
diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp index d096fe91d09..8efa0645427 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp @@ -101,11 +101,9 @@ class TScanOrchestrator final : public TOSPrimitives indexerConnector, inventoryDatabase, reportDispatcher); - m_hotfixInsertOrchestration = TFactoryOrchestrator::create(ScannerType::HotfixInsert, - std::move(databaseFeedManager), - std::move(indexerConnector), - inventoryDatabase, - std::move(reportDispatcher)); + // coverity[copy_constructor_call] + m_hotfixInsertOrchestration = TFactoryOrchestrator::create( + ScannerType::HotfixInsert, databaseFeedManager, indexerConnector, inventoryDatabase, reportDispatcher); // Define the maximum size for the hostname constexpr auto MAX_HOSTNAME_SIZE = 256; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp index 607733ea133..7271b120620 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp @@ -411,14 +411,14 @@ TEST_F(UpdateHotfixesTest, UpdatesWindowsRemediationMultipleBlocks) "KBT-9000", }; - FlatbufferDataPair fbHotfix; + FlatbufferDataPair fbHotfix; const std::string expectedCveId = "CVE-2222-2222"; for (const auto& hotfix : hotfixes) { - EXPECT_TRUE(rocksDBWrapper->get(hotfix, fbHotfix.slice, HOTFIXES_COLUMN)); - fbHotfix.data = - const_cast(NSVulnerabilityScanner::Gethotfix(fbHotfix.slice.data())); + EXPECT_TRUE(rocksDBWrapper->get(hotfix, fbHotfix.slice, HOTFIXES_APPLICATIONS_COLUMN)); + fbHotfix.data = const_cast( + NSVulnerabilityScanner::GetHotfixesApplications(fbHotfix.slice.data())); EXPECT_EQ(fbHotfix.data->CVEs()->size(), 1); EXPECT_EQ(fbHotfix.data->CVEs()->Get(0)->str(), expectedCveId); } @@ -485,7 +485,7 @@ TEST_F(UpdateHotfixesTest, MultipleVulnerabilities) flatbuffers::Parser parser; parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); - // Parse the first entry + // Parse the second entry parser.Parse(JSON_CVE5_VALID_MULTIPLE_BLOCKS); // Prepare the vulnerability @@ -511,11 +511,11 @@ TEST_F(UpdateHotfixesTest, MultipleVulnerabilities) }; // Make sure the entries where inserted - FlatbufferDataPair hotfixInfo; + FlatbufferDataPair hotfixInfo; for (const auto& [hotfixIds, cveIds] : hotfixesToCVEs) { - EXPECT_TRUE(rocksDBWrapper->get(hotfixIds, hotfixInfo.slice, HOTFIXES_COLUMN)); - hotfixInfo.data = const_cast(Gethotfix(hotfixInfo.slice.data())); + EXPECT_TRUE(rocksDBWrapper->get(hotfixIds, hotfixInfo.slice, HOTFIXES_APPLICATIONS_COLUMN)); + hotfixInfo.data = const_cast(GetHotfixesApplications(hotfixInfo.slice.data())); EXPECT_EQ(hotfixInfo.data->CVEs()->size(), cveIds.size()); for (const auto& cveId : cveIds) { @@ -530,6 +530,9 @@ TEST_F(UpdateHotfixesTest, MultipleVulnerabilities) TEST_F(UpdateHotfixesTest, StoreAndRemove) { + // Create a mock RocksDBWrapper object + std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); + // Define schema variable and parse JSON object. std::string schemaStr; @@ -538,38 +541,73 @@ TEST_F(UpdateHotfixesTest, StoreAndRemove) // Parse schema. 
flatbuffers::Parser parser; - parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); - parser.Parse(JSON_CVE5_VALID_ONE_BLOCK); - // Create a test Entry object with Windows remediations - auto buffer = parser.builder_.GetBufferPointer(); - flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); - cve_v5::VerifyEntryBuffer(verifier); - auto entry = cve_v5::GetEntry(buffer); + // Store the following hotfixes: + // KBT-800, KBT-1000, KBT-3000, KBT-4000, KBT-5000, KBT-6000, KBT-7000, KBT-8000, KBT-9000 + { + // Parse schema. + flatbuffers::Parser parser; + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); - // Create a mock RocksDBWrapper object - std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); + // Parse the first entry + parser.Parse(JSON_CVE5_VALID_MULTIPLE_BLOCKS); - // Call the updateRemediation function with the test data - EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); + // Prepare the vulnerability + auto buffer = parser.builder_.GetBufferPointer(); + flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); + cve_v5::VerifyEntryBuffer(verifier); + auto entry = cve_v5::GetEntry(buffer); - // Make sure the entries where inserted - FlatbufferDataPair hotfixInfo; - std::vector hotfixes {"KBT-800", "KBT-1000", "KBT-3000"}; - for (const auto& hotfix : hotfixes) + // Insert the vulnerability + EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); + } + + // Store the following hotfixes: + // KBT-800, KBT-1000, KBT-3000 { - EXPECT_TRUE(rocksDBWrapper->get(hotfix, hotfixInfo.slice, HOTFIXES_COLUMN)); - hotfixInfo.data = - const_cast(NSVulnerabilityScanner::Gethotfix(hotfixInfo.slice.data())); - EXPECT_EQ(hotfixInfo.data->CVEs()->size(), 1); - EXPECT_EQ(hotfixInfo.data->CVEs()->Get(0)->str(), "CVE-1111-1111"); + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); + + // Parse the first entry + parser.Parse(JSON_CVE5_VALID_ONE_BLOCK); + + // Prepare the vulnerability + auto buffer = parser.builder_.GetBufferPointer(); + flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); + cve_v5::VerifyEntryBuffer(verifier); + auto entry = cve_v5::GetEntry(buffer); + + // Insert the vulnerability + EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); } - // Remove all hotfixes - EXPECT_NO_THROW(UpdateHotfixes::removeHotfix(entry, rocksDBWrapper.get())); + // Remove the following hotfixes: + // KBT-4000, KBT-5000, KBT-6000, KBT-7000, KBT-8000, KBT-9000 + { + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); + + // Parse the first entry + parser.Parse(JSON_CVE5_VALID_MULTIPLE_BLOCKS); + + // Prepare the vulnerability + auto buffer = parser.builder_.GetBufferPointer(); + flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); + cve_v5::VerifyEntryBuffer(verifier); + auto entry = cve_v5::GetEntry(buffer); + + // Remove the hotfixes + EXPECT_NO_THROW(UpdateHotfixes::removeHotfix(entry, rocksDBWrapper.get())); + } - // Check that the hotfixes were removed - EXPECT_TRUE(rocksDBWrapper->get(hotfixes.at(0), hotfixInfo.slice, HOTFIXES_COLUMN)); - EXPECT_FALSE(rocksDBWrapper->get(hotfixes.at(1), hotfixInfo.slice, HOTFIXES_COLUMN)); - EXPECT_FALSE(rocksDBWrapper->get(hotfixes.at(2), hotfixInfo.slice, HOTFIXES_COLUMN)); + // Remaining hotfixes: + // KBT-800, KBT-1000, KBT-3000 + rocksdb::PinnableSlice slice; + EXPECT_TRUE(rocksDBWrapper->get("KBT-800", slice, HOTFIXES_APPLICATIONS_COLUMN)); + 
EXPECT_TRUE(rocksDBWrapper->get("KBT-1000", slice, HOTFIXES_APPLICATIONS_COLUMN)); + EXPECT_TRUE(rocksDBWrapper->get("KBT-3000", slice, HOTFIXES_APPLICATIONS_COLUMN)); + EXPECT_FALSE(rocksDBWrapper->get("KBT-4000", slice, HOTFIXES_APPLICATIONS_COLUMN)); + EXPECT_FALSE(rocksDBWrapper->get("KBT-5000", slice, HOTFIXES_APPLICATIONS_COLUMN)); + EXPECT_FALSE(rocksDBWrapper->get("KBT-6000", slice, HOTFIXES_APPLICATIONS_COLUMN)); + EXPECT_FALSE(rocksDBWrapper->get("KBT-7000", slice, HOTFIXES_APPLICATIONS_COLUMN)); + EXPECT_FALSE(rocksDBWrapper->get("KBT-8000", slice, HOTFIXES_APPLICATIONS_COLUMN)); + EXPECT_FALSE(rocksDBWrapper->get("KBT-9000", slice, HOTFIXES_APPLICATIONS_COLUMN)); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp index 73c253d10f5..9f60a6ebf88 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp @@ -13,7 +13,7 @@ #define _UPDATE_HOTFIXES_TEST_HPP #include "../../src/databaseFeedManager/databaseFeedManager.hpp" #include "../../src/databaseFeedManager/updateHotfixes.hpp" -#include "hotfix_generated.h" +#include "hotfixes_applications_generated.h" #include "gmock/gmock.h" #include "gtest/gtest.h" From 0c380e4f2640544f2dc72f1a78025d5954705edc Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Tue, 7 May 2024 16:43:04 -0300 Subject: [PATCH 098/419] CL: - Updated implementation to improve performance - Removed unnecesary schemas - Aligned all modules with changes --- .../vulnerability_scanner/CMakeLists.txt | 1 - .../schemas/hotfixes_applications.fbs | 7 - .../databaseFeedManager.hpp | 29 +- .../databaseFeedManager/updateHotfixes.hpp | 166 +++-------- .../src/scanOrchestrator/hotfixInsert.hpp | 25 +- .../tests/unit/updateHotfixes_test.cpp | 265 ++++++++++-------- .../tests/unit/updateHotfixes_test.hpp | 1 - 7 files changed, 214 insertions(+), 280 deletions(-) delete mode 100644 src/wazuh_modules/vulnerability_scanner/schemas/hotfixes_applications.fbs diff --git a/src/wazuh_modules/vulnerability_scanner/CMakeLists.txt b/src/wazuh_modules/vulnerability_scanner/CMakeLists.txt index 4c4b886923c..162cdf1f477 100644 --- a/src/wazuh_modules/vulnerability_scanner/CMakeLists.txt +++ b/src/wazuh_modules/vulnerability_scanner/CMakeLists.txt @@ -62,7 +62,6 @@ list(APPEND Schemas vulnerabilityRemediations packageTranslation messageBuffer - hotfixes_applications ) message("Compiling schemas") diff --git a/src/wazuh_modules/vulnerability_scanner/schemas/hotfixes_applications.fbs b/src/wazuh_modules/vulnerability_scanner/schemas/hotfixes_applications.fbs deleted file mode 100644 index 84a7c63abd2..00000000000 --- a/src/wazuh_modules/vulnerability_scanner/schemas/hotfixes_applications.fbs +++ /dev/null @@ -1,7 +0,0 @@ -namespace NSVulnerabilityScanner; - -table HotfixesApplications { - CVEs:[string]; -} - -root_type HotfixesApplications; diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp index fad42c0d554..d5ff958fdf8 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp @@ -407,33 +407,22 @@ class TDatabaseFeedManager final : public Observer * stores it in the 'remediationInfo' object. 
* * @param hotfix hotfix id for which remediation information is requested. - * @param hotfixVulnerabilities A reference to a `FlatbufferDataPair` object where the retrieved hotfix to CVEs - * information will be stored. + * @param hotfixVulnerabilities Referene * * @throws std::runtime_error if the retrieved data from the database is invalid or not in the expected FlatBuffers * format. */ - void - getHotfixVulnerabilities(const std::string& hotfix, - FlatbufferDataPair& hotfixVulnerabilities) + std::unordered_set getHotfixVulnerabilities(const std::string& hotfix) { - // If the remediation information is not found in the database, we return because there is no remediation. - if (auto result = m_feedDatabase->get(hotfix, hotfixVulnerabilities.slice, HOTFIXES_APPLICATIONS_COLUMN); - !result) + std::unordered_set hotfixVulnerabilities; + if (m_feedDatabase->columnExists(HOTFIXES_APPLICATIONS_COLUMN)) { - return; - } - - const auto hotfixesApplicationsSlice = reinterpret_cast(hotfixVulnerabilities.slice.data()); - - // Verify the integrity of the FlatBuffers remediation data - if (flatbuffers::Verifier verifier(hotfixesApplicationsSlice, hotfixVulnerabilities.slice.size()); - !VerifyRemediationInfoBuffer(verifier)) - { - throw std::runtime_error("Error: Invalid FlatBuffers data in RocksDB."); + for (const auto& [key, value] : m_feedDatabase->seek(hotfix, HOTFIXES_APPLICATIONS_COLUMN)) + { + hotfixVulnerabilities.insert(key); + } } - - hotfixVulnerabilities.data = GetHotfixesApplications(hotfixesApplicationsSlice); + return hotfixVulnerabilities; } /** diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp index d5ae3a706e3..7049a7a211e 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/updateHotfixes.hpp @@ -14,8 +14,6 @@ #include "cve5_generated.h" #include "flatbuffers/idl.h" -#include "hotfixes_applications_generated.h" -#include "hotfixes_applications_schema.h" #include "loggerHelper.h" #include "rocksDBWrapper.hpp" #include "vulnerabilityScanner.hpp" @@ -27,41 +25,6 @@ */ class UpdateHotfixes final { -private: - /** - * @brief Get the list of hotfixes from the remediation data. - * - * @param remediations Pointer to the 'Remediations' object containing remediation information. - * @return Set of strings containing the list of hotfixes. - * - * @see Remediations - The data structure containing remediation information. - */ - static std::unordered_set hotfixesOnRemediations(const cve_v5::Remediations* remediations) - { - std::unordered_set hotfixes; - if (!remediations || !remediations->windows()) - { - return hotfixes; - } - - std::for_each(remediations->windows()->begin(), - remediations->windows()->end(), - [&hotfixes](const cve_v5::Remediation* remediation) - { - if (!remediation->anyOf()) - { - return; - } - - for (const auto hotfix : *remediation->anyOf()) - { - hotfixes.insert(hotfix->str()); - } - }); - - return hotfixes; - } - public: /** * @brief Update the hotfix information in the RocksDB Database @@ -79,14 +42,12 @@ class UpdateHotfixes final * * @details The function performs the following steps: * 1. Attempts to access remediation data for Windows from the 'Entry' object. - * 2. If remediation data is not available (empty), it logs an error message and returns. + * 2. If remediation data is not available (empty), it returns. * 3. 
Extracts the CVE identifier (CVE-ID) from the 'Entry' object. * 4. Iterates through the available remediation data for Windows: - * - Extracts the list of updates (CVEs) associated with the hotfix. - * - Creates a FlatBuffers object containing the list of CVEs. - * - Serializes the FlatBuffers object into binary data. - * - Stores the binary data in the RocksDB database, using the 'hotfix' as the key. - * 5. If an exception occurs during this process, it logs an error message. + * - For each remediation, it checks if any hotfixes are available. + * - If hotfixes are available, it iterates through each hotfix and stores it in the database. + * - The key for storing the hotfix is generated with the format '${hotfix}_${CVE-ID}'. * * @note This function assumes a specific data structure in the 'Entry' object, including nested objects. * Ensure that the 'Entry' object conforms to the expected schema to avoid runtime errors. @@ -102,45 +63,34 @@ class UpdateHotfixes final return; } - if (!feedDatabase->columnExists(HOTFIXES_APPLICATIONS_COLUMN)) + const auto windowsRemediations = remediations->windows(); + if (!windowsRemediations) { - feedDatabase->createColumn(HOTFIXES_APPLICATIONS_COLUMN); + return; } - flatbuffers::FlatBufferBuilder builder; - const auto hotfixes = hotfixesOnRemediations(remediations); - - for (const auto& hotfix : hotfixes) + if (!feedDatabase->columnExists(HOTFIXES_APPLICATIONS_COLUMN)) { - std::vector> cvesVector; - - // Get the list of CVEs associated with the hotfix - if (rocksdb::PinnableSlice hotfixesApplicationsSlice; - feedDatabase->get(hotfix.c_str(), hotfixesApplicationsSlice, HOTFIXES_APPLICATIONS_COLUMN)) - { - // Deserialize the binary data into a FlatBuffers object - const auto hotfixesApplicationsData = - NSVulnerabilityScanner::GetHotfixesApplications(hotfixesApplicationsSlice.data()); - - // Convert the binary data into a offset vector - std::for_each(hotfixesApplicationsData->CVEs()->begin(), - hotfixesApplicationsData->CVEs()->end(), - [&builder, &cvesVector](const flatbuffers::String* cve) - { cvesVector.emplace_back(builder.CreateString(cve->str())); }); - } + feedDatabase->createColumn(HOTFIXES_APPLICATIONS_COLUMN); + } - // Add the current CVE to the list of CVEs associated with the hotfix - cvesVector.emplace_back(builder.CreateString(data->cveMetadata()->cveId()->str())); + const auto cveId = data->cveMetadata()->cveId()->str(); - // Create a FlatBuffers object containing the list of CVEs - const auto updates = builder.CreateVector(cvesVector); - builder.Finish(NSVulnerabilityScanner::CreateHotfixesApplications(builder, updates)); + std::for_each(windowsRemediations->begin(), + windowsRemediations->end(), + [&feedDatabase, &cveId](const cve_v5::Remediation* remediation) + { + if (!remediation->anyOf()) + { + return; + } - // Update the database with the new list of CVEs - rocksdb::Slice newHotfixesApplications(reinterpret_cast(builder.GetBufferPointer()), - builder.GetSize()); - feedDatabase->put(hotfix, newHotfixesApplications, HOTFIXES_APPLICATIONS_COLUMN); - } + for (const auto hotfix : *remediation->anyOf()) + { + const auto key = hotfix->str() + "_" + cveId; + feedDatabase->put(key, "", HOTFIXES_APPLICATIONS_COLUMN); + } + }); } /** @@ -162,56 +112,28 @@ class UpdateHotfixes final return; } - const auto currentCVE = data->cveMetadata()->cveId()->str(); - const auto hotfixes = hotfixesOnRemediations(remediations); - - flatbuffers::FlatBufferBuilder builder; - for (const auto& hotfix : hotfixes) + const auto windowsRemediations = 
remediations->windows(); + if (!windowsRemediations) { - std::vector> cvesVector; - - // Get the list of CVEs associated with the hotfix - if (rocksdb::PinnableSlice hotfixesApplicationsSlice; - feedDatabase->get(hotfix.c_str(), hotfixesApplicationsSlice, HOTFIXES_APPLICATIONS_COLUMN)) - { - // Deserialize the binary data into a FlatBuffers object - const auto hotfixesApplicationsData = - NSVulnerabilityScanner::GetHotfixesApplications(hotfixesApplicationsSlice.data()); - - // Convert the binary data into a offset vector - std::for_each(hotfixesApplicationsData->CVEs()->begin(), - hotfixesApplicationsData->CVEs()->end(), - [&builder, &cvesVector, ¤tCVE](const flatbuffers::String* cve) - { - if (cve->str() != currentCVE) - { - cvesVector.emplace_back(builder.CreateString(cve->str())); - } - }); - } - else - { - // If the hotfix is not in the database, skip it - continue; - } - - // If the list of CVEs is empty, delete whole entry - if (cvesVector.empty()) - { - feedDatabase->delete_(hotfix.c_str(), HOTFIXES_APPLICATIONS_COLUMN); - continue; - } + return; + } - // Create a FlatBuffers object containing the remaining CVEs - const auto updates = builder.CreateVector(cvesVector); - builder.Finish(NSVulnerabilityScanner::CreateHotfixesApplications(builder, updates)); + const auto cveId = data->cveMetadata()->cveId()->str(); + std::for_each(windowsRemediations->begin(), + windowsRemediations->end(), + [&feedDatabase, &cveId](const cve_v5::Remediation* remediation) + { + if (!remediation->anyOf()) + { + return; + } - // Update the database with the new list of CVEs - rocksdb::Slice newHotfixesApplications(reinterpret_cast(builder.GetBufferPointer()), - builder.GetSize()); - feedDatabase->put(hotfix, newHotfixesApplications, HOTFIXES_APPLICATIONS_COLUMN); - } + for (const auto hotfix : *remediation->anyOf()) + { + const auto key = hotfix->str() + "_" + cveId; + feedDatabase->delete_(key, HOTFIXES_APPLICATIONS_COLUMN); + } + }); } }; - #endif // _UPDATE_HOTFIXES_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp index c2837f6c8ef..6666cdabbfe 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp @@ -14,11 +14,11 @@ #include "chainOfResponsability.hpp" #include "databaseFeedManager.hpp" -#include "hotfixes_applications_generated.h" -#include "hotfixes_applications_schema.h" #include "inventorySync.hpp" #include "remediationDataCache.hpp" #include "scanContext.hpp" +#include "stringHelper.h" +#include /** * @brief TEventInsertInventory class. 
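For reference, a minimal standalone sketch (not part of the patch) of the key layout this commit switches to: one key per (hotfix, CVE) pair in the form '${hotfix}_${CVE-ID}' with an empty value, later recovered with a prefix seek. A std::map stands in for the "hotfixes_applications" RocksDB column (the real code uses Utils::RocksDBWrapper::put()/seek() as shown above); the sample identifiers are illustrative:

#include <iostream>
#include <map>
#include <string>

int main()
{
    std::map<std::string, std::string> column; // stand-in for the RocksDB column: key -> empty value

    // storeVulnerabilityHotfixes(): one key per (hotfix, CVE) pair.
    const std::string cveId {"CVE-2222-2222"};
    for (const std::string hotfix : {"KBT-800", "KBT-1000"})
    {
        column.emplace(hotfix + "_" + cveId, "");
    }

    // getHotfixVulnerabilities("KBT-800"): prefix seek over the column.
    const std::string prefix {"KBT-800"};
    for (auto it = column.lower_bound(prefix);
         it != column.end() && it->first.compare(0, prefix.size(), prefix) == 0;
         ++it)
    {
        // hotfixInsert.hpp then strips "<hotfix>_" to recover the CVE id.
        std::cout << it->first.substr(prefix.size() + 1) << '\n'; // prints CVE-2222-2222
    }
    return 0;
}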
@@ -57,8 +57,6 @@ class THotfixInsert final : public AbstractHandler */ std::shared_ptr handleRequest(std::shared_ptr data) override { - FlatbufferDataPair hotfixVulnerabilities; - const std::string hotfixId {data->hotfixId()}; if (hotfixId.empty()) { @@ -67,20 +65,21 @@ class THotfixInsert final : public AbstractHandler } logDebug2(WM_VULNSCAN_LOGTAG, "Getting associated vulnerabilities for hotfix '%s'", hotfixId.c_str()); - m_databaseFeedManager->getHotfixVulnerabilities(hotfixId, hotfixVulnerabilities); - if (hotfixVulnerabilities.data == nullptr || hotfixVulnerabilities.data->CVEs() == nullptr || - hotfixVulnerabilities.data->CVEs()->size() == 0) + const std::unordered_set hotfixVulnerabilities = + m_databaseFeedManager->getHotfixVulnerabilities(hotfixId); + + if (hotfixVulnerabilities.empty()) { - logDebug2(WM_VULNSCAN_LOGTAG, "No CVEs associated for the installed hotfix '%s'", hotfixId); + logDebug2(WM_VULNSCAN_LOGTAG, "No vulnerabilities associated"); return nullptr; } - // For each CVE, insert the inventory entry - for (const auto& cve : *hotfixVulnerabilities.data->CVEs()) + for (const auto& hotfixVulnerability : hotfixVulnerabilities) { - // Add all CVEs to the deletetion list - logDebug2(WM_VULNSCAN_LOGTAG, "CVE '%s' was remediated by hotfix '%s'", cve->str().c_str(), hotfixId); - data->m_elements[cve->str()] = nlohmann::json::object(); + const auto cveId = Utils::leftTrim(hotfixVulnerability, "_"); + + logDebug2(WM_VULNSCAN_LOGTAG, "CVE '%s' was remediated by hotfix '%s'", cveId, hotfixId.c_str()); + data->m_elements[cveId] = nlohmann::json::object(); } return AbstractHandler>::handleRequest(std::move(data)); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp index 7271b120620..0e787964eca 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp @@ -375,79 +375,79 @@ void UpdateHotfixesTest::TearDown() TEST_F(UpdateHotfixesTest, UpdatesWindowsRemediationMultipleBlocks) { - // Define schema variable and parse JSON object. - std::string schemaStr; + // Create a mock RocksDBWrapper object + std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); - // Load file with schema. + // Parse the test data + std::string schemaStr; flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); - // Parse schema. flatbuffers::Parser parser; parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); parser.Parse(JSON_CVE5_VALID_MULTIPLE_BLOCKS); - // Create a test Entry object with Windows remediations + // Create and verify the Entry object. 
auto buffer = parser.builder_.GetBufferPointer(); flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); cve_v5::VerifyEntryBuffer(verifier); - auto entry = cve_v5::GetEntry(buffer); - // Create a mock RocksDBWrapper object - std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); + const auto entry = cve_v5::GetEntry(buffer); // Call the updateRemediation function with the test data EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); + const auto cveId = entry->cveMetadata()->cveId()->str(); + // Validate the data was inserted - std::vector hotfixes { - "KBT-800", - "KBT-1000", - "KBT-3000", - "KBT-4000", - "KBT-5000", - "KBT-6000", - "KBT-7000", - "KBT-8000", - "KBT-9000", - }; - - FlatbufferDataPair fbHotfix; - - const std::string expectedCveId = "CVE-2222-2222"; - for (const auto& hotfix : hotfixes) + std::vector keys {"KBT-800_" + cveId, + "KBT-1000_" + cveId, + "KBT-3000_" + cveId, + "KBT-4000_" + cveId, + "KBT-5000_" + cveId, + "KBT-6000_" + cveId, + "KBT-7000_" + cveId, + "KBT-8000_" + cveId, + "KBT-9000_" + cveId}; + + for (const auto& key : keys) { - EXPECT_TRUE(rocksDBWrapper->get(hotfix, fbHotfix.slice, HOTFIXES_APPLICATIONS_COLUMN)); - fbHotfix.data = const_cast( - NSVulnerabilityScanner::GetHotfixesApplications(fbHotfix.slice.data())); - EXPECT_EQ(fbHotfix.data->CVEs()->size(), 1); - EXPECT_EQ(fbHotfix.data->CVEs()->Get(0)->str(), expectedCveId); + rocksdb::PinnableSlice slice; + EXPECT_TRUE(rocksDBWrapper->get(key, slice, HOTFIXES_APPLICATIONS_COLUMN)) << "Unable to find key: " << key; } } TEST_F(UpdateHotfixesTest, SkipsEmptyUpdates) { - // Define schema variable and parse JSON object. - std::string schemaStr; + // Create a mock RocksDBWrapper object + std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); - // Load file with schema. + // Parse the test data + std::string schemaStr; flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); - // Parse schema. flatbuffers::Parser parser; parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); parser.Parse(JSON_CVE5_EMPTY_UPDATES); - // Create a test Entry object with Windows remediations + // Create and verify the Entry object. auto buffer = parser.builder_.GetBufferPointer(); flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); cve_v5::VerifyEntryBuffer(verifier); - auto entry = cve_v5::GetEntry(buffer); - // Create a mock RocksDBWrapper object - std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); + const auto entry = cve_v5::GetEntry(buffer); // Call the updateRemediation function with the test data EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); + + // Create the key list from the entry + const auto cveId = entry->cveMetadata()->cveId()->str(); + std::vector nonExpectedKeys {"KBT-800_" + cveId, "KBT-1000_" + cveId, "KBT-3000_" + cveId}; + + for (const auto& key : nonExpectedKeys) + { + rocksdb::PinnableSlice slice; + EXPECT_FALSE(rocksDBWrapper->get(key, slice, HOTFIXES_APPLICATIONS_COLUMN)) << "Unable to find key: " << key; + } } TEST_F(UpdateHotfixesTest, MultipleVulnerabilities) @@ -455,76 +455,68 @@ TEST_F(UpdateHotfixesTest, MultipleVulnerabilities) // Create a mock RocksDBWrapper object std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); - // Define schema variable and parse JSON object. - std::string schemaStr; - - // Load file with schema. - flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); - - // Parse schema. 
- flatbuffers::Parser parser; + // Expected keys to be inserted + std::set expectedKeys {}; { - parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); + // Parse the test data + std::string schemaStr; + flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); - // Parse the first entry + flatbuffers::Parser parser; + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); parser.Parse(JSON_CVE5_VALID_ONE_BLOCK); - // Prepare the vulnerability + // Create and verify the Entry object. auto buffer = parser.builder_.GetBufferPointer(); flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); cve_v5::VerifyEntryBuffer(verifier); - auto entry = cve_v5::GetEntry(buffer); + + const auto entry = cve_v5::GetEntry(buffer); // Insert the vulnerability EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); + + // Add the key to the list + const auto cveId = entry->cveMetadata()->cveId()->str(); + expectedKeys.insert("KBT-800_" + cveId); + expectedKeys.insert("KBT-1000_" + cveId); + expectedKeys.insert("KBT-3000_" + cveId); } { - // Parse schema. + // Parse the test data + std::string schemaStr; + flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); + flatbuffers::Parser parser; parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); - - // Parse the second entry parser.Parse(JSON_CVE5_VALID_MULTIPLE_BLOCKS); - // Prepare the vulnerability + // Create and verify the Entry object. auto buffer = parser.builder_.GetBufferPointer(); flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); cve_v5::VerifyEntryBuffer(verifier); - auto entry = cve_v5::GetEntry(buffer); + + const auto entry = cve_v5::GetEntry(buffer); // Insert the vulnerability EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); + + // Add the key to the list + const auto cveId = entry->cveMetadata()->cveId()->str(); + expectedKeys.insert("KBT-4000_" + cveId); + expectedKeys.insert("KBT-5000_" + cveId); + expectedKeys.insert("KBT-6000_" + cveId); + expectedKeys.insert("KBT-7000_" + cveId); + expectedKeys.insert("KBT-8000_" + cveId); + expectedKeys.insert("KBT-9000_" + cveId); } - // Hotfix to CVE mapping - std::map> hotfixesToCVEs { - {"KBT-800", {"CVE-1111-1111", "CVE-2222-2222"}}, - {"KBT-3000", {"CVE-1111-1111", "CVE-2222-2222"}}, - {"KBT-4000", {"CVE-2222-2222"}}, - {"KBT-5000", {"CVE-2222-2222"}}, - {"KBT-6000", {"CVE-2222-2222"}}, - {"KBT-7000", {"CVE-2222-2222"}}, - {"KBT-8000", {"CVE-2222-2222"}}, - {"KBT-9000", {"CVE-2222-2222"}}, - }; - - // Make sure the entries where inserted - FlatbufferDataPair hotfixInfo; - for (const auto& [hotfixIds, cveIds] : hotfixesToCVEs) + rocksdb::PinnableSlice slice; + for (const auto& key : expectedKeys) { - EXPECT_TRUE(rocksDBWrapper->get(hotfixIds, hotfixInfo.slice, HOTFIXES_APPLICATIONS_COLUMN)); - hotfixInfo.data = const_cast(GetHotfixesApplications(hotfixInfo.slice.data())); - EXPECT_EQ(hotfixInfo.data->CVEs()->size(), cveIds.size()); - for (const auto& cveId : cveIds) - { - // Check that the cve is in the list - EXPECT_TRUE(std::find_if(hotfixInfo.data->CVEs()->begin(), - hotfixInfo.data->CVEs()->end(), - [&cveId](const flatbuffers::String* str) - { return str->str() == cveId; }) != hotfixInfo.data->CVEs()->end()); - } + EXPECT_TRUE(rocksDBWrapper->get(key, slice, HOTFIXES_APPLICATIONS_COLUMN)) << "Unable to find key: " << key; } } @@ -533,81 +525,122 @@ TEST_F(UpdateHotfixesTest, StoreAndRemove) // Create a mock RocksDBWrapper object std::unique_ptr rocksDBWrapper = 
std::make_unique(DATABASE_PATH); - // Define schema variable and parse JSON object. - std::string schemaStr; - - // Load file with schema. - flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); - - // Parse schema. - flatbuffers::Parser parser; + // Expected and non expected keys + std::unordered_set expectedKeys {}; + std::unordered_set nonExpectedKeys {}; - // Store the following hotfixes: - // KBT-800, KBT-1000, KBT-3000, KBT-4000, KBT-5000, KBT-6000, KBT-7000, KBT-8000, KBT-9000 { - // Parse schema. + // Parse the test data + std::string schemaStr; + flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); + flatbuffers::Parser parser; parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); - - // Parse the first entry parser.Parse(JSON_CVE5_VALID_MULTIPLE_BLOCKS); - // Prepare the vulnerability + // Create and verify the Entry object. auto buffer = parser.builder_.GetBufferPointer(); flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); cve_v5::VerifyEntryBuffer(verifier); - auto entry = cve_v5::GetEntry(buffer); + + const auto entry = cve_v5::GetEntry(buffer); // Insert the vulnerability EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); + + // Update the expected keys + const auto cveId = entry->cveMetadata()->cveId()->str(); + expectedKeys.insert("KBT-800_" + cveId); + expectedKeys.insert("KBT-1000_" + cveId); + expectedKeys.insert("KBT-3000_" + cveId); + expectedKeys.insert("KBT-4000_" + cveId); + expectedKeys.insert("KBT-5000_" + cveId); + expectedKeys.insert("KBT-6000_" + cveId); + expectedKeys.insert("KBT-7000_" + cveId); + expectedKeys.insert("KBT-8000_" + cveId); + expectedKeys.insert("KBT-9000_" + cveId); } - // Store the following hotfixes: - // KBT-800, KBT-1000, KBT-3000 { - parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); + // Parse the test data + std::string schemaStr; + flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); - // Parse the first entry + flatbuffers::Parser parser; + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); parser.Parse(JSON_CVE5_VALID_ONE_BLOCK); - // Prepare the vulnerability + // Create and verify the Entry object. auto buffer = parser.builder_.GetBufferPointer(); flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); cve_v5::VerifyEntryBuffer(verifier); - auto entry = cve_v5::GetEntry(buffer); + + const auto entry = cve_v5::GetEntry(buffer); // Insert the vulnerability EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); + + // Update the expected keys + const auto cveId = entry->cveMetadata()->cveId()->str(); + expectedKeys.insert("KBT-800_" + cveId); + expectedKeys.insert("KBT-1000_" + cveId); + expectedKeys.insert("KBT-3000_" + cveId); } - // Remove the following hotfixes: - // KBT-4000, KBT-5000, KBT-6000, KBT-7000, KBT-8000, KBT-9000 { - parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); + // Parse the test data + std::string schemaStr; + flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); - // Parse the first entry + flatbuffers::Parser parser; + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); parser.Parse(JSON_CVE5_VALID_MULTIPLE_BLOCKS); - // Prepare the vulnerability + // Create and verify the Entry object. 
auto buffer = parser.builder_.GetBufferPointer(); flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); cve_v5::VerifyEntryBuffer(verifier); - auto entry = cve_v5::GetEntry(buffer); + + const auto entry = cve_v5::GetEntry(buffer); // Remove the hotfixes EXPECT_NO_THROW(UpdateHotfixes::removeHotfix(entry, rocksDBWrapper.get())); + + // Update the non expected keys + const auto cveId = entry->cveMetadata()->cveId()->str(); + nonExpectedKeys.insert("KBT-800_" + cveId); + nonExpectedKeys.insert("KBT-1000_" + cveId); + nonExpectedKeys.insert("KBT-3000_" + cveId); + nonExpectedKeys.insert("KBT-4000_" + cveId); + nonExpectedKeys.insert("KBT-5000_" + cveId); + nonExpectedKeys.insert("KBT-6000_" + cveId); + nonExpectedKeys.insert("KBT-7000_" + cveId); + nonExpectedKeys.insert("KBT-8000_" + cveId); + nonExpectedKeys.insert("KBT-9000_" + cveId); + + // Update the expected keys + expectedKeys.erase("KBT-800_" + cveId); + expectedKeys.erase("KBT-1000_" + cveId); + expectedKeys.erase("KBT-3000_" + cveId); + expectedKeys.erase("KBT-4000_" + cveId); + expectedKeys.erase("KBT-5000_" + cveId); + expectedKeys.erase("KBT-6000_" + cveId); + expectedKeys.erase("KBT-7000_" + cveId); + expectedKeys.erase("KBT-8000_" + cveId); + expectedKeys.erase("KBT-9000_" + cveId); } // Remaining hotfixes: // KBT-800, KBT-1000, KBT-3000 rocksdb::PinnableSlice slice; - EXPECT_TRUE(rocksDBWrapper->get("KBT-800", slice, HOTFIXES_APPLICATIONS_COLUMN)); - EXPECT_TRUE(rocksDBWrapper->get("KBT-1000", slice, HOTFIXES_APPLICATIONS_COLUMN)); - EXPECT_TRUE(rocksDBWrapper->get("KBT-3000", slice, HOTFIXES_APPLICATIONS_COLUMN)); - EXPECT_FALSE(rocksDBWrapper->get("KBT-4000", slice, HOTFIXES_APPLICATIONS_COLUMN)); - EXPECT_FALSE(rocksDBWrapper->get("KBT-5000", slice, HOTFIXES_APPLICATIONS_COLUMN)); - EXPECT_FALSE(rocksDBWrapper->get("KBT-6000", slice, HOTFIXES_APPLICATIONS_COLUMN)); - EXPECT_FALSE(rocksDBWrapper->get("KBT-7000", slice, HOTFIXES_APPLICATIONS_COLUMN)); - EXPECT_FALSE(rocksDBWrapper->get("KBT-8000", slice, HOTFIXES_APPLICATIONS_COLUMN)); - EXPECT_FALSE(rocksDBWrapper->get("KBT-9000", slice, HOTFIXES_APPLICATIONS_COLUMN)); + + for (const auto& key : expectedKeys) + { + EXPECT_TRUE(rocksDBWrapper->get(key, slice, HOTFIXES_APPLICATIONS_COLUMN)) << "Unable to find key: " << key; + } + + for (const auto& key : nonExpectedKeys) + { + EXPECT_FALSE(rocksDBWrapper->get(key, slice, HOTFIXES_APPLICATIONS_COLUMN)) << "Found key: " << key; + } } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp index 9f60a6ebf88..66153fe464f 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.hpp @@ -13,7 +13,6 @@ #define _UPDATE_HOTFIXES_TEST_HPP #include "../../src/databaseFeedManager/databaseFeedManager.hpp" #include "../../src/databaseFeedManager/updateHotfixes.hpp" -#include "hotfixes_applications_generated.h" #include "gmock/gmock.h" #include "gtest/gtest.h" From 213761ea5cee0797287891dad72d47a8cdaa39f1 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Tue, 7 May 2024 19:37:49 -0300 Subject: [PATCH 099/419] CL: - Fixed doxygen --- .../src/databaseFeedManager/databaseFeedManager.hpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp 
b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp index d5ff958fdf8..d5cd28212ff 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp @@ -407,7 +407,8 @@ class TDatabaseFeedManager final : public Observer * stores it in the 'remediationInfo' object. * * @param hotfix hotfix id for which remediation information is requested. - * @param hotfixVulnerabilities Referene + * + * @return An unordered set containing the CVEs associated with the provided hotfix. * * @throws std::runtime_error if the retrieved data from the database is invalid or not in the expected FlatBuffers * format. From efe01501124cb2333c06bfdb994a00298e0183cf Mon Sep 17 00:00:00 2001 From: pereyra-m Date: Wed, 8 May 2024 04:23:01 +0000 Subject: [PATCH 100/419] Creating specific chain steps for hotfix insert orchestration --- .../cveSolvedAlertDetailsBuilder.hpp | 111 +++++++++++++++++ .../scanOrchestrator/cveSolvedInventorySync | 114 ++++++++++++++++++ .../src/scanOrchestrator/eventSendReport.hpp | 8 ++ .../scanOrchestrator/factoryOrchestrator.hpp | 7 +- .../src/scanOrchestrator/hotfixInsert.hpp | 7 +- .../src/scanOrchestrator/resultIndexer.hpp | 23 +++- 6 files changed, 261 insertions(+), 9 deletions(-) create mode 100644 src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cveSolvedAlertDetailsBuilder.hpp create mode 100644 src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cveSolvedInventorySync diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cveSolvedAlertDetailsBuilder.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cveSolvedAlertDetailsBuilder.hpp new file mode 100644 index 00000000000..ae4f68bc5a3 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cveSolvedAlertDetailsBuilder.hpp @@ -0,0 +1,111 @@ +/* + * Wazuh Vulnerability scanner - Scan Orchestrator + * Copyright (C) 2015, Wazuh Inc. + * May 6, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#ifndef _CVE_SOLVED_ALERT_DETAILS_BUILDER_HPP +#define _CVE_SOLVED_ALERT_DETAILS_BUILDER_HPP + +#include "chainOfResponsability.hpp" +#include "databaseFeedManager.hpp" +#include "numericHelper.h" +#include "scanContext.hpp" + +/** + * @brief Class in charge of building the solved alert details. + * @tparam TScanContext scan context type. + */ +template +class TCVESolvedAlertDetailsBuilder final : public AbstractHandler> +{ +private: + std::shared_ptr m_databaseFeedManager; + +public: + // LCOV_EXCL_START + /** + * @brief Construct a new CVE Solved Alert Details Builder object + * + * @param databaseFeedManager Database feed manager instance. + */ + explicit TCVESolvedAlertDetailsBuilder(std::shared_ptr& databaseFeedManager) + : m_databaseFeedManager(databaseFeedManager) + { + } + + /** + * @brief Class destructor. + * + */ + ~TCVESolvedAlertDetailsBuilder() = default; + // LCOV_EXCL_STOP + + /** + * @brief Handles request and passes control to the next step of the chain. + * + * @param data Scan context. + * @return std::shared_ptr Abstract handler. + */ + std::shared_ptr handleRequest(std::shared_ptr data) override + { + // We only generate alerts for real time events, aka dbsync deltas. 
+ if (data->messageType() == MessageType::Delta) + { + for (const auto& [cve, elements] : data->m_elements) + { + if (elements.empty()) + { + continue; + } + FlatbufferDataPair returnData; + m_databaseFeedManager->getVulnerabiltyDescriptiveInformation(cve, returnData); + + if (returnData.data) + { + const std::string cvssVersion {returnData.data->scoreVersion()->str()}; + const std::string scoreVersion {"cvss" + cvssVersion.substr(0, 1)}; + nlohmann::json json; + + json["vulnerability"]["status"] = "Solved"; + + std::string title {cve}; + title.append(" affecting "); + title.append("one or many packages"); + title.append(" was solved by "); + title.append(data->hotfixId()); + json["vulnerability"]["title"] = title; + // This improves the description of the alert without creating another rule + json["vulnerability"]["package"]["name"] = "one or many packages"; + + json["vulnerability"]["cve"] = cve; + if (!cvssVersion.empty()) + { + json["vulnerability"]["cvss"][scoreVersion]["base_score"] = + Utils::floatToDoubleRound(returnData.data->scoreBase(), 2); + } + json["vulnerability"]["enumeration"] = "CVE"; + json["vulnerability"]["published"] = returnData.data->datePublished()->str(); + json["vulnerability"]["reference"] = returnData.data->reference()->str(); + json["vulnerability"]["severity"] = Utils::toSentenceCase(returnData.data->severity()->str()); + + // The title is different depending on the type of the alert. + json["vulnerability"]["type"] = "Packages"; + json["vulnerability"]["updated"] = returnData.data->dateUpdated()->str(); + + data->m_alerts[cve] = std::move(json); + } + } + } + return AbstractHandler>::handleRequest(std::move(data)); + } +}; + +using CVESolvedAlertDetailsBuilder = TCVESolvedAlertDetailsBuilder<>; + +#endif // _CVE_SOLVED_ALERT_DETAILS_BUILDER_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cveSolvedInventorySync b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cveSolvedInventorySync new file mode 100644 index 00000000000..2cfb410b687 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cveSolvedInventorySync @@ -0,0 +1,114 @@ +/* + * Wazuh Vulnerability scanner - Scan Orchestrator + * Copyright (C) 2015, Wazuh Inc. + * May 6, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#ifndef _CVE_SOLVED_INVENTORY_SYNC_HPP +#define _CVE_SOLVED_INVENTORY_SYNC_HPP + +#include "chainOfResponsability.hpp" +#include "inventorySync.hpp" +#include "scanContext.hpp" + +/** + * @brief Class in charge of synchronizing the agent's inventory with the CVEs solved. + * + * @param TScanContext Scan context. + */ +template +class TCVESolvedInventorySync final + : public AbstractHandler> + , public InventorySync +{ + +public: + // LCOV_EXCL_START + /** + * @brief Construct a new TCVESolvedInventorySync object. + * + * @param inventoryDatabase Inventory database. + */ + explicit TCVESolvedInventorySync(Utils::RocksDBWrapper& inventoryDatabase) + : InventorySync(inventoryDatabase) + { + } + // LCOV_EXCL_STOP + + /** + * @brief Handles request and passes control to the next step of the chain. + * + * @param data Scan context. + * @return std::shared_ptr Abstract handler. 
+ */ + std::shared_ptr handleRequest(std::shared_ptr data) override + { + // We look for the CVEs in the agent's vulnerabilities inventory + const auto& column = AFFECTED_COMPONENT_COLUMNS.at(AffectedComponentType::Package); + + for (const auto& [key, value] : TInventorySync::m_inventoryDatabase.seek(data->agentId(), column)) + { + auto inventory = Utils::split(value.ToString(), ','); + auto currentSize = inventory.size(); + for (auto& [cve, json] : data->m_elements) + { + inventory.erase( + std::remove_if(inventory.begin(), + inventory.end(), + [&](const std::string& item) + { + if (item == cve) + { + std::string elementKey; + elementKey.append(key); + elementKey.append("_"); + elementKey.append(cve); + + logDebug2(WM_VULNSCAN_LOGTAG, + "CVE '%s' was remediated by hotfix '%s' for '%s'.", + cve.c_str(), + data->hotfixId().data(), + elementKey.c_str()); + json.push_back(std::move( + TInventorySync::buildElement("DELETED", elementKey))); + + return true; + } + + return false; + }), + inventory.end()); + } + + if (inventory.empty()) + { + logDebug2(WM_VULNSCAN_LOGTAG, "Deleting agent element key: %s", key.c_str()); + m_inventoryDatabase.delete_(key, column); + } + else if (inventory.size() != currentSize) + { + std::string insertListString; + for (const auto& cve : inventory) + { + insertListString.append(cve); + insertListString.append(","); + } + insertListString.pop_back(); + logDebug2( + WM_VULNSCAN_LOGTAG, "Updating agent element key: %s -> %s", key.c_str(), insertListString.c_str()); + m_inventoryDatabase.put(key, insertListString, column); + } + } + + return AbstractHandler>::handleRequest(std::move(data)); + } +}; + +using CVESolvedInventorySync = TCVESolvedInventorySync<>; + +#endif // _CVE_SOLVED_INVENTORY_SYNC_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventSendReport.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventSendReport.hpp index 08b3b3c0e3e..408915d2aa3 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventSendReport.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventSendReport.hpp @@ -84,6 +84,14 @@ class TEventSendReport final : public AbstractHandlerosName().data(), key.c_str()); } + else if (data->affectedComponentType() == AffectedComponentType::Hotfix) + { + logDebug2(WM_VULNSCAN_LOGTAG, + "Vulnerability report for agent ID %s, hotfix: %s, cve: %s", + data->agentId().data(), + data->hotfixId().data(), + key.c_str()); + } else { logWarn(WM_VULNSCAN_LOGTAG, diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp index 12aff4e0c1a..9ff3ed9ca9b 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp @@ -19,6 +19,8 @@ #include "cleanAgentInventory.hpp" #include "cleanInventory.hpp" #include "clearSendReport.hpp" +#include "cveSolvedAlertDetailsBuilder.hpp" +#include "cveSolvedInventorySync" #include "eventDeleteInventory.hpp" #include "eventDetailsBuilder.hpp" #include "eventInsertInventory.hpp" @@ -41,6 +43,7 @@ template(databaseFeedManager); - orchestration->setLast(std::make_shared(inventoryDatabase)); + orchestration->setLast(std::make_shared(inventoryDatabase)); + orchestration->setLast(std::make_shared(databaseFeedManager)); orchestration->setLast(std::make_shared(reportDispatcher)); 
orchestration->setLast(std::make_shared(indexerConnector)); break; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp index 6666cdabbfe..b367733e7ff 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/hotfixInsert.hpp @@ -70,16 +70,15 @@ class THotfixInsert final : public AbstractHandler if (hotfixVulnerabilities.empty()) { - logDebug2(WM_VULNSCAN_LOGTAG, "No vulnerabilities associated"); + logDebug2(WM_VULNSCAN_LOGTAG, "No vulnerabilities associated to hotfix '%s'", hotfixId.c_str()); return nullptr; } for (const auto& hotfixVulnerability : hotfixVulnerabilities) { - const auto cveId = Utils::leftTrim(hotfixVulnerability, "_"); + const auto cveId = hotfixVulnerability.substr(hotfixId.size() + 1); - logDebug2(WM_VULNSCAN_LOGTAG, "CVE '%s' was remediated by hotfix '%s'", cveId, hotfixId.c_str()); - data->m_elements[cveId] = nlohmann::json::object(); + data->m_elements[cveId] = nlohmann::json::array(); } return AbstractHandler>::handleRequest(std::move(data)); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp index 2cedd0553b2..01a1c01611c 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp @@ -51,16 +51,31 @@ class TResultIndexer final : public AbstractHandlerpublish(result.dump()); + } + else + { + logError(WM_VULNSCAN_LOGTAG, "Invalid element to publish: %s", result.dump().c_str()); + } + }; + for (const auto& [key, value] : data->m_elements) { - logDebug2(WM_VULNSCAN_LOGTAG, "Processing and publish key: %s", key.c_str()); - if (value.contains("operation") && value.contains("id")) + if (value.is_array()) { - m_indexerConnector->publish(value.dump()); + for (const auto& element : value) + { + resultCallback(element, key); + } } else { - logError(WM_VULNSCAN_LOGTAG, "Invalid element to publish: %s", value.dump().c_str()); + resultCallback(value, key); } } } From a30db7958a9ebbcf0707c7869a749753e4846661 Mon Sep 17 00:00:00 2001 From: pereyra-m Date: Wed, 8 May 2024 07:28:57 +0000 Subject: [PATCH 101/419] CL: - Updating tests with new remediation cache --- .../scanOrchestrator/factoryOrchestrator.hpp | 1 + .../tests/mocks/MockFactoryOrchestrator.hpp | 1 + .../tests/mocks/MockRemediationDataCache.hpp | 56 ++++ .../mocks/TrampolineRemediationDataCache.hpp | 61 ++++ .../tests/mocks/TrampolineScanContext.hpp | 2 +- .../tests/unit/alertClearBuilder_test.cpp | 16 +- .../unit/buildAllAgentListContext_test.cpp | 15 +- .../tests/unit/cleanAgentInventory_test.cpp | 41 ++- .../tests/unit/cleanInventory_test.cpp | 22 +- .../tests/unit/clearSendReport_test.cpp | 45 ++- .../tests/unit/clearSendReport_test.hpp | 3 + .../tests/unit/databaseFeedManager_test.h | 1 + .../tests/unit/eventDeleteInventory_test.cpp | 17 +- .../tests/unit/eventDetailsBuilder_test.cpp | 50 +++- .../tests/unit/eventInsertInventory_test.cpp | 29 +- .../eventPackageAlertDetailsBuilder_test.cpp | 62 +++- .../tests/unit/eventSendReport_test.cpp | 53 +++- .../tests/unit/eventSendReport_test.hpp | 3 + .../tests/unit/factoryOrchestrator_test.cpp | 54 +++- .../tests/unit/packageScanner_test.cpp | 273 +++++++++++++----- .../tests/unit/resultIndexer_test.cpp | 44 ++- 
.../tests/unit/scanAgentList_test.cpp | 64 ++-- .../tests/unit/scanContext_test.cpp | 69 +++-- .../tests/unit/scanOrchestrator_test.cpp | 93 +++++- .../unit/scanOsAlertDetailsBuilder_test.cpp | 35 ++- 25 files changed, 852 insertions(+), 258 deletions(-) create mode 100644 src/wazuh_modules/vulnerability_scanner/tests/mocks/MockRemediationDataCache.hpp create mode 100644 src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineRemediationDataCache.hpp diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp index 9ff3ed9ca9b..f4aa6c1a5f5 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp @@ -120,6 +120,7 @@ class TFactoryOrchestrator final orchestration->setLast(std::make_shared(inventoryDatabase)); orchestration->setLast(std::make_shared(databaseFeedManager)); orchestration->setLast(std::make_shared(databaseFeedManager)); + orchestration->setLast(std::make_shared(reportDispatcher)); orchestration->setLast(std::make_shared(indexerConnector)); break; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockFactoryOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockFactoryOrchestrator.hpp index 26b710c2be5..29a441669ef 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockFactoryOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockFactoryOrchestrator.hpp @@ -12,6 +12,7 @@ #define _MOCK_FACTORYORCHESTRATOR_HPP #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "TrampolineScanContext.hpp" #include "scanContext.hpp" #include "shared_modules/utils/mocks/chainOfResponsabilityMock.h" diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockRemediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockRemediationDataCache.hpp new file mode 100644 index 00000000000..fb0fece96b1 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockRemediationDataCache.hpp @@ -0,0 +1,56 @@ +/* + * Wazuh databaseFeedManager + * Copyright (C) 2015, Wazuh Inc. + * May 7, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ +#ifndef _MOCK_REMEDIATION_DATA_CACHE_HPP +#define _MOCK_REMEDIATION_DATA_CACHE_HPP + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +#include "remediationDataCache.hpp" +#include "singleton.hpp" + +#include "json.hpp" + +/** + * @class MockRemediationDataCache + * + * @brief Mock class for simulating a database feed manager object. + * + * The `MockRemediationDataCache` class is designed to simulate the behavior of an + * remediation data cache for testing purposes. It provides mock implementations of methods and + * allows you to set expectations on method calls and their return values for testing. + * + * This class is used in unit tests only to verify interactions with a content + * register without actually performing real operations on it. + */ +class MockRemediationDataCache : public Singleton +{ +public: + MockRemediationDataCache() = default; + + virtual ~MockRemediationDataCache() = default; + + /** + * @brief Mock method for getRemediationData. 
+ * + * @note This method is intended for testing purposes and does not perform any real action. + */ + MOCK_METHOD(Remediation, getRemediationData, (const std::string& agentId), ()); + + /** + * @brief Mock method for addRemediationData. + * + * @note This method is intended for testing purposes and does not perform any real action. + */ + MOCK_METHOD(void, addRemediationData, (const std::string& agentId, Remediation newRemediationData), ()); +}; + +#endif // _MOCK_REMEDIATION_DATA_CACHE_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineRemediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineRemediationDataCache.hpp new file mode 100644 index 00000000000..39a0a116259 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineRemediationDataCache.hpp @@ -0,0 +1,61 @@ +/* + * Wazuh databaseFeedManager + * Copyright (C) 2015, Wazuh Inc. + * May 7, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ +#ifndef _TRAMPOLINE_REMEDIATION_DATA_CACHE_HPP +#define _TRAMPOLINE_REMEDIATION_DATA_CACHE_HPP + +#include "MockRemediationDataCache.hpp" +#include "singleton.hpp" + +#include "json.hpp" + +extern std::shared_ptr spRemediationDataCacheMock; + +/** + * @brief This class is a wrapper of the trampoline content register library. + */ +class TrampolineRemediationDataCache final : public Singleton +{ +public: + /** + * @brief Constructor for creating a TrampolineRemediationDataCache object. + * + * The `TrampolineRemediationDataCache` constructor initializes a new instance of the + * TrampolineRemediationDataCache class. + */ + TrampolineRemediationDataCache() {}; + + virtual ~TrampolineRemediationDataCache() = default; + + /** + * @brief Trampoline to method getRemediationData. + * + * @param agentId + * + * @return Remediation + */ + Remediation getRemediationData(const std::string& agentId) + { + return spRemediationDataCacheMock->getRemediationData(agentId); + } + + /** + * @brief Trampoline to method addRemediationData. + * + * @param agentId + * @param Remediation + */ + void addRemediationData(const std::string& agentId, const Remediation& newRemediationData) + { + spRemediationDataCacheMock->addRemediationData(agentId, newRemediationData); + } +}; + +#endif //_TRAMPOLINE_REMEDIATION_DATA_CACHE_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineScanContext.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineScanContext.hpp index 9437488621a..4072a3e1db6 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineScanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineScanContext.hpp @@ -14,7 +14,7 @@ #include "scanContext.hpp" -extern std::shared_ptr> spScanContext; +extern std::shared_ptr> spScanContext; /** * @brief ScanContext structure. 
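
The new MockRemediationDataCache and TrampolineRemediationDataCache pair follows the pattern already used for the OS data cache: the trampoline exposes the production call surface but forwards every call to a globally shared gmock instance, so code templated on the cache type can be exercised against expectations. Below is a compact, self-contained sketch of that idea; the Remediation stand-in is empty because the real struct's members are not shown in this patch, and the class names are illustrative only.

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    #include <memory>
    #include <string>

    // Stand-in for the production Remediation type; its real members are not shown here.
    struct Remediation
    {
    };

    // gmock double, analogous to MockRemediationDataCache above.
    class MockRemediationCache
    {
    public:
        MOCK_METHOD(Remediation, getRemediationData, (const std::string& agentId), ());
    };

    // Shared instance the trampoline forwards to; tests install it and reset it in TearDown().
    std::shared_ptr<MockRemediationCache> spMock;

    // Trampoline: same call surface as the production cache, so code templated on the cache
    // type can run against the mock without touching the real singleton.
    class TrampolineRemediationCache
    {
    public:
        Remediation getRemediationData(const std::string& agentId)
        {
            return spMock->getRemediationData(agentId);
        }
    };

    TEST(RemediationCacheTrampolineSketch, ForwardsToTheMock)
    {
        spMock = std::make_shared<MockRemediationCache>();
        EXPECT_CALL(*spMock, getRemediationData(testing::_)).WillRepeatedly(testing::Return(Remediation {}));

        TrampolineRemediationCache cache; // the type a scan context template parameter would name
        cache.getRemediationData("001");

        spMock.reset();
    }
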
diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/alertClearBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/alertClearBuilder_test.cpp index fd58b482499..45eac8e2e73 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/alertClearBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/alertClearBuilder_test.cpp @@ -18,6 +18,7 @@ #include "MockDatabaseFeedManager.hpp" #include "MockOsDataCache.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "flatbuffers/flatbuffer_builder.h" #include "flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" @@ -74,6 +75,7 @@ void AlertClearBuilderTest::SetUp() void AlertClearBuilderTest::TearDown() { spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); PolicyManager::instance().teardown(); std::filesystem::remove_all("queue/vd"); } @@ -99,6 +101,9 @@ TEST_F(AlertClearBuilderTest, TestSuccessfulIntegrityClear) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); flatbuffers::Parser parser; @@ -107,13 +112,16 @@ TEST_F(AlertClearBuilderTest, TestSuccessfulIntegrityClear) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorSynchronization = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorSynchronization); + auto scanContextOriginal = + std::make_shared>( + syscollectorSynchronization); scanContextOriginal->m_isInventoryEmpty = false; - TAlertClearBuilder> alertClearBuilder( - spDatabaseFeedManagerMock); + TAlertClearBuilder> + alertClearBuilder(spDatabaseFeedManagerMock); - std::shared_ptr> scanContextResult; + std::shared_ptr> scanContextResult; EXPECT_NO_THROW(scanContextResult = alertClearBuilder.handleRequest(scanContextOriginal)); EXPECT_EQ(scanContextResult->m_elements.size(), 0); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp index 7aca549855d..3e0acc4c3a7 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp @@ -11,18 +11,23 @@ #include "buildAllAgentListContext_test.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "TrampolineSocketDBWrapper.hpp" #include "buildAllAgentListContext.hpp" +using TrampolineScanContext = TScanContext; + TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContext) { spSocketDBWrapperMock = std::make_shared(); EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)).Times(1); - auto allAgentContext = - std::make_shared, TrampolineSocketDBWrapper>>(); + auto allAgentContext = std::make_shared< + TBuildAllAgentListContext, + TrampolineSocketDBWrapper>>(); - auto scanContext = std::make_shared>(); + auto scanContext = + std::make_shared>(); allAgentContext->handleRequest(scanContext); } @@ -48,9 +53,9 @@ TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContextWithElements) .WillOnce(testing::SetArgReferee<1>(queryResult)); auto allAgentContext = - std::make_shared, 
TrampolineSocketDBWrapper>>(); + std::make_shared>(); - auto scanContext = std::make_shared>(); + auto scanContext = std::make_shared(); // Context is not used allAgentContext->handleRequest(scanContext); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanAgentInventory_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanAgentInventory_test.cpp index 5f21b504c06..2549f23d1ed 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanAgentInventory_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanAgentInventory_test.cpp @@ -11,6 +11,7 @@ #include "cleanAgentInventory_test.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "cleanAgentInventory.hpp" #include "flatbuffers/include/syscollector_synchronization_schema.h" #include "mocks/chainOfResponsabilityMock.h" @@ -77,6 +78,7 @@ void CleanAgentInventoryTest::SetUp() void CleanAgentInventoryTest::TearDown() { spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); m_inventoryDatabase->deleteAll(); m_inventoryDatabase.reset(); std::filesystem::remove_all(INVENTORY_DB_PATH); @@ -85,7 +87,8 @@ void CleanAgentInventoryTest::TearDown() TEST_F(CleanAgentInventoryTest, TestInstantiationOfTheeventInsertInventoryClass) { // Instantiation of the eventInsertInventory class. - EXPECT_NO_THROW(std::make_shared>>(*m_inventoryDatabase)); + using scanContext_t = TScanContext; + EXPECT_NO_THROW(std::make_shared>(*m_inventoryDatabase)); } TEST_F(CleanAgentInventoryTest, CleanAgentDataSuccessfulPackage) @@ -97,13 +100,14 @@ TEST_F(CleanAgentInventoryTest, CleanAgentDataSuccessfulPackage) m_inventoryDatabase->put("001", "1", OS_INITIAL_SCAN); m_inventoryDatabase->put("002", "2", OS_INITIAL_SCAN); - auto spSubOrchestration = - std::make_shared>>>(); + auto spSubOrchestration = std::make_shared>>>(); EXPECT_CALL(*spSubOrchestration, handleRequest(testing::_)).Times(2); auto cleanAgentInventory = std::make_shared< - TCleanAgentInventory, - MockAbstractHandler>>>>( + TCleanAgentInventory, + MockAbstractHandler>>>>( *m_inventoryDatabase, spSubOrchestration); nlohmann::json jsonData = nlohmann::json::parse( @@ -112,7 +116,8 @@ TEST_F(CleanAgentInventoryTest, CleanAgentDataSuccessfulPackage) std::variant data = &jsonData; - auto context = std::make_shared>(data); + auto context = + std::make_shared>(data); EXPECT_NO_THROW(cleanAgentInventory->handleRequest(context)); @@ -134,13 +139,14 @@ TEST_F(CleanAgentInventoryTest, CleanAgentDataIntegrityClear) m_inventoryDatabase->put("001", "1", OS_INITIAL_SCAN); m_inventoryDatabase->put("002", "2", OS_INITIAL_SCAN); - auto spSubOrchestration = - std::make_shared>>>(); + auto spSubOrchestration = std::make_shared>>>(); EXPECT_CALL(*spSubOrchestration, handleRequest(testing::_)).Times(1); auto cleanAgentInventory = std::make_shared< - TCleanAgentInventory, - MockAbstractHandler>>>>( + TCleanAgentInventory, + MockAbstractHandler>>>>( *m_inventoryDatabase, spSubOrchestration); flatbuffers::Parser parser; @@ -150,7 +156,8 @@ TEST_F(CleanAgentInventoryTest, CleanAgentDataIntegrityClear) std::variant msg = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - auto context = std::make_shared>(msg); + auto context = + std::make_shared>(msg); EXPECT_NO_THROW(cleanAgentInventory->handleRequest(context)); @@ -172,13 +179,14 @@ TEST_F(CleanAgentInventoryTest, CleanAgentDataIntegrityClearOs) m_inventoryDatabase->put("001", "1", OS_INITIAL_SCAN); m_inventoryDatabase->put("002", "2", OS_INITIAL_SCAN); - auto 
spSubOrchestration = - std::make_shared>>>(); + auto spSubOrchestration = std::make_shared>>>(); EXPECT_CALL(*spSubOrchestration, handleRequest(testing::_)).Times(1); auto cleanAgentInventory = std::make_shared< - TCleanAgentInventory, - MockAbstractHandler>>>>( + TCleanAgentInventory, + MockAbstractHandler>>>>( *m_inventoryDatabase, spSubOrchestration); flatbuffers::Parser parser; @@ -188,7 +196,8 @@ TEST_F(CleanAgentInventoryTest, CleanAgentDataIntegrityClearOs) std::variant msg = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - auto context = std::make_shared>(msg); + auto context = + std::make_shared>(msg); EXPECT_NO_THROW(cleanAgentInventory->handleRequest(context)); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanInventory_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanInventory_test.cpp index 063c7c95fcf..57af4d31d52 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanInventory_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/cleanInventory_test.cpp @@ -17,6 +17,7 @@ #include "../scanOrchestrator/cleanInventory.hpp" #include "MockOsDataCache.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "flatbuffers/flatbuffer_builder.h" #include "flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" @@ -71,6 +72,7 @@ void CleanInventoryTest::SetUp() void CleanInventoryTest::TearDown() { spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); m_inventoryDatabase->deleteAll(); m_inventoryDatabase.reset(); std::filesystem::remove_all(TEST_INVENTORY_DATABASE_PATH); @@ -82,8 +84,8 @@ void CleanInventoryTest::TearDown() TEST_F(CleanInventoryTest, TestInstantiationOfTheeventInsertInventoryClass) { // Instantiation of the eventInsertInventory class. 
- EXPECT_NO_THROW( - std::make_shared>>(*m_inventoryDatabase, nullptr)); + using scanContext_t = TScanContext; + EXPECT_NO_THROW(std::make_shared>(*m_inventoryDatabase, nullptr)); } /* @@ -98,14 +100,15 @@ TEST_F(CleanInventoryTest, TestCleanAllData) m_inventoryDatabase->put("001", "1", OS_INITIAL_SCAN); m_inventoryDatabase->put("002", "2", OS_INITIAL_SCAN); - auto spSubOrchestration = - std::make_shared>>>(); + auto spSubOrchestration = std::make_shared>>>(); EXPECT_CALL(*spSubOrchestration, handleRequest(testing::_)).Times(6); - auto cleanInventory = - std::make_shared, - MockAbstractHandler>>>>( - *m_inventoryDatabase, spSubOrchestration); + auto cleanInventory = std::make_shared< + TCleanInventory, + MockAbstractHandler>>>>( + *m_inventoryDatabase, spSubOrchestration); nlohmann::json jsonData = nlohmann::json::parse( R"({"agent_info": {"agent_id":"001", "agent_version":"4.8.0", "agent_name":"test_agent_name", @@ -113,7 +116,8 @@ TEST_F(CleanInventoryTest, TestCleanAllData) std::variant data = &jsonData; - auto context = std::make_shared>(data); + auto context = + std::make_shared>(data); EXPECT_NO_THROW(cleanInventory->handleRequest(context)); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.cpp index ac9f3113faf..e80657dbfcb 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.cpp @@ -16,6 +16,7 @@ #include "MockOsDataCache.hpp" #include "MockReportDispatcher.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" #include "flatbuffers/util.h" @@ -81,7 +82,9 @@ TEST_F(ClearSendReportTest, SendFormattedMsgPackages) // Mock report dispatcher. std::shared_ptr reportDispatcher = std::make_shared(); // Send report instance. - TClearSendReport, MockReportDispatcher> sendReport(reportDispatcher); + TClearSendReport, + MockReportDispatcher> + sendReport(reportDispatcher); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -102,6 +105,9 @@ TEST_F(ClearSendReportTest, SendFormattedMsgPackages) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + // Mock scanContext. flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_synchronization_SCHEMA)); @@ -109,7 +115,9 @@ TEST_F(ClearSendReportTest, SendFormattedMsgPackages) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorSync = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorSync); + auto scanContext = + std::make_shared>( + syscollectorSync); scanContext->m_alerts["clean"] = nlohmann::json::object(); EXPECT_CALL(*reportDispatcher, push(_)).Times(1); @@ -123,7 +131,9 @@ TEST_F(ClearSendReportTest, SendFormattedMsgOsInfo) // Mock report dispatcher. std::shared_ptr reportDispatcher = std::make_shared(); // Send report instance. 
- TClearSendReport, MockReportDispatcher> sendReport(reportDispatcher); + TClearSendReport, + MockReportDispatcher> + sendReport(reportDispatcher); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -144,6 +154,9 @@ TEST_F(ClearSendReportTest, SendFormattedMsgOsInfo) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + // Mock scanContext. flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_synchronization_SCHEMA)); @@ -151,7 +164,9 @@ TEST_F(ClearSendReportTest, SendFormattedMsgOsInfo) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorSync = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorSync); + auto scanContext = + std::make_shared>( + syscollectorSync); scanContext->m_alerts["clean"] = nlohmann::json::object(); EXPECT_CALL(*reportDispatcher, push(_)).Times(1); @@ -165,7 +180,9 @@ TEST_F(ClearSendReportTest, SendFormattedMsgUnexpected) // Mock report dispatcher. std::shared_ptr reportDispatcher = std::make_shared(); // Send report instance. - TClearSendReport, MockReportDispatcher> sendReport(reportDispatcher); + TClearSendReport, + MockReportDispatcher> + sendReport(reportDispatcher); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -186,6 +203,9 @@ TEST_F(ClearSendReportTest, SendFormattedMsgUnexpected) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + // Mock scanContext. flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_synchronization_SCHEMA)); @@ -193,7 +213,9 @@ TEST_F(ClearSendReportTest, SendFormattedMsgUnexpected) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorSync = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorSync); + auto scanContext = + std::make_shared>( + syscollectorSync); scanContext->m_alerts["clean"] = nlohmann::json::object(); EXPECT_CALL(*reportDispatcher, push(_)).Times(1); @@ -207,7 +229,9 @@ TEST_F(ClearSendReportTest, SendFormattedMsgThrow) // Mock report dispatcher. std::shared_ptr reportDispatcher = std::make_shared(); // Send report instance. - TClearSendReport, MockReportDispatcher> sendReport(reportDispatcher); + TClearSendReport, + MockReportDispatcher> + sendReport(reportDispatcher); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -228,6 +252,9 @@ TEST_F(ClearSendReportTest, SendFormattedMsgThrow) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + // Mock scanContext. 
flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_synchronization_SCHEMA)); @@ -235,7 +262,9 @@ TEST_F(ClearSendReportTest, SendFormattedMsgThrow) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorSync = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorSync); + auto scanContext = + std::make_shared>( + syscollectorSync); scanContext->m_alerts["clean"] = nlohmann::json::object(); // Simulate an exception in push method. diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.hpp index eac10bc58c9..684826e0303 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/clearSendReport_test.hpp @@ -13,6 +13,7 @@ #define _CLEAN_SEND_REPORT_TEST_HPP #include "MockOsDataCache.hpp" +#include "MockRemediationDataCache.hpp" #include "policyManager.hpp" #include "socketServer.hpp" #include "gtest/gtest.h" @@ -20,6 +21,7 @@ // External shared pointers definitions extern std::shared_ptr spOsDataCacheMock; +extern std::shared_ptr spRemediationDataCacheMock; /** * @brief SendReport test class. @@ -59,6 +61,7 @@ class ClearSendReportTest : public ::testing::Test void TearDown() override { spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); } }; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.h b/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.h index ee620730500..92cd238d189 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.h +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.h @@ -15,6 +15,7 @@ #include "TrampolineContentRegister.hpp" #include "TrampolineIndexerConnector.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "TrampolinePolicyManager.hpp" #include "TrampolineRouterSuscribe.hpp" #include "rocksDBWrapper.hpp" diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDeleteInventory_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDeleteInventory_test.cpp index 66a2cce20de..b16631c212e 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDeleteInventory_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDeleteInventory_test.cpp @@ -17,6 +17,7 @@ #include "../scanOrchestrator/eventDeleteInventory.hpp" #include "MockOsDataCache.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "flatbuffers/flatbuffer_builder.h" #include "flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" @@ -84,6 +85,7 @@ void EventDeleteInventoryTest::SetUp() void EventDeleteInventoryTest::TearDown() { spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); m_inventoryDatabase->deleteAll(); m_inventoryDatabase.reset(); std::filesystem::remove_all(TEST_INVENTORY_DATABASE_PATH); @@ -95,7 +97,8 @@ void EventDeleteInventoryTest::TearDown() TEST_F(EventDeleteInventoryTest, TestInstantiationOfTheEventInsertInventoryClass) { // Instantiation of the eventDeleteInventory class. 
- EXPECT_NO_THROW(std::make_shared>>(*m_inventoryDatabase)); + using scanContext_t = TScanContext; + EXPECT_NO_THROW(std::make_shared>(*m_inventoryDatabase)); } /* @@ -106,8 +109,9 @@ TEST_F(EventDeleteInventoryTest, TestHandleRequestPackageDelete) // Instantiation of the eventDeleteInventory class. m_inventoryDatabase->put("001_ec465b7eb5fa011a336e95614072e4c7f1a65a53", CVEID1, PACKAGE); - auto eventDeleteInventory = - std::make_shared>>(*m_inventoryDatabase); + auto eventDeleteInventory = std::make_shared< + TEventDeleteInventory>>( + *m_inventoryDatabase); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -128,6 +132,9 @@ TEST_F(EventDeleteInventoryTest, TestHandleRequestPackageDelete) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + // Mock scanContext. flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); @@ -137,7 +144,9 @@ TEST_F(EventDeleteInventoryTest, TestHandleRequestPackageDelete) syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); // Create a ScanContext object. - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); // Call handleRequest method. EXPECT_NO_THROW(eventDeleteInventory->handleRequest(scanContext)); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp index d44b18d9d31..49400dc136b 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDetailsBuilder_test.cpp @@ -18,6 +18,7 @@ #include "MockDatabaseFeedManager.hpp" #include "MockOsDataCache.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "flatbuffers/flatbuffer_builder.h" #include "flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" @@ -182,6 +183,7 @@ void EventDetailsBuilderTest::SetUp() void EventDetailsBuilderTest::TearDown() { spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); PolicyManager::instance().teardown(); std::filesystem::remove_all("queue/vd"); } @@ -244,6 +246,9 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS2) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); @@ -254,13 +259,16 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS2) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); // Mock one vulnerability scanContext->m_elements[CVEID] = R"({"operation":"INSERTED", 
"id":"001_ec465b7eb5fa011a336e95614072e4c7f1a65a53_CVE-2024-1234"})"_json; - TEventDetailsBuilder> eventDetailsBuilder( - spDatabaseFeedManagerMock); + TEventDetailsBuilder> + eventDetailsBuilder(spDatabaseFeedManagerMock); EXPECT_NO_THROW(eventDetailsBuilder.handleRequest(scanContext)); @@ -421,6 +429,9 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS3) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); @@ -431,13 +442,16 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS3) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"INSERTED", "id":"000_ec465b7eb5fa011a336e95614072e4c7f1a65a53_CVE-2024-1234"})"_json; scanContext->m_alerts[CVEID] = nlohmann::json::object(); // Mock one alert - TEventDetailsBuilder> eventDetailsBuilder( - spDatabaseFeedManagerMock); + TEventDetailsBuilder> + eventDetailsBuilder(spDatabaseFeedManagerMock); EXPECT_NO_THROW(eventDetailsBuilder.handleRequest(scanContext)); @@ -600,6 +614,9 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageDeleted) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); @@ -610,11 +627,14 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulPackageDeleted) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"INSERTED"})"_json; - TEventDetailsBuilder> eventDetailsBuilder( - spDatabaseFeedManagerMock); + TEventDetailsBuilder> + eventDetailsBuilder(spDatabaseFeedManagerMock); EXPECT_NO_THROW(eventDetailsBuilder.handleRequest(scanContext)); @@ -681,6 +701,9 @@ TEST_F(EventDetailsBuilderTest, TestSuccessfulOsInserted) EXPECT_CALL(*spOsDataCacheMock, setOsData(_, _)).Times(1); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); @@ -691,12 +714,15 @@ TEST_F(EventDetailsBuilderTest, 
TestSuccessfulOsInserted) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"INSERTED", "id":"002_Microsoft Windows 10 Pro_CVE-2024-1234"})"_json; - TEventDetailsBuilder> eventDetailsBuilder( - spDatabaseFeedManagerMock); + TEventDetailsBuilder> + eventDetailsBuilder(spDatabaseFeedManagerMock); EXPECT_NO_THROW(eventDetailsBuilder.handleRequest(scanContext)); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventInsertInventory_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventInsertInventory_test.cpp index 3ffadbdef23..51174d80b6a 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventInsertInventory_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventInsertInventory_test.cpp @@ -17,6 +17,7 @@ #include "../scanOrchestrator/eventInsertInventory.hpp" #include "MockOsDataCache.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "flatbuffers/flatbuffer_builder.h" #include "flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" @@ -84,6 +85,7 @@ void EventInsertInventoryTest::SetUp() void EventInsertInventoryTest::TearDown() { spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); m_inventoryDatabase->deleteAll(); m_inventoryDatabase.reset(); std::filesystem::remove_all(TEST_INVENTORY_DATABASE_PATH); @@ -95,7 +97,8 @@ void EventInsertInventoryTest::TearDown() TEST_F(EventInsertInventoryTest, TestInstantiationOfTheeventInsertInventoryClass) { // Instantiation of the eventInsertInventory class. - EXPECT_NO_THROW(std::make_shared>>(*m_inventoryDatabase)); + using scanContext_t = TScanContext; + EXPECT_NO_THROW(std::make_shared>(*m_inventoryDatabase)); } /* @@ -104,8 +107,9 @@ TEST_F(EventInsertInventoryTest, TestInstantiationOfTheeventInsertInventoryClass TEST_F(EventInsertInventoryTest, TestHandleRequestPackageInsertNonExisting) { // Instantiation of the eventInsertInventory class. - auto eventInsertInventory = - std::make_shared>>(*m_inventoryDatabase); + auto eventInsertInventory = std::make_shared< + TEventInsertInventory>>( + *m_inventoryDatabase); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -126,6 +130,9 @@ TEST_F(EventInsertInventoryTest, TestHandleRequestPackageInsertNonExisting) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + // Mock scanContext. flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); @@ -135,7 +142,9 @@ TEST_F(EventInsertInventoryTest, TestHandleRequestPackageInsertNonExisting) syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); // Create a ScanContext object. - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_elements[CVEID1] = nlohmann::json::object(); // Mock one vulnerability // Call handleRequest method. @@ -159,8 +168,9 @@ TEST_F(EventInsertInventoryTest, TestHandleRequestPackageInsertAlreadyExisting) // Instantiation of the eventInsertInventory class. 
m_inventoryDatabase->put("001_ec465b7eb5fa011a336e95614072e4c7f1a65a53", CVEID2, PACKAGE); - auto eventInsertInventory = - std::make_shared>>(*m_inventoryDatabase); + auto eventInsertInventory = std::make_shared< + TEventInsertInventory>>( + *m_inventoryDatabase); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -181,6 +191,9 @@ TEST_F(EventInsertInventoryTest, TestHandleRequestPackageInsertAlreadyExisting) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + // Mock scanContext. flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); @@ -190,7 +203,9 @@ TEST_F(EventInsertInventoryTest, TestHandleRequestPackageInsertAlreadyExisting) syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); // Create a ScanContext object. - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_elements[CVEID1] = nlohmann::json::object(); // Mock one vulnerability // Call handleRequest method. diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp index f5e58aa25b7..6cb92831a2b 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp @@ -18,6 +18,7 @@ #include "MockDatabaseFeedManager.hpp" #include "MockOsDataCache.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "flatbuffers/flatbuffer_builder.h" #include "flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" @@ -122,6 +123,7 @@ void EventPackageAlertDetailsBuilderTest::SetUp() void EventPackageAlertDetailsBuilderTest::TearDown() { spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); PolicyManager::instance().teardown(); std::filesystem::remove_all("queue/vd"); } @@ -184,6 +186,9 @@ TEST_F(EventPackageAlertDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS2) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); @@ -194,11 +199,14 @@ TEST_F(EventPackageAlertDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS2) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"INSERTED"})"_json; scanContext->m_matchConditions[CVEID] = {"1.0.0", MatchRuleCondition::Equal}; - TEventPackageAlertDetailsBuilder> + TEventPackageAlertDetailsBuilder> eventPackageAlertDetailsAugmentation(spDatabaseFeedManagerMock); 
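
The alert-details tests in this file feed the builder one match condition per CVE: the version that matched plus a MatchRuleCondition value. The sketch below pairs the two and renders a human readable clause; the enum values mirror the ones the tests use, while the rendered wording is purely illustrative and not the builder's actual output.

    #include <iostream>
    #include <string>
    #include <unordered_map>

    // Illustrative stand-ins for the match metadata attached to each detected CVE.
    enum class MatchRuleCondition { Equal, LessThan, LessThanOrEqual, DefaultStatus };

    struct MatchCondition
    {
        std::string version;
        MatchRuleCondition condition;
    };

    std::string describe(const MatchCondition& match)
    {
        switch (match.condition)
        {
            case MatchRuleCondition::Equal: return "equal to " + match.version;
            case MatchRuleCondition::LessThan: return "less than " + match.version;
            case MatchRuleCondition::LessThanOrEqual: return "less than or equal to " + match.version;
            case MatchRuleCondition::DefaultStatus: return "flagged by the feed's default status";
        }
        return {};
    }

    int main()
    {
        std::unordered_map<std::string, MatchCondition> matchConditions;
        matchConditions["CVE-2024-1234"] = {"1.0.0", MatchRuleCondition::LessThan};

        for (const auto& [cve, match] : matchConditions)
        {
            std::cout << cve << ": installed version is " << describe(match) << '\n';
        }
    }
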
EXPECT_NO_THROW(eventPackageAlertDetailsAugmentation.handleRequest(scanContext)); @@ -360,6 +368,9 @@ TEST_F(EventPackageAlertDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS3) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); @@ -370,11 +381,14 @@ TEST_F(EventPackageAlertDetailsBuilderTest, TestSuccessfulPackageInsertedCVSS3) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"INSERTED"})"_json; scanContext->m_matchConditions[CVEID] = {"1.0.0", MatchRuleCondition::LessThan}; - TEventPackageAlertDetailsBuilder> + TEventPackageAlertDetailsBuilder> eventPackageAlertDetailsAugmentation(spDatabaseFeedManagerMock); EXPECT_NO_THROW(eventPackageAlertDetailsAugmentation.handleRequest(scanContext)); @@ -540,6 +554,9 @@ TEST_F(EventPackageAlertDetailsBuilderTest, TestSuccessfulPackageInsertedDefault spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); @@ -550,11 +567,14 @@ TEST_F(EventPackageAlertDetailsBuilderTest, TestSuccessfulPackageInsertedDefault uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"INSERTED"})"_json; scanContext->m_matchConditions[CVEID] = {"1.0.0", MatchRuleCondition::DefaultStatus}; - TEventPackageAlertDetailsBuilder> + TEventPackageAlertDetailsBuilder> eventPackageAlertDetailsAugmentation(spDatabaseFeedManagerMock); EXPECT_NO_THROW(eventPackageAlertDetailsAugmentation.handleRequest(scanContext)); @@ -623,6 +643,9 @@ TEST_F(EventPackageAlertDetailsBuilderTest, TestSuccessfulPackageInsertedLessTha spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); @@ -633,11 +656,14 @@ TEST_F(EventPackageAlertDetailsBuilderTest, TestSuccessfulPackageInsertedLessTha uint8_t* buffer = parser.builder_.GetBufferPointer(); 
std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"INSERTED"})"_json; scanContext->m_matchConditions[CVEID] = {"1.0.0", MatchRuleCondition::LessThanOrEqual}; - TEventPackageAlertDetailsBuilder> + TEventPackageAlertDetailsBuilder> eventPackageAlertDetailsAugmentation(spDatabaseFeedManagerMock); EXPECT_NO_THROW(eventPackageAlertDetailsAugmentation.handleRequest(scanContext)); @@ -706,6 +732,9 @@ TEST_F(EventPackageAlertDetailsBuilderTest, TestSuccessfulPackageDeleted) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); @@ -716,10 +745,13 @@ TEST_F(EventPackageAlertDetailsBuilderTest, TestSuccessfulPackageDeleted) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"DELETED"})"_json; - TEventPackageAlertDetailsBuilder> + TEventPackageAlertDetailsBuilder> eventPackageAlertDetailsAugmentation(spDatabaseFeedManagerMock); EXPECT_NO_THROW(eventPackageAlertDetailsAugmentation.handleRequest(scanContext)); @@ -822,6 +854,9 @@ TEST_F(EventPackageAlertDetailsBuilderTest, TestFailedInvalidOperation) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); @@ -832,10 +867,13 @@ TEST_F(EventPackageAlertDetailsBuilderTest, TestFailedInvalidOperation) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"invalid"})"_json; - TEventPackageAlertDetailsBuilder> + TEventPackageAlertDetailsBuilder> eventPackageAlertDetailsAugmentation(spDatabaseFeedManagerMock); EXPECT_ANY_THROW(eventPackageAlertDetailsAugmentation.handleRequest(scanContext)); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.cpp index f5aeacc3f34..3c7fd703b76 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.cpp @@ -18,6 +18,7 @@ #include "MockOsDataCache.hpp" #include "MockReportDispatcher.hpp" #include 
"TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "flatbuffers/idl.h" using ::testing::_; @@ -197,7 +198,9 @@ TEST_F(EventSendReportTest, SendFormattedMsg) // Mock report dispatcher. auto reportDispatcher = std::make_shared(); // Send report instance. - TEventSendReport, MockReportDispatcher> sendReport(reportDispatcher); + TEventSendReport, + MockReportDispatcher> + sendReport(reportDispatcher); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -218,6 +221,9 @@ TEST_F(EventSendReportTest, SendFormattedMsg) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + // Mock scanContext. flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_synchronization_SCHEMA)); @@ -225,7 +231,9 @@ TEST_F(EventSendReportTest, SendFormattedMsg) const uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorSync = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorSync); + auto scanContext = + std::make_shared>( + syscollectorSync); nlohmann::json detectionJson = nlohmann::json::parse(SYNC_STATE_ALERT); scanContext->m_alerts["CVE-2020-14343"] = detectionJson; @@ -240,7 +248,9 @@ TEST_F(EventSendReportTest, InvalidEncodingValue) // Mock report dispatcher. auto reportDispatcher = std::make_shared(); // Send report instance. - TEventSendReport, MockReportDispatcher> sendReport(reportDispatcher); + TEventSendReport, + MockReportDispatcher> + sendReport(reportDispatcher); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -261,6 +271,9 @@ TEST_F(EventSendReportTest, InvalidEncodingValue) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + // Mock scanContext. flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); @@ -268,7 +281,9 @@ TEST_F(EventSendReportTest, InvalidEncodingValue) const uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorSync = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorSync); + auto scanContext = + std::make_shared>( + syscollectorSync); nlohmann::json detectionJson = nlohmann::json::parse(SYNC_STATE_ALERT); detectionJson["data"]["package"]["name"] = "\xAA"; scanContext->m_alerts["CVE-2020-14343"] = detectionJson; @@ -284,7 +299,9 @@ TEST_F(EventSendReportTest, SendFormattedDeltaMsg) // Mock report dispatcher. auto reportDispatcher = std::make_shared(); // Send report instance. 
- TEventSendReport, MockReportDispatcher> sendReport(reportDispatcher); + TEventSendReport, + MockReportDispatcher> + sendReport(reportDispatcher); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -305,6 +322,9 @@ TEST_F(EventSendReportTest, SendFormattedDeltaMsg) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + // Mock scanContext. flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); @@ -312,7 +332,9 @@ TEST_F(EventSendReportTest, SendFormattedDeltaMsg) const uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); nlohmann::json detectionJson = nlohmann::json::parse(DELTA_DELETE_ALERT); scanContext->m_alerts["CVE-2023-5441"] = detectionJson; @@ -327,7 +349,9 @@ TEST_F(EventSendReportTest, SendFormattedDeltaMsgOS) // Mock report dispatcher. auto reportDispatcher = std::make_shared(); // Send report instance. - TEventSendReport, MockReportDispatcher> sendReport(reportDispatcher); + TEventSendReport, + MockReportDispatcher> + sendReport(reportDispatcher); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -356,7 +380,9 @@ TEST_F(EventSendReportTest, SendFormattedDeltaMsgOS) const uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_alerts["CVE-2023-5441"] = nlohmann::json::object(); EXPECT_CALL(*reportDispatcher, push(_)).Times(1); @@ -370,7 +396,9 @@ TEST_F(EventSendReportTest, SendFormattedDeltaMsgHotfix) // Mock report dispatcher. auto reportDispatcher = std::make_shared(); // Send report instance. - TEventSendReport, MockReportDispatcher> sendReport(reportDispatcher); + TEventSendReport, + MockReportDispatcher> + sendReport(reportDispatcher); Os osData {.hostName = "osdata_hostname", .architecture = "osdata_architecture", @@ -391,6 +419,9 @@ TEST_F(EventSendReportTest, SendFormattedDeltaMsgHotfix) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + // Mock scanContext. 
flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); @@ -398,7 +429,9 @@ TEST_F(EventSendReportTest, SendFormattedDeltaMsgHotfix) const uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_alerts["CVE-2023-5441"] = nlohmann::json::object(); EXPECT_CALL(*reportDispatcher, push(_)).Times(1); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.hpp index 18061a2e4bb..eafaebf0be6 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventSendReport_test.hpp @@ -13,6 +13,7 @@ #define _EVENT_SEND_REPORT_TEST_HPP #include "MockOsDataCache.hpp" +#include "MockRemediationDataCache.hpp" #include "policyManager.hpp" #include "socketServer.hpp" #include "gtest/gtest.h" @@ -20,6 +21,7 @@ // External shared pointers definitions extern std::shared_ptr spOsDataCacheMock; +extern std::shared_ptr spRemediationDataCacheMock; /** * @brief SendReport test class. @@ -88,6 +90,7 @@ class EventSendReportTest : public ::testing::Test void TearDown() override { spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); PolicyManager::instance().teardown(); m_fakeWdb->stop(); std::filesystem::remove_all("queue/db"); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp index bc478e2a2e3..30c54da0657 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp @@ -18,22 +18,24 @@ enum class ScannerMockID : int PACKAGE_SCANNER = 0, EVENT_PACKAGE_ALERT_DETAILS_BUILDER = 1, SCAN_OS_ALERT_DETAILS_BUILDER = 2, - EVENT_DETAILS_BUILDER = 3, - ALERT_CLEAR_BUILDER = 4, - OS_SCANNER = 5, - CLEAN_ALL_AGENT_INVENTORY = 6, - EVENT_DELETE_INVENTORY = 7, - EVENT_INSERT_INVENTORY = 8, - SCAN_INVENTORY_SYNC = 9, - CLEAR_SEND_REPORT = 10, - EVENT_SEND_REPORT = 11, - RESULT_INDEXER = 12, - BUILD_ALL_AGENT_LIST_CONTEXT = 13, - BUILD_SINGLE_AGENT_LIST_CONTEXT = 14, - CLEAN_SINGLE_AGENT_INVENTORY = 15, - SCAN_AGENT_LIST = 16, - GLOBAL_INVENTORY_SYNC = 17, - HOTFIX_INSERT = 18 + CVE_SOLVED_ALERT_DETAILS_BUILDER = 3, + EVENT_DETAILS_BUILDER = 4, + ALERT_CLEAR_BUILDER = 5, + OS_SCANNER = 6, + CLEAN_ALL_AGENT_INVENTORY = 7, + EVENT_DELETE_INVENTORY = 8, + EVENT_INSERT_INVENTORY = 9, + SCAN_INVENTORY_SYNC = 10, + CVE_SOLVED_INVENTORY_SYNC = 11, + CLEAR_SEND_REPORT = 12, + EVENT_SEND_REPORT = 13, + RESULT_INDEXER = 14, + BUILD_ALL_AGENT_LIST_CONTEXT = 15, + BUILD_SINGLE_AGENT_LIST_CONTEXT = 16, + CLEAN_SINGLE_AGENT_INVENTORY = 17, + SCAN_AGENT_LIST = 18, + GLOBAL_INVENTORY_SYNC = 19, + HOTFIX_INSERT = 20 }; /** @@ -112,6 +114,7 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypePackageInsert) TFactoryOrchestrator, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -119,6 +122,7 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypePackageInsert) TFakeClass, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -158,6 +162,7 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypePackageDelete) TFactoryOrchestrator, 
TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -165,6 +170,7 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypePackageDelete) TFakeClass, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -202,6 +208,7 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypeIntegrityClear) TFactoryOrchestrator, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -209,6 +216,7 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypeIntegrityClear) TFakeClass, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -244,6 +252,7 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypeOs) auto orchestration = TFactoryOrchestrator, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -251,6 +260,7 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypeOs) TFakeClass, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -290,6 +300,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationCleanUpAllData) TFactoryOrchestrator, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -297,6 +308,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationCleanUpAllData) TFakeClass, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -331,6 +343,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanAllAgents) TFactoryOrchestrator, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -338,6 +351,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanAllAgents) TFakeClass, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -373,6 +387,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanSingleAgent) TFactoryOrchestrator, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -380,6 +395,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanSingleAgent) TFakeClass, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -413,6 +429,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationCleanUpAgentData) TFactoryOrchestrator, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -420,6 +437,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationCleanUpAgentData) TFakeClass, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -457,6 +475,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationInvalidScannerType) TFactoryOrchestrator, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -464,6 +483,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationInvalidScannerType) TFakeClass, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -498,6 +518,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationGlobalSyncInventory) TFactoryOrchestrator, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, @@ -505,6 +526,7 @@ TEST_F(FactoryOrchestratorTest, TestCreationGlobalSyncInventory) TFakeClass, TFakeClass, TFakeClass, + TFakeClass, TFakeClass, TFakeClass, TFakeClass, diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp index 593059b6e17..7ba76765978 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp @@ -19,6 +19,7 @@ #include "MockDatabaseFeedManager.hpp" #include "TrampolineGlobalData.hpp" #include "TrampolineOsDataCache.hpp" +#include 
"TrampolineRemediationDataCache.hpp" #include "flatbuffers/flatbuffer_builder.h" #include "flatbuffers/idl.h" #include "json.hpp" @@ -352,6 +353,7 @@ void PackageScannerTest::SetUp() {} void PackageScannerTest::TearDown() { spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); } TEST_F(PackageScannerTest, TestPackageAffectedEqualTo) @@ -411,6 +413,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualTo) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) @@ -423,15 +428,19 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualTo) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); - std::shared_ptr> scanContextResult; + std::shared_ptr> scanContextResult; EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); ASSERT_TRUE(scanContextResult != nullptr); @@ -504,6 +513,9 @@ TEST_F(PackageScannerTest, TestPackageUnaffectedEqualTo) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) @@ -516,15 +528,19 @@ TEST_F(PackageScannerTest, TestPackageUnaffectedEqualTo) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); - std::shared_ptr> scanContextResult; + std::shared_ptr> scanContextResult; EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); EXPECT_TRUE(scanContextResult == nullptr); @@ -587,6 +603,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThan) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + 
EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) @@ -599,15 +618,19 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThan) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); - std::shared_ptr> scanContextResult; + std::shared_ptr> scanContextResult; EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); ASSERT_TRUE(scanContextResult != nullptr); @@ -680,6 +703,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanVendorMissing) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) @@ -692,15 +718,19 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanVendorMissing) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); - std::shared_ptr> scanContextResult; + std::shared_ptr> scanContextResult; EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); ASSERT_TRUE(scanContextResult == nullptr); @@ -763,6 +793,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanVendorMismatch) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) @@ -775,15 +808,19 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanVendorMismatch) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = 
SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); - std::shared_ptr> scanContextResult; + std::shared_ptr> scanContextResult; EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); ASSERT_TRUE(scanContextResult == nullptr); @@ -846,6 +883,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanVendorMatch) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) @@ -858,15 +898,19 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanVendorMatch) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); - std::shared_ptr> scanContextResult; + std::shared_ptr> scanContextResult; EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); ASSERT_TRUE(scanContextResult != nullptr); @@ -939,6 +983,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanOrEqual) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) @@ -951,15 +998,19 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanOrEqual) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); - std::shared_ptr> scanContextResult; + std::shared_ptr> scanContextResult; 
EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); ASSERT_TRUE(scanContextResult != nullptr); @@ -1032,6 +1083,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanWithVersionNotZero) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) @@ -1044,15 +1098,19 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanWithVersionNotZero) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); - std::shared_ptr> scanContextResult; + std::shared_ptr> scanContextResult; EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); ASSERT_TRUE(scanContextResult != nullptr); @@ -1125,6 +1183,9 @@ TEST_F(PackageScannerTest, TestPackageUnaffectedLessThan) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) @@ -1137,15 +1198,19 @@ TEST_F(PackageScannerTest, TestPackageUnaffectedLessThan) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); - std::shared_ptr> scanContextResult; + std::shared_ptr> scanContextResult; EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); EXPECT_TRUE(scanContextResult == nullptr); @@ -1208,6 +1273,9 @@ TEST_F(PackageScannerTest, TestPackageDefaultStatusAffected) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); 
EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) @@ -1220,15 +1288,19 @@ TEST_F(PackageScannerTest, TestPackageDefaultStatusAffected) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); - std::shared_ptr> scanContextResult; + std::shared_ptr> scanContextResult; EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); ASSERT_TRUE(scanContextResult != nullptr); @@ -1300,6 +1372,9 @@ TEST_F(PackageScannerTest, TestPackageDefaultStatusUnaffected) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) @@ -1312,15 +1387,19 @@ TEST_F(PackageScannerTest, TestPackageDefaultStatusUnaffected) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); - std::shared_ptr> scanContextResult; + std::shared_ptr> scanContextResult; EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); EXPECT_TRUE(scanContextResult == nullptr); @@ -1357,6 +1436,9 @@ TEST_F(PackageScannerTest, TestPackageGetVulnerabilitiesCandidatesGeneratesExcep spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) @@ -1369,15 +1451,19 @@ TEST_F(PackageScannerTest, TestPackageGetVulnerabilitiesCandidatesGeneratesExcep uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + 
syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); - std::shared_ptr> scanContextResult; + std::shared_ptr> scanContextResult; EXPECT_NO_THROW(scanContextResult = packageScanner.handleRequest(scanContextOriginal)); EXPECT_TRUE(scanContextResult == nullptr); @@ -1404,6 +1490,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToAlma8) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("alma")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates("alma_8", _, _)); @@ -1414,13 +1503,17 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToAlma8) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); EXPECT_NO_THROW(packageScanner.handleRequest(scanContextOriginal)); } @@ -1446,6 +1539,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToAlas1) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("alas")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates("alas_1", _, _)); @@ -1456,13 +1552,17 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToAlas1) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); EXPECT_NO_THROW(packageScanner.handleRequest(scanContextOriginal)); } @@ -1488,6 +1588,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToAlas2) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, 
getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("alas")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates("alas_2", _, _)); @@ -1498,13 +1601,17 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToAlas2) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); EXPECT_NO_THROW(packageScanner.handleRequest(scanContextOriginal)); } @@ -1530,6 +1637,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToAlas2022) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("alas")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates("alas_2022", _, _)); @@ -1540,13 +1650,17 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToAlas2022) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); EXPECT_NO_THROW(packageScanner.handleRequest(scanContextOriginal)); } @@ -1572,6 +1686,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToRedHat7) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("redhat")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates("redhat_7", _, _)); @@ -1582,13 +1699,17 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToRedHat7) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, 
TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); EXPECT_NO_THROW(packageScanner.handleRequest(scanContextOriginal)); } @@ -1614,6 +1735,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToSLED) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("suse")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates("suse_desktop_15", _, _)); @@ -1624,13 +1748,17 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToSLED) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); EXPECT_NO_THROW(packageScanner.handleRequest(scanContextOriginal)); } @@ -1656,6 +1784,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToSLES) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("suse")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates("suse_server_15", _, _)); @@ -1666,13 +1797,17 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToSLES) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); spGlobalDataMock = std::make_shared(); EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); - TPackageScanner, TrampolineGlobalData> packageScanner( - spDatabaseFeedManagerMock); + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); EXPECT_NO_THROW(packageScanner.handleRequest(scanContextOriginal)); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp index 19a529be857..958477936fa 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp @@ -18,6 +18,7 @@ #include "MockOsDataCache.hpp" #include "TrampolineIndexerConnector.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "flatbuffers/flatbuffer_builder.h" #include 
"flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" @@ -68,6 +69,7 @@ void ResultIndexerTest::TearDown() { spIndexerConnectorMock.reset(); spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); } /* @@ -101,18 +103,24 @@ TEST_F(ResultIndexerTest, TestHandleRequest) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); scanContextOriginal->m_elements[CVEID] = elementValue; // Mock one vulnerability - auto spResultIndexer = - std::make_shared>>( - pIndexerConnectorTrap); + auto spResultIndexer = std::make_shared< + TResultIndexer>>( + pIndexerConnectorTrap); EXPECT_NO_THROW(spResultIndexer->handleRequest(scanContextOriginal)); } @@ -145,18 +153,24 @@ TEST_F(ResultIndexerTest, TestHandleRequestNoOperation) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); scanContextOriginal->m_elements[CVEID] = elementValue; // Mock one vulnerability - auto spResultIndexer = - std::make_shared>>( - pIndexerConnectorTrap); + auto spResultIndexer = std::make_shared< + TResultIndexer>>( + pIndexerConnectorTrap); EXPECT_NO_THROW(spResultIndexer->handleRequest(scanContextOriginal)); } @@ -189,18 +203,24 @@ TEST_F(ResultIndexerTest, TestHandleRequestNoId) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - auto scanContextOriginal = std::make_shared>(syscollectorDelta); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); scanContextOriginal->m_elements[CVEID] = elementValue; // Mock one vulnerability - auto spResultIndexer = - std::make_shared>>( - pIndexerConnectorTrap); + auto spResultIndexer = std::make_shared< + TResultIndexer>>( + pIndexerConnectorTrap); EXPECT_NO_THROW(spResultIndexer->handleRequest(scanContextOriginal)); } diff 
--git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp index 11cc6850402..b22b2ae8751 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp @@ -11,12 +11,14 @@ #include "scanAgentList_test.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "TrampolineSocketDBWrapper.hpp" #include "scanAgentList.hpp" #include "shared_modules/utils/mocks/chainOfResponsabilityMock.h" #include "socketDBWrapperException.hpp" using testing::_; +using TrampolineScanContext = TScanContext; const std::string EXPECTED_QUERY_PACKAGE {"agent 001 package get "}; const std::string EXPECTED_QUERY_OS {"agent 001 osinfo get "}; @@ -53,22 +55,22 @@ TEST_F(ScanAgentListTest, SingleDeleteAndInsertTest) EXPECT_CALL(*spOsDataCacheMock, getOsData(testing::_)).WillRepeatedly(testing::Return(osData)); EXPECT_CALL(*spOsDataCacheMock, setOsData(_, _)).Times(1); - auto spPackageInsertOrchestrationMock = - std::make_shared>>>(); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(testing::_)) + .WillRepeatedly(testing::Return(Remediation {})); - auto spOsOrchestrationMock = - std::make_shared>>>(); + auto spPackageInsertOrchestrationMock = + std::make_shared>>(); + auto spOsOrchestrationMock = std::make_shared>>(); // Called twice because the server socket response has two packages. EXPECT_CALL(*spPackageInsertOrchestrationMock, handleRequest(testing::_)).Times(2); - EXPECT_CALL(*spOsOrchestrationMock, handleRequest(testing::_)).Times(1); - auto scanAgentList = - std::make_shared, - MockAbstractHandler>>, - TrampolineSocketDBWrapper>>(spPackageInsertOrchestrationMock, - spOsOrchestrationMock); + auto scanAgentList = std::make_shared>, + TrampolineSocketDBWrapper>>(spPackageInsertOrchestrationMock, + spOsOrchestrationMock); EXPECT_CALL(*spSocketDBWrapperMock, query(EXPECTED_QUERY_OS, testing::_)) .Times(1) @@ -84,7 +86,7 @@ TEST_F(ScanAgentListTest, SingleDeleteAndInsertTest) std::variant data = &jsonData; - auto contextData = std::make_shared>(data); + auto contextData = std::make_shared(data); contextData->m_agents.push_back({"001", "test_agent_name", "4.8.0", "192.168.0.1"}); scanAgentList->handleRequest(contextData); @@ -115,22 +117,24 @@ TEST_F(ScanAgentListTest, EmptyPackagesWDBResponseTest) EXPECT_CALL(*spOsDataCacheMock, getOsData(testing::_)).WillRepeatedly(testing::Return(osData)); EXPECT_CALL(*spOsDataCacheMock, setOsData(_, _)).Times(1); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(testing::_)) + .WillRepeatedly(testing::Return(Remediation {})); + auto spPackageInsertOrchestrationMock = - std::make_shared>>>(); + std::make_shared>>(); - auto spOsOrchestrationMock = - std::make_shared>>>(); + auto spOsOrchestrationMock = std::make_shared>>(); // Called twice because the server socket response has two packages. 
EXPECT_CALL(*spPackageInsertOrchestrationMock, handleRequest(testing::_)).Times(0); EXPECT_CALL(*spOsOrchestrationMock, handleRequest(testing::_)).Times(1); - auto scanAgentList = - std::make_shared, - MockAbstractHandler>>, - TrampolineSocketDBWrapper>>(spPackageInsertOrchestrationMock, - spOsOrchestrationMock); + auto scanAgentList = std::make_shared>, + TrampolineSocketDBWrapper>>(spPackageInsertOrchestrationMock, + spOsOrchestrationMock); EXPECT_CALL(*spSocketDBWrapperMock, query(EXPECTED_QUERY_OS, testing::_)) .Times(1) @@ -146,7 +150,7 @@ TEST_F(ScanAgentListTest, EmptyPackagesWDBResponseTest) std::variant data = &jsonData; - auto contextData = std::make_shared>(data); + auto contextData = std::make_shared(data); contextData->m_agents.push_back({"001", "test_agent_name", "4.8.0", "192.168.0.1"}); scanAgentList->handleRequest(contextData); @@ -175,27 +179,29 @@ TEST_F(ScanAgentListTest, DISABLED_InsertAllTestNotSyncedResponse) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(testing::_)).WillRepeatedly(testing::Return(osData)); - auto spOsOrchestrationMock = - std::make_shared>>>(); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(testing::_)) + .WillRepeatedly(testing::Return(Remediation {})); + + auto spOsOrchestrationMock = std::make_shared>>(); auto spPackageInsertOrchestrationMock = - std::make_shared>>>(); + std::make_shared>>(); // Called twice because the server socket response has two packages. EXPECT_CALL(*spPackageInsertOrchestrationMock, handleRequest(testing::_)).Times(0); - auto scanAgentList = - std::make_shared, - MockAbstractHandler>>, - TrampolineSocketDBWrapper>>(spOsOrchestrationMock, - spPackageInsertOrchestrationMock); + auto scanAgentList = std::make_shared>, + TrampolineSocketDBWrapper>>(spOsOrchestrationMock, + spPackageInsertOrchestrationMock); nlohmann::json jsonData = nlohmann::json::parse(R"([{"status":"NOT_SYNCED"}])"); std::variant data = &jsonData; - auto contextData = std::make_shared>(data); + auto contextData = std::make_shared(data); scanAgentList->handleRequest(contextData); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanContext_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanContext_test.cpp index 530389244e4..404d875f5fc 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanContext_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanContext_test.cpp @@ -19,12 +19,14 @@ #include "MockOsDataCache.hpp" #include "TrampolineGlobalData.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "flatbuffers/flatbuffer_builder.h" #include "flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" #include "json.hpp" using ::testing::_; +using scanContext_t = TScanContext; std::shared_ptr spGlobalDataMock; @@ -280,6 +282,7 @@ void ScanContextTest::TearDown() { spGlobalDataMock.reset(); spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); } const char* ScanContextTest::fbStringGetHelper(const flatbuffers::String* pStr) @@ -308,6 +311,9 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasPackagesInserted) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillOnce(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + flatbuffers::Parser parser; 
ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); @@ -315,8 +321,8 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasPackagesInserted) std::variant msg = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - std::shared_ptr> scanContext; - EXPECT_NO_THROW(scanContext = std::make_shared>(msg)); + std::shared_ptr scanContext; + EXPECT_NO_THROW(scanContext = std::make_shared(msg)); EXPECT_EQ(scanContext->getType(), ScannerType::PackageInsert); @@ -426,6 +432,9 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasPackagesDeleted) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillOnce(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_DELETED_MSG.c_str())); @@ -433,8 +442,8 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasPackagesDeleted) std::variant msg = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - std::shared_ptr> scanContext; - EXPECT_NO_THROW(scanContext = std::make_shared>(msg)); + std::shared_ptr scanContext; + EXPECT_NO_THROW(scanContext = std::make_shared(msg)); EXPECT_EQ(scanContext->getType(), ScannerType::PackageDelete); @@ -535,8 +544,8 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasOsInfo) std::variant msg = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - std::shared_ptr> scanContext; - EXPECT_NO_THROW(scanContext = std::make_shared>(msg)); + std::shared_ptr scanContext; + EXPECT_NO_THROW(scanContext = std::make_shared(msg)); EXPECT_EQ(scanContext->getType(), ScannerType::Os); @@ -636,6 +645,10 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasHotfixesInserted) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillOnce(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + EXPECT_CALL(*spRemediationDataCacheMock, addRemediationData(_, _)).Times(1); + flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); ASSERT_TRUE(parser.Parse(DELTA_HOTFIXES_INSERTED_MSG.c_str())); @@ -643,8 +656,8 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasHotfixesInserted) std::variant msg = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - std::shared_ptr> scanContext; - EXPECT_NO_THROW(scanContext = std::make_shared>(msg)); + std::shared_ptr scanContext; + EXPECT_NO_THROW(scanContext = std::make_shared(msg)); EXPECT_EQ(scanContext->getType(), ScannerType::HotfixInsert); @@ -700,6 +713,9 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasHotfixesDeleted) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillOnce(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); ASSERT_TRUE(parser.Parse(DELTA_HOTFIXES_DELETED_MSG.c_str())); @@ -707,8 +723,8 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasHotfixesDeleted) std::variant msg = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - std::shared_ptr> scanContext; - EXPECT_NO_THROW(scanContext = 
std::make_shared>(msg)); + std::shared_ptr scanContext; + EXPECT_NO_THROW(scanContext = std::make_shared(msg)); EXPECT_EQ(scanContext->getType(), ScannerType::HotfixDelete); @@ -752,7 +768,7 @@ TEST_F(ScanContextTest, TestSyscollectorDeltasNoOperation) std::variant msg = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); - EXPECT_THROW(std::make_shared>(msg), std::runtime_error); + EXPECT_THROW(std::make_shared(msg), std::runtime_error); } TEST_F(ScanContextTest, TestSyscollectorSynchronizationStateOsInfo) @@ -767,8 +783,8 @@ TEST_F(ScanContextTest, TestSyscollectorSynchronizationStateOsInfo) std::variant msg = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - std::shared_ptr> scanContext; - EXPECT_NO_THROW(scanContext = std::make_shared>(msg)); + std::shared_ptr scanContext; + EXPECT_NO_THROW(scanContext = std::make_shared(msg)); EXPECT_EQ(scanContext->getType(), ScannerType::Os); @@ -886,6 +902,9 @@ TEST_F(ScanContextTest, TestSyscollectorSynchronizationStatePackages) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillOnce(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_synchronization_SCHEMA)); ASSERT_TRUE(parser.Parse(SYNCHRONIZATION_STATE_PACKAGES_MSG.c_str())); @@ -893,8 +912,8 @@ TEST_F(ScanContextTest, TestSyscollectorSynchronizationStatePackages) std::variant msg = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - std::shared_ptr> scanContext; - EXPECT_NO_THROW(scanContext = std::make_shared>(msg)); + std::shared_ptr scanContext; + EXPECT_NO_THROW(scanContext = std::make_shared(msg)); EXPECT_EQ(scanContext->getType(), ScannerType::PackageInsert); @@ -1023,6 +1042,10 @@ TEST_F(ScanContextTest, TestSyscollectorSynchronizationStateHotfixes) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillOnce(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + EXPECT_CALL(*spRemediationDataCacheMock, addRemediationData(_, _)).Times(1); + flatbuffers::Parser parser; ASSERT_TRUE(parser.Parse(syscollector_synchronization_SCHEMA)); ASSERT_TRUE(parser.Parse(SYNCHRONIZATION_STATE_HOTFIXES_MSG.c_str())); @@ -1030,8 +1053,8 @@ TEST_F(ScanContextTest, TestSyscollectorSynchronizationStateHotfixes) std::variant msg = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - std::shared_ptr> scanContext; - EXPECT_NO_THROW(scanContext = std::make_shared>(msg)); + std::shared_ptr scanContext; + EXPECT_NO_THROW(scanContext = std::make_shared(msg)); EXPECT_EQ(scanContext->getType(), ScannerType::HotfixInsert); @@ -1077,8 +1100,8 @@ TEST_F(ScanContextTest, TestSyscollectorSynchronizationIntegrityClear) std::variant msg = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - std::shared_ptr> scanContext; - EXPECT_NO_THROW(scanContext = std::make_shared>(msg)); + std::shared_ptr scanContext; + EXPECT_NO_THROW(scanContext = std::make_shared(msg)); EXPECT_EQ(scanContext->getType(), ScannerType::IntegrityClear); @@ -1486,8 +1509,8 @@ TEST_F(ScanContextTest, TestJSONMessagePackageDelete) std::variant msg = &message; - std::shared_ptr> scanContext; - EXPECT_NO_THROW(scanContext = std::make_shared>(msg)); + std::shared_ptr 
scanContext; + EXPECT_NO_THROW(scanContext = std::make_shared(msg)); EXPECT_EQ(scanContext->getType(), ScannerType::PackageDelete); EXPECT_EQ(scanContext->messageType(), MessageType::DataJSON); @@ -1529,8 +1552,8 @@ TEST_F(ScanContextTest, TestJSONMessageHotfixDelete) std::variant msg = &message; - std::shared_ptr> scanContext; - EXPECT_NO_THROW(scanContext = std::make_shared>(msg)); + std::shared_ptr scanContext; + EXPECT_NO_THROW(scanContext = std::make_shared(msg)); EXPECT_EQ(scanContext->getType(), ScannerType::HotfixDelete); EXPECT_EQ(scanContext->messageType(), MessageType::DataJSON); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp index 213f47a6d78..238e5c39960 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOrchestrator_test.cpp @@ -19,6 +19,7 @@ #include "MockSocketDBWrapper.hpp" #include "TrampolineFactoryOrchestrator.hpp" #include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" #include "TrampolineScanContext.hpp" #include "TrampolineSocketDBWrapper.hpp" #include "flatbuffers/flatbuffer_builder.h" @@ -181,7 +182,7 @@ namespace NSScanOrchestratorTest // Shared pointers definitions std::shared_ptr spFactoryOrchestratorMock; std::shared_ptr spSocketDBWrapperMock; -std::shared_ptr> spScanContext; +std::shared_ptr> spScanContext; using namespace NSScanOrchestratorTest; @@ -191,6 +192,7 @@ void ScanOrchestratorTest::TearDown() { spFactoryOrchestratorMock.reset(); spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); spSocketDBWrapperMock.reset(); spScanContext.reset(); Log::deassignLogFunction(); @@ -217,6 +219,9 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsert) spOsDataCacheMock = std::make_shared(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spOsOrchestrationMock = std::make_shared>>(); EXPECT_CALL(*spOsOrchestrationMock, handleRequest(_)).Times(0); @@ -251,6 +256,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsert) std::make_shared>>(); EXPECT_CALL(*spGlobalInventorySyncOrchestrationMock, handleRequest(_)).Times(0); + auto spHotfixInsertOrchestrationMock = + std::make_shared>>(); + EXPECT_CALL(*spHotfixInsertOrchestrationMock, handleRequest(_)).Times(0); + spFactoryOrchestratorMock = std::make_shared(); EXPECT_CALL(*spFactoryOrchestratorMock, create()) .WillOnce(testing::Return(spOsOrchestrationMock)) @@ -261,7 +270,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsert) .WillOnce(testing::Return(spCleanUpAllOrchestrationMock)) .WillOnce(testing::Return(spDeleteAgentScanOrchestration)) .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)) - .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)); + .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)) + .WillOnce(testing::Return(spHotfixInsertOrchestrationMock)); auto spIndexerConnectorMock = std::make_shared(); @@ -284,7 +294,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsert) TEST_REPORTS_BULK_SIZE); std::shared_mutex mutexScanOrchestrator; - spScanContext = std::make_shared>(syscollectorDelta); + spScanContext = std::make_shared>( + syscollectorDelta); TScanOrchestrator(); 
EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spOsOrchestrationMock = std::make_shared>>(); EXPECT_CALL(*spOsOrchestrationMock, handleRequest(_)).Times(0); @@ -365,6 +379,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageDelete) std::make_shared>>(); EXPECT_CALL(*spGlobalInventorySyncOrchestrationMock, handleRequest(_)).Times(0); + auto spHotfixInsertOrchestrationMock = + std::make_shared>>(); + EXPECT_CALL(*spHotfixInsertOrchestrationMock, handleRequest(_)).Times(0); + spFactoryOrchestratorMock = std::make_shared(); EXPECT_CALL(*spFactoryOrchestratorMock, create()) .WillOnce(testing::Return(spOsOrchestrationMock)) @@ -375,7 +393,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageDelete) .WillOnce(testing::Return(spCleanUpAllOrchestrationMock)) .WillOnce(testing::Return(spDeleteAgentScanOrchestration)) .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)) - .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)); + .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)) + .WillOnce(testing::Return(spHotfixInsertOrchestrationMock)); auto spIndexerConnectorMock = std::make_shared(); @@ -398,7 +417,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageDelete) TEST_REPORTS_BULK_SIZE); std::shared_mutex mutexScanOrchestrator; - spScanContext = std::make_shared>(syscollectorDelta); + spScanContext = std::make_shared>( + syscollectorDelta); TScanOrchestrator(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + EXPECT_CALL(*spRemediationDataCacheMock, addRemediationData(_, _)).Times(1); + auto spOsOrchestrationMock = std::make_shared>>(); EXPECT_CALL(*spOsOrchestrationMock, handleRequest(_)).Times(0); @@ -479,6 +503,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixInsert) std::make_shared>>(); EXPECT_CALL(*spGlobalInventorySyncOrchestrationMock, handleRequest(_)).Times(0); + auto spHotfixInsertOrchestrationMock = + std::make_shared>>(); + EXPECT_CALL(*spHotfixInsertOrchestrationMock, handleRequest(_)).Times(1); + spFactoryOrchestratorMock = std::make_shared(); EXPECT_CALL(*spFactoryOrchestratorMock, create()) .WillOnce(testing::Return(spOsOrchestrationMock)) @@ -489,7 +517,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixInsert) .WillOnce(testing::Return(spCleanUpAllOrchestrationMock)) .WillOnce(testing::Return(spDeleteAgentScanOrchestration)) .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)) - .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)); + .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)) + .WillOnce(testing::Return(spHotfixInsertOrchestrationMock)); auto spIndexerConnectorMock = std::make_shared(); @@ -512,7 +541,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixInsert) TEST_REPORTS_BULK_SIZE); std::shared_mutex mutexScanOrchestrator; - spScanContext = std::make_shared>(syscollectorDelta); + spScanContext = std::make_shared>( + syscollectorDelta); TScanOrchestrator(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, 
getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spOsOrchestrationMock = std::make_shared>>(); EXPECT_CALL(*spOsOrchestrationMock, handleRequest(_)).Times(0); @@ -593,6 +626,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixDelete) std::make_shared>>(); EXPECT_CALL(*spGlobalInventorySyncOrchestrationMock, handleRequest(_)).Times(0); + auto spHotfixInsertOrchestrationMock = + std::make_shared>>(); + EXPECT_CALL(*spDeleteAgentScanOrchestration, handleRequest(_)).Times(0); + spFactoryOrchestratorMock = std::make_shared(); EXPECT_CALL(*spFactoryOrchestratorMock, create()) .WillOnce(testing::Return(spOsOrchestrationMock)) @@ -603,7 +640,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixDelete) .WillOnce(testing::Return(spCleanUpAllOrchestrationMock)) .WillOnce(testing::Return(spDeleteAgentScanOrchestration)) .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)) - .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)); + .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)) + .WillOnce(testing::Return(spHotfixInsertOrchestrationMock)); auto spIndexerConnectorMock = std::make_shared(); @@ -626,7 +664,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeHotfixDelete) TEST_REPORTS_BULK_SIZE); std::shared_mutex mutexScanOrchestrator; - spScanContext = std::make_shared>(syscollectorDelta); + spScanContext = std::make_shared>( + syscollectorDelta); TScanOrchestrator>>(); EXPECT_CALL(*spGlobalInventorySyncOrchestrationMock, handleRequest(_)).Times(0); + auto spHotfixInsertOrchestrationMock = + std::make_shared>>(); + EXPECT_CALL(*spDeleteAgentScanOrchestration, handleRequest(_)).Times(0); + spFactoryOrchestratorMock = std::make_shared(); EXPECT_CALL(*spFactoryOrchestratorMock, create()) .WillOnce(testing::Return(spOsOrchestrationMock)) @@ -701,7 +744,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeOs) .WillOnce(testing::Return(spCleanUpAllOrchestrationMock)) .WillOnce(testing::Return(spDeleteAgentScanOrchestration)) .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)) - .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)); + .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)) + .WillOnce(testing::Return(spHotfixInsertOrchestrationMock)); auto spIndexerConnectorMock = std::make_shared(); @@ -725,7 +769,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeOs) TEST_REPORTS_BULK_SIZE); std::shared_mutex mutexScanOrchestrator; - spScanContext = std::make_shared>(syscollectorDelta); + spScanContext = std::make_shared>( + syscollectorDelta); TScanOrchestrator(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spOsOrchestrationMock = std::make_shared>>(); EXPECT_CALL(*spOsOrchestrationMock, handleRequest(_)).Times(0); @@ -807,6 +855,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeIntegrityClear) std::make_shared>>(); EXPECT_CALL(*spGlobalInventorySyncOrchestrationMock, handleRequest(_)).Times(0); + auto spHotfixInsertOrchestrationMock = + std::make_shared>>(); + EXPECT_CALL(*spDeleteAgentScanOrchestration, handleRequest(_)).Times(0); + spFactoryOrchestratorMock = std::make_shared(); EXPECT_CALL(*spFactoryOrchestratorMock, create()) .WillOnce(testing::Return(spOsOrchestrationMock)) @@ -817,7 +869,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeIntegrityClear) 
.WillOnce(testing::Return(spCleanUpAllOrchestrationMock)) .WillOnce(testing::Return(spDeleteAgentScanOrchestration)) .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)) - .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)); + .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)) + .WillOnce(testing::Return(spHotfixInsertOrchestrationMock)); auto spIndexerConnectorMock = std::make_shared(); @@ -842,7 +895,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypeIntegrityClear) TEST_REPORTS_BULK_SIZE); std::shared_mutex mutexScanOrchestrator; - spScanContext = std::make_shared>(syscollectorSynchronization); + spScanContext = std::make_shared>( + syscollectorSynchronization); TScanOrchestrator(); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spOsOrchestrationMock = std::make_shared>>(); EXPECT_CALL(*spOsOrchestrationMock, handleRequest(_)).Times(0); @@ -924,6 +981,10 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsertInDelayed) std::make_shared>>(); EXPECT_CALL(*spGlobalInventorySyncOrchestrationMock, handleRequest(_)).Times(0); + auto spHotfixInsertOrchestrationMock = + std::make_shared>>(); + EXPECT_CALL(*spDeleteAgentScanOrchestration, handleRequest(_)).Times(0); + spFactoryOrchestratorMock = std::make_shared(); EXPECT_CALL(*spFactoryOrchestratorMock, create()) .WillOnce(testing::Return(spOsOrchestrationMock)) @@ -934,7 +995,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsertInDelayed) .WillOnce(testing::Return(spCleanUpAllOrchestrationMock)) .WillOnce(testing::Return(spDeleteAgentScanOrchestration)) .WillOnce(testing::Return(spQueryAllPkgsOrchestrationMock)) - .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)); + .WillOnce(testing::Return(spGlobalInventorySyncOrchestrationMock)) + .WillOnce(testing::Return(spHotfixInsertOrchestrationMock)); auto spIndexerConnectorMock = std::make_shared(); @@ -957,7 +1019,8 @@ TEST_F(ScanOrchestratorTest, TestRunScannerTypePackageInsertInDelayed) TEST_REPORTS_BULK_SIZE); std::shared_mutex mutexScanOrchestrator; - spScanContext = std::make_shared>(syscollectorDelta); + spScanContext = std::make_shared>( + syscollectorDelta); TScanOrchestrator spOsDataCacheMock; +std::shared_ptr spRemediationDataCacheMock; using namespace NSScanOsAlertDetailsBuilderTest; @@ -85,6 +89,7 @@ void ScanOsAlertDetailsBuilderTest::SetUp() void ScanOsAlertDetailsBuilderTest::TearDown() { spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); PolicyManager::instance().teardown(); std::filesystem::remove_all("queue/vd"); } @@ -148,6 +153,9 @@ TEST_F(ScanOsAlertDetailsBuilderTest, TestSuccessfulScanOsAlertAffects) EXPECT_CALL(*spOsDataCacheMock, setOsData(_, _)).Times(1); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); @@ -158,11 +166,14 @@ TEST_F(ScanOsAlertDetailsBuilderTest, TestSuccessfulScanOsAlertAffects) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant 
syscollectorDelta = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"INSERTED"})"_json; scanContext->m_isFirstScan = false; - TScanOsAlertDetailsBuilder> + TScanOsAlertDetailsBuilder> scanOsAlertDetailsAugmentation(spDatabaseFeedManagerMock); EXPECT_NO_THROW(scanOsAlertDetailsAugmentation.handleRequest(scanContext)); @@ -258,6 +269,9 @@ TEST_F(ScanOsAlertDetailsBuilderTest, TestSuccessfulScanOsAlertWasSolved) EXPECT_CALL(*spOsDataCacheMock, setOsData(_, _)).Times(1); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); @@ -268,11 +282,14 @@ TEST_F(ScanOsAlertDetailsBuilderTest, TestSuccessfulScanOsAlertWasSolved) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"DELETED"})"_json; scanContext->m_isFirstScan = false; - TScanOsAlertDetailsBuilder> + TScanOsAlertDetailsBuilder> scanOsAlertDetailsAugmentation(spDatabaseFeedManagerMock); EXPECT_NO_THROW(scanOsAlertDetailsAugmentation.handleRequest(scanContext)); @@ -369,6 +386,9 @@ TEST_F(ScanOsAlertDetailsBuilderTest, TestFirstScanNoAlerts) EXPECT_CALL(*spOsDataCacheMock, setOsData(_, _)).Times(1); EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); @@ -379,11 +399,14 @@ TEST_F(ScanOsAlertDetailsBuilderTest, TestFirstScanNoAlerts) uint8_t* buffer = parser.builder_.GetBufferPointer(); std::variant syscollectorDelta = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); - auto scanContext = std::make_shared>(syscollectorDelta); + auto scanContext = + std::make_shared>( + syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"DELETED"})"_json; scanContext->m_isFirstScan = true; - TScanOsAlertDetailsBuilder> + TScanOsAlertDetailsBuilder> scanOsAlertDetailsAugmentation(spDatabaseFeedManagerMock); EXPECT_NO_THROW(scanOsAlertDetailsAugmentation.handleRequest(scanContext)); From 2464aa784905b4a891c0955e5bb22d2cd8ab3e71 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 8 May 2024 09:49:05 -0300 Subject: [PATCH 102/419] CL: - Fixed clang format --- .../src/scanOrchestrator/resultIndexer.hpp | 20 +++++++++---------- .../unit/scanOsAlertDetailsBuilder_test.cpp | 1 - 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp 
b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp index 01a1c01611c..962776824e3 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp @@ -52,17 +52,17 @@ class TResultIndexer final : public AbstractHandlerpublish(result.dump()); - } - else - { - logError(WM_VULNSCAN_LOGTAG, "Invalid element to publish: %s", result.dump().c_str()); - } - }; + m_indexerConnector->publish(result.dump()); + } + else + { + logError(WM_VULNSCAN_LOGTAG, "Invalid element to publish: %s", result.dump().c_str()); + } + }; for (const auto& [key, value] : data->m_elements) { diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp index a7a4f5c7e14..2ef460623b8 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp @@ -20,7 +20,6 @@ #include "MockRemediationDataCache.hpp" #include "TrampolineOsDataCache.hpp" #include "TrampolineRemediationDataCache.hpp" -#include "TrampolineRemediationDataCache.hpp" #include "flatbuffers/flatbuffer_builder.h" #include "flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" From 3b69c2a871a3448479dde7ab921bef8eb070392b Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 8 May 2024 10:02:14 -0300 Subject: [PATCH 103/419] CL: - Fixed doxygen --- .../tests/mocks/TrampolineRemediationDataCache.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineRemediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineRemediationDataCache.hpp index 39a0a116259..feb28b87c60 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineRemediationDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/TrampolineRemediationDataCache.hpp @@ -50,7 +50,7 @@ class TrampolineRemediationDataCache final : public Singleton Date: Wed, 8 May 2024 13:21:50 -0300 Subject: [PATCH 104/419] CL: - Fixed typo (missing .hpp extention) --- .../{cveSolvedInventorySync => cveSolvedInventorySync.hpp} | 0 .../src/scanOrchestrator/factoryOrchestrator.hpp | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/{cveSolvedInventorySync => cveSolvedInventorySync.hpp} (100%) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cveSolvedInventorySync b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cveSolvedInventorySync.hpp similarity index 100% rename from src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cveSolvedInventorySync rename to src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cveSolvedInventorySync.hpp diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp index f4aa6c1a5f5..b37d3986939 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp @@ -20,7 +20,7 @@ #include "cleanInventory.hpp" #include "clearSendReport.hpp" #include "cveSolvedAlertDetailsBuilder.hpp" -#include 
"cveSolvedInventorySync" +#include "cveSolvedInventorySync.hpp" #include "eventDeleteInventory.hpp" #include "eventDetailsBuilder.hpp" #include "eventInsertInventory.hpp" From 24d48483725035b1302e18c967d5c4118b74d702 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 8 May 2024 14:44:27 -0300 Subject: [PATCH 105/419] CL: - Updated UTs to extend coverage --- .../src/databaseFeedManager/storeModel.hpp | 2 +- .../tests/unit/eventDecoder_test.cpp | 214 +++++++++++++++- .../tests/unit/storeModel_test.cpp | 159 +++++++++++- .../tests/unit/updateHotfixes_test.cpp | 228 +++++++++++++++++- 4 files changed, 584 insertions(+), 19 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/storeModel.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/storeModel.hpp index 01e14a69452..5178f760d5c 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/storeModel.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/storeModel.hpp @@ -38,7 +38,7 @@ class StoreModel final : public AbstractHandler> { if (!data->cve5Buffer.data()) { - throw std::runtime_error("CVE5 buffer is empty"); + throw std::runtime_error("CVE5 buffer is empty"); // LCOV_EXCL_LINE } auto cve5Entry = cve_v5::GetEntry(data->cve5Buffer.data()); const auto& type = data->resource.contains("type") ? data->resource.at("type").get() : ""; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDecoder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDecoder_test.cpp index 6d91321d1e2..bbb6954d277 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDecoder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventDecoder_test.cpp @@ -24,6 +24,9 @@ const std::string CVE_ID {"CVE-2010-0002"}; const std::string TRANSLATION_ID {"TID-0001"}; const std::string VENDOR_MAP_ID {"FEED-GLOBAL"}; +const std::string OSCPE_MAP_ID {"OSCPE-GLOBAL"}; +const std::string CNA_MAPPING_ID {"CNA-MAPPING-GLOBAL"}; +const std::string UNSUPPORTED_ID {"UNSUPPORTED-GLOBAL"}; auto constexpr CREATED_RESOURCE {R"( { @@ -496,8 +499,105 @@ auto constexpr VENDOR_MAP {R"( } )"}; -auto constexpr UPDATE_VENDOR_MAP { - R"( +auto constexpr OSCPE_MAP {R"***( + { + "offset": 2, + "type": "create", + "version": 1, + "context": "vulnerabilities", + "resource": "OSCPE-GLOBAL", + "payload": { + "opensuse-leap": "suse:sles:15", + "opensuse-tumbleweed": "suse:sles:15", + "rhel": "redhat:enterprise_linux:$(MAJOR_VERSION)", + "centos": "redhat:enterprise_linux:$(MAJOR_VERSION)", + "fedora": "fedoraproject:fedora:$(MAJOR_VERSION)", + "rocky": "rocky:rocky:$(MAJOR_VERSION)", + "amzn": "amazon:amazon_linux:$(MAJOR_VERSION)", + "ol": "oracle:linux:$(MAJOR_VERSION):$(MINOR_VERSION)", + "sles": "suse:sles:$(VERSION_UPDATE_HYPHEN)", + "sled": "suse:sled:$(VERSION_UPDATE_HYPHEN)", + "almalinux": "almalinux:almalinux:$(MAJOR_VERSION)", + "Microsoft Windows Server 2003": "microsoft:windows_server_2003::$(RELEASE)::::", + "Microsoft Windows Server 2003 R2": "microsoft:windows_server_2003:r2:$(RELEASE)::::", + "Microsoft Windows XP": "microsoft:windows_xp::$(RELEASE)::::", + "Microsoft Windows Vista": "microsoft:windows_vista:$(RELEASE):::::", + "Microsoft Windows 7": "microsoft:windows_7:$(RELEASE):::::", + "Microsoft Windows 8": "microsoft:windows_8::::::", + "Microsoft Windows 8.1": "microsoft:windows_8.1::::::", + "Microsoft Windows 10": "microsoft:windows_10_$(DISPLAY_VERSION):$(VERSION):::::", + "Microsoft Windows 11": 
"microsoft:windows_11_$(DISPLAY_VERSION):$(VERSION):::::", + "Microsoft Windows Server 2008": "microsoft:windows_server_2008::$(RELEASE)::::", + "Microsoft Windows Server 2008 R2": "microsoft:windows_server_2008:r2:$(RELEASE)::::", + "Microsoft Windows Server 2012": "microsoft:windows_server_2012::::::", + "Microsoft Windows Server 2012 R2": "microsoft:windows_server_2012:r2:::::", + "Microsoft Windows Server 2012 23H2": "microsoft:windows_server_2022_23h2:*:::::", + "Microsoft Windows Server 2016": "microsoft:windows_server_2016:$(RELEASE):::::", + "Microsoft Windows Server 2019": "microsoft:windows_server_2019:$(RELEASE):::::", + "Microsoft Windows Server 2022": "microsoft:windows_server_2022:$(RELEASE):::::", + "macOS": "apple:macos:::::" + } + } + )***"}; + +auto constexpr CNA_MAPPING_MAP {R"***( + { + "offset": 2, + "type": "create", + "version": 1, + "context": "vulnerabilities", + "resource": "CNA-MAPPING-GLOBAL", + "payload": { + "cnaMapping": + { + "suse": "$(PLATFORM)_$(MAJOR_VERSION)", + "redhat": "redhat_$(MAJOR_VERSION)", + "alma": "alma_$(MAJOR_VERSION)", + "alas": "alas_$(MAJOR_VERSION)", + "opensuse": "suse_server_15" + }, + "platformEquivalence": + { + "sled": "suse_desktop", + "sles": "suse_server" + }, + "majorVersionEquivalence": + { + "amzn": + { + "2018": "1" + } + } + } + } + )***"}; + +auto constexpr UNSUPPORTED_MAP {R"***( + { + "offset": 1, + "type": "create", + "version": 1, + "context": "vulnerabilities", + "resource": "UNSUPPORTED-GLOBAL", + "payload": { + "unsupported": "unsupported" + } + } + )***"}; + +auto constexpr MISSING_RESOURCE_KEY {R"( + { + "offset": 1, + "type": "create", + "version": 1, + "context": "vulnerabilities", + "payload": { + "unsupported": "unsupported" + } + } +)"}; + +auto constexpr UPDATE_VENDOR_MAP {R"( { "offset": 2, "type": "update", @@ -815,6 +915,97 @@ TEST_F(EventDecoderTest, TestCreatedVendorMap) EXPECT_EQ(nlohmann::json::parse(VENDOR_MAP).at("payload"), result); } +/* + * @brief Test a new oscpe map resource. + */ +TEST_F(EventDecoderTest, TestCreatedOscpeMap) +{ + std::vector message {}; + auto jsonResource = nlohmann::json::parse(OSCPE_MAP); + auto eventContext = std::make_shared( + EventContext {.message = message, .resource = jsonResource, .feedDatabase = m_feedDb.get()}); + + std::shared_ptr eventDecoder; + + // Instantiation of the EventDecoder class. + EXPECT_NO_THROW(eventDecoder = std::make_shared()); + + // HandleRequest + EXPECT_NO_THROW(eventDecoder->handleRequest(eventContext)); + + // Verify data + rocksdb::PinnableSlice slice; + EXPECT_TRUE(m_feedDb->get(OSCPE_MAP_ID, slice, COLUMNS.at(ResourceType::OSCPE_RULES))); + auto result = nlohmann::json::parse(slice.ToString()); + + EXPECT_EQ(nlohmann::json::parse(OSCPE_MAP).at("payload"), result); +} + +/* + * @brief Test a new cna mapping resource. + */ +TEST_F(EventDecoderTest, TestCreatedCnaMapping) +{ + std::vector message {}; + auto jsonResource = nlohmann::json::parse(CNA_MAPPING_MAP); + auto eventContext = std::make_shared( + EventContext {.message = message, .resource = jsonResource, .feedDatabase = m_feedDb.get()}); + + std::shared_ptr eventDecoder; + + // Instantiation of the EventDecoder class. 
+ EXPECT_NO_THROW(eventDecoder = std::make_shared()); + + // HandleRequest + EXPECT_NO_THROW(eventDecoder->handleRequest(eventContext)); + + // Verify data + rocksdb::PinnableSlice slice; + EXPECT_TRUE(m_feedDb->get(CNA_MAPPING_ID, slice, COLUMNS.at(ResourceType::CNA_MAPPING))); + auto result = nlohmann::json::parse(slice.ToString()); + + EXPECT_EQ(nlohmann::json::parse(CNA_MAPPING_MAP).at("payload"), result); +} + +/** + * @brief Test a new unsupported resource + * + */ +TEST_F(EventDecoderTest, TestCreatedUnsupported) +{ + std::vector message {}; + auto jsonResource = nlohmann::json::parse(UNSUPPORTED_MAP); + auto eventContext = std::make_shared( + EventContext {.message = message, .resource = jsonResource, .feedDatabase = m_feedDb.get()}); + + std::shared_ptr eventDecoder; + + // Instantiation of the EventDecoder class. + EXPECT_NO_THROW(eventDecoder = std::make_shared()); + + // HandleRequest + EXPECT_NO_THROW(eventDecoder->handleRequest(eventContext)); +} + +/** + * @brief Test update non-existing resource + * + */ +TEST_F(EventDecoderTest, TestUpdateNonExistingResource) +{ + std::vector message {}; + auto jsonResource = nlohmann::json::parse(UPDATED_RESOURCE); + auto eventContext = std::make_shared( + EventContext {.message = message, .resource = jsonResource, .feedDatabase = m_feedDb.get()}); + + std::shared_ptr eventDecoder; + + // Instantiation of the EventDecoder class. + EXPECT_NO_THROW(eventDecoder = std::make_shared()); + + EXPECT_THROW(eventDecoder->handleRequest(eventContext), std::runtime_error); +} + /* * @brief Test a vendor map update */ @@ -857,3 +1048,22 @@ TEST_F(EventDecoderTest, TestUpdatedVendorMap) } EXPECT_EQ(result.at("/contains/0/New Vendor Replaced"_json_pointer), "new_vendor_replaced"); } + +/** + * @brief Test missing resource key + * + */ +TEST_F(EventDecoderTest, TestMissingResourceKey) +{ + std::vector message {}; + auto jsonResource = nlohmann::json::parse(MISSING_RESOURCE_KEY); + auto eventContext = std::make_shared( + EventContext {.message = message, .resource = jsonResource, .feedDatabase = m_feedDb.get()}); + + std::shared_ptr eventDecoder; + + // Instantiation of the EventDecoder class. + EXPECT_NO_THROW(eventDecoder = std::make_shared()); + + EXPECT_THROW(eventDecoder->handleRequest(eventContext), std::runtime_error); +} diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/storeModel_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/storeModel_test.cpp index 1e6d7e2b305..4c5166569c0 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/storeModel_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/storeModel_test.cpp @@ -19,8 +19,8 @@ #include const std::string CVE_UPDATE_EXAMPLE_SUCCESS {"CVE-2022-0605"}; -const std::string CVE5_ENTRY { - R"( + +const std::string CVE5_ENTRY_PUBLISHED = R"( { "containers": { "cna": { @@ -105,7 +105,68 @@ const std::string CVE5_ENTRY { }, "dataType": "CVE_RECORD", "dataVersion": "5.0" - })"}; + } + )"; + +const std::string CVE5_ENTRY_REJECTED = R"( + { + "containers": { + "cna": { + "providerMetadata": { + "dateUpdated": "2023-03-09T14:02:53Z", + "orgId": "00000000-0000-4000-A000-000000000003", + "shortName": "nvd" + }, + "rejectedReasons": [ + { + "lang": "en", + "value": "This candidate was in a CNA pool that was not assigned to any issues during 2022." 
+ } + ] + } + }, + "cveMetadata": { + "assignerOrgId": "00000000-0000-4000-A000-000000000003", + "assignerShortName": "nvd", + "cveId": "CVE-2022-26053", + "datePublished": "2023-03-07T23:15:10Z", + "dateUpdated": "2023-03-09T14:02:53Z", + "state": "REJECTED" + }, + "dataType": "CVE_RECORD", + "dataVersion": "5.0" + } + )"; + +const std::string CVE5_ENTRY_INVALID_STATE = R"( + { + "containers": { + "cna": { + "providerMetadata": { + "dateUpdated": "2023-03-09T14:02:53Z", + "orgId": "00000000-0000-4000-A000-000000000003", + "shortName": "nvd" + }, + "rejectedReasons": [ + { + "lang": "en", + "value": "This candidate was in a CNA pool that was not assigned to any issues during 2022." + } + ] + } + }, + "cveMetadata": { + "assignerOrgId": "00000000-0000-4000-A000-000000000003", + "assignerShortName": "nvd", + "cveId": "CVE-2022-26053", + "datePublished": "2023-03-07T23:15:10Z", + "dateUpdated": "2023-03-09T14:02:53Z", + "state": "INVALID" + }, + "dataType": "CVE_RECORD", + "dataVersion": "5.0" + } + )"; /* * @brief Test handleRequest of the StoreModel class. @@ -117,13 +178,15 @@ TEST_F(StoreModelTest, TestHandleRequestCreate) resource["type"] = "create"; flatbuffers::Parser parser; - ASSERT_TRUE(parser.Parse(cve5_SCHEMA) && parser.Parse(CVE5_ENTRY.c_str())); + ASSERT_TRUE(parser.Parse(cve5_SCHEMA) && parser.Parse(CVE5_ENTRY_PUBLISHED.c_str())); flatbuffers::FlatBufferBuilder& builder = parser.builder_; auto feedDatabase = std::make_unique("temp"); - auto eventContext = std::make_shared( - EventContext {.message = message, .resource = resource, .feedDatabase = feedDatabase.get()}); + auto eventContext = std::make_shared(EventContext {.message = message, + .resource = resource, + .feedDatabase = feedDatabase.get(), + .resourceType = ResourceType::CVE}); eventContext->cve5Buffer = builder.Release(); @@ -143,13 +206,15 @@ TEST_F(StoreModelTest, TestHandleRequestUpdate) resource["type"] = "update"; flatbuffers::Parser parser; - ASSERT_TRUE(parser.Parse(cve5_SCHEMA) && parser.Parse(CVE5_ENTRY.c_str())); + ASSERT_TRUE(parser.Parse(cve5_SCHEMA) && parser.Parse(CVE5_ENTRY_PUBLISHED.c_str())); flatbuffers::FlatBufferBuilder& builder = parser.builder_; auto feedDatabase = std::make_unique("temp"); - auto eventContext = std::make_shared( - EventContext {.message = message, .resource = resource, .feedDatabase = feedDatabase.get()}); + auto eventContext = std::make_shared(EventContext {.message = message, + .resource = resource, + .feedDatabase = feedDatabase.get(), + .resourceType = ResourceType::CVE}); eventContext->cve5Buffer = builder.Release(); @@ -161,3 +226,79 @@ TEST_F(StoreModelTest, TestHandleRequestUpdate) // HandleRequest EXPECT_NO_THROW(storeModel->handleRequest(eventContext)); } + +TEST_F(StoreModelTest, TestInvalidEventType) +{ + std::vector message; + nlohmann::json resource; + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(cve5_SCHEMA) && parser.Parse(CVE5_ENTRY_PUBLISHED.c_str())); + + flatbuffers::FlatBufferBuilder& builder = parser.builder_; + + auto feedDatabase = std::make_unique("temp"); + auto eventContext = std::make_shared(EventContext {.message = message, + .resource = resource, + .feedDatabase = feedDatabase.get(), + .resourceType = ResourceType::CVE}); + + eventContext->cve5Buffer = builder.Release(); + + std::shared_ptr storeModel; + + // Instantiation of the StoreModel class. 
+ EXPECT_NO_THROW(storeModel = std::make_shared()); + + // HandleRequest + EXPECT_THROW(storeModel->handleRequest(eventContext), std::runtime_error); +} + +TEST_F(StoreModelTest, TestInvalidEntryState) +{ + std::vector message; + nlohmann::json resource; + resource["type"] = "update"; + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(cve5_SCHEMA) && parser.Parse(CVE5_ENTRY_INVALID_STATE.c_str())); + + flatbuffers::FlatBufferBuilder& builder = parser.builder_; + + auto feedDatabase = std::make_unique("temp"); + auto eventContext = std::make_shared(EventContext {.message = message, + .resource = resource, + .feedDatabase = feedDatabase.get(), + .resourceType = ResourceType::CVE}); + + eventContext->cve5Buffer = builder.Release(); + + std::shared_ptr storeModel; + + // Instantiation of the StoreModel class. + EXPECT_NO_THROW(storeModel = std::make_shared()); + + // HandleRequest + EXPECT_THROW(storeModel->handleRequest(eventContext), std::runtime_error); +} + +TEST_F(StoreModelTest, TestEmptyBuffer) +{ + std::vector message; + nlohmann::json resource; + resource["type"] = "update"; + + auto feedDatabase = std::make_unique("temp"); + auto eventContext = std::make_shared(EventContext {.message = message, + .resource = resource, + .feedDatabase = feedDatabase.get(), + .resourceType = ResourceType::CVE}); + + std::shared_ptr storeModel; + + // Instantiation of the StoreModel class. + EXPECT_NO_THROW(storeModel = std::make_shared()); + + // HandleRequest + EXPECT_THROW(storeModel->handleRequest(eventContext), std::runtime_error); +} diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp index 0e787964eca..d82cbbd8c3d 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/updateHotfixes_test.cpp @@ -304,6 +304,63 @@ namespace NSUpdateHotfixesTest } )"; + const auto JSON_CVE5_NO_WINDOWS = R"( + { + "dataType": "CVE_RECORD", + "dataVersion": "5.0", + "cveMetadata": { + "cveId": "CVE-4444-4444", + "assignerOrgId": "b3476cb9-2e3d-41a6-98d0-0f47421a65b6", + "state": "PUBLISHED" + }, + "containers": { + "cna": { + "providerMetadata": { + "orgId": "b3476cb9-2e3d-41a6-98d0-0f47421a65b6" + }, + "problemTypes": [ + { + "descriptions": [ + { + "lang": "en", + "description": "CWE-78 OS Command Injection" + } + ] + } + ], + "affected": [ + { + "vendor": "Example.org", + "product": "Example Enterprise", + "versions": [ + { + "version": "1.0.0", + "status": "affected", + "lessThan": "1.0.6", + "versionType": "semver" + } + ], + "defaultStatus": "unaffected" + } + ], + "descriptions": [ + { + "lang": "en", + "value": "OS Command Injection vulnerability parseFilename function of example.php in the Web Management Interface of Example.org Example Enterprise on Windows, MacOS and XT-4500 allows remote unauthenticated attackers to escalate privileges.\n\nThis issue affects:\n * 1.0 versions before 1.0.6\n * 2.1 versions from 2.16 until 2.1.9." 
+ } + ], + "references": [ + { + "url": "https://example.org/ESA-22-11-CVE-1337-1234" + } + ], + "x_remediations": { + } + } + } + } +)"; + const auto JSON_CVE5_NO_REMEDIATIONS = R"( { "dataType": "CVE_RECORD", @@ -416,6 +473,60 @@ TEST_F(UpdateHotfixesTest, UpdatesWindowsRemediationMultipleBlocks) } } +TEST_F(UpdateHotfixesTest, SkipsEmptyRemediations) +{ + // Create a mock RocksDBWrapper object + std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); + + // Parse the test data + std::string schemaStr; + flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); + + flatbuffers::Parser parser; + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); + parser.Parse(JSON_CVE5_NO_REMEDIATIONS); + + // Create and verify the Entry object. + auto buffer = parser.builder_.GetBufferPointer(); + flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); + cve_v5::VerifyEntryBuffer(verifier); + + const auto entry = cve_v5::GetEntry(buffer); + + // Call the updateRemediation function with the test data + EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); + + // The column should not have been created + EXPECT_FALSE(rocksDBWrapper->columnExists(HOTFIXES_APPLICATIONS_COLUMN)); +} + +TEST_F(UpdateHotfixesTest, SkipsNoWindows) +{ + // Create a mock RocksDBWrapper object + std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); + + // Parse the test data + std::string schemaStr; + flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); + + flatbuffers::Parser parser; + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); + parser.Parse(JSON_CVE5_NO_WINDOWS); + + // Create and verify the Entry object. + auto buffer = parser.builder_.GetBufferPointer(); + flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); + cve_v5::VerifyEntryBuffer(verifier); + + const auto entry = cve_v5::GetEntry(buffer); + + // Call the updateRemediation function with the test data + EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); + + // The column should not have been created + EXPECT_FALSE(rocksDBWrapper->columnExists(HOTFIXES_APPLICATIONS_COLUMN)); +} + TEST_F(UpdateHotfixesTest, SkipsEmptyUpdates) { // Create a mock RocksDBWrapper object @@ -439,15 +550,13 @@ TEST_F(UpdateHotfixesTest, SkipsEmptyUpdates) // Call the updateRemediation function with the test data EXPECT_NO_THROW(UpdateHotfixes::storeVulnerabilityHotfixes(entry, rocksDBWrapper.get())); - // Create the key list from the entry - const auto cveId = entry->cveMetadata()->cveId()->str(); - std::vector nonExpectedKeys {"KBT-800_" + cveId, "KBT-1000_" + cveId, "KBT-3000_" + cveId}; - - for (const auto& key : nonExpectedKeys) + // Check that no data was inserted + auto counter = 0; + for (auto [key, value] : rocksDBWrapper->seek("KBT", HOTFIXES_APPLICATIONS_COLUMN)) { - rocksdb::PinnableSlice slice; - EXPECT_FALSE(rocksDBWrapper->get(key, slice, HOTFIXES_APPLICATIONS_COLUMN)) << "Unable to find key: " << key; + counter++; } + EXPECT_EQ(counter, 0); } TEST_F(UpdateHotfixesTest, MultipleVulnerabilities) @@ -520,6 +629,111 @@ TEST_F(UpdateHotfixesTest, MultipleVulnerabilities) } } +TEST_F(UpdateHotfixesTest, RemoveSkipNoDatabase) +{ + // Create a mock RocksDBWrapper object + std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); + + // Parse the test data + std::string schemaStr; + flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); + + flatbuffers::Parser parser; + parser.Parse(schemaStr.c_str(), 
FB_INCLUDE_DIRECTORIES); + parser.Parse(JSON_CVE5_EMPTY_UPDATES); + + // Create and verify the Entry object. + auto buffer = parser.builder_.GetBufferPointer(); + flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); + cve_v5::VerifyEntryBuffer(verifier); + + const auto entry = cve_v5::GetEntry(buffer); + + // Call the updateRemediation function with the test data + EXPECT_NO_THROW(UpdateHotfixes::removeHotfix(entry, rocksDBWrapper.get())); +} + +TEST_F(UpdateHotfixesTest, RemoveSkipNoWindows) +{ + // Create a mock RocksDBWrapper object + std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); + + // Create the column + EXPECT_NO_THROW(rocksDBWrapper->createColumn(HOTFIXES_APPLICATIONS_COLUMN)); + + // Parse the test data + std::string schemaStr; + flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); + + flatbuffers::Parser parser; + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); + parser.Parse(JSON_CVE5_NO_WINDOWS); + + // Create and verify the Entry object. + auto buffer = parser.builder_.GetBufferPointer(); + flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); + cve_v5::VerifyEntryBuffer(verifier); + + const auto entry = cve_v5::GetEntry(buffer); + + // Call the updateRemediation function with the test data + EXPECT_NO_THROW(UpdateHotfixes::removeHotfix(entry, rocksDBWrapper.get())); +} + +TEST_F(UpdateHotfixesTest, RemoveSkipNoRemediations) +{ + // Create a mock RocksDBWrapper object + std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); + + // Create the column + EXPECT_NO_THROW(rocksDBWrapper->createColumn(HOTFIXES_APPLICATIONS_COLUMN)); + + // Parse the test data + std::string schemaStr; + flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); + + flatbuffers::Parser parser; + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); + parser.Parse(JSON_CVE5_NO_REMEDIATIONS); + + // Create and verify the Entry object. + auto buffer = parser.builder_.GetBufferPointer(); + flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); + cve_v5::VerifyEntryBuffer(verifier); + + const auto entry = cve_v5::GetEntry(buffer); + + // Call the updateRemediation function with the test data + EXPECT_NO_THROW(UpdateHotfixes::removeHotfix(entry, rocksDBWrapper.get())); +} + +TEST_F(UpdateHotfixesTest, RemoveSkipEmptyUpdates) +{ + // Create a mock RocksDBWrapper object + std::unique_ptr rocksDBWrapper = std::make_unique(DATABASE_PATH); + + // Create the column + EXPECT_NO_THROW(rocksDBWrapper->createColumn(HOTFIXES_APPLICATIONS_COLUMN)); + + // Parse the test data + std::string schemaStr; + flatbuffers::LoadFile(FLATBUFFER_SCHEMA.c_str(), false, &schemaStr); + + flatbuffers::Parser parser; + parser.Parse(schemaStr.c_str(), FB_INCLUDE_DIRECTORIES); + parser.Parse(JSON_CVE5_EMPTY_UPDATES); + + // Create and verify the Entry object. 
+ auto buffer = parser.builder_.GetBufferPointer(); + flatbuffers::Verifier verifier(buffer, parser.builder_.GetSize()); + cve_v5::VerifyEntryBuffer(verifier); + + const auto entry = cve_v5::GetEntry(buffer); + + // Call the updateRemediation function with the test data + EXPECT_NO_THROW(UpdateHotfixes::removeHotfix(entry, rocksDBWrapper.get())); +} + TEST_F(UpdateHotfixesTest, StoreAndRemove) { // Create a mock RocksDBWrapper object From de9a7d207dad26f6976d0389689da6e28990702c Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 8 May 2024 18:20:15 -0300 Subject: [PATCH 106/419] CL: - Updated UTs to extend coverage - Added LCOV exclusion macro to closing braces (known bug) --- .../src/scanOrchestrator/inventorySync.hpp | 4 +- .../scanOrchestrator/remediationDataCache.hpp | 4 +- .../unit/buildAllAgentListContext_test.cpp | 56 +++++++++++++++++++ .../tests/unit/osDataCache_test.cpp | 42 ++++++++++++++ .../tests/unit/remediationDataCache_test.cpp | 50 +++++++++++++++-- 5 files changed, 147 insertions(+), 9 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/inventorySync.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/inventorySync.hpp index 9bd5bfc7b56..8735e1dfafc 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/inventorySync.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/inventorySync.hpp @@ -48,7 +48,7 @@ class TInventorySync json["operation"] = operation; json["id"] = elementKey; return json; - } + } // LCOV_EXCL_LINE /** * @brief Get the affected component key. @@ -74,7 +74,7 @@ class TInventorySync { throw std::runtime_error("Invalid affected type for inventory sync."); } - } + } // LCOV_EXCL_LINE public: // LCOV_EXCL_START diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp index 6d10c0b35e2..4f3273e4a69 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp @@ -77,7 +77,7 @@ class remediationDataCache final : public Singleton } return remediationData; - } + } // LCOV_EXCL_LINE public: /** @@ -110,7 +110,7 @@ class remediationDataCache final : public Singleton m_remediationData.insertKey(agentId, remediationData); // Update the cache with the queried data. return remediationData; - } + } // LCOV_EXCL_LINE /** * @brief Add remediation data to the cache. 
diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp index 3e0acc4c3a7..90b81da6501 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp @@ -62,3 +62,59 @@ TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContextWithElements) EXPECT_EQ(scanContext->m_agents.size(), 1); } + +TEST_F(BuildAllAgentListContextTest, MissingField) +{ + static const std::string MESSAGE { + R"(ok [{"name":"name", "version": "Wazuh 4.4.4", "ip":"192.168.0.1","node_name":"node_1"}])"}; + m_socketServer = + std::make_shared, EpollWrapper>>(TEST_SOCKET_PATH); + + m_socketServer->listen( + [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) + { + std::ignore = dataHeader; + std::ignore = sizeHeader; + + std::string receivedMsg(data, size); + EXPECT_STREQ(receivedMsg.c_str(), EXPECTED_QUERY.c_str()); + + m_socketServer->send(fd, MESSAGE.c_str(), MESSAGE.size()); + }); + + auto allAgentContext = std::make_shared< + TBuildAllAgentListContext>>(); + + auto scanContext = + std::make_shared>(); + + allAgentContext->handleRequest(scanContext); +} + +TEST_F(BuildAllAgentListContextTest, ExceptionOnDB) +{ + m_socketServer = + std::make_shared, EpollWrapper>>(TEST_SOCKET_PATH); + + m_socketServer->listen( + [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) + { + std::ignore = dataHeader; + std::ignore = sizeHeader; + std::ignore = size; + std::ignore = data; + + throw std::runtime_error("Error on DB"); + }); + + remediationDataCache cache; + std::string agentId {"1"}; + + auto allAgentContext = std::make_shared< + TBuildAllAgentListContext>>(); + + auto scanContext = + std::make_shared>(); + + EXPECT_THROW(allAgentContext->handleRequest(scanContext), WdbDataException); +} diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp index 3d4b9b63e34..562d97c3055 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp @@ -140,3 +140,45 @@ TEST_F(OsDataCacheTest, TestDbQuery) ASSERT_EQ(osDataRetrieved.kernelVersion, "kernelVersion"); ASSERT_EQ(osDataRetrieved.kernelRelease, "kernelRelease"); } + +TEST_F(OsDataCacheTest, EmptyResponse) +{ + // Start fake server + m_socketServer->listen( + [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) + { + std::ignore = fd; + std::ignore = dataHeader; + std::ignore = sizeHeader; + std::ignore = size; + std::ignore = data; + + m_socketServer->send(fd, "", 0); + }); + + OsDataCache cache; + std::string agentId {"1"}; + + // Try to get value from empty cache + EXPECT_THROW(cache.getOsData(agentId), WdbDataException); +} + +TEST_F(OsDataCacheTest, ExceptionOnDB) +{ + // Start fake server + m_socketServer->listen( + [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) + { + std::ignore = dataHeader; + std::ignore = sizeHeader; + std::ignore = size; + std::ignore = data; + + throw std::runtime_error("Error on DB"); + }); + + OsDataCache cache; + std::string agentId {"1"}; + + EXPECT_THROW(cache.getOsData(agentId), WdbDataException); +} diff --git 
a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp index 1c0279521ab..b841260ef82 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp @@ -78,7 +78,7 @@ TEST_F(remediationDataCacheTest, SetAndGetSuccess) std::ignore = size; std::ignore = data; - m_socketServer->send(fd, "err ", 4); + m_socketServer->send(fd, R"([{"hotfix":"hotfix1"},{"hotfix":"hotfix2"}])", 4); }); remediationDataCache cache; @@ -94,12 +94,52 @@ TEST_F(remediationDataCacheTest, SetAndGetSuccess) cache.addRemediationData(agentId, remediationData); - // Get value from cache - const auto retrievedData = cache.getRemediationData(agentId); - // Verify that the returned value is the same as the one set - EXPECT_TRUE(hotfixesAreEqual(remediationData, retrievedData)); + EXPECT_TRUE(hotfixesAreEqual(cache.getRemediationData(agentId), remediationData)); // Try to get from non existing agent EXPECT_THROW(cache.getRemediationData("2"), WdbDataException); } + +TEST_F(remediationDataCacheTest, EmptyResponseFromDB) +{ + // Start fake server + m_socketServer->listen( + [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) + { + std::ignore = fd; + std::ignore = dataHeader; + std::ignore = sizeHeader; + std::ignore = size; + std::ignore = data; + + m_socketServer->send(fd, "", 0); + }); + + remediationDataCache cache; + std::string agentId {"1"}; + + // Try to get value from empty cache + EXPECT_THROW(cache.getRemediationData(agentId), WdbDataException); +} + +TEST_F(remediationDataCacheTest, ExceptionOnDB) +{ + // Start fake server + m_socketServer->listen( + [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) + { + std::ignore = dataHeader; + std::ignore = sizeHeader; + std::ignore = size; + std::ignore = data; + + throw std::runtime_error("Error on DB"); + }); + + remediationDataCache cache; + std::string agentId {"1"}; + + // Try to get value from empty cache + EXPECT_THROW(cache.getRemediationData(agentId), WdbDataException); +} From 64c3c345bf20cbbf5d712692766a6de368540fc0 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 8 May 2024 20:12:25 -0300 Subject: [PATCH 107/419] CL: - Extended UT to improve coverity --- .../eventPackageAlertDetailsBuilder_test.cpp | 1 + .../tests/unit/packageScanner_test.cpp | 255 ++++++++++++++++++ .../unit/scanOsAlertDetailsBuilder_test.cpp | 209 ++++++++++++++ 3 files changed, 465 insertions(+) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp index 6cb92831a2b..7f82ba647fc 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/eventPackageAlertDetailsBuilder_test.cpp @@ -749,6 +749,7 @@ TEST_F(EventPackageAlertDetailsBuilderTest, TestSuccessfulPackageDeleted) std::make_shared>( syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"DELETED"})"_json; + scanContext->m_matchConditions[CVEID] = {"1.0.0", MatchRuleCondition::Unknown}; TEventPackageAlertDetailsBuilder> diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp 
b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp index 7ba76765978..b5c96891ebb 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp @@ -1811,3 +1811,258 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualToSLES) EXPECT_NO_THROW(packageScanner.handleRequest(scanContextOriginal)); } + +TEST_F(PackageScannerTest, TestGetTranslationFromL2) +{ + auto mockGetVulnerabilitiesCandidates = + [&](const std::string& cnaName, + const packageData& package, + const std::function& callback) + { + std::string candidatesFlatbufferSchemaStr; + + // Read schemas from filesystem. + bool valid = + flatbuffers::LoadFile(CANDIDATES_FLATBUFFER_SCHEMA_PATH.c_str(), false, &candidatesFlatbufferSchemaStr); + ASSERT_EQ(valid, true); + + // Parse schemas and JSON example. + flatbuffers::Parser fbParser; + valid = (fbParser.Parse(candidatesFlatbufferSchemaStr.c_str(), INCLUDE_DIRECTORIES) && + fbParser.Parse(CANDIDATES_AFFECTED_EQUAL_TO_INPUT.c_str())); + ASSERT_EQ(valid, true); + + auto candidatesArray = + GetScanVulnerabilityCandidateArray(reinterpret_cast(fbParser.builder_.GetBufferPointer())); + + if (candidatesArray) + { + for (const auto& candidate : *candidatesArray->candidates()) + { + if (callback(cnaName, package, *candidate)) + { + // If the candidate is vulnerable, we stop looking for. + break; + } + } + } + }; + + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "upstream", + .majorVersion = "osdata_majorVersion", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "osdata_platform", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + + auto spDatabaseFeedManagerMock = std::make_shared(); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("cnaName")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(_, _, _)) + .WillOnce(testing::Invoke(mockGetVulnerabilitiesCandidates)); + + std::vector mockTranslatedData = { + TranslatedData {.translatedProduct = "translatedProduct", .translatedVendor = "translatedVendor"}}; + EXPECT_CALL(*spDatabaseFeedManagerMock, getTranslationFromL2(_, _)).WillOnce(testing::Return(mockTranslatedData)); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); + ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); + + spGlobalDataMock = std::make_shared(); + EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); + + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); + + std::shared_ptr> scanContextResult; + EXPECT_NO_THROW(scanContextResult = 
packageScanner.handleRequest(scanContextOriginal)); + + ASSERT_TRUE(scanContextResult != nullptr); + + EXPECT_EQ(scanContextResult->m_elements.size(), 1); + EXPECT_NE(scanContextResult->m_elements.find(CVEID), scanContextResult->m_elements.end()); + + EXPECT_EQ(scanContextResult->m_matchConditions.size(), 1); + EXPECT_NE(scanContextResult->m_matchConditions.find(CVEID), scanContextResult->m_matchConditions.end()); + + auto& matchCondition = scanContextResult->m_matchConditions[CVEID]; + EXPECT_EQ(matchCondition.condition, MatchRuleCondition::Equal); + EXPECT_STREQ(matchCondition.version.c_str(), "5.1.9"); +} + +TEST_F(PackageScannerTest, TestGetCnaNameByPrefix) +{ + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "upstream", + .majorVersion = "15", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "sles", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + + auto spDatabaseFeedManagerMock = std::make_shared(); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByPrefix(_, _)).WillOnce(testing::Return("suse")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates("suse_server_15", _, _)); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); + ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); + + spGlobalDataMock = std::make_shared(); + EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); + + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); + + EXPECT_NO_THROW(packageScanner.handleRequest(scanContextOriginal)); +} + +TEST_F(PackageScannerTest, TestGetCnaNameByContains) +{ + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "upstream", + .majorVersion = "15", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "sles", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + + auto spDatabaseFeedManagerMock = std::make_shared(); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("")); + 
EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByPrefix(_, _)).WillOnce(testing::Return("")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByContains(_, _)).WillOnce(testing::Return("suse")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates("suse_server_15", _, _)); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); + ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); + + spGlobalDataMock = std::make_shared(); + EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); + + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); + + EXPECT_NO_THROW(packageScanner.handleRequest(scanContextOriginal)); +} + +TEST_F(PackageScannerTest, TestGetDefaultCna) +{ + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "upstream", + .majorVersion = "15", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "sles", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + + auto spDatabaseFeedManagerMock = std::make_shared(); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByPrefix(_, _)).WillOnce(testing::Return("")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByContains(_, _)).WillOnce(testing::Return("")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(DEFAULT_CNA, _, _)); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); + ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); + + spGlobalDataMock = std::make_shared(); + EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); + + TPackageScanner, + TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); + + EXPECT_NO_THROW(packageScanner.handleRequest(scanContextOriginal)); +} diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp index 2ef460623b8..3207c1624bf 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanOsAlertDetailsBuilder_test.cpp @@ -170,6 +170,7 @@ TEST_F(ScanOsAlertDetailsBuilderTest, TestSuccessfulScanOsAlertAffects) syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"INSERTED"})"_json; 
scanContext->m_isFirstScan = false; + scanContext->m_matchConditions[CVEID] = {"version", MatchRuleCondition::Unknown}; TScanOsAlertDetailsBuilder> @@ -209,6 +210,125 @@ TEST_F(ScanOsAlertDetailsBuilderTest, TestSuccessfulScanOsAlertAffects) EXPECT_STREQ(alert.at("vulnerability").at("updated").get_ref().c_str(), GetVulnerabilityDescription(fbBuilder.GetBufferPointer())->dateUpdated()->c_str()); } + +TEST_F(ScanOsAlertDetailsBuilderTest, TestVulnerabilityCvss2) +{ + flatbuffers::FlatBufferBuilder fbBuilder; + auto vulnerabilityDescriptionData = + NSVulnerabilityScanner::CreateVulnerabilityDescriptionDirect(fbBuilder, + "accessComplexity_test_string", + "assignerShortName_test_string", + "attackVector_test_string", + "authentication_test_string", + "availabilityImpact_test_string", + "classification_test_string", + "confidentialityImpact_test_string", + "cweId_test_string", + "datePublished_test_string", + "dateUpdated_test_string", + "description_test_string", + "integrityImpact_test_string", + "privilegesRequired_test_string", + "reference_test_string", + "scope_test_string", + 8.3, + "2", + "severity_test_string", + "userInteraction_test_string"); + fbBuilder.Finish(vulnerabilityDescriptionData); + + auto dbWrapper = std::make_unique(TEST_DESCRIPTION_DATABASE_PATH); + rocksdb::Slice dbValue(reinterpret_cast(fbBuilder.GetBufferPointer()), fbBuilder.GetSize()); + dbWrapper->put(CVEID, dbValue); + + auto mockGetVulnerabiltyDescriptiveInformation = + [&](const std::string_view cveId, + FlatbufferDataPair& resultContainer) + { + dbWrapper->get(std::string(cveId), resultContainer.slice); + resultContainer.data = const_cast( + NSVulnerabilityScanner::GetVulnerabilityDescription(resultContainer.slice.data())); + }; + + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "osdata_codeName", + .majorVersion = "osdata_majorVersion", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "osdata_platform", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, setOsData(_, _)).Times(1); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + + auto spDatabaseFeedManagerMock = std::make_shared(); + EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) + .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_synchronization_SCHEMA)); + ASSERT_TRUE(parser.Parse(OS_SCAN_MSG.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorDelta = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); + auto scanContext = + std::make_shared>( + syscollectorDelta); + scanContext->m_elements[CVEID] = R"({"operation":"INSERTED"})"_json; + scanContext->m_isFirstScan = false; + scanContext->m_matchConditions[CVEID] = {"version", MatchRuleCondition::DefaultStatus}; + + TScanOsAlertDetailsBuilder> + scanOsAlertDetailsAugmentation(spDatabaseFeedManagerMock); + + 
EXPECT_NO_THROW(scanOsAlertDetailsAugmentation.handleRequest(scanContext)); + + EXPECT_EQ(scanContext->m_elements.size(), 1); + EXPECT_NE(scanContext->m_elements.find(CVEID), scanContext->m_elements.end()); + + EXPECT_EQ(scanContext->m_alerts.size(), 1); + EXPECT_NE(scanContext->m_alerts.find(CVEID), scanContext->m_alerts.end()); + + auto& alert = scanContext->m_alerts[CVEID]; + + std::string alertScoreVersion {"cvss2"}; + + EXPECT_STREQ(alert.at("vulnerability").at("cve").get_ref().c_str(), CVEID.c_str()); + + EXPECT_DOUBLE_EQ( + alert.at("vulnerability").at("cvss").at(alertScoreVersion).at("base_score").get_ref(), + Utils::floatToDoubleRound(GetVulnerabilityDescription(fbBuilder.GetBufferPointer())->scoreBase(), 2)); + + EXPECT_STREQ(alert.at("vulnerability").at("enumeration").get_ref().c_str(), "CVE"); + EXPECT_STREQ(alert.at("vulnerability").at("published").get_ref().c_str(), + GetVulnerabilityDescription(fbBuilder.GetBufferPointer())->datePublished()->c_str()); + EXPECT_STREQ(alert.at("vulnerability").at("reference").get_ref().c_str(), + GetVulnerabilityDescription(fbBuilder.GetBufferPointer())->reference()->c_str()); + EXPECT_STREQ( + alert.at("vulnerability").at("severity").get_ref().c_str(), + Utils::toSentenceCase(GetVulnerabilityDescription(fbBuilder.GetBufferPointer())->severity()->str()).c_str()); + EXPECT_STREQ(alert.at("vulnerability").at("status").get_ref().c_str(), "Active"); + + EXPECT_STREQ(alert.at("vulnerability").at("title").get_ref().c_str(), + (CVEID + " affects " + scanContext->osName().data()).c_str()); + EXPECT_STREQ(alert.at("vulnerability").at("type").get_ref().c_str(), "Packages"); + EXPECT_STREQ(alert.at("vulnerability").at("updated").get_ref().c_str(), + GetVulnerabilityDescription(fbBuilder.GetBufferPointer())->dateUpdated()->c_str()); +} + TEST_F(ScanOsAlertDetailsBuilderTest, TestSuccessfulScanOsAlertWasSolved) { flatbuffers::FlatBufferBuilder fbBuilder; @@ -286,6 +406,7 @@ TEST_F(ScanOsAlertDetailsBuilderTest, TestSuccessfulScanOsAlertWasSolved) syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"DELETED"})"_json; scanContext->m_isFirstScan = false; + scanContext->m_matchConditions[CVEID] = {"version", MatchRuleCondition::LessThan}; TScanOsAlertDetailsBuilder> @@ -403,6 +524,7 @@ TEST_F(ScanOsAlertDetailsBuilderTest, TestFirstScanNoAlerts) syscollectorDelta); scanContext->m_elements[CVEID] = R"({"operation":"DELETED"})"_json; scanContext->m_isFirstScan = true; + scanContext->m_matchConditions[CVEID] = {"version", MatchRuleCondition::Equal}; TScanOsAlertDetailsBuilder> @@ -415,3 +537,90 @@ TEST_F(ScanOsAlertDetailsBuilderTest, TestFirstScanNoAlerts) EXPECT_EQ(scanContext->m_alerts.size(), 0); } + +TEST_F(ScanOsAlertDetailsBuilderTest, TestUnknownOperation) +{ + flatbuffers::FlatBufferBuilder fbBuilder; + auto vulnerabilityDescriptionData = + NSVulnerabilityScanner::CreateVulnerabilityDescriptionDirect(fbBuilder, + "accessComplexity_test_string", + "assignerShortName_test_string", + "attackVector_test_string", + "authentication_test_string", + "availabilityImpact_test_string", + "classification_test_string", + "confidentialityImpact_test_string", + "cweId_test_string", + "datePublished_test_string", + "dateUpdated_test_string", + "description_test_string", + "integrityImpact_test_string", + "privilegesRequired_test_string", + "reference_test_string", + "scope_test_string", + 8.3, + "3", + "severity_test_string", + "userInteraction_test_string"); + fbBuilder.Finish(vulnerabilityDescriptionData); + + auto dbWrapper = 
std::make_unique(TEST_DESCRIPTION_DATABASE_PATH); + rocksdb::Slice dbValue(reinterpret_cast(fbBuilder.GetBufferPointer()), fbBuilder.GetSize()); + dbWrapper->put(CVEID, dbValue); + + auto mockGetVulnerabiltyDescriptiveInformation = + [&](const std::string_view cveId, + FlatbufferDataPair& resultContainer) + { + dbWrapper->get(std::string(cveId), resultContainer.slice); + resultContainer.data = const_cast( + NSVulnerabilityScanner::GetVulnerabilityDescription(resultContainer.slice.data())); + }; + + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "osdata_codeName", + .majorVersion = "osdata_majorVersion", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "osdata_platform", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, setOsData(_, _)).Times(1); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + + auto spDatabaseFeedManagerMock = std::make_shared(); + EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabiltyDescriptiveInformation(_, _)) + .WillRepeatedly(testing::Invoke(mockGetVulnerabiltyDescriptiveInformation)); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_synchronization_SCHEMA)); + ASSERT_TRUE(parser.Parse(OS_SCAN_MSG.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorDelta = SyscollectorSynchronization::GetSyncMsg(reinterpret_cast(buffer)); + auto scanContext = + std::make_shared>( + syscollectorDelta); + scanContext->m_elements[CVEID] = R"({"operation":"UNKNOWN"})"_json; + scanContext->m_isFirstScan = false; + + TScanOsAlertDetailsBuilder> + scanOsAlertDetailsAugmentation(spDatabaseFeedManagerMock); + + EXPECT_THROW(scanOsAlertDetailsAugmentation.handleRequest(scanContext), std::runtime_error); + + EXPECT_EQ(scanContext->m_alerts.size(), 0); +} From 66bc18758ad3789dd8bce17d0050ec4ff4fe6733 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 8 May 2024 20:35:34 -0300 Subject: [PATCH 108/419] CL: - Fixed failing UT --- .../vulnerability_scanner/tests/unit/packageScanner_test.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp index b5c96891ebb..6c4ae60f3f0 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp @@ -2057,7 +2057,6 @@ TEST_F(PackageScannerTest, TestGetDefaultCna) syscollectorDelta); spGlobalDataMock = std::make_shared(); - EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); TPackageScanner, From a2b4f2302a9c592fc4bb9a790deab9d8f4522071 Mon Sep 17 00:00:00 2001 From: pereyra-m Date: Fri, 10 May 2024 13:57:49 +0000 Subject: [PATCH 109/419] CL: - Fix UT after rebase --- .../scanOrchestrator/remediationDataCache.hpp | 19 +---- .../src/scanOrchestrator/scanContext.hpp | 2 +- .../unit/buildAllAgentListContext_test.cpp | 74 
++++++++----------- 3 files changed, 34 insertions(+), 61 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp index 4f3273e4a69..ff354b795cd 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp @@ -40,7 +40,8 @@ struct Remediation final * * @note This class queries the Wazuh-DB to get the remediation data for a given agent, and stores it in a LRU cache */ -class remediationDataCache final : public Singleton +template +class remediationDataCache final : public Singleton> { private: LRUCache m_remediationData {PolicyManager::instance().getRemediationLRUSize()}; @@ -52,8 +53,8 @@ class remediationDataCache final : public Singleton nlohmann::json response; try { - m_wdbSocketWrapper->query(WazuhDBQueryBuilder::builder().agentGetHotfixesCommand(agentId).build(), - response); + SocketDBWrapper::instance().query(WazuhDBQueryBuilder::builder().agentGetHotfixesCommand(agentId).build(), + response); } catch (const std::exception& e) { @@ -94,18 +95,6 @@ class remediationDataCache final : public Singleton return *value; } - if (!m_wdbSocketWrapper) - { - try - { - m_wdbSocketWrapper.emplace(RemediationDataCacheConstants::WDB_SOCKET); - } - catch (...) - { - throw WdbDataException("Error creating socketDBWrapper", agentId); - } - } - const auto remediationData = getRemediationDataFromWdb(agentId); m_remediationData.insertKey(agentId, remediationData); // Update the cache with the queried data. diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index 157e5a82dd0..18a1836771f 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -115,7 +115,7 @@ struct AgentData */ template, typename TGlobalData = GlobalData, - typename TRemediationDataCache = remediationDataCache> + typename TRemediationDataCache = remediationDataCache<>> struct TScanContext final { private: diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp index 90b81da6501..90f1d1cfb8b 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp @@ -22,29 +22,25 @@ TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContext) spSocketDBWrapperMock = std::make_shared(); EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)).Times(1); - auto allAgentContext = std::make_shared< - TBuildAllAgentListContext, - TrampolineSocketDBWrapper>>(); + auto allAgentContext = + std::make_shared>(); - auto scanContext = - std::make_shared>(); + auto scanContext = std::make_shared(); allAgentContext->handleRequest(scanContext); } TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContextWithElements) { - static const std::string MESSAGE { - R"(ok [{"id":1, "name":"name", "version": "Wazuh 4.4.4", "ip":"192.168.0.1","node_name":"node_1"}])"}; - spSocketDBWrapperMock = std::make_shared(); - nlohmann::json queryResult = nlohmann::json::parse(R"([ + const nlohmann::json queryResult 
= nlohmann::json::parse(R"( + [ { - "id": 1, - "name": "name", - "version": "Wazuh 4.4.4", - "ip": "192.168.0.1", - "node_name": "node_1" + "id": 1, + "name": "name", + "version": "Wazuh 4.4.4", + "ip": "192.168.0.1", + "node_name": "node_1" } ])"); @@ -67,54 +63,42 @@ TEST_F(BuildAllAgentListContextTest, MissingField) { static const std::string MESSAGE { R"(ok [{"name":"name", "version": "Wazuh 4.4.4", "ip":"192.168.0.1","node_name":"node_1"}])"}; - m_socketServer = - std::make_shared, EpollWrapper>>(TEST_SOCKET_PATH); - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) + const nlohmann::json queryResult = nlohmann::json::parse(R"( + [ { - std::ignore = dataHeader; - std::ignore = sizeHeader; - - std::string receivedMsg(data, size); - EXPECT_STREQ(receivedMsg.c_str(), EXPECTED_QUERY.c_str()); + "name": "name", + "version": "Wazuh 4.4.4", + "ip": "192.168.0.1", + "node_name": "node_1" + } + ])"); - m_socketServer->send(fd, MESSAGE.c_str(), MESSAGE.size()); - }); + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)).Times(1).WillOnce(testing::Return(queryResult)); - auto allAgentContext = std::make_shared< - TBuildAllAgentListContext>>(); + auto allAgentContext = + std::make_shared>(); - auto scanContext = - std::make_shared>(); + auto scanContext = std::make_shared(); allAgentContext->handleRequest(scanContext); } TEST_F(BuildAllAgentListContextTest, ExceptionOnDB) { - m_socketServer = - std::make_shared, EpollWrapper>>(TEST_SOCKET_PATH); + spSocketDBWrapperMock = std::make_shared(); - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) - { - std::ignore = dataHeader; - std::ignore = sizeHeader; - std::ignore = size; - std::ignore = data; + const std::string agentId {"1"}; - throw std::runtime_error("Error on DB"); - }); + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::Throw(WdbDataException("Error on DB", agentId))); remediationDataCache cache; - std::string agentId {"1"}; - auto allAgentContext = std::make_shared< - TBuildAllAgentListContext>>(); + auto allAgentContext = std::make_shared>(); - auto scanContext = - std::make_shared>(); + auto scanContext = std::make_shared(); EXPECT_THROW(allAgentContext->handleRequest(scanContext), WdbDataException); } From 57b4379a425b2a423f2efe022899dcad0c33d4d0 Mon Sep 17 00:00:00 2001 From: pereyra-m Date: Fri, 10 May 2024 13:58:32 +0000 Subject: [PATCH 110/419] CL: - Add more logging for QA tests and fix hotfix answer --- .../vulnerability_scanner/qa/test_efficacy_log.py | 3 +++ .../qa/test_policy_change_log.py | 11 ++++++++--- .../vulnerability_scanner/testtool/scanner/main.cpp | 9 +++++---- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_efficacy_log.py b/src/wazuh_modules/vulnerability_scanner/qa/test_efficacy_log.py index 40276dc9fc3..12833a645e0 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_efficacy_log.py +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_efficacy_log.py @@ -238,15 +238,18 @@ def run_process_and_monitor_log(request, run_on_end): basetimeout = timeout for expected_line in expected_lines: while not found_lines[expected_line]: + LOGGER.debug(f"Waiting for: {expected_line}") if timeout < 8*basetimeout: tail_log(log_file, expected_lines, found_lines, timeout) timeout = 1.5*timeout else: LOGGER.error(f"Timeout waiting for: {expected_line}") + 
basetimeout = timeout break process.terminate() + LOGGER.debug(f"Waiting for the process to finish") return found_lines test_folders = sorted(Path("wazuh_modules/vulnerability_scanner/qa/test_data").glob(os.getenv('WAZUH_VD_TEST_GLOB', '*'))) diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_policy_change_log.py b/src/wazuh_modules/vulnerability_scanner/qa/test_policy_change_log.py index a9e837e672a..dbc0969d39c 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_policy_change_log.py +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_policy_change_log.py @@ -18,7 +18,7 @@ import re import time -def find_regex_in_file(regex, file, times=1, max_timeout=10): +def find_regex_in_file(regex, file, times=1, max_timeout=50): pattern = re.compile(regex) start_time = time.time() @@ -142,13 +142,18 @@ def runner_function(request, run_on_end): basetimeout = timeout for expected_line in expected_lines: while not found_lines[expected_line]: - if timeout < 4*basetimeout: + LOGGER.debug(f"Waiting for: {expected_line}") + if timeout < 8*basetimeout: tail_log(log_file, expected_lines, found_lines, timeout) timeout = 1.5*timeout + else: + LOGGER.error(f"Timeout waiting for: {expected_line}") + basetimeout = timeout + break - LOGGER.debug(f"Succesfully found all expected lines") # Wait for the process to finish, sigterm is sent to the process process.terminate() + LOGGER.debug(f"Waiting for the process to finish") return found_lines diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp index a816ade3289..4c4c3ae6712 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp @@ -236,6 +236,7 @@ int main(const int argc, const char* argv[]) else { std::string successMessage = "ok " + fakeGlobalData.dump(); + std::cout << "Response message: '" << successMessage << "' to: '" << messageReceived << "'" << std::endl; fakeDBServer->send(fd, successMessage.c_str(), successMessage.size()); } } @@ -247,7 +248,7 @@ int main(const int argc, const char* argv[]) std::string responseMessage = fakeAgentPackages.contains(agentId) ? "ok " + fakeAgentPackages[agentId].dump() : "ok []"; - std::cout << "Response message for sys_programs: " << responseMessage << std::endl; + std::cout << "Response message: '" << responseMessage << "' to: '" << messageReceived << "'" << std::endl; fakeDBServer->send(fd, responseMessage.c_str(), responseMessage.size()); } else if (tableName.find("sys_osinfo") != std::string::npos || tokens[2] == "osinfo") @@ -255,15 +256,15 @@ int main(const int argc, const char* argv[]) std::string responseMessage = fakeAgentOsData.contains(agentId) ? "ok " + fakeAgentOsData[agentId].dump() : "ok []"; - std::cout << "Response message for sys_osinfo: " << responseMessage << std::endl; + std::cout << "Response message: '" << responseMessage << "' to: '" << messageReceived << "'" << std::endl; fakeDBServer->send(fd, responseMessage.c_str(), responseMessage.size()); } else if (tableName.find("sys_hotfixes") != std::string::npos || tokens[2] == "hotfix") { std::string responseMessage = fakeAgentHotfixes.contains(agentId) ? 
"ok " + fakeAgentHotfixes[agentId].dump() - : "ok []"; - std::cout << "Response message for sys_hotfixes: " << responseMessage << std::endl; + : "ok [{\"status\": \"SUCCESS\"}]"; + std::cout << "Response message: '" << responseMessage << "' to: '" << messageReceived << "'" << std::endl; fakeDBServer->send(fd, responseMessage.c_str(), responseMessage.size()); } else From f599549697d7e8ed4b9b5eb7015663226faf9d46 Mon Sep 17 00:00:00 2001 From: pereyra-m Date: Fri, 10 May 2024 15:01:46 +0000 Subject: [PATCH 111/419] CL: - Fixing style --- .../tests/unit/buildAllAgentListContext_test.cpp | 2 +- .../vulnerability_scanner/testtool/scanner/main.cpp | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp index 90f1d1cfb8b..456aa07eb57 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp @@ -24,8 +24,8 @@ TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContext) auto allAgentContext = std::make_shared>(); - auto scanContext = std::make_shared(); + allAgentContext->handleRequest(scanContext); } diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp index 4c4c3ae6712..e7cd0fe7b8f 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp @@ -248,7 +248,8 @@ int main(const int argc, const char* argv[]) std::string responseMessage = fakeAgentPackages.contains(agentId) ? "ok " + fakeAgentPackages[agentId].dump() : "ok []"; - std::cout << "Response message: '" << responseMessage << "' to: '" << messageReceived << "'" << std::endl; + std::cout << "Response message: '" << responseMessage << "' to: '" << messageReceived + << "'" << std::endl; fakeDBServer->send(fd, responseMessage.c_str(), responseMessage.size()); } else if (tableName.find("sys_osinfo") != std::string::npos || tokens[2] == "osinfo") @@ -256,7 +257,8 @@ int main(const int argc, const char* argv[]) std::string responseMessage = fakeAgentOsData.contains(agentId) ? "ok " + fakeAgentOsData[agentId].dump() : "ok []"; - std::cout << "Response message: '" << responseMessage << "' to: '" << messageReceived << "'" << std::endl; + std::cout << "Response message: '" << responseMessage << "' to: '" << messageReceived + << "'" << std::endl; fakeDBServer->send(fd, responseMessage.c_str(), responseMessage.size()); } else if (tableName.find("sys_hotfixes") != std::string::npos || tokens[2] == "hotfix") @@ -264,7 +266,8 @@ int main(const int argc, const char* argv[]) std::string responseMessage = fakeAgentHotfixes.contains(agentId) ? 
"ok " + fakeAgentHotfixes[agentId].dump() : "ok [{\"status\": \"SUCCESS\"}]"; - std::cout << "Response message: '" << responseMessage << "' to: '" << messageReceived << "'" << std::endl; + std::cout << "Response message: '" << responseMessage << "' to: '" << messageReceived + << "'" << std::endl; fakeDBServer->send(fd, responseMessage.c_str(), responseMessage.size()); } else From 3af4dda0d2e02876030794c3945a8e07509bd40d Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Mon, 13 May 2024 10:52:14 -0300 Subject: [PATCH 112/419] CL: - Aligned UTs with rebase changes --- .../tests/unit/remediationDataCache_test.hpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp index 49dc018960a..d7d686fade9 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp @@ -54,7 +54,9 @@ class remediationDataCacheTest : public ::testing::Test "index-status": "yes", "cti-url": "cti-url.com" }, - "remediationLRUSize":1000 + "remediationLRUSize":1000, + "clusterName":"cluster01", + "clusterEnabled":false })")}; PolicyManager::instance().initialize(configJson); } From 24eed504cc934c3facaa89ac08235d5c109ff529 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Tue, 14 May 2024 15:44:25 -0300 Subject: [PATCH 113/419] CL: - Fixed class name - Aligned with changes from #23358 after rebase --- .../scanOrchestrator/remediationDataCache.hpp | 9 ++------- .../src/scanOrchestrator/scanContext.hpp | 2 +- .../tests/unit/buildAllAgentListContext_test.cpp | 12 ++++++------ .../tests/unit/remediationDataCache_test.cpp | 16 ++++++++-------- .../tests/unit/remediationDataCache_test.hpp | 11 ++++------- 5 files changed, 21 insertions(+), 29 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp index ff354b795cd..404e9c5b5d0 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp @@ -22,11 +22,6 @@ #include #include -namespace RemediationDataCacheConstants -{ - auto constexpr WDB_SOCKET {"queue/db/wdb"}; -} - /** * @brief Remediation structure. */ @@ -36,12 +31,12 @@ struct Remediation final }; /** - * @brief remediationDataCache class. + * @brief RemediationDataCache class. 
* * @note This class queries the Wazuh-DB to get the remediation data for a given agent, and stores it in a LRU cache */ template -class remediationDataCache final : public Singleton> +class RemediationDataCache final : public Singleton> { private: LRUCache m_remediationData {PolicyManager::instance().getRemediationLRUSize()}; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index 18a1836771f..b2b2f5297df 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -115,7 +115,7 @@ struct AgentData */ template, typename TGlobalData = GlobalData, - typename TRemediationDataCache = remediationDataCache<>> + typename TRemediationDataCache = RemediationDataCache<>> struct TScanContext final { private: diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp index 456aa07eb57..306483857c7 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp @@ -61,10 +61,7 @@ TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContextWithElements) TEST_F(BuildAllAgentListContextTest, MissingField) { - static const std::string MESSAGE { - R"(ok [{"name":"name", "version": "Wazuh 4.4.4", "ip":"192.168.0.1","node_name":"node_1"}])"}; - - const nlohmann::json queryResult = nlohmann::json::parse(R"( + nlohmann::json queryResult = nlohmann::json::parse(R"( [ { "name": "name", @@ -74,7 +71,10 @@ TEST_F(BuildAllAgentListContextTest, MissingField) } ])"); - EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)).Times(1).WillOnce(testing::Return(queryResult)); + // Set second argument to queryResult + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::SetArgReferee<1>(queryResult)); auto allAgentContext = std::make_shared>(); @@ -94,7 +94,7 @@ TEST_F(BuildAllAgentListContextTest, ExceptionOnDB) .Times(1) .WillOnce(testing::Throw(WdbDataException("Error on DB", agentId))); - remediationDataCache cache; + RemediationDataCache cache; auto allAgentContext = std::make_shared>(); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp index b841260ef82..5058faef713 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp @@ -29,9 +29,9 @@ bool hotfixesAreEqual(const Remediation& remediation1, const Remediation& remedi return true; } -TEST_F(remediationDataCacheTest, InsertMultipleItems) +TEST_F(RemediationDataCacheTest, InsertMultipleItems) { - remediationDataCache cache; + RemediationDataCache cache; std::string agentId {"1"}; { @@ -66,7 +66,7 @@ TEST_F(remediationDataCacheTest, InsertMultipleItems) } } -TEST_F(remediationDataCacheTest, SetAndGetSuccess) +TEST_F(RemediationDataCacheTest, SetAndGetSuccess) { // Start fake server m_socketServer->listen( @@ -81,7 +81,7 @@ TEST_F(remediationDataCacheTest, SetAndGetSuccess) m_socketServer->send(fd, R"([{"hotfix":"hotfix1"},{"hotfix":"hotfix2"}])", 4); }); - remediationDataCache cache; + 
RemediationDataCache cache; std::string agentId {"1"}; // Try to get value from empty cache @@ -101,7 +101,7 @@ TEST_F(remediationDataCacheTest, SetAndGetSuccess) EXPECT_THROW(cache.getRemediationData("2"), WdbDataException); } -TEST_F(remediationDataCacheTest, EmptyResponseFromDB) +TEST_F(RemediationDataCacheTest, EmptyResponseFromDB) { // Start fake server m_socketServer->listen( @@ -116,14 +116,14 @@ TEST_F(remediationDataCacheTest, EmptyResponseFromDB) m_socketServer->send(fd, "", 0); }); - remediationDataCache cache; + RemediationDataCache cache; std::string agentId {"1"}; // Try to get value from empty cache EXPECT_THROW(cache.getRemediationData(agentId), WdbDataException); } -TEST_F(remediationDataCacheTest, ExceptionOnDB) +TEST_F(RemediationDataCacheTest, ExceptionOnDB) { // Start fake server m_socketServer->listen( @@ -137,7 +137,7 @@ TEST_F(remediationDataCacheTest, ExceptionOnDB) throw std::runtime_error("Error on DB"); }); - remediationDataCache cache; + RemediationDataCache cache; std::string agentId {"1"}; // Try to get value from empty cache diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp index d7d686fade9..6b939ebd3f1 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp @@ -20,14 +20,14 @@ #include /** - * @brief Runs unit tests for remediationDataCache + * @brief Runs unit tests for RemediationDataCache */ -class remediationDataCacheTest : public ::testing::Test +class RemediationDataCacheTest : public ::testing::Test { protected: // LCOV_EXCL_START - remediationDataCacheTest() = default; - ~remediationDataCacheTest() override = default; + RemediationDataCacheTest() = default; + ~RemediationDataCacheTest() override = default; // LCOV_EXCL_STOP /** * @brief Fake socket server to test the DB query. @@ -42,9 +42,6 @@ class remediationDataCacheTest : public ::testing::Test void SetUp() override { std::filesystem::create_directories("queue/db"); - // Create the socket server - m_socketServer = std::make_shared, EpollWrapper>>( - RemediationDataCacheConstants::WDB_SOCKET); // Policy manager initialization. 
const auto& configJson {nlohmann::json::parse(R"( From d8bf8b18e9a409fb1b453becadaba301802c3ddf Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Tue, 14 May 2024 23:22:31 -0300 Subject: [PATCH 114/419] CL: - Fix: socketDBWrapper socket path was hardcoded --- src/shared_modules/utils/socketDBWrapper.hpp | 4 ++-- .../vulnerability_scanner/testtool/wazuhDBQuery/main.cpp | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/shared_modules/utils/socketDBWrapper.hpp b/src/shared_modules/utils/socketDBWrapper.hpp index c988dc62b3e..36129c3fd38 100644 --- a/src/shared_modules/utils/socketDBWrapper.hpp +++ b/src/shared_modules/utils/socketDBWrapper.hpp @@ -56,10 +56,10 @@ class SocketDBWrapper final : public Singleton bool m_dataReady {false}; public: - void init() + void init(const std::string& socketPath = WDB_SOCKET) { m_teardown = false; - m_dbSocket = std::make_unique, EpollWrapper>>(WDB_SOCKET); + m_dbSocket = std::make_unique, EpollWrapper>>(socketPath); m_dbSocket->connect( [&](const char* body, uint32_t bodySize, const char*, uint32_t) { diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/wazuhDBQuery/main.cpp b/src/wazuh_modules/vulnerability_scanner/testtool/wazuhDBQuery/main.cpp index 20ef880a1f3..9ebbce785e6 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/wazuhDBQuery/main.cpp +++ b/src/wazuh_modules/vulnerability_scanner/testtool/wazuhDBQuery/main.cpp @@ -44,7 +44,7 @@ int main(const int argc, const char* argv[]) // Read json configuration file auto configuration = nlohmann::json::parse(std::ifstream(cmdLineArgs.getConfigurationFilePath())); nlohmann::json response; - SocketDBWrapper::instance().init(); + SocketDBWrapper::instance().init(WAZUH_DB_SOCK); for (const auto& query : configuration.at("queries")) { From 7c2ec86666dd2dddad1300d0c24c5a19461da39e Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Tue, 14 May 2024 23:23:18 -0300 Subject: [PATCH 115/419] CL: - Aligned with changes on dbWrapper - Updated UTs to align with changes on dbWrapper --- .../scanOrchestrator/remediationDataCache.hpp | 19 ++-- .../tests/unit/osDataCache_test.cpp | 93 ++++++---------- .../tests/unit/osDataCache_test.hpp | 10 -- .../tests/unit/remediationDataCache_test.cpp | 105 ++++++++++-------- .../tests/unit/remediationDataCache_test.hpp | 7 -- 5 files changed, 103 insertions(+), 131 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp index 404e9c5b5d0..3b6fa1c60dd 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp @@ -41,14 +41,13 @@ class RemediationDataCache final : public Singleton> private: LRUCache m_remediationData {PolicyManager::instance().getRemediationLRUSize()}; std::shared_mutex m_mutex; - std::optional m_wdbSocketWrapper {std::nullopt}; - Remediation getRemediationDataFromWdb(const std::string& agentId) + Remediation getRemediationDataFromWdb(const std::string& agentId) const { nlohmann::json response; try { - SocketDBWrapper::instance().query(WazuhDBQueryBuilder::builder().agentGetHotfixesCommand(agentId).build(), + TSocketDBWrapper::instance().query(WazuhDBQueryBuilder::builder().agentGetHotfixesCommand(agentId).build(), response); } catch (const std::exception& e) @@ -56,13 +55,14 @@ class RemediationDataCache final : public Singleton> throw 
WdbDataException(e.what(), agentId); } + Remediation remediationData; + if (response.empty()) { - throw WdbDataException("Empty response from Wazuh-DB", agentId); + // No remediation data found (no hotfixes installed) + return remediationData; } - Remediation remediationData; - // Iterate over the response and store the hotfixes. for (auto& hotfix : response) { @@ -91,7 +91,12 @@ class RemediationDataCache final : public Singleton> } const auto remediationData = getRemediationDataFromWdb(agentId); - m_remediationData.insertKey(agentId, remediationData); // Update the cache with the queried data. + + if (!remediationData.hotfixes.empty()) + { + // Update the cache with the queried data + m_remediationData.insertKey(agentId, remediationData); + } return remediationData; } // LCOV_EXCL_LINE diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp index 562d97c3055..fcb0cc66ad6 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp @@ -15,23 +15,14 @@ TEST_F(OsDataCacheTest, TestSetAndGetSuccess) { - // Start fake server - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) - { - std::ignore = fd; - std::ignore = dataHeader; - std::ignore = sizeHeader; - std::ignore = size; - std::ignore = data; - - m_socketServer->send(fd, "err ", 4); - }); - OsDataCache cache; - std::string agentId {"1"}; spSocketDBWrapperMock = std::make_shared(); - EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)).Times(1); + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::SetArgReferee<1>(nlohmann::json::object())); + + std::string agentId {"1"}; // Try to get value from empty cache EXPECT_THROW(cache.getOsData(agentId), WdbDataException); @@ -74,10 +65,15 @@ TEST_F(OsDataCacheTest, TestSetAndGetSuccess) ASSERT_EQ(osDataRetrieved.sysName, osData.sysName); ASSERT_EQ(osDataRetrieved.kernelVersion, osData.kernelVersion); ASSERT_EQ(osDataRetrieved.kernelRelease, osData.kernelRelease); + + spSocketDBWrapperMock.reset(); } TEST_F(OsDataCacheTest, TestDbQuery) { + spSocketDBWrapperMock = std::make_shared(); + OsDataCache cache; + // Create fake response nlohmann::json response = nlohmann::json::array(); response.push_back(nlohmann::json::object()); @@ -97,28 +93,11 @@ TEST_F(OsDataCacheTest, TestDbQuery) response.at(0)["version"] = "kernelVersion"; response.at(0)["release"] = "kernelRelease"; - std::string responseString = response.dump(); - std::string finalResponse = "ok "; - finalResponse.append(responseString); - - // Start fake server - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) - { - std::ignore = dataHeader; - std::ignore = sizeHeader; - std::ignore = size; - std::ignore = data; - - m_socketServer->send(fd, finalResponse.c_str(), finalResponse.size()); - }); - - OsDataCache cache; - std::string agentId {"1"}; - spSocketDBWrapperMock = std::make_shared(); EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) .Times(1) - .WillRepeatedly(testing::SetArgReferee<1>(response)); + .WillOnce(testing::SetArgReferee<1>(response)); + + std::string agentId {"1"}; // Get value from cache auto osDataRetrieved = cache.getOsData(agentId); @@ -143,20 +122,15 @@ TEST_F(OsDataCacheTest, TestDbQuery) 
TEST_F(OsDataCacheTest, EmptyResponse) { - // Start fake server - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) - { - std::ignore = fd; - std::ignore = dataHeader; - std::ignore = sizeHeader; - std::ignore = size; - std::ignore = data; - - m_socketServer->send(fd, "", 0); - }); - - OsDataCache cache; + spSocketDBWrapperMock = std::make_shared(); + OsDataCache cache; + + nlohmann::json queryResponse = nlohmann::json::array(); + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::SetArgReferee<1>(queryResponse)); + std::string agentId {"1"}; // Try to get value from empty cache @@ -165,20 +139,15 @@ TEST_F(OsDataCacheTest, EmptyResponse) TEST_F(OsDataCacheTest, ExceptionOnDB) { - // Start fake server - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) - { - std::ignore = dataHeader; - std::ignore = sizeHeader; - std::ignore = size; - std::ignore = data; - - throw std::runtime_error("Error on DB"); - }); - - OsDataCache cache; + spSocketDBWrapperMock = std::make_shared(); + OsDataCache cache; + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::Throw(std::runtime_error("Error on DB"))); + std::string agentId {"1"}; EXPECT_THROW(cache.getOsData(agentId), WdbDataException); + spSocketDBWrapperMock.reset(); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.hpp index c70ea63b1a9..17bb4891229 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.hpp @@ -29,11 +29,6 @@ class OsDataCacheTest : public ::testing::Test OsDataCacheTest() = default; ~OsDataCacheTest() override = default; // LCOV_EXCL_STOP - /** - * @brief Fake socket server to test the DB query. - * - */ - std::shared_ptr, EpollWrapper>> m_socketServer; /** * @brief Set the Up every test case. @@ -42,9 +37,6 @@ class OsDataCacheTest : public ::testing::Test void SetUp() override { std::filesystem::create_directories("queue/db"); - // Create the socket server - m_socketServer = - std::make_shared, EpollWrapper>>(WDB_SOCKET); // Policy manager initialization. 
const auto& configJson {nlohmann::json::parse(R"({ @@ -67,8 +59,6 @@ class OsDataCacheTest : public ::testing::Test void TearDown() override { PolicyManager::instance().teardown(); - // Stop the socket server - m_socketServer->stop(); std::filesystem::remove_all("queue/db"); } }; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp index 5058faef713..3cc7714e431 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp @@ -10,8 +10,18 @@ */ #include "remediationDataCache_test.hpp" +#include "MockSocketDBWrapper.hpp" +#include "TrampolineSocketDBWrapper.hpp" -bool hotfixesAreEqual(const Remediation& remediation1, const Remediation& remediation2) +/** + * @brief Compare two remediation objects + * + * @param remediation1 + * @param remediation2 + * @return true if the remediations are equal + * @return false if the remediations are not equal + */ +bool remediationsAreEqual(const Remediation& remediation1, const Remediation& remediation2) { if (remediation1.hotfixes.size() != remediation2.hotfixes.size()) { @@ -31,7 +41,10 @@ bool hotfixesAreEqual(const Remediation& remediation1, const Remediation& remedi TEST_F(RemediationDataCacheTest, InsertMultipleItems) { - RemediationDataCache cache; + RemediationDataCache cache; + spSocketDBWrapperMock = std::make_shared(); + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)).Times(0); + std::string agentId {"1"}; { @@ -46,7 +59,7 @@ TEST_F(RemediationDataCacheTest, InsertMultipleItems) const auto retrievedData = cache.getRemediationData(agentId); // Verify that the returned value is the same as the one set - EXPECT_TRUE(hotfixesAreEqual(remediationData, remediationData)); + EXPECT_TRUE(remediationsAreEqual(remediationData, remediationData)); } { @@ -62,30 +75,21 @@ TEST_F(RemediationDataCacheTest, InsertMultipleItems) // Verify that the returned value is equal to both the values set Remediation expected {.hotfixes = {"hotfix1", "hotfix2", "hotfix3", "hotfix4"}}; - EXPECT_TRUE(hotfixesAreEqual(retrievedData, expected)); + EXPECT_TRUE(remediationsAreEqual(retrievedData, expected)); } } TEST_F(RemediationDataCacheTest, SetAndGetSuccess) { - // Start fake server - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) - { - std::ignore = fd; - std::ignore = dataHeader; - std::ignore = sizeHeader; - std::ignore = size; - std::ignore = data; + RemediationDataCache cache; + spSocketDBWrapperMock = std::make_shared(); - m_socketServer->send(fd, R"([{"hotfix":"hotfix1"},{"hotfix":"hotfix2"}])", 4); - }); + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)).Times(2); - RemediationDataCache cache; std::string agentId {"1"}; // Try to get value from empty cache - EXPECT_THROW(cache.getRemediationData(agentId), WdbDataException); + EXPECT_TRUE(remediationsAreEqual(cache.getRemediationData(agentId), Remediation {})); // Set value in cache Remediation remediationData { @@ -95,51 +99,62 @@ TEST_F(RemediationDataCacheTest, SetAndGetSuccess) cache.addRemediationData(agentId, remediationData); // Verify that the returned value is the same as the one set - EXPECT_TRUE(hotfixesAreEqual(cache.getRemediationData(agentId), remediationData)); + EXPECT_TRUE(remediationsAreEqual(cache.getRemediationData(agentId), remediationData)); // Try 
to get from non existing agent - EXPECT_THROW(cache.getRemediationData("2"), WdbDataException); + EXPECT_TRUE(remediationsAreEqual(cache.getRemediationData("2"), Remediation {})); } TEST_F(RemediationDataCacheTest, EmptyResponseFromDB) { - // Start fake server - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) - { - std::ignore = fd; - std::ignore = dataHeader; - std::ignore = sizeHeader; - std::ignore = size; - std::ignore = data; + RemediationDataCache cache; + spSocketDBWrapperMock = std::make_shared(); - m_socketServer->send(fd, "", 0); - }); + nlohmann::json queryResponse = nlohmann::json::array(); + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::SetArgReferee<1>(queryResponse)); - RemediationDataCache cache; std::string agentId {"1"}; - // Try to get value from empty cache - EXPECT_THROW(cache.getRemediationData(agentId), WdbDataException); + EXPECT_TRUE(remediationsAreEqual(cache.getRemediationData(agentId), Remediation {})); +} + +TEST_F(RemediationDataCacheTest, ResponseFromDB) +{ + RemediationDataCache cache; + spSocketDBWrapperMock = std::make_shared(); + + nlohmann::json queryResponse = R"([{"hotfix": "hotfix1"}, {"hotfix": "hotfix2"}])"_json; + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::SetArgReferee<1>(queryResponse)); + + std::string agentId {"1"}; + + // Get value from the cache + const auto remediationData = cache.getRemediationData(agentId); + + // Verify that the returned value is the same as the one set + Remediation expected {.hotfixes = {"hotfix1", "hotfix2"}}; + + EXPECT_TRUE(remediationsAreEqual(remediationData, expected)); } TEST_F(RemediationDataCacheTest, ExceptionOnDB) { - // Start fake server - m_socketServer->listen( - [&](const int fd, const char* data, uint32_t size, const char* dataHeader, uint32_t sizeHeader) - { - std::ignore = dataHeader; - std::ignore = sizeHeader; - std::ignore = size; - std::ignore = data; + RemediationDataCache cache; + spSocketDBWrapperMock = std::make_shared(); - throw std::runtime_error("Error on DB"); - }); + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::Throw(std::runtime_error("Error on DB"))); - RemediationDataCache cache; std::string agentId {"1"}; - // Try to get value from empty cache + // Attempt to get value from the cache EXPECT_THROW(cache.getRemediationData(agentId), WdbDataException); + spSocketDBWrapperMock.reset(); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp index 6b939ebd3f1..a0336cbb27a 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.hpp @@ -29,11 +29,6 @@ class RemediationDataCacheTest : public ::testing::Test RemediationDataCacheTest() = default; ~RemediationDataCacheTest() override = default; // LCOV_EXCL_STOP - /** - * @brief Fake socket server to test the DB query. - * - */ - std::shared_ptr, EpollWrapper>> m_socketServer; /** * @brief Set the Up every test case. 
@@ -65,8 +60,6 @@ class RemediationDataCacheTest : public ::testing::Test void TearDown() override { PolicyManager::instance().teardown(); - // Stop the socket server - m_socketServer->stop(); std::filesystem::remove_all("queue/db"); } }; From 1a68ec7207bbd808eee31cf98bf37a7f113ef39d Mon Sep 17 00:00:00 2001 From: pereyra-m Date: Wed, 15 May 2024 02:28:00 +0000 Subject: [PATCH 116/419] Adding component test for hotfixes feature --- .../qa/test_data/009/expected_002.out | 1 - .../qa/test_data/012/expected_001.out | 1 + .../qa/test_data/012/expected_002.out | 5 ++++ .../qa/test_data/012/expected_003.out | 6 +++++ .../qa/test_data/012/input_001.json | 22 ++++++++++++++++++ .../qa/test_data/012/input_002.json | 23 +++++++++++++++++++ .../qa/test_data/012/input_003.json | 12 ++++++++++ .../qa/test_data/013/expected_001.out | 1 + .../qa/test_data/013/expected_002.out | 1 + .../qa/test_data/013/expected_003.out | 4 ++++ .../qa/test_data/013/input_001.json | 22 ++++++++++++++++++ .../qa/test_data/013/input_002.json | 12 ++++++++++ .../qa/test_data/013/input_003.json | 23 +++++++++++++++++++ 13 files changed, 132 insertions(+), 1 deletion(-) create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/012/expected_001.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/012/expected_002.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/012/expected_003.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/012/input_001.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/012/input_002.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/012/input_003.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/013/expected_001.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/013/expected_002.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/013/expected_003.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/013/input_001.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/013/input_002.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/013/input_003.json diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/expected_002.out index 0c83c86aed0..19f4f8bda71 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/expected_002.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/009/expected_002.out @@ -3,7 +3,6 @@ "Match found, the package 'firefox' is vulnerable to 'CVE-2007-0896' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2007-1970' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2007-2176' due to default status. - Agent '' (ID: '009', Version: '').", - "Match found, the package 'firefox' is vulnerable to 'CVE-2007-3670' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2007-3827' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2007-4013' due to default status. - Agent '' (ID: '009', Version: '').", "Match found, the package 'firefox' is vulnerable to 'CVE-2007-6715' due to default status. 
- Agent '' (ID: '009', Version: '').", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/expected_001.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/expected_001.out new file mode 100644 index 00000000000..fe51488c706 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/expected_001.out @@ -0,0 +1 @@ +[] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/expected_002.out new file mode 100644 index 00000000000..122b8d289ca --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/expected_002.out @@ -0,0 +1,5 @@ +[ + "Match found, the package 'office', is vulnerable to 'CVE-2024-20673'. Current version: '2016' is equal to '2016'. - Agent '' (ID: '001', Version: '')", + "No remediations for agent '001' have been found", + "Processing and publish key: CVE-2024-20673" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/expected_003.out new file mode 100644 index 00000000000..f4ff9cbef44 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/expected_003.out @@ -0,0 +1,6 @@ +[ + "Getting associated vulnerabilities for hotfix 'KB5002537'", + "CVE 'CVE-2024-20673' was remediated by hotfix 'KB5002537' for '001_eff251a49a142accf85b170526462e13d3265f03_CVE-2024-20673'", + "Vulnerability report for agent ID 001, hotfix: KB5002537, cve: CVE-2024-20673", + "Processing and publish key: CVE-2024-20673" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/input_001.json new file mode 100644 index 00000000000..52f648a9a6b --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/input_001.json @@ -0,0 +1,22 @@ +{ + "agent_info": { + "agent_id": "001" + }, + "data_type": "state", + "data": { + "attributes_type": "syscollector_osinfo", + "attributes": { + "architecture": "x86_64", + "checksum": "1713967856394802400", + "hostname":"DESKTOP-EQ4F57D", + "os_major":"10", + "os_minor":"0", + "os_build":"19045.4291", + "os_name":"Microsoft Windows 10 Pro", + "os_display_version":"22H2", + "os_platform":"windows", + "os_version":"10.0.19045.4291", + "scan_time":"2024/04/24 14:10:57" + } + } + } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/input_002.json new file mode 100644 index 00000000000..c62e00b16c7 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/input_002.json @@ -0,0 +1,23 @@ +{ + "agent_info": { + "agent_id": "001" + }, + "data_type": "dbsync_packages", + "data": { + "version": "2016", + "vendor": "Microsoft Corporation", + "architecture": " ", + "name": "Microsoft Office Professional Plus 2016", + "size": 0, + "format": "win", + "checksum": "12c8e8d9df8f9a9f54d4aaf43568d10c79f3cc56", + "description": " ", + "install_time": "2024/04/03 22:53:13", + "item_id": "eff251a49a142accf85b170526462e13d3265f03", + "multiarch": null, + "priority": " ", + "scan_time": "2024/04/24 14:11:04", + "source": " " + }, + "operation": "INSERTED" + } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/input_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/input_003.json new file mode 100644 index 00000000000..fc10c06f9a4 --- /dev/null +++ 
b/src/wazuh_modules/vulnerability_scanner/qa/test_data/012/input_003.json @@ -0,0 +1,12 @@ +{ + "agent_info": { + "agent_id": "001" + }, + "data_type": "dbsync_hotfixes", + "data": { + "hotfix": "KB5002537", + "checksum": "22c8e8d9df8f9a9f54d4aaf43568d10c79f3cc56", + "scan_time": "2024/04/24 14:11:04" + }, + "operation": "INSERTED" + } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/expected_001.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/expected_001.out new file mode 100644 index 00000000000..fe51488c706 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/expected_001.out @@ -0,0 +1 @@ +[] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/expected_002.out new file mode 100644 index 00000000000..fe51488c706 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/expected_002.out @@ -0,0 +1 @@ +[] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/expected_003.out new file mode 100644 index 00000000000..b085460dc5f --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/expected_003.out @@ -0,0 +1,4 @@ +[ + "Match found, the package 'office', is vulnerable to 'CVE-2024-20673'. Current version: '2016' is equal to '2016'. - Agent '' (ID: '001', Version: '')", + "Remediation 'KB5002537' for package 'office' on agent '001' that solves CVE 'CVE-2024-20673' has been found" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/input_001.json new file mode 100644 index 00000000000..52f648a9a6b --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/input_001.json @@ -0,0 +1,22 @@ +{ + "agent_info": { + "agent_id": "001" + }, + "data_type": "state", + "data": { + "attributes_type": "syscollector_osinfo", + "attributes": { + "architecture": "x86_64", + "checksum": "1713967856394802400", + "hostname":"DESKTOP-EQ4F57D", + "os_major":"10", + "os_minor":"0", + "os_build":"19045.4291", + "os_name":"Microsoft Windows 10 Pro", + "os_display_version":"22H2", + "os_platform":"windows", + "os_version":"10.0.19045.4291", + "scan_time":"2024/04/24 14:10:57" + } + } + } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/input_002.json new file mode 100644 index 00000000000..fc10c06f9a4 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/input_002.json @@ -0,0 +1,12 @@ +{ + "agent_info": { + "agent_id": "001" + }, + "data_type": "dbsync_hotfixes", + "data": { + "hotfix": "KB5002537", + "checksum": "22c8e8d9df8f9a9f54d4aaf43568d10c79f3cc56", + "scan_time": "2024/04/24 14:11:04" + }, + "operation": "INSERTED" + } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/input_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/input_003.json new file mode 100644 index 00000000000..c62e00b16c7 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/013/input_003.json @@ -0,0 +1,23 @@ +{ + "agent_info": { + "agent_id": "001" + }, + "data_type": "dbsync_packages", + "data": { + "version": "2016", + "vendor": "Microsoft Corporation", + "architecture": " ", + "name": "Microsoft Office Professional Plus 2016", + "size": 0, + "format": "win", + "checksum": 
"12c8e8d9df8f9a9f54d4aaf43568d10c79f3cc56", + "description": " ", + "install_time": "2024/04/03 22:53:13", + "item_id": "eff251a49a142accf85b170526462e13d3265f03", + "multiarch": null, + "priority": " ", + "scan_time": "2024/04/24 14:11:04", + "source": " " + }, + "operation": "INSERTED" + } From db4f1c631f93ac1a1f9bd5976bb2db95c9d35e5d Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 15 May 2024 09:52:22 -0300 Subject: [PATCH 117/419] CL: - Fixed style --- .../src/scanOrchestrator/remediationDataCache.hpp | 2 +- .../tests/unit/buildAllAgentListContext_test.cpp | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp index 3b6fa1c60dd..5745cc4c3b7 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp @@ -48,7 +48,7 @@ class RemediationDataCache final : public Singleton> try { TSocketDBWrapper::instance().query(WazuhDBQueryBuilder::builder().agentGetHotfixesCommand(agentId).build(), - response); + response); } catch (const std::exception& e) { diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp index 306483857c7..035d09d82f1 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp @@ -61,6 +61,8 @@ TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContextWithElements) TEST_F(BuildAllAgentListContextTest, MissingField) { + spSocketDBWrapperMock = std::make_shared(); + nlohmann::json queryResult = nlohmann::json::parse(R"( [ { @@ -94,11 +96,11 @@ TEST_F(BuildAllAgentListContextTest, ExceptionOnDB) .Times(1) .WillOnce(testing::Throw(WdbDataException("Error on DB", agentId))); - RemediationDataCache cache; - auto allAgentContext = std::make_shared>(); auto scanContext = std::make_shared(); EXPECT_THROW(allAgentContext->handleRequest(scanContext), WdbDataException); + + spSocketDBWrapperMock.reset(); } From e43eb769df45a4627c59e867ad034cc99906d37d Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 15 May 2024 14:04:20 -0300 Subject: [PATCH 118/419] CL: - Fixed UTs (wrong trampoline initialization) --- .../tests/unit/buildAllAgentListContext_test.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp index 035d09d82f1..1bec9334f68 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/buildAllAgentListContext_test.cpp @@ -94,9 +94,10 @@ TEST_F(BuildAllAgentListContextTest, ExceptionOnDB) EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) .Times(1) - .WillOnce(testing::Throw(WdbDataException("Error on DB", agentId))); + .WillOnce(testing::Throw(SocketDbWrapperException("Error on DB"))); - auto allAgentContext = std::make_shared>(); + auto allAgentContext = + std::make_shared>(); auto scanContext = std::make_shared(); From 3a2f42a359ca2dc33a4f36cae38618b6a85f8b8f Mon Sep 17 00:00:00 2001 From: 
sebasfalcone
Date: Wed, 15 May 2024 14:28:52 -0300
Subject: [PATCH 119/419] CL: - Removed helper methods from scanContext

---
 .../src/scanOrchestrator/packageScanner.hpp   |  7 +++++--
 .../src/scanOrchestrator/scanContext.hpp      | 22 ++++------------------
 2 files changed, 9 insertions(+), 20 deletions(-)

diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp
index 5169a78979b..f987d0b620d 100644
--- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp
+++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp
@@ -556,7 +556,9 @@ class TPackageScanner final : public AbstractHandler
-        if (!contextData->hasRemediations())
+        // Check that the agent has remediation data.
+        auto agentRemediations = contextData->getRemediationData();
+        if (agentRemediations.hotfixes.empty())
         {
             logDebug2(
                 WM_VULNSCAN_LOGTAG, "No remediations for agent '%s' have been found.", contextData->agentId().data());
@@ -566,7 +568,8 @@ class TPackageScanner final : public AbstractHandler
         for (const auto& remediation : *(remediations->updates()))
         {
-            if (contextData->remediationIsInstalled(remediation->str()))
+            // Check if the remediation is installed on the agent.
+            if (agentRemediations.hotfixes.count(remediation->str()) != 0)
             {
                 logDebug2(WM_VULNSCAN_LOGTAG,
                           "Remediation '%s' for package '%s' on agent '%s' that solves CVE '%s' has been found.",
diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp
index b2b2f5297df..621e2d332ff 100644
--- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp
+++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp
@@ -1450,27 +1450,13 @@
     }
 
     /**
-     * @brief Checks if there are remediation installed.
+     * @brief Gets the Remediation data.
      *
-     * @return true if there are remediation.
-     * @return false if there are not remediation.
+     * @return Remediation The remediation data.
      */
-    bool hasRemediations() const
+    Remediation getRemediationData() const
     {
-        return !m_remediationData.hotfixes.empty();
-    }
-
-    /**
-     * @brief Checks if a remediation is installed.
-     *
-     * @param remediation ID of the remediation to check.
-     *
-     * @return true if the remediation is installed.
-     * @return false if the remediation is not installed.
- */ - bool remediationIsInstalled(const std::string& remediation) const - { - return m_remediationData.hotfixes.count(remediation) == 1; + return m_remediationData; } /** From 4a61099af3e4a6f17eb7ba361ab7598db4cfe205 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 15 May 2024 15:08:36 -0300 Subject: [PATCH 120/419] CL: - Reverted changes on resultIndexer - Created new class arrayResultIndexer (with reverted changes) - Updated UTs --- .../scanOrchestrator/arrayResultIndexer.hpp | 88 +++++++ .../scanOrchestrator/factoryOrchestrator.hpp | 6 +- .../src/scanOrchestrator/resultIndexer.hpp | 26 +-- .../tests/unit/arrayResultIndexer_test.cpp | 217 ++++++++++++++++++ .../tests/unit/arrayResultIndexer_test.hpp | 41 ++++ .../tests/unit/factoryOrchestrator_test.cpp | 206 +++++++++++------ .../tests/unit/resultIndexer_test.cpp | 3 +- 7 files changed, 483 insertions(+), 104 deletions(-) create mode 100644 src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/arrayResultIndexer.hpp create mode 100644 src/wazuh_modules/vulnerability_scanner/tests/unit/arrayResultIndexer_test.cpp create mode 100644 src/wazuh_modules/vulnerability_scanner/tests/unit/arrayResultIndexer_test.hpp diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/arrayResultIndexer.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/arrayResultIndexer.hpp new file mode 100644 index 00000000000..37d1f8750e6 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/arrayResultIndexer.hpp @@ -0,0 +1,88 @@ +/* + * Wazuh Vulnerability scanner - Scan Orchestrator + * Copyright (C) 2015, Wazuh Inc. + * May 14, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#ifndef _ARRAY_RESULT_INDEXER_HPP +#define _ARRAY_RESULT_INDEXER_HPP + +#include "chainOfResponsability.hpp" +#include "indexerConnector.hpp" +#include "scanContext.hpp" + +/** + * @brief ArrayResultIndexer class. + * + * @tparam TIndexerConnector indexer connector type. + * @tparam TScanContext scan context type. + */ +template +class TArrayResultIndexer final : public AbstractHandler> +{ +private: + std::shared_ptr m_indexerConnector; + +public: + // LCOV_EXCL_START + /** + * @brief ArrayResultIndexer constructor. + * + * @param indexerConnector Indexer connector. + */ + explicit TArrayResultIndexer(std::shared_ptr indexerConnector) + : m_indexerConnector(std::move(indexerConnector)) + { + } + // LCOV_EXCL_STOP + + /** + * @brief Handles request and passes control to the next step of the chain. + * + * @param data Scan context. + * @return std::shared_ptr Abstract handler. 
+ */ + std::shared_ptr handleRequest(std::shared_ptr data) override + { + if (m_indexerConnector != nullptr) + { + auto resultCallback = [&](const nlohmann::json& result, const std::string& key) + { + logDebug2(WM_VULNSCAN_LOGTAG, "Processing and publish key: %s", key.c_str()); + if (result.contains("operation") && result.contains("id")) + { + m_indexerConnector->publish(result.dump()); + } + else + { + logError(WM_VULNSCAN_LOGTAG, "Invalid element to publish: %s", result.dump().c_str()); + } + }; + + for (const auto& [key, value] : data->m_elements) + { + if (value.is_array()) + { + for (const auto& element : value) + { + resultCallback(element, key); + } + } + else + { + resultCallback(value, key); + } + } + } + return AbstractHandler>::handleRequest(std::move(data)); + } +}; + +using ArrayResultIndexer = TArrayResultIndexer<>; + +#endif // _ARRAY_RESULT_INDEXER_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp index b37d3986939..1221b784465 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp @@ -13,6 +13,7 @@ #define _FACTORY_ORCHESTRATOR_HPP #include "alertClearBuilder.hpp" +#include "arrayResultIndexer.hpp" #include "buildAllAgentListContext.hpp" #include "buildSingleAgentListContext.hpp" #include "chainOfResponsability.hpp" @@ -63,7 +64,8 @@ template + typename THotfixInsert = HotfixInsert, + typename TArrayResultIndexer = ArrayResultIndexer> class TFactoryOrchestrator final { private: @@ -111,7 +113,7 @@ class TFactoryOrchestrator final orchestration->setLast(std::make_shared(inventoryDatabase)); orchestration->setLast(std::make_shared(databaseFeedManager)); orchestration->setLast(std::make_shared(reportDispatcher)); - orchestration->setLast(std::make_shared(indexerConnector)); + orchestration->setLast(std::make_shared(indexerConnector)); break; case ScannerType::HotfixDelete: break; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp index 962776824e3..366b6476121 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp @@ -51,32 +51,10 @@ class TResultIndexer final : public AbstractHandlerpublish(result.dump()); - } - else - { - logError(WM_VULNSCAN_LOGTAG, "Invalid element to publish: %s", result.dump().c_str()); - } - }; - for (const auto& [key, value] : data->m_elements) { - if (value.is_array()) - { - for (const auto& element : value) - { - resultCallback(element, key); - } - } - else - { - resultCallback(value, key); - } + logDebug2(WM_VULNSCAN_LOGTAG, "Processing and publish key: %s", key.c_str()); + m_indexerConnector->publish(value.dump()); } } return AbstractHandler>::handleRequest(std::move(data)); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/arrayResultIndexer_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/arrayResultIndexer_test.cpp new file mode 100644 index 00000000000..386b697b372 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/arrayResultIndexer_test.cpp @@ -0,0 +1,217 @@ +/* + * Wazuh Vulnerability Scanner - Unit Tests + * Copyright (C) 2015, Wazuh Inc. + * May 14, 2024. 
+ * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#include "arrayResultIndexer_test.hpp" +#include "../../../../shared_modules/utils/flatbuffers/include/syscollector_deltas_generated.h" +#include "../../../../shared_modules/utils/flatbuffers/include/syscollector_deltas_schema.h" +#include "../scanOrchestrator/arrayResultIndexer.hpp" +#include "../scanOrchestrator/scanContext.hpp" +#include "MockIndexerConnector.hpp" +#include "MockOsDataCache.hpp" +#include "TrampolineIndexerConnector.hpp" +#include "TrampolineOsDataCache.hpp" +#include "TrampolineRemediationDataCache.hpp" +#include "flatbuffers/flatbuffer_builder.h" +#include "flatbuffers/flatbuffers.h" +#include "flatbuffers/idl.h" +#include "json.hpp" + +using ::testing::_; +using TrampolineScanContext = TScanContext; + +namespace NSArrayResultIndexerTest +{ + const std::string DELTA_PACKAGES_INSERTED_MSG = + R"( + { + "agent_info": { + "agent_id": "001", + "agent_ip": "192.168.33.20", + "agent_name": "focal" + }, + "data_type": "dbsync_packages", + "data": { + "architecture": "amd64", + "checksum": "1e6ce14f97f57d1bbd46ff8e5d3e133171a1bbce", + "description": "library for GIF images library", + "format": "deb", + "groups": "libs", + "item_id": "ec465b7eb5fa011a336e95614072e4c7f1a65a53", + "multiarch": "same", + "name": "libgif7", + "priority": "optional", + "scan_time": "2023/08/04 19:56:11", + "size": 72, + "source": "giflib", + "vendor": "Ubuntu Developers ", + "version": "5.1.9-1", + "install_time": "1577890801" + }, + "operation": "INSERTED" + } + )"; + + const std::string CVEID {"CVE-2024-1234"}; +} // namespace NSArrayResultIndexerTest + +using namespace NSArrayResultIndexerTest; + +void ArrayResultIndexerTest::SetUp() {} + +void ArrayResultIndexerTest::TearDown() +{ + spIndexerConnectorMock.reset(); + spOsDataCacheMock.reset(); + spRemediationDataCacheMock.reset(); +} + +/* + * @brief Test handleRequest of the ResultIndexer class. 
+ */ +TEST_F(ArrayResultIndexerTest, TestHandleRequest) +{ + auto elementValue = nlohmann::json::parse(R"({"id": "id_test", "operation":"INSERTED"})"); + + spIndexerConnectorMock = std::make_shared(); + EXPECT_CALL(*spIndexerConnectorMock, publish(elementValue.dump())).Times(1); + + auto pIndexerConnectorTrap = std::make_shared(); + + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "osdata_codeName", + .majorVersion = "osdata_majorVersion", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "osdata_platform", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); + ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); + scanContextOriginal->m_elements[CVEID] = elementValue; // Mock one vulnerability + + auto spResultIndexer = + std::make_shared>(pIndexerConnectorTrap); + + EXPECT_NO_THROW(spResultIndexer->handleRequest(scanContextOriginal)); +} + +TEST_F(ArrayResultIndexerTest, TestHandleRequestNoOperation) +{ + auto elementValue = nlohmann::json::parse(R"({"id": "id_test"})"); + + spIndexerConnectorMock = std::make_shared(); + EXPECT_CALL(*spIndexerConnectorMock, publish(elementValue.dump())).Times(0); + + auto pIndexerConnectorTrap = std::make_shared(); + + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "osdata_codeName", + .majorVersion = "osdata_majorVersion", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "osdata_platform", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); + ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); + auto scanContextOriginal = std::make_shared(syscollectorDelta); + scanContextOriginal->m_elements[CVEID] = elementValue; // Mock one vulnerability + + auto spResultIndexer = + std::make_shared>(pIndexerConnectorTrap); + + EXPECT_NO_THROW(spResultIndexer->handleRequest(scanContextOriginal)); +} + 
+TEST_F(ArrayResultIndexerTest, TestHandleRequestNoId) +{ + auto elementValue = nlohmann::json::parse(R"({"operation":"INSERTED"})"); + + spIndexerConnectorMock = std::make_shared(); + EXPECT_CALL(*spIndexerConnectorMock, publish(elementValue.dump())).Times(0); + + auto pIndexerConnectorTrap = std::make_shared(); + + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "osdata_codeName", + .majorVersion = "osdata_majorVersion", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "osdata_platform", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); + ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); + auto scanContextOriginal = std::make_shared(syscollectorDelta); + scanContextOriginal->m_elements[CVEID] = elementValue; // Mock one vulnerability + + auto spResultIndexer = + std::make_shared>(pIndexerConnectorTrap); + + EXPECT_NO_THROW(spResultIndexer->handleRequest(scanContextOriginal)); +} diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/arrayResultIndexer_test.hpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/arrayResultIndexer_test.hpp new file mode 100644 index 00000000000..339c23fdef9 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/arrayResultIndexer_test.hpp @@ -0,0 +1,41 @@ +/* + * Wazuh Vulnerability Scanner - Unit Tests + * Copyright (C) 2015, Wazuh Inc. + * May 14, 2024. + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License (version 2) as published by the FSF - Free Software + * Foundation. + */ + +#ifndef _ARRAY_RESULT_INDEXER_TEST_HPP +#define _ARRAY_RESULT_INDEXER_TEST_HPP + +#include "gtest/gtest.h" + +/** + * @brief Runs unit tests for ArrayResultIndexer + */ +class ArrayResultIndexerTest : public ::testing::Test +{ +protected: + // LCOV_EXCL_START + ArrayResultIndexerTest() = default; + ~ArrayResultIndexerTest() override = default; + + /** + * @brief Set the environment for testing. + * + */ + void SetUp() override; + + /** + * @brief Clean the environment after testing. 
+ * + */ + void TearDown() override; + // LCOV_EXCL_STOP +}; + +#endif // _ARRAY_RESULT_INDEXER_TEST_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp index 30c54da0657..231c0ce6705 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp @@ -35,7 +35,8 @@ enum class ScannerMockID : int CLEAN_SINGLE_AGENT_INVENTORY = 17, SCAN_AGENT_LIST = 18, GLOBAL_INVENTORY_SYNC = 19, - HOTFIX_INSERT = 20 + HOTFIX_INSERT = 20, + ARRAY_RESULT_INDEXER = 21 }; /** @@ -134,11 +135,12 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypePackageInsert) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::PackageInsert, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::PackageInsert, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -182,11 +184,12 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypePackageDelete) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::PackageDelete, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::PackageDelete, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -228,11 +231,12 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypeIntegrityClear) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::IntegrityClear, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::IntegrityClear, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -249,34 +253,36 @@ TEST_F(FactoryOrchestratorTest, TestScannerTypeIntegrityClear) TEST_F(FactoryOrchestratorTest, TestScannerTypeOs) { // Create the orchestrator for Os. 
- auto orchestration = TFactoryOrchestrator, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - MockDatabaseFeedManager, - MockIndexerConnector, - std::vector, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass, - TFakeClass>::create(ScannerType::Os, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + auto orchestration = + TFactoryOrchestrator, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + MockDatabaseFeedManager, + MockIndexerConnector, + std::vector, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass>::create(ScannerType::Os, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -320,11 +326,12 @@ TEST_F(FactoryOrchestratorTest, TestCreationCleanUpAllData) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::CleanupAllAgentData, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::CleanupAllAgentData, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -363,11 +370,12 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanAllAgents) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::ReScanAllAgents, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::ReScanAllAgents, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -407,11 +415,12 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanSingleAgent) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::ReScanSingleAgent, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::ReScanSingleAgent, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); @@ -424,6 +433,44 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanSingleAgent) TEST_F(FactoryOrchestratorTest, TestCreationCleanUpAgentData) { + // Create the orchestrator for CleanupSingleAgentData. + auto orchestration = TFactoryOrchestrator, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + MockDatabaseFeedManager, + MockIndexerConnector, + std::vector, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass, + TFakeClass>:: + create(ScannerType::CleanupSingleAgentData, nullptr, nullptr, *m_inventoryDatabase, nullptr); + + auto context = std::make_shared>(); + + EXPECT_NO_THROW(orchestration->handleRequest(context)); + EXPECT_EQ(context->size(), 1); + EXPECT_EQ(context->at(0), ScannerMockID::CLEAN_SINGLE_AGENT_INVENTORY); +} + +TEST_F(FactoryOrchestratorTest, TestCreationHotfixInsert) +{ + // Create the orchestrator for HotfixInsert. // Create the orchestrator for CleanupSingleAgentData. 
auto orchestration = TFactoryOrchestrator, @@ -449,17 +496,22 @@ TEST_F(FactoryOrchestratorTest, TestCreationCleanUpAgentData) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::CleanupSingleAgentData, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::HotfixInsert, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); EXPECT_NO_THROW(orchestration->handleRequest(context)); - EXPECT_EQ(context->size(), 1); - EXPECT_EQ(context->at(0), ScannerMockID::CLEAN_SINGLE_AGENT_INVENTORY); + EXPECT_EQ(context->size(), 5); + EXPECT_EQ(context->at(0), ScannerMockID::HOTFIX_INSERT); + EXPECT_EQ(context->at(1), ScannerMockID::CVE_SOLVED_INVENTORY_SYNC); + EXPECT_EQ(context->at(2), ScannerMockID::CVE_SOLVED_ALERT_DETAILS_BUILDER); + EXPECT_EQ(context->at(3), ScannerMockID::EVENT_SEND_REPORT); + EXPECT_EQ(context->at(4), ScannerMockID::ARRAY_RESULT_INDEXER); } /* @@ -495,11 +547,12 @@ TEST_F(FactoryOrchestratorTest, TestCreationInvalidScannerType) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(invalidScannerType, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(invalidScannerType, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); } catch (const std::runtime_error& e) { @@ -538,11 +591,12 @@ TEST_F(FactoryOrchestratorTest, TestCreationGlobalSyncInventory) TFakeClass, TFakeClass, TFakeClass, - TFakeClass>::create(ScannerType::GlobalSyncInventory, - nullptr, - nullptr, - *m_inventoryDatabase, - nullptr); + TFakeClass, + TFakeClass>::create(ScannerType::GlobalSyncInventory, + nullptr, + nullptr, + *m_inventoryDatabase, + nullptr); auto context = std::make_shared>(); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp index 958477936fa..dfa7ecc1dc2 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp @@ -130,7 +130,7 @@ TEST_F(ResultIndexerTest, TestHandleRequestNoOperation) auto elementValue = nlohmann::json::parse(R"({"id": "id_test"})"); spIndexerConnectorMock = std::make_shared(); - EXPECT_CALL(*spIndexerConnectorMock, publish(elementValue.dump())).Times(0); + EXPECT_CALL(*spIndexerConnectorMock, publish(elementValue.dump())).Times(1); auto pIndexerConnectorTrap = std::make_shared(); @@ -215,7 +215,6 @@ TEST_F(ResultIndexerTest, TestHandleRequestNoId) auto scanContextOriginal = std::make_shared>( syscollectorDelta); - scanContextOriginal->m_elements[CVEID] = elementValue; // Mock one vulnerability auto spResultIndexer = std::make_shared< TResultIndexer Date: Wed, 15 May 2024 17:17:39 -0300 Subject: [PATCH 121/419] CL: - Restored changes on resultIndexer and updated UTs - Added exception on bad type for arrayResultIndexer and updated UTs --- .../scanOrchestrator/arrayResultIndexer.hpp | 16 +++--- .../src/scanOrchestrator/resultIndexer.hpp | 9 +++- .../tests/unit/arrayResultIndexer_test.cpp | 52 +++++++++++++++++-- .../tests/unit/resultIndexer_test.cpp | 2 +- 4 files changed, 66 insertions(+), 13 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/arrayResultIndexer.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/arrayResultIndexer.hpp index 37d1f8750e6..bdec0d7afea 100644 --- 
a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/arrayResultIndexer.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/arrayResultIndexer.hpp @@ -60,22 +60,22 @@ class TArrayResultIndexer final : public AbstractHandlerm_elements) { - if (value.is_array()) + if (!value.is_array()) { - for (const auto& element : value) - { - resultCallback(element, key); - } + throw std::invalid_argument("Invalid element to publish (not an array): " + value.dump()); } - else + + for (const auto& element : value) { - resultCallback(value, key); + resultCallback(element, key); } } } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp index 366b6476121..2cedd0553b2 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp @@ -54,7 +54,14 @@ class TResultIndexer final : public AbstractHandlerm_elements) { logDebug2(WM_VULNSCAN_LOGTAG, "Processing and publish key: %s", key.c_str()); - m_indexerConnector->publish(value.dump()); + if (value.contains("operation") && value.contains("id")) + { + m_indexerConnector->publish(value.dump()); + } + else + { + logError(WM_VULNSCAN_LOGTAG, "Invalid element to publish: %s", value.dump().c_str()); + } } } return AbstractHandler>::handleRequest(std::move(data)); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/arrayResultIndexer_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/arrayResultIndexer_test.cpp index 386b697b372..ef3481ba629 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/arrayResultIndexer_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/arrayResultIndexer_test.cpp @@ -116,7 +116,7 @@ TEST_F(ArrayResultIndexerTest, TestHandleRequest) auto scanContextOriginal = std::make_shared>( syscollectorDelta); - scanContextOriginal->m_elements[CVEID] = elementValue; // Mock one vulnerability + scanContextOriginal->m_elements[CVEID].push_back(elementValue); // Mock one vulnerability auto spResultIndexer = std::make_shared>(pIndexerConnectorTrap); @@ -162,7 +162,7 @@ TEST_F(ArrayResultIndexerTest, TestHandleRequestNoOperation) std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); auto scanContextOriginal = std::make_shared(syscollectorDelta); - scanContextOriginal->m_elements[CVEID] = elementValue; // Mock one vulnerability + scanContextOriginal->m_elements[CVEID].push_back(elementValue); // Mock one vulnerability auto spResultIndexer = std::make_shared>(pIndexerConnectorTrap); @@ -208,10 +208,56 @@ TEST_F(ArrayResultIndexerTest, TestHandleRequestNoId) std::variant syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); auto scanContextOriginal = std::make_shared(syscollectorDelta); - scanContextOriginal->m_elements[CVEID] = elementValue; // Mock one vulnerability + scanContextOriginal->m_elements[CVEID].push_back(elementValue); // Mock one vulnerability auto spResultIndexer = std::make_shared>(pIndexerConnectorTrap); EXPECT_NO_THROW(spResultIndexer->handleRequest(scanContextOriginal)); } + +TEST_F(ArrayResultIndexerTest, PublishObjectInsteadOfArray) +{ + auto elementValue = nlohmann::json::parse(R"({"operation":"INSERTED"})"); + + spIndexerConnectorMock = std::make_shared(); + EXPECT_CALL(*spIndexerConnectorMock, publish(elementValue.dump())).Times(0); + + auto pIndexerConnectorTrap = 
std::make_shared(); + + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "osdata_codeName", + .majorVersion = "osdata_majorVersion", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "osdata_platform", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); + ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); + auto scanContextOriginal = std::make_shared(syscollectorDelta); + scanContextOriginal->m_elements[CVEID] = elementValue; // Mock one vulnerability + + auto spResultIndexer = + std::make_shared>(pIndexerConnectorTrap); + + EXPECT_THROW(spResultIndexer->handleRequest(scanContextOriginal), std::invalid_argument); +} diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp index dfa7ecc1dc2..f7c80b5e5af 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp @@ -130,7 +130,7 @@ TEST_F(ResultIndexerTest, TestHandleRequestNoOperation) auto elementValue = nlohmann::json::parse(R"({"id": "id_test"})"); spIndexerConnectorMock = std::make_shared(); - EXPECT_CALL(*spIndexerConnectorMock, publish(elementValue.dump())).Times(1); + EXPECT_CALL(*spIndexerConnectorMock, publish(elementValue.dump())).Times(0); auto pIndexerConnectorTrap = std::make_shared(); From ca7d93a394f8b14aba6f2da59c6eaed9dd1ed34c Mon Sep 17 00:00:00 2001 From: pereyra-m Date: Thu, 16 May 2024 03:04:41 +0000 Subject: [PATCH 122/419] Fixing style --- .../vulnerability_scanner/testtool/scanner/main.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp index e7cd0fe7b8f..176429f10bf 100644 --- a/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp +++ b/src/wazuh_modules/vulnerability_scanner/testtool/scanner/main.cpp @@ -236,7 +236,8 @@ int main(const int argc, const char* argv[]) else { std::string successMessage = "ok " + fakeGlobalData.dump(); - std::cout << "Response message: '" << successMessage << "' to: '" << messageReceived << "'" << std::endl; + std::cout << "Response message: '" << successMessage << "' to: '" << messageReceived + << "'" << std::endl; fakeDBServer->send(fd, successMessage.c_str(), successMessage.size()); } } From 7995964e2540fddee4efadcd6ca9ad11b347bbe9 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Thu, 16 May 2024 00:42:00 -0300 Subject: [PATCH 123/419] Fix coverity issues. 
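The diff below makes two kinds of change:

- SocketDBWrapper: the m_dataReady flag is now cleared after acquiring the
  response lock instead of under the message lock only, so it is not written
  outside the lock that guards the response state.
- Scan orchestrator handlers (cleanAgentInventory, eventDeleteInventory,
  eventInsertInventory, globalSyncInventory, scanInventorySync): the
  cluster-prefixed key is built directly at initialization, removing the
  intermediate clusterManagerPrefix copy (eventDeleteInventory also moves the
  value declaration into the if-initializer).

The key construction ends up with this shape (illustrative sketch taken from
the globalSyncInventory hunk; the other handlers append further components
after the agent id):

    // Manager ("000") events in cluster mode are prefixed with the node name;
    // any other agent id is used without a prefix.
    std::string key = data->agentId().compare("000") == 0 && data->clusterStatus()
                          ? std::string(data->clusterNodeName()) + "_"
                          : "";
    key.append(data->agentId());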
--- src/shared_modules/utils/socketDBWrapper.hpp | 2 +- .../src/scanOrchestrator/cleanAgentInventory.hpp | 5 ++--- .../src/scanOrchestrator/eventDeleteInventory.hpp | 12 ++++-------- .../src/scanOrchestrator/eventInsertInventory.hpp | 8 +++----- .../src/scanOrchestrator/globalSyncInventory.hpp | 7 +++---- .../src/scanOrchestrator/scanInventorySync.hpp | 8 +++----- 6 files changed, 16 insertions(+), 26 deletions(-) diff --git a/src/shared_modules/utils/socketDBWrapper.hpp b/src/shared_modules/utils/socketDBWrapper.hpp index c988dc62b3e..ca01951b4b7 100644 --- a/src/shared_modules/utils/socketDBWrapper.hpp +++ b/src/shared_modules/utils/socketDBWrapper.hpp @@ -156,7 +156,6 @@ class SocketDBWrapper final : public Singleton { // Acquire lock to avoid multiple threads sending queries at the same time std::scoped_lock lockMessage {m_mutexMessage}; - m_dataReady = false; if (m_teardown) { @@ -166,6 +165,7 @@ class SocketDBWrapper final : public Singleton // Acquire lock before clearing the response std::unique_lock lockResponse {m_mutexResponse}; + m_dataReady = false; m_response.clear(); m_responsePartial.clear(); // coverity[missing_lock] diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp index 18ebc42ea83..3c2d2be8d6a 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/cleanAgentInventory.hpp @@ -61,10 +61,9 @@ class TCleanAgentInventory final */ std::shared_ptr handleRequest(std::shared_ptr ctx) override { - const auto clusterManagerPrefix = - ctx->agentId().compare("000") == 0 && ctx->clusterStatus() ? std::string(ctx->clusterNodeName()) + "_" : ""; - std::string agentKey = clusterManagerPrefix; + std::string agentKey = + ctx->agentId().compare("000") == 0 && ctx->clusterStatus() ? std::string(ctx->clusterNodeName()) + "_" : ""; agentKey.append(ctx->agentId()); agentKey.append("_"); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDeleteInventory.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDeleteInventory.hpp index 1eba607b649..acdf61ff289 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDeleteInventory.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventDeleteInventory.hpp @@ -52,11 +52,9 @@ class TEventDeleteInventory final */ std::shared_ptr handleRequest(std::shared_ptr data) override { - std::string key; - const auto clusterManagerPrefix = data->agentId().compare("000") == 0 && data->clusterStatus() - ? std::string(data->clusterNodeName()) + "_" - : ""; - key.append(clusterManagerPrefix); + std::string key = data->agentId().compare("000") == 0 && data->clusterStatus() + ? 
std::string(data->clusterNodeName()) + "_" + : ""; key.append(data->agentId()); key.append("_"); @@ -64,9 +62,7 @@ class TEventDeleteInventory final key.append(TInventorySync::affectedComponentKey(data)); const auto& column = AFFECTED_COMPONENT_COLUMNS.at(data->affectedComponentType()); - std::string value; - - if (TInventorySync::m_inventoryDatabase.get(key, value, column)) + if (std::string value; TInventorySync::m_inventoryDatabase.get(key, value, column)) { auto listCve = Utils::split(value, ','); for (const auto& cve : listCve) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventInsertInventory.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventInsertInventory.hpp index 96819218953..6e972a8d695 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventInsertInventory.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/eventInsertInventory.hpp @@ -52,11 +52,9 @@ class TEventInsertInventory final */ std::shared_ptr handleRequest(std::shared_ptr data) override { - const auto clusterManagerPrefix = data->agentId().compare("000") == 0 && data->clusterStatus() - ? std::string(data->clusterNodeName()) + "_" - : ""; - - std::string elementKey = clusterManagerPrefix; + std::string elementKey = data->agentId().compare("000") == 0 && data->clusterStatus() + ? std::string(data->clusterNodeName()) + "_" + : ""; elementKey.append(data->agentId()); elementKey.append("_"); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/globalSyncInventory.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/globalSyncInventory.hpp index 8ddef13253d..feb06b2db3e 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/globalSyncInventory.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/globalSyncInventory.hpp @@ -51,10 +51,9 @@ class TGlobalSyncInventory final : public AbstractHandleragentId().compare("000") == 0 && data->clusterStatus() - ? std::string(data->clusterNodeName()) + "_" - : ""; - std::string key = clusterManagerPrefix; + std::string key = data->agentId().compare("000") == 0 && data->clusterStatus() + ? std::string(data->clusterNodeName()) + "_" + : ""; key.append(data->agentId()); m_indexerConnector->sync(key); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanInventorySync.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanInventorySync.hpp index a652ba032ea..cbd8b7d0df3 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanInventorySync.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanInventorySync.hpp @@ -49,11 +49,9 @@ class TScanInventorySync final */ std::shared_ptr handleRequest(std::shared_ptr data) override { - const auto clusterManagerPrefix = data->agentId().compare("000") == 0 && data->clusterStatus() - ? std::string(data->clusterNodeName()) + "_" - : ""; - - std::string key = clusterManagerPrefix; + std::string key = data->agentId().compare("000") == 0 && data->clusterStatus() + ? 
std::string(data->clusterNodeName()) + "_" + : ""; key.append(data->agentId()); key.append("_"); From fd4dff3b87ce586f95fbc0b4c732dc27a195f67d Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Thu, 16 May 2024 09:48:42 +0200 Subject: [PATCH 124/419] fix: race condition at wazuh-db when setting a DB closing time --- src/wazuh_db/wdb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wazuh_db/wdb.h b/src/wazuh_db/wdb.h index 20b1f862226..97f233d40de 100644 --- a/src/wazuh_db/wdb.h +++ b/src/wazuh_db/wdb.h @@ -364,7 +364,7 @@ typedef struct wdb_t { int peer; unsigned int refcount; unsigned int transaction:1; - time_t last; + _Atomic(time_t) last; time_t transaction_begin_time; pthread_mutex_t mutex; struct stmt_cache_list *cache_list; From 5ffedb3ea525062002f5129858d132c776d73d83 Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Thu, 16 May 2024 09:50:18 +0200 Subject: [PATCH 125/419] fix: race condition at wazuh-db running flag --- src/wazuh_db/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wazuh_db/main.c b/src/wazuh_db/main.c index 025db17cdc2..ebc65422c2c 100644 --- a/src/wazuh_db/main.c +++ b/src/wazuh_db/main.c @@ -29,7 +29,7 @@ wnotify_t * notify_queue; //static w_queue_t * sock_queue; static pthread_mutex_t queue_mutex = PTHREAD_MUTEX_INITIALIZER; //static pthread_cond_t sock_cond = PTHREAD_COND_INITIALIZER; -static volatile int running = 1; +static volatile _Atomic(int) running = 1; rlim_t nofile; int main(int argc, char ** argv) From 74817f3dbdde7a7f2b71501687f985d439ccf800 Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Thu, 16 May 2024 12:10:17 +0200 Subject: [PATCH 126/419] fix: race condition at wazuh-db GC when closing old databases --- src/wazuh_db/wdb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wazuh_db/wdb.h b/src/wazuh_db/wdb.h index 97f233d40de..571df42fbaf 100644 --- a/src/wazuh_db/wdb.h +++ b/src/wazuh_db/wdb.h @@ -362,7 +362,7 @@ typedef struct wdb_t { sqlite3_stmt * stmt[WDB_STMT_SIZE]; char * id; int peer; - unsigned int refcount; + _Atomic(unsigned int) refcount; unsigned int transaction:1; _Atomic(time_t) last; time_t transaction_begin_time; From 4c511a82e6b122a96ec91702cffde417e639a02f Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Thu, 16 May 2024 12:56:04 +0200 Subject: [PATCH 127/419] feat: clear wdb_create_profile function parameters --- src/headers/defs.h | 1 + src/wazuh_db/wdb.c | 14 ++++++-------- src/wazuh_db/wdb.h | 2 +- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/src/headers/defs.h b/src/headers/defs.h index 5dfb3d3427a..326b65fc8ea 100644 --- a/src/headers/defs.h +++ b/src/headers/defs.h @@ -227,6 +227,7 @@ published by the Free Software Foundation. 
For more details, go to \n\ #define WDB_GLOB_NAME "global" #define WDB_MITRE_NAME "mitre" #define WDB_PROF_NAME ".template.db" +#define WDB_PROF_PATH WDB2_DIR "/" WDB_PROF_NAME #define WDB_TASK_DIR "queue/tasks" #define WDB_TASK_NAME "tasks" #define WDB_BACKUP_FOLDER "backup/db" diff --git a/src/wazuh_db/wdb.c b/src/wazuh_db/wdb.c index 7b89636c79a..8ac166f137d 100644 --- a/src/wazuh_db/wdb.c +++ b/src/wazuh_db/wdb.c @@ -475,18 +475,16 @@ int wdb_create_agent_db2(const char * agent_id) { size_t nbytes; int result = 0; - snprintf(path, OS_FLSIZE, "%s/%s", WDB2_DIR, WDB_PROF_NAME); - - if (!(source = fopen(path, "r"))) { + if (!(source = fopen(WDB_PROF_PATH, "r"))) { mdebug1("Profile database not found, creating."); - if (wdb_create_profile(path) < 0) + if (wdb_create_profile() < 0) return -1; // Retry to open - if (!(source = fopen(path, "r"))) { - merror("Couldn't open profile '%s'.", path); + if (!(source = fopen(WDB_PROF_PATH, "r"))) { + merror("Couldn't open profile '%s'.", WDB_PROF_PATH); return -1; } } @@ -634,8 +632,8 @@ int wdb_create_global(const char *path) { } /* Create profile database */ -int wdb_create_profile(const char *path) { - return wdb_create_file(path, schema_agents_sql); +int wdb_create_profile() { + return wdb_create_file(WDB_PROF_PATH, schema_agents_sql); } /* Create new database file from SQL script */ diff --git a/src/wazuh_db/wdb.h b/src/wazuh_db/wdb.h index 571df42fbaf..f0b572fa8f0 100644 --- a/src/wazuh_db/wdb.h +++ b/src/wazuh_db/wdb.h @@ -729,7 +729,7 @@ int wdb_commit2(wdb_t * wdb); int wdb_create_global(const char *path); /* Create profile database */ -int wdb_create_profile(const char *path); +int wdb_create_profile(); /* Create new database file from SQL script */ int wdb_create_file(const char *path, const char *source); From 8d1f4095b5c9c7627953a5ff444ce1cfc282433d Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Thu, 16 May 2024 12:56:53 +0200 Subject: [PATCH 128/419] fix: create wazuh-db agent template on startup --- src/wazuh_db/main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/wazuh_db/main.c b/src/wazuh_db/main.c index ebc65422c2c..576db333f6d 100644 --- a/src/wazuh_db/main.c +++ b/src/wazuh_db/main.c @@ -137,6 +137,7 @@ int main(int argc, char ** argv) snprintf(path_template, sizeof(path_template), "%s/%s/%s", home_path, WDB2_DIR, WDB_PROF_NAME); unlink(path_template); mdebug1("Template file removed: %s", path_template); + wdb_create_profile(); // Set max open files limit struct rlimit rlimit = { nofile, nofile }; From 04e973b82583c4a696e9a1b7ab48755e0a60140f Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Thu, 16 May 2024 12:59:48 +0200 Subject: [PATCH 129/419] fix: race condition on wazuh-db agent template creation --- src/wazuh_db/wdb.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/wazuh_db/wdb.c b/src/wazuh_db/wdb.c index 8ac166f137d..150892895c4 100644 --- a/src/wazuh_db/wdb.c +++ b/src/wazuh_db/wdb.c @@ -474,21 +474,28 @@ int wdb_create_agent_db2(const char * agent_id) { FILE *dest; size_t nbytes; int result = 0; + static pthread_mutex_t profile_mutex = PTHREAD_MUTEX_INITIALIZER; + + w_mutex_lock(&profile_mutex); if (!(source = fopen(WDB_PROF_PATH, "r"))) { mdebug1("Profile database not found, creating."); - if (wdb_create_profile() < 0) + if (wdb_create_profile() < 0) { + w_mutex_unlock(&profile_mutex); return -1; + } // Retry to open if (!(source = fopen(WDB_PROF_PATH, "r"))) { + w_mutex_unlock(&profile_mutex); merror("Couldn't open profile '%s'.", WDB_PROF_PATH); return 
-1; } } + w_mutex_unlock(&profile_mutex); snprintf(path, OS_FLSIZE, "%s/%s.db", WDB2_DIR, agent_id); if (!(dest = fopen(path, "w"))) { From 7c8ebc45e95e8e178ee428a63c72158dcca314af Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Wed, 15 May 2024 14:59:37 +0200 Subject: [PATCH 130/419] Add group hash recalculation on Agent-group send full --- framework/wazuh/core/cluster/master.py | 4 ++++ .../wazuh/core/cluster/tests/test_master.py | 21 ++++++++++++------- .../wazuh/core/cluster/tests/test_worker.py | 5 ++++- framework/wazuh/core/cluster/worker.py | 3 +++ 4 files changed, 24 insertions(+), 9 deletions(-) diff --git a/framework/wazuh/core/cluster/master.py b/framework/wazuh/core/cluster/master.py index 6e42cdb9dfe..1df6979e7fd 100644 --- a/framework/wazuh/core/cluster/master.py +++ b/framework/wazuh/core/cluster/master.py @@ -666,6 +666,10 @@ async def send_entire_agent_groups_information(self): start_time = get_utc_now() logger.info('Starting.') + # Recalculate group hashes before retrieving agent groups info + logger.debug('Recalculating agent-group hash.') + await AsyncWazuhDBConnection().run_wdb_command(command='global recalculate-agent-group-hashes') + sync_object = c_common.SyncWazuhdb(manager=self, logger=logger, cmd=b'syn_g_m_w_c', data_retriever=AsyncWazuhDBConnection().run_wdb_command, get_data_command='global sync-agent-groups-get ', diff --git a/framework/wazuh/core/cluster/tests/test_master.py b/framework/wazuh/core/cluster/tests/test_master.py index effb8ee1d04..a6eaedbb4d1 100644 --- a/framework/wazuh/core/cluster/tests/test_master.py +++ b/framework/wazuh/core/cluster/tests/test_master.py @@ -896,18 +896,22 @@ def info(self, info): @pytest.mark.asyncio -@patch("wazuh.core.cluster.master.AsyncWazuhDBConnection") +@patch("wazuh.core.cluster.master.AsyncWazuhDBConnection", return_value=AsyncMock()) @patch('wazuh.core.cluster.common.SyncWazuhdb') -async def test_manager_handler_send_entire_agent_groups_information(SyncWazuhdb_mock, AsyncWazuhDBConnection_mock): +async def test_manager_handler_send_entire_agent_groups_information(syncwazuhdb_mock, asyncwazuhdbconnection_mock): """Check if the data chunks are being properly forward to the Wazuh-db socket.""" class LoggerMock: """Auxiliary class.""" def __init__(self): + self._debug = [] self._info = [] self._error = [] + def debug(self, debug): + self._debug.append(debug) + def info(self, data): """Auxiliary method.""" self._info.append(data) @@ -915,24 +919,25 @@ def info(self, data): master_handler = get_master_handler() logger = LoggerMock() master_handler.task_loggers["Agent-groups send full"] = logger - SyncWazuhdb_mock.return_value.retrieve_information = AsyncMock() - SyncWazuhdb_mock.return_value.sync = AsyncMock() + syncwazuhdb_mock.return_value.retrieve_information = AsyncMock() + syncwazuhdb_mock.return_value.sync = AsyncMock() assert await master_handler.send_entire_agent_groups_information() is None - SyncWazuhdb_mock.assert_called_once_with(manager=master_handler, logger=logger, cmd=b'syn_g_m_w_c', + syncwazuhdb_mock.assert_called_once_with(manager=master_handler, logger=logger, cmd=b'syn_g_m_w_c', data_retriever=ANY, get_data_command='global sync-agent-groups-get ', get_payload={'condition': 'all', 'set_synced': False, 'get_global_hash': False, 'last_id': 0}, pivot_key='last_id', set_data_command='global set-agent-groups', set_payload={'mode': 'override', 'sync_status': 'synced'}) - SyncWazuhdb_mock.return_value.retrieve_information.assert_called_once() - 
SyncWazuhdb_mock.return_value.sync.assert_called_once_with(start_time=ANY, chunks=ANY) + syncwazuhdb_mock.return_value.retrieve_information.assert_called_once() + syncwazuhdb_mock.return_value.sync.assert_called_once_with(start_time=ANY, chunks=ANY) assert logger._info == ['Starting.'] + assert logger._debug == ['Recalculating agent-group hash.'] @pytest.mark.asyncio @patch("wazuh.core.cluster.master.AsyncWazuhDBConnection") -async def test_manager_handler_send_agent_groups_information(AsyncWazuhDBConnection_mock): +async def test_manager_handler_send_agent_groups_information(asyncwazuhdbconnection_mock): """Check if the data chunks are being properly forward to the Wazuh-db socket.""" class LoggerMock: diff --git a/framework/wazuh/core/cluster/tests/test_worker.py b/framework/wazuh/core/cluster/tests/test_worker.py index be7223caade..d2371e5927d 100644 --- a/framework/wazuh/core/cluster/tests/test_worker.py +++ b/framework/wazuh/core/cluster/tests/test_worker.py @@ -607,12 +607,14 @@ def clear(self): @pytest.mark.asyncio @freeze_time('1970-01-01') +@patch("wazuh.core.cluster.worker.AsyncWazuhDBConnection") @patch('wazuh.core.cluster.worker.WorkerHandler.check_agent_groups_checksums', return_value='') @patch('wazuh.core.cluster.common.Handler.send_request', return_value='check') @patch('wazuh.core.cluster.common.Handler.update_chunks_wdb', return_value={'updated_chunks': 1}) @patch('wazuh.core.cluster.common.Handler.get_chunks_in_task_id', return_value='chunks') async def test_worker_handler_recv_agent_groups_information(get_chunks_in_task_id_mock, update_chunks_wdb_mock, - send_request_mock, check_agent_groups_checksums_mock): + send_request_mock, check_agent_groups_checksums_mock, + asyncwazuhdbconnection_mock): """Check that the wazuh-db data reception task is created.""" class LoggerMock: @@ -644,6 +646,7 @@ def reset_mock(): assert 'Finished in 0.000s. Updated 1 chunks.' 
in logger._info reset_mock() + asyncwazuhdbconnection_mock.return_value = AsyncMock() assert await worker_handler.recv_agent_groups_entire_information(task_id=b'17', info_type='agent-groups') == 'check' get_chunks_in_task_id_mock.assert_called_once_with(b'17', b'syn_wgc_err') update_chunks_wdb_mock.assert_called_once_with('chunks', 'agent-groups', logger_c, b'syn_wgc_err', 0) diff --git a/framework/wazuh/core/cluster/worker.py b/framework/wazuh/core/cluster/worker.py index 1e601a5e3f6..cc28258853d 100644 --- a/framework/wazuh/core/cluster/worker.py +++ b/framework/wazuh/core/cluster/worker.py @@ -483,6 +483,9 @@ async def recv_agent_groups_information(self, task_id: bytes, info_type: str, lo data = await super().get_chunks_in_task_id(task_id, error_command) result = await super().update_chunks_wdb(data, info_type, logger, error_command, timeout) response = await self.send_request(command=command, data=json.dumps(result).encode()) + if command == b'syn_wgc_e': + # Recalculate group hash before comparing with master's + await AsyncWazuhDBConnection().run_wdb_command(command='global recalculate-agent-group-hashes') await self.check_agent_groups_checksums(data, logger) end_time = datetime.utcnow().replace(tzinfo=timezone.utc) From 22449c835383c96f7aa5a1eb65cc1837a990dccc Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Thu, 16 May 2024 12:05:18 -0300 Subject: [PATCH 131/419] =?UTF-8?q?wazuh-db:=20+=20Removes=20duplicate=20q?= =?UTF-8?q?uery=20WDB=5FSTMT=5FGLOBAL=5FGET=5FAGENTS=5FTO=5FRECALCULATE=5F?= =?UTF-8?q?GROUP=5FHASH,=20WDB=5FSTMT=5FGLOBAL=5FGET=5FAGENTS=20is=20used.?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20+=20Changes=20the=20debugging?= =?UTF-8?q?=20message=20=E2=80=9CError=20recalculating=20group=20hash=20of?= =?UTF-8?q?=20agents=20in=20global.db.=E2=80=9D=20to=20warning?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/unit_tests/wazuh_db/test_wdb_global.c | 27 +++++++++++++++++++ .../wazuh_db/test_wdb_global_parser.c | 2 +- src/wazuh_db/wdb.c | 1 - src/wazuh_db/wdb.h | 1 - src/wazuh_db/wdb_global.c | 9 +++++-- src/wazuh_db/wdb_parser.c | 2 +- 6 files changed, 36 insertions(+), 6 deletions(-) diff --git a/src/unit_tests/wazuh_db/test_wdb_global.c b/src/unit_tests/wazuh_db/test_wdb_global.c index c110e636a22..f156781e231 100644 --- a/src/unit_tests/wazuh_db/test_wdb_global.c +++ b/src/unit_tests/wazuh_db/test_wdb_global.c @@ -9308,12 +9308,32 @@ void test_wdb_global_recalculate_all_agent_groups_hash_cache_fail(void **state) assert_int_equal(result, OS_INVALID); } +void test_wdb_global_recalculate_all_agent_groups_hash_bind_fail(void **state) +{ + test_struct_t *data = (test_struct_t *)*state; + + will_return(__wrap_wdb_begin2, 1); + will_return(__wrap_wdb_stmt_cache, 1); + expect_value(__wrap_sqlite3_bind_int, index, 1); + expect_value(__wrap_sqlite3_bind_int, value, 0); + will_return(__wrap_sqlite3_bind_int, SQLITE_ERROR); + will_return(__wrap_sqlite3_errmsg, "ERROR MESSAGE"); + expect_string(__wrap__merror, formatted_msg, "DB(global) sqlite3_bind_int(): ERROR MESSAGE"); + + int result = wdb_global_recalculate_all_agent_groups_hash(data->wdb); + + assert_int_equal(result, OS_INVALID); +} + void test_wdb_global_recalculate_all_agent_groups_hash_exec_stmt_null(void **state) { test_struct_t *data = (test_struct_t *)*state; will_return(__wrap_wdb_begin2, 1); will_return(__wrap_wdb_stmt_cache, 1); + expect_value(__wrap_sqlite3_bind_int, index, 1); + expect_value(__wrap_sqlite3_bind_int, value, 0); + 
will_return(__wrap_sqlite3_bind_int, SQLITE_OK); will_return(__wrap_wdb_exec_stmt, NULL); expect_function_call(__wrap_cJSON_Delete); @@ -9333,6 +9353,9 @@ void test_wdb_global_recalculate_all_agent_groups_hash_invalid_id(void **state) will_return(__wrap_wdb_begin2, 1); will_return(__wrap_wdb_stmt_cache, 1); + expect_value(__wrap_sqlite3_bind_int, index, 1); + expect_value(__wrap_sqlite3_bind_int, value, 0); + will_return(__wrap_sqlite3_bind_int, SQLITE_OK); will_return(__wrap_wdb_exec_stmt, json_agent); expect_string(__wrap__merror, formatted_msg, "Invalid element returned by get all agents query"); @@ -9358,6 +9381,9 @@ void test_wdb_global_recalculate_all_agent_groups_hash_recalculate_error(void ** will_return(__wrap_wdb_begin2, 1); will_return(__wrap_wdb_stmt_cache, 1); + expect_value(__wrap_sqlite3_bind_int, index, 1); + expect_value(__wrap_sqlite3_bind_int, value, 0); + will_return(__wrap_sqlite3_bind_int, SQLITE_OK); will_return(__wrap_wdb_exec_stmt, j_stmt_result); /* wdb_global_calculate_agent_group_csv */ @@ -9857,6 +9883,7 @@ int main() /* Tests wdb_global_recalculate_all_agent_groups_hash */ cmocka_unit_test_setup_teardown(test_wdb_global_recalculate_all_agent_groups_hash_transaction_fail, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_wdb_global_recalculate_all_agent_groups_hash_cache_fail, test_setup, test_teardown), + cmocka_unit_test_setup_teardown(test_wdb_global_recalculate_all_agent_groups_hash_bind_fail, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_wdb_global_recalculate_all_agent_groups_hash_exec_stmt_null, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_wdb_global_recalculate_all_agent_groups_hash_invalid_id, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_wdb_global_recalculate_all_agent_groups_hash_recalculate_error, test_setup, test_teardown), diff --git a/src/unit_tests/wazuh_db/test_wdb_global_parser.c b/src/unit_tests/wazuh_db/test_wdb_global_parser.c index f4431bd54b5..624544b32c4 100644 --- a/src/unit_tests/wazuh_db/test_wdb_global_parser.c +++ b/src/unit_tests/wazuh_db/test_wdb_global_parser.c @@ -4808,7 +4808,7 @@ void test_wdb_parse_global_recalculate_agent_group_hashes_error(void **state) will_return(__wrap_wdb_open_global, data->wdb); expect_string(__wrap__mdebug2, formatted_msg, "Global query: recalculate-agent-group-hashes"); will_return(__wrap_wdb_global_recalculate_all_agent_groups_hash, OS_INVALID); - expect_string(__wrap__mdebug1, formatted_msg, "Error recalculating group hash of agents in global.db."); + expect_string(__wrap__mwarn, formatted_msg, "Error recalculating group hash of agents in global.db."); expect_function_call(__wrap_w_inc_queries_total); expect_function_call(__wrap_w_inc_global); diff --git a/src/wazuh_db/wdb.c b/src/wazuh_db/wdb.c index f10651ff60b..183a0e35020 100644 --- a/src/wazuh_db/wdb.c +++ b/src/wazuh_db/wdb.c @@ -207,7 +207,6 @@ static const char *SQL_STMT[] = { [WDB_STMT_GLOBAL_GET_AGENTS] = "SELECT id FROM agent WHERE id > ?;", [WDB_STMT_GLOBAL_GET_AGENTS_BY_CONNECTION_STATUS] = "SELECT id FROM agent WHERE id > ? AND connection_status = ?;", [WDB_STMT_GLOBAL_GET_AGENTS_BY_CONNECTION_STATUS_AND_NODE] = "SELECT id FROM agent WHERE id > ? AND connection_status = ? AND node_name = ? 
ORDER BY id LIMIT ?;", - [WDB_STMT_GLOBAL_GET_AGENTS_TO_RECALCULATE_GROUP_HASH] = "SELECT id FROM agent WHERE id > 0;", [WDB_STMT_GLOBAL_GET_AGENT_INFO] = "SELECT * FROM agent WHERE id = ?;", [WDB_STMT_GLOBAL_RESET_CONNECTION_STATUS] = "UPDATE agent SET connection_status = 'disconnected', status_code = ?, sync_status = ?, disconnection_time = STRFTIME('%s', 'NOW') where connection_status != 'disconnected' AND connection_status != 'never_connected' AND id != 0;", [WDB_STMT_GLOBAL_GET_AGENTS_TO_DISCONNECT] = "SELECT id FROM agent WHERE id > ? AND (connection_status = 'active' OR connection_status = 'pending') AND last_keepalive < ?;", diff --git a/src/wazuh_db/wdb.h b/src/wazuh_db/wdb.h index 9a7bfbf04a4..dfe048658ea 100644 --- a/src/wazuh_db/wdb.h +++ b/src/wazuh_db/wdb.h @@ -266,7 +266,6 @@ typedef enum wdb_stmt { WDB_STMT_GLOBAL_GET_AGENTS, WDB_STMT_GLOBAL_GET_AGENTS_BY_CONNECTION_STATUS, WDB_STMT_GLOBAL_GET_AGENTS_BY_CONNECTION_STATUS_AND_NODE, - WDB_STMT_GLOBAL_GET_AGENTS_TO_RECALCULATE_GROUP_HASH, WDB_STMT_GLOBAL_GET_AGENT_INFO, WDB_STMT_GLOBAL_GET_AGENTS_TO_DISCONNECT, WDB_STMT_GLOBAL_RESET_CONNECTION_STATUS, diff --git a/src/wazuh_db/wdb_global.c b/src/wazuh_db/wdb_global.c index 38fab052f19..e0f80b92499 100644 --- a/src/wazuh_db/wdb_global.c +++ b/src/wazuh_db/wdb_global.c @@ -1551,11 +1551,16 @@ int wdb_global_recalculate_all_agent_groups_hash(wdb_t* wdb) { return OS_INVALID; } - if (wdb_stmt_cache(wdb, WDB_STMT_GLOBAL_GET_AGENTS_TO_RECALCULATE_GROUP_HASH) < 0) { + if (wdb_stmt_cache(wdb, WDB_STMT_GLOBAL_GET_AGENTS) < 0) { mdebug1("Cannot cache statement"); return OS_INVALID; } - sqlite3_stmt* stmt = wdb->stmt[WDB_STMT_GLOBAL_GET_AGENTS_TO_RECALCULATE_GROUP_HASH]; + sqlite3_stmt* stmt = wdb->stmt[WDB_STMT_GLOBAL_GET_AGENTS]; + + if (sqlite3_bind_int(stmt, 1, 0) != SQLITE_OK) { + merror("DB(%s) sqlite3_bind_int(): %s", wdb->id, sqlite3_errmsg(wdb->db)); + return OS_INVALID; + } //Get agents to recalculate hash cJSON* j_stmt_result = wdb_exec_stmt(stmt); diff --git a/src/wazuh_db/wdb_parser.c b/src/wazuh_db/wdb_parser.c index 63d0c0d3074..c15e7cc44f6 100644 --- a/src/wazuh_db/wdb_parser.c +++ b/src/wazuh_db/wdb_parser.c @@ -6030,7 +6030,7 @@ int wdb_parse_get_groups_integrity(wdb_t* wdb, char* input, char* output) { int wdb_parse_global_recalculate_agent_group_hashes(wdb_t* wdb, char* output) { if (OS_SUCCESS != wdb_global_recalculate_all_agent_groups_hash(wdb)) { - mdebug1("Error recalculating group hash of agents in global.db."); + mwarn("Error recalculating group hash of agents in global.db."); snprintf(output, OS_MAXSTR + 1, "err Error recalculating group hash of agents in global.db"); return OS_INVALID; } From 76800651bc334721a0bc428ff36482701030b651 Mon Sep 17 00:00:00 2001 From: Tomas Turina Date: Thu, 16 May 2024 15:48:30 +0000 Subject: [PATCH 132/419] Fix UT after removing rollback function in groups queries --- src/unit_tests/wazuh_db/test_wdb_global_parser.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/unit_tests/wazuh_db/test_wdb_global_parser.c b/src/unit_tests/wazuh_db/test_wdb_global_parser.c index d60fb80b99e..ea08be6d6ba 100644 --- a/src/unit_tests/wazuh_db/test_wdb_global_parser.c +++ b/src/unit_tests/wazuh_db/test_wdb_global_parser.c @@ -2319,7 +2319,6 @@ void test_wdb_parse_global_delete_group_query_error(void **state) will_return(__wrap_wdb_open_global, data->wdb); expect_string(__wrap__mdebug2, formatted_msg, "Global query: delete-group test_group"); - will_return(__wrap_wdb_commit2, OS_SUCCESS); expect_string(__wrap_wdb_global_delete_group, 
group_name, "test_group"); will_return(__wrap_wdb_global_delete_group, OS_INVALID); expect_string(__wrap__mdebug1, formatted_msg, "Error deleting group in global.db."); @@ -2352,7 +2351,6 @@ void test_wdb_parse_global_delete_group_success(void **state) will_return(__wrap_wdb_open_global, data->wdb); expect_string(__wrap__mdebug2, formatted_msg, "Global query: delete-group test_group"); - will_return(__wrap_wdb_commit2, OS_SUCCESS); expect_string(__wrap_wdb_global_delete_group, group_name, "test_group"); will_return(__wrap_wdb_global_delete_group, OS_SUCCESS); @@ -2845,7 +2843,6 @@ void test_wdb_parse_global_set_agent_groups_invalid_json(void **state) expect_function_call(__wrap_gettimeofday); expect_function_call(__wrap_gettimeofday); expect_function_call(__wrap_w_inc_global_open_time); - will_return(__wrap_wdb_commit2, OS_SUCCESS); expect_function_call(__wrap_w_inc_global_agent_set_agent_groups); expect_function_call(__wrap_gettimeofday); expect_function_call(__wrap_gettimeofday); @@ -2876,7 +2873,6 @@ void test_wdb_parse_global_set_agent_groups_missing_field(void **state) expect_function_call(__wrap_gettimeofday); expect_function_call(__wrap_gettimeofday); expect_function_call(__wrap_w_inc_global_open_time); - will_return(__wrap_wdb_commit2, OS_SUCCESS); expect_function_call(__wrap_w_inc_global_agent_set_agent_groups); expect_function_call(__wrap_gettimeofday); expect_function_call(__wrap_gettimeofday); @@ -2907,7 +2903,6 @@ void test_wdb_parse_global_set_agent_groups_invalid_mode(void **state) expect_function_call(__wrap_gettimeofday); expect_function_call(__wrap_gettimeofday); expect_function_call(__wrap_w_inc_global_open_time); - will_return(__wrap_wdb_commit2, OS_SUCCESS); expect_function_call(__wrap_w_inc_global_agent_set_agent_groups); expect_function_call(__wrap_gettimeofday); expect_function_call(__wrap_gettimeofday); @@ -2941,7 +2936,6 @@ void test_wdb_parse_global_set_agent_groups_fail(void **state) expect_function_call(__wrap_gettimeofday); expect_function_call(__wrap_gettimeofday); expect_function_call(__wrap_w_inc_global_open_time); - will_return(__wrap_wdb_commit2, OS_SUCCESS); expect_function_call(__wrap_w_inc_global_agent_set_agent_groups); expect_function_call(__wrap_gettimeofday); expect_function_call(__wrap_gettimeofday); @@ -2965,7 +2959,6 @@ void test_wdb_parse_global_set_agent_groups_success(void **state) will_return(__wrap_wdb_open_global, data->wdb); expect_string(__wrap__mdebug2, formatted_msg, "Global query: set-agent-groups {\"mode\":\"append\",\"sync_status\":\"synced\",\"data\":[{\"id\":1,\"groups\":[\"default\"]}]}"); - will_return(__wrap_wdb_commit2, OS_SUCCESS); expect_value(__wrap_wdb_global_set_agent_groups, mode, WDB_GROUP_APPEND); expect_string(__wrap_wdb_global_set_agent_groups, sync_status, "synced"); expect_string(__wrap_wdb_global_set_agent_groups, agents_group_info, "[{\"id\":1,\"groups\":[\"default\"]}]"); From b67e8553f7a8f5fd377f3a479a08ef4b0834a0d5 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Thu, 16 May 2024 17:35:05 -0300 Subject: [PATCH 133/419] CL: - Fixed exception catch blocks - Updated UTs --- src/shared_modules/utils/socketDBWrapper.hpp | 5 +- .../buildAllAgentListContext.hpp | 2 - .../src/scanOrchestrator/scanAgentList.hpp | 4 +- .../tests/unit/scanAgentList_test.cpp | 174 ++++++++++++++++++ 4 files changed, 180 insertions(+), 5 deletions(-) diff --git a/src/shared_modules/utils/socketDBWrapper.hpp b/src/shared_modules/utils/socketDBWrapper.hpp index c89e759e237..bfd73311d5b 100644 --- 
a/src/shared_modules/utils/socketDBWrapper.hpp +++ b/src/shared_modules/utils/socketDBWrapper.hpp @@ -189,7 +189,10 @@ class SocketDBWrapper final : public Singleton case DbQueryStatus::QUERY_IGNORE: case DbQueryStatus::QUERY_UNKNOWN: case DbQueryStatus::QUERY_NOT_SYNCED: throw SocketDbWrapperException(m_exceptionStr); break; - default: throw std::runtime_error(m_exceptionStr); + case DbQueryStatus::UNKNOWN: + case DbQueryStatus::JSON_PARSING: + case DbQueryStatus::INVALID_RESPONSE: + default: throw std::runtime_error(m_exceptionStr); break; } } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp index 2374682b21f..098a0cd63d2 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp @@ -64,13 +64,11 @@ class TBuildAllAgentListContext final : public AbstractHandlerm_agentsWithIncompletedScan.push_back(agent); } - catch (const std::exception& e) + catch (const std::runtime_error& e) { logError(WM_VULNSCAN_LOGTAG, "Error handling request: %s", e.what()); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp index b22b2ae8751..aa258f8d6a3 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp @@ -156,6 +156,180 @@ TEST_F(ScanAgentListTest, EmptyPackagesWDBResponseTest) scanAgentList->handleRequest(contextData); } +TEST_F(ScanAgentListTest, MutipleRecoverableExceptions) +{ + spSocketDBWrapperMock = std::make_shared(); + + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "upstream", + .majorVersion = "osdata_majorVersion", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "osdata_platform", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(testing::_)).WillRepeatedly(testing::Return(osData)); + + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(testing::_)) + .WillRepeatedly(testing::Return(Remediation {})); + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(2) + .WillRepeatedly(testing::Throw(SocketDbWrapperException("Temporal error on DB"))); + + auto spOsOrchestrationMock = std::make_shared>>(); + + auto spPackageInsertOrchestrationMock = + std::make_shared>>(); + + // Called twice because the server socket response has two packages. 
+ EXPECT_CALL(*spPackageInsertOrchestrationMock, handleRequest(testing::_)).Times(0); + + auto scanAgentList = std::make_shared>, + TrampolineSocketDBWrapper>>(spOsOrchestrationMock, + spPackageInsertOrchestrationMock); + + nlohmann::json jsonData = nlohmann::json::parse( + R"({"agent_info": {"agent_id":"001", "agent_version":"4.8.0", "agent_name":"test_agent_name", "agent_ip":"10.0.0.1", "node_name":"node01"}, "action":"upgradeAgentDB"})"); + + std::variant + data = &jsonData; + + auto contextData = std::make_shared(data); + contextData->m_agents.push_back({"001", "test_agent_name_1", "4.8.0", "192.168.0.1"}); + contextData->m_agents.push_back({"002", "test_agent_name_2", "4.8.0", "192.168.0.2"}); + + EXPECT_THROW(scanAgentList->handleRequest(contextData), AgentReScanListException); + spSocketDBWrapperMock.reset(); +} + +TEST_F(ScanAgentListTest, OneRecoverableException) +{ + spSocketDBWrapperMock = std::make_shared(); + + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "upstream", + .majorVersion = "osdata_majorVersion", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "osdata_platform", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(testing::_)).WillRepeatedly(testing::Return(osData)); + + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(testing::_)) + .WillRepeatedly(testing::Return(Remediation {})); + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::Throw(SocketDbWrapperException("Temporal error on DB"))); + + auto spOsOrchestrationMock = std::make_shared>>(); + + auto spPackageInsertOrchestrationMock = + std::make_shared>>(); + + // Called twice because the server socket response has two packages. 
+ EXPECT_CALL(*spPackageInsertOrchestrationMock, handleRequest(testing::_)).Times(0); + + auto scanAgentList = std::make_shared>, + TrampolineSocketDBWrapper>>(spOsOrchestrationMock, + spPackageInsertOrchestrationMock); + + nlohmann::json jsonData = nlohmann::json::parse( + R"({"agent_info": {"agent_id":"001", "agent_version":"4.8.0", "agent_name":"test_agent_name", "agent_ip":"10.0.0.1", "node_name":"node01"}, "action":"upgradeAgentDB"})"); + + std::variant + data = &jsonData; + + auto contextData = std::make_shared(data); + contextData->m_agents.push_back({"001", "test_agent_name", "4.8.0", "192.168.0.1"}); + + EXPECT_THROW(scanAgentList->handleRequest(contextData), AgentReScanException); + spSocketDBWrapperMock.reset(); +} + +TEST_F(ScanAgentListTest, UnrecoverableException) +{ + spSocketDBWrapperMock = std::make_shared(); + + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "upstream", + .majorVersion = "osdata_majorVersion", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "osdata_platform", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(testing::_)).WillRepeatedly(testing::Return(osData)); + + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(testing::_)) + .WillRepeatedly(testing::Return(Remediation {})); + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::Throw(std::runtime_error("FAILURE"))); + + auto spOsOrchestrationMock = std::make_shared>>(); + + auto spPackageInsertOrchestrationMock = + std::make_shared>>(); + + // Called twice because the server socket response has two packages. 
+ EXPECT_CALL(*spPackageInsertOrchestrationMock, handleRequest(testing::_)).Times(0); + + auto scanAgentList = std::make_shared>, + TrampolineSocketDBWrapper>>(spOsOrchestrationMock, + spPackageInsertOrchestrationMock); + + nlohmann::json jsonData = nlohmann::json::parse( + R"({"agent_info":{"agent_id":"001","agent_version":"4.8.0","agent_name":"test_agent_name","agent_ip":"10.0.0.1","node_name":"node01"}, "action":"upgradeAgentDB"})"); + + std::variant + data = &jsonData; + + auto contextData = std::make_shared(data); + contextData->m_agents.push_back({"001", "test_agent_name", "4.8.0", "192.168.0.1"}); + + EXPECT_NO_THROW(scanAgentList->handleRequest(contextData)); + spRemediationDataCacheMock.reset(); + spSocketDBWrapperMock.reset(); + spOsDataCacheMock.reset(); +} + TEST_F(ScanAgentListTest, DISABLED_InsertAllTestNotSyncedResponse) { spSocketDBWrapperMock = std::make_shared(); From 174fc5478ac879be8063c3545535b5b4eac2f624 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Thu, 16 May 2024 20:23:13 -0300 Subject: [PATCH 134/419] CL: - Restored exception catch --- .../src/scanOrchestrator/scanAgentList.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp index c0baae556a3..a39e45c851f 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp @@ -219,7 +219,7 @@ class TScanAgentList final : public AbstractHandlerm_agentsWithIncompletedScan.push_back(agent); } - catch (const std::runtime_error& e) + catch (const std::exception& e) { logError(WM_VULNSCAN_LOGTAG, "Error handling request: %s", e.what()); } From ab3274bc96198d1f3d95ae200c2ba3fd46387fd8 Mon Sep 17 00:00:00 2001 From: pereyra-m Date: Fri, 17 May 2024 03:33:41 +0000 Subject: [PATCH 135/419] Fixing typo --- .../src/scanOrchestrator/packageScanner.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp index f987d0b620d..8374b6099e6 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp @@ -558,7 +558,7 @@ class TPackageScanner final : public AbstractHandlergetRemediationData(); - if (!agentRemediations.hotfixes.empty()) + if (agentRemediations.hotfixes.empty()) { logDebug2( WM_VULNSCAN_LOGTAG, "No remediations for agent '%s' have been found.", contextData->agentId().data()); From 9b601209748d66f70e74b56814be80df73f39114 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 17 May 2024 01:34:11 -0300 Subject: [PATCH 136/419] Change the trigger size to run a bulk query, to insert, delete or modify elements. 
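The sketch below is a hypothetical illustration, not part of the original commit and not the actual IndexerConnector code: the only functional change in this patch is raising ELEMENTS_PER_BULK from 50 to 1000, i.e. how many queued elements are accumulated before a single bulk insert/update/delete request is dispatched to the indexer. The class and method names here are invented for illustration only.

    #include <string>
    #include <utility>
    #include <vector>

    constexpr auto ELEMENTS_PER_BULK {1000}; // previously 50

    // Hypothetical sketch: queue elements and flush them as one bulk request
    // once the configured threshold is reached.
    class BulkQueue
    {
    public:
        void push(std::string element)
        {
            m_pending.push_back(std::move(element));

            if (m_pending.size() >= ELEMENTS_PER_BULK)
            {
                flush();
            }
        }

    private:
        void flush()
        {
            // Send a single bulk request containing all pending elements, then clear.
            m_pending.clear();
        }

        std::vector<std::string> m_pending;
    };

With the larger threshold the connector presumably buffers slightly more data in memory in exchange for far fewer bulk round-trips to the indexer.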
--- src/shared_modules/indexer_connector/src/indexerConnector.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/shared_modules/indexer_connector/src/indexerConnector.cpp b/src/shared_modules/indexer_connector/src/indexerConnector.cpp index 0e5fbc9b556..8a28d51e07f 100644 --- a/src/shared_modules/indexer_connector/src/indexerConnector.cpp +++ b/src/shared_modules/indexer_connector/src/indexerConnector.cpp @@ -21,7 +21,7 @@ constexpr auto NOT_USED {-1}; constexpr auto INDEXER_COLUMN {"indexer"}; constexpr auto USER_KEY {"username"}; constexpr auto PASSWORD_KEY {"password"}; -constexpr auto ELEMENTS_PER_BULK {50}; +constexpr auto ELEMENTS_PER_BULK {1000}; namespace Log { From 4310fcd36836f480298506334a475543f492d786 Mon Sep 17 00:00:00 2001 From: "Victor M. Fernandez-Castro" Date: Fri, 17 May 2024 09:05:13 +0000 Subject: [PATCH 137/419] fix: clean http-request on make clean-deps --- src/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index 3693076cbdf..a942cb44b24 100644 --- a/src/Makefile +++ b/src/Makefile @@ -2727,7 +2727,7 @@ ifneq ($(wildcard external/cpython/*),) endif clean-deps: - rm -rf $(EXTERNAL_DIR) $(EXTERNAL_CPYTHON) external/$(WPYTHON_TAR) $(EXTERNAL_ROCKSDB) + rm -rf $(EXTERNAL_DIR) $(EXTERNAL_TAR) shared_modules/http-request/* shared_modules/http-request/.??* clean-internals: clean-unit-tests rm -f $(BUILD_SERVER) From 51c983264013a80f9e966d5eafac5552c98cd424 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Fri, 17 May 2024 11:34:41 +0200 Subject: [PATCH 138/419] Apply suggested modifications --- framework/wazuh/core/cluster/common.py | 18 ++++++- framework/wazuh/core/cluster/master.py | 4 +- .../wazuh/core/cluster/tests/test_common.py | 51 ++++++++++++++++++- .../wazuh/core/cluster/tests/test_master.py | 8 +-- .../wazuh/core/cluster/tests/test_worker.py | 5 +- framework/wazuh/core/cluster/worker.py | 9 ++-- 6 files changed, 77 insertions(+), 18 deletions(-) diff --git a/framework/wazuh/core/cluster/common.py b/framework/wazuh/core/cluster/common.py index 77b6d9e882d..dd07531fbfb 100644 --- a/framework/wazuh/core/cluster/common.py +++ b/framework/wazuh/core/cluster/common.py @@ -25,7 +25,7 @@ from wazuh.core import common, exception from wazuh.core import utils from wazuh.core.cluster import cluster, utils as cluster_utils -from wazuh.core.wdb import WazuhDBConnection +from wazuh.core.wdb import WazuhDBConnection, AsyncWazuhDBConnection class Response: """ @@ -1198,6 +1198,22 @@ def __init__(self): """Class constructor.""" self.sync_tasks = {} + @staticmethod + async def recalculate_group_hash(logger) -> None: + """Recalculate agent-group hash in the DB. + + Parameters + ---------- + logger : Logger object + Logger to use during the recalculation process. + """ + try: + # Recalculate group hashes before retrieving agent groups info + logger.debug('Recalculating agent-group hash.') + await AsyncWazuhDBConnection().run_wdb_command(command='global recalculate-agent-group-hashes') + except (exception.WazuhInternalError, exception.WazuhError) as e: + logger.warning(f'Error {e.code} executing recalculate agent-group hash command: {e.message}') + def get_logger(self, logger_tag: str = '') -> logging.Logger: """Get a logger object. 
diff --git a/framework/wazuh/core/cluster/master.py b/framework/wazuh/core/cluster/master.py index 1df6979e7fd..14fc0f349f0 100644 --- a/framework/wazuh/core/cluster/master.py +++ b/framework/wazuh/core/cluster/master.py @@ -666,9 +666,7 @@ async def send_entire_agent_groups_information(self): start_time = get_utc_now() logger.info('Starting.') - # Recalculate group hashes before retrieving agent groups info - logger.debug('Recalculating agent-group hash.') - await AsyncWazuhDBConnection().run_wdb_command(command='global recalculate-agent-group-hashes') + await self.recalculate_group_hash(logger) sync_object = c_common.SyncWazuhdb(manager=self, logger=logger, cmd=b'syn_g_m_w_c', data_retriever=AsyncWazuhDBConnection().run_wdb_command, diff --git a/framework/wazuh/core/cluster/tests/test_common.py b/framework/wazuh/core/cluster/tests/test_common.py index 29a6b506c8d..e94211cdae3 100644 --- a/framework/wazuh/core/cluster/tests/test_common.py +++ b/framework/wazuh/core/cluster/tests/test_common.py @@ -13,7 +13,7 @@ import sys from contextvars import ContextVar from datetime import datetime -from unittest.mock import patch, MagicMock, mock_open, call, ANY +from unittest.mock import patch, MagicMock, AsyncMock, mock_open, call, ANY import cryptography import pytest @@ -1254,6 +1254,55 @@ def test_wazuh_common_init(): assert wazuh_common_test.sync_tasks == {} +@pytest.mark.asyncio +@patch("wazuh.core.cluster.common.AsyncWazuhDBConnection", return_value=AsyncMock()) +async def test_wazuh_common_recalculate_group_hash(asyncwazuhdbconnection_mock): + class LoggerMock: + """Auxiliary class.""" + + def __init__(self): + self._debug = [] + + def debug(self, data): + """Auxiliary method.""" + self._debug.append(data) + + logger = LoggerMock() + await wazuh_common.recalculate_group_hash(logger) + assert logger._debug == ['Recalculating agent-group hash.'] + + +@pytest.mark.asyncio +@patch("wazuh.core.cluster.common.AsyncWazuhDBConnection") +async def test_wazuh_common_recalculate_group_hash_ko(asyncwazuhdbconnection_mock): + class LoggerMock: + """Auxiliary class.""" + + def __init__(self): + self._warning = [] + self._debug = [] + + def debug(self, data): + """Auxiliary method.""" + self._debug.append(data) + + def warning(self, data): + """Auxiliary method.""" + self._warning.append(data) + + logger = LoggerMock() + asyncwazuhdbconnection_mock.side_effect = [exception.WazuhInternalError(2007), exception.WazuhError(2003)] + + await wazuh_common.recalculate_group_hash(logger) + assert logger._debug == ['Recalculating agent-group hash.'] + assert logger._warning == ['Error 2007 executing recalculate agent-group hash command: ' + 'Error retrieving data from Wazuh DB'] + + logger = LoggerMock() + await wazuh_common.recalculate_group_hash(logger) + assert logger._debug == ['Recalculating agent-group hash.'] + assert logger._warning == ['Error 2003 executing recalculate agent-group hash command: Error in wazuhdb request'] + def test_wazuh_common_get_logger(): """Check if a Logger object is properly returned.""" diff --git a/framework/wazuh/core/cluster/tests/test_master.py b/framework/wazuh/core/cluster/tests/test_master.py index a6eaedbb4d1..8f49067ffa6 100644 --- a/framework/wazuh/core/cluster/tests/test_master.py +++ b/framework/wazuh/core/cluster/tests/test_master.py @@ -896,9 +896,9 @@ def info(self, info): @pytest.mark.asyncio -@patch("wazuh.core.cluster.master.AsyncWazuhDBConnection", return_value=AsyncMock()) +@patch("wazuh.core.cluster.master.MasterHandler.recalculate_group_hash", 
return_value=AsyncMock()) @patch('wazuh.core.cluster.common.SyncWazuhdb') -async def test_manager_handler_send_entire_agent_groups_information(syncwazuhdb_mock, asyncwazuhdbconnection_mock): +async def test_manager_handler_send_entire_agent_groups_information(syncwazuhdb_mock, recalculate_group_hash_mock): """Check if the data chunks are being properly forward to the Wazuh-db socket.""" class LoggerMock: @@ -909,9 +909,6 @@ def __init__(self): self._info = [] self._error = [] - def debug(self, debug): - self._debug.append(debug) - def info(self, data): """Auxiliary method.""" self._info.append(data) @@ -932,7 +929,6 @@ def info(self, data): syncwazuhdb_mock.return_value.retrieve_information.assert_called_once() syncwazuhdb_mock.return_value.sync.assert_called_once_with(start_time=ANY, chunks=ANY) assert logger._info == ['Starting.'] - assert logger._debug == ['Recalculating agent-group hash.'] @pytest.mark.asyncio diff --git a/framework/wazuh/core/cluster/tests/test_worker.py b/framework/wazuh/core/cluster/tests/test_worker.py index d2371e5927d..34e94dccb1d 100644 --- a/framework/wazuh/core/cluster/tests/test_worker.py +++ b/framework/wazuh/core/cluster/tests/test_worker.py @@ -607,14 +607,14 @@ def clear(self): @pytest.mark.asyncio @freeze_time('1970-01-01') -@patch("wazuh.core.cluster.worker.AsyncWazuhDBConnection") +@patch("wazuh.core.cluster.worker.WorkerHandler.recalculate_group_hash", return_value=AsyncMock()) @patch('wazuh.core.cluster.worker.WorkerHandler.check_agent_groups_checksums', return_value='') @patch('wazuh.core.cluster.common.Handler.send_request', return_value='check') @patch('wazuh.core.cluster.common.Handler.update_chunks_wdb', return_value={'updated_chunks': 1}) @patch('wazuh.core.cluster.common.Handler.get_chunks_in_task_id', return_value='chunks') async def test_worker_handler_recv_agent_groups_information(get_chunks_in_task_id_mock, update_chunks_wdb_mock, send_request_mock, check_agent_groups_checksums_mock, - asyncwazuhdbconnection_mock): + recalculate_group_hash_mock): """Check that the wazuh-db data reception task is created.""" class LoggerMock: @@ -646,7 +646,6 @@ def reset_mock(): assert 'Finished in 0.000s. Updated 1 chunks.' 
in logger._info reset_mock() - asyncwazuhdbconnection_mock.return_value = AsyncMock() assert await worker_handler.recv_agent_groups_entire_information(task_id=b'17', info_type='agent-groups') == 'check' get_chunks_in_task_id_mock.assert_called_once_with(b'17', b'syn_wgc_err') update_chunks_wdb_mock.assert_called_once_with('chunks', 'agent-groups', logger_c, b'syn_wgc_err', 0) diff --git a/framework/wazuh/core/cluster/worker.py b/framework/wazuh/core/cluster/worker.py index cc28258853d..07af52d9061 100644 --- a/framework/wazuh/core/cluster/worker.py +++ b/framework/wazuh/core/cluster/worker.py @@ -452,7 +452,11 @@ async def recv_agent_groups_entire_information(self, task_id: bytes, info_type: error_command = b'syn_wgc_err' timeout = self.cluster_items['intervals']['worker']['timeout_agent_groups'] - return await self.recv_agent_groups_information(task_id, info_type, logger, command, error_command, timeout) + master_groups_info = await self.recv_agent_groups_information(task_id, info_type, logger, + command, error_command, timeout) + await self.recalculate_group_hash(logger) + + return master_groups_info async def recv_agent_groups_information(self, task_id: bytes, info_type: str, logger: logging.Logger, command: bytes, error_command: bytes, timeout: int): @@ -483,9 +487,6 @@ async def recv_agent_groups_information(self, task_id: bytes, info_type: str, lo data = await super().get_chunks_in_task_id(task_id, error_command) result = await super().update_chunks_wdb(data, info_type, logger, error_command, timeout) response = await self.send_request(command=command, data=json.dumps(result).encode()) - if command == b'syn_wgc_e': - # Recalculate group hash before comparing with master's - await AsyncWazuhDBConnection().run_wdb_command(command='global recalculate-agent-group-hashes') await self.check_agent_groups_checksums(data, logger) end_time = datetime.utcnow().replace(tzinfo=timezone.utc) From a96eb9c1cc7338ba46590f311180a4ea06dfa9a4 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 17 May 2024 17:41:03 -0300 Subject: [PATCH 139/419] CL: - Updated exception types for different DB events - Updated catch blocks on calls to DB queries --- src/shared_modules/utils/socketDBWrapper.hpp | 6 +-- .../buildAllAgentListContext.hpp | 10 +++-- .../buildSingleAgentListContext.hpp | 14 +++++-- .../src/scanOrchestrator/osDataCache.hpp | 8 +++- .../src/scanOrchestrator/osScanner.hpp | 10 ++++- .../scanOrchestrator/remediationDataCache.hpp | 6 ++- .../src/scanOrchestrator/scanAgentList.hpp | 42 +++++++++++++++---- .../src/vulnerabilityScannerFacade.cpp | 4 ++ 8 files changed, 80 insertions(+), 20 deletions(-) diff --git a/src/shared_modules/utils/socketDBWrapper.hpp b/src/shared_modules/utils/socketDBWrapper.hpp index bfd73311d5b..317c703509b 100644 --- a/src/shared_modules/utils/socketDBWrapper.hpp +++ b/src/shared_modules/utils/socketDBWrapper.hpp @@ -184,12 +184,12 @@ class SocketDBWrapper final : public Singleton // coverity[missing_lock] switch (m_queryStatus) { + case DbQueryStatus::QUERY_NOT_SYNCED: throw SocketDbWrapperException(m_exceptionStr); break; case DbQueryStatus::EMPTY_RESPONSE: + case DbQueryStatus::UNKNOWN: case DbQueryStatus::QUERY_ERROR: - case DbQueryStatus::QUERY_IGNORE: case DbQueryStatus::QUERY_UNKNOWN: - case DbQueryStatus::QUERY_NOT_SYNCED: throw SocketDbWrapperException(m_exceptionStr); break; - case DbQueryStatus::UNKNOWN: + case DbQueryStatus::QUERY_IGNORE: case DbQueryStatus::JSON_PARSING: case DbQueryStatus::INVALID_RESPONSE: default: throw std::runtime_error(m_exceptionStr); 
break; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp index 098a0cd63d2..d723a1ade8d 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp @@ -64,10 +64,14 @@ class TBuildAllAgentListContext final : public AbstractHandleragentId().data()); + throw WdbDataException(e.what(), data->agentId().data()); } + catch (std::exception& e) + { + logError(WM_VULNSCAN_LOGTAG, + "Unable to retrieve agent-info (agent %s). Reason: %s", + data->agentId().data(), + e.what()); + return nullptr; + } + // Return elements should be one agent. if (response.size() == 1) { diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp index 8fe8caf16b2..a4c2faba50d 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp @@ -62,14 +62,18 @@ class OsDataCache final : public Singleton> TSocketDBWrapper::instance().query(WazuhDBQueryBuilder::builder().agentGetOsInfoCommand(agentId).build(), response); } - catch (const std::exception& e) + catch (const SocketDbWrapperException& e) { throw WdbDataException(e.what(), agentId); } + catch (const std::exception& e) + { + throw std::runtime_error("Unable to retrieve OS data from Wazuh-DB (agent " + agentId + "). Reason: " + e.what()); + } if (response.empty()) { - throw WdbDataException("Empty response from Wazuh-DB", agentId); + throw std::runtime_error("Empty OS data from Wazuh-DB (agent " + agentId + ")."); } Os osData; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp index 4e0b4bf0a71..63bac74dc87 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp @@ -63,10 +63,18 @@ class TOsScanner final : public AbstractHandler> WazuhDBQueryBuilder::builder().agentGetHotfixesCommand(data->agentId().data()).build(), responseHotfixes); } - catch (const std::exception& e) + catch (const SocketDbWrapperException& e) { throw WdbDataException(e.what(), data->agentId()); } + catch (const std::exception& e) + { + logError(WM_VULNSCAN_LOGTAG, + "Unable to retrieve hotfixes for agent %s. 
Reason: %s", + data->agentId().data(), + e.what()); + return nullptr; + } } const auto osCPE = ScannerHelper::parseCPE(data->osCPEName().data()); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp index 5745cc4c3b7..9326ac60760 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp @@ -50,10 +50,14 @@ class RemediationDataCache final : public Singleton> TSocketDBWrapper::instance().query(WazuhDBQueryBuilder::builder().agentGetHotfixesCommand(agentId).build(), response); } - catch (const std::exception& e) + catch (const SocketDbWrapperException& e) { throw WdbDataException(e.what(), agentId); } + catch (const std::exception& e) + { + throw std::runtime_error("Unable to retrieve remediation data from Wazuh-DB (agent " + agentId + "). Reason: " + e.what()); + } Remediation remediationData; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp index a39e45c851f..d183605e1e2 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp @@ -46,11 +46,25 @@ class TScanAgentList final : public AbstractHandlerpushEventToDelayedDispatcher(element, e.agentId()); } catch (const AgentReScanException& e) { + logDebug2( + WM_VULNSCAN_LOGTAG, "AgentReScanException (Agent %s). Reason: %s", e.agentId().c_str(), e.what()); scanOrchestrator->pushEventToDelayedDispatcher(element, e.agentId()); } catch (const AgentReScanListException& e) { + logDebug2(WM_VULNSCAN_LOGTAG, "AgentReScanListException. 
Reason: %s", e.what()); for (const auto& agentData : e.agentList()) { scanOrchestrator->pushEventToDelayedDispatcher(element, agentData.id); From f6771c99748d85bbdfd83306635d471bde1fe1c2 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 17 May 2024 17:41:54 -0300 Subject: [PATCH 140/419] CL: - Updated UTs --- .../tests/unit/osDataCache_test.cpp | 22 ++++++++++++++++--- .../tests/unit/remediationDataCache_test.cpp | 18 ++++++++++++++- .../tests/unit/scanAgentList_test.cpp | 4 ++-- 3 files changed, 38 insertions(+), 6 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp index fcb0cc66ad6..cbfb02f7120 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp @@ -25,7 +25,7 @@ TEST_F(OsDataCacheTest, TestSetAndGetSuccess) std::string agentId {"1"}; // Try to get value from empty cache - EXPECT_THROW(cache.getOsData(agentId), WdbDataException); + EXPECT_THROW(cache.getOsData(agentId), std::runtime_error); // Set value in cache Os osData {.hostName = "hostName", @@ -134,10 +134,10 @@ TEST_F(OsDataCacheTest, EmptyResponse) std::string agentId {"1"}; // Try to get value from empty cache - EXPECT_THROW(cache.getOsData(agentId), WdbDataException); + EXPECT_THROW(cache.getOsData(agentId), std::runtime_error); } -TEST_F(OsDataCacheTest, ExceptionOnDB) +TEST_F(OsDataCacheTest, UnrecoverableExceptionOnDB) { spSocketDBWrapperMock = std::make_shared(); OsDataCache cache; @@ -148,6 +148,22 @@ TEST_F(OsDataCacheTest, ExceptionOnDB) std::string agentId {"1"}; + EXPECT_THROW(cache.getOsData(agentId), std::runtime_error); + spSocketDBWrapperMock.reset(); +} + +TEST_F(OsDataCacheTest, RecoverableExceptionOnDB) +{ + spSocketDBWrapperMock = std::make_shared(); + OsDataCache cache; + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::Throw(SocketDbWrapperException("Warning on DB"))); + + std::string agentId {"1"}; + EXPECT_THROW(cache.getOsData(agentId), WdbDataException); spSocketDBWrapperMock.reset(); } + diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp index 3cc7714e431..9e4883b8e37 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/remediationDataCache_test.cpp @@ -143,7 +143,7 @@ TEST_F(RemediationDataCacheTest, ResponseFromDB) EXPECT_TRUE(remediationsAreEqual(remediationData, expected)); } -TEST_F(RemediationDataCacheTest, ExceptionOnDB) +TEST_F(RemediationDataCacheTest, UnrecoverableExceptionOnDB) { RemediationDataCache cache; spSocketDBWrapperMock = std::make_shared(); @@ -154,6 +154,22 @@ TEST_F(RemediationDataCacheTest, ExceptionOnDB) std::string agentId {"1"}; + // Attempt to get value from the cache + EXPECT_THROW(cache.getRemediationData(agentId), std::runtime_error); + spSocketDBWrapperMock.reset(); +} + +TEST_F(RemediationDataCacheTest, RecoverableExceptionOnDB) +{ + RemediationDataCache cache; + spSocketDBWrapperMock = std::make_shared(); + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::Throw(SocketDbWrapperException("Warning on DB"))); + + std::string agentId {"1"}; + // Attempt to get value from the cache 
EXPECT_THROW(cache.getRemediationData(agentId), WdbDataException); spSocketDBWrapperMock.reset(); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp index aa258f8d6a3..e615b3d37fb 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp @@ -299,8 +299,8 @@ TEST_F(ScanAgentListTest, UnrecoverableException) .WillRepeatedly(testing::Return(Remediation {})); EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) - .Times(1) - .WillOnce(testing::Throw(std::runtime_error("FAILURE"))); + .Times(2) + .WillRepeatedly(testing::Throw(std::runtime_error("ERROR on DB"))); auto spOsOrchestrationMock = std::make_shared>>(); From 1cd9357c1215b7a6d04661b00c4bb60f133050a9 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 17 May 2024 17:44:18 -0300 Subject: [PATCH 141/419] CL: - Style changes --- .../utils/tests/socketDBWrapper_test.hpp | 10 +++++----- .../src/scanOrchestrator/osDataCache.hpp | 3 ++- .../src/scanOrchestrator/remediationDataCache.hpp | 3 ++- .../tests/unit/osDataCache_test.cpp | 1 - 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/shared_modules/utils/tests/socketDBWrapper_test.hpp b/src/shared_modules/utils/tests/socketDBWrapper_test.hpp index 7d5d4cc37e4..d1bdf41ce41 100644 --- a/src/shared_modules/utils/tests/socketDBWrapper_test.hpp +++ b/src/shared_modules/utils/tests/socketDBWrapper_test.hpp @@ -12,22 +12,22 @@ #ifndef _SOCKET_DB_WRAPPER_TEST_HPP #define _SOCKET_DB_WRAPPER_TEST_HPP -#include "socketServer.hpp" #include "socketDBWrapper.hpp" +#include "socketServer.hpp" #include "gtest/gtest.h" #include #include auto constexpr TEST_SOCKET {"queue/db/wdb"}; - class SocketDBWrapperTest : public ::testing::Test { protected: SocketDBWrapperTest() - : m_sleepTime {0} { - SocketDBWrapper::instance().init(); - }; + : m_sleepTime {0} + { + SocketDBWrapper::instance().init(); + }; ~SocketDBWrapperTest() override = default; void SetUp() override diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp index a4c2faba50d..7b274771556 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osDataCache.hpp @@ -68,7 +68,8 @@ class OsDataCache final : public Singleton> } catch (const std::exception& e) { - throw std::runtime_error("Unable to retrieve OS data from Wazuh-DB (agent " + agentId + "). Reason: " + e.what()); + throw std::runtime_error("Unable to retrieve OS data from Wazuh-DB (agent " + agentId + + "). Reason: " + e.what()); } if (response.empty()) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp index 9326ac60760..079d562aee6 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/remediationDataCache.hpp @@ -56,7 +56,8 @@ class RemediationDataCache final : public Singleton> } catch (const std::exception& e) { - throw std::runtime_error("Unable to retrieve remediation data from Wazuh-DB (agent " + agentId + "). 
Reason: " + e.what()); + throw std::runtime_error("Unable to retrieve remediation data from Wazuh-DB (agent " + agentId + + "). Reason: " + e.what()); } Remediation remediationData; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp index cbfb02f7120..8c8c71460fc 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/osDataCache_test.cpp @@ -166,4 +166,3 @@ TEST_F(OsDataCacheTest, RecoverableExceptionOnDB) EXPECT_THROW(cache.getOsData(agentId), WdbDataException); spSocketDBWrapperMock.reset(); } - From a1f8bc8b0b7f919010902885ba47c9c5c2191b4f Mon Sep 17 00:00:00 2001 From: pereyra-m Date: Thu, 16 May 2024 01:00:14 +0000 Subject: [PATCH 142/419] Adding more details for the tests in case of failure --- .../vulnerability_scanner/qa/Readme.md | 20 ++++++++++++++++ .../qa/test_efficacy_log.py | 24 +++++++++++-------- 2 files changed, 34 insertions(+), 10 deletions(-) create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/Readme.md diff --git a/src/wazuh_modules/vulnerability_scanner/qa/Readme.md b/src/wazuh_modules/vulnerability_scanner/qa/Readme.md new file mode 100644 index 00000000000..038d45b4095 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/Readme.md @@ -0,0 +1,20 @@ +# QA Tests + +This document is meant to describe the current component test behavior and the steps required for its maintenance. + +## Efficacy test + +### Description + +The purpose of this test is to verify the scanner's accuracy when some specific inputs are applied. The results are verified by analyzing the logs written by the test tool. + +The `test_data` folder is read in ascending order, and for each one of them, the corresponding inputs are sent. There is an output expected file that contains all the lines that should be found for those specific inputs. If the line isn't found after the timeout expires, the corresponding error message is printed. The test also verifies that the scan begins/ends properly, and that all the events are processed. + +Consider also that folder `000` only verifies that the DB is properly decompressed, that's why it doesn't contain input files. + +### How to add cases + +When a new test is being added, these are the general steps to follow: +- Create a new folder, use the next available number +- Add `input_xxx.json` files that contain the sync/deltas messages that the vulnerability scanner test tool will process +- For each input, create an `expected_xxx.out` file. The logs in the array will be looked for in the test output. If the inputs only prepares the tests and no output is expected (for example, agent OS information), the file can contain an empty array. 
diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_efficacy_log.py b/src/wazuh_modules/vulnerability_scanner/qa/test_efficacy_log.py index 12833a645e0..f5a024e4442 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_efficacy_log.py +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_efficacy_log.py @@ -105,7 +105,7 @@ def tail_log(file, expected_lines, found_lines, timeout): # Check if the line contains the expected output for expected in expected_lines: if expected in line and not found_lines[expected]: - LOGGER.debug(f"Found line: {line}") + LOGGER.info(f"Found log line: {line}") found_lines[expected] = True @pytest.fixture @@ -174,13 +174,13 @@ def run_process_and_monitor_log(request, run_on_end): if test_folder.name == '000': LOGGER.debug(f"Waiting for the decompression to start.") found = find_regex_in_file(r"Starting database file decompression.", log_file) - assert found, "The decompression did not start." - LOGGER.debug(f"Decompression started") + assert found, "The decompression of the DB did not start." + LOGGER.info(f"Decompression started") else: LOGGER.debug(f"Wating for the process to be initialized") found = find_regex_in_file(r"Vulnerability scanner module started", log_file) - assert found, "The process is not initialized" - LOGGER.debug(f"Process initialized") + assert found, "The process is not initialized, timeout waiting vulnerability scanner module to start." + LOGGER.info(f"Process initialized") expected_json_files = sorted(Path(test_folder).glob("expected_*.out")) expected_lines = [] @@ -233,18 +233,19 @@ def run_process_and_monitor_log(request, run_on_end): found = find_regex_in_file(regex, log_file, len(expected_json_files)) regex = r"Discarded event: DB query not synced" found = found or find_regex_in_file(regex, log_file, len(expected_json_files)) - assert found, "The scan is not finished" + assert found, "The scan is not finished, some events were not processed" + LOGGER.info(f"Scan finished, all events were processed") basetimeout = timeout for expected_line in expected_lines: while not found_lines[expected_line]: - LOGGER.debug(f"Waiting for: {expected_line}") + LOGGER.debug(f"Waiting for log line: {expected_line}") if timeout < 8*basetimeout: tail_log(log_file, expected_lines, found_lines, timeout) timeout = 1.5*timeout else: - LOGGER.error(f"Timeout waiting for: {expected_line}") - basetimeout = timeout + LOGGER.error(f"Timeout waiting for log line: {expected_line}") + timeout = basetimeout break process.terminate() @@ -261,4 +262,7 @@ def test_false_negatives(run_process_and_monitor_log): os.chdir(Path(__file__).parent.parent.parent.parent) found_lines = run_process_and_monitor_log - assert all(found_lines.values()), "The test is failed because the expected output is not found" + for line, found in found_lines.items(): + if not found: + LOGGER.error(f"Log entry not found: {line}") + assert all(found_lines.values()), "The test failed because some expected lines were not found" From 07222c41590af0b38c9d6761745fc0e21ee89db5 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Mon, 20 May 2024 11:29:14 +0200 Subject: [PATCH 143/419] Add upgrade_delay parameter to upgrade task creation test --- api/test/integration/test_agent_PUT_endpoints.tavern.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/api/test/integration/test_agent_PUT_endpoints.tavern.yaml b/api/test/integration/test_agent_PUT_endpoints.tavern.yaml index 5889877dd89..b35937cd745 100644 --- a/api/test/integration/test_agent_PUT_endpoints.tavern.yaml +++ 
b/api/test/integration/test_agent_PUT_endpoints.tavern.yaml @@ -973,6 +973,7 @@ stages: total_failed_items: 0 failed_items: [ ] message: !anystr + delay_after: !float "{upgrade_delay}" - name: Try to upgrade an agent that is updated to the latest version request: From 00cde964596872c3a2a27b3dd6b3e6a15535c206 Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Mon, 29 Apr 2024 08:58:53 +0200 Subject: [PATCH 144/419] fix: downgrade macOS image version for scan-build test --- .github/workflows/scan-build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scan-build.yml b/.github/workflows/scan-build.yml index c2f460902c1..686415107f9 100644 --- a/.github/workflows/scan-build.yml +++ b/.github/workflows/scan-build.yml @@ -63,7 +63,7 @@ jobs: run: exit 1 scan-build-macos-agent: - runs-on: macos-latest + runs-on: macos-13 steps: - uses: actions/checkout@v3 - name: Install dependencies From 4f6f40533d31a24cda4bd36cae12e976068b7626 Mon Sep 17 00:00:00 2001 From: Daniel Sappa Date: Thu, 15 Feb 2024 19:31:01 +0000 Subject: [PATCH 145/419] * MacOS 13 linker parameter fail (#21830) * remove obsolete ld option for all MacOS versions --- src/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Makefile b/src/Makefile index a6fb9bae994..e54c1a02664 100644 --- a/src/Makefile +++ b/src/Makefile @@ -1798,7 +1798,7 @@ ifeq (${uname_S},Darwin) WAZUH_SHFLAGS=-install_name @rpath/libwazuhext.$(SHARED) $(WAZUHEXT_LIB): $(EXTERNAL_LIBS) - $(OSSEC_SHARED) $(OSSEC_CFLAGS) $(WAZUH_SHFLAGS) -o $@ -Wl,-all_load $^ -Wl,-noall_load $(OSSEC_LIBS) + $(OSSEC_SHARED) $(OSSEC_CFLAGS) $(WAZUH_SHFLAGS) -o $@ -Wl,-all_load $^ $(OSSEC_LIBS) else ifeq (${TARGET}, winagent) $(WAZUHEXT_LIB): $(WAZUHEXT_DLL) $(WAZUHEXT_LIB_DEF) @@ -2020,7 +2020,7 @@ ifeq (${uname_S},Darwin) WAZUH_SHARED_SHFLAGS=-install_name @rpath/libwazuhshared.$(SHARED) $(WAZUH_LIB): $(WAZUHEXT_LIB) $(AR_PROGRAMS_DEPS) - $(OSSEC_SHARED) $(OSSEC_CFLAGS) $(WAZUH_SHARED_SHFLAGS) -o $@ -Wl,-all_load $^ -Wl,-noall_load $(OSSEC_LIBS) + $(OSSEC_SHARED) $(OSSEC_CFLAGS) $(WAZUH_SHARED_SHFLAGS) -o $@ -Wl,-all_load $^ $(OSSEC_LIBS) else ifeq (${TARGET}, winagent) $(WAZUH_DLL) $(WAZUH_DEF) : $(WAZUHEXT_DLL) $(AR_PROGRAMS_DEPS) win32/version-dll.o From cf03a7ccf4701408a4921243ab9f5f97e2f8ea13 Mon Sep 17 00:00:00 2001 From: Juan Cabrera Date: Thu, 16 May 2024 12:09:59 +0000 Subject: [PATCH 146/419] Fixed segmentation fault in multiline configuration Co-authored-by: Julian Morales --- src/config/localfile-config.c | 45 +++++++++++++++++++++++++++++++++ src/config/localfile-config.h | 26 +++++++++++++++---- src/headers/expression.h | 2 +- src/logcollector/logcollector.c | 8 ++++++ src/shared/expression.c | 2 +- 5 files changed, 76 insertions(+), 7 deletions(-) diff --git a/src/config/localfile-config.c b/src/config/localfile-config.c index 73d0400bd90..8e0be7e53e8 100644 --- a/src/config/localfile-config.c +++ b/src/config/localfile-config.c @@ -642,6 +642,7 @@ int Read_Localfile(XML_NODE node, void *d1, __attribute__((unused)) void *d2) memset(&log_config->globs[gl + 1], 0, sizeof(logreader_glob)); os_calloc(1, sizeof(logreader), log_config->globs[gl].gfiles); memcpy(log_config->globs[gl].gfiles, &logf[pl], sizeof(logreader)); + logf[pl].multiline = NULL; // Prevent freeing the multiline config in Remove_Localfile log_config->globs[gl].gfiles->file = NULL; /* Wildcard exclusion, check for date */ @@ -683,6 +684,7 @@ int Read_Localfile(XML_NODE node, void *d1, __attribute__((unused)) void *d2) 
memset(&log_config->globs[gl + 1], 0, sizeof(logreader_glob)); os_calloc(1, sizeof(logreader), log_config->globs[gl].gfiles); memcpy(log_config->globs[gl].gfiles, &logf[pl], sizeof(logreader)); + logf[pl].multiline = NULL; // Prevent freeing the multiline config in Remove_Localfile log_config->globs[gl].gfiles->file = NULL; } @@ -800,6 +802,7 @@ void Free_Logreader(logreader * logf) { os_free(logf->ffile); os_free(logf->file); os_free(logf->logformat); + w_multiline_log_config_free(&(logf->multiline)); os_free(logf->djb_program_name); os_free(logf->alias); os_free(logf->query); @@ -840,6 +843,7 @@ void Free_Logreader(logreader * logf) { free(logf->out_format); } + } } @@ -855,6 +859,8 @@ int Remove_Localfile(logreader **logf, int i, int gl, int fr, logreader_glob *gl Free_Logreader(&(*logf)[i]); } else { free((*logf)[i].file); + // If is a glob entry and multiline is set, we need to free the multiline config + w_multiline_log_config_free(&(*logf)[i].multiline); if((*logf)[i].fp) { fclose((*logf)[i].fp); } @@ -969,6 +975,45 @@ const char * multiline_attr_match_str(w_multiline_match_type_t match_type) { return match_str[match_type]; } +void w_multiline_log_config_free(w_multiline_config_t ** config) { + if (config == NULL || *config == NULL) { + return; + } + + if ((*config)->ctxt) { + os_free((*config)->ctxt->buffer); + os_free((*config)->ctxt); + } + w_free_expression_t(&((*config)->regex)); + os_free((*config)); +} + +w_multiline_config_t* w_multiline_log_config_clone(w_multiline_config_t* config) +{ + if (config == NULL) + { + return NULL; + } + + w_multiline_config_t* new_config = NULL; + os_calloc(1, sizeof(w_multiline_config_t), new_config); + + new_config->match_type = config->match_type; + new_config->replace_type = config->replace_type; + new_config->timeout = config->timeout; + + w_calloc_expression_t(&(new_config->regex), config->regex->exp_type); + if (!w_expression_compile(new_config->regex, w_expression_get_regex_pattern(config->regex), 0)) + { + merror_exit("Failed to clone multiline regex"); // Should never happen + } + + // No clone the context + new_config->ctxt = NULL; + + return new_config; +} + STATIC int w_logcollector_get_macos_log_type(const char * content) { const size_t MAX_ARRAY_SIZE = 64; diff --git a/src/config/localfile-config.h b/src/config/localfile-config.h index 400c254f18c..633cb661829 100644 --- a/src/config/localfile-config.h +++ b/src/config/localfile-config.h @@ -19,7 +19,7 @@ #define MULTI_LINE_REGEX_MAX_TIMEOUT 120 #define DATE_MODIFIED 1 #define DEFAULT_EVENTCHANNEL_REC_TIME 5 -#define DIFF_DEFAULT_SIZE 10 * 1024 * 1024 +#define DIFF_DEFAULT_SIZE (10 * 1024 * 1024) #define DEFAULT_FREQUENCY_SECS 360 #define DIFF_MAX_SIZE (2 * 1024 * 1024 * 1024LL) @@ -72,7 +72,7 @@ typedef struct _logtarget { logsocket * log_socket; } logtarget; -/* Logreader config */ +/* -- Multiline regex log format specific configuration -- */ /** * @brief Specifies end-of-line replacement type in multiline log (multi-line-regex log format) */ @@ -118,6 +118,7 @@ typedef struct { int64_t offset_last_read; ///< absolut file offset of last complete multiline log processed } w_multiline_config_t; +/* -- macos log format specific configuration -- */ typedef enum _w_macos_log_state_t { LOG_NOT_RUNNING, LOG_RUNNING_STREAM, @@ -186,8 +187,8 @@ typedef struct _logreader { char *ffile; char *file; char *logformat; - w_multiline_config_t * multiline; ///< Multiline regex config & state - w_macos_log_config_t * macos_log; ///< macOS log config & state + w_multiline_config_t* multiline; 
///< Multiline regex config & state + w_macos_log_config_t* macos_log; ///< macOS log config & state long linecount; char *djb_program_name; char * channel_str; @@ -239,11 +240,26 @@ typedef struct _logreader_config { void Free_Localfile(logreader_config * config); /* Frees a localfile */ -void Free_Logreader(logreader * config); +void Free_Logreader(logreader * logf); /* Removes a specific localfile of an array */ int Remove_Localfile(logreader **logf, int i, int gl, int fr, logreader_glob *globf); +/** + * @brief Free the multiline log config and all its resources + * + * @param multiline Multiline log config + */ +void w_multiline_log_config_free(w_multiline_config_t ** config); + +/** + * @brief Clone a multiline log config + * + * @param config Multiline log config to clone + * @return w_multiline_config_t* Cloned multiline log config + */ +w_multiline_config_t* w_multiline_log_config_clone(w_multiline_config_t * config); + /** * @brief Get match attribute for multiline regex * @param node node to find match value diff --git a/src/headers/expression.h b/src/headers/expression.h index acc4e4a020e..85395edeb19 100644 --- a/src/headers/expression.h +++ b/src/headers/expression.h @@ -95,7 +95,7 @@ bool w_expression_add_osip(w_expression_t ** var, char * ip); * @param flags Compilation flags (dependent on expression type) * @return false on error. True otherwise */ -bool w_expression_compile(w_expression_t * expression, char * pattern, int flags); +bool w_expression_compile(w_expression_t * expression, const char * pattern, int flags); /** * @brief Test match a compiled pattern to string diff --git a/src/logcollector/logcollector.c b/src/logcollector/logcollector.c index a652b823a43..35ecd919c0d 100644 --- a/src/logcollector/logcollector.c +++ b/src/logcollector/logcollector.c @@ -1388,6 +1388,8 @@ int check_pattern_expand(int do_seek) { /* Copy the current item to the end mark as it should be a pattern */ memcpy(globs[j].gfiles + i + 1, globs[j].gfiles + i, sizeof(logreader)); + // Clone the multiline configuration if it exists + globs[j].gfiles[i + 1].multiline = w_multiline_log_config_clone(globs[j].gfiles[i].multiline); os_strdup(g.gl_pathv[glob_offset], globs[j].gfiles[i].file); w_mutex_init(&globs[j].gfiles[i].mutex, &attr); @@ -1415,6 +1417,8 @@ int check_pattern_expand(int do_seek) { /* Copy the current item to the end mark as it should be a pattern */ memcpy(globs[j].gfiles + i + 1, globs[j].gfiles + i, sizeof(logreader)); + // Clone the multiline configuration if it exists + globs[j].gfiles[i + 1].multiline = w_multiline_log_config_clone(globs[j].gfiles[i].multiline); os_strdup(g.gl_pathv[glob_offset], globs[j].gfiles[i].file); w_mutex_init(&globs[j].gfiles[i].mutex, &attr); @@ -1574,6 +1578,8 @@ int check_pattern_expand(int do_seek) { os_realloc(globs[j].gfiles, (i + 2) * sizeof(logreader), globs[j].gfiles); /* Copy the current item to the end mark as it should be a pattern */ memcpy(globs[j].gfiles + i + 1, globs[j].gfiles + i, sizeof(logreader)); + // Clone the multiline configuration if it exists + globs[j].gfiles[i + 1].multiline = w_multiline_log_config_clone(globs[j].gfiles[i].multiline); os_strdup(full_path, globs[j].gfiles[i].file); w_mutex_init(&globs[j].gfiles[i].mutex, &win_el_mutex_attr); @@ -1602,6 +1608,8 @@ int check_pattern_expand(int do_seek) { /* Copy the current item to the end mark as it should be a pattern */ memcpy(globs[j].gfiles + i + 1, globs[j].gfiles + i, sizeof(logreader)); + // Clone the multiline configuration if it exists + globs[j].gfiles[i + 
1].multiline = w_multiline_log_config_clone(globs[j].gfiles[i].multiline); os_strdup(full_path, globs[j].gfiles[i].file); w_mutex_init(&globs[j].gfiles[i].mutex, &win_el_mutex_attr); diff --git a/src/shared/expression.c b/src/shared/expression.c index 90287782dc9..ec2a489fd5e 100644 --- a/src/shared/expression.c +++ b/src/shared/expression.c @@ -144,7 +144,7 @@ bool w_expression_add_osip(w_expression_t ** var, char * ip) { return true; } -bool w_expression_compile(w_expression_t * expression, char * pattern, int flags) { +bool w_expression_compile(w_expression_t * expression, const char * pattern, int flags) { bool retval = true; From e40c525dee6ae39063dcf7c28f33b464091dbd51 Mon Sep 17 00:00:00 2001 From: Julian Morales Date: Fri, 17 May 2024 18:23:51 +0000 Subject: [PATCH 147/419] UT for multiline-regex free and clone added --- .../logcollector/test_localfile-config.c | 82 +++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/src/unit_tests/logcollector/test_localfile-config.c b/src/unit_tests/logcollector/test_localfile-config.c index 8ccc983ef50..969f8301a24 100644 --- a/src/unit_tests/logcollector/test_localfile-config.c +++ b/src/unit_tests/logcollector/test_localfile-config.c @@ -419,6 +419,82 @@ void test_w_logcollector_get_macos_log_type_content_log_multiword_invalid(void * assert_int_equal(ret, MACOS_LOG_TYPE_LOG); } +/* w_multiline_log_config_free */ +void test_w_multiline_log_config_free_null(void **state) +{ + w_multiline_log_config_free(NULL); + + w_multiline_config_t *config = NULL; + w_multiline_log_config_free(&config); +} + +void test_w_multiline_log_config_free_success(void ** state) { + w_multiline_config_t * config = NULL; + os_calloc(1, sizeof(w_multiline_config_t), config); + + // Set a valid config + + // Regex config + w_calloc_expression_t(&config->regex, EXP_TYPE_PCRE2); + assert_true(w_expression_compile(config->regex, "valid regex .*", 0)); + + // collector config + config->match_type = ML_MATCH_START; + config->replace_type = ML_REPLACE_NO_REPLACE; + config->timeout = 10; + + // Simulate non-empty ctxt + os_calloc(1, sizeof(w_multiline_ctxt_t), config->ctxt); + os_calloc(100, sizeof(char), config->ctxt->buffer); + + w_multiline_log_config_free(&config); + assert_null(config); +} + +// Test w_multiline_log_config_clone +void test_w_multiline_log_config_clone_null(void ** state) { + assert_null(w_multiline_log_config_clone(NULL)); +} + +void test_w_multiline_log_config_clone_success(void ** state) { + + + w_multiline_config_t * config = NULL; + os_calloc(1, sizeof(w_multiline_config_t), config); + + // Set a valid config + w_calloc_expression_t(&config->regex, EXP_TYPE_PCRE2); + assert_true(w_expression_compile(config->regex, "valid regex .*", 0)); + + // collector config + config->match_type = ML_MATCH_END; + config->replace_type = ML_REPLACE_NONE; + config->timeout = 10; + + // Simulate non-empty ctxt + os_calloc(1, sizeof(w_multiline_ctxt_t), config->ctxt); + os_calloc(100, sizeof(char), config->ctxt->buffer); + + + // Test clone + w_multiline_config_t * cloned_config = w_multiline_log_config_clone(config); + w_multiline_log_config_free(&config); + + // Checks + assert_non_null(cloned_config); + assert_non_null(cloned_config->regex); + assert_string_equal(w_expression_get_regex_pattern(cloned_config->regex), "valid regex .*"); + + assert_int_equal(cloned_config->match_type, ML_MATCH_END); + assert_int_equal(cloned_config->replace_type, ML_REPLACE_NONE); + assert_int_equal(cloned_config->timeout, 10); + + assert_null(cloned_config->ctxt); // Should 
be a empty context + + w_multiline_log_config_free(&cloned_config); + +} + int main(void) { const struct CMUnitTest tests[] = { // Tests replace_char @@ -461,6 +537,12 @@ int main(void) { cmocka_unit_test(test_w_logcollector_get_macos_log_type_content_trace_activity), cmocka_unit_test(test_w_logcollector_get_macos_log_type_content_trace_log_activity), cmocka_unit_test(test_w_logcollector_get_macos_log_type_content_log_multiword_invalid), + // Test w_multiline_log_config_free + cmocka_unit_test(test_w_multiline_log_config_free_null), + cmocka_unit_test(test_w_multiline_log_config_free_success), + // Test w_multiline_log_config_clone + cmocka_unit_test(test_w_multiline_log_config_clone_null), + cmocka_unit_test(test_w_multiline_log_config_clone_success), }; From abd19af643eccda7be6ebe65b9ae4f032afb8e94 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Tue, 21 May 2024 13:00:19 -0300 Subject: [PATCH 148/419] CL: - Added new paths to macOS sysinfo for pypi --- src/data_provider/src/sysInfoMac.cpp | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/data_provider/src/sysInfoMac.cpp b/src/data_provider/src/sysInfoMac.cpp index 06d6599199f..c599ff4e513 100644 --- a/src/data_provider/src/sysInfoMac.cpp +++ b/src/data_provider/src/sysInfoMac.cpp @@ -431,9 +431,23 @@ void SysInfo::getPackages(std::function callback) const } } + // Add all the unix default paths + std::set pypyMacOSPaths = + { + UNIX_PYPI_DEFAULT_BASE_DIRS.begin(), + UNIX_PYPI_DEFAULT_BASE_DIRS.end() + }; + + // Add macOS specific paths + pypyMacOSPaths.emplace("/Library/Python/*/*-packages"); + pypyMacOSPaths.emplace("/Library/Frameworks/Python.framework/Versions/*/lib/python*/*-packages"); + pypyMacOSPaths.emplace( + "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/*/lib/python*/*-package"); + pypyMacOSPaths.emplace("/System/Library/Frameworks/Python.framework/*-packages"); + static const std::map> searchPaths = { - {"PYPI", UNIX_PYPI_DEFAULT_BASE_DIRS}, + {"PYPI", pypyMacOSPaths}, {"NPM", UNIX_NPM_DEFAULT_BASE_DIRS} }; ModernFactoryPackagesCreator::getPackages(searchPaths, callback); From 1648f047f835e5cb737871c0f8756aa600852bfd Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Tue, 21 May 2024 15:58:04 -0300 Subject: [PATCH 149/419] CL: - Fixed style --- src/data_provider/src/sysInfoMac.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/data_provider/src/sysInfoMac.cpp b/src/data_provider/src/sysInfoMac.cpp index c599ff4e513..16bd5fb93b4 100644 --- a/src/data_provider/src/sysInfoMac.cpp +++ b/src/data_provider/src/sysInfoMac.cpp @@ -434,7 +434,7 @@ void SysInfo::getPackages(std::function callback) const // Add all the unix default paths std::set pypyMacOSPaths = { - UNIX_PYPI_DEFAULT_BASE_DIRS.begin(), + UNIX_PYPI_DEFAULT_BASE_DIRS.begin(), UNIX_PYPI_DEFAULT_BASE_DIRS.end() }; From 110c3866b110b9a35d5c7472544fc677d680cb4d Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Wed, 22 May 2024 12:28:03 +0200 Subject: [PATCH 150/419] fix: prevent crash in wazuh-syscheckd on parsing non-UTF8 files names --- src/error_messages/warning_messages.h | 1 + src/syscheckd/src/create_db.c | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/src/error_messages/warning_messages.h b/src/error_messages/warning_messages.h index 9a5a09e58e2..90461a3dff1 100644 --- a/src/error_messages/warning_messages.h +++ b/src/error_messages/warning_messages.h @@ -71,6 +71,7 @@ #define FIM_WHODATA_POLICY_CHANGE_CHECKER "(6952): Audit policy change detected. 
Switching directories to realtime." #define FIM_WHODATA_POLICY_CHANGE_CHANNEL "(6953): Event 4719 received due to changes in audit policy. Switching directories to realtime." #define FIM_EMPTY_CHANGED_ATTRIBUTES "(6954): Entry '%s' does not have any modified fields. No event will be generated." +#define FIM_INVALID_FILE_NAME "(6955): Ignoring file '%s' due to unsupported name (non-UTF8)." /* Monitord warning messages */ #define ROTATE_LOG_LONG_PATH "(7500): The path of the rotated log is too long." diff --git a/src/syscheckd/src/create_db.c b/src/syscheckd/src/create_db.c index 3306e138605..e43ccd25ae1 100644 --- a/src/syscheckd/src/create_db.c +++ b/src/syscheckd/src/create_db.c @@ -628,6 +628,11 @@ void fim_checker(const char *path, directory_t *configuration; int depth; + if (!w_utf8_valid(path)) { + mwarn(FIM_INVALID_FILE_NAME, path); + return; + } + #ifdef WIN32 // Ignore the recycle bin. if (check_removed_file(path)){ From f82ec56bf330c685b538ddab8e8bcc8f5db6a6f9 Mon Sep 17 00:00:00 2001 From: Jotacarma90 Date: Wed, 22 May 2024 12:29:03 +0200 Subject: [PATCH 151/419] fix: prevent FIM from aborting due to uncontrolled exceptions --- src/syscheckd/src/db/src/db.cpp | 36 ++++++++++++++++++++++++++------- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/src/syscheckd/src/db/src/db.cpp b/src/syscheckd/src/db/src/db.cpp index 3cd660919ae..a062b062201 100644 --- a/src/syscheckd/src/db/src/db.cpp +++ b/src/syscheckd/src/db/src/db.cpp @@ -166,7 +166,14 @@ FIMDBErrorCode fim_db_init(int storage, FIMDB::instance().setTimeLastSyncMsg(); } - sync_callback(FIM_COMPONENT_FILE, json.dump().c_str()); + try + { + sync_callback(FIM_COMPONENT_FILE, json.dump().c_str()); + } + catch (std::exception& err) + { + FIMDB::instance().logFunction(LOG_ERROR, err.what()); + } } } }; @@ -218,7 +225,14 @@ FIMDBErrorCode fim_db_init(int storage, FIMDB::instance().setTimeLastSyncMsg(); } - sync_callback(component.c_str(), json.dump().c_str()); + try + { + sync_callback(component.c_str(), json.dump().c_str()); + } + catch (std::exception& err) + { + FIMDB::instance().logFunction(LOG_ERROR, err.what()); + } } } }; @@ -347,14 +361,22 @@ FIMDBErrorCode fim_db_transaction_sync_row(TXN_HANDLE txn_handler, const fim_ent } } - const std::unique_ptr jsInput + try { - cJSON_Parse((*syncItem->toJSON()).dump().c_str()) - }; - if (dbsync_sync_txn_row(txn_handler, jsInput.get()) == 0) + const std::unique_ptr jsInput + { + cJSON_Parse((*syncItem->toJSON()).dump().c_str()) + }; + + if (dbsync_sync_txn_row(txn_handler, jsInput.get()) == 0) + { + retval = FIMDB_OK; + } + } + catch (std::exception& err) { - retval = FIMDB_OK; + FIMDB::instance().logFunction(LOG_ERROR, err.what()); } } From e498b71d411c3af17c16e607949f1014e30c5b58 Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Wed, 22 May 2024 12:49:02 +0200 Subject: [PATCH 152/419] test: add FIM unit test to check unsuported file names --- src/unit_tests/syscheckd/test_create_db.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/unit_tests/syscheckd/test_create_db.c b/src/unit_tests/syscheckd/test_create_db.c index abd3f84e93f..f94583e3826 100644 --- a/src/unit_tests/syscheckd/test_create_db.c +++ b/src/unit_tests/syscheckd/test_create_db.c @@ -2747,6 +2747,13 @@ static void test_fim_scan_no_limit(void **state) { #endif +static void test_fim_checker_unsupported_path(void **state) { + const char * PATH = "Unsupported\xFF\x02"; + expect_string(__wrap__mwarn, formatted_msg, "(6955): Ignoring file 'Unsupported\xFF\x02' due to unsupported name 
(non-UTF8)."); + + fim_checker(PATH, NULL, NULL, NULL, NULL); +} + /* fim_check_db_state */ static void test_fim_check_db_state_normal_to_empty(void **state) { @@ -4388,6 +4395,7 @@ int main(void) { #ifndef TEST_WINAGENT cmocka_unit_test_setup_teardown(test_fim_checker_fim_directory_on_max_recursion_level, setup_struct_dirent, teardown_struct_dirent), #endif + cmocka_unit_test(test_fim_checker_unsupported_path), /* fim_directory */ cmocka_unit_test_setup_teardown(test_fim_directory, setup_struct_dirent, teardown_struct_dirent), From 03a2a6ede4c3e0bc3a6a9c7ee88e5489df5a0176 Mon Sep 17 00:00:00 2001 From: Tomas Turina Date: Wed, 22 May 2024 15:32:52 +0000 Subject: [PATCH 153/419] Update changelog for v4.7.5 --- CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 13bffa310c8..407b651464a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,24 @@ All notable changes to this project will be documented in this file. ## [v4.7.5] +### Manager + +#### Added + +- Added a database endpoint to recalculate the hash of agent groups. ([#23441](https://github.com/wazuh/wazuh/pull/23441)) + +#### Fixed + +- Fixed an issue in a cluster task where full group synchronization was constantly triggered. ([#23447](https://github.com/wazuh/wazuh/pull/23447)) +- Fix race condition when creating agent database files from a template. ([#23216](https://github.com/wazuh/wazuh/pull/23216)) + +### Agent + +#### Fixed + +- Fix segmentation fault in logcollector multiline-regex configuration. ([#23468](https://github.com/wazuh/wazuh/pull/23468)) +- Fix crash in fim when processing paths with non UTF-8 characters. ([#23543](https://github.com/wazuh/wazuh/pull/23543)) + ## [v4.7.4] From 5e0f8c63e2c4460ba9af44e1aeeb095c6e35008d Mon Sep 17 00:00:00 2001 From: Tomas Turina Date: Wed, 22 May 2024 15:34:49 +0000 Subject: [PATCH 154/419] Bump 4.7.5 branch to revision 40719 --- api/api/spec/spec.yaml | 2 +- framework/wazuh/core/cluster/__init__.py | 2 +- src/Doxyfile | 2 +- src/REVISION | 2 +- src/init/wazuh-client.sh | 2 +- src/init/wazuh-local.sh | 2 +- src/init/wazuh-server.sh | 2 +- src/win32/wazuh-installer.nsi | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index fdf4093a297..d4ac5f0e4b4 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -41,7 +41,7 @@ info: version: '4.7.5' - x-revision: '40718' + x-revision: '40719' title: 'Wazuh API REST' license: name: 'GPL 2.0' diff --git a/framework/wazuh/core/cluster/__init__.py b/framework/wazuh/core/cluster/__init__.py index 3754106f44c..c49a33b76f0 100644 --- a/framework/wazuh/core/cluster/__init__.py +++ b/framework/wazuh/core/cluster/__init__.py @@ -5,7 +5,7 @@ # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 __version__ = '4.7.5' -__revision__ = '40718' +__revision__ = '40719' __author__ = "Wazuh Inc" __wazuh_name__ = "Wazuh" __licence__ = "\ diff --git a/src/Doxyfile b/src/Doxyfile index ff0d51ba7ec..599e3fd3f7f 100644 --- a/src/Doxyfile +++ b/src/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = "WAZUH" # could be handy for archiving the generated documentation or if some version # control system is used. 
-PROJECT_NUMBER = "v4.7.5-40718" +PROJECT_NUMBER = "v4.7.5-40719" # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/src/REVISION b/src/REVISION index 4119a90ff7a..bcc76d3f7a1 100644 --- a/src/REVISION +++ b/src/REVISION @@ -1 +1 @@ -40718 +40719 diff --git a/src/init/wazuh-client.sh b/src/init/wazuh-client.sh index bc50b5daebf..7742369b336 100755 --- a/src/init/wazuh-client.sh +++ b/src/init/wazuh-client.sh @@ -12,7 +12,7 @@ DIR=`dirname $PWD`; # Installation info VERSION="v4.7.5" -REVISION="40718" +REVISION="40719" TYPE="agent" ### Do not modify below here ### diff --git a/src/init/wazuh-local.sh b/src/init/wazuh-local.sh index 24bef291051..be49ea3496c 100644 --- a/src/init/wazuh-local.sh +++ b/src/init/wazuh-local.sh @@ -14,7 +14,7 @@ PLIST=${DIR}/bin/.process_list; # Installation info VERSION="v4.7.5" -REVISION="40718" +REVISION="40719" TYPE="local" ### Do not modify below here ### diff --git a/src/init/wazuh-server.sh b/src/init/wazuh-server.sh index 12fb7d699e3..a1800dcd007 100755 --- a/src/init/wazuh-server.sh +++ b/src/init/wazuh-server.sh @@ -14,7 +14,7 @@ PLIST=${DIR}/bin/.process_list; # Installation info VERSION="v4.7.5" -REVISION="40718" +REVISION="40719" TYPE="server" ### Do not modify below here ### diff --git a/src/win32/wazuh-installer.nsi b/src/win32/wazuh-installer.nsi index 99741ed3741..06e15139504 100644 --- a/src/win32/wazuh-installer.nsi +++ b/src/win32/wazuh-installer.nsi @@ -21,7 +21,7 @@ !define MUI_ICON install.ico !define MUI_UNICON uninstall.ico !define VERSION "4.7.5" -!define REVISION "40718" +!define REVISION "40719" !define NAME "Wazuh" !define SERVICE "WazuhSvc" From e5a1a89702598b98fd4944469a6b538a0cf2dff0 Mon Sep 17 00:00:00 2001 From: Tomas Turina Date: Wed, 22 May 2024 15:47:48 +0000 Subject: [PATCH 155/419] Replace fix with fixed in changelog --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 407b651464a..c36c1728b40 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,14 +12,14 @@ All notable changes to this project will be documented in this file. #### Fixed - Fixed an issue in a cluster task where full group synchronization was constantly triggered. ([#23447](https://github.com/wazuh/wazuh/pull/23447)) -- Fix race condition when creating agent database files from a template. ([#23216](https://github.com/wazuh/wazuh/pull/23216)) +- Fixed race condition when creating agent database files from a template. ([#23216](https://github.com/wazuh/wazuh/pull/23216)) ### Agent #### Fixed -- Fix segmentation fault in logcollector multiline-regex configuration. ([#23468](https://github.com/wazuh/wazuh/pull/23468)) -- Fix crash in fim when processing paths with non UTF-8 characters. ([#23543](https://github.com/wazuh/wazuh/pull/23543)) +- Fixed segmentation fault in logcollector multiline-regex configuration. ([#23468](https://github.com/wazuh/wazuh/pull/23468)) +- Fixed crash in fim when processing paths with non UTF-8 characters. 
([#23543](https://github.com/wazuh/wazuh/pull/23543)) ## [v4.7.4] From efc2527acecc22b3ced6ef9f8958dd6982b46bb8 Mon Sep 17 00:00:00 2001 From: Sebastian Falcone Date: Wed, 22 May 2024 13:45:35 -0300 Subject: [PATCH 156/419] CL: - Fixed typo Co-authored-by: Octavio Valle --- src/data_provider/src/sysInfoMac.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/data_provider/src/sysInfoMac.cpp b/src/data_provider/src/sysInfoMac.cpp index 16bd5fb93b4..02ef4dd0a5b 100644 --- a/src/data_provider/src/sysInfoMac.cpp +++ b/src/data_provider/src/sysInfoMac.cpp @@ -442,7 +442,7 @@ void SysInfo::getPackages(std::function callback) const pypyMacOSPaths.emplace("/Library/Python/*/*-packages"); pypyMacOSPaths.emplace("/Library/Frameworks/Python.framework/Versions/*/lib/python*/*-packages"); pypyMacOSPaths.emplace( - "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/*/lib/python*/*-package"); + "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/*/lib/python*/*-packages"); pypyMacOSPaths.emplace("/System/Library/Frameworks/Python.framework/*-packages"); static const std::map> searchPaths = From 0702151489a10a637df408e0270dd2f5e3fa40ae Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 22 May 2024 16:35:29 -0300 Subject: [PATCH 157/419] CL: - Added handling for source field on feedHGlobalMap - Updated UTs --- .../databaseFeedManager.hpp | 22 ++++++++ .../src/scanOrchestrator/packageScanner.hpp | 11 +++- .../tests/mocks/MockDatabaseFeedManager.hpp | 6 ++ .../tests/unit/databaseFeedManager_test.cpp | 26 ++++++++- .../tests/unit/packageScanner_test.cpp | 55 +++++++++++++++++++ 5 files changed, 116 insertions(+), 4 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp index d5cd28212ff..3ad2612b745 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp @@ -636,6 +636,28 @@ class TDatabaseFeedManager final : public Observer NSVulnerabilityScanner::GetVulnerabilityDescription(resultContainer.slice.data())); } + /** + * @brief Get CNA/ADP name based on the package source. + * + * @param source Package source. + * @return std::string CNA/ADP name. Empty string otherwise. + */ + std::string getCnaNameBySource(std::string_view source) const + { + if (const auto& vendorMap = GlobalData::instance().vendorMaps(); vendorMap.contains("source")) + { + for (const auto& item : vendorMap.at("source")) + { + if (source == item.begin().key()) + { + return item.begin().value(); + } + } + } + + return {}; + } + /** * @brief Get CNA/ADP name based on the package format. * @param format Package format. 
diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp index 8374b6099e6..c15934518e6 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp @@ -182,14 +182,19 @@ class TPackageScanner final : public AbstractHandlergetCnaNameByPrefix(ctx->packageVendor().data(), ctx->osPlatform().data()); + cnaName = m_databaseFeedManager->getCnaNameBySource(ctx->packageSource().data()); if (cnaName.empty()) { cnaName = - m_databaseFeedManager->getCnaNameByContains(ctx->packageVendor().data(), ctx->osPlatform().data()); + m_databaseFeedManager->getCnaNameByPrefix(ctx->packageVendor().data(), ctx->osPlatform().data()); if (cnaName.empty()) { - return DEFAULT_CNA; + cnaName = m_databaseFeedManager->getCnaNameByContains(ctx->packageVendor().data(), + ctx->osPlatform().data()); + if (cnaName.empty()) + { + return DEFAULT_CNA; + } } } } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp index b34646f2e85..2b186eef957 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp @@ -116,6 +116,12 @@ class MockDatabaseFeedManager * */ MOCK_METHOD(std::string, getCnaNameByPrefix, (std::string_view vendor, std::string_view platform), ()); + + /** + * @brief Mock method for getCnaNameBySource + * + */ + MOCK_METHOD(std::string, getCnaNameBySource, (std::string_view source), ()); }; #endif // _MOCK_DATABASEFEEDMANAGER_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp index c89c0f0215d..2db9a0b9098 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp @@ -222,7 +222,7 @@ void DatabaseFeedManagerTest::SetUp() { rocksDBWrapper.createColumn(VENDOR_MAP_COLUMN); } - auto map = R"({"prefix": [],"contains": [],"format": []})"; + auto map = R"({"prefix": [],"contains": [],"format": [], "source": []})"; rocksDBWrapper.put("FEED-GLOBAL", map, VENDOR_MAP_COLUMN); @@ -1557,6 +1557,9 @@ void DatabaseFeedManagerVendorMapTest::SetUp() "format": [ {"pypi": "pypi"}, {"npm": "npm"} + ], + "source": [ + {"homebrew": "homebrew"} ] })"; @@ -1688,6 +1691,27 @@ TEST_F(DatabaseFeedManagerVendorMapTest, TestGetCnaNameByContains) EXPECT_EQ(cnaName, ""); } +TEST_F(DatabaseFeedManagerVendorMapTest, TestGetCnaNameBySource) +{ + auto cnaName = m_spDatabaseFeedManager->getCnaNameBySource(" "); + EXPECT_EQ(cnaName, ""); + + cnaName = m_spDatabaseFeedManager->getCnaNameBySource("linux-meta"); + EXPECT_EQ(cnaName, ""); + + cnaName = m_spDatabaseFeedManager->getCnaNameBySource("python-click"); + EXPECT_EQ(cnaName, ""); + + cnaName = m_spDatabaseFeedManager->getCnaNameBySource("util-linux"); + EXPECT_EQ(cnaName, ""); + + cnaName = m_spDatabaseFeedManager->getCnaNameBySource("invalid"); + EXPECT_EQ(cnaName, ""); + + cnaName = m_spDatabaseFeedManager->getCnaNameBySource("homebrew"); + EXPECT_EQ(cnaName, "homebrew"); +} + TEST_F(DatabaseFeedManagerMessageProcessorTest, FileProcessingOffsetAndHashUpdate) { std::atomic shouldStop 
{false}; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp index 6c4ae60f3f0..4e10d11d580 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp @@ -1941,6 +1941,7 @@ TEST_F(PackageScannerTest, TestGetCnaNameByPrefix) auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameBySource(_)).WillOnce(testing::Return("")); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByPrefix(_, _)).WillOnce(testing::Return("suse")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates("suse_server_15", _, _)); @@ -1991,6 +1992,7 @@ TEST_F(PackageScannerTest, TestGetCnaNameByContains) auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameBySource(_)).WillOnce(testing::Return("")); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByPrefix(_, _)).WillOnce(testing::Return("")); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByContains(_, _)).WillOnce(testing::Return("suse")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates("suse_server_15", _, _)); @@ -2016,6 +2018,58 @@ TEST_F(PackageScannerTest, TestGetCnaNameByContains) EXPECT_NO_THROW(packageScanner.handleRequest(scanContextOriginal)); } +TEST_F(PackageScannerTest, TestGetCnaNameBySource) +{ + Os osData {.hostName = "osdata_hostname", + .architecture = "osdata_architecture", + .name = "osdata_name", + .codeName = "upstream", + .majorVersion = "15", + .minorVersion = "osdata_minorVersion", + .patch = "osdata_patch", + .build = "osdata_build", + .platform = "sles", + .version = "osdata_version", + .release = "osdata_release", + .displayVersion = "osdata_displayVersion", + .sysName = "osdata_sysName", + .kernelVersion = "osdata_kernelVersion", + .kernelRelease = "osdata_kernelRelease"}; + + spOsDataCacheMock = std::make_shared(); + EXPECT_CALL(*spOsDataCacheMock, getOsData(_)).WillRepeatedly(testing::Return(osData)); + + spRemediationDataCacheMock = std::make_shared(); + EXPECT_CALL(*spRemediationDataCacheMock, getRemediationData(_)).WillRepeatedly(testing::Return(Remediation {})); + + auto spDatabaseFeedManagerMock = std::make_shared(); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameBySource(_)).WillOnce(testing::Return("cnaName")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByPrefix(_, _)).Times(0); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByContains(_, _)).Times(0); + EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates("cnaName", _, _)); + + flatbuffers::Parser parser; + ASSERT_TRUE(parser.Parse(syscollector_deltas_SCHEMA)); + ASSERT_TRUE(parser.Parse(DELTA_PACKAGES_INSERTED_MSG.c_str())); + uint8_t* buffer = parser.builder_.GetBufferPointer(); + std::variant + syscollectorDelta = SyscollectorDeltas::GetDelta(reinterpret_cast(buffer)); + auto scanContextOriginal = + std::make_shared>( + syscollectorDelta); + + spGlobalDataMock = std::make_shared(); + EXPECT_CALL(*spGlobalDataMock, cnaMappings()).WillOnce(testing::Return(CNA_MAPPINGS)); + + TPackageScanner, + 
TrampolineGlobalData> + packageScanner(spDatabaseFeedManagerMock); + + EXPECT_NO_THROW(packageScanner.handleRequest(scanContextOriginal)); +} + TEST_F(PackageScannerTest, TestGetDefaultCna) { Os osData {.hostName = "osdata_hostname", @@ -2042,6 +2096,7 @@ TEST_F(PackageScannerTest, TestGetDefaultCna) auto spDatabaseFeedManagerMock = std::make_shared(); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByFormat(_)).WillOnce(testing::Return("")); + EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameBySource(_)).WillOnce(testing::Return("")); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByPrefix(_, _)).WillOnce(testing::Return("")); EXPECT_CALL(*spDatabaseFeedManagerMock, getCnaNameByContains(_, _)).WillOnce(testing::Return("")); EXPECT_CALL(*spDatabaseFeedManagerMock, getVulnerabilitiesCandidates(DEFAULT_CNA, _, _)); From 1ab7e7495fa2f7c94843212749125c8b13f9d394 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 22 May 2024 16:39:37 -0300 Subject: [PATCH 158/419] CL: - Updated efficacy tests (no results expected due to missing brew feed) --- .../qa/test_data/014/expected_001.out | 100 ++++++++++++++++++ .../qa/test_data/014/expected_002.out | 2 + .../qa/test_data/014/input_001.json | 23 ++++ .../qa/test_data/014/input_002.json | 24 +++++ 4 files changed, 149 insertions(+) create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/014/expected_001.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/014/expected_002.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/014/input_001.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data/014/input_002.json diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/014/expected_001.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/014/expected_001.out new file mode 100644 index 00000000000..783c4147505 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/014/expected_001.out @@ -0,0 +1,100 @@ +[ +"Processing and publish key: CVE-2024-23224", +"Processing and publish key: CVE-2024-23222", +"Processing and publish key: CVE-2024-23215", +"Processing and publish key: CVE-2024-23211", +"Processing and publish key: CVE-2024-23210", +"Processing and publish key: CVE-2024-23209", +"Processing and publish key: CVE-2024-23207", +"Processing and publish key: CVE-2024-23218", +"Processing and publish key: CVE-2024-23206", +"Processing and publish key: CVE-2024-23204", +"Processing and publish key: CVE-2024-23213", +"Processing and publish key: CVE-2024-23203", +"Processing and publish key: CVE-2023-4751", +"Processing and publish key: CVE-2023-4750", +"Processing and publish key: CVE-2023-4736", +"Processing and publish key: CVE-2023-4734", +"Processing and publish key: CVE-2024-23214", +"Processing and publish key: CVE-2023-4733", +"Processing and publish key: CVE-2023-45866", +"Processing and publish key: CVE-2023-42940", +"Processing and publish key: CVE-2023-4752", +"Processing and publish key: CVE-2023-42935", +"Processing and publish key: CVE-2023-42926", +"Processing and publish key: CVE-2023-42924", +"Processing and publish key: CVE-2023-42919", +"Processing and publish key: CVE-2023-42916", +"Processing and publish key: CVE-2023-42912", +"Processing and publish key: CVE-2024-23212", +"Processing and publish key: CVE-2023-42911", +"Processing and publish key: CVE-2024-23217", +"Processing and publish key: CVE-2023-42909", +"Processing and publish key: CVE-2023-42438", +"Processing and publish key: CVE-2023-42844", +"Processing 
and publish key: CVE-2023-42906", +"Processing and publish key: CVE-2023-41072", +"Processing and publish key: CVE-2023-4781", +"Processing and publish key: CVE-2023-42887", +"Processing and publish key: CVE-2023-41983", +"Processing and publish key: CVE-2023-42905", +"Processing and publish key: CVE-2023-41975", +"Processing and publish key: CVE-2023-40408", +"Processing and publish key: CVE-2023-4738", +"Processing and publish key: CVE-2023-42891", +"Processing and publish key: CVE-2023-42890", +"Processing and publish key: CVE-2023-42917", +"Processing and publish key: CVE-2023-41997", +"Processing and publish key: CVE-2023-42902", +"Processing and publish key: CVE-2023-42904", +"Processing and publish key: CVE-2023-41988", +"Processing and publish key: CVE-2023-41982", +"Processing and publish key: CVE-2023-42888", +"Processing and publish key: CVE-2023-40405", +"Processing and publish key: CVE-2023-40444", +"Processing and publish key: CVE-2023-42901", +"Processing and publish key: CVE-2023-42908", +"Processing and publish key: CVE-2023-40413", +"Processing and publish key: CVE-2023-42932", +"Processing and publish key: CVE-2023-42841", +"Processing and publish key: CVE-2023-40404", +"Processing and publish key: CVE-2023-41977", +"Processing and publish key: CVE-2023-41989", +"Processing and publish key: CVE-2023-42852", +"Processing and publish key: CVE-2023-38403", +"Processing and publish key: CVE-2023-41254", +"Processing and publish key: CVE-2023-42854", +"Processing and publish key: CVE-2024-23223", +"Processing and publish key: CVE-2023-40423", +"Processing and publish key: CVE-2023-42922", +"Processing and publish key: CVE-2023-41976", +"Processing and publish key: CVE-2023-30774", +"Processing and publish key: CVE-2023-42842", +"Processing and publish key: CVE-2023-42847", +"Processing and publish key: CVE-2023-42857", +"Processing and publish key: CVE-2023-40416", +"Processing and publish key: CVE-2023-42881", +"Processing and publish key: CVE-2023-42845", +"Processing and publish key: CVE-2023-42910", +"Processing and publish key: CVE-2023-40449", +"Processing and publish key: CVE-2023-40447", +"Processing and publish key: CVE-2023-42856", +"Processing and publish key: CVE-2023-42849", +"Processing and publish key: CVE-2023-42850", +"Processing and publish key: CVE-2023-42874", +"Processing and publish key: CVE-2023-42882", +"Processing and publish key: CVE-2023-42883", +"Processing and publish key: CVE-2024-23208", +"Processing and publish key: CVE-2023-42861", +"Processing and publish key: CVE-2023-42884", +"Processing and publish key: CVE-2023-42914", +"Processing and publish key: CVE-2023-40421", +"Processing and publish key: CVE-2023-42886", +"Processing and publish key: CVE-2023-42937", +"Processing and publish key: CVE-2023-42894", +"Processing and publish key: CVE-2023-42898", +"Processing and publish key: CVE-2023-42899", +"Processing and publish key: CVE-2023-42900", +"Processing and publish key: CVE-2023-42903", +"Processing and publish key: CVE-2023-42907" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/014/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/014/expected_002.out new file mode 100644 index 00000000000..0d4f101c7a3 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/014/expected_002.out @@ -0,0 +1,2 @@ +[ +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/014/input_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/014/input_001.json new file mode 100644 
index 00000000000..2d1374e600b --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/014/input_001.json @@ -0,0 +1,23 @@ +{ + "agent_info": { + "agent_id": "001" + }, + "data_type": "state", + "data": { + "attributes_type": "syscollector_osinfo", + "attributes": { + "architecture":"x86_64", + "checksum":"1691178971959743855", + "hostname":"fd9b83c25f30", + "os_major":"14", + "os_minor":"0", + "os_name":"macOS", + "os_platform":"darwin", + "os_version":"14.0", + "release":"5.4.0-155-generic", + "scan_time":"2023/08/04 19:56:11", + "sysname":"macOS", + "version":"darwin 23.0" + } + } +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/014/input_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data/014/input_002.json new file mode 100644 index 00000000000..137e9c545c8 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/014/input_002.json @@ -0,0 +1,24 @@ +{ + "agent_info": { + "agent_id": "001" + }, + "data_type": "dbsync_packages", + "data": { + "version": "1.24.5", + "vendor": " ", + "architecture": " ", + "name": "wget", + "size": 0, + "format": "pkg", + "checksum": "16f028c029a9d70dbf517739d642af4ce2e91cad", + "description": "Internet file retriever", + "install_time": "2024/04/03 22:53:13", + "item_id": "cf15b75fa23de290a28dce697d0455abe1d273f5", + "multiarch": null, + "priority": " ", + "scan_time": "2024/04/24 14:11:04", + "source": "homebrew" + }, + "operation": "INSERTED" +} + From a23f7a9a473fe252e180e09ba110503cf03e7162 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 22 May 2024 16:44:30 -0300 Subject: [PATCH 159/419] CL: - Style fixed --- .../tests/mocks/MockDatabaseFeedManager.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp index 2b186eef957..7fc88c43e2a 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp @@ -119,7 +119,7 @@ class MockDatabaseFeedManager /** * @brief Mock method for getCnaNameBySource - * + * */ MOCK_METHOD(std::string, getCnaNameBySource, (std::string_view source), ()); }; From 3c96e47384a82876ccceb918621077afe1bee562 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 22 May 2024 18:59:35 -0300 Subject: [PATCH 160/419] CL: - Finished to align UTs with changes on content --- .../tests/unit/databaseFeedManager_test.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp index 2db9a0b9098..2ebfc6dbfab 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp @@ -891,7 +891,7 @@ void DatabaseFeedManagerMessageProcessorTest::SetUp() std::ofstream file4 {"file4.json"}; // add feed-global to db - file4 << R"({"name": "FEED-GLOBAL", "payload":{"prefix": [],"contains": [],"format": []}})"; + file4 << R"({"name": "FEED-GLOBAL", "payload":{"prefix": [],"contains": [],"format": [], "source": []}})"; // Add new line file4 << "\n"; // add oscpe-global to db @@ -912,7 +912,7 @@ void DatabaseFeedManagerMessageProcessorTest::SetUp() spRocksDBWrapper->createColumn(VENDOR_MAP_COLUMN); } - auto map = 
R"({"prefix": [],"contains": [],"format": []})"; + auto map = R"({"prefix": [],"contains": [],"format": [], "source": []})"; spRocksDBWrapper->put("FEED-GLOBAL", map, VENDOR_MAP_COLUMN); From 7e2e9dd304877f33c689e8fc91ea9af0fe989b56 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Wed, 22 May 2024 23:06:49 -0300 Subject: [PATCH 161/419] CL: - Baseline to identify changes on cluster (re arranged methods) --- .../src/vulnerabilityScannerFacade.cpp | 89 +++++++++---------- .../src/vulnerabilityScannerFacade.hpp | 29 ++++-- 2 files changed, 63 insertions(+), 55 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp index a35d2080137..f38a392a7c0 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp @@ -35,6 +35,10 @@ constexpr auto VD_STATE_QUEUE_PATH = "queue/vd/state_track"; constexpr auto VD_KEYSTORE_PATH = "queue/keystore"; constexpr auto VD_DATABASE_PATH {"queue/vd"}; constexpr auto VD_DATABASE_VERSION_KEY {"installed_content"}; +constexpr auto VD_STATE_KEY {"vulnerability_scanner_previous_state"}; +constexpr auto VD_MANAGER_STATE_KEY {"manager_scan_previous_state"}; +constexpr auto VD_DISABLED {"disabled"}; +constexpr auto VD_ENABLED {"enabled"}; bool VulnerabilityScannerFacade::decompressDatabase(std::string_view databaseVersion) const { @@ -213,9 +217,29 @@ void VulnerabilityScannerFacade::initWazuhDBEventSubscription() { pushEvent(message, BufferType::BufferType_JSON); }); } -void VulnerabilityScannerFacade::vulnerabilityScannerPolicyChange() const +void VulnerabilityScannerFacade::vulnerabilityScanPolicyChange(Utils::RocksDBWrapper& stateDB) const { - // Check if a rescan is required based on the value of 'm_shouldRescan'. + const auto& policyManager = PolicyManager::instance(); + + // Retrieve the previous state of the vulnerability scanner module. + std::string moduleState; + if (stateDB.get(VD_STATE_KEY, moduleState)) + { + // If the module was disabled and now is enabled, set the flag to re-scan all agents. + if (moduleState == VD_DISABLED && policyManager.isVulnerabilityDetectionEnabled()) + { + logInfo(WM_VULNSCAN_LOGTAG, "Vulnerability scanner module is re-enabled. Re-scanning all agents."); + m_shouldRescan.store(true); + } + } + + // Set the new state of the vulnerability scanner module based on the policy. + stateDB.put(VD_STATE_KEY, policyManager.isVulnerabilityDetectionEnabled() ? VD_ENABLED : VD_DISABLED); +} + +void VulnerabilityScannerFacade::handleReScan() const +{ + // Check if a re-scan is required based on the value of 'm_shouldRescan'. 
if (m_shouldRescan.load()) { logDebug1(WM_VULNSCAN_LOGTAG, "Perform re-scan after reboot"); @@ -234,18 +258,16 @@ void VulnerabilityScannerFacade::vulnerabilityScannerPolicyChange() const void VulnerabilityScannerFacade::managerScanPolicyChange(Utils::RocksDBWrapper& stateDB) const { - std::string lastDisableState; const auto& policyManager = PolicyManager::instance(); - stateDB.get("disable_manager_scan", lastDisableState); - - if (!lastDisableState.empty()) + std::string managerLastState; + if ( stateDB.get(VD_MANAGER_STATE_KEY, managerLastState)) { - // Check if the last known disable state is "no" and the scanner is now disabled - if (lastDisableState == "no" && + // If the scanner was enabled and now is disabled, perform a clean-up + if (managerLastState == VD_ENABLED && (policyManager.getManagerDisabledScan() == DisableManagerScanStatus::DISABLE_MANAGER_SCAN)) { - // Perform manager cleanup + logInfo(WM_VULNSCAN_LOGTAG, "Vulnerability scanner in manager deactivated. Performing clean-up."); // Create a JSON object 'dataValue' to specify the action as "deleteAgent." @@ -257,10 +279,11 @@ void VulnerabilityScannerFacade::managerScanPolicyChange(Utils::RocksDBWrapper& const std::vector message(dataValue.begin(), dataValue.end()); pushEvent(message, BufferType::BufferType_JSON); } - // Check if the last known disable state is "yes" and the scanner is now enabled - else if (lastDisableState == "yes" && + // If the scanner was disabled and now is enabled, perform a scan + else if (managerLastState == VD_DISABLED && (policyManager.getManagerDisabledScan() == DisableManagerScanStatus::SCAN_MANAGER)) { + // Initiate a scan logInfo(WM_VULNSCAN_LOGTAG, "Vulnerability scanner in manager activated. Performing scan."); nlohmann::json dataValueJson; @@ -273,10 +296,12 @@ void VulnerabilityScannerFacade::managerScanPolicyChange(Utils::RocksDBWrapper& pushEvent(message, BufferType::BufferType_JSON); } } - stateDB.put("disable_manager_scan", - policyManager.getManagerDisabledScan() == DisableManagerScanStatus::DISABLE_MANAGER_SCAN ? "yes" - : "no"); + + stateDB.put(VD_MANAGER_STATE_KEY, + policyManager.getManagerDisabledScan() == DisableManagerScanStatus::DISABLE_MANAGER_SCAN ? VD_ENABLED + : VD_DISABLED); } + // LCOV_EXCL_START void VulnerabilityScannerFacade::start( const std::function(VD_STATE_QUEUE_PATH); - // Initialize a string to store the last known state from the database. - std::string lastState; - - // Retrieve the last known state from the database and store it in 'lastState'. - stateDB->get("previous_config", lastState); - - // Check if 'lastState' is not empty. - if (!lastState.empty()) - { - // Check if the previous state was "no" (disabled) and the current policy allows vulnerability detection. - if (lastState == "no" && policyManager.isVulnerabilityDetectionEnabled()) - { - // Log that the vulnerability scanner module is enabled and re-scan all agents. - logInfo(WM_VULNSCAN_LOGTAG, "Vulnerability scanner module is enabled. Re-scanning all agents."); - - // Update the database state to "yes" (enabled) and set 'm_shouldRescan' to true so the thread can - // rescan. - stateDB->put("previous_config", "yes"); - m_shouldRescan.store(true); - } - // Check if the previous state was "yes" (enabled) and the current policy disables vulnerability detection. - else if (lastState == "yes" && !policyManager.isVulnerabilityDetectionEnabled()) - { - // Update the database state to "no" (disabled). 
- stateDB->put("previous_config", "no"); - } - } - else - { - // If the value wasn't present, this is the first execution of the refactored module. We store the current - // value also. - stateDB->put("previous_config", policyManager.isVulnerabilityDetectionEnabled() ? "yes" : "no"); - } - // Return if the module is disabled. if (!policyManager.isVulnerabilityDetectionEnabled()) { @@ -365,7 +356,7 @@ void VulnerabilityScannerFacade::start( // Policy manager change managerScanPolicyChange(*stateDB); - // Rescan if VD policy change from false to true. + // Re-scan if VD policy change from false to true. vulnerabilityScannerPolicyChange(); // Subscription to syscollector delta events. diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp index dfa73495574..4394b0ed368 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp @@ -96,21 +96,38 @@ class VulnerabilityScannerFacade final : public Singleton Date: Thu, 23 May 2024 11:56:55 -0300 Subject: [PATCH 162/419] CL: - Added handle for cluster name changes on the vulnerability scanner - Perform a re-scan to force new index to be generated --- .../src/policyManager/policyManager.hpp | 22 ++-- .../buildAllAgentListContext.hpp | 3 +- .../src/vulnerabilityScannerFacade.cpp | 111 ++++++++++-------- .../src/vulnerabilityScannerFacade.hpp | 22 ++-- 4 files changed, 89 insertions(+), 69 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp index 8d4100779b6..ccaf86251e0 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp @@ -32,10 +32,10 @@ constexpr auto DEFAULT_OSDATA_LRU_SIZE {1000}; constexpr auto DEFAULT_REMEDIATION_LRU_SIZE {2048}; const static std::string UPDATER_PATH {"queue/vd_updater"}; -enum class DisableManagerScanStatus : uint32_t +enum class ManagerScanStatus : uint32_t { - DISABLE_MANAGER_SCAN = 1, - SCAN_MANAGER = 0 + DISABLED = 1, + ENABLED = 0 }; /** @@ -185,7 +185,7 @@ class PolicyManager final : public Singleton if (!newPolicy.contains("managerDisabledScan")) { - newPolicy["managerDisabledScan"] = DisableManagerScanStatus::SCAN_MANAGER; + newPolicy["managerDisabledScan"] = ManagerScanStatus::ENABLED; } return newPolicy; @@ -683,17 +683,17 @@ class PolicyManager final : public Singleton * @brief Retrieves the current status of the manager's scan. * * This function retrieves the current status of the manager's scan - * from the configuration and returns it as a DisableManagerScanStatus enum value. + * from the configuration and returns it as a ManagerScanStatus enum value. * - * @return DisableManagerScanStatus The status of the manager's scan. - * - If the scan is disabled, returns DisableManagerScanStatus::DISABLE_MANAGER_SCAN. - * - If the scan is enabled, returns DisableManagerScanStatus::SCAN_MANAGER. + * @return ManagerScanStatus The status of the manager's scan. + * - If the scan is disabled, returns ManagerScanStatus::DISABLED. + * - If the scan is enabled, returns ManagerScanStatus::ENABLED. */ - DisableManagerScanStatus getManagerDisabledScan() const + ManagerScanStatus getManagerDisabledScan() const { return m_configuration.at("managerDisabledScan").get() == 1 - ? 
DisableManagerScanStatus::DISABLE_MANAGER_SCAN - : DisableManagerScanStatus::SCAN_MANAGER; + ? ManagerScanStatus::DISABLED + : ManagerScanStatus::ENABLED; } /** diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp index d723a1ade8d..73c680c29fc 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp @@ -80,8 +80,7 @@ class TBuildAllAgentListContext final : public AbstractHandler() == 0)) + if (!(isManagerScanDisabled == ManagerScanStatus::DISABLED && agent.at("id").get() == 0)) { data->m_agents.push_back({Utils::padString(std::to_string(agent.at("id").get()), '0', 3), agent.at("name"), diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp index f38a392a7c0..185f84b28e9 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp @@ -37,8 +37,9 @@ constexpr auto VD_DATABASE_PATH {"queue/vd"}; constexpr auto VD_DATABASE_VERSION_KEY {"installed_content"}; constexpr auto VD_STATE_KEY {"vulnerability_scanner_previous_state"}; constexpr auto VD_MANAGER_STATE_KEY {"manager_scan_previous_state"}; -constexpr auto VD_DISABLED {"disabled"}; -constexpr auto VD_ENABLED {"enabled"}; +constexpr auto CLUSTER_NAME_KEY {"cluster_previous_name"}; +constexpr auto DISABLED {"disabled"}; +constexpr auto ENABLED {"enabled"}; bool VulnerabilityScannerFacade::decompressDatabase(std::string_view databaseVersion) const { @@ -219,58 +220,34 @@ void VulnerabilityScannerFacade::initWazuhDBEventSubscription() void VulnerabilityScannerFacade::vulnerabilityScanPolicyChange(Utils::RocksDBWrapper& stateDB) const { - const auto& policyManager = PolicyManager::instance(); + const auto vulnerabilityDetectionEnabled = PolicyManager::instance().isVulnerabilityDetectionEnabled(); - // Retrieve the previous state of the vulnerability scanner module. - std::string moduleState; - if (stateDB.get(VD_STATE_KEY, moduleState)) + if (std::string moduleLastState; stateDB.get(VD_STATE_KEY, moduleLastState)) { // If the module was disabled and now is enabled, set the flag to re-scan all agents. - if (moduleState == VD_DISABLED && policyManager.isVulnerabilityDetectionEnabled()) + if (moduleLastState == DISABLED && vulnerabilityDetectionEnabled) { - logInfo(WM_VULNSCAN_LOGTAG, "Vulnerability scanner module is re-enabled. Re-scanning all agents."); + logInfo(WM_VULNSCAN_LOGTAG, "Vulnerability scanner module was re-enabled (re-scan needed)"); m_shouldRescan.store(true); } } // Set the new state of the vulnerability scanner module based on the policy. - stateDB.put(VD_STATE_KEY, policyManager.isVulnerabilityDetectionEnabled() ? VD_ENABLED : VD_DISABLED); -} - -void VulnerabilityScannerFacade::handleReScan() const -{ - // Check if a re-scan is required based on the value of 'm_shouldRescan'. - if (m_shouldRescan.load()) - { - logDebug1(WM_VULNSCAN_LOGTAG, "Perform re-scan after reboot"); - - // Create a JSON object 'dataValue' to specify the action as "cleanup." 
- std::string dataValueCleanup = R"({"action":"cleanup"})"; - const std::vector messageCleanup(dataValueCleanup.begin(), dataValueCleanup.end()); - pushEvent(messageCleanup, BufferType::BufferType_JSON); - - // Create a JSON object 'dataValue' to specify the action as "reboot." - std::string dataValueReScan = R"({"action":"reboot"})"; - const std::vector messageReScan(dataValueReScan.begin(), dataValueReScan.end()); - pushEvent(messageReScan, BufferType::BufferType_JSON); - } + stateDB.put(VD_STATE_KEY, vulnerabilityDetectionEnabled ? ENABLED : DISABLED); } void VulnerabilityScannerFacade::managerScanPolicyChange(Utils::RocksDBWrapper& stateDB) const { - const auto& policyManager = PolicyManager::instance(); + const auto managerScanStatus = PolicyManager::instance().getManagerDisabledScan(); - std::string managerLastState; - if ( stateDB.get(VD_MANAGER_STATE_KEY, managerLastState)) + if (std::string managerLastState; stateDB.get(VD_MANAGER_STATE_KEY, managerLastState)) { // If the scanner was enabled and now is disabled, perform a clean-up - if (managerLastState == VD_ENABLED && - (policyManager.getManagerDisabledScan() == DisableManagerScanStatus::DISABLE_MANAGER_SCAN)) + if (managerLastState == ENABLED && (managerScanStatus == ManagerScanStatus::DISABLED)) { - logInfo(WM_VULNSCAN_LOGTAG, "Vulnerability scanner in manager deactivated. Performing clean-up."); - // Create a JSON object 'dataValue' to specify the action as "deleteAgent." + // Create and queue the delete event nlohmann::json dataValueJson; dataValueJson["action"] = "deleteAgent"; dataValueJson["agent_info"]["agent_id"] = "000"; @@ -280,26 +257,66 @@ void VulnerabilityScannerFacade::managerScanPolicyChange(Utils::RocksDBWrapper& pushEvent(message, BufferType::BufferType_JSON); } // If the scanner was disabled and now is enabled, perform a scan - else if (managerLastState == VD_DISABLED && - (policyManager.getManagerDisabledScan() == DisableManagerScanStatus::SCAN_MANAGER)) + else if (managerLastState == DISABLED && (managerScanStatus == ManagerScanStatus::ENABLED)) { - - // Initiate a scan logInfo(WM_VULNSCAN_LOGTAG, "Vulnerability scanner in manager activated. Performing scan."); + + // Create and queue the scan event nlohmann::json dataValueJson; dataValueJson["action"] = "scanAgent"; dataValueJson["agent_info"]["agent_id"] = "000"; const auto& dataValue = dataValueJson.dump(); const std::vector message(dataValue.begin(), dataValue.end()); - pushEvent(message, BufferType::BufferType_JSON); } } - stateDB.put(VD_MANAGER_STATE_KEY, - policyManager.getManagerDisabledScan() == DisableManagerScanStatus::DISABLE_MANAGER_SCAN ? VD_ENABLED - : VD_DISABLED); + stateDB.put(VD_MANAGER_STATE_KEY, managerScanStatus == ManagerScanStatus::DISABLED ? 
DISABLED : ENABLED); +} + +void VulnerabilityScannerFacade::clusterConfigurationChange(Utils::RocksDBWrapper& stateDB) const +{ + const auto& clusterName = PolicyManager::instance().getClusterName(); + + if (std::string clusterLastName; stateDB.get(CLUSTER_NAME_KEY, clusterLastName)) + { + // If the cluster name changed, perform a scan + if (clusterLastName.compare(clusterName) != 0) + { + logInfo(WM_VULNSCAN_LOGTAG, "Cluster name changed (re-scan needed)"); + m_shouldRescan.store(true); + } + } + + stateDB.put(CLUSTER_NAME_KEY, clusterName); +} + +void VulnerabilityScannerFacade::handlePolicyChange(Utils::RocksDBWrapper& stateDB) const +{ + // Check the policy for the vulnerability scanner on the manager + managerScanPolicyChange(stateDB); + + // Check the policy for the vulnerability scanner + vulnerabilityScanPolicyChange(stateDB); + + // Check the cluster configuration + clusterConfigurationChange(stateDB); + + if (m_shouldRescan) + { + logInfo(WM_VULNSCAN_LOGTAG, "Re-scanning all agents"); + + // Create and queue the cleanup event + std::string dataValueCleanup = R"({"action":"cleanup"})"; + const std::vector messageCleanup(dataValueCleanup.begin(), dataValueCleanup.end()); + pushEvent(messageCleanup, BufferType::BufferType_JSON); + + // Create and queue the reboot event (force scan) + std::string dataValueReScan = R"({"action":"reboot"})"; + const std::vector messageReScan(dataValueReScan.begin(), dataValueReScan.end()); + pushEvent(messageReScan, BufferType::BufferType_JSON); + } } // LCOV_EXCL_START @@ -314,7 +331,6 @@ void VulnerabilityScannerFacade::start( { try { - m_noWaitToStop = noWaitToStop; // Initialize logging @@ -353,11 +369,8 @@ void VulnerabilityScannerFacade::start( m_eventDispatcher = std::make_shared(EVENTS_QUEUE_PATH, EVENTS_BULK_SIZE); - // Policy manager change - managerScanPolicyChange(*stateDB); - - // Re-scan if VD policy change from false to true. - vulnerabilityScannerPolicyChange(); + // Checks for the actions to be performed after the policy change. + handlePolicyChange(*stateDB); // Subscription to syscollector delta events. 
initDeltasSubscription(); diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp index 4394b0ed368..c35cccaf7b1 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp @@ -106,14 +106,19 @@ class VulnerabilityScannerFacade final : public Singleton m_shouldStop {false}; - std::atomic m_shouldRescan {false}; + mutable std::atomic m_shouldRescan {false}; bool m_noWaitToStop {true}; std::shared_ptr m_eventDispatcher; std::shared_mutex m_internalMutex; From 95b4530ebf6545a107cf68b15f1933f23098d65a Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Thu, 23 May 2024 14:14:03 -0300 Subject: [PATCH 163/419] CL: - Fixed error in logic, vulnerability scanner status was not being updated --- .../src/vulnerabilityScannerFacade.cpp | 30 +++++++++---------- .../src/vulnerabilityScannerFacade.hpp | 20 ++++--------- 2 files changed, 21 insertions(+), 29 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp index 185f84b28e9..f866cd84212 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp @@ -236,14 +236,14 @@ void VulnerabilityScannerFacade::vulnerabilityScanPolicyChange(Utils::RocksDBWra stateDB.put(VD_STATE_KEY, vulnerabilityDetectionEnabled ? ENABLED : DISABLED); } -void VulnerabilityScannerFacade::managerScanPolicyChange(Utils::RocksDBWrapper& stateDB) const +void VulnerabilityScannerFacade::handleManagerScanPolicyChange(Utils::RocksDBWrapper& stateDB) const { const auto managerScanStatus = PolicyManager::instance().getManagerDisabledScan(); if (std::string managerLastState; stateDB.get(VD_MANAGER_STATE_KEY, managerLastState)) { // If the scanner was enabled and now is disabled, perform a clean-up - if (managerLastState == ENABLED && (managerScanStatus == ManagerScanStatus::DISABLED)) + if (managerLastState == ENABLED && managerScanStatus == ManagerScanStatus::DISABLED) { logInfo(WM_VULNSCAN_LOGTAG, "Vulnerability scanner in manager deactivated. Performing clean-up."); @@ -257,7 +257,7 @@ void VulnerabilityScannerFacade::managerScanPolicyChange(Utils::RocksDBWrapper& pushEvent(message, BufferType::BufferType_JSON); } // If the scanner was disabled and now is enabled, perform a scan - else if (managerLastState == DISABLED && (managerScanStatus == ManagerScanStatus::ENABLED)) + else if (managerLastState == DISABLED && managerScanStatus == ManagerScanStatus::ENABLED) { logInfo(WM_VULNSCAN_LOGTAG, "Vulnerability scanner in manager activated. Performing scan."); @@ -294,15 +294,6 @@ void VulnerabilityScannerFacade::clusterConfigurationChange(Utils::RocksDBWrappe void VulnerabilityScannerFacade::handlePolicyChange(Utils::RocksDBWrapper& stateDB) const { - // Check the policy for the vulnerability scanner on the manager - managerScanPolicyChange(stateDB); - - // Check the policy for the vulnerability scanner - vulnerabilityScanPolicyChange(stateDB); - - // Check the cluster configuration - clusterConfigurationChange(stateDB); - if (m_shouldRescan) { logInfo(WM_VULNSCAN_LOGTAG, "Re-scanning all agents"); @@ -343,6 +334,14 @@ void VulnerabilityScannerFacade::start( // Create a unique pointer to a RocksDBWrapper instance for managing state information. 
auto stateDB = std::make_unique(VD_STATE_QUEUE_PATH); + SocketDBWrapper::instance().init(); + + // Check the policy for the vulnerability scanner + vulnerabilityScanPolicyChange(*stateDB); + + // Check the cluster configuration + clusterConfigurationChange(*stateDB); + // Return if the module is disabled. if (!policyManager.isVulnerabilityDetectionEnabled()) { @@ -350,8 +349,6 @@ void VulnerabilityScannerFacade::start( return; } - SocketDBWrapper::instance().init(); - // Indexer connector initialization. if (policyManager.isIndexerEnabled()) { @@ -369,7 +366,10 @@ void VulnerabilityScannerFacade::start( m_eventDispatcher = std::make_shared(EVENTS_QUEUE_PATH, EVENTS_BULK_SIZE); - // Checks for the actions to be performed after the policy change. + // Checks for the actions to be performed after the policy change (manager). + handleManagerScanPolicyChange(*stateDB); + + // Checks for the actions to be performed after the policy change (vulnerability scanner). handlePolicyChange(*stateDB); // Subscription to syscollector delta events. diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp index c35cccaf7b1..4a111fad3e8 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp @@ -97,16 +97,14 @@ class VulnerabilityScannerFacade final : public Singleton Date: Thu, 23 May 2024 15:20:51 -0300 Subject: [PATCH 164/419] CL: - Style changes --- .../src/policyManager/policyManager.hpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp index ccaf86251e0..56c5eab8579 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp @@ -691,9 +691,8 @@ class PolicyManager final : public Singleton */ ManagerScanStatus getManagerDisabledScan() const { - return m_configuration.at("managerDisabledScan").get() == 1 - ? ManagerScanStatus::DISABLED - : ManagerScanStatus::ENABLED; + return m_configuration.at("managerDisabledScan").get() == 1 ? 
ManagerScanStatus::DISABLED + : ManagerScanStatus::ENABLED; } /** From 76f0cccb1d3cdfe1021e9cca4c2cb98b5d85c1f0 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Thu, 23 May 2024 15:51:05 -0300 Subject: [PATCH 165/419] CL: - Updated logging level to be more precise - Updated test policy change expected logs --- .../qa/test_data_policy/001/expected_003.out | 2 +- .../qa/test_data_policy/001/expected_005.out | 2 +- .../qa/test_data_policy/002/expected_003.out | 1 + .../qa/test_data_policy/003/expected_003.out | 2 +- .../qa/test_data_policy/003/expected_005.out | 2 +- .../qa/test_data_policy/004/expected_003.out | 1 + .../src/vulnerabilityScannerFacade.cpp | 6 +++--- 7 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out index 6f81452efcf..7bc9913b606 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out @@ -1,6 +1,6 @@ [ "Vulnerability scanner module started", - "Vulnerability scanner module is enabled. Re-scanning all agents.", + "Policy changed. Re-scanning all agents", "Event type: 9 processed", "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", "Fetched 2 agents from Wazuh-DB.", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_005.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_005.out index 33cee0b94de..004661a3187 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_005.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_005.out @@ -1,6 +1,6 @@ [ "Vulnerability scanner module started", - "Vulnerability scanner module is enabled. Re-scanning all agents.", + "Policy changed. Re-scanning all agents", "Fetched 1 agents from Wazuh-DB.", "Skipping manager agent with id 0.", "Vulnerability scan for OS 'enterprise_linux' on Agent '001' has completed.", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out index d3d09ce7e52..cd2303c7302 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/expected_003.out @@ -1,5 +1,6 @@ [ "Vulnerability scanner module started", + "Vulnerability scanner in manager activated. Performing scan.", "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", "Translation for package 'gzip' in platform 'ubuntu' not found. Using provided packageName.", "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical'", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out index 8668258ceb5..a8483a09e0c 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out @@ -1,6 +1,6 @@ [ "Vulnerability scanner module started", - "Vulnerability scanner module is enabled. Re-scanning all agents.", + "Policy changed. 
Re-scanning all agents", "Event type: 9 processed", "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", "Fetched 2 agents from Wazuh-DB.", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out index 70664c51519..e0137cd698b 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out @@ -1,6 +1,6 @@ [ "Vulnerability scanner module started", - "Vulnerability scanner module is enabled. Re-scanning all agents.", + "Policy changed. Re-scanning all agents", "Fetched 1 agents from Wazuh-DB.", "Skipping manager agent with id 0.", "Vulnerability scan for OS 'enterprise_linux' on Agent '001' has completed.", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_003.out index b170087b388..5e38d9563a8 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/expected_003.out @@ -1,5 +1,6 @@ [ "Vulnerability scanner module started", + "Vulnerability scanner in manager activated. Performing scan.", "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", "Translation for package 'gzip' in platform 'ubuntu' not found. Using provided packageName.", "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical'", diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp index f866cd84212..aebfa4b72b3 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp @@ -227,7 +227,7 @@ void VulnerabilityScannerFacade::vulnerabilityScanPolicyChange(Utils::RocksDBWra // If the module was disabled and now is enabled, set the flag to re-scan all agents. if (moduleLastState == DISABLED && vulnerabilityDetectionEnabled) { - logInfo(WM_VULNSCAN_LOGTAG, "Vulnerability scanner module was re-enabled (re-scan needed)"); + logDebug1(WM_VULNSCAN_LOGTAG, "Vulnerability scanner module was re-enabled (re-scan needed)"); m_shouldRescan.store(true); } } @@ -284,7 +284,7 @@ void VulnerabilityScannerFacade::clusterConfigurationChange(Utils::RocksDBWrappe // If the cluster name changed, perform a scan if (clusterLastName.compare(clusterName) != 0) { - logInfo(WM_VULNSCAN_LOGTAG, "Cluster name changed (re-scan needed)"); + logDebug1(WM_VULNSCAN_LOGTAG, "Cluster name changed (re-scan needed)"); m_shouldRescan.store(true); } } @@ -296,7 +296,7 @@ void VulnerabilityScannerFacade::handlePolicyChange(Utils::RocksDBWrapper& state { if (m_shouldRescan) { - logInfo(WM_VULNSCAN_LOGTAG, "Re-scanning all agents"); + logInfo(WM_VULNSCAN_LOGTAG, "Policy changed. 
Re-scanning all agents"); // Create and queue the cleanup event std::string dataValueCleanup = R"({"action":"cleanup"})"; From 8042de2e2ea2be2b3b5df05f032c9d8c1b34ddbc Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Thu, 23 May 2024 21:33:56 -0300 Subject: [PATCH 166/419] CL: - Added new policy change test to check new behaviour --- .../005/agentHotfixesData.json | 174 ++++++++++++++++++ .../qa/test_data_policy/005/agentOsData.json | 39 ++++ .../005/agentPackagesData.json | 50 +++++ .../qa/test_data_policy/005/args_001.json | 19 ++ .../qa/test_data_policy/005/args_002.json | 19 ++ .../qa/test_data_policy/005/config.json | 25 +++ .../005/configEnableCluster.json | 25 +++ .../qa/test_data_policy/005/expected_001.out | 3 + .../qa/test_data_policy/005/expected_002.out | 12 ++ .../qa/test_data_policy/005/globalData.json | 58 ++++++ 10 files changed, 424 insertions(+) create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentHotfixesData.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentOsData.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentPackagesData.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/args_001.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/args_002.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/config.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/configEnableCluster.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/expected_001.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/expected_002.out create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/globalData.json diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentHotfixesData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentHotfixesData.json new file mode 100644 index 00000000000..f5c66184a1e --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentHotfixesData.json @@ -0,0 +1,174 @@ +{ + "1001": + [ + { + "hotfix": "KB2468871" + }, + { + "hotfix": "KB2478063" + }, + { + "hotfix": "KB2533523" + }, + { + "hotfix": "KB2544514" + }, + { + "hotfix": "KB2600211" + }, + { + "hotfix": "KB2600217" + }, + { + "hotfix": "KB4502496" + }, + { + "hotfix": "KB4512577" + }, + { + "hotfix": "KB4512578" + }, + { + "hotfix": "KB4514366" + }, + { + "hotfix": "KB4535680" + }, + { + "hotfix": "KB4535684" + }, + { + "hotfix": "KB4535685" + }, + { + "hotfix": "KB4577586" + }, + { + "hotfix": "KB4580325" + }, + { + "hotfix": "KB4589208" + }, + { + "hotfix": "KB4601558" + }, + { + "hotfix": "KB5003171" + }, + { + "hotfix": "KB5003243" + }, + { + "hotfix": "KB5034619" + }, + { + "hotfix": "KB5034768" + }, + { + "hotfix": "KB5034863" + } + ], + "1002": + [ + { + "hotfix": "KB2468871" + }, + { + "hotfix": "KB2478063" + }, + { + "hotfix": "KB2533523" + }, + { + "hotfix": "KB2544514" + }, + { + "hotfix": "KB2600211" + }, + { + "hotfix": "KB2600217" + }, + { + "hotfix": "KB5008882" + }, + { + "hotfix": "KB5010523" + }, + { + "hotfix": "KB5011497" + } + ], + "1003": + [ + { + "hotfix": "KB2468871" + }, + { + "hotfix": "KB2478063" + }, + { + "hotfix": "KB2533523" + }, + { + "hotfix": "KB2544514" + }, + { + "hotfix": "KB2600211" + }, + { + "hotfix": "KB2600217" + }, + { + "hotfix": "KB4502496" + 
}, + { + "hotfix": "KB4512577" + }, + { + "hotfix": "KB4512578" + }, + { + "hotfix": "KB4514366" + }, + { + "hotfix": "KB4535680" + }, + { + "hotfix": "KB4535684" + }, + { + "hotfix": "KB4535685" + }, + { + "hotfix": "KB4577586" + }, + { + "hotfix": "KB4580325" + }, + { + "hotfix": "KB4589208" + }, + { + "hotfix": "KB4601558" + }, + { + "hotfix": "KB5003171" + }, + { + "hotfix": "KB5003243" + }, + { + "hotfix": "KB5034619" + }, + { + "hotfix": "KB5034768" + }, + { + "hotfix": "KB5034863" + } + ], + "1004": + { + "status": "NOT_SYNCED" + } +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentOsData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentOsData.json new file mode 100644 index 00000000000..0fe7b6af977 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentOsData.json @@ -0,0 +1,39 @@ +{ + "000": [ + { + "architecture": "x86_64", + "checksum": "1704514361693635656", + "hostname": "ubuntu-jammy", + "os_codename": "jammy", + "os_major": "22", + "os_minor": "04", + "os_name": "Ubuntu", + "os_patch": "3", + "os_platform": "ubuntu", + "os_version": "22.04.3 LTS (Jammy Jellyfish)", + "reference": "f22553c945b045bfc0d162cb890344d2f4fa8609", + "release": "5.15.0-91-generic", + "scan_id": 0, + "scan_time": "2024/01/06 04:12:44", + "sysname": "Linux", + "version": "#101-Ubuntu SMP Tue Nov 14 13:30:08 UTC 2023" + } + ], + "001": [ + { + "architecture": "x86_64", + "checksum": "1704514864922425008", + "hostname": "vagrant", + "os_major": "8", + "os_name": "Red Hat Enterprise Linux", + "os_platform": "rhel", + "os_version": "8.9", + "reference": "e778c1fe83f2b15cdb013471a2c8223132c9e1ca", + "release": "4.14.311-233.529.amzn2.x86_64", + "scan_id": 0, + "scan_time": "2024/01/06 04:21:05", + "sysname": "Linux", + "version": "#1 SMP Thu Mar 23 09:54:12 UTC 2023" + } + ] +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentPackagesData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentPackagesData.json new file mode 100644 index 00000000000..d476d167cac --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentPackagesData.json @@ -0,0 +1,50 @@ +{ + "000": [ + { + "scan_id": "0", + "scan_time": "2024/01/11 00:05:48", + "format": "deb", + "name": "gzip", + "priority": "required", + "section": "utils", + "size": 245, + "vendor": "Ubuntu Developers ", + "install_time": "", + "version": "1.10-0ubuntu4.1", + "architecture": "amd64", + "multiarch": "", + "source": "", + "description": "GNU compression utilities", + "location": "", + "triaged": "0", + "cpe": "", + "msu_name": "", + "checksum": "653552fc5b2cc4c4cc281ee1a2fdd55351cae8f4", + "item_id": "040334345fd47ab6e72026cf3c45640456198fb4" + } + ], + "001": [ + { + "scan_id": "0", + "scan_time": "2024/01/11 00:05:58", + "format": "rpm", + "name": "lua-libs", + "priority": "", + "section": "Development/Languages", + "size": 247936, + "vendor": "Red Hat, Inc.", + "install_time": "1698808920", + "version": "5.3.4-12.el8", + "architecture": "x86_64", + "multiarch": "", + "source": "", + "description": "Libraries for lua", + "location": "", + "triaged": "0", + "cpe": "", + "msu_name": "", + "checksum": "70901207054653e2ef475cad7b77d31c4757b16d", + "item_id": "6a15840a129f0021c18e7a09e88e1dc7f1ef84b0" + } + ] +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/args_001.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/args_001.json new 
file mode 100644 index 00000000000..539af06d82d --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/args_001.json @@ -0,0 +1,19 @@ +[ + "-c", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/config.json", + "-t", + "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", + "-l", + "log.out", + "-s", + "120", + "-b", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentHotfixesData.json", + "-p", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentPackagesData.json", + "-a", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentOsData.json", + "-g", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/globalData.json", + "-u" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/args_002.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/args_002.json new file mode 100644 index 00000000000..f50e509006b --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/args_002.json @@ -0,0 +1,19 @@ +[ + "-c", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/configEnableCluster.json", + "-t", + "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", + "-l", + "log.out", + "-s", + "120", + "-h", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentHotfixesData.json", + "-p", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentPackagesData.json", + "-b", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/agentOsData.json", + "-g", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/globalData.json", + "-u" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/config.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/config.json new file mode 100644 index 00000000000..7bd30fa9062 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/config.json @@ -0,0 +1,25 @@ +{ + "vulnerability-detection": { + "enabled": "yes", + "index-status": "yes", + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + }, + "indexer": { + "enabled": "yes", + "hosts": [ + "https://0.0.0.0:9200" + ], + "username": "admin", + "password": "admin", + "ssl": { + "certificate_authorities": [ + "/home/dwordcito/Development/wazuh/src/root-ca.pem" + ], + "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", + "key": "/home/dwordcito/Development/wazuh/src/node-1-key.pem" + } + }, + "clusterName":"cluster01", + "clusterNodeName":"node01", + "clusterEnabled":false +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/configEnableCluster.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/configEnableCluster.json new file mode 100644 index 00000000000..afacf118c81 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/configEnableCluster.json @@ -0,0 +1,25 @@ +{ + "vulnerability-detection": { + "enabled": "yes", + "index-status": "yes", + "cti-url": "https://cti.wazuh.com/api/v1/catalog/contexts/vd_1.0.0/consumers/vd_4.8.0" + }, + "indexer": { + "enabled": "yes", + "hosts": [ + "https://0.0.0.0:9200" + ], + "username": "admin", + "password": "admin", + "ssl": { + "certificate_authorities": [ + "/home/dwordcito/Development/wazuh/src/root-ca.pem" + ], + "certificate": "/home/dwordcito/Development/wazuh/src/node-1.pem", + "key": 
"/home/dwordcito/Development/wazuh/src/node-1-key.pem" + } + }, + "clusterName":"cluster02", + "clusterNodeName":"node02", + "clusterEnabled":true +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/expected_001.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/expected_001.out new file mode 100644 index 00000000000..4b74d44d4e3 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/expected_001.out @@ -0,0 +1,3 @@ +[ + "Vulnerability scanner module started" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/expected_002.out new file mode 100644 index 00000000000..7d0b792549b --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/expected_002.out @@ -0,0 +1,12 @@ +[ + "Vulnerability scanner module started", + "Cluster name changed (re-scan needed)", + "Policy changed. Re-scanning all agents", + "Fetched 2 agents from Wazuh-DB.", + "Vulnerability scan for OS 'enterprise_linux' on Agent '001' has completed.", + "Translation for package 'lua-libs' in platform 'rhel' not found. Using provided packageName.", + "Initiating a vulnerability scan for package 'lua-libs' (rpm) (red hat, inc.) with CVE Numbering Authorities (CNA) 'redhat_8' on Agent 'agent_redhat_8' (ID: '001', Version: 'v4.7.1').", + "Vulnerability scan for package 'lua-libs' on Agent '001' has completed.", + "Event type: 7 processed", + "Event type: 9 processed" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/globalData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/globalData.json new file mode 100644 index 00000000000..5025d61115f --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/globalData.json @@ -0,0 +1,58 @@ +[ + { + "id": 0, + "name": "agent_ubuntu_22", + "ip": "10.0.0.3", + "register_ip": "any", + "internal_key": "9e369606c6f3c20a114f399853abff716f07cdec0ebd30a1e2a59b6b4b439795", + "os_name": "Ubuntu", + "os_version": "22.04.6 LTS", + "os_major": "22", + "os_minor": "04", + "os_codename": "jammy", + "os_build": "ubuntu", + "os_platform": "ubuntu", + "os_uname": "agent_ubuntu_22 | 6.5.13-7-MANJARO | #1 SMP PREEMPT_DYNAMIC Wed Dec 20 07:15:58 UTC 2023", + "os_arch": "x86_64", + "version": "Wazuh v4.7.1", + "config_sum": "ab73af41699f13fdd81903b5f23d8d00", + "merged_sum": "4a8724b20dee0124ff9656783c490c4e", + "manager_host": "pr-test", + "node_name": "node01", + "date_add": "1704931524", + "last_keepalive": "1705096327", + "group": "default", + "group_hash": "37a8eec1", + "group_sync_status": "synced", + "sync_status": "synced", + "connection_status": "disconnected" + }, + { + "id": 1, + "name": "agent_redhat_8", + "ip": "10.0.0.18", + "register_ip": "any", + "internal_key": "3426434231c609dcebcab2676d732db376ceae26af88d13c864668fc3acac778", + "os_name": "Red Hat Enterprise Linux", + "os_version": "8.9", + "os_major": "8", + "os_minor": "9", + "os_codename": "Ootpa", + "os_build": "rhel", + "os_platform": "rhel", + "os_uname": "agent_redhat_8 | 6.5.13-7-MANJARO | #1 SMP PREEMPT_DYNAMIC Wed Dec 20 07:15:58 UTC 2023", + "os_arch": "x86_64", + "version": "Wazuh v4.7.1", + "config_sum": "ab73af41699f13fdd81903b5f23d8d00", + "merged_sum": "4a8724b20dee0124ff9656783c490c4e", + "manager_host": "pr-test", + "node_name": "node01", + "date_add": "1704931528", + "last_keepalive": "1705096327", + "group": "default", + "group_hash": 
"37a8eec1", + "group_sync_status": "synced", + "sync_status": "synced", + "connection_status": "disconnected" + } +] From 6edfdeb0910c9d07cb60c8e9851f64c160526fd9 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Thu, 23 May 2024 15:09:07 -0300 Subject: [PATCH 167/419] Add post action and re-scan after that update. --- .../src/indexerConnector.cpp | 42 +++++++++------ src/shared_modules/utils/timeHelper.h | 11 ++++ .../databaseFeedManager.hpp | 35 ++++++++----- .../src/scanOrchestrator/osScanner.hpp | 4 +- .../src/scanOrchestrator/packageScanner.hpp | 18 +++---- .../src/scanOrchestrator/resultIndexer.hpp | 8 ++- .../src/scanOrchestrator/scanAgentList.hpp | 18 +++++-- .../src/scanOrchestrator/scanContext.hpp | 11 ++++ .../src/scanOrchestrator/scanOrchestrator.hpp | 50 ++++++++++++++++++ .../src/vulnerabilityScannerFacade.cpp | 25 +++++++-- .../tests/mocks/MockDatabaseFeedManager.hpp | 4 +- .../tests/unit/databaseFeedManager_test.cpp | 12 ++--- .../tests/unit/packageScanner_test.cpp | 52 +++++++++---------- .../tests/unit/resultIndexer_test.cpp | 2 +- 14 files changed, 209 insertions(+), 83 deletions(-) diff --git a/src/shared_modules/indexer_connector/src/indexerConnector.cpp b/src/shared_modules/indexer_connector/src/indexerConnector.cpp index 8a28d51e07f..f147f584bba 100644 --- a/src/shared_modules/indexer_connector/src/indexerConnector.cpp +++ b/src/shared_modules/indexer_connector/src/indexerConnector.cpp @@ -383,32 +383,44 @@ IndexerConnector::IndexerConnector( dataQueue.pop(); auto parsedData = nlohmann::json::parse(data); const auto& id = parsedData.at("id").get_ref(); + // If the element should not be indexed, only delete it from the sync database. + const bool shouldIndex = !parsedData.contains("no-index") || !parsedData.at("no-index").get(); if (parsedData.at("operation").get_ref().compare("DELETED") == 0) { - builderBulkDelete(bulkData, id, m_indexName); + if (shouldIndex) + { + builderBulkDelete(bulkData, id, m_indexName); + } m_db->delete_(id); } else { const auto dataString = parsedData.at("data").dump(); - builderBulkIndex(bulkData, id, m_indexName, dataString); + if (shouldIndex) + { + builderBulkIndex(bulkData, id, m_indexName, dataString); + } m_db->put(id, dataString); } } - // Process data. - HTTPRequest::instance().post( - HttpURL(url), - bulkData, - [](const std::string& response) { logDebug2(IC_NAME, "Response: %s", response.c_str()); }, - [](const std::string& error, const long statusCode) - { - logError(IC_NAME, "%s, status code: %ld", error.c_str(), statusCode); - throw std::runtime_error(error); - }, - "", - DEFAULT_HEADERS, - secureCommunication); + + if (!bulkData.empty()) + { + // Process data. + HTTPRequest::instance().post( + HttpURL(url), + bulkData, + [](const std::string& response) { logDebug2(IC_NAME, "Response: %s", response.c_str()); }, + [](const std::string& error, const long statusCode) + { + logError(IC_NAME, "%s, status code: %ld", error.c_str(), statusCode); + throw std::runtime_error(error); + }, + "", + DEFAULT_HEADERS, + secureCommunication); + } }, DATABASE_BASE_PATH + m_indexName, ELEMENTS_PER_BULK); diff --git a/src/shared_modules/utils/timeHelper.h b/src/shared_modules/utils/timeHelper.h index 20fe5eb4ce2..037264611de 100644 --- a/src/shared_modules/utils/timeHelper.h +++ b/src/shared_modules/utils/timeHelper.h @@ -141,6 +141,17 @@ namespace Utils return output.str(); } + /** + * @brief Get seconds from epoch. + * This method is used to get the seconds from epoch. + * @return seconds from epoch. 
+ */ + static int64_t getSecondsFromEpoch() + { + return std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()) + .count(); + }; + #pragma GCC diagnostic pop } // namespace Utils diff --git a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp index 3ad2612b745..0bc6c4af118 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/databaseFeedManager/databaseFeedManager.hpp @@ -46,7 +46,7 @@ constexpr auto EMPTY_KEY {""}; /** * @brief Scanning package data struct. */ -struct packageData final +struct PackageData final { std::string name; ///< Package name. std::string vendor; ///< Package vendor. @@ -269,25 +269,28 @@ class TDatabaseFeedManager final : public Observer * @param isLocalSubscriber Configures the router subscription lambda execution as local or remote. * @param reloadGlobalMapsStartup If true, the vendor and os cpe maps will be reloaded at startup. * @param initContentUpdater If true, the content updater will be initialized. + * @param postUpdateCallback Callback to be executed after the update process. */ // LCOV_EXCL_START - explicit TDatabaseFeedManager(std::shared_ptr indexerConnector, - const std::atomic& shouldStop, - std::shared_mutex& mutex, - const bool isLocalSubscriber = true, - const bool reloadGlobalMapsStartup = true, - const bool initContentUpdater = true) + explicit TDatabaseFeedManager( + std::shared_ptr indexerConnector, + const std::atomic& shouldStop, + std::shared_mutex& mutex, + const bool isLocalSubscriber = true, + const bool reloadGlobalMapsStartup = true, + const bool initContentUpdater = true, + const std::function& postUpdateCallback = + []() { // Not used + }) : Observer("database_feed_manager") , m_indexerConnector(std::move(indexerConnector)) , m_shouldStop(shouldStop) , m_mutex(mutex) + { const auto updaterPolicy = TPolicyManager::instance().getUpdaterConfiguration(); const std::string topicName = updaterPolicy.at("topicName"); - m_translationL2Cache = - std::make_unique(TPolicyManager::instance().getTranslationLRUSize()); - try { m_feedDatabase = std::make_unique(DATABASE_PATH, false); @@ -320,7 +323,7 @@ class TDatabaseFeedManager final : public Observer std::make_unique(topicName, "vulnerability_feed_manager", isLocalSubscriber); m_contentUpdateSubscription->subscribe( - [&, topicName]([[maybe_unused]] const std::vector& message) + [&, topicName, postUpdateCallback]([[maybe_unused]] const std::vector& message) { auto eventDecoder = std::make_shared(); eventDecoder->setLast(std::make_shared()); @@ -342,6 +345,9 @@ class TDatabaseFeedManager final : public Observer // Verify vendor-map and oscpe-map values and update the maps in memory reloadGlobalMaps(); + + // Dispatch the post update Callback + postUpdateCallback(); logInfo(WM_VULNSCAN_LOGTAG, "Feed update process completed"); } catch (const std::exception& e) @@ -542,9 +548,9 @@ class TDatabaseFeedManager final : public Observer */ void getVulnerabilitiesCandidates( const std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback) { if (package.name.empty() || cnaName.empty()) @@ -750,7 +756,8 @@ class TDatabaseFeedManager final : public Observer std::shared_ptr m_indexerConnector; std::unique_ptr m_contentRegistration; std::unique_ptr m_feedDatabase; - std::unique_ptr m_translationL2Cache; + 
std::unique_ptr m_translationL2Cache = + std::make_unique(TPolicyManager::instance().getTranslationLRUSize()); std::unique_ptr m_contentUpdateSubscription; const std::atomic& m_shouldStop; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp index 63bac74dc87..560b82cdc91 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/osScanner.hpp @@ -80,7 +80,7 @@ class TOsScanner final : public AbstractHandler> const auto osCPE = ScannerHelper::parseCPE(data->osCPEName().data()); auto vulnerabilityScan = [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const NSVulnerabilityScanner::ScanVulnerabilityCandidate& callbackData) { try @@ -275,7 +275,7 @@ class TOsScanner final : public AbstractHandler> } else { - packageData package = {.name = osCPE.product}; + PackageData package = {.name = osCPE.product}; m_databaseFeedManager->getVulnerabilitiesCandidates("nvd", package, vulnerabilityScan); diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp index c15934518e6..bed4b8f8380 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/packageScanner.hpp @@ -82,7 +82,7 @@ class TPackageScanner final : public AbstractHandler data, const std::function& vulnerabilityScan) { @@ -116,7 +116,7 @@ class TPackageScanner final : public AbstractHandleragentId().data(), data->agentVersion().data()); - packageData translatedPackage = {.name = translation.translatedProduct, + PackageData translatedPackage = {.name = translation.translatedProduct, .vendor = translation.translatedVendor, .format = data->packageFormat().data(), .version = data->packageVersion().data()}; @@ -149,7 +149,7 @@ class TPackageScanner final : public AbstractHandleragentId().data(), data->agentVersion().data()); - packageData translatedPackage = {.name = translation.translatedProduct, + PackageData translatedPackage = {.name = translation.translatedProduct, .vendor = translation.translatedVendor, .format = data->packageFormat().data(), .version = data->packageVersion().data()}; @@ -252,7 +252,7 @@ class TPackageScanner final : public AbstractHandler contextData) { @@ -323,7 +323,7 @@ class TPackageScanner final : public AbstractHandler contextData) { @@ -371,7 +371,7 @@ class TPackageScanner final : public AbstractHandler contextData) { @@ -548,7 +548,7 @@ class TPackageScanner final : public AbstractHandler contextData) { @@ -620,7 +620,7 @@ class TPackageScanner final : public AbstractHandler handleRequest(std::shared_ptr data) override { auto vulnerabilityScan = [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const NSVulnerabilityScanner::ScanVulnerabilityCandidate& callbackData) { try @@ -693,7 +693,7 @@ class TPackageScanner final : public AbstractHandleragentId().data(), data->agentVersion().data()); - packageData package = {.name = packageName, + PackageData package = {.name = packageName, .vendor = packageVendor, .format = data->packageFormat().data(), .version = data->packageVersion().data()}; diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp 
b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp index 2cedd0553b2..acef694fb92 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/resultIndexer.hpp @@ -51,8 +51,14 @@ class TResultIndexer final : public AbstractHandlerm_elements) + for (auto& [key, value] : data->m_elements) { + // Add no-index field to the json object, based on the scan context value. + // This field is used to determine if the element should be indexed or not. + // If the element is not indexed, it will not be published to the indexer. + // This is useful for elements that needs the trigger of agent to be indexed. + value["no-index"] = data->m_noIndex; + logDebug2(WM_VULNSCAN_LOGTAG, "Processing and publish key: %s", key.c_str()); if (value.contains("operation") && value.contains("id")) { diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp index d183605e1e2..a4b74b0d921 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp @@ -44,7 +44,7 @@ class TScanAgentList final : public AbstractHandler(variantData); + if (noIndex) + { + context->m_noIndex = true; + } + m_osScanSuborchestration->handleRequest(std::move(context)); } } @@ -126,7 +131,7 @@ class TScanAgentList final : public AbstractHandler(variantData); + if (noIndex) + { + context->m_noIndex = true; + } + m_packageScanSuborchestration->handleRequest(std::move(context)); } } @@ -238,8 +248,8 @@ class TScanAgentList final : public AbstractHandlerm_noIndex); + scanAgentPackages(agent, data->m_noIndex); } catch (const WdbDataException& e) { diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index 621e2d332ff..184007dac5d 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -559,6 +559,11 @@ struct TScanContext final { throw std::runtime_error("Unable to build scan context. Unknown action"); } + + if (message->contains("no-index") && message->at("no-index").get()) + { + m_noIndex = true; + } } else { @@ -1530,6 +1535,12 @@ struct TScanContext final * */ bool m_isFirstScan = true; + + /** + * @brief Variable to check if the scan is a no-index scan. + * @details This is used to avoid indexing the scan results. + */ + bool m_noIndex = false; // LCOV_EXCL_STOP private: /** diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp index 8efa0645427..b2e44839762 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp @@ -138,6 +138,24 @@ class TScanOrchestrator final : public TOSPrimitives logDebug2(WM_VULNSCAN_LOGTAG, "Postponed delayed event for agent %s", e.agentId().c_str()); throw std::runtime_error(e.what()); } + catch (const AgentReScanException& e) + { + logDebug2(WM_VULNSCAN_LOGTAG, + "AgentReScanException (Agent %s). 
Reason: %s", + e.agentId().c_str(), + e.what()); + + pushReScanToDelayedDispatcher(e.agentId()); + } + catch (const AgentReScanListException& e) + { + logDebug2(WM_VULNSCAN_LOGTAG, "AgentReScanListException. Reason: %s", e.what()); + for (const auto& agentData : e.agentList()) + { + pushReScanToDelayedDispatcher(agentData.id); + } + } + catch (const std::exception& e) { logError(WM_VULNSCAN_LOGTAG, "Error processing delayed event: %s", e.what()); @@ -156,6 +174,38 @@ class TScanOrchestrator final : public TOSPrimitives m_eventDelayedDispatcher->push(agentId, element); } + /** + * @brief Push a rescan event to the delayed dispatcher. + * @param agentId Agent ID. + */ + void pushReScanToDelayedDispatcher(const std::string& agentId, const bool noIndex = false) + { + nlohmann::json dataValueJson; + dataValueJson["action"] = "scanAgent"; + dataValueJson["agent_info"]["agent_id"] = agentId; + + if (noIndex) + { + dataValueJson["no-index"] = true; + } + + std::string dataValue = dataValueJson.dump(); + const std::vector message(dataValue.begin(), dataValue.end()); + + flatbuffers::FlatBufferBuilder builder; + auto object = CreateMessageBufferDirect(builder, + reinterpret_cast*>(&message), + BufferType::BufferType_JSON, + Utils::getSecondsFromEpoch()); + + builder.Finish(object); + + // Delete all delayed events for the agent, because we are going to rescan it. + m_eventDelayedDispatcher->clear(agentId); + m_eventDelayedDispatcher->push(agentId, + {reinterpret_cast(builder.GetBufferPointer()), builder.GetSize()}); + } + /** * @brief Process an event. * diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp index aebfa4b72b3..9e7a937429f 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp @@ -16,6 +16,7 @@ #include "defs.h" #include "loggerHelper.h" #include "messageBuffer_generated.h" +#include "scanOrchestrator.hpp" #include "wazuh_modules/vulnerability_scanner/src/policyManager/policyManager.hpp" #include "wdbDataException.hpp" #include "xzHelper.hpp" @@ -165,14 +166,15 @@ void VulnerabilityScannerFacade::initEventDispatcher() { logDebug2( WM_VULNSCAN_LOGTAG, "AgentReScanException (Agent %s). Reason: %s", e.agentId().c_str(), e.what()); - scanOrchestrator->pushEventToDelayedDispatcher(element, e.agentId()); + + scanOrchestrator->pushReScanToDelayedDispatcher(e.agentId()); } catch (const AgentReScanListException& e) { logDebug2(WM_VULNSCAN_LOGTAG, "AgentReScanListException. Reason: %s", e.what()); for (const auto& agentData : e.agentList()) { - scanOrchestrator->pushEventToDelayedDispatcher(element, agentData.id); + scanOrchestrator->pushReScanToDelayedDispatcher(agentData.id); } } catch (const std::exception& e) @@ -398,7 +400,24 @@ void VulnerabilityScannerFacade::start( // Database feed manager initialization. m_databaseFeedManager = std::make_shared( - m_indexerConnector, m_shouldStop, m_internalMutex, true, reloadGlobalMapsStartup, initContentUpdater); + m_indexerConnector, + m_shouldStop, + m_internalMutex, + true, + reloadGlobalMapsStartup, + initContentUpdater, + [this]() + { + // Create a JSON object 'dataValue' to specify the action as "cleanup." 
+ std::string dataValueCleanup = R"({"action":"cleanup","no-index":true})"; + const std::vector messageCleanup(dataValueCleanup.begin(), dataValueCleanup.end()); + pushEvent(messageCleanup, BufferType::BufferType_JSON); + + // Create a JSON object 'dataValue' to specify the action as "reboot." + std::string dataValueReScan = R"({"action":"reboot","no-index":true})"; + const std::vector messageReScan(dataValueReScan.begin(), dataValueReScan.end()); + pushEvent(messageReScan, BufferType::BufferType_JSON); + }); // Add subscribers for policy updates. policyManager.addSubscriber(m_databaseFeedManager); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp index 7fc88c43e2a..4e182287e7c 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/mocks/MockDatabaseFeedManager.hpp @@ -70,9 +70,9 @@ class MockDatabaseFeedManager MOCK_METHOD(void, getVulnerabilitiesCandidates, (const std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback), ()); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp index 2ebfc6dbfab..fe243de7fb9 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/databaseFeedManager_test.cpp @@ -477,13 +477,13 @@ TEST_F(DatabaseFeedManagerTest, DISABLED_GetVulnerabilityCandidatesSuccess) std::vector cves; - packageData package = {.name = PACKAGE_NAME}; + PackageData package = {.name = PACKAGE_NAME}; pDatabaseFeedManager->getVulnerabilitiesCandidates( CNA_NAME, package, [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const NSVulnerabilityScanner::ScanVulnerabilityCandidate& candidate) -> bool { auto cveId = candidate.cveId()->str(); @@ -532,7 +532,7 @@ TEST_F(DatabaseFeedManagerTest, GetVulnerabilityCandidatesCorrupted) TrampolineContentRegister, TrampolineRouterSubscriber>>(pIndexerConnectorTrap, shouldStop, mutex)}; - packageData package = {.name = CORRUPTED_PACKAGE_NAME}; + PackageData package = {.name = CORRUPTED_PACKAGE_NAME}; EXPECT_THROW( { @@ -540,7 +540,7 @@ TEST_F(DatabaseFeedManagerTest, GetVulnerabilityCandidatesCorrupted) CNA_NAME, package, [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const NSVulnerabilityScanner::ScanVulnerabilityCandidate& candidate) -> bool { return true; }); }, std::runtime_error); @@ -572,14 +572,14 @@ TEST_F(DatabaseFeedManagerTest, GetVulnerabilityCandidatesNoPackageName) TrampolineContentRegister, TrampolineRouterSubscriber>>(pIndexerConnectorTrap, shouldStop, mutex)}; - packageData package = {.name = ""}; + PackageData package = {.name = ""}; EXPECT_ANY_THROW({ pDatabaseFeedManager->getVulnerabilitiesCandidates( CNA_NAME, package, [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const NSVulnerabilityScanner::ScanVulnerabilityCandidate& candidate) -> bool { return true; }); }); } diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp index 4e10d11d580..914b7caf0ee 100644 --- 
a/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/packageScanner_test.cpp @@ -360,9 +360,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedEqualTo) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -460,9 +460,9 @@ TEST_F(PackageScannerTest, TestPackageUnaffectedEqualTo) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -550,9 +550,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThan) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -650,9 +650,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanVendorMissing) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -740,9 +740,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanVendorMismatch) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -830,9 +830,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanVendorMatch) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -930,9 +930,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanOrEqual) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -1030,9 +1030,9 @@ TEST_F(PackageScannerTest, TestPackageAffectedLessThanWithVersionNotZero) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -1130,9 +1130,9 @@ TEST_F(PackageScannerTest, TestPackageUnaffectedLessThan) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -1220,9 +1220,9 @@ TEST_F(PackageScannerTest, TestPackageDefaultStatusAffected) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -1319,9 +1319,9 @@ TEST_F(PackageScannerTest, TestPackageDefaultStatusUnaffected) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; @@ -1409,9 +1409,9 @@ TEST_F(PackageScannerTest, TestPackageGetVulnerabilitiesCandidatesGeneratesExcep { auto mockGetVulnerabilitiesCandidates = [&](const 
std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback) { throw std::runtime_error("Invalid package/cna name."); @@ -1816,9 +1816,9 @@ TEST_F(PackageScannerTest, TestGetTranslationFromL2) { auto mockGetVulnerabilitiesCandidates = [&](const std::string& cnaName, - const packageData& package, + const PackageData& package, const std::function& callback) { std::string candidatesFlatbufferSchemaStr; diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp index f7c80b5e5af..377dc3a3da3 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/resultIndexer_test.cpp @@ -77,7 +77,7 @@ void ResultIndexerTest::TearDown() */ TEST_F(ResultIndexerTest, TestHandleRequest) { - auto elementValue = nlohmann::json::parse(R"({"id": "id_test", "operation":"INSERTED"})"); + auto elementValue = nlohmann::json::parse(R"({"id": "id_test","no-index":false,"operation":"INSERTED"})"); spIndexerConnectorMock = std::make_shared(); EXPECT_CALL(*spIndexerConnectorMock, publish(elementValue.dump())).Times(1); From 6ea330366f2f77b788084b1efcd5d7c416ca6291 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Thu, 23 May 2024 16:25:40 -0300 Subject: [PATCH 168/419] Remove non necessary exception type. --- .../scanOrchestrator/agentReScanException.hpp | 63 ------------------- .../agentReScanListException.hpp | 17 ++++- .../src/scanOrchestrator/scanAgentList.hpp | 15 +---- .../src/scanOrchestrator/scanContext.hpp | 5 +- .../src/scanOrchestrator/scanOrchestrator.hpp | 20 ++---- .../src/vulnerabilityScannerFacade.cpp | 10 +-- .../tests/unit/scanAgentList_test.cpp | 2 +- 7 files changed, 25 insertions(+), 107 deletions(-) delete mode 100644 src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/agentReScanException.hpp diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/agentReScanException.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/agentReScanException.hpp deleted file mode 100644 index 1a3b87b5390..00000000000 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/agentReScanException.hpp +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Wazuh Vulnerability scanner - * Copyright (C) 2015, Wazuh Inc. - * April 5, 2024. - * - * This program is free software; you can redistribute it - * and/or modify it under the terms of the GNU General Public - * License (version 2) as published by the FSF - Free Software - * Foundation. - */ - -#ifndef _AGENT_RESCAN_EXCEPTION_HPP -#define _AGENT_RESCAN_EXCEPTION_HPP - -#include - -/** - * @brief AgentReScanException class. - * - */ -class AgentReScanException : public std::exception -{ -public: - /** - * @brief Overload what() method. - * - * @return const char* - */ - // LCOV_EXCL_START - const char* what() const noexcept override - { - return m_msg.what(); - } - - /** - * @brief Get agent id. 
- * - * @return const std::string& - */ - const std::string& agentId() const - { - return m_agentId; - } - - /** - * @brief Construct a new Os Data Exception object - * - * @param message - * @param agentId - */ - explicit AgentReScanException(const std::string& message, std::string_view agentId) - : m_msg {message} // NOLINT - , m_agentId {agentId} - { - } - // LCOV_EXCL_STOP - -private: - std::runtime_error m_msg; - std::string m_agentId; -}; - -#endif // _AGENT_RESCAN_EXCEPTION_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/agentReScanListException.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/agentReScanListException.hpp index 07823889ab8..9a956604d00 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/agentReScanListException.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/agentReScanListException.hpp @@ -43,15 +43,29 @@ class AgentReScanListException : public std::exception return m_agentList; } + /** + * @brief Get no-index. + * + * @return bool + */ + bool noIndex() const + { + return m_noIndex; + } + /** * @brief Construct a new Os Data Exception object * * @param message message * @param agentList list of agents + * @param noIndex no index */ - explicit AgentReScanListException(const std::string& message, const std::vector& agentList) + explicit AgentReScanListException(const std::string& message, + const std::vector& agentList, + const bool noIndex = false) : m_msg {message} // NOLINT , m_agentList {agentList} + , m_noIndex {noIndex} { } // LCOV_EXCL_STOP @@ -59,6 +73,7 @@ class AgentReScanListException : public std::exception private: std::runtime_error m_msg; std::vector m_agentList; + bool m_noIndex; }; #endif // _AGENT_RESCAN_LIST_EXCEPTION_HPP diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp index a4b74b0d921..28092972c02 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp @@ -12,7 +12,6 @@ #ifndef _SCAN_AGENT_LIST_HPP #define _SCAN_AGENT_LIST_HPP -#include "agentReScanException.hpp" #include "agentReScanListException.hpp" #include "chainOfResponsability.hpp" #include "loggerHelper.h" @@ -265,18 +264,8 @@ class TScanAgentList final : public AbstractHandlerm_agentsWithIncompletedScan.empty()) { - // If the rescan is only for one agent with incomplete scan, throw agentReScanException - if (data->m_agents.size() == 1) - { - throw AgentReScanException("Error executing rescan for agent.", - data->m_agentsWithIncompletedScan.front().id); - } - // If there are multiple agents with incomplete scan, throw agentReScanListException - else - { - throw AgentReScanListException("Error executing rescan for multiple agents.", - data->m_agentsWithIncompletedScan); - } + throw AgentReScanListException( + "Error executing rescan for multiple agents.", data->m_agentsWithIncompletedScan, data->m_noIndex); } return AbstractHandler>::handleRequest(std::move(data)); } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp index 184007dac5d..aadbdebc8e3 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanContext.hpp @@ -560,10 +560,7 @@ 
struct TScanContext final throw std::runtime_error("Unable to build scan context. Unknown action"); } - if (message->contains("no-index") && message->at("no-index").get()) - { - m_noIndex = true; - } + m_noIndex = message->contains("no-index") ? message->at("no-index").get() : false; } else { diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp index b2e44839762..699c96c8bdd 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp @@ -138,21 +138,12 @@ class TScanOrchestrator final : public TOSPrimitives logDebug2(WM_VULNSCAN_LOGTAG, "Postponed delayed event for agent %s", e.agentId().c_str()); throw std::runtime_error(e.what()); } - catch (const AgentReScanException& e) - { - logDebug2(WM_VULNSCAN_LOGTAG, - "AgentReScanException (Agent %s). Reason: %s", - e.agentId().c_str(), - e.what()); - - pushReScanToDelayedDispatcher(e.agentId()); - } catch (const AgentReScanListException& e) { logDebug2(WM_VULNSCAN_LOGTAG, "AgentReScanListException. Reason: %s", e.what()); for (const auto& agentData : e.agentList()) { - pushReScanToDelayedDispatcher(agentData.id); + pushReScanToDelayedDispatcher(agentData.id, e.noIndex()); } } @@ -177,17 +168,14 @@ class TScanOrchestrator final : public TOSPrimitives /** * @brief Push a rescan event to the delayed dispatcher. * @param agentId Agent ID. + * @param noIndex Flag to indicate if the elements should be indexed. */ - void pushReScanToDelayedDispatcher(const std::string& agentId, const bool noIndex = false) + void pushReScanToDelayedDispatcher(const std::string& agentId, const bool noIndex) { nlohmann::json dataValueJson; dataValueJson["action"] = "scanAgent"; dataValueJson["agent_info"]["agent_id"] = agentId; - - if (noIndex) - { - dataValueJson["no-index"] = true; - } + dataValueJson["no-index"] = noIndex; std::string dataValue = dataValueJson.dump(); const std::vector message(dataValue.begin(), dataValue.end()); diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp index 9e7a937429f..d3222bc9abb 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp @@ -10,7 +10,6 @@ */ #include "vulnerabilityScannerFacade.hpp" -#include "agentReScanException.hpp" #include "agentReScanListException.hpp" #include "archiveHelper.hpp" #include "defs.h" @@ -162,19 +161,12 @@ void VulnerabilityScannerFacade::initEventDispatcher() logDebug2(WM_VULNSCAN_LOGTAG, "WdbDataException (Agent %s). Reason: %s", e.agentId().c_str(), e.what()); scanOrchestrator->pushEventToDelayedDispatcher(element, e.agentId()); } - catch (const AgentReScanException& e) - { - logDebug2( - WM_VULNSCAN_LOGTAG, "AgentReScanException (Agent %s). Reason: %s", e.agentId().c_str(), e.what()); - - scanOrchestrator->pushReScanToDelayedDispatcher(e.agentId()); - } catch (const AgentReScanListException& e) { logDebug2(WM_VULNSCAN_LOGTAG, "AgentReScanListException. 
Reason: %s", e.what()); for (const auto& agentData : e.agentList()) { - scanOrchestrator->pushReScanToDelayedDispatcher(agentData.id); + scanOrchestrator->pushReScanToDelayedDispatcher(agentData.id, e.noIndex()); } } catch (const std::exception& e) diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp index e615b3d37fb..ccaf712e2b7 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/scanAgentList_test.cpp @@ -267,7 +267,7 @@ TEST_F(ScanAgentListTest, OneRecoverableException) auto contextData = std::make_shared(data); contextData->m_agents.push_back({"001", "test_agent_name", "4.8.0", "192.168.0.1"}); - EXPECT_THROW(scanAgentList->handleRequest(contextData), AgentReScanException); + EXPECT_THROW(scanAgentList->handleRequest(contextData), AgentReScanListException); spSocketDBWrapperMock.reset(); } From 75b549847e2e981ee9819c29117282577b9a388f Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Thu, 23 May 2024 23:17:25 -0300 Subject: [PATCH 169/419] Fix persistent queue initialization and add more tests. --- .../src/indexerConnector.cpp | 6 +- src/shared_modules/utils/rocksDBQueueCF.hpp | 4 +- .../utils/tests/threadSafeMultiQueue_test.cpp | 33 ++++ src/shared_modules/utils/timeHelper.h | 3 +- .../001/agentHotfixesData.json | 172 ------------------ .../qa/test_data_policy/001/expected_003.out | 1 - .../002/agentHotfixesData.json | 168 ----------------- .../003/agentHotfixesData.json | 107 +---------- .../003/agentHotfixesDataNoSync.json | 6 + .../qa/test_data_policy/003/agentOsData.json | 17 ++ .../003/agentPackagesData.json | 24 +++ .../qa/test_data_policy/003/args_003.json | 2 +- .../qa/test_data_policy/003/args_004.json | 2 +- .../qa/test_data_policy/003/args_005.json | 2 +- .../qa/test_data_policy/003/args_006.json | 19 ++ .../qa/test_data_policy/003/expected_003.out | 8 +- .../qa/test_data_policy/003/expected_004.out | 3 +- .../qa/test_data_policy/003/expected_005.out | 13 +- .../qa/test_data_policy/003/expected_006.out | 14 ++ .../qa/test_data_policy/003/globalData.json | 31 ++++ .../004/agentHotfixesData.json | 167 +---------------- .../agentReScanListException.hpp | 4 +- .../scanOrchestrator/factoryOrchestrator.hpp | 7 +- .../src/scanOrchestrator/scanAgentList.hpp | 12 +- .../src/scanOrchestrator/scanOrchestrator.hpp | 13 +- .../src/vulnerabilityScannerFacade.cpp | 10 - .../tests/unit/factoryOrchestrator_test.cpp | 7 +- 27 files changed, 183 insertions(+), 672 deletions(-) create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesDataNoSync.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_006.json create mode 100644 src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_006.out diff --git a/src/shared_modules/indexer_connector/src/indexerConnector.cpp b/src/shared_modules/indexer_connector/src/indexerConnector.cpp index f147f584bba..4626de24256 100644 --- a/src/shared_modules/indexer_connector/src/indexerConnector.cpp +++ b/src/shared_modules/indexer_connector/src/indexerConnector.cpp @@ -384,11 +384,11 @@ IndexerConnector::IndexerConnector( auto parsedData = nlohmann::json::parse(data); const auto& id = parsedData.at("id").get_ref(); // If the element should not be indexed, only delete it from the sync database. 
- const bool shouldIndex = !parsedData.contains("no-index") || !parsedData.at("no-index").get(); + const bool noIndex = parsedData.contains("no-index") ? parsedData.at("no-index").get() : false; if (parsedData.at("operation").get_ref().compare("DELETED") == 0) { - if (shouldIndex) + if (!noIndex) { builderBulkDelete(bulkData, id, m_indexName); } @@ -397,7 +397,7 @@ IndexerConnector::IndexerConnector( else { const auto dataString = parsedData.at("data").dump(); - if (shouldIndex) + if (!noIndex) { builderBulkIndex(bulkData, id, m_indexName, dataString); } diff --git a/src/shared_modules/utils/rocksDBQueueCF.hpp b/src/shared_modules/utils/rocksDBQueueCF.hpp index 4bb6bac70ea..0ee0eae9d6d 100644 --- a/src/shared_modules/utils/rocksDBQueueCF.hpp +++ b/src/shared_modules/utils/rocksDBQueueCF.hpp @@ -42,6 +42,7 @@ class RocksDBQueueCF final constexpr auto QUEUE_NUMBER = 1; auto it = std::unique_ptr(m_db->NewIterator(rocksdb::ReadOptions())); + it->SeekToFirst(); while (it->Valid()) { // Split key to get the ID and queue number. @@ -51,7 +52,8 @@ class RocksDBQueueCF final if (m_queueMetadata.find(id.data()) == m_queueMetadata.end()) { - m_queueMetadata.emplace(id, QueueMetadata {1, 0, 0, std::chrono::system_clock::now()}); + m_queueMetadata.emplace(id, + QueueMetadata {queueNumber, queueNumber, 0, std::chrono::system_clock::now()}); } auto& element = m_queueMetadata[id]; diff --git a/src/shared_modules/utils/tests/threadSafeMultiQueue_test.cpp b/src/shared_modules/utils/tests/threadSafeMultiQueue_test.cpp index 576a50d935c..022a3cb90c8 100644 --- a/src/shared_modules/utils/tests/threadSafeMultiQueue_test.cpp +++ b/src/shared_modules/utils/tests/threadSafeMultiQueue_test.cpp @@ -184,3 +184,36 @@ TEST_F(ThreadSafeMultiQueueTest, ClearAll) EXPECT_EQ(0, queue.size("002")); EXPECT_TRUE(queue.empty()); } + +TEST_F(ThreadSafeMultiQueueTest, LoadAfterStop) +{ + { + Utils::TSafeMultiQueue> + queue(RocksDBQueueCF("test")); + + rocksdb::Slice slice("DATA"); + queue.push("000", slice); + queue.push("001", slice); + queue.push("002", slice); + EXPECT_FALSE(queue.empty()); + EXPECT_EQ(1, queue.size("000")); + EXPECT_EQ(1, queue.size("001")); + EXPECT_EQ(1, queue.size("002")); + } + + Utils:: + TSafeMultiQueue> + queue(RocksDBQueueCF("test")); + + EXPECT_FALSE(queue.empty()); + EXPECT_EQ(1, queue.size("000")); + EXPECT_EQ(1, queue.size("001")); + EXPECT_EQ(1, queue.size("002")); + queue.clear(""); + EXPECT_EQ(0, queue.size("000")); + EXPECT_EQ(0, queue.size("001")); + EXPECT_EQ(0, queue.size("002")); + EXPECT_TRUE(queue.empty()); +} diff --git a/src/shared_modules/utils/timeHelper.h b/src/shared_modules/utils/timeHelper.h index 037264611de..82f1c7bab35 100644 --- a/src/shared_modules/utils/timeHelper.h +++ b/src/shared_modules/utils/timeHelper.h @@ -142,8 +142,7 @@ namespace Utils } /** - * @brief Get seconds from epoch. - * This method is used to get the seconds from epoch. + * @brief Get seconds from epoch, since 1970-01-01 00:00:00 UTC. * @return seconds from epoch. 
*/ static int64_t getSecondsFromEpoch() diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentHotfixesData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentHotfixesData.json index f5c66184a1e..2c63c085104 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentHotfixesData.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/agentHotfixesData.json @@ -1,174 +1,2 @@ { - "1001": - [ - { - "hotfix": "KB2468871" - }, - { - "hotfix": "KB2478063" - }, - { - "hotfix": "KB2533523" - }, - { - "hotfix": "KB2544514" - }, - { - "hotfix": "KB2600211" - }, - { - "hotfix": "KB2600217" - }, - { - "hotfix": "KB4502496" - }, - { - "hotfix": "KB4512577" - }, - { - "hotfix": "KB4512578" - }, - { - "hotfix": "KB4514366" - }, - { - "hotfix": "KB4535680" - }, - { - "hotfix": "KB4535684" - }, - { - "hotfix": "KB4535685" - }, - { - "hotfix": "KB4577586" - }, - { - "hotfix": "KB4580325" - }, - { - "hotfix": "KB4589208" - }, - { - "hotfix": "KB4601558" - }, - { - "hotfix": "KB5003171" - }, - { - "hotfix": "KB5003243" - }, - { - "hotfix": "KB5034619" - }, - { - "hotfix": "KB5034768" - }, - { - "hotfix": "KB5034863" - } - ], - "1002": - [ - { - "hotfix": "KB2468871" - }, - { - "hotfix": "KB2478063" - }, - { - "hotfix": "KB2533523" - }, - { - "hotfix": "KB2544514" - }, - { - "hotfix": "KB2600211" - }, - { - "hotfix": "KB2600217" - }, - { - "hotfix": "KB5008882" - }, - { - "hotfix": "KB5010523" - }, - { - "hotfix": "KB5011497" - } - ], - "1003": - [ - { - "hotfix": "KB2468871" - }, - { - "hotfix": "KB2478063" - }, - { - "hotfix": "KB2533523" - }, - { - "hotfix": "KB2544514" - }, - { - "hotfix": "KB2600211" - }, - { - "hotfix": "KB2600217" - }, - { - "hotfix": "KB4502496" - }, - { - "hotfix": "KB4512577" - }, - { - "hotfix": "KB4512578" - }, - { - "hotfix": "KB4514366" - }, - { - "hotfix": "KB4535680" - }, - { - "hotfix": "KB4535684" - }, - { - "hotfix": "KB4535685" - }, - { - "hotfix": "KB4577586" - }, - { - "hotfix": "KB4580325" - }, - { - "hotfix": "KB4589208" - }, - { - "hotfix": "KB4601558" - }, - { - "hotfix": "KB5003171" - }, - { - "hotfix": "KB5003243" - }, - { - "hotfix": "KB5034619" - }, - { - "hotfix": "KB5034768" - }, - { - "hotfix": "KB5034863" - } - ], - "1004": - { - "status": "NOT_SYNCED" - } } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out index 7bc9913b606..e0d4fb08b29 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/001/expected_003.out @@ -1,7 +1,6 @@ [ "Vulnerability scanner module started", "Policy changed. Re-scanning all agents", - "Event type: 9 processed", "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", "Fetched 2 agents from Wazuh-DB.", "Translation for package 'gzip' in platform 'ubuntu' not found. 
Using provided packageName.", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/agentHotfixesData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/agentHotfixesData.json index 7bd280f89cc..2c63c085104 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/agentHotfixesData.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/002/agentHotfixesData.json @@ -1,170 +1,2 @@ { - "1001": [ - { - "hotfix": "KB2468871" - }, - { - "hotfix": "KB2478063" - }, - { - "hotfix": "KB2533523" - }, - { - "hotfix": "KB2544514" - }, - { - "hotfix": "KB2600211" - }, - { - "hotfix": "KB2600217" - }, - { - "hotfix": "KB4502496" - }, - { - "hotfix": "KB4512577" - }, - { - "hotfix": "KB4512578" - }, - { - "hotfix": "KB4514366" - }, - { - "hotfix": "KB4535680" - }, - { - "hotfix": "KB4535684" - }, - { - "hotfix": "KB4535685" - }, - { - "hotfix": "KB4577586" - }, - { - "hotfix": "KB4580325" - }, - { - "hotfix": "KB4589208" - }, - { - "hotfix": "KB4601558" - }, - { - "hotfix": "KB5003171" - }, - { - "hotfix": "KB5003243" - }, - { - "hotfix": "KB5034619" - }, - { - "hotfix": "KB5034768" - }, - { - "hotfix": "KB5034863" - } - ], - "1002": [ - { - "hotfix": "KB2468871" - }, - { - "hotfix": "KB2478063" - }, - { - "hotfix": "KB2533523" - }, - { - "hotfix": "KB2544514" - }, - { - "hotfix": "KB2600211" - }, - { - "hotfix": "KB2600217" - }, - { - "hotfix": "KB5008882" - }, - { - "hotfix": "KB5010523" - }, - { - "hotfix": "KB5011497" - } - ], - "1003": [ - { - "hotfix": "KB2468871" - }, - { - "hotfix": "KB2478063" - }, - { - "hotfix": "KB2533523" - }, - { - "hotfix": "KB2544514" - }, - { - "hotfix": "KB2600211" - }, - { - "hotfix": "KB2600217" - }, - { - "hotfix": "KB4502496" - }, - { - "hotfix": "KB4512577" - }, - { - "hotfix": "KB4512578" - }, - { - "hotfix": "KB4514366" - }, - { - "hotfix": "KB4535680" - }, - { - "hotfix": "KB4535684" - }, - { - "hotfix": "KB4535685" - }, - { - "hotfix": "KB4577586" - }, - { - "hotfix": "KB4580325" - }, - { - "hotfix": "KB4589208" - }, - { - "hotfix": "KB4601558" - }, - { - "hotfix": "KB5003171" - }, - { - "hotfix": "KB5003243" - }, - { - "hotfix": "KB5034619" - }, - { - "hotfix": "KB5034768" - }, - { - "hotfix": "KB5034863" - } - ], - "1004": { - "status": "NOT_SYNCED" - } } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json index f5c66184a1e..07b00297dfe 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json @@ -1,5 +1,5 @@ { - "1001": + "002": [ { "hotfix": "KB2468871" @@ -67,108 +67,5 @@ { "hotfix": "KB5034863" } - ], - "1002": - [ - { - "hotfix": "KB2468871" - }, - { - "hotfix": "KB2478063" - }, - { - "hotfix": "KB2533523" - }, - { - "hotfix": "KB2544514" - }, - { - "hotfix": "KB2600211" - }, - { - "hotfix": "KB2600217" - }, - { - "hotfix": "KB5008882" - }, - { - "hotfix": "KB5010523" - }, - { - "hotfix": "KB5011497" - } - ], - "1003": - [ - { - "hotfix": "KB2468871" - }, - { - "hotfix": "KB2478063" - }, - { - "hotfix": "KB2533523" - }, - { - "hotfix": "KB2544514" - }, - { - "hotfix": "KB2600211" - }, - { - "hotfix": "KB2600217" - }, - { - "hotfix": "KB4502496" - }, - { - "hotfix": "KB4512577" - }, - { - "hotfix": "KB4512578" - }, - { - "hotfix": "KB4514366" - }, - { - "hotfix": "KB4535680" - }, - { - "hotfix": "KB4535684" - }, 
- { - "hotfix": "KB4535685" - }, - { - "hotfix": "KB4577586" - }, - { - "hotfix": "KB4580325" - }, - { - "hotfix": "KB4589208" - }, - { - "hotfix": "KB4601558" - }, - { - "hotfix": "KB5003171" - }, - { - "hotfix": "KB5003243" - }, - { - "hotfix": "KB5034619" - }, - { - "hotfix": "KB5034768" - }, - { - "hotfix": "KB5034863" - } - ], - "1004": - { - "status": "NOT_SYNCED" - } + ] } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesDataNoSync.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesDataNoSync.json new file mode 100644 index 00000000000..ca64fff38a1 --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesDataNoSync.json @@ -0,0 +1,6 @@ +{ + "002": + { + "status": "NOT_SYNCED" + } +} diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentOsData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentOsData.json index 0fe7b6af977..0afe7c14744 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentOsData.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentOsData.json @@ -35,5 +35,22 @@ "sysname": "Linux", "version": "#1 SMP Thu Mar 23 09:54:12 UTC 2023" } + ], + "002": [ + { + "scan_id": 0, + "scan_time": "2024/05/23 20:02:40", + "hostname": "DESKTOP-AAABBBCCC", + "architecture": "x86_64", + "os_name": "Microsoft Windows 10 Pro", + "os_version": "10.0.19045.4355", + "os_major": "10", + "os_minor": "0", + "os_build": "19045.4355", + "os_platform": "windows", + "os_release": "2009", + "os_display_version": "22H2", + "reference": "c6b7757cad89374fd88ccf67dc9cbfbd14aa02e0" + } ] } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json index 27bf67a941b..2e2c39c0a2a 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json @@ -46,5 +46,29 @@ "checksum": "70901207054653e2ef475cad7b77d31c4757b16d", "item_id": "6a15840a129f0021c18e7a09e88e1dc7f1ef84b0" } + ], + "002": [ + { + "scan_id": "0", + "scan_time": "2024/01/11 00:05:58", + "format": "rpm", + "name": "lua-libs", + "priority": "", + "section": "Development/Languages", + "size": 247936, + "vendor": "Red Hat, Inc.", + "install_time": "1698808920", + "version": "5.3.4-11.el8", + "architecture": "x86_64", + "multiarch": "", + "source": "", + "description": "Libraries for lua", + "location": "", + "triaged": "0", + "cpe": "", + "msu_name": "", + "checksum": "70901207054653e2ef475cad7b77d31c4757b16d", + "item_id": "6a15840a129f0021c18e7a09e88e1dc7f1ef84b0" + } ] } diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_003.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_003.json index 5c5f8a40711..e11daa9195f 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_003.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_003.json @@ -8,7 +8,7 @@ "-s", "120", "-h", - "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesDataNoSync.json", "-p", "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json", "-b", diff --git 
a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_004.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_004.json index 9582806d786..5c5f8a40711 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_004.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_004.json @@ -1,6 +1,6 @@ [ "-c", - "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configDisabledAndManagerDisabled.json", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/config.json", "-t", "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", "-l", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_005.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_005.json index ea9362b5e0c..9582806d786 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_005.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_005.json @@ -1,6 +1,6 @@ [ "-c", - "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configManagerDisabled.json", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configDisabledAndManagerDisabled.json", "-t", "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", "-l", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_006.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_006.json new file mode 100644 index 00000000000..ea9362b5e0c --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/args_006.json @@ -0,0 +1,19 @@ +[ + "-c", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/configManagerDisabled.json", + "-t", + "wazuh_modules/vulnerability_scanner/indexer/template/index-template.json", + "-l", + "log.out", + "-s", + "120", + "-h", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentHotfixesData.json", + "-p", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentPackagesData.json", + "-b", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/agentOsData.json", + "-g", + "wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/globalData.json", + "-u" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out index a8483a09e0c..be39c326aa8 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_003.out @@ -1,9 +1,8 @@ [ "Vulnerability scanner module started", "Policy changed. Re-scanning all agents", - "Event type: 9 processed", "Vulnerability scan for OS 'Ubuntu' on Agent '000' has completed.", - "Fetched 2 agents from Wazuh-DB.", + "Fetched 3 agents from Wazuh-DB.", "Translation for package 'gzip' in platform 'ubuntu' not found. Using provided packageName.", "Initiating a vulnerability scan for package 'gzip' (deb) (ubuntu developers ) with CVE Numbering Authorities (CNA) 'canonical'", "Scanning package - 'gzip' (Installed Version: 1.10-0ubuntu4.1, Security Vulnerability: CVE-2022-1271). Identified vulnerability: Version: 0. Required Version Threshold: 1.10-4ubuntu4. Required Version Threshold (or Equal): .", @@ -15,8 +14,9 @@ "Translation for package 'lua-libs' in platform 'rhel' not found. 
Using provided packageName.", "Initiating a vulnerability scan for package 'lua-libs' (rpm) (red hat, inc.) with CVE Numbering Authorities (CNA) 'redhat_8' on Agent 'agent_redhat_8' (ID: '001', Version: 'v4.7.1').", "Vulnerability scan for package 'lua-libs' on Agent '001' has completed.", - "Event type: 7 processed", "Inserting agent package key: 001_6a15840a129f0021c18e7a09e88e1dc7f1ef84b0 -> CVE-2020-24370", "Building event details for component type: 1", - "Processing and publish key: CVE-2020-24370" + "Processing and publish key: CVE-2020-24370", + "Error executing query to fetch agent data for agents. Reason: DB query not synced.", + "AgentReScanListException. Reason: Error executing rescan for multiple agents." ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_004.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_004.out index e2416840b32..a67607212eb 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_004.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_004.out @@ -1,3 +1,4 @@ [ - "Vulnerability scanner module is disabled" + "Vulnerability scanner module started", + "Event type: 8 processed" ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out index e0137cd698b..e2416840b32 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_005.out @@ -1,14 +1,3 @@ [ - "Vulnerability scanner module started", - "Policy changed. Re-scanning all agents", - "Fetched 1 agents from Wazuh-DB.", - "Skipping manager agent with id 0.", - "Vulnerability scan for OS 'enterprise_linux' on Agent '001' has completed.", - "Translation for package 'lua-libs' in platform 'rhel' not found. Using provided packageName.", - "Initiating a vulnerability scan for package 'lua-libs' (rpm) (red hat, inc.) with CVE Numbering Authorities (CNA) 'redhat_8' on Agent 'agent_redhat_8' (ID: '001', Version: 'v4.7.1').", - "Vulnerability scan for package 'lua-libs' on Agent '001' has completed.", - "Event type: 7 processed", - "Inserting agent package key: 001_6a15840a129f0021c18e7a09e88e1dc7f1ef84b0 -> CVE-2020-24370", - "Building event details for component type: 1", - "Processing and publish key: CVE-2020-24370" + "Vulnerability scanner module is disabled" ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_006.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_006.out new file mode 100644 index 00000000000..4d97356d8ad --- /dev/null +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_006.out @@ -0,0 +1,14 @@ +[ + "Vulnerability scanner module started", + "Policy changed. Re-scanning all agents.", + "Fetched 2 agents from Wazuh-DB.", + "Skipping manager agent with id 0.", + "Vulnerability scan for OS 'enterprise_linux' on Agent '001' has completed.", + "Translation for package 'lua-libs' in platform 'rhel' not found. Using provided packageName.", + "Initiating a vulnerability scan for package 'lua-libs' (rpm) (red hat, inc.) 
with CVE Numbering Authorities (CNA) 'redhat_8' on Agent 'agent_redhat_8' (ID: '001', Version: 'v4.7.1').", + "Vulnerability scan for package 'lua-libs' on Agent '001' has completed.", + "Event type: 7 processed", + "Inserting agent package key: 001_6a15840a129f0021c18e7a09e88e1dc7f1ef84b0 -> CVE-2020-24370", + "Building event details for component type: 1", + "Processing and publish key: CVE-2020-24370" +] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/globalData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/globalData.json index 5025d61115f..d6ca3e34083 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/globalData.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/globalData.json @@ -54,5 +54,36 @@ "group_sync_status": "synced", "sync_status": "synced", "connection_status": "disconnected" + }, + { + "id": 2, + "name": "DESKTOP-AAABBBCCC", + "ip": "10.0.0.19", + "register_ip": "any", + "internal_key": "40524bd57887186746949efe6da10d8c7249dc29c9316168d0e3d827eb7d053c", + "os_name": "Microsoft Windows 10 Pro", + "os_version": "10.0.19045.4355", + "os_major": "10", + "os_minor": "0", + "os_codename": null, + "os_build": "19045.4355", + "os_platform": "windows", + "os_uname": "Microsoft Windows 10 Pro", + "os_arch": null, + "version": "Wazuh v4.7.2", + "config_sum": "ab73af41699f13fdd81903b5f23d8d00", + "merged_sum": "d0f51333bf62a0d4dbe082107c77e1a4", + "manager_host": "WINDOWS-HOST", + "node_name": "node01", + "date_add": 1715144352, + "last_keepalive": 1716494809, + "group": "default", + "group_hash": "37a8eec1", + "group_sync_status": "synced", + "sync_status": "synced", + "connection_status": "active", + "disconnection_time": 0, + "group_config_status": "synced", + "status_code": 0 } ] diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentHotfixesData.json b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentHotfixesData.json index 7bd280f89cc..bba9f2b8fd7 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentHotfixesData.json +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/004/agentHotfixesData.json @@ -1,170 +1,5 @@ { - "1001": [ - { - "hotfix": "KB2468871" - }, - { - "hotfix": "KB2478063" - }, - { - "hotfix": "KB2533523" - }, - { - "hotfix": "KB2544514" - }, - { - "hotfix": "KB2600211" - }, - { - "hotfix": "KB2600217" - }, - { - "hotfix": "KB4502496" - }, - { - "hotfix": "KB4512577" - }, - { - "hotfix": "KB4512578" - }, - { - "hotfix": "KB4514366" - }, - { - "hotfix": "KB4535680" - }, - { - "hotfix": "KB4535684" - }, - { - "hotfix": "KB4535685" - }, - { - "hotfix": "KB4577586" - }, - { - "hotfix": "KB4580325" - }, - { - "hotfix": "KB4589208" - }, - { - "hotfix": "KB4601558" - }, - { - "hotfix": "KB5003171" - }, - { - "hotfix": "KB5003243" - }, - { - "hotfix": "KB5034619" - }, - { - "hotfix": "KB5034768" - }, - { - "hotfix": "KB5034863" - } - ], - "1002": [ - { - "hotfix": "KB2468871" - }, - { - "hotfix": "KB2478063" - }, - { - "hotfix": "KB2533523" - }, - { - "hotfix": "KB2544514" - }, - { - "hotfix": "KB2600211" - }, - { - "hotfix": "KB2600217" - }, - { - "hotfix": "KB5008882" - }, - { - "hotfix": "KB5010523" - }, - { - "hotfix": "KB5011497" - } - ], - "1003": [ - { - "hotfix": "KB2468871" - }, - { - "hotfix": "KB2478063" - }, - { - "hotfix": "KB2533523" - }, - { - "hotfix": "KB2544514" - }, - { - "hotfix": "KB2600211" - }, - { - "hotfix": "KB2600217" - }, - { - "hotfix": "KB4502496" - }, - 
{ - "hotfix": "KB4512577" - }, - { - "hotfix": "KB4512578" - }, - { - "hotfix": "KB4514366" - }, - { - "hotfix": "KB4535680" - }, - { - "hotfix": "KB4535684" - }, - { - "hotfix": "KB4535685" - }, - { - "hotfix": "KB4577586" - }, - { - "hotfix": "KB4580325" - }, - { - "hotfix": "KB4589208" - }, - { - "hotfix": "KB4601558" - }, - { - "hotfix": "KB5003171" - }, - { - "hotfix": "KB5003243" - }, - { - "hotfix": "KB5034619" - }, - { - "hotfix": "KB5034768" - }, - { - "hotfix": "KB5034863" - } - ], - "1004": { + "3": { "status": "NOT_SYNCED" } } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/agentReScanListException.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/agentReScanListException.hpp index 9a956604d00..9d9645bda77 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/agentReScanListException.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/agentReScanListException.hpp @@ -44,9 +44,9 @@ class AgentReScanListException : public std::exception } /** - * @brief Get no-index. + * @brief Get no-index value * - * @return bool + * @return This value is used to indicate that the agent list should not be indexed. */ bool noIndex() const { diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp index 1221b784465..998af7f5ba7 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/factoryOrchestrator.hpp @@ -139,8 +139,11 @@ class TFactoryOrchestrator final break; case ScannerType::ReScanAllAgents: - orchestration = std::make_shared(); - orchestration->setNext(std::make_shared( + + orchestration = std::make_shared(inventoryDatabase, + std::make_shared(indexerConnector)); + orchestration->setLast(std::make_shared()); + orchestration->setLast(std::make_shared( TFactoryOrchestrator::create(ScannerType::PackageInsert, databaseFeedManager, indexerConnector, diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp index 28092972c02..00f709771b2 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanAgentList.hpp @@ -114,11 +114,7 @@ class TScanAgentList final : public AbstractHandler(variantData); - - if (noIndex) - { - context->m_noIndex = true; - } + context->m_noIndex = noIndex; m_osScanSuborchestration->handleRequest(std::move(context)); } @@ -200,11 +196,7 @@ class TScanAgentList final : public AbstractHandler(variantData); - - if (noIndex) - { - context->m_noIndex = true; - } + context->m_noIndex = noIndex; m_packageScanSuborchestration->handleRequest(std::move(context)); } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp index 699c96c8bdd..53e7b3ffbdb 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/scanOrchestrator.hpp @@ -144,9 +144,10 @@ class TScanOrchestrator final : public TOSPrimitives for (const auto& agentData : e.agentList()) { pushReScanToDelayedDispatcher(agentData.id, e.noIndex()); + 
m_eventDelayedDispatcher->postpone(agentData.id, + std::chrono::seconds(DELAYED_POSTPONE_SECONDS)); } } - catch (const std::exception& e) { logError(WM_VULNSCAN_LOGTAG, "Error processing delayed event: %s", e.what()); @@ -188,8 +189,6 @@ class TScanOrchestrator final : public TOSPrimitives builder.Finish(object); - // Delete all delayed events for the agent, because we are going to rescan it. - m_eventDelayedDispatcher->clear(agentId); m_eventDelayedDispatcher->push(agentId, {reinterpret_cast(builder.GetBufferPointer()), builder.GetSize()}); } @@ -276,20 +275,20 @@ class TScanOrchestrator final : public TOSPrimitives break; // LCOV_EXCL_START case ScannerType::ReScanAllAgents: - m_eventDelayedDispatcher->clear(); m_reScanAllOrchestration->handleRequest(std::move(context)); + m_eventDelayedDispatcher->clear(); break; case ScannerType::ReScanSingleAgent: + m_reScanOrchestration->handleRequest(context); m_eventDelayedDispatcher->clear(context->agentId()); - m_reScanOrchestration->handleRequest(std::move(context)); break; case ScannerType::CleanupAllAgentData: - m_eventDelayedDispatcher->clear(); m_cleanUpDataOrchestration->handleRequest(std::move(context)); + m_eventDelayedDispatcher->clear(); break; case ScannerType::CleanupSingleAgentData: + m_deleteAgentScanOrchestration->handleRequest(context); m_eventDelayedDispatcher->clear(context->agentId()); - m_deleteAgentScanOrchestration->handleRequest(std::move(context)); break; case ScannerType::GlobalSyncInventory: m_inventorySyncOrchestration->handleRequest(std::move(context)); diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp index d3222bc9abb..5bcba8a1836 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp @@ -292,11 +292,6 @@ void VulnerabilityScannerFacade::handlePolicyChange(Utils::RocksDBWrapper& state { logInfo(WM_VULNSCAN_LOGTAG, "Policy changed. Re-scanning all agents"); - // Create and queue the cleanup event - std::string dataValueCleanup = R"({"action":"cleanup"})"; - const std::vector messageCleanup(dataValueCleanup.begin(), dataValueCleanup.end()); - pushEvent(messageCleanup, BufferType::BufferType_JSON); - // Create and queue the reboot event (force scan) std::string dataValueReScan = R"({"action":"reboot"})"; const std::vector messageReScan(dataValueReScan.begin(), dataValueReScan.end()); @@ -400,11 +395,6 @@ void VulnerabilityScannerFacade::start( initContentUpdater, [this]() { - // Create a JSON object 'dataValue' to specify the action as "cleanup." - std::string dataValueCleanup = R"({"action":"cleanup","no-index":true})"; - const std::vector messageCleanup(dataValueCleanup.begin(), dataValueCleanup.end()); - pushEvent(messageCleanup, BufferType::BufferType_JSON); - // Create a JSON object 'dataValue' to specify the action as "reboot." 
std::string dataValueReScan = R"({"action":"reboot","no-index":true})"; const std::vector messageReScan(dataValueReScan.begin(), dataValueReScan.end()); diff --git a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp index 231c0ce6705..34b7cde0b40 100644 --- a/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp +++ b/src/wazuh_modules/vulnerability_scanner/tests/unit/factoryOrchestrator_test.cpp @@ -380,9 +380,10 @@ TEST_F(FactoryOrchestratorTest, TestCreationReScanAllAgents) auto context = std::make_shared>(); EXPECT_NO_THROW(orchestration->handleRequest(context)); - EXPECT_EQ(context->size(), 2); - EXPECT_EQ(context->at(0), ScannerMockID::BUILD_ALL_AGENT_LIST_CONTEXT); - EXPECT_EQ(context->at(1), ScannerMockID::SCAN_AGENT_LIST); + EXPECT_EQ(context->size(), 3); + EXPECT_EQ(context->at(0), ScannerMockID::CLEAN_ALL_AGENT_INVENTORY); + EXPECT_EQ(context->at(1), ScannerMockID::BUILD_ALL_AGENT_LIST_CONTEXT); + EXPECT_EQ(context->at(2), ScannerMockID::SCAN_AGENT_LIST); } /** From 0d888fded03821b0f86e4253c80a2225548e01bc Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 24 May 2024 00:24:02 -0300 Subject: [PATCH 170/419] Add log and fix some tests after rebase. --- .../qa/test_data_policy/003/expected_006.out | 2 +- .../qa/test_data_policy/005/expected_002.out | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_006.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_006.out index 4d97356d8ad..b975383d912 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_006.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/003/expected_006.out @@ -1,6 +1,6 @@ [ "Vulnerability scanner module started", - "Policy changed. Re-scanning all agents.", + "Policy changed. Re-scanning all agents", "Fetched 2 agents from Wazuh-DB.", "Skipping manager agent with id 0.", "Vulnerability scan for OS 'enterprise_linux' on Agent '001' has completed.", diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/expected_002.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/expected_002.out index 7d0b792549b..9d721fa2d60 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/expected_002.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data_policy/005/expected_002.out @@ -7,6 +7,5 @@ "Translation for package 'lua-libs' in platform 'rhel' not found. Using provided packageName.", "Initiating a vulnerability scan for package 'lua-libs' (rpm) (red hat, inc.) 
with CVE Numbering Authorities (CNA) 'redhat_8' on Agent 'agent_redhat_8' (ID: '001', Version: 'v4.7.1').", "Vulnerability scan for package 'lua-libs' on Agent '001' has completed.", - "Event type: 7 processed", - "Event type: 9 processed" + "Event type: 7 processed" ] From 71aabbf77baa1f101cdfea412b416b7aae5b7dcf Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 24 May 2024 00:32:41 -0300 Subject: [PATCH 171/419] Add log for content update --- .../vulnerability_scanner/src/vulnerabilityScannerFacade.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp index 5bcba8a1836..76261a89fbe 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp @@ -399,6 +399,7 @@ void VulnerabilityScannerFacade::start( std::string dataValueReScan = R"({"action":"reboot","no-index":true})"; const std::vector messageReScan(dataValueReScan.begin(), dataValueReScan.end()); pushEvent(messageReScan, BufferType::BufferType_JSON); + logInfo(WM_VULNSCAN_LOGTAG, "Triggered a re-scan after content update"); }); // Add subscribers for policy updates. From e947fe93671a19e0089ad35db6eab4e2b5bbefaf Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Fri, 24 May 2024 09:41:05 +0200 Subject: [PATCH 172/419] docs: update 4.8.0 changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b69999eac51..100c52ddc55 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,6 @@ All notable changes to this project will be documented in this file. #### Added -- Added new query "rollback" to wazuh-db. ([#16058](https://github.com/wazuh/wazuh/pull/16058)) - Transition to Wazuh Keystore for Indexer Configuration. ([#21670](https://github.com/wazuh/wazuh/pull/21670)) #### Changed @@ -52,6 +51,7 @@ All notable changes to this project will be documented in this file. - Fixed detection of the OS version on Alpine Linux. ([#16056](https://github.com/wazuh/wazuh/pull/16056)) - Fixed Solaris 10 name not showing in the Dashboard. ([#18642](https://github.com/wazuh/wazuh/pull/18642)) - Fixed macOS Ventura compilation from sources. ([#21932](https://github.com/wazuh/wazuh/pull/21932)) +- Fixed PyPI package gathering on macOS Sonoma. 
([#23532](https://github.com/wazuh/wazuh/pull/23532)) ### RESTful API From 7ddae23c3d098ab347bcc2691ee2e9ad06c793ab Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Fri, 24 May 2024 09:49:53 +0200 Subject: [PATCH 173/419] build: bump revision to 40811 --- api/api/spec/spec.yaml | 2 +- framework/wazuh/core/cluster/__init__.py | 2 +- src/Doxyfile | 2 +- src/REVISION | 2 +- src/init/wazuh-client.sh | 2 +- src/init/wazuh-local.sh | 2 +- src/init/wazuh-server.sh | 2 +- src/win32/wazuh-installer.nsi | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index 2ceb1477065..7bf61087dde 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -41,7 +41,7 @@ info: version: '4.8.0' - x-revision: '40810' + x-revision: '40811' title: 'Wazuh API REST' license: name: 'GPL 2.0' diff --git a/framework/wazuh/core/cluster/__init__.py b/framework/wazuh/core/cluster/__init__.py index 83ac0bd8c49..db5d15a6c38 100644 --- a/framework/wazuh/core/cluster/__init__.py +++ b/framework/wazuh/core/cluster/__init__.py @@ -5,7 +5,7 @@ # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 __version__ = '4.8.0' -__revision__ = '40810' +__revision__ = '40811' __author__ = "Wazuh Inc" __wazuh_name__ = "Wazuh" __licence__ = "\ diff --git a/src/Doxyfile b/src/Doxyfile index a3a6de3761c..8493f99dbab 100644 --- a/src/Doxyfile +++ b/src/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = "WAZUH" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = "v4.8.0-40810" +PROJECT_NUMBER = "v4.8.0-40811" # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/src/REVISION b/src/REVISION index 0a01e47db9f..f23426376a9 100644 --- a/src/REVISION +++ b/src/REVISION @@ -1 +1 @@ -40810 +40811 diff --git a/src/init/wazuh-client.sh b/src/init/wazuh-client.sh index b294d3006b7..f5365530db6 100755 --- a/src/init/wazuh-client.sh +++ b/src/init/wazuh-client.sh @@ -12,7 +12,7 @@ DIR=`dirname $PWD`; # Installation info VERSION="v4.8.0" -REVISION="40810" +REVISION="40811" TYPE="agent" ### Do not modify below here ### diff --git a/src/init/wazuh-local.sh b/src/init/wazuh-local.sh index 53c8b93d5ca..a2a87927da4 100644 --- a/src/init/wazuh-local.sh +++ b/src/init/wazuh-local.sh @@ -14,7 +14,7 @@ PLIST=${DIR}/bin/.process_list; # Installation info VERSION="v4.8.0" -REVISION="40810" +REVISION="40811" TYPE="local" ### Do not modify below here ### diff --git a/src/init/wazuh-server.sh b/src/init/wazuh-server.sh index e54ccf92ed7..0bbe774702a 100755 --- a/src/init/wazuh-server.sh +++ b/src/init/wazuh-server.sh @@ -14,7 +14,7 @@ PLIST=${DIR}/bin/.process_list; # Installation info VERSION="v4.8.0" -REVISION="40810" +REVISION="40811" TYPE="server" ### Do not modify below here ### diff --git a/src/win32/wazuh-installer.nsi b/src/win32/wazuh-installer.nsi index 43596608308..e523d702ba9 100644 --- a/src/win32/wazuh-installer.nsi +++ b/src/win32/wazuh-installer.nsi @@ -21,7 +21,7 @@ !define MUI_ICON install.ico !define MUI_UNICON uninstall.ico !define VERSION "4.8.0" -!define REVISION "40810" +!define REVISION "40811" !define NAME "Wazuh" !define SERVICE "WazuhSvc" From 3d64f60c15c3cc75f942404206c4861d98d93493 Mon Sep 17 00:00:00 2001 From: Selutario Date: Fri, 24 May 2024 10:07:45 +0200 Subject: [PATCH 174/419] docs: update 4.8.0 changelog related to API --- CHANGELOG.md 
| 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 100c52ddc55..c62898a3a3a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ All notable changes to this project will be documented in this file. #### Fixed - Updated cluster connection cleanup to remove temporary files when the connection between a worker and a master is broken. ([#17886](https://github.com/wazuh/wazuh/pull/17886)) +- Added a mechanism to avoid cluster errors to raise from expected wazuh-db exceptions. ([#23371](https://github.com/wazuh/wazuh/pull/23371)) ### Agent @@ -59,10 +60,13 @@ All notable changes to this project will be documented in this file. - Added new `GET /manager/version/check` endpoint to obtain information about new releases of Wazuh. ([#19952](https://github.com/wazuh/wazuh/pull/19952)) - Introduced an `auto` option for the ssl_protocol setting in the API configuration. This enables automatic negotiation of the TLS certificate to be used. ([#20420](https://github.com/wazuh/wazuh/pull/20420)) +- Added API indexer protection to allow uploading new configuration files if the `` section is not modified. ([#22727](https://github.com/wazuh/wazuh/pull/22727)) + #### Fixed - Fixed a warning from SQLAlchemy involving detached Roles instances in RBAC. ([#20527](https://github.com/wazuh/wazuh/pull/20527)) +- Fixed an issue where only the last `` item was displayed in `GET /manager/configuration`. ([#23095](https://github.com/wazuh/wazuh/issues/23095)) #### Removed @@ -106,7 +110,8 @@ All notable changes to this project will be documented in this file. #### Changed -- Upgraded external aiohttp library dependency version to 3.9.3. ([#21856](https://github.com/wazuh/wazuh/pull/21856)) +- Upgraded external aiohttp library dependency version to 3.9.5. ([#23112](https://github.com/wazuh/wazuh/pull/23112)) +- Upgraded external idna library dependency version to 3.7. ([#23112](https://github.com/wazuh/wazuh/pull/23112)) - Upgraded external cryptography library dependency version to 42.0.4. ([#22221](https://github.com/wazuh/wazuh/pull/22221)) - Upgraded external numpy library dependency version to 1.26.0. ([#20003](https://github.com/wazuh/wazuh/pull/20003)) - Upgraded external grpcio library dependency version to 1.58.0. ([#20003](https://github.com/wazuh/wazuh/pull/20003)) From 7dbfd6a0c859a681e395ec26a737c7591674d3f2 Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 24 May 2024 15:50:10 -0300 Subject: [PATCH 175/419] CL: - Added no_index condition depending on cluster environment --- .../src/vulnerabilityScannerFacade.cpp | 26 +++++++++++++------ 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp index 76261a89fbe..14445b1241a 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp @@ -292,10 +292,15 @@ void VulnerabilityScannerFacade::handlePolicyChange(Utils::RocksDBWrapper& state { logInfo(WM_VULNSCAN_LOGTAG, "Policy changed. 
Re-scanning all agents"); - // Create and queue the reboot event (force scan) - std::string dataValueReScan = R"({"action":"reboot"})"; - const std::vector messageReScan(dataValueReScan.begin(), dataValueReScan.end()); - pushEvent(messageReScan, BufferType::BufferType_JSON); + nlohmann::json actionData; + actionData["action"] = "reboot"; + // We shouldn't index if we are in a cluster environment + actionData["no-index"] = PolicyManager::instance().getClusterStatus(); + + const std::string actionDataString = actionData.dump(); + const std::vector actionMessage(actionDataString.begin(), actionDataString.end()); + + pushEvent(actionMessage, BufferType::BufferType_JSON); } } @@ -395,10 +400,15 @@ void VulnerabilityScannerFacade::start( initContentUpdater, [this]() { - // Create a JSON object 'dataValue' to specify the action as "reboot." - std::string dataValueReScan = R"({"action":"reboot","no-index":true})"; - const std::vector messageReScan(dataValueReScan.begin(), dataValueReScan.end()); - pushEvent(messageReScan, BufferType::BufferType_JSON); + nlohmann::json actionData; + actionData["action"] = "reboot"; + // We shouldn't index if we are in a cluster environment + actionData["no-index"] = PolicyManager::instance().getClusterStatus(); + + const std::string actionDataString = actionData.dump(); + const std::vector actionMessage(actionDataString.begin(), actionDataString.end()); + + pushEvent(actionMessage, BufferType::BufferType_JSON); logInfo(WM_VULNSCAN_LOGTAG, "Triggered a re-scan after content update"); }); From e0cbffcbcc309883cef63114e823a0ac269fa7fb Mon Sep 17 00:00:00 2001 From: sebasfalcone Date: Fri, 24 May 2024 18:47:11 -0300 Subject: [PATCH 176/419] CL: - Removed unused parameter from method - Added debug logging - Added node filtering to query to wazuhDB, based on agent cluster node - Fixed typo on log --- .../src/indexerConnector.cpp | 2 +- .../buildAllAgentListContext.hpp | 22 +++++- .../src/vulnerabilityScannerFacade.cpp | 6 +- .../src/vulnerabilityScannerFacade.hpp | 3 +- .../unit/buildAllAgentListContext_test.cpp | 74 +++++++++++++++++++ 5 files changed, 99 insertions(+), 8 deletions(-) diff --git a/src/shared_modules/indexer_connector/src/indexerConnector.cpp b/src/shared_modules/indexer_connector/src/indexerConnector.cpp index 4626de24256..17dc42d3681 100644 --- a/src/shared_modules/indexer_connector/src/indexerConnector.cpp +++ b/src/shared_modules/indexer_connector/src/indexerConnector.cpp @@ -126,7 +126,7 @@ bool IndexerConnector::abuseControl(const std::string& agentId) // If the last sync was less than MINIMAL_SYNC_TIME minutes ago, return true. 
if (diff.count() < MINIMAL_SYNC_TIME) { - logDebug2(IC_NAME, "Agent '%s' sync ommited due to abuse control.", agentId.c_str()); + logDebug2(IC_NAME, "Agent '%s' sync omitted due to abuse control.", agentId.c_str()); return true; } } diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp index 73c680c29fc..99ab3e8b791 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp @@ -60,9 +60,25 @@ class TBuildAllAgentListContext final : public AbstractHandlerclusterStatus()) + { + // Execute query + const std::string clusterNodeName {data->clusterNodeName()}; + TSocketDBWrapper::instance().query(WazuhDBQueryBuilder::builder() + .global() + .selectAll() + .fromTable("agent") + .whereColumn("node_name") + .equalsTo(clusterNodeName) + .build(), + response); + } + else + { + // Execute query + TSocketDBWrapper::instance().query( + WazuhDBQueryBuilder::builder().global().selectAll().fromTable("agent").build(), response); + } } catch (const SocketDbWrapperException& e) { diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp index 14445b1241a..879fd6d9f32 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp @@ -286,7 +286,7 @@ void VulnerabilityScannerFacade::clusterConfigurationChange(Utils::RocksDBWrappe stateDB.put(CLUSTER_NAME_KEY, clusterName); } -void VulnerabilityScannerFacade::handlePolicyChange(Utils::RocksDBWrapper& stateDB) const +void VulnerabilityScannerFacade::handlePolicyChange() const { if (m_shouldRescan) { @@ -297,6 +297,8 @@ void VulnerabilityScannerFacade::handlePolicyChange(Utils::RocksDBWrapper& state // We shouldn't index if we are in a cluster environment actionData["no-index"] = PolicyManager::instance().getClusterStatus(); + logDebug2(WM_VULNSCAN_LOGTAG, "actionData: %s", actionData.dump().c_str()); + const std::string actionDataString = actionData.dump(); const std::vector actionMessage(actionDataString.begin(), actionDataString.end()); @@ -364,7 +366,7 @@ void VulnerabilityScannerFacade::start( handleManagerScanPolicyChange(*stateDB); // Checks for the actions to be performed after the policy change (vulnerability scanner). - handlePolicyChange(*stateDB); + handlePolicyChange(); // Subscription to syscollector delta events. 
initDeltasSubscription(); diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp index 4a111fad3e8..f6baf3202cd 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.hpp @@ -123,11 +123,10 @@ class VulnerabilityScannerFacade final : public Singleton; +const auto configClusterEnable = nlohmann::json::parse(R"( + { + "vulnerability-detection": { + "enabled": "yes", + "index-status": "yes", + "feed-update-interval": "60m", + "cti-url": "https://cti-url.com" + }, + "managerDisabledScan": false, + "clusterNodeName": "node_1", + "clusterName":"clusterName", + "clusterEnabled":true + })"); + +const auto configClusterDisabled = nlohmann::json::parse(R"( + { + "vulnerability-detection": { + "enabled": "yes", + "index-status": "yes", + "feed-update-interval": "60m", + "cti-url": "https://cti-url.com" + }, + "managerDisabledScan": false, + "clusterNodeName": "node_1", + "clusterName":"clusterName", + "clusterEnabled":false + })"); + TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContext) { + // Initialize policy manager + auto policyManager = std::make_unique(); + policyManager->initialize(configClusterDisabled); + spSocketDBWrapperMock = std::make_shared(); EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)).Times(1); @@ -61,6 +93,10 @@ TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContextWithElements) TEST_F(BuildAllAgentListContextTest, MissingField) { + // Initialize policy manager + auto policyManager = std::make_unique(); + policyManager->initialize(configClusterDisabled); + spSocketDBWrapperMock = std::make_shared(); nlohmann::json queryResult = nlohmann::json::parse(R"( @@ -88,6 +124,10 @@ TEST_F(BuildAllAgentListContextTest, MissingField) TEST_F(BuildAllAgentListContextTest, ExceptionOnDB) { + // Initialize policy manager + auto policyManager = std::make_unique(); + policyManager->initialize(configClusterDisabled); + spSocketDBWrapperMock = std::make_shared(); const std::string agentId {"1"}; @@ -105,3 +145,37 @@ TEST_F(BuildAllAgentListContextTest, ExceptionOnDB) spSocketDBWrapperMock.reset(); } + +TEST_F(BuildAllAgentListContextTest, BuildAllAgentListContextWithElementsInCluster) +{ + // Initialize policy manager + auto policyManager = std::make_unique(); + policyManager->initialize(configClusterEnable); + + spSocketDBWrapperMock = std::make_shared(); + + nlohmann::json queryResult = nlohmann::json::parse(R"( + [ + { + "id": 1, + "name": "name", + "version": "Wazuh 4.4.4", + "ip": "192.168.0.1", + "node_name": "node_1" + } + ])"); + + EXPECT_CALL(*spSocketDBWrapperMock, query(testing::_, testing::_)) + .Times(1) + .WillOnce(testing::SetArgReferee<1>(queryResult)); + + auto allAgentContext = + std::make_shared>(); + + auto scanContext = std::make_shared(); + + // Context is not used + allAgentContext->handleRequest(scanContext); + + EXPECT_EQ(scanContext->m_agents.size(), 1); +} From d2f7d63da2131a5b0be6295f5689b73a2ea58b79 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Sat, 25 May 2024 01:29:49 -0300 Subject: [PATCH 177/419] Remove node filtering. 
--- .../buildAllAgentListContext.hpp | 22 +++---------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp index 99ab3e8b791..73c680c29fc 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp +++ b/src/wazuh_modules/vulnerability_scanner/src/scanOrchestrator/buildAllAgentListContext.hpp @@ -60,25 +60,9 @@ class TBuildAllAgentListContext final : public AbstractHandlerclusterStatus()) - { - // Execute query - const std::string clusterNodeName {data->clusterNodeName()}; - TSocketDBWrapper::instance().query(WazuhDBQueryBuilder::builder() - .global() - .selectAll() - .fromTable("agent") - .whereColumn("node_name") - .equalsTo(clusterNodeName) - .build(), - response); - } - else - { - // Execute query - TSocketDBWrapper::instance().query( - WazuhDBQueryBuilder::builder().global().selectAll().fromTable("agent").build(), response); - } + // Execute query + TSocketDBWrapper::instance().query( + WazuhDBQueryBuilder::builder().global().selectAll().fromTable("agent").build(), response); } catch (const SocketDbWrapperException& e) { From eb7818710ec933175ecd003327224e8ee47bc393 Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Sun, 26 May 2024 00:24:18 -0300 Subject: [PATCH 178/419] Add conditional to avoid full-scan in content generation, to avoid failure. --- .../src/vulnerabilityScannerFacade.cpp | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp index 879fd6d9f32..a85996b110e 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp @@ -400,18 +400,22 @@ void VulnerabilityScannerFacade::start( true, reloadGlobalMapsStartup, initContentUpdater, - [this]() + [this, reloadGlobalMapsStartup]() { - nlohmann::json actionData; - actionData["action"] = "reboot"; - // We shouldn't index if we are in a cluster environment - actionData["no-index"] = PolicyManager::instance().getClusterStatus(); + // Re-scan all agent after content update, only if is an instance of vulnerability scanner. + if (reloadGlobalMapsStartup) + { + nlohmann::json actionData; + actionData["action"] = "reboot"; + // We shouldn't index if we are in a cluster environment + actionData["no-index"] = PolicyManager::instance().getClusterStatus(); - const std::string actionDataString = actionData.dump(); - const std::vector actionMessage(actionDataString.begin(), actionDataString.end()); + const std::string actionDataString = actionData.dump(); + const std::vector actionMessage(actionDataString.begin(), actionDataString.end()); - pushEvent(actionMessage, BufferType::BufferType_JSON); - logInfo(WM_VULNSCAN_LOGTAG, "Triggered a re-scan after content update"); + pushEvent(actionMessage, BufferType::BufferType_JSON); + logInfo(WM_VULNSCAN_LOGTAG, "Triggered a re-scan after content update"); + } }); // Add subscribers for policy updates. 
From 064877886219834ed298524108b8ee6331434c4c Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Mon, 27 May 2024 08:19:21 +0200 Subject: [PATCH 179/419] fix: create wazuh-db template file after dropping permissions --- src/wazuh_db/main.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/wazuh_db/main.c b/src/wazuh_db/main.c index 576db333f6d..a24e74886b4 100644 --- a/src/wazuh_db/main.c +++ b/src/wazuh_db/main.c @@ -137,7 +137,6 @@ int main(int argc, char ** argv) snprintf(path_template, sizeof(path_template), "%s/%s/%s", home_path, WDB2_DIR, WDB_PROF_NAME); unlink(path_template); mdebug1("Template file removed: %s", path_template); - wdb_create_profile(); // Set max open files limit struct rlimit rlimit = { nofile, nofile }; @@ -204,6 +203,10 @@ int main(int argc, char ** argv) wdb_state.uptime = time(NULL); + // Create template + + wdb_create_profile(); + // Start threads if (status = pthread_create(&thread_dealer, NULL, run_dealer, NULL), status != 0) { From d80122fe1e40e40b55ae0599af4e346523d66e89 Mon Sep 17 00:00:00 2001 From: Marcelo Ariel Hamra Date: Wed, 6 Dec 2023 03:22:45 -0300 Subject: [PATCH 180/419] Upgrade connexion package to version 3.0 --- api/api/alogging.py | 349 +++++++------ api/api/api_exception.py | 6 + api/api/authentication.py | 19 +- api/api/configuration/api.yaml | 2 +- .../controllers/active_response_controller.py | 20 +- api/api/controllers/agent_controller.py | 420 ++++++++-------- api/api/controllers/cdb_list_controller.py | 69 ++- api/api/controllers/ciscat_controller.py | 22 +- api/api/controllers/cluster_controller.py | 240 +++++---- api/api/controllers/decoder_controller.py | 80 ++- api/api/controllers/default_controller.py | 12 +- api/api/controllers/event_controller.py | 15 +- .../controllers/experimental_controller.py | 155 +++--- api/api/controllers/logtest_controller.py | 28 +- api/api/controllers/manager_controller.py | 167 +++---- api/api/controllers/mitre_controller.py | 80 ++- api/api/controllers/overview_controller.py | 14 +- api/api/controllers/rootcheck_controller.py | 49 +- api/api/controllers/rule_controller.py | 99 ++-- api/api/controllers/sca_controller.py | 27 +- api/api/controllers/security_controller.py | 387 ++++++++------- api/api/controllers/syscheck_controller.py | 57 ++- .../controllers/syscollector_controller.py | 114 ++--- api/api/controllers/task_controller.py | 15 +- api/api/controllers/test/conftest.py | 21 + .../test/test_active_response_controller.py | 11 +- .../controllers/test/test_agent_controller.py | 415 ++++++++-------- .../test/test_cdb_list_controller.py | 48 +- .../test/test_ciscat_controller.py | 13 +- .../test/test_cluster_controller.py | 245 +++++----- .../controllers/test/test_controller_util.py | 17 + .../test/test_decoder_controller.py | 57 ++- .../test/test_default_controller.py | 7 +- .../controllers/test/test_event_controller.py | 11 +- .../test/test_experimental_controller.py | 133 ++--- .../test/test_manager_controller.py | 157 +++--- .../controllers/test/test_mitre_controller.py | 65 +-- .../test/test_overview_controller.py | 11 +- .../test/test_rootcheck_controller.py | 38 +- .../controllers/test/test_rule_controller.py | 69 +-- .../controllers/test/test_sca_controller.py | 20 +- .../test/test_security_controller.py | 393 +++++++-------- .../test/test_syscheck_controller.py | 56 +-- .../test/test_syscollector_controller.py | 102 ++-- .../controllers/test/test_task_controller.py | 11 +- api/api/controllers/test/utils.py | 6 +- api/api/controllers/util.py | 29 ++ 
.../controllers/vulnerability_controller.py | 214 ++++++++ api/api/error_handler.py | 295 +++++++++++ api/api/middlewares.py | 379 ++++++++------- api/api/models/base_model_.py | 10 +- api/api/models/test/test_model.py | 7 +- api/api/spec/spec.yaml | 40 ++ api/api/test/test_alogging.py | 343 +++---------- api/api/test/test_authentication.py | 2 +- api/api/test/test_error_handler.py | 257 ++++++++++ api/api/test/test_middlewares.py | 388 ++++++++++----- api/api/test/test_uri_parser.py | 60 +-- api/api/test/test_util.py | 41 +- api/api/uri_parser.py | 58 +-- api/api/util.py | 27 +- api/scripts/wazuh_apid.py | 459 ++++++++++-------- .../env/base/manager/manager.Dockerfile | 4 +- api/test/integration/env/docker-compose.yml | 15 + .../test_cdb_list_endpoints.tavern.yaml | 6 +- .../test_manager_endpoints.tavern.yaml | 45 +- framework/requirements-dev.txt | 3 +- framework/requirements.txt | 30 +- .../test_cases/cases_set_secure_headers.yaml | 8 +- 69 files changed, 3979 insertions(+), 3093 deletions(-) create mode 100644 api/api/controllers/test/conftest.py create mode 100644 api/api/controllers/test/test_controller_util.py create mode 100644 api/api/controllers/util.py create mode 100644 api/api/controllers/vulnerability_controller.py create mode 100644 api/api/error_handler.py create mode 100644 api/api/test/test_error_handler.py diff --git a/api/api/alogging.py b/api/api/alogging.py index d855475daab..04019eab0b2 100644 --- a/api/api/alogging.py +++ b/api/api/alogging.py @@ -2,23 +2,20 @@ # Created by Wazuh, Inc. . # This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 -import binascii import collections -import hashlib -import json import logging +import json import re -from base64 import b64decode - -from aiohttp import web_request -from aiohttp.abc import AbstractAccessLogger from pythonjsonlogger import jsonlogger -from wazuh.core.wlogging import WazuhLogger +from api.configuration import api_conf +from api.api_exception import APIError # Compile regex when the module is imported so it's not necessary to compile it everytime log.info is called request_pattern = re.compile(r'\[.+]|\s+\*\s+') +logger = logging.getLogger('wazuh-api') + # Variable used to specify an unknown user UNKNOWN_USER_STRING = "unknown_user" @@ -26,139 +23,24 @@ RUN_AS_LOGIN_ENDPOINT = "/security/user/authenticate/run_as" -class AccessLogger(AbstractAccessLogger): - """ - Define the log writer used by aiohttp. - """ - def custom_logging(self, user, remote, method, path, query, body, time, status, hash_auth_context=''): - """Provide the log entry structure depending on the logging format. - - Parameters - ---------- - user : str - User who perform the request. - remote : str - IP address of the request. - method : str - HTTP method used in the request. - path : str - Endpoint used in the request. - query : dict - Dictionary with the request parameters. - body : dict - Dictionary with the request body. - time : float - Required time to compute the request. - status : int - Status code of the request. - hash_auth_context : str, optional - Hash representing the authorization context. 
Default: '' - """ - json_info = { - 'user': user, - 'ip': remote, - 'http_method': method, - 'uri': f'{method} {path}', - 'parameters': query, - 'body': body, - 'time': f'{time:.3f}s', - 'status_code': status - } - - if not hash_auth_context: - log_info = f'{user} {remote} "{method} {path}" ' - else: - log_info = f'{user} ({hash_auth_context}) {remote} "{method} {path}" ' - json_info['hash_auth_context'] = hash_auth_context - - if path == '/events' and self.logger.level >= 20: - # If log level is info simplify the messages for the /events requests. - events = body.get('events', []) - body = {'events': len(events)} - json_info['body'] = body - - log_info += f'with parameters {json.dumps(query)} and body {json.dumps(body)} done in {time:.3f}s: {status}' +class APILoggerSize: + size_regex = re.compile(r"(\d+)([KM])") + unit_conversion = { + 'K': 1024, + 'M': 1024 ** 2 + } - self.logger.info(log_info, extra={'log_type': 'log'}) - self.logger.info(json_info, extra={'log_type': 'json'}) + def __init__(self, size_string: str): + size_string = size_string.upper() + try: + size, unit = self.size_regex.match(size_string).groups() + except AttributeError: + raise APIError(2011, details="Size value does not match the expected format: (Available" + " units: K (kilobytes), M (megabytes). For instance: 45M") from None - def log(self, request: web_request.BaseRequest, response: web_request.StreamResponse, time: float): - """Override the log method to log messages. - - Parameters - ---------- - request : web_request.BaseRequest - API request onject. - response : web_request.StreamResponse - API response object. - time : float - Time taken by the API to respond to the request. - """ - query = dict(request.query) - body = request.get("body", dict()) - if 'password' in query: - query['password'] = '****' - if 'password' in body: - body['password'] = '****' - if 'key' in body and '/agents' in request.path: - body['key'] = '****' - - # With permanent redirect, not found responses or any response with no token information, - # decode the JWT token to get the username - user = request.get('user', '') - if not user: - try: - user = b64decode(request.headers["authorization"].split()[1]).decode().split(':')[0] - except (KeyError, IndexError, binascii.Error): - user = UNKNOWN_USER_STRING - - # Get or create authorization context hash - hash_auth_context = '' - # Get hash from token information - if 'token_info' in request: - hash_auth_context = request['token_info'].get('hash_auth_context', '') - # Create hash if run_as login - if not hash_auth_context and request.path == RUN_AS_LOGIN_ENDPOINT: - hash_auth_context = hashlib.blake2b(json.dumps(body).encode(), digest_size=16).hexdigest() - - self.custom_logging(user, request.remote, request.method, request.path, query, body, time, response.status, - hash_auth_context=hash_auth_context) - - -class APILogger(WazuhLogger): - """ - Define the logger used by wazuh-apid. - """ - - def __init__(self, *args: dict, **kwargs: dict): - """APIlogger class constructor.""" - log_path = kwargs.get('log_path', '') - super().__init__(*args, **kwargs, - custom_formatter=WazuhJsonFormatter if log_path.endswith('json') else None) - - def setup_logger(self, custom_handler: logging.Handler = None): - """ - Set ups API logger. In addition to super().setup_logger() this method adds: - * Sets up log level based on the log level defined in API configuration file. - - :param custom_handler: custom handler that can be set instead of the default one from the WazuhLogger class. 
- """ - super().setup_logger(handler=custom_handler) - - if self.debug_level == 'debug2': - debug_level = logging.DEBUG2 - elif self.debug_level == 'debug': - debug_level = logging.DEBUG - elif self.debug_level == 'critical': - debug_level = logging.CRITICAL - elif self.debug_level == 'error': - debug_level = logging.ERROR - elif self.debug_level == 'warning': - debug_level = logging.WARNING - else: # self.debug_level == 'info' - debug_level = logging.INFO - - self.logger.setLevel(debug_level) + self.size = int(size) * self.unit_conversion[unit] + if self.size < self.unit_conversion['M']: + raise APIError(2011, details=f"Minimum value for size is 1M. Current: {size_string}") class WazuhJsonFormatter(jsonlogger.JsonFormatter): @@ -201,3 +83,192 @@ def add_fields(self, log_record: collections.OrderedDict, record: logging.LogRec log_record['timestamp'] = self.formatTime(record, self.datefmt) log_record['levelname'] = record.levelname log_record['data'] = record.message + + +def set_logging(log_filepath, log_level='INFO', foreground_mode=False) -> dict: + """Set up logging for API. + + This function creates a logging configuration dictionary, configure the wazuh-api logger + and returns the logging configuration dictionary that will be used in uvicorn logging + configuration. + + Parameters + ---------- + log_path : str + Log file path. + log_level : str + Logger Log level. + foreground_mode : bool + Log output to console streams when true + else Log output to file. + + Raise + ----- + ApiError + + Returns + ------- + log_config_dict : dict + Logging configuration dictionary. + """ + handlers = { + 'plainfile': None, + 'jsonfile': None, + } + if foreground_mode: + handlers.update({'console': {}}) + else: + if 'json' in api_conf['logs']['format']: + handlers["jsonfile"] = { + 'filename': f"{log_filepath}.json", + 'formatter': 'json', + 'filters': ['json-filter'], + } + if 'plain' in api_conf['logs']['format']: + handlers["plainfile"] = { + 'filename': f"{log_filepath}.log", + 'formatter': 'log', + 'filters': ['plain-filter'], + } + + hdls = [k for k, v in handlers.items() if isinstance(v, dict)] + if not hdls: + raise APIError(2011) + + log_config_dict = { + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "default": { + "()": "uvicorn.logging.DefaultFormatter", + "fmt": "%(levelprefix)s %(message)s", + "use_colors": None, + }, + "access": { + "()": "uvicorn.logging.AccessFormatter", + "fmt": '%(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s', + }, + "log": { + "()": "uvicorn.logging.DefaultFormatter", + "fmt": "%(asctime)s %(levelname)s: %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S", + "use_colors": None, + }, + "json" : { + '()': 'api.alogging.WazuhJsonFormatter', + 'style': '%', + 'datefmt': "%Y/%m/%d %H:%M:%S" + } + }, + "filters": { + 'plain-filter': {'()': 'wazuh.core.wlogging.CustomFilter', + 'log_type': 'log' }, + 'json-filter': {'()': 'wazuh.core.wlogging.CustomFilter', + 'log_type': 'json' } + }, + "handlers": { + "default": { + "formatter": "default", + "class": "logging.StreamHandler", + "stream": "ext://sys.stderr", + }, + "access": { + "formatter": "access", + "class": "logging.StreamHandler", + "stream": "ext://sys.stdout" + }, + "console": { + 'formatter': 'log', + 'class': 'logging.StreamHandler', + 'stream': 'ext://sys.stdout', + 'filters': ['plain-filter'] + }, + }, + "loggers": { + "wazuh-api": {"handlers": hdls, "level": log_level, "propagate": False}, + "start-stop-api": {"handlers": hdls, "level": 'INFO', "propagate": False} + } 
+ } + + # configure file handlers + for handler, d in handlers.items(): + if d and 'filename' in d: + if api_conf['logs']['max_size']['enabled']: + max_size = APILoggerSize(api_conf['logs']['max_size']['size']).size + d.update({ + 'class':'wazuh.core.wlogging.SizeBasedFileRotatingHandler', + 'maxBytes': max_size, + 'backupCount': 1 + }) + else: + d.update({ + 'class': 'wazuh.core.wlogging.TimeBasedFileRotatingHandler', + 'when': 'midnight' + }) + log_config_dict['handlers'][handler] = d + + # Configure the uvicorn loggers. They will be created by the uvicorn server. + log_config_dict['loggers']['uvicorn'] = {"handlers": hdls, "level": 'WARNING', "propagate": False} + log_config_dict['loggers']['uvicorn.error'] = {"handlers": hdls, "level": 'WARNING', "propagate": False} + log_config_dict['loggers']['uvicorn.access'] = {'level': 'WARNING'} + + return log_config_dict + + +def custom_logging(user, remote, method, path, query, + body, elapsed_time, status, hash_auth_context='', + headers: dict = None): + """Provide the log entry structure depending on the logging format. + + Parameters + ---------- + user : str + User who perform the request. + remote : str + IP address of the request. + method : str + HTTP method used in the request. + path : str + Endpoint used in the request. + query : dict + Dictionary with the request parameters. + body : dict + Dictionary with the request body. + elapsed_time : float + Required time to compute the request. + status : int + Status code of the request. + hash_auth_context : str, optional + Hash representing the authorization context. Default: '' + headers: dict + Optional dictionary of request headers. + """ + json_info = { + 'user': user, + 'ip': remote, + 'http_method': method, + 'uri': f'{method} {path}', + 'parameters': query, + 'body': body, + 'time': f'{elapsed_time:.3f}s', + 'status_code': status + } + + if not hash_auth_context: + log_info = f'{user} {remote} "{method} {path}" ' + else: + log_info = f'{user} ({hash_auth_context}) {remote} "{method} {path}" ' + json_info['hash_auth_context'] = hash_auth_context + + if path == '/events' and logger.level >= 20: + # If log level is info simplify the messages for the /events requests. + events = body.get('events', []) + body = {'events': len(events)} + json_info['body'] = body + + log_info += f'with parameters {json.dumps(query)} and body'\ + f' {json.dumps(body)} done in {elapsed_time:.3f}s: {status}' + + logger.info(log_info, extra={'log_type': 'log'}) + logger.info(json_info, extra={'log_type': 'json'}) + logger.debug2(f'Receiving headers {headers}') diff --git a/api/api/api_exception.py b/api/api/api_exception.py index 6f9224ae7a2..4d544788c69 100644 --- a/api/api/api_exception.py +++ b/api/api/api_exception.py @@ -2,6 +2,7 @@ # Created by Wazuh, Inc. . 
# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 +from connexion.exceptions import ProblemException from api.constants import RELATIVE_CONFIG_FILE_PATH, RELATIVE_SECURITY_PATH from wazuh.core.exception import DOCU_VERSION @@ -62,3 +63,8 @@ def __str__(self) -> str: class APIError(APIException): pass + + +class BlockedIPException(ProblemException): + """Bocked IP Exception Class.""" + diff --git a/api/api/authentication.py b/api/api/authentication.py index f1ba28d359f..d574b79b440 100755 --- a/api/api/authentication.py +++ b/api/api/authentication.py @@ -13,7 +13,8 @@ from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric import ec -from werkzeug.exceptions import Unauthorized +from jose import JWTError, jwt +from connexion.exceptions import Unauthorized import api.configuration as conf import wazuh.core.utils as core_utils @@ -28,6 +29,7 @@ from wazuh.rbac.orm import AuthenticationManager, TokenManager, UserRolesManager from wazuh.rbac.preprocessor import optimize_resources +INVALID_TOKEN = "Invalid token" pool = ThreadPoolExecutor(max_workers=1) @@ -82,9 +84,7 @@ def check_user(user: str, password: str, required_scopes=None) -> Union[dict, No data = raise_if_exc(pool.submit(asyncio.run, dapi.distribute_function()).result()) if data['result']: - return {'sub': user, - 'active': True - } + return {'sub': user, 'active': True } # Set JWT settings @@ -191,7 +191,8 @@ def generate_token(user_id: str = None, data: dict = None, auth_context: dict = "run_as": auth_context is not None, "rbac_roles": data['roles'], "rbac_mode": result['rbac_mode'] - } | ({"hash_auth_context": hashlib.blake2b(json.dumps(auth_context).encode(), digest_size=16).hexdigest()} + } | ({"hash_auth_context": hashlib.blake2b(json.dumps(auth_context).encode(), + digest_size=16).hexdigest()} if auth_context is not None else {}) return jwt.encode(payload, generate_keypair()[0], algorithm=JWT_ALGORITHM) @@ -275,7 +276,7 @@ def decode_token(token: str) -> dict: data = raise_if_exc(pool.submit(asyncio.run, dapi.distribute_function()).result()).to_dict() if not data['result']['valid']: - raise Unauthorized + raise Unauthorized(INVALID_TOKEN) payload['rbac_policies'] = data['result']['policies'] payload['rbac_policies']['rbac_mode'] = payload.pop('rbac_mode') @@ -292,8 +293,8 @@ def decode_token(token: str) -> dict: current_expiration_time = result['auth_token_exp_timeout'] if payload['rbac_policies']['rbac_mode'] != current_rbac_mode \ or (payload['exp'] - payload['nbf']) != current_expiration_time: - raise Unauthorized + raise Unauthorized("Token Expired") return payload - except jwt.exceptions.PyJWTError as e: - raise Unauthorized from e + except JWTError as exc: + raise Unauthorized(INVALID_TOKEN) from exc diff --git a/api/api/configuration/api.yaml b/api/api/configuration/api.yaml index c1be7f7ed10..d981823861d 100644 --- a/api/api/configuration/api.yaml +++ b/api/api/configuration/api.yaml @@ -29,7 +29,7 @@ # enabled: False # size: "1M" -# Cross-origin resource sharing: https://github.com/aio-libs/aiohttp-cors#usage +# Cross-origin resource sharing: https://www.starlette.io/middleware/#corsmiddleware # cors: # enabled: no # source_route: "*" diff --git a/api/api/controllers/active_response_controller.py b/api/api/controllers/active_response_controller.py index a696a9a23ff..d3434f88dc7 100755 --- a/api/api/controllers/active_response_controller.py +++ b/api/api/controllers/active_response_controller.py @@ -4,25 +4,25 @@ import 
logging -from aiohttp import web +from connexion import request +from connexion.lifecycle import ConnexionResponse -import wazuh.active_response as active_response -from api.encoder import dumps, prettify +from api.controllers.util import json_response, JSON_CONTENT_TYPE from api.models.active_response_model import ActiveResponseModel from api.models.base_model_ import Body from api.util import remove_nones_to_dict, raise_if_exc from wazuh.core.cluster.dapi.dapi import DistributedAPI +import wazuh.active_response as active_response logger = logging.getLogger('wazuh-api') -async def run_command(request, agents_list: str = '*', pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def run_command(agents_list: str = '*', pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Runs an Active Response command on a specified list of agents. Parameters ---------- - request : connexion.request agents_list : str List of agents IDs. All possible values from 000 onwards. Default: '*' pretty : bool @@ -32,10 +32,10 @@ async def run_command(request, agents_list: str = '*', pretty: bool = False, Returns ------- - web.Response + ConnexionResponse """ # Get body parameters - Body.validate_content_type(request, expected_content_type='application/json') + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) f_kwargs = await ActiveResponseModel.get_kwargs(request, additional_kwargs={'agent_list': agents_list}) dapi = DistributedAPI(f=active_response.run_command, @@ -45,8 +45,8 @@ async def run_command(request, agents_list: str = '*', pretty: bool = False, wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/agent_controller.py b/api/api/controllers/agent_controller.py index ecfe7e72259..a50bfff989b 100755 --- a/api/api/controllers/agent_controller.py +++ b/api/api/controllers/agent_controller.py @@ -6,34 +6,34 @@ import mimetypes from typing import Union -from aiohttp import web +from connexion import request from connexion.lifecycle import ConnexionResponse -from api.encoder import dumps, prettify +from api.controllers.util import JSON_CONTENT_TYPE, json_response from api.models.agent_added_model import AgentAddedModel +from api.models.agent_group_added_model import GroupAddedModel from api.models.agent_inserted_model import AgentInsertedModel from api.models.base_model_ import Body -from api.models.agent_group_added_model import GroupAddedModel -from api.util import parse_api_param, remove_nones_to_dict, raise_if_exc, deprecate_endpoint +from api.util import deprecate_endpoint, parse_api_param, raise_if_exc, remove_nones_to_dict from api.validator import check_component_configuration_pair -from wazuh import agent, stats +from wazuh import agent from wazuh.core.cluster.control import get_system_nodes from wazuh.core.cluster.dapi.dapi import DistributedAPI from wazuh.core.common import DATABASE_LIMIT from wazuh.core.results import AffectedItemsWazuhResult +from wazuh import stats logger = logging.getLogger('wazuh-api') -async def delete_agents(request, pretty: bool = False, wait_for_complete: bool = False, agents_list: str = None, +async def delete_agents(pretty: 
bool = False, wait_for_complete: bool = False, agents_list: str = None, purge: bool = False, status: str = None, q: str = None, older_than: str = None, manager: str = None, version: str = None, group: str = None, node_name: str = None, - name: str = None, ip: str = None) -> web.Response: + name: str = None, ip: str = None) -> ConnexionResponse: """Delete all agents or a list of them based on optional criteria. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -64,7 +64,7 @@ async def delete_agents(request, pretty: bool = False, wait_for_complete: bool = Returns ------- - web.Response + ConnexionResponse Agents which have been deleted. """ if 'all' in agents_list: @@ -80,7 +80,7 @@ async def delete_agents(request, pretty: bool = False, wait_for_complete: bool = 'node_name': node_name, 'name': name, 'ip': ip, - 'registerIP': request.query.get('registerIP', None) + 'registerIP': request.query_params.get('registerIP', None) }, 'q': q } @@ -88,7 +88,7 @@ async def delete_agents(request, pretty: bool = False, wait_for_complete: bool = # Add nested fields to kwargs filters nested = ['os.version', 'os.name', 'os.platform'] for field in nested: - f_kwargs['filters'][field] = request.query.get(field, None) + f_kwargs['filters'][field] = request.query_params.get(field, None) dapi = DistributedAPI(f=agent.delete_agents, f_kwargs=remove_nones_to_dict(f_kwargs), @@ -96,23 +96,22 @@ async def delete_agents(request, pretty: bool = False, wait_for_complete: bool = is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_agents(request, pretty: bool = False, wait_for_complete: bool = False, agents_list: str = None, +async def get_agents(pretty: bool = False, wait_for_complete: bool = False, agents_list: str = None, offset: int = 0, limit: int = DATABASE_LIMIT, select: str = None, sort: str = None, search: str = None, status: str = None, q: str = None, older_than: str = None, manager: str = None, version: str = None, group: str = None, node_name: str = None, name: str = None, ip: str = None, - group_config_status: str = None, distinct: bool = False) -> web.Response: + group_config_status: str = None, distinct: bool = False) -> ConnexionResponse: """Get information about all agents or a list of them. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -156,7 +155,7 @@ async def get_agents(request, pretty: bool = False, wait_for_complete: bool = Fa Returns ------- - web.Response + ConnexionResponse Response with all selected agents' information. 
""" f_kwargs = {'agent_list': agents_list, @@ -174,7 +173,7 @@ async def get_agents(request, pretty: bool = False, wait_for_complete: bool = Fa 'node_name': node_name, 'name': name, 'ip': ip, - 'registerIP': request.query.get('registerIP', None), + 'registerIP': request.query_params.get('registerIP', None), 'group_config_status': group_config_status }, 'q': q, @@ -183,7 +182,7 @@ async def get_agents(request, pretty: bool = False, wait_for_complete: bool = Fa # Add nested fields to kwargs filters nested = ['os.version', 'os.name', 'os.platform'] for field in nested: - f_kwargs['filters'][field] = request.query.get(field, None) + f_kwargs['filters'][field] = request.query_params.get(field, None) dapi = DistributedAPI(f=agent.get_agents, f_kwargs=remove_nones_to_dict(f_kwargs), @@ -191,19 +190,18 @@ async def get_agents(request, pretty: bool = False, wait_for_complete: bool = Fa is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def add_agent(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def add_agent(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Add a new Wazuh agent. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -211,11 +209,11 @@ async def add_agent(request, pretty: bool = False, wait_for_complete: bool = Fal Returns ------- - web.Response + ConnexionResponse API response. """ # Get body parameters - Body.validate_content_type(request, expected_content_type='application/json') + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) f_kwargs = await AgentAddedModel.get_kwargs(request) dapi = DistributedAPI(f=agent.add_agent, @@ -224,21 +222,20 @@ async def add_agent(request, pretty: bool = False, wait_for_complete: bool = Fal is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def reconnect_agents(request, pretty: bool = False, wait_for_complete: bool = False, - agents_list: Union[list, str] = '*') -> web.Response: +async def reconnect_agents(pretty: bool = False, wait_for_complete: bool = False, + agents_list: Union[list, str] = '*') -> ConnexionResponse: """Force reconnect all agents or a list of them. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. Default `False` wait_for_complete : bool @@ -248,7 +245,7 @@ async def reconnect_agents(request, pretty: bool = False, wait_for_complete: boo Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'agent_list': agents_list} @@ -258,22 +255,21 @@ async def reconnect_agents(request, pretty: bool = False, wait_for_complete: boo request_type='distributed_master', is_async=False, wait_for_complete=wait_for_complete, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], broadcasting=agents_list == '*', logger=logger ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def restart_agents(request, pretty: bool = False, wait_for_complete: bool = False, - agents_list: str = '*') -> web.Response: +async def restart_agents(pretty: bool = False, wait_for_complete: bool = False, + agents_list: str = '*') -> ConnexionResponse: """Restart all agents or a list of them. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -283,7 +279,7 @@ async def restart_agents(request, pretty: bool = False, wait_for_complete: bool Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'agent_list': agents_list} @@ -293,17 +289,17 @@ async def restart_agents(request, pretty: bool = False, wait_for_complete: bool request_type='distributed_master', is_async=False, wait_for_complete=wait_for_complete, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], broadcasting=agents_list == '*', logger=logger ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def restart_agents_by_node(request, node_id: str, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def restart_agents_by_node(node_id: str, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Restart all agents belonging to a node. Parameters @@ -317,7 +313,7 @@ async def restart_agents_by_node(request, node_id: str, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response. """ nodes = raise_if_exc(await get_system_nodes()) @@ -330,16 +326,16 @@ async def restart_agents_by_node(request, node_id: str, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_agent_config(request, pretty: bool = False, wait_for_complete: bool = False, agent_id: str = None, - component: str = None, **kwargs: dict) -> web.Response: +async def get_agent_config(pretty: bool = False, wait_for_complete: bool = False, agent_id: str = None, + component: str = None, **kwargs: dict) -> ConnexionResponse: """Get agent active configuration. Returns the active configuration the agent is currently using. This can be different from the configuration present @@ -347,7 +343,6 @@ async def get_agent_config(request, pretty: bool = False, wait_for_complete: boo Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. 
wait_for_complete : bool @@ -359,7 +354,7 @@ async def get_agent_config(request, pretty: bool = False, wait_for_complete: boo Returns ------- - web.Response + ConnexionResponse API response with the agent configuration. """ f_kwargs = {'agent_list': [agent_id], @@ -375,22 +370,21 @@ async def get_agent_config(request, pretty: bool = False, wait_for_complete: boo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def delete_single_agent_multiple_groups(request, agent_id: str, groups_list: str = None, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def delete_single_agent_multiple_groups(agent_id: str, groups_list: str = None, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Remove the agent from all groups or a list of them. The agent will automatically revert to the "default" group if it is removed from all its assigned groups. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -402,7 +396,7 @@ async def delete_single_agent_multiple_groups(request, agent_id: str, groups_lis Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'agent_list': [agent_id], @@ -414,23 +408,22 @@ async def delete_single_agent_multiple_groups(request, agent_id: str, groups_lis is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def get_sync_agent(request, agent_id: str, pretty: bool = False, wait_for_complete=False) -> web.Response: +async def get_sync_agent(agent_id: str, pretty: bool = False, wait_for_complete=False) -> ConnexionResponse: """Get agent configuration sync status. Return whether the agent group configuration has been synchronized with the agent or not. Parameters ---------- - request : connexion.request agent_id : str Agent ID. pretty : bool @@ -451,15 +444,15 @@ async def get_sync_agent(request, agent_id: str, pretty: bool = False, wait_for_ is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def delete_single_agent_single_group(request, agent_id: str, group_id: str, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def delete_single_agent_single_group(agent_id: str, group_id: str, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Remove agent from a single group. Removes an agent from a group. 
If the agent has multigroups, it will preserve all previous groups except the last @@ -467,7 +460,6 @@ async def delete_single_agent_single_group(request, agent_id: str, group_id: str Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -479,7 +471,7 @@ async def delete_single_agent_single_group(request, agent_id: str, group_id: str Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'agent_list': [agent_id], @@ -491,20 +483,19 @@ async def delete_single_agent_single_group(request, agent_id: str, group_id: str is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def put_agent_single_group(request, agent_id: str, group_id: str, force_single_group: bool = False, - pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def put_agent_single_group(agent_id: str, group_id: str, force_single_group: bool = False, + pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Assign an agent to the specified group. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -518,7 +509,7 @@ async def put_agent_single_group(request, agent_id: str, group_id: str, force_si Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'agent_list': [agent_id], @@ -531,19 +522,18 @@ async def put_agent_single_group(request, agent_id: str, group_id: str, force_si is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_agent_key(request, agent_id: str, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_agent_key(agent_id: str, pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get agent key. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -553,7 +543,7 @@ async def get_agent_key(request, agent_id: str, pretty: bool = False, wait_for_c Returns ------- - web.Response + ConnexionResponse API response with the specified agent's key. 
""" f_kwargs = {'agent_list': [agent_id]} @@ -564,19 +554,18 @@ async def get_agent_key(request, agent_id: str, pretty: bool = False, wait_for_c is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def restart_agent(request, agent_id: str, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def restart_agent(agent_id: str, pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Restart an agent. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -586,7 +575,7 @@ async def restart_agent(request, agent_id: str, pretty: bool = False, wait_for_c Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'agent_list': [agent_id]} @@ -597,23 +586,22 @@ async def restart_agent(request, agent_id: str, pretty: bool = False, wait_for_c is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def put_upgrade_agents(request, agents_list: str = None, pretty: bool = False, wait_for_complete: bool = False, +async def put_upgrade_agents(agents_list: str = None, pretty: bool = False, wait_for_complete: bool = False, wpk_repo: str = None, upgrade_version: str = None, use_http: bool = False, force: bool = False, package_type: str = None, q: str = None, manager: str = None, version: str = None, group: str = None, node_name: str = None, name: str = None, - ip: str = None) -> web.Response: + ip: str = None) -> ConnexionResponse: """Upgrade agents using a WPK file from an online repository. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -647,7 +635,7 @@ async def put_upgrade_agents(request, agents_list: str = None, pretty: bool = Fa Returns ------- - web.Response + ConnexionResponse Upgrade message after trying to upgrade the agents. 
""" # If we use the 'all' keyword and the request is distributed_master, agents_list must be '*' @@ -667,7 +655,7 @@ async def put_upgrade_agents(request, agents_list: str = None, pretty: bool = Fa 'node_name': node_name, 'name': name, 'ip': ip, - 'registerIP': request.query.get('registerIP', None) + 'registerIP': request.query_params.get('registerIP', None) }, 'q': q } @@ -675,7 +663,7 @@ async def put_upgrade_agents(request, agents_list: str = None, pretty: bool = Fa # Add nested fields to kwargs filters nested = ['os.version', 'os.name', 'os.platform'] for field in nested: - f_kwargs['filters'][field] = request.query.get(field, None) + f_kwargs['filters'][field] = request.query_params.get(field, None) dapi = DistributedAPI(f=agent.upgrade_agents, f_kwargs=remove_nones_to_dict(f_kwargs), @@ -683,23 +671,22 @@ async def put_upgrade_agents(request, agents_list: str = None, pretty: bool = Fa is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], broadcasting=agents_list == '*' ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def put_upgrade_custom_agents(request, agents_list: str = None, pretty: bool = False, +async def put_upgrade_custom_agents(agents_list: str = None, pretty: bool = False, wait_for_complete: bool = False, file_path: str = None, installer: str = None, q: str = None, manager: str = None, version: str = None, group: str = None, - node_name: str = None, name: str = None, ip: str = None) -> web.Response: + node_name: str = None, name: str = None, ip: str = None) -> ConnexionResponse: """Upgrade agents using a local WPK file. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -727,7 +714,7 @@ async def put_upgrade_custom_agents(request, agents_list: str = None, pretty: bo Returns ------- - web.Response + ConnexionResponse Upgrade message after trying to upgrade the agents. 
""" # If we use the 'all' keyword and the request is distributed_master, agents_list must be '*' @@ -744,7 +731,7 @@ async def put_upgrade_custom_agents(request, agents_list: str = None, pretty: bo 'node_name': node_name, 'name': name, 'ip': ip, - 'registerIP': request.query.get('registerIP', None) + 'registerIP': request.query_params.get('registerIP', None) }, 'q': q } @@ -752,7 +739,7 @@ async def put_upgrade_custom_agents(request, agents_list: str = None, pretty: bo # Add nested fields to kwargs filters nested = ['os.version', 'os.name', 'os.platform'] for field in nested: - f_kwargs['filters'][field] = request.query.get(field, None) + f_kwargs['filters'][field] = request.query_params.get(field, None) dapi = DistributedAPI(f=agent.upgrade_agents, f_kwargs=remove_nones_to_dict(f_kwargs), @@ -760,22 +747,21 @@ async def put_upgrade_custom_agents(request, agents_list: str = None, pretty: bo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], broadcasting=agents_list == '*' ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_agent_upgrade(request, agents_list: str = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_agent_upgrade(agents_list: str = None, pretty: bool = False, wait_for_complete: bool = False, q: str = None, manager: str = None, version: str = None, group: str = None, - node_name: str = None, name: str = None, ip: str = None) -> web.Response: + node_name: str = None, name: str = None, ip: str = None) -> ConnexionResponse: """Get upgrade results from agents. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -799,7 +785,7 @@ async def get_agent_upgrade(request, agents_list: str = None, pretty: bool = Fal Returns ------- - web.Response + ConnexionResponse Upgrade message after having upgraded the agents. 
""" f_kwargs = {'agent_list': agents_list, @@ -810,7 +796,7 @@ async def get_agent_upgrade(request, agents_list: str = None, pretty: bool = Fal 'node_name': node_name, 'name': name, 'ip': ip, - 'registerIP': request.query.get('registerIP', None) + 'registerIP': request.query_params.get('registerIP', None) }, 'q': q } @@ -818,7 +804,7 @@ async def get_agent_upgrade(request, agents_list: str = None, pretty: bool = Fal # Add nested fields to kwargs filters nested = ['os.version', 'os.name', 'os.platform'] for field in nested: - f_kwargs['filters'][field] = request.query.get(field, None) + f_kwargs['filters'][field] = request.query_params.get(field, None) dapi = DistributedAPI(f=agent.get_upgrade_result, f_kwargs=remove_nones_to_dict(f_kwargs), @@ -826,20 +812,19 @@ async def get_agent_upgrade(request, agents_list: str = None, pretty: bool = Fal is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_daemon_stats(request, agent_id: str, pretty: bool = False, wait_for_complete: bool = False, - daemons_list: list = None) -> web.Response: +async def get_daemon_stats(agent_id: str, pretty: bool = False, wait_for_complete: bool = False, + daemons_list: list = None) -> ConnexionResponse: """Get Wazuh statistical information from the specified daemons of a specified agent. Parameters ---------- - request : connexion.request agent_id : str ID of the agent from which the statistics are obtained. pretty : bool @@ -851,7 +836,7 @@ async def get_daemon_stats(request, agent_id: str, pretty: bool = False, wait_fo Returns ------- - web.Response + ConnexionResponse API response. """ daemons_list = daemons_list or [] @@ -864,19 +849,18 @@ async def get_daemon_stats(request, agent_id: str, pretty: bool = False, wait_fo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies']) + rbac_permissions=request.context['token_info']['rbac_policies']) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_component_stats(request, pretty: bool = False, wait_for_complete: bool = False, agent_id: str = None, - component: str = None) -> web.Response: +async def get_component_stats(pretty: bool = False, wait_for_complete: bool = False, agent_id: str = None, + component: str = None) -> ConnexionResponse: """Get a specified agent's component stats. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -888,7 +872,7 @@ async def get_component_stats(request, pretty: bool = False, wait_for_complete: Returns ------- - web.Response + ConnexionResponse API response with the module stats. 
""" f_kwargs = {'agent_list': [agent_id], @@ -900,20 +884,19 @@ async def get_component_stats(request, pretty: bool = False, wait_for_complete: is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def post_new_agent(request, agent_name: str, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def post_new_agent(agent_name: str, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Add agent (quick method). Parameters ---------- - request : connexion.request agent_name : str Name used to register the agent. pretty : bool @@ -923,7 +906,7 @@ async def post_new_agent(request, agent_name: str, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = await AgentAddedModel.get_kwargs({'name': agent_name}) @@ -934,20 +917,19 @@ async def post_new_agent(request, agent_name: str, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def delete_multiple_agent_single_group(request, group_id: str, agents_list: str = None, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def delete_multiple_agent_single_group(group_id: str, agents_list: str = None, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Remove agents assignment from a specified group. Parameters ---------- - request : connexion.request group_id : str Group ID. agents_list : str @@ -959,7 +941,7 @@ async def delete_multiple_agent_single_group(request, group_id: str, agents_list Returns ------- - web.Response + ConnexionResponse API response. """ if 'all' in agents_list: @@ -973,21 +955,20 @@ async def delete_multiple_agent_single_group(request, group_id: str, agents_list is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def put_multiple_agent_single_group(request, group_id: str, agents_list: str = None, pretty: bool = False, +async def put_multiple_agent_single_group(group_id: str, agents_list: str = None, pretty: bool = False, wait_for_complete: bool = False, - force_single_group: bool = False) -> web.Response: + force_single_group: bool = False) -> ConnexionResponse: """Add multiple agents to a group. Parameters ---------- - request : connexion.request group_id : str Group ID. agents_list : str @@ -1001,7 +982,7 @@ async def put_multiple_agent_single_group(request, group_id: str, agents_list: s Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'agent_list': agents_list, @@ -1014,20 +995,19 @@ async def put_multiple_agent_single_group(request, group_id: str, agents_list: s is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def delete_groups(request, groups_list: str = None, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def delete_groups(groups_list: str = None, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Delete all groups or a list of them. Parameters ---------- - request : connexion.request groups_list : str Array of group's IDs. pretty: bool @@ -1037,7 +1017,7 @@ async def delete_groups(request, groups_list: str = None, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response. """ if 'all' in groups_list: @@ -1050,17 +1030,17 @@ async def delete_groups(request, groups_list: str = None, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_list_group(request, pretty: bool = False, wait_for_complete: bool = False, +async def get_list_group(pretty: bool = False, wait_for_complete: bool = False, groups_list: str = None, offset: int = 0, limit: int = None, sort: str = None, search: str = None, q: str = None, select: str = None, - distinct: bool = False) -> web.Response: + distinct: bool = False) -> ConnexionResponse: """Get groups. Returns a list containing basic information about each agent group such as number of agents belonging to the group @@ -1068,7 +1048,6 @@ async def get_list_group(request, pretty: bool = False, wait_for_complete: bool Parameters ---------- - request : connexion.request groups_list : str Array of group's IDs. pretty: bool @@ -1093,10 +1072,10 @@ async def get_list_group(request, pretty: bool = False, wait_for_complete: bool Returns ------- - web.Response + ConnexionResponse API response. """ - hash_ = request.query.get('hash', 'md5') # Select algorithm to generate the returned checksums. + hash_ = request.query_params.get('hash', 'md5') # Select algorithm to generate the returned checksums. 
f_kwargs = {'offset': offset, 'limit': limit, 'group_list': groups_list, @@ -1115,22 +1094,21 @@ async def get_list_group(request, pretty: bool = False, wait_for_complete: bool is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_agents_in_group(request, group_id: str, pretty: bool = False, wait_for_complete: bool = False, +async def get_agents_in_group(group_id: str, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = DATABASE_LIMIT, select: str = None, sort: str = None, search: str = None, status: str = None, q: str = None, - distinct: bool = False) -> web.Response: + distinct: bool = False) -> ConnexionResponse: """Get the list of agents that belongs to the specified group. Parameters ---------- - request : connexion.request group_id : str Group ID. pretty: bool @@ -1157,7 +1135,7 @@ async def get_agents_in_group(request, group_id: str, pretty: bool = False, wait Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'group_list': [group_id], @@ -1178,15 +1156,15 @@ async def get_agents_in_group(request, group_id: str, pretty: bool = False, wait is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def post_group(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def post_group(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Create a new group. Parameters @@ -1198,11 +1176,11 @@ async def post_group(request, pretty: bool = False, wait_for_complete: bool = Fa Returns ------- - web.Response + ConnexionResponse API response. """ # Get body parameters - Body.validate_content_type(request, expected_content_type='application/json') + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) f_kwargs = await GroupAddedModel.get_kwargs(request) dapi = DistributedAPI(f=agent.create_group, @@ -1211,20 +1189,19 @@ async def post_group(request, pretty: bool = False, wait_for_complete: bool = Fa is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_group_config(request, group_id: str, pretty: bool = False, wait_for_complete: bool = False, - offset: int = 0, limit: int = DATABASE_LIMIT) -> web.Response: +async def get_group_config(group_id: str, pretty: bool = False, wait_for_complete: bool = False, + offset: int = 0, limit: int = DATABASE_LIMIT) -> ConnexionResponse: """Get group configuration defined in the `agent.conf` file. Parameters ---------- - request : connexion.request group_id : str Group ID. 
pretty: bool @@ -1238,7 +1215,7 @@ async def get_group_config(request, group_id: str, pretty: bool = False, wait_fo Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'group_list': [group_id], @@ -1251,15 +1228,15 @@ async def get_group_config(request, group_id: str, pretty: bool = False, wait_fo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def put_group_config(request, body: bytes, group_id: str, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def put_group_config(body: bytes, group_id: str, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Update group configuration. Update a specified group's configuration. This API call expects a full valid XML file with the shared configuration @@ -1267,7 +1244,6 @@ async def put_group_config(request, body: bytes, group_id: str, pretty: bool = F Parameters ---------- - request : connexion.request body : bytes Bytes object with the new group configuration. The body is obtained from the XML file and decoded in this function. @@ -1280,7 +1256,7 @@ async def put_group_config(request, body: bytes, group_id: str, pretty: bool = F Returns ------- - web.Response + ConnexionResponse API response. """ # Parse body to utf-8 @@ -1296,21 +1272,20 @@ async def put_group_config(request, body: bytes, group_id: str, pretty: bool = F is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_group_files(request, group_id: str, pretty: bool = False, wait_for_complete: bool = False, +async def get_group_files(group_id: str, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, sort: str = None, search: str = None, - q: str = None, select: str = None, distinct: bool = False) -> web.Response: + q: str = None, select: str = None, distinct: bool = False) -> ConnexionResponse: """Get the files placed under the group directory. Parameters ---------- - request : connexion.request group_id : str Group ID. pretty: bool @@ -1335,10 +1310,10 @@ async def get_group_files(request, group_id: str, pretty: bool = False, wait_for Returns ------- - web.Response + ConnexionResponse API response. """ - hash_ = request.query.get('hash', 'md5') # Select algorithm to generate the returned checksums. + hash_ = request.query_params.get('hash', 'md5') # Select algorithm to generate the returned checksums. 
f_kwargs = {'group_list': [group_id], 'offset': offset, 'limit': limit, @@ -1357,20 +1332,19 @@ async def get_group_files(request, group_id: str, pretty: bool = False, wait_for is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_group_file(request, group_id: str, file_name: str, raw: bool = False, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response | ConnexionResponse: +async def get_group_file(group_id: str, file_name: str, raw: bool = False, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Get the files placed under the group directory. Parameters ---------- - request : connexion.request group_id : str Group ID. file_name : str @@ -1389,7 +1363,7 @@ async def get_group_file(request, group_id: str, file_name: str, raw: bool = Fal """ f_kwargs = {'group_list': [group_id], 'filename': file_name, - 'type_conf': request.query.get('type', None), + 'type_conf': request.query_params.get('type', None), 'raw': raw} dapi = DistributedAPI(f=agent.get_file_conf, @@ -1398,7 +1372,7 @@ async def get_group_file(request, group_id: str, file_name: str, raw: bool = Fal is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) @@ -1411,16 +1385,15 @@ async def get_group_file(request, group_id: str, file_name: str, raw: bool = Fal return ConnexionResponse(body=data['data'], mimetype=mimetype) + return json_response(data, pretty=pretty) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) -async def restart_agents_by_group(request, group_id: str, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def restart_agents_by_group(group_id: str, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Restart all agents from a group. Parameters ---------- - request : connexion.request group_id : str Group name. pretty : bool, optional @@ -1430,7 +1403,7 @@ async def restart_agents_by_group(request, group_id: str, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'group_list': [group_id], 'select': ['id'], 'limit': None} @@ -1440,14 +1413,14 @@ async def restart_agents_by_group(request, group_id: str, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) agents = raise_if_exc(await dapi.distribute_function()) agent_list = [a['id'] for a in agents.affected_items] if not agent_list: data = AffectedItemsWazuhResult(none_msg='Restart command was not sent to any agent') - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) f_kwargs = {'agent_list': agent_list} dapi = DistributedAPI(f=agent.restart_agents_by_group, @@ -1456,15 +1429,15 @@ async def restart_agents_by_group(request, group_id: str, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def insert_agent(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def insert_agent(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Insert a new agent. Parameters @@ -1476,11 +1449,11 @@ async def insert_agent(request, pretty: bool = False, wait_for_complete: bool = Returns ------- - web.Response + ConnexionResponse API response. """ # Get body parameters - Body.validate_content_type(request, expected_content_type='application/json') + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) f_kwargs = await AgentInsertedModel.get_kwargs(request) dapi = DistributedAPI(f=agent.add_agent, @@ -1489,20 +1462,19 @@ async def insert_agent(request, pretty: bool = False, wait_for_complete: bool = is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_agent_no_group(request, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, - limit: int = DATABASE_LIMIT, select=None, sort=None, search=None, q=None) -> web.Response: +async def get_agent_no_group(pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, + limit: int = DATABASE_LIMIT, select=None, sort=None, search=None, q=None) -> ConnexionResponse: """Get agents without group. Parameters ---------- - request : connexion.request pretty: bool Show results in human-readable format. wait_for_complete : bool @@ -1523,7 +1495,7 @@ async def get_agent_no_group(request, pretty: bool = False, wait_for_complete: b Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'offset': offset, @@ -1539,21 +1511,20 @@ async def get_agent_no_group(request, pretty: bool = False, wait_for_complete: b is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_agent_outdated(request, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, +async def get_agent_outdated(pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = DATABASE_LIMIT, sort: str = None, search: str = None, - select: str = None, q: str = None) -> web.Response: + select: str = None, q: str = None) -> ConnexionResponse: """Get outdated agents. Parameters ---------- - request : connexion.request pretty: bool Show results in human-readable format. wait_for_complete : bool @@ -1574,7 +1545,7 @@ async def get_agent_outdated(request, pretty: bool = False, wait_for_complete: b Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'offset': offset, @@ -1590,16 +1561,16 @@ async def get_agent_outdated(request, pretty: bool = False, wait_for_complete: b is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_agent_fields(request, pretty: bool = False, wait_for_complete: bool = False, fields: str = None, +async def get_agent_fields(pretty: bool = False, wait_for_complete: bool = False, fields: str = None, offset: int = 0, limit: int = DATABASE_LIMIT, sort: str = None, search: str = None, - q: str = None) -> web.Response: + q: str = None) -> ConnexionResponse: """Get distinct fields in agents. Returns all the different combinations that agents have for the selected fields. It also indicates the total number @@ -1607,7 +1578,6 @@ async def get_agent_fields(request, pretty: bool = False, wait_for_complete: boo Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -1628,7 +1598,7 @@ async def get_agent_fields(request, pretty: bool = False, wait_for_complete: boo Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'offset': offset, @@ -1644,19 +1614,18 @@ async def get_agent_fields(request, pretty: bool = False, wait_for_complete: boo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_agent_summary_status(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_agent_summary_status(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get agents status summary. 
Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format wait_for_complete : bool @@ -1664,7 +1633,7 @@ async def get_agent_summary_status(request, pretty: bool = False, wait_for_compl Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -1675,19 +1644,18 @@ async def get_agent_summary_status(request, pretty: bool = False, wait_for_compl is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_agent_summary_os(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_agent_summary_os(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get agents OS summary. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format wait_for_complete : bool @@ -1695,7 +1663,7 @@ async def get_agent_summary_os(request, pretty: bool = False, wait_for_complete: Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -1706,8 +1674,8 @@ async def get_agent_summary_os(request, pretty: bool = False, wait_for_complete: is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/cdb_list_controller.py b/api/api/controllers/cdb_list_controller.py index d09c3551af1..05d671d69f0 100644 --- a/api/api/controllers/cdb_list_controller.py +++ b/api/api/controllers/cdb_list_controller.py @@ -3,12 +3,11 @@ # This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 import logging -from typing import Union -from aiohttp import web +from connexion import request from connexion.lifecycle import ConnexionResponse -from api.encoder import dumps, prettify +from api.controllers.util import json_response from api.models.base_model_ import Body from api.util import remove_nones_to_dict, parse_api_param, raise_if_exc from wazuh import cdb_list @@ -18,14 +17,13 @@ logger = logging.getLogger('wazuh-api') -async def get_lists(request, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, +async def get_lists(pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, select: list = None, sort: str = None, search: str = None, filename: str = None, - relative_dirname: str = None, q: str = None, distinct: bool = False) -> web.Response: + relative_dirname: str = None, q: str = None, distinct: bool = False) -> ConnexionResponse: """Get all CDB lists. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -52,7 +50,7 @@ async def get_lists(request, pretty: bool = False, wait_for_complete: bool = Fal Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'offset': offset, @@ -75,20 +73,19 @@ async def get_lists(request, pretty: bool = False, wait_for_complete: bool = Fal is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_file(request, pretty: bool = False, wait_for_complete: bool = False, filename: str = None, - raw: bool = False) -> Union[web.Response, ConnexionResponse]: +async def get_file(pretty: bool = False, wait_for_complete: bool = False, filename: str = None, + raw: bool = False) -> ConnexionResponse: """Get content of one CDB list file, in raw or dict format. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -100,11 +97,11 @@ async def get_file(request, pretty: bool = False, wait_for_complete: bool = Fals Returns ------- - web.Response or ConnexionResponse - Depending on the `raw` parameter, it will return a web.Response object or a ConnexionResponse object: + ConnexionResponse + Depending on the `raw` parameter, it will return a ConnexionResponse object: raw=True -> ConnexionResponse (text/plain) - raw=False (default) -> web.Response (application/json) - If any exception was raised, it will return a web.Response with details. + raw=False (default) -> ConnexionResponse (application/json) + If any exception was raised, it will return a ConnexionResponse with details. """ f_kwargs = {'filename': filename, 'raw': raw} @@ -114,24 +111,24 @@ async def get_file(request, pretty: bool = False, wait_for_complete: bool = Fals is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) if isinstance(data, AffectedItemsWazuhResult): - response = web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + response = json_response(data, pretty=pretty) else: - response = ConnexionResponse(body=data["message"], mimetype='text/plain', content_type='text/plain') + response = ConnexionResponse(body=data["message"], + content_type='text/plain; charset=utf-8') return response -async def put_file(request, body: bytes, overwrite: bool = False, pretty: bool = False, wait_for_complete: bool = False, - filename: str = None) -> web.Response: +async def put_file(body: bytes, overwrite: bool = False, pretty: bool = False, wait_for_complete: bool = False, + filename: str = None) -> ConnexionResponse: """Upload content of CDB list file. Parameters ---------- - request : connexion.request body : bytes Bytes object with the content of the file to be uploaded. pretty : bool @@ -145,7 +142,7 @@ async def put_file(request, body: bytes, overwrite: bool = False, pretty: bool = Returns ------- - web.Response + ConnexionResponse API response. 
""" # Parse body to utf-8 @@ -162,20 +159,19 @@ async def put_file(request, body: bytes, overwrite: bool = False, pretty: bool = is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def delete_file(request, pretty: bool = False, wait_for_complete: bool = False, - filename: str = None) -> web.Response: +async def delete_file(pretty: bool = False, wait_for_complete: bool = False, + filename: str = None) -> ConnexionResponse: """Delete a CDB list file. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -185,7 +181,7 @@ async def delete_file(request, pretty: bool = False, wait_for_complete: bool = F Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'filename': filename} @@ -196,21 +192,20 @@ async def delete_file(request, pretty: bool = False, wait_for_complete: bool = F is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_lists_files(request, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, +async def get_lists_files(pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, sort: str = None, search: str = None, filename: str = None, - relative_dirname: str = None) -> web.Response: + relative_dirname: str = None) -> ConnexionResponse: """Get paths from all CDB lists. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -231,7 +226,7 @@ async def get_lists_files(request, pretty: bool = False, wait_for_complete: bool Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'offset': offset, @@ -252,8 +247,8 @@ async def get_lists_files(request, pretty: bool = False, wait_for_complete: bool is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/ciscat_controller.py b/api/api/controllers/ciscat_controller.py index 93ba7110f2a..e8ef78a4eef 100755 --- a/api/api/controllers/ciscat_controller.py +++ b/api/api/controllers/ciscat_controller.py @@ -5,28 +5,28 @@ import logging from typing import List -from aiohttp import web +from connexion import request +from connexion.lifecycle import ConnexionResponse -import wazuh.ciscat as ciscat -from api.encoder import dumps, prettify +from api.controllers.util import json_response from api.util import remove_nones_to_dict, parse_api_param, raise_if_exc from wazuh.core.cluster.dapi.dapi import DistributedAPI +import wazuh.ciscat as ciscat logger = logging.getLogger('wazuh-api') -async def get_agents_ciscat_results(request, agent_id: str, pretty: bool = False, wait_for_complete: bool = False, +async def get_agents_ciscat_results(agent_id: str, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, select: List[str] = None, sort: str = None, search: str = None, benchmark: str = None, profile: str = None, fail: int = None, error: int = None, notchecked: int = None, unknown: int = None, score: int = None, - q: str = None) -> web.Response: + q: str = None) -> ConnexionResponse: """Get CIS-CAT results from an agent Returns the agent's ciscat results info. Parameters ---------- - request : connexion.request agent_id : str Agent ID. All posible values since 000 onwards. pretty : bool @@ -63,7 +63,7 @@ async def get_agents_ciscat_results(request, agent_id: str, pretty: bool = False Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = { @@ -76,7 +76,7 @@ async def get_agents_ciscat_results(request, agent_id: str, pretty: bool = False 'filters': { 'benchmark': benchmark, 'profile': profile, - 'pass': request.query.get('pass', None), + 'pass': request.query_params.get('pass', None), 'fail': fail, 'error': error, 'notchecked': notchecked, @@ -92,8 +92,8 @@ async def get_agents_ciscat_results(request, agent_id: str, pretty: bool = False is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) - response = raise_if_exc(await dapi.distribute_function()) + data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=response, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/cluster_controller.py b/api/api/controllers/cluster_controller.py index 231df29022f..7bc7ee477eb 100644 --- a/api/api/controllers/cluster_controller.py +++ b/api/api/controllers/cluster_controller.py @@ -4,16 +4,15 @@ import datetime import logging -from typing import Union -from aiohttp import web +from connexion import request from connexion.lifecycle import ConnexionResponse import wazuh.cluster as cluster import wazuh.core.common as common import wazuh.manager as manager import wazuh.stats as stats -from api.encoder import dumps, prettify +from api.controllers.util import json_response, XML_CONTENT_TYPE from api.models.base_model_ import Body from api.util import remove_nones_to_dict, parse_api_param, raise_if_exc, deserialize_date, deprecate_endpoint from api.validator import check_component_configuration_pair @@ -24,12 +23,11 @@ logger = logging.getLogger('wazuh-api') -async def get_cluster_node(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_cluster_node(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get basic information about the local node. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -37,7 +35,7 @@ async def get_cluster_node(request, pretty: bool = False, wait_for_complete: boo Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -49,22 +47,21 @@ async def get_cluster_node(request, pretty: bool = False, wait_for_complete: boo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_cluster_nodes(request, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, +async def get_cluster_nodes(pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, sort: str = None, search: str = None, select: str = None, - nodes_list: str = None, q: str = None, distinct: bool = False) -> web.Response: + nodes_list: str = None, q: str = None, distinct: bool = False) -> ConnexionResponse: """Get information about all nodes in the cluster or a list of them. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. 
wait_for_complete : bool @@ -89,11 +86,11 @@ async def get_cluster_nodes(request, pretty: bool = False, wait_for_complete: bo Returns ------- - web.Response + ConnexionResponse API response. """ # Get type parameter from query - type_ = request.query.get('type', 'all') + type_ = request.query_params.get('type', 'all') f_kwargs = {'filter_node': nodes_list, 'offset': offset, @@ -113,16 +110,16 @@ async def get_cluster_nodes(request, pretty: bool = False, wait_for_complete: bo wait_for_complete=wait_for_complete, logger=logger, local_client_arg='lc', - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_healthcheck(request, pretty: bool = False, wait_for_complete: bool = False, - nodes_list: str = None) -> web.Response: +async def get_healthcheck(pretty: bool = False, wait_for_complete: bool = False, + nodes_list: str = None) -> ConnexionResponse: """Get cluster healthcheck. Returns cluster healthcheck information for all nodes or a list of them. Such information includes last keep alive, @@ -130,7 +127,6 @@ async def get_healthcheck(request, pretty: bool = False, wait_for_complete: bool Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -140,7 +136,7 @@ async def get_healthcheck(request, pretty: bool = False, wait_for_complete: bool Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'filter_node': nodes_list} @@ -153,23 +149,22 @@ async def get_healthcheck(request, pretty: bool = False, wait_for_complete: bool wait_for_complete=wait_for_complete, logger=logger, local_client_arg='lc', - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_nodes_ruleset_sync_status(request, pretty: bool = False, wait_for_complete: bool = False, - nodes_list: str = "*") -> web.Response: +async def get_nodes_ruleset_sync_status(pretty: bool = False, wait_for_complete: bool = False, + nodes_list: str = "*") -> ConnexionResponse: """Get cluster ruleset synchronization status. Returns cluster ruleset synchronization status for all nodes or a list of them. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -179,7 +174,7 @@ async def get_nodes_ruleset_sync_status(request, pretty: bool = False, wait_for_ Returns ------- - web.Response + ConnexionResponse Nodes ruleset synchronization statuses. 
""" nodes = raise_if_exc(await get_system_nodes()) @@ -201,20 +196,19 @@ async def get_nodes_ruleset_sync_status(request, pretty: bool = False, wait_for_ wait_for_complete=wait_for_complete, logger=logger, broadcasting=nodes_list == "*", - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_status(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_status(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get cluster status. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -222,7 +216,7 @@ async def get_status(request, pretty: bool = False, wait_for_complete: bool = Fa Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -232,19 +226,18 @@ async def get_status(request, pretty: bool = False, wait_for_complete: bool = Fa is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_config(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_config(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get the current node cluster configuration. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -252,7 +245,7 @@ async def get_config(request, pretty: bool = False, wait_for_complete: bool = Fa Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -264,20 +257,19 @@ async def get_config(request, pretty: bool = False, wait_for_complete: bool = Fa is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_status_node(request, node_id: str, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_status_node(node_id: str, pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get a specified node's Wazuh daemons status. Parameters ---------- - request : connexion.request node_id : str Cluster node name. pretty : bool @@ -287,7 +279,7 @@ async def get_status_node(request, node_id: str, pretty: bool = False, wait_for_ Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'node_id': node_id} @@ -299,22 +291,21 @@ async def get_status_node(request, node_id: str, pretty: bool = False, wait_for_ is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_info_node(request, node_id: str, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_info_node(node_id: str, pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get a specified node's information. Returns basic information about a specified node such as version, compilation date, installation path. Parameters ---------- - request : connexion.request node_id : str Cluster node name. pretty : bool @@ -324,7 +315,7 @@ async def get_info_node(request, node_id: str, pretty: bool = False, wait_for_co Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'node_id': node_id} @@ -336,17 +327,17 @@ async def get_info_node(request, node_id: str, pretty: bool = False, wait_for_co is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_configuration_node(request, node_id: str, pretty: bool = False, wait_for_complete: bool = False, +async def get_configuration_node(node_id: str, pretty: bool = False, wait_for_complete: bool = False, section: str = None, field: str = None, - raw: bool = False) -> Union[web.Response, ConnexionResponse]: + raw: bool = False) -> ConnexionResponse: """Get a specified node's configuration (ossec.conf). Parameters @@ -366,11 +357,11 @@ async def get_configuration_node(request, node_id: str, pretty: bool = False, wa Returns ------- - web.Response or ConnexionResponse - Depending on the `raw` parameter, it will return a web.Response object or a ConnexionResponse object: + ConnexionResponse + Depending on the `raw` parameter, it will return a ConnexionResponse object: raw=True -> ConnexionResponse (application/xml) - raw=False (default) -> web.Response (application/json) - If any exception was raised, it will return a web.Response with details. + raw=False (default) -> ConnexionResponse (application/json) + If any exception was raised, it will return a ConnexionResponse with details. 
""" f_kwargs = {'node_id': node_id, 'section': section, @@ -384,19 +375,20 @@ async def get_configuration_node(request, node_id: str, pretty: bool = False, wa is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) if isinstance(data, AffectedItemsWazuhResult): - response = web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + response = json_response(data, pretty=pretty) else: - response = ConnexionResponse(body=data["message"], mimetype='application/xml', content_type='application/xml') + response = ConnexionResponse(body=data["message"], + content_type=XML_CONTENT_TYPE) return response -async def get_daemon_stats_node(request, node_id: str, pretty: bool = False, wait_for_complete: bool = False, +async def get_daemon_stats_node(node_id: str, pretty: bool = False, wait_for_complete: bool = False, daemons_list: list = None): """Get Wazuh statistical information from the specified daemons of a specified cluster node. @@ -422,22 +414,21 @@ async def get_daemon_stats_node(request, node_id: str, pretty: bool = False, wai is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_stats_node(request, node_id: str, pretty: bool = False, wait_for_complete: bool = False, - date: str = None) -> web.Response: +async def get_stats_node(node_id: str, pretty: bool = False, wait_for_complete: bool = False, + date: str = None) -> ConnexionResponse: """Get a specified node's stats. Returns Wazuh statistical information in node {node_id} for the current or specified date. Parameters ---------- - request : connexion.request node_id : str Cluster node name. pretty : bool @@ -449,7 +440,7 @@ async def get_stats_node(request, node_id: str, pretty: bool = False, wait_for_c Returns ------- - web.Response + ConnexionResponse API response. """ if not date: @@ -467,15 +458,15 @@ async def get_stats_node(request, node_id: str, pretty: bool = False, wait_for_c is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_stats_hourly_node(request, node_id: str, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def get_stats_hourly_node(node_id: str, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Get a specified node's stats by hour. Returns Wazuh statistical information in node {node_id} per hour. Each number in the averages field represents the @@ -483,7 +474,6 @@ async def get_stats_hourly_node(request, node_id: str, pretty: bool = False, Parameters ---------- - request : connexion.request node_id : str Cluster node name. 
pretty : bool @@ -493,7 +483,7 @@ async def get_stats_hourly_node(request, node_id: str, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'node_id': node_id} @@ -505,16 +495,16 @@ async def get_stats_hourly_node(request, node_id: str, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_stats_weekly_node(request, node_id: str, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def get_stats_weekly_node(node_id: str, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Get a specified node's stats by week. Returns Wazuh statistical information in node {node_id} per week. Each number in the averages field represents the @@ -522,7 +512,6 @@ async def get_stats_weekly_node(request, node_id: str, pretty: bool = False, Parameters ---------- - request : connexion.request node_id : str Cluster node name. pretty : bool @@ -532,7 +521,7 @@ async def get_stats_weekly_node(request, node_id: str, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'node_id': node_id} @@ -544,17 +533,17 @@ async def get_stats_weekly_node(request, node_id: str, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def get_stats_analysisd_node(request, node_id: str, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def get_stats_analysisd_node(node_id: str, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Get a specified node's analysisd statistics. Notes @@ -572,7 +561,7 @@ async def get_stats_analysisd_node(request, node_id: str, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse """ f_kwargs = {'node_id': node_id, 'filename': common.ANALYSISD_STATS} @@ -584,17 +573,17 @@ async def get_stats_analysisd_node(request, node_id: str, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def get_stats_remoted_node(request, node_id: str, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def get_stats_remoted_node(node_id: str, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Get a specified node's remoted statistics. 
Notes @@ -612,7 +601,7 @@ async def get_stats_remoted_node(request, node_id: str, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse """ f_kwargs = {'node_id': node_id, 'filename': common.REMOTED_STATS} @@ -624,24 +613,23 @@ async def get_stats_remoted_node(request, node_id: str, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_log_node(request, node_id: str, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, +async def get_log_node(node_id: str, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, sort: str = None, search: str = None, tag: str = None, level: str = None, - q: str = None, select: str = None, distinct: bool = False) -> web.Response: + q: str = None, select: str = None, distinct: bool = False) -> ConnexionResponse: """Get a specified node's wazuh logs. Returns the last 2000 wazuh log entries in node {node_id}. Parameters ---------- - request : connexion.request node_id : str Cluster node name. pretty : bool @@ -670,7 +658,7 @@ async def get_log_node(request, node_id: str, pretty: bool = False, wait_for_com Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'node_id': node_id, @@ -693,21 +681,20 @@ async def get_log_node(request, node_id: str, pretty: bool = False, wait_for_com is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_log_summary_node(request, node_id: str, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def get_log_summary_node(node_id: str, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Get a summary of a specified node's wazuh logs. Parameters ---------- - request : connexion.request node_id : str Cluster node name. pretty : bool @@ -717,7 +704,7 @@ async def get_log_summary_node(request, node_id: str, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'node_id': node_id} @@ -729,21 +716,20 @@ async def get_log_summary_node(request, node_id: str, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_api_config(request, pretty: bool = False, wait_for_complete: bool = False, - nodes_list: str = '*') -> web.Response: +async def get_api_config(pretty: bool = False, wait_for_complete: bool = False, + nodes_list: str = '*') -> ConnexionResponse: """Get active API configuration in manager or local_node. 
Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -753,7 +739,7 @@ async def get_api_config(request, pretty: bool = False, wait_for_complete: bool Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'node_list': nodes_list} @@ -766,21 +752,20 @@ async def get_api_config(request, pretty: bool = False, wait_for_complete: bool wait_for_complete=wait_for_complete, logger=logger, broadcasting=nodes_list == '*', - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def put_restart(request, pretty: bool = False, wait_for_complete: bool = False, - nodes_list: str = '*') -> web.Response: +async def put_restart(pretty: bool = False, wait_for_complete: bool = False, + nodes_list: str = '*') -> ConnexionResponse: """Restarts all nodes in the cluster or a list of them. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -790,7 +775,7 @@ async def put_restart(request, pretty: bool = False, wait_for_complete: bool = F Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'node_list': nodes_list} @@ -803,22 +788,21 @@ async def put_restart(request, pretty: bool = False, wait_for_complete: bool = F wait_for_complete=wait_for_complete, logger=logger, broadcasting=nodes_list == '*', - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_conf_validation(request, pretty: bool = False, wait_for_complete: bool = False, - nodes_list: str = '*') -> web.Response: +async def get_conf_validation(pretty: bool = False, wait_for_complete: bool = False, + nodes_list: str = '*') -> ConnexionResponse: """Check whether the Wazuh configuration in a list of cluster nodes is correct or not. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -828,7 +812,7 @@ async def get_conf_validation(request, pretty: bool = False, wait_for_complete: Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'node_list': nodes_list} @@ -841,21 +825,20 @@ async def get_conf_validation(request, pretty: bool = False, wait_for_complete: wait_for_complete=wait_for_complete, logger=logger, broadcasting=nodes_list == '*', - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_node_config(request, node_id: str, component: str, wait_for_complete: bool = False, pretty: bool = False, - **kwargs: dict) -> web.Response: +async def get_node_config(node_id: str, component: str, wait_for_complete: bool = False, pretty: bool = False, + **kwargs: dict) -> ConnexionResponse: """Get active configuration in node node_id [on demand] Parameters ---------- - request : connexion.request node_id : str Node ID. component : str @@ -868,7 +851,7 @@ async def get_node_config(request, node_id: str, component: str, wait_for_comple Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'node_id': node_id, @@ -885,21 +868,20 @@ async def get_node_config(request, node_id: str, component: str, wait_for_comple is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def update_configuration(request, node_id: str, body: bytes, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def update_configuration(node_id: str, body: bytes, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Update Wazuh configuration (ossec.conf) in node node_id. Parameters ---------- - request : connexion.request node_id : str Node ID. body : bytes @@ -911,7 +893,7 @@ async def update_configuration(request, node_id: str, body: bytes, pretty: bool Returns ------- - web.Response + ConnexionResponse API response. 
""" # Parse body to utf-8 @@ -928,9 +910,9 @@ async def update_configuration(request, node_id: str, body: bytes, pretty: bool is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/decoder_controller.py b/api/api/controllers/decoder_controller.py index 4b8f4cc36bc..ff828c8442e 100644 --- a/api/api/controllers/decoder_controller.py +++ b/api/api/controllers/decoder_controller.py @@ -5,10 +5,10 @@ import logging from typing import Union -from aiohttp import web +from connexion import request from connexion.lifecycle import ConnexionResponse -from api.encoder import dumps, prettify +from api.controllers.util import json_response, XML_CONTENT_TYPE from api.models.base_model_ import Body from api.util import remove_nones_to_dict, parse_api_param, raise_if_exc from wazuh import decoder as decoder_framework @@ -18,10 +18,10 @@ logger = logging.getLogger('wazuh-api') -async def get_decoders(request, decoder_names: list = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_decoders(decoder_names: list = None, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, select: list = None, sort: str = None, search: str = None, q: str = None, filename: str = None, relative_dirname: str = None, - status: str = None, distinct: bool = False) -> web.Response: + status: str = None, distinct: bool = False) -> ConnexionResponse: """Get all decoders. Returns information about all the decoders included in the ossec.conf file. @@ -29,7 +29,6 @@ async def get_decoders(request, decoder_names: list = None, pretty: bool = False Parameters ---------- - request : connexion.request decoder_names : list Filters by decoder name. pretty: bool @@ -60,7 +59,7 @@ async def get_decoders(request, decoder_names: list = None, pretty: bool = False Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'names': decoder_names, @@ -83,17 +82,17 @@ async def get_decoders(request, decoder_names: list = None, pretty: bool = False is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_decoders_files(request, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, +async def get_decoders_files(pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, sort: str = None, search: str = None, filename: str = None, relative_dirname: str = None, status: str = None, q: str = None, - select: str = None, distinct: bool = False) -> web.Response: + select: str = None, distinct: bool = False) -> ConnexionResponse: """Get all decoders' files. Returns information about all decoders' files used in Wazuh. @@ -101,7 +100,6 @@ async def get_decoders_files(request, pretty: bool = False, wait_for_complete: b Parameters ---------- - request : connexion.request pretty: bool Show results in human-readable format. 
wait_for_complete : bool @@ -132,7 +130,7 @@ async def get_decoders_files(request, pretty: bool = False, wait_for_complete: b Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'offset': offset, @@ -154,23 +152,22 @@ async def get_decoders_files(request, pretty: bool = False, wait_for_complete: b is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_decoders_parents(request, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, +async def get_decoders_parents(pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, select: list = None, sort: str = None, - search: str = None) -> web.Response: + search: str = None) -> ConnexionResponse: """Get decoders by parents. Returns information about all parent decoders. A parent decoder is a decoder used as base of other decoders. Parameters ---------- - request : connexion.request pretty: bool Show results in human-readable format. wait_for_complete : bool @@ -189,7 +186,7 @@ async def get_decoders_parents(request, pretty: bool = False, wait_for_complete: Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'offset': offset, @@ -207,21 +204,20 @@ async def get_decoders_parents(request, pretty: bool = False, wait_for_complete: is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_file(request, pretty: bool = False, wait_for_complete: bool = False, +async def get_file(pretty: bool = False, wait_for_complete: bool = False, filename: str = None, relative_dirname: str = None, - raw: bool = False) -> Union[web.Response, ConnexionResponse]: + raw: bool = False) -> ConnexionResponse: """Get decoder file content. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. It only works when `raw` is False (JSON format). wait_for_complete : bool @@ -236,10 +232,10 @@ async def get_file(request, pretty: bool = False, wait_for_complete: bool = Fals Returns ------- web.json_response or ConnexionResponse - Depending on the `raw` parameter, it will return a web.Response object or a ConnexionResponse object: + Depending on the `raw` parameter, it will return a ConnexionResponse object: raw=True -> ConnexionResponse (application/xml) - raw=False (default) -> web.Response (application/json) - If any exception was raised, it will return a web.Response with details. + raw=False (default) -> ConnexionResponse (application/json) + If any exception was raised, it will return a ConnexionResponse with details. 
""" f_kwargs = {'filename': filename, 'raw': raw, 'relative_dirname': relative_dirname} @@ -249,26 +245,25 @@ async def get_file(request, pretty: bool = False, wait_for_complete: bool = Fals is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) if isinstance(data, AffectedItemsWazuhResult): - response = web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + response = json_response(data, pretty=pretty) else: - response = ConnexionResponse(body=data["message"], - mimetype='application/xml', content_type='application/xml') + response = ConnexionResponse(body=data["message"], + content_type=XML_CONTENT_TYPE) return response -async def put_file(request, body: bytes, filename: str = None, relative_dirname: str = None, +async def put_file(body: bytes, filename: str = None, relative_dirname: str = None, overwrite: bool = False, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: + wait_for_complete: bool = False) -> ConnexionResponse: """Upload a decoder file. Parameters ---------- - request : connexion.request body : bytes Body request with the file content to be uploaded. filename : str @@ -285,7 +280,7 @@ async def put_file(request, body: bytes, filename: str = None, relative_dirname: Returns ------- - web.Response + ConnexionResponse API response. """ # Parse body to utf-8 @@ -303,22 +298,19 @@ async def put_file(request, body: bytes, filename: str = None, relative_dirname: is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def delete_file(request, filename: str = None, - relative_dirname: str = None, - pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def delete_file(filename: str = None, relative_dirname: str = None, + pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Delete a decoder file. Parameters ---------- - request : connexion.request filename : str Name of the file. relative_dirname : str @@ -330,7 +322,7 @@ async def delete_file(request, filename: str = None, Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'filename': filename, 'relative_dirname': relative_dirname} @@ -341,8 +333,8 @@ async def delete_file(request, filename: str = None, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/default_controller.py b/api/api/controllers/default_controller.py index c98d714d02e..c022e7176af 100644 --- a/api/api/controllers/default_controller.py +++ b/api/api/controllers/default_controller.py @@ -6,9 +6,9 @@ import socket from datetime import datetime -from aiohttp import web +from connexion.lifecycle import ConnexionResponse -from api.encoder import dumps, prettify +from api.controllers.util import json_response from api.models.basic_info_model import BasicInfo from wazuh.core.common import DATE_FORMAT from wazuh.core.results import WazuhResult @@ -18,7 +18,7 @@ logger = logging.getLogger('wazuh-api') -async def default_info(pretty: bool = False) -> web.Response: +async def default_info(pretty: bool = False) -> ConnexionResponse: """Return basic information about the Wazuh API. Parameters @@ -28,7 +28,7 @@ async def default_info(pretty: bool = False) -> web.Response: Returns ------- - web.Response + ConnexionResponse API response. """ info_data = load_spec() @@ -41,6 +41,6 @@ async def default_info(pretty: bool = False) -> web.Response: 'hostname': socket.gethostname(), 'timestamp': get_utc_now().strftime(DATE_FORMAT) } - response = WazuhResult({'data': BasicInfo.from_dict(data)}) + data = WazuhResult({'data': BasicInfo.from_dict(data)}) - return web.json_response(data=response, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/event_controller.py b/api/api/controllers/event_controller.py index 55dd5ece8aa..f9a935e4326 100644 --- a/api/api/controllers/event_controller.py +++ b/api/api/controllers/event_controller.py @@ -4,11 +4,12 @@ import logging -from aiohttp import web +from connexion import request +from connexion.lifecycle import ConnexionResponse from wazuh.core.cluster.dapi.dapi import DistributedAPI from wazuh.event import send_event_to_analysisd -from api.encoder import dumps, prettify +from api.controllers.util import json_response, JSON_CONTENT_TYPE from api.models.base_model_ import Body from api.models.event_ingest_model import EventIngestModel from api.util import raise_if_exc, remove_nones_to_dict @@ -16,7 +17,7 @@ logger = logging.getLogger('wazuh-api') -async def forward_event(request: web.Request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def forward_event(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Forward events to analysisd. Parameters @@ -30,10 +31,10 @@ async def forward_event(request: web.Request, pretty: bool = False, wait_for_com Returns ------- - web.Response + ConnexionResponse API Response. 
""" - Body.validate_content_type(request, expected_content_type='application/json') + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) f_kwargs = await EventIngestModel.get_kwargs(request) dapi = DistributedAPI(f=send_event_to_analysisd, @@ -42,9 +43,9 @@ async def forward_event(request: web.Request, pretty: bool = False, wait_for_com is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/experimental_controller.py b/api/api/controllers/experimental_controller.py index 6799a67db0b..3dcb50cfb65 100644 --- a/api/api/controllers/experimental_controller.py +++ b/api/api/controllers/experimental_controller.py @@ -5,14 +5,15 @@ import logging from functools import wraps -from aiohttp import web +from connexion import request +from connexion.lifecycle import ConnexionResponse import wazuh.ciscat as ciscat import wazuh.rootcheck as rootcheck import wazuh.syscheck as syscheck import wazuh.syscollector as syscollector from api import configuration -from api.encoder import dumps, prettify +from api.controllers.util import json_response from api.util import remove_nones_to_dict, parse_api_param, raise_if_exc, deprecate_endpoint from wazuh.core.cluster.dapi.dapi import DistributedAPI from wazuh.core.exception import WazuhResourceNotFound @@ -24,23 +25,22 @@ def check_experimental_feature_value(func): """Decorator used to check whether the experimental features are enabled in the API configuration or not.""" @wraps(func) - def wrapper(*args, **kwargs): + async def wrapper(*args, **kwargs): if not configuration.api_conf['experimental_features']: raise_if_exc(WazuhResourceNotFound(1122)) else: - return func(*args, **kwargs) + return await func(*args, **kwargs) return wrapper @check_experimental_feature_value -async def clear_rootcheck_database(request, pretty: bool = False, wait_for_complete: bool = False, - agents_list: list = None) -> web.Response: +async def clear_rootcheck_database(pretty: bool = False, wait_for_complete: bool = False, + agents_list: list = None) -> ConnexionResponse: """Clear the rootcheck database for all the agents or a list of them. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -50,7 +50,7 @@ async def clear_rootcheck_database(request, pretty: bool = False, wait_for_compl Returns ------- - web.Response + ConnexionResponse API response. 
""" # If we use the 'all' keyword and the request is distributed_master, agents_list must be '*' @@ -66,22 +66,21 @@ async def clear_rootcheck_database(request, pretty: bool = False, wait_for_compl wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() @check_experimental_feature_value -async def clear_syscheck_database(request, pretty: bool = False, wait_for_complete: bool = False, - agents_list: list = None) -> web.Response: +async def clear_syscheck_database(pretty: bool = False, wait_for_complete: bool = False, + agents_list: list = None) -> ConnexionResponse: """Clear the syscheck database for all agents or a list of them. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -91,7 +90,7 @@ async def clear_syscheck_database(request, pretty: bool = False, wait_for_comple Returns ------- - web.Response + ConnexionResponse API response. """ # If we use the 'all' keyword and the request is distributed_master, agents_list must be '*' @@ -107,24 +106,23 @@ async def clear_syscheck_database(request, pretty: bool = False, wait_for_comple wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @check_experimental_feature_value -async def get_cis_cat_results(request, pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', +async def get_cis_cat_results(pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', offset: int = 0, limit: int = None, select: str = None, sort: str = None, search: str = None, benchmark: str = None, profile: str = None, fail: int = None, error: int = None, notchecked: int = None, unknown: int = None, - score: int = None) -> web.Response: + score: int = None) -> ConnexionResponse: """Get ciscat results info from all agents or a list of them. Parameters ---------- - request : connexion.request agents_list : str List of agent's IDs. pretty : bool @@ -159,7 +157,7 @@ async def get_cis_cat_results(request, pretty: bool = False, wait_for_complete: Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'agent_list': agents_list, @@ -176,7 +174,7 @@ async def get_cis_cat_results(request, pretty: bool = False, wait_for_complete: 'notchecked': notchecked, 'unknown': unknown, 'score': score, - 'pass': request.query.get('pass', None) + 'pass': request.query_params.get('pass', None) } } @@ -187,23 +185,22 @@ async def get_cis_cat_results(request, pretty: bool = False, wait_for_complete: wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() @check_experimental_feature_value -async def get_hardware_info(request, pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', +async def get_hardware_info(pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', offset: int = 0, limit: int = None, select: str = None, sort: str = None, - search: str = None, board_serial: str = None) -> web.Response: + search: str = None, board_serial: str = None) -> ConnexionResponse: """Get hardware info from all agents or a list of them. Parameters ---------- - request : connexion.request agents_list : str List of agent's IDs. pretty : bool @@ -226,7 +223,7 @@ async def get_hardware_info(request, pretty: bool = False, wait_for_complete: bo Returns ------- - web.Response + ConnexionResponse API response. """ filters = { @@ -235,7 +232,7 @@ async def get_hardware_info(request, pretty: bool = False, wait_for_complete: bo # Add nested fields to kwargs filters nested = ['ram.free', 'ram.total', 'cpu.cores', 'cpu.mhz', 'cpu.name'] for field in nested: - filters[field] = request.query.get(field, None) + filters[field] = request.query_params.get(field, None) f_kwargs = {'agent_list': agents_list, 'offset': offset, 'limit': limit, @@ -253,24 +250,23 @@ async def get_hardware_info(request, pretty: bool = False, wait_for_complete: bo wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() @check_experimental_feature_value -async def get_network_address_info(request, pretty: bool = False, wait_for_complete: bool = False, +async def get_network_address_info(pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', offset: int = 0, limit: str = None, select: str = None, sort: str = None, search: str = None, iface_name: str = None, proto: str = None, - address: str = None, broadcast: str = None, netmask: str = None) -> web.Response: + address: str = None, broadcast: str = None, netmask: str = None) -> ConnexionResponse: """Get the IPv4 and IPv6 addresses associated to all network interfaces. Parameters ---------- - request : connexion.request agents_list : str List of agent's IDs. pretty : bool @@ -301,7 +297,7 @@ async def get_network_address_info(request, pretty: bool = False, wait_for_compl Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'agent_list': agents_list, @@ -327,24 +323,23 @@ async def get_network_address_info(request, pretty: bool = False, wait_for_compl wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() @check_experimental_feature_value -async def get_network_interface_info(request, pretty: bool = False, wait_for_complete: bool = False, +async def get_network_interface_info(pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', offset: int = 0, limit: int = None, select: str = None, sort: str = None, search: str = None, adapter: str = None, state: str = None, - mtu: str = None) -> web.Response: + mtu: str = None) -> ConnexionResponse: """Get all network interfaces from all agents or a list of them. Parameters ---------- - request : connexion.request agents_list : str List of agent's IDs. pretty : bool @@ -371,19 +366,19 @@ async def get_network_interface_info(request, pretty: bool = False, wait_for_com Returns ------- - web.Response + ConnexionResponse API response. """ filters = { 'adapter': adapter, - 'type': request.query.get('type', None), + 'type': request.query_params.get('type', None), 'state': state, 'mtu': mtu } # Add nested fields to kwargs filters nested = ['tx.packets', 'rx.packets', 'tx.bytes', 'rx.bytes', 'tx.errors', 'rx.errors', 'tx.dropped', 'rx.dropped'] for field in nested: - filters[field] = request.query.get(field, None) + filters[field] = request.query_params.get(field, None) f_kwargs = {'agent_list': agents_list, 'offset': offset, @@ -402,24 +397,23 @@ async def get_network_interface_info(request, pretty: bool = False, wait_for_com wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() @check_experimental_feature_value -async def get_network_protocol_info(request, pretty: bool = False, wait_for_complete: bool = False, +async def get_network_protocol_info(pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', offset: int = 0, limit: int = None, select: str = None, sort: str = None, search: str = None, iface: str = None, gateway: str = None, - dhcp: str = None) -> web.Response: + dhcp: str = None) -> ConnexionResponse: """Get network protocol info from all agents or a list of them. Parameters ---------- - request : connexion.request agents_list : str List of agent's IDs. pretty : bool @@ -446,7 +440,7 @@ async def get_network_protocol_info(request, pretty: bool = False, wait_for_comp Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'agent_list': agents_list, @@ -457,7 +451,7 @@ async def get_network_protocol_info(request, pretty: bool = False, wait_for_comp 'search': parse_api_param(search, 'search'), 'filters': { 'iface': iface, - 'type': request.query.get('type', None), + 'type': request.query_params.get('type', None), 'gateway': gateway, 'dhcp': dhcp }, @@ -471,24 +465,23 @@ async def get_network_protocol_info(request, pretty: bool = False, wait_for_comp wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() @check_experimental_feature_value -async def get_os_info(request, pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', +async def get_os_info(pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', offset: int = 0, limit: int = None, select: str = None, sort: str = None, search: str = None, os_name: str = None, architecture: str = None, os_version: str = None, version: str = None, - release: str = None) -> web.Response: + release: str = None) -> ConnexionResponse: """Get OS info from all agents or a list of them. Parameters ---------- - request : connexion.request agents_list : str List of agent's IDs. pretty : bool @@ -519,7 +512,7 @@ async def get_os_info(request, pretty: bool = False, wait_for_complete: bool = F Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'agent_list': agents_list, @@ -545,24 +538,23 @@ async def get_os_info(request, pretty: bool = False, wait_for_complete: bool = F wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() @check_experimental_feature_value -async def get_packages_info(request, pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', +async def get_packages_info(pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', offset: int = 0, limit: int = None, select: str = None, sort: str = None, search: str = None, vendor: str = None, name: str = None, architecture: str = None, - version: str = None) -> web.Response: + version: str = None) -> ConnexionResponse: """Get packages info from all agents or a list of them. Parameters ---------- - request : connexion.request agents_list : str List of agent's IDs. pretty : bool @@ -591,7 +583,7 @@ async def get_packages_info(request, pretty: bool = False, wait_for_complete: bo Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'agent_list': agents_list, @@ -604,7 +596,7 @@ async def get_packages_info(request, pretty: bool = False, wait_for_complete: bo 'vendor': vendor, 'name': name, 'architecture': architecture, - 'format': request.query.get('format', None), + 'format': request.query_params.get('format', None), 'version': version }, 'element_type': 'packages' @@ -617,24 +609,23 @@ async def get_packages_info(request, pretty: bool = False, wait_for_complete: bo wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() @check_experimental_feature_value -async def get_ports_info(request, pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', +async def get_ports_info(pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', offset: int = 0, limit: int = None, select: str = None, sort: str = None, search: str = None, pid: str = None, protocol: str = None, tx_queue: str = None, state: str = None, - process: str = None) -> web.Response: + process: str = None) -> ConnexionResponse: """Get ports info from all agents or a list of them. Parameters ---------- - request : connexion.request agents_list : str List of agent's IDs. pretty : bool @@ -665,7 +656,7 @@ async def get_ports_info(request, pretty: bool = False, wait_for_complete: bool Returns ------- - web.Response + ConnexionResponse API response. """ filters = { @@ -678,7 +669,7 @@ async def get_ports_info(request, pretty: bool = False, wait_for_complete: bool # Add nested fields to kwargs filters nested = ['local.ip', 'local.port', 'remote.ip'] for field in nested: - filters[field] = request.query.get(field, None) + filters[field] = request.query_params.get(field, None) f_kwargs = {'agent_list': agents_list, 'offset': offset, @@ -697,26 +688,25 @@ async def get_ports_info(request, pretty: bool = False, wait_for_complete: bool wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() @check_experimental_feature_value -async def get_processes_info(request, pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', +async def get_processes_info(pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', offset: int = 0, limit: int = None, select: str = None, sort: str = None, search: str = None, pid: str = None, state: str = None, ppid: str = None, egroup: str = None, euser: str = None, fgroup: str = None, name: str = None, nlwp: str = None, pgrp: str = None, priority: str = None, rgroup: str = None, - ruser: str = None, sgroup: str = None, suser: str = None) -> web.Response: + ruser: str = None, sgroup: str = None, suser: str = None) -> ConnexionResponse: """Get processes info from all agents or a list of them. Parameters ---------- - request : connexion.request agents_list : str List of agent's IDs. 
pretty : bool @@ -765,7 +755,7 @@ async def get_processes_info(request, pretty: bool = False, wait_for_complete: b Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'agent_list': agents_list, @@ -800,23 +790,22 @@ async def get_processes_info(request, pretty: bool = False, wait_for_complete: b wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() @check_experimental_feature_value -async def get_hotfixes_info(request, pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', +async def get_hotfixes_info(pretty: bool = False, wait_for_complete: bool = False, agents_list: str = '*', offset: int = 0, limit: int = None, sort: str = None, search: str = None, - select: str = None, hotfix: str = None) -> web.Response: + select: str = None, hotfix: str = None) -> ConnexionResponse: """Get hotfixes info from all agents or a list of them. Parameters ---------- - request : connexion.request agents_list : str List of agent's IDs. pretty : bool @@ -841,7 +830,7 @@ async def get_hotfixes_info(request, pretty: bool = False, wait_for_complete: bo Returns ------- - web.Response + ConnexionResponse API response. """ filters = {'hotfix': hotfix} @@ -862,8 +851,8 @@ async def get_hotfixes_info(request, pretty: bool = False, wait_for_complete: bo wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/logtest_controller.py b/api/api/controllers/logtest_controller.py index b70a567e4c2..50fdf4c6fd5 100644 --- a/api/api/controllers/logtest_controller.py +++ b/api/api/controllers/logtest_controller.py @@ -4,24 +4,25 @@ import logging -from aiohttp import web +from connexion import request +from connexion.lifecycle import ConnexionResponse -from api.encoder import dumps, prettify +from api.controllers.util import json_response, JSON_CONTENT_TYPE from api.models.base_model_ import Body from api.models.logtest_model import LogtestModel from api.util import remove_nones_to_dict, raise_if_exc + from wazuh import logtest from wazuh.core.cluster.dapi.dapi import DistributedAPI logger = logging.getLogger('wazuh-api') -async def run_logtest_tool(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def run_logtest_tool(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get the logtest output after sending a JSON to its socket. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -29,10 +30,10 @@ async def run_logtest_tool(request, pretty: bool = False, wait_for_complete: boo Returns ------- - web.Response + ConnexionResponse API response. 
""" - Body.validate_content_type(request, expected_content_type='application/json') + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) f_kwargs = await LogtestModel.get_kwargs(request) dapi = DistributedAPI(f=logtest.run_logtest, @@ -41,20 +42,19 @@ async def run_logtest_tool(request, pretty: bool = False, wait_for_complete: boo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def end_logtest_session(request, pretty: bool = False, wait_for_complete: bool = False, - token: str = None) -> web.Response: +async def end_logtest_session(pretty: bool = False, wait_for_complete: bool = False, + token: str = None) -> ConnexionResponse: """Delete the saved session corresponding to the specified token. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -64,7 +64,7 @@ async def end_logtest_session(request, pretty: bool = False, wait_for_complete: Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'token': token} @@ -75,8 +75,8 @@ async def end_logtest_session(request, pretty: bool = False, wait_for_complete: is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/manager_controller.py b/api/api/controllers/manager_controller.py index 5d00e86cc9a..9be383cd927 100644 --- a/api/api/controllers/manager_controller.py +++ b/api/api/controllers/manager_controller.py @@ -7,12 +7,14 @@ from typing import Union from aiohttp import web +from connexion import request from connexion.lifecycle import ConnexionResponse import wazuh.manager as manager import wazuh.stats as stats from api.constants import INSTALLATION_UID_KEY, UPDATE_INFORMATION_KEY from api.encoder import dumps, prettify +from api.controllers.util import json_response, XML_CONTENT_TYPE from api.models.base_model_ import Body from api.util import ( deprecate_endpoint, deserialize_date, only_master_endpoint, parse_api_param, raise_if_exc, remove_nones_to_dict @@ -27,12 +29,11 @@ logger = logging.getLogger('wazuh-api') -async def get_status(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_status(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get manager's or local_node's Wazuh daemons status Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -40,7 +41,7 @@ async def get_status(request, pretty: bool = False, wait_for_complete: bool = Fa Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {} @@ -51,19 +52,18 @@ async def get_status(request, pretty: bool = False, wait_for_complete: bool = Fa is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_info(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_info(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get manager's or local_node's basic information Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -71,7 +71,7 @@ async def get_info(request, pretty: bool = False, wait_for_complete: bool = Fals Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -82,21 +82,20 @@ async def get_info(request, pretty: bool = False, wait_for_complete: bool = Fals is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_configuration(request, pretty: bool = False, wait_for_complete: bool = False, section: str = None, +async def get_configuration(pretty: bool = False, wait_for_complete: bool = False, section: str = None, field: str = None, raw: bool = False, - distinct: bool = False) -> Union[web.Response, ConnexionResponse]: + distinct: bool = False) -> ConnexionResponse: """Get manager's or local_node's configuration (ossec.conf) Parameters ---------- - request : connexion.request pretty : bool, optional Show results in human-readable format. It only works when `raw` is False (JSON format). Default `False` wait_for_complete : bool, optional @@ -112,11 +111,11 @@ async def get_configuration(request, pretty: bool = False, wait_for_complete: bo Returns ------- - web.Response or ConnexionResponse - Depending on the `raw` parameter, it will return a web.Response object or a ConnexionResponse object: + ConnexionResponse + Depending on the `raw` parameter, it will return a ConnexionResponse object: raw=True -> ConnexionResponse (application/xml) - raw=False (default) -> web.Response (application/json) - If any exception was raised, it will return a web.Response with details. + raw=False (default) -> ConnexionResponse (application/json) + If any exception was raised, it will return a ConnexionResponse with details. 
""" f_kwargs = {'section': section, 'field': field, @@ -129,18 +128,19 @@ async def get_configuration(request, pretty: bool = False, wait_for_complete: bo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) if isinstance(data, AffectedItemsWazuhResult): - response = web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + response = json_response(data, pretty=pretty) else: - response = ConnexionResponse(body=data["message"], mimetype='application/xml', content_type='application/xml') + response = ConnexionResponse(body=data["message"], + content_type=XML_CONTENT_TYPE) return response -async def get_daemon_stats(request, pretty: bool = False, wait_for_complete: bool = False, daemons_list: list = None): +async def get_daemon_stats(pretty: bool = False, wait_for_complete: bool = False, daemons_list: list = None): """Get Wazuh statistical information from the specified manager's daemons. Parameters @@ -161,20 +161,19 @@ async def get_daemon_stats(request, pretty: bool = False, wait_for_complete: boo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies']) + rbac_permissions=request.context['token_info']['rbac_policies']) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_stats(request, pretty: bool = False, wait_for_complete: bool = False, date: str = None) -> web.Response: +async def get_stats(pretty: bool = False, wait_for_complete: bool = False, date: str = None) -> ConnexionResponse: """Get manager's or local_node's stats. Returns Wazuh statistical information for the current or specified date. Parameters ---------- - request : connexion.request pretty : bool, optional Show results in human-readable format. wait_for_complete : bool, optional @@ -184,7 +183,7 @@ async def get_stats(request, pretty: bool = False, wait_for_complete: bool = Fal Returns ------- - web.Response + ConnexionResponse API response. """ if not date: @@ -200,14 +199,14 @@ async def get_stats(request, pretty: bool = False, wait_for_complete: bool = Fal is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_stats_hourly(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_stats_hourly(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get manager's or local_node's stats by hour. Returns Wazuh statistical information per hour. Each number in the averages field represents the average of alerts @@ -215,7 +214,6 @@ async def get_stats_hourly(request, pretty: bool = False, wait_for_complete: boo Parameters ---------- - request : connexion.request pretty : bool, optional Show results in human-readable format. 
wait_for_complete : bool, optional @@ -223,7 +221,7 @@ async def get_stats_hourly(request, pretty: bool = False, wait_for_complete: boo Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -234,14 +232,14 @@ async def get_stats_hourly(request, pretty: bool = False, wait_for_complete: boo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_stats_weekly(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_stats_weekly(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get manager's or local_node's stats by week. Returns Wazuh statistical information per week. Each number in the averages field represents the average of alerts @@ -249,7 +247,6 @@ async def get_stats_weekly(request, pretty: bool = False, wait_for_complete: boo Parameters ---------- - request : connexion.request pretty : bool, optional Show results in human-readable format. wait_for_complete : bool, optional @@ -257,7 +254,7 @@ async def get_stats_weekly(request, pretty: bool = False, wait_for_complete: boo Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -268,15 +265,15 @@ async def get_stats_weekly(request, pretty: bool = False, wait_for_complete: boo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def get_stats_analysisd(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_stats_analysisd(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get manager's or local_node's analysisd statistics. Notes @@ -292,7 +289,7 @@ async def get_stats_analysisd(request, pretty: bool = False, wait_for_complete: Returns ------- - web.Response + ConnexionResponse """ f_kwargs = {'filename': common.ANALYSISD_STATS} @@ -302,15 +299,15 @@ async def get_stats_analysisd(request, pretty: bool = False, wait_for_complete: is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def get_stats_remoted(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_stats_remoted(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get manager's or local_node's remoted statistics. 
Notes @@ -326,7 +323,7 @@ async def get_stats_remoted(request, pretty: bool = False, wait_for_complete: bo Returns ------- - web.Response + ConnexionResponse """ f_kwargs = {'filename': common.REMOTED_STATS} @@ -336,21 +333,20 @@ async def get_stats_remoted(request, pretty: bool = False, wait_for_complete: bo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_log(request, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, +async def get_log(pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, sort: str = None, search: str = None, tag: str = None, level: str = None, - q: str = None, select: str = None, distinct: bool = False) -> web.Response: + q: str = None, select: str = None, distinct: bool = False) -> ConnexionResponse: """Get manager's or local_node's last 2000 wazuh log entries. Parameters ---------- - request : connexion.request pretty: bool Show results in human-readable format. wait_for_complete : bool @@ -377,7 +373,7 @@ async def get_log(request, pretty: bool = False, wait_for_complete: bool = False Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'offset': offset, @@ -398,19 +394,18 @@ async def get_log(request, pretty: bool = False, wait_for_complete: bool = False is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_log_summary(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_log_summary(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get manager's or local_node's summary of the last 2000 wazuh log entries. Parameters ---------- - request : connexion.request pretty: bool Show results in human-readable format. wait_for_complete : bool @@ -418,7 +413,7 @@ async def get_log_summary(request, pretty: bool = False, wait_for_complete: bool Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -429,19 +424,18 @@ async def get_log_summary(request, pretty: bool = False, wait_for_complete: bool is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_api_config(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_api_config(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get active API configuration in manager or local_node. Parameters ---------- - request : connexion.request pretty: bool Show results in human-readable format. 
wait_for_complete : bool @@ -449,7 +443,7 @@ async def get_api_config(request, pretty: bool = False, wait_for_complete: bool Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -460,19 +454,18 @@ async def get_api_config(request, pretty: bool = False, wait_for_complete: bool is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def put_restart(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def put_restart(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Restart manager or local_node. Parameters ---------- - request : connexion.request pretty: bool Show results in human-readable format. wait_for_complete : bool @@ -480,7 +473,7 @@ async def put_restart(request, pretty: bool = False, wait_for_complete: bool = F Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -491,19 +484,18 @@ async def put_restart(request, pretty: bool = False, wait_for_complete: bool = F is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_conf_validation(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_conf_validation(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Check if Wazuh configuration is correct in manager or local_node. Parameters ---------- - request : connexion.request pretty: bool Show results in human-readable format. wait_for_complete : bool @@ -511,7 +503,7 @@ async def get_conf_validation(request, pretty: bool = False, wait_for_complete: Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -522,20 +514,19 @@ async def get_conf_validation(request, pretty: bool = False, wait_for_complete: is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_manager_config_ondemand(request, component: str, pretty: bool = False, wait_for_complete: bool = False, - **kwargs: dict) -> web.Response: +async def get_manager_config_ondemand(component: str, pretty: bool = False, wait_for_complete: bool = False, + **kwargs: dict) -> ConnexionResponse: """Get active configuration in manager or local_node for one component [on demand]. Parameters ---------- - request : connexion.request pretty: bool Show results in human-readable format. wait_for_complete : bool @@ -545,7 +536,7 @@ async def get_manager_config_ondemand(request, component: str, pretty: bool = Fa Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'component': component, @@ -560,20 +551,19 @@ async def get_manager_config_ondemand(request, component: str, pretty: bool = Fa is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def update_configuration(request, body: bytes, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def update_configuration(body: bytes, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Update manager's or local_node's configuration (ossec.conf). Parameters ---------- - request : connexion.request pretty: bool Show results in human-readable format. wait_for_complete : bool @@ -583,7 +573,7 @@ async def update_configuration(request, body: bytes, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response. """ # Parse body to utf-8 @@ -598,23 +588,18 @@ async def update_configuration(request, body: bytes, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @only_master_endpoint -async def check_available_version( - request: web.Request, pretty: bool = False, force_query: bool = False -) -> web.Response: +async def check_available_version(pretty: bool = False, force_query: bool = False) -> ConnexionResponse: """Get available update information. Parameters ---------- - request : web.Request - API request. pretty : bool, optional Show results in human-readable format, by default False. force_query : bool, optional @@ -648,4 +633,4 @@ async def check_available_version( logger=logger ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/mitre_controller.py b/api/api/controllers/mitre_controller.py index 891605ccebc..e4f67f9e27d 100644 --- a/api/api/controllers/mitre_controller.py +++ b/api/api/controllers/mitre_controller.py @@ -4,9 +4,10 @@ import logging -from aiohttp import web +from connexion import request +from connexion.lifecycle import ConnexionResponse -from api.encoder import dumps, prettify +from api.controllers.util import json_response from api.util import raise_if_exc, parse_api_param, remove_nones_to_dict from wazuh import mitre from wazuh.core.cluster.dapi.dapi import DistributedAPI @@ -14,12 +15,11 @@ logger = logging.getLogger('wazuh-api') -async def get_metadata(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_metadata(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Return the metadata of the MITRE's database. Parameters ---------- - request : connexion.request pretty : bool, optional Show results in human-readable format. 
wait_for_complete : bool, optional @@ -27,7 +27,7 @@ async def get_metadata(request, pretty: bool = False, wait_for_complete: bool = Returns ------- - web.Response + ConnexionResponse API response. """ @@ -37,21 +37,20 @@ async def get_metadata(request, pretty: bool = False, wait_for_complete: bool = is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_references(request, reference_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_references(reference_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, offset: int = None, limit: int = None, sort: str = None, search: str = None, - select: list = None, q: str = None) -> web.Response: + select: list = None, q: str = None) -> ConnexionResponse: """Get information of specified MITRE's references. Parameters ---------- - request : connexion.request reference_ids : list List of reference ids to be obtained. pretty : bool @@ -74,7 +73,7 @@ async def get_references(request, reference_ids: list = None, pretty: bool = Fal Returns ------- - web.Response + ConnexionResponse API response with the MITRE's references information. """ f_kwargs = { @@ -97,21 +96,20 @@ async def get_references(request, reference_ids: list = None, pretty: bool = Fal is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_tactics(request, tactic_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_tactics(tactic_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, offset: int = None, limit: int = None, sort: str = None, search: str = None, select: list = None, - q: str = None, distinct: bool = False) -> web.Response: + q: str = None, distinct: bool = False) -> ConnexionResponse: """Get information of specified MITRE's tactics. Parameters ---------- - request : connexion.request tactic_ids : list List of tactic ids to be obtained. pretty : bool @@ -136,7 +134,7 @@ async def get_tactics(request, tactic_ids: list = None, pretty: bool = False, wa Returns ------- - web.Response + ConnexionResponse API response with the MITRE's tactics information. 
""" f_kwargs = { @@ -160,21 +158,20 @@ async def get_tactics(request, tactic_ids: list = None, pretty: bool = False, wa is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_techniques(request, technique_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_techniques(technique_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, offset: int = None, limit: int = None, sort: str = None, search: str = None, - select: list = None, q: str = None, distinct: bool = False) -> web.Response: + select: list = None, q: str = None, distinct: bool = False) -> ConnexionResponse: """Get information of specified MITRE's techniques. Parameters ---------- - request : connexion.request technique_ids : list, optional List of technique ids to be obtained. pretty : bool, optional @@ -199,7 +196,7 @@ async def get_techniques(request, technique_ids: list = None, pretty: bool = Fal Returns ------- - web.Response + ConnexionResponse API response with the MITRE's techniques information. """ f_kwargs = {'filters': { @@ -221,21 +218,20 @@ async def get_techniques(request, technique_ids: list = None, pretty: bool = Fal is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies']) + rbac_permissions=request.context['token_info']['rbac_policies']) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_mitigations(request, mitigation_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_mitigations(mitigation_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, offset: int = None, limit: int = None, sort: str = None, search: str = None, - select: list = None, q: str = None, distinct: bool = False) -> web.Response: + select: list = None, q: str = None, distinct: bool = False) -> ConnexionResponse: """Get information of specified MITRE's mitigations. Parameters ---------- - request : connexion.request mitigation_ids : list, optional List of mitigation ids to be obtained. pretty : bool, optional @@ -260,7 +256,7 @@ async def get_mitigations(request, mitigation_ids: list = None, pretty: bool = F Returns ------- - web.Response + ConnexionResponse API response with the MITRE's mitigations information. 
""" f_kwargs = {'filters': { @@ -282,21 +278,20 @@ async def get_mitigations(request, mitigation_ids: list = None, pretty: bool = F is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_groups(request, group_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_groups(group_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, offset: int = None, limit: int = None, sort: str = None, search: str = None, select: list = None, - q: str = None, distinct: bool = False) -> web.Response: + q: str = None, distinct: bool = False) -> ConnexionResponse: """Get information of specified MITRE's groups. Parameters ---------- - request : connexion.request group_ids : list, optional List of group IDs to be obtained. pretty : bool, optional @@ -321,7 +316,7 @@ async def get_groups(request, group_ids: list = None, pretty: bool = False, wait Returns ------- - web.Response + ConnexionResponse API response with the MITRE's groups information. """ f_kwargs = { @@ -344,21 +339,20 @@ async def get_groups(request, group_ids: list = None, pretty: bool = False, wait is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_software(request, software_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_software(software_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, offset: int = None, limit: int = None, sort: str = None, search: str = None, select: list = None, - q: str = None, distinct: bool = False) -> web.Response: + q: str = None, distinct: bool = False) -> ConnexionResponse: """Get information of specified MITRE's software. Parameters ---------- - request : connexion.request software_ids : list, optional List of softwware IDs to be obtained. pretty : bool, optional @@ -383,7 +377,7 @@ async def get_software(request, software_ids: list = None, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response with the MITRE's software information. 
""" f_kwargs = { @@ -406,8 +400,8 @@ async def get_software(request, software_ids: list = None, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies']) + rbac_permissions=request.context['token_info']['rbac_policies']) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/overview_controller.py b/api/api/controllers/overview_controller.py index 8fc74f137fe..ff88a4163d2 100644 --- a/api/api/controllers/overview_controller.py +++ b/api/api/controllers/overview_controller.py @@ -4,9 +4,10 @@ import logging -from aiohttp import web +from connexion import request +from connexion.lifecycle import ConnexionResponse -from api.encoder import dumps, prettify +from api.controllers.util import json_response from api.util import raise_if_exc, remove_nones_to_dict from wazuh.agent import get_full_overview from wazuh.core.cluster.dapi.dapi import DistributedAPI @@ -14,12 +15,11 @@ logger = logging.getLogger('wazuh-api') -async def get_overview_agents(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_overview_agents(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get full summary of agents. Parameters ---------- - request : connexion.request pretty: bool Show results in human-readable format. wait_for_complete : bool @@ -27,7 +27,7 @@ async def get_overview_agents(request, pretty: bool = False, wait_for_complete: Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -38,8 +38,8 @@ async def get_overview_agents(request, pretty: bool = False, wait_for_complete: is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/rootcheck_controller.py b/api/api/controllers/rootcheck_controller.py index a208badbca6..111cebc11c9 100644 --- a/api/api/controllers/rootcheck_controller.py +++ b/api/api/controllers/rootcheck_controller.py @@ -4,9 +4,10 @@ import logging -from aiohttp import web +from connexion.lifecycle import ConnexionResponse -from api.encoder import dumps, prettify +from connexion import request +from api.controllers.util import json_response from api.util import parse_api_param, remove_nones_to_dict, raise_if_exc from wazuh import rootcheck from wazuh.core.cluster.dapi.dapi import DistributedAPI @@ -14,13 +15,12 @@ logger = logging.getLogger('wazuh-api') -async def put_rootcheck(request, agents_list: str = '*', pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def put_rootcheck(agents_list: str = '*', pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Run rootcheck scan over the agent_ids. Parameters ---------- - request : connexion.request agents_list : str List of agent's IDs. pretty: bool @@ -30,7 +30,7 @@ async def put_rootcheck(request, agents_list: str = '*', pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'agent_list': agents_list} @@ -42,20 +42,19 @@ async def put_rootcheck(request, agents_list: str = '*', pretty: bool = False, wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def delete_rootcheck(request, pretty: bool = False, wait_for_complete: bool = False, - agent_id: str = '') -> web.Response: +async def delete_rootcheck(pretty: bool = False, wait_for_complete: bool = False, + agent_id: str = '') -> ConnexionResponse: """Clear the rootcheck database for a list of agents. Parameters ---------- - request : connexion.request pretty: bool Show results in human-readable format. wait_for_complete : bool @@ -65,7 +64,7 @@ async def delete_rootcheck(request, pretty: bool = False, wait_for_complete: boo Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'agent_list': [agent_id]} @@ -76,22 +75,21 @@ async def delete_rootcheck(request, pretty: bool = False, wait_for_complete: boo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_rootcheck_agent(request, pretty: bool = False, wait_for_complete: bool = False, agent_id: str = None, +async def get_rootcheck_agent(pretty: bool = False, wait_for_complete: bool = False, agent_id: str = None, offset: int = 0, limit: int = None, sort: str = None, search: str = None, select: str = None, q: str = '', distinct: bool = False, status: str = 'all', - pci_dss: str = None, cis: str = None) -> web.Response: + pci_dss: str = None, cis: str = None) -> ConnexionResponse: """Return a list of events from the rootcheck database. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -122,7 +120,7 @@ async def get_rootcheck_agent(request, pretty: bool = False, wait_for_complete: Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'agent_list': [agent_id], @@ -146,20 +144,19 @@ async def get_rootcheck_agent(request, pretty: bool = False, wait_for_complete: is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_last_scan_agent(request, pretty: bool = False, wait_for_complete: bool = False, - agent_id: str = None) -> web.Response: +async def get_last_scan_agent(pretty: bool = False, wait_for_complete: bool = False, + agent_id: str = None) -> ConnexionResponse: """Get the last rootcheck scan of an agent. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. 
wait_for_complete : bool @@ -169,7 +166,7 @@ async def get_last_scan_agent(request, pretty: bool = False, wait_for_complete: Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'agent_list': [agent_id]} @@ -180,8 +177,8 @@ async def get_last_scan_agent(request, pretty: bool = False, wait_for_complete: is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/rule_controller.py b/api/api/controllers/rule_controller.py index a12b4e74370..1f3b1f99c87 100644 --- a/api/api/controllers/rule_controller.py +++ b/api/api/controllers/rule_controller.py @@ -5,12 +5,10 @@ import logging from typing import Union -from aiohttp import web -from aiohttp_cache import cache +from connexion import request from connexion.lifecycle import ConnexionResponse -from api.configuration import api_conf -from api.encoder import dumps, prettify +from api.controllers.util import json_response, XML_CONTENT_TYPE from api.models.base_model_ import Body from api.util import remove_nones_to_dict, parse_api_param, raise_if_exc from wazuh import rule as rule_framework @@ -20,17 +18,15 @@ logger = logging.getLogger('wazuh-api') -@cache(expires=api_conf['cache']['time']) -async def get_rules(request, rule_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_rules(rule_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, select: str = None, limit: int = None, sort: str = None, search: str = None, q: str = None, status: str = None, group: str = None, level: str = None, filename: list = None, relative_dirname: str = None, pci_dss: str = None, gdpr: str = None, gpg13: str = None, - hipaa: str = None, tsc: str = None, mitre: str = None, distinct: bool = False) -> web.Response: + hipaa: str = None, tsc: str = None, mitre: str = None, distinct: bool = False) -> ConnexionResponse: """Get information about all Wazuh rules. Parameters ---------- - request : connexion.request rule_ids : list Filters by rule ID. pretty : bool @@ -77,7 +73,7 @@ async def get_rules(request, rule_ids: list = None, pretty: bool = False, wait_f Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'rule_ids': rule_ids, 'offset': offset, 'limit': limit, 'select': select, @@ -95,7 +91,7 @@ async def get_rules(request, rule_ids: list = None, pretty: bool = False, wait_f 'gdpr': gdpr, 'gpg13': gpg13, 'hipaa': hipaa, - 'nist_800_53': request.query.get('nist-800-53', None), + 'nist_800_53': request.query_params.get('nist-800-53', None), 'tsc': tsc, 'mitre': mitre, 'distinct': distinct} @@ -106,21 +102,19 @@ async def get_rules(request, rule_ids: list = None, pretty: bool = False, wait_f is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -@cache(expires=api_conf['cache']['time']) -async def get_rules_groups(request, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, - limit: int = None, sort: str = None, search: str = None) -> web.Response: +async def get_rules_groups(pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, + limit: int = None, sort: str = None, search: str = None) -> ConnexionResponse: """Get all rule groups names. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -137,7 +131,7 @@ async def get_rules_groups(request, pretty: bool = False, wait_for_complete: boo Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'offset': offset, @@ -154,22 +148,20 @@ async def get_rules_groups(request, pretty: bool = False, wait_for_complete: boo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -@cache(expires=api_conf['cache']['time']) -async def get_rules_requirement(request, requirement: str = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_rules_requirement(requirement: str = None, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, sort: str = None, - search: str = None) -> web.Response: + search: str = None) -> ConnexionResponse: """Get all specified requirements. Parameters ---------- - request : connexion.request requirement : str Get the specified requirement in all rules in the system. pretty : bool @@ -188,7 +180,7 @@ async def get_rules_requirement(request, requirement: str = None, pretty: bool = Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'requirement': requirement.replace('-', '_'), 'offset': offset, 'limit': limit, @@ -203,23 +195,21 @@ async def get_rules_requirement(request, requirement: str = None, pretty: bool = is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -@cache(expires=api_conf['cache']['time']) -async def get_rules_files(request, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, +async def get_rules_files(pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, sort: str = None, search: str = None, status: str = None, filename: list = None, relative_dirname: str = None, q: str = None, - select: str = None, distinct: bool = False) -> web.Response: + select: str = None, distinct: bool = False) -> ConnexionResponse: """Get all the rules files. Parameters ---------- - request : connexion.request pretty : bool Show results in human-readable format. wait_for_complete : bool @@ -248,7 +238,7 @@ async def get_rules_files(request, pretty: bool = False, wait_for_complete: bool Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'offset': offset, @@ -270,22 +260,20 @@ async def get_rules_files(request, pretty: bool = False, wait_for_complete: bool is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -@cache(expires=api_conf['cache']['time']) -async def get_file(request, pretty: bool = False, wait_for_complete: bool = False, +async def get_file(pretty: bool = False, wait_for_complete: bool = False, filename: str = None, relative_dirname: str = None, - raw: bool = False) -> Union[web.Response, ConnexionResponse]: + raw: bool = False) -> ConnexionResponse: """Get rule file content. Parameters ---------- - request : connexion.request pretty : bool, optional Show results in human-readable format. It only works when `raw` is False (JSON format). Default `True`. wait_for_complete : bool, optional @@ -299,11 +287,11 @@ async def get_file(request, pretty: bool = False, wait_for_complete: bool = Fals Returns ------- - web.Response or ConnexionResponse - Depending on the `raw` parameter, it will return a web.Response object or a ConnexionResponse object: + ConnexionResponse + Depending on the `raw` parameter, it will return a ConnexionResponse object: raw=True -> ConnexionResponse (application/xml) - raw=False (default) -> web.Response (application/json) - If any exception was raised, it will return a web.Response with details. + raw=False (default) -> ConnexionResponse (application/json) + If any exception was raised, it will return a ConnexionResponse with details. 
""" f_kwargs = {'filename': filename, 'raw': raw, 'relative_dirname': relative_dirname} @@ -313,25 +301,25 @@ async def get_file(request, pretty: bool = False, wait_for_complete: bool = Fals is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) if isinstance(data, AffectedItemsWazuhResult): - response = web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + response = json_response(data, pretty=pretty) else: - response = ConnexionResponse(body=data["message"], mimetype='application/xml', content_type='application/xml') + response = ConnexionResponse(body=data["message"], + content_type=XML_CONTENT_TYPE) return response -async def put_file(request, body: bytes, filename: str = None, overwrite: bool = False, +async def put_file(body: bytes, filename: str = None, overwrite: bool = False, pretty: bool = False, relative_dirname: str = None, - wait_for_complete: bool = False) -> web.Response: + wait_for_complete: bool = False) -> ConnexionResponse: """Upload a rule file. Parameters ---------- - request : connexion.request body : bytes Body request with the file content to be uploaded. filename : str, optional @@ -348,7 +336,7 @@ async def put_file(request, body: bytes, filename: str = None, overwrite: bool = Returns ------- - web.Response + ConnexionResponse API response. """ # Parse body to utf-8 @@ -366,22 +354,21 @@ async def put_file(request, body: bytes, filename: str = None, overwrite: bool = is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def delete_file(request, filename: str = None, +async def delete_file(filename: str = None, relative_dirname: str = None, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: + wait_for_complete: bool = False) -> ConnexionResponse: """Delete a rule file. Parameters ---------- - request : connexion.request filename : str, optional Name of the file. relative_dirname : str @@ -393,7 +380,7 @@ async def delete_file(request, filename: str = None, Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'filename': filename, 'relative_dirname': relative_dirname} @@ -404,8 +391,8 @@ async def delete_file(request, filename: str = None, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/sca_controller.py b/api/api/controllers/sca_controller.py index 299d789ca0b..aa1228c8fef 100755 --- a/api/api/controllers/sca_controller.py +++ b/api/api/controllers/sca_controller.py @@ -5,10 +5,11 @@ import logging -from aiohttp import web +from connexion import request +from connexion.lifecycle import ConnexionResponse import wazuh.sca as sca -from api.encoder import dumps, prettify +from api.controllers.util import json_response from api.util import remove_nones_to_dict, parse_api_param, raise_if_exc from wazuh.core.cluster.dapi.dapi import DistributedAPI from wazuh.core.common import DATABASE_LIMIT @@ -16,15 +17,14 @@ logger = logging.getLogger('wazuh-api') -async def get_sca_agent(request, agent_id: str = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_sca_agent(agent_id: str = None, pretty: bool = False, wait_for_complete: bool = False, name: str = None, description: str = None, references: str = None, offset: int = 0, limit: int = DATABASE_LIMIT, sort: str = None, search: str = None, select: str = None, - q: str = None, distinct: bool = False) -> web.Response: + q: str = None, distinct: bool = False) -> ConnexionResponse: """Get security configuration assessment (SCA) database of an agent. Parameters ---------- - request : connexion.request agent_id : str Agent ID. All possible values since 000 onwards. pretty : bool @@ -56,7 +56,7 @@ async def get_sca_agent(request, agent_id: str = None, pretty: bool = False, wai Returns ------- - web.Response + ConnexionResponse API response. """ filters = {'name': name, @@ -78,25 +78,24 @@ async def get_sca_agent(request, agent_id: str = None, pretty: bool = False, wai is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_sca_checks(request, agent_id: str = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_sca_checks(agent_id: str = None, pretty: bool = False, wait_for_complete: bool = False, policy_id: str = None, title: str = None, description: str = None, rationale: str = None, remediation: str = None, command: str = None, reason: str = None, file: str = None, process: str = None, directory: str = None, registry: str = None, references: str = None, result: str = None, condition: str = None, offset: int = 0, limit: int = DATABASE_LIMIT, sort: str = None, search: str = None, select: str = None, - q: str = None, distinct: bool = False) -> web.Response: + q: str = None, distinct: bool = False) -> ConnexionResponse: """Get policy monitoring alerts for a given policy. Parameters ---------- - request : connexion.request agent_id : str Agent ID. All possible values since 000 onwards. 
pretty : bool @@ -150,7 +149,7 @@ async def get_sca_checks(request, agent_id: str = None, pretty: bool = False, wa Returns ------- - web.Response + ConnexionResponse API response. """ filters = {'title': title, @@ -184,8 +183,8 @@ async def get_sca_checks(request, agent_id: str = None, pretty: bool = False, wa is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/security_controller.py b/api/api/controllers/security_controller.py index 31b2a768a64..be154aed6f6 100644 --- a/api/api/controllers/security_controller.py +++ b/api/api/controllers/security_controller.py @@ -5,21 +5,24 @@ import logging import re -from aiohttp import web +from connexion import request +from connexion.lifecycle import ConnexionResponse +from api.encoder import dumps +from api.models.security_token_response_model import TokenResponseModel from api.authentication import generate_token from api.configuration import default_security_configuration -from api.encoder import dumps, prettify +from api.controllers.util import json_response, JSON_CONTENT_TYPE from api.models.base_model_ import Body from api.models.configuration_model import SecurityConfigurationModel from api.models.security_model import (CreateUserModel, PolicyModel, RoleModel, RuleModel, UpdateUserModel) -from api.models.security_token_response_model import TokenResponseModel from api.util import (deprecate_endpoint, parse_api_param, raise_if_exc, remove_nones_to_dict) from wazuh import security, __version__ from wazuh.core.cluster.control import get_system_nodes from wazuh.core.cluster.dapi.dapi import DistributedAPI +from wazuh.core.common import WAZUH_VERSION from wazuh.core.exception import WazuhException, WazuhPermissionError from wazuh.core.results import AffectedItemsWazuhResult, WazuhResult from wazuh.core.security import revoke_tokens @@ -31,7 +34,7 @@ @deprecate_endpoint(link=f'https://documentation.wazuh.com/{__version__}/user-manual/api/reference.html#' f'operation/api.controllers.security_controller.login_user') -async def deprecated_login_user(user: str, raw: bool = False) -> web.Response: +async def deprecated_login_user(user: str, raw: bool = False) -> ConnexionResponse: """User/password authentication to get an access token. This method should be called to get an API token. This token will expire at some time. @@ -44,7 +47,7 @@ async def deprecated_login_user(user: str, raw: bool = False) -> web.Response: Returns ------- - web.Response + ConnexionResponse Raw or JSON response with the generated access token. 
""" f_kwargs = {'user_id': user} @@ -63,11 +66,15 @@ async def deprecated_login_user(user: str, raw: bool = False) -> web.Response: except WazuhException as e: raise_if_exc(e) - return web.Response(text=token, content_type='text/plain', status=200) if raw \ - else web.json_response(data=WazuhResult({'data': TokenResponseModel(token=token)}), status=200, dumps=dumps) + return ConnexionResponse(body=token, + content_type='text/plain', + status_code=200) if raw else \ + ConnexionResponse(body=dumps(WazuhResult({'data': TokenResponseModel(token=token)})), + content_type=JSON_CONTENT_TYPE, + status_code=200) -async def login_user(user: str, raw: bool = False) -> web.Response: +async def login_user(user: str, raw: bool = False) -> ConnexionResponse: """User/password authentication to get an access token. This method should be called to get an API token. This token will expire at some time. @@ -80,7 +87,7 @@ async def login_user(user: str, raw: bool = False) -> web.Response: Returns ------- - web.Response + ConnexionResponse Raw or JSON response with the generated access token. """ f_kwargs = {'user_id': user} @@ -99,18 +106,21 @@ async def login_user(user: str, raw: bool = False) -> web.Response: except WazuhException as e: raise_if_exc(e) - return web.Response(text=token, content_type='text/plain', status=200) if raw \ - else web.json_response(data=WazuhResult({'data': TokenResponseModel(token=token)}), status=200, dumps=dumps) + return ConnexionResponse(body=token, + content_type='text/plain', + status_code=200) if raw else \ + ConnexionResponse(body=dumps(WazuhResult({'data': TokenResponseModel(token=token)})), + content_type=JSON_CONTENT_TYPE, + status_code=200) -async def run_as_login(request, user: str, raw: bool = False) -> web.Response: +async def run_as_login(user: str, raw: bool = False) -> ConnexionResponse: """User/password authentication to get an access token. This method should be called to get an API token using an authorization context body. This token will expire at some time. Parameters ---------- - request : connexion.request user : str Name of the user who wants to be authenticated. raw : bool, optional @@ -118,7 +128,7 @@ async def run_as_login(request, user: str, raw: bool = False) -> web.Response: Returns ------- - web.Response + ConnexionResponse Raw or JSON response with the generated access token. """ auth_context = await request.json() @@ -138,16 +148,19 @@ async def run_as_login(request, user: str, raw: bool = False) -> web.Response: except WazuhException as e: raise_if_exc(e) - return web.Response(text=token, content_type='text/plain', status=200) if raw \ - else web.json_response(data=WazuhResult({'data': TokenResponseModel(token=token)}), status=200, dumps=dumps) + return ConnexionResponse(body=token, + content_type='text/plain', + status_code=200) if raw else \ + ConnexionResponse(body=dumps(WazuhResult({'data': TokenResponseModel(token=token)})), + content_type=JSON_CONTENT_TYPE, + status_code=200) -async def get_user_me(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_user_me(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Returns information about the current user. Parameters ---------- - request : connexion.request pretty : bool, optional Show results in human-readable format. 
wait_for_complete : bool, optional @@ -155,30 +168,29 @@ async def get_user_me(request, pretty: bool = False, wait_for_complete: bool = F Returns ------- - web.Response + ConnexionResponse API response with the user information. """ - f_kwargs = {'token': request['token_info']} + f_kwargs = {'token': request.context['token_info']} dapi = DistributedAPI(f=security.get_user_me, f_kwargs=remove_nones_to_dict(f_kwargs), request_type='local_master', is_async=False, logger=logger, wait_for_complete=wait_for_complete, - current_user=request['token_info']['sub'], - rbac_permissions=request['token_info']['rbac_policies'] + current_user=request.context['token_info']['sub'], + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_user_me_policies(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_user_me_policies(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Return processed RBAC policies and rbac_mode for the current user. Parameters ---------- - request : connexion.request pretty : bool, optional Show results in human-readable format. wait_for_complete : bool, optional @@ -186,21 +198,20 @@ async def get_user_me_policies(request, pretty: bool = False, wait_for_complete: Returns ------- - web.Response + ConnexionResponse API response with the user RBAC policies and mode. """ - data = WazuhResult({'data': request['token_info']['rbac_policies'], + data = WazuhResult({'data': request.context['token_info']['rbac_policies'], 'message': "Current user processed policies information was returned"}) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def logout_user(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def logout_user(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Invalidate all current user's tokens. Parameters ---------- - request : connexion.request pretty : bool, optional Show results in human-readable format. wait_for_complete : bool, optional @@ -208,30 +219,29 @@ async def logout_user(request, pretty: bool = False, wait_for_complete: bool = F Returns ------- - web.Response + ConnexionResponse API response. """ dapi = DistributedAPI(f=security.revoke_current_user_tokens, request_type='local_master', is_async=False, - current_user=request['token_info']['sub'], + current_user=request.context['token_info']['sub'], wait_for_complete=wait_for_complete, logger=logger ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_users(request, user_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_users(user_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, search: str = None, select: str = None, - sort: str = None, q: str = None, distinct: bool = False) -> web.Response: + sort: str = None, q: str = None, distinct: bool = False) -> ConnexionResponse: """Returns information from all system users. Parameters ---------- - request : connexion.request user_ids : list, optional List of users to be obtained. 
pretty : bool, optional @@ -256,7 +266,7 @@ async def get_users(request, user_ids: list = None, pretty: bool = False, wait_f Returns ------- - web.Response + ConnexionResponse API response with the users information. """ f_kwargs = {'user_ids': user_ids, 'offset': offset, 'limit': limit, 'select': select, @@ -273,20 +283,19 @@ async def get_users(request, user_ids: list = None, pretty: bool = False, wait_f is_async=False, logger=logger, wait_for_complete=wait_for_complete, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def edit_run_as(request, user_id: str, allow_run_as: bool, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def edit_run_as(user_id: str, allow_run_as: bool, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Modify the specified user's allow_run_as flag. Parameters ---------- - request : connexion.request user_id : str User ID of the user to be updated. allow_run_as : bool @@ -298,7 +307,7 @@ async def edit_run_as(request, user_id: str, allow_run_as: bool, pretty: bool = Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'user_id': user_id, 'allow_run_as': allow_run_as} @@ -308,21 +317,20 @@ async def edit_run_as(request, user_id: str, allow_run_as: bool, pretty: bool = request_type='local_master', is_async=False, logger=logger, - current_user=request['token_info']['sub'], - rbac_permissions=request['token_info']['rbac_policies'], + current_user=request.context['token_info']['sub'], + rbac_permissions=request.context['token_info']['rbac_policies'], wait_for_complete=wait_for_complete ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def create_user(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def create_user(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Create a new user. Parameters ---------- - request : connexion.request pretty : bool, optional Show results in human-readable format. wait_for_complete : bool, optional @@ -330,10 +338,10 @@ async def create_user(request, pretty: bool = False, wait_for_complete: bool = F Returns ------- - web.Response + ConnexionResponse API response. 
""" - Body.validate_content_type(request, expected_content_type='application/json') + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) f_kwargs = await CreateUserModel.get_kwargs(request) dapi = DistributedAPI(f=security.create_user, @@ -341,20 +349,19 @@ async def create_user(request, pretty: bool = False, wait_for_complete: bool = F request_type='local_master', is_async=False, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], wait_for_complete=wait_for_complete ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def update_user(request, user_id: str, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def update_user(user_id: str, pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Modify an existent user. Parameters ---------- - request : connexion.request user_id : str User ID of the user to be updated. pretty : bool, optional @@ -364,33 +371,34 @@ async def update_user(request, user_id: str, pretty: bool = False, wait_for_comp Returns ------- - web.Response + ConnexionResponse API response. """ - Body.validate_content_type(request, expected_content_type='application/json') - f_kwargs = await UpdateUserModel.get_kwargs(request, additional_kwargs={'user_id': user_id, - 'current_user': request.get("user")}) + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) + f_kwargs = await UpdateUserModel.get_kwargs(request, + additional_kwargs= + {'user_id': user_id, + 'current_user': request.get("user")}) dapi = DistributedAPI(f=security.update_user, f_kwargs=remove_nones_to_dict(f_kwargs), request_type='local_master', is_async=False, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], wait_for_complete=wait_for_complete ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def delete_users(request, user_ids: list = None, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def delete_users(user_ids: list = None, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Delete an existent list of users. Parameters ---------- - request : connexion.request user_ids : list, optional IDs of the users to be removed. pretty : bool, optional @@ -400,7 +408,7 @@ async def delete_users(request, user_ids: list = None, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response. 
""" if 'all' in user_ids: @@ -412,23 +420,22 @@ async def delete_users(request, user_ids: list = None, pretty: bool = False, request_type='local_master', is_async=False, logger=logger, - current_user=request['token_info']['sub'], - rbac_permissions=request['token_info']['rbac_policies'], + current_user=request.context['token_info']['sub'], + rbac_permissions=request.context['token_info']['rbac_policies'], wait_for_complete=wait_for_complete ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_roles(request, role_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_roles(role_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, search: str = None, select: str = None, - sort: str = None, q: str = None, distinct: bool = False) -> web.Response: + sort: str = None, q: str = None, distinct: bool = False) -> ConnexionResponse: """Get information about the security roles in the system. Parameters ---------- - request : connexion.request role_ids : list, optional List of roles ids to be obtained. pretty : bool, optional @@ -453,7 +460,7 @@ async def get_roles(request, role_ids: list = None, pretty: bool = False, wait_f Returns ------- - web.Response + ConnexionResponse API response with the roles information. """ f_kwargs = {'role_ids': role_ids, 'offset': offset, 'limit': limit, 'select': select, @@ -471,14 +478,14 @@ async def get_roles(request, role_ids: list = None, pretty: bool = False, wait_f is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def add_role(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def add_role(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Add a specified role. Parameters @@ -491,11 +498,11 @@ async def add_role(request, pretty: bool = False, wait_for_complete: bool = Fals Returns ------- - web.Response + ConnexionResponse API response. """ # Get body parameters - Body.validate_content_type(request, expected_content_type='application/json') + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) f_kwargs = await RoleModel.get_kwargs(request) dapi = DistributedAPI(f=security.add_role, @@ -504,20 +511,19 @@ async def add_role(request, pretty: bool = False, wait_for_complete: bool = Fals is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def remove_roles(request, role_ids: list = None, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def remove_roles(role_ids: list = None, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Removes a list of roles in the system. 
Parameters ---------- - request : connexion.request role_ids : list, optional List of roles ids to be deleted. pretty : bool, optional @@ -527,7 +533,7 @@ async def remove_roles(request, role_ids: list = None, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response composed of two lists: one contains the deleted roles and the other the non-deleted roles. """ if 'all' in role_ids: @@ -540,19 +546,18 @@ async def remove_roles(request, role_ids: list = None, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def update_role(request, role_id: int, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def update_role(role_id: int, pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Update the information of a specified role. Parameters ---------- - request : connexion.request role_id : int Specific role id in the system to be updated. pretty : bool, optional @@ -562,11 +567,11 @@ async def update_role(request, role_id: int, pretty: bool = False, wait_for_comp Returns ------- - web.Response + ConnexionResponse API response. """ # Get body parameters - Body.validate_content_type(request, expected_content_type='application/json') + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) f_kwargs = await RoleModel.get_kwargs(request, additional_kwargs={'role_id': role_id}) dapi = DistributedAPI(f=security.update_role, @@ -575,21 +580,20 @@ async def update_role(request, role_id: int, pretty: bool = False, wait_for_comp is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_rules(request, rule_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_rules(rule_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, search: str = None, select: str = None, - sort: str = None, q: str = '', distinct: bool = False) -> web.Response: + sort: str = None, q: str = '', distinct: bool = False) -> ConnexionResponse: """Get information about the security rules in the system. Parameters ---------- - request : connexion.request rule_ids : list, optional List of rule ids to be obtained. pretty : bool, optional @@ -614,7 +618,7 @@ async def get_rules(request, rule_ids: list = None, pretty: bool = False, wait_f Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'rule_ids': rule_ids, 'offset': offset, 'limit': limit, 'select': select, @@ -632,14 +636,14 @@ async def get_rules(request, rule_ids: list = None, pretty: bool = False, wait_f is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def add_rule(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def add_rule(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Add a specified rule. Parameters @@ -652,11 +656,11 @@ async def add_rule(request, pretty: bool = False, wait_for_complete: bool = Fals Returns ------- - web.Response + ConnexionResponse API response. """ # Get body parameters - Body.validate_content_type(request, expected_content_type='application/json') + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) f_kwargs = await RuleModel.get_kwargs(request) dapi = DistributedAPI(f=security.add_rule, @@ -665,19 +669,18 @@ async def add_rule(request, pretty: bool = False, wait_for_complete: bool = Fals is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def update_rule(request, rule_id: int, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def update_rule(rule_id: int, pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Update the information of a specified rule. Parameters ---------- - request : connexion.request rule_id : int Specific rule id in the system to be updated. pretty : bool, optional @@ -687,11 +690,11 @@ async def update_rule(request, rule_id: int, pretty: bool = False, wait_for_comp Returns ------- - web.Response + ConnexionResponse API response. """ # Get body parameters - Body.validate_content_type(request, expected_content_type='application/json') + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) f_kwargs = await RuleModel.get_kwargs(request, additional_kwargs={'rule_id': rule_id}) dapi = DistributedAPI(f=security.update_rule, @@ -700,20 +703,19 @@ async def update_rule(request, rule_id: int, pretty: bool = False, wait_for_comp is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def remove_rules(request, rule_ids: list = None, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def remove_rules(rule_ids: list = None, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Remove a list of rules from the system. Parameters ---------- - request : connexion.request rule_ids : list, optional List of rule ids to be deleted. 
pretty : bool, optional @@ -723,7 +725,7 @@ async def remove_rules(request, rule_ids: list = None, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response composed of two lists: one contains the deleted rules and the other the non-deleted rules. """ if 'all' in rule_ids: @@ -736,21 +738,20 @@ async def remove_rules(request, rule_ids: list = None, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_policies(request, policy_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, +async def get_policies(policy_ids: list = None, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, search: str = None, select: str = None, - sort: str = None, q: str = None, distinct: bool = False) -> web.Response: + sort: str = None, q: str = None, distinct: bool = False) -> ConnexionResponse: """Returns information from all system policies. Parameters ---------- - request : connexion.request policy_ids : list, optional List of policies. pretty : bool, optional @@ -775,7 +776,7 @@ async def get_policies(request, policy_ids: list = None, pretty: bool = False, w Returns ------- - web.Response + ConnexionResponse API response with the policies information. """ f_kwargs = {'policy_ids': policy_ids, 'offset': offset, 'limit': limit, 'select': select, @@ -793,19 +794,18 @@ async def get_policies(request, policy_ids: list = None, pretty: bool = False, w is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def add_policy(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def add_policy(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Add a specified policy. Parameters ---------- - request : connexion.request pretty : bool, optional Show results in human-readable format. wait_for_complete : bool, optional @@ -813,11 +813,11 @@ async def add_policy(request, pretty: bool = False, wait_for_complete: bool = Fa Returns ------- - web.Response + ConnexionResponse API response. 
""" # Get body parameters - Body.validate_content_type(request, expected_content_type='application/json') + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) f_kwargs = await PolicyModel.get_kwargs(request) dapi = DistributedAPI(f=security.add_policy, @@ -826,20 +826,19 @@ async def add_policy(request, pretty: bool = False, wait_for_complete: bool = Fa is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def remove_policies(request, policy_ids: list = None, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def remove_policies(policy_ids: list = None, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Removes a list of roles in the system. Parameters ---------- - request : connexion.request policy_ids : list, optional List of policies ids to be deleted. pretty : bool, optional @@ -849,7 +848,7 @@ async def remove_policies(request, policy_ids: list = None, pretty: bool = False Returns ------- - web.Response + ConnexionResponse API response composed of two lists: one contains the deleted policies and the other the non-deleted policies. """ if 'all' in policy_ids: @@ -862,19 +861,18 @@ async def remove_policies(request, policy_ids: list = None, pretty: bool = False is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def update_policy(request, policy_id: int, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def update_policy(policy_id: int, pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Update the information of a specified policy. Parameters ---------- - request : connexion.request policy_id : int Specific policy id in the system to be updated pretty : bool, optional @@ -884,11 +882,11 @@ async def update_policy(request, policy_id: int, pretty: bool = False, wait_for_ Returns ------- - web.Response + ConnexionResponse API response. 
""" # Get body parameters - Body.validate_content_type(request, expected_content_type='application/json') + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) f_kwargs = await PolicyModel.get_kwargs(request, additional_kwargs={'policy_id': policy_id}) dapi = DistributedAPI(f=security.update_policy, @@ -897,20 +895,19 @@ async def update_policy(request, policy_id: int, pretty: bool = False, wait_for_ is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def set_user_role(request, user_id: str, role_ids: list, position: int = None, - pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def set_user_role(user_id: str, role_ids: list, position: int = None, + pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Add a list of roles to a specified user. Parameters ---------- - request : connexion.request user_id : str User ID. role_ids : list @@ -924,7 +921,7 @@ async def set_user_role(request, user_id: str, role_ids: list, position: int = N Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'user_id': user_id, 'role_ids': role_ids, 'position': position} @@ -934,20 +931,19 @@ async def set_user_role(request, user_id: str, role_ids: list, position: int = N is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def remove_user_role(request, user_id: str, role_ids: list, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def remove_user_role(user_id: str, role_ids: list, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Delete a list of roles of a specified user. Parameters ---------- - request : connexion.request user_id : str User ID. role_ids : list @@ -959,7 +955,7 @@ async def remove_user_role(request, user_id: str, role_ids: list, pretty: bool = Returns ------- - web.Response + ConnexionResponse API response. """ if 'all' in role_ids: @@ -972,20 +968,19 @@ async def remove_user_role(request, user_id: str, role_ids: list, pretty: bool = is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def set_role_policy(request, role_id: int, policy_ids: list, position: int = None, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def set_role_policy(role_id: int, policy_ids: list, position: int = None, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Add a list of policies to a specified role. Parameters ---------- - request : connexion.request role_id : int Role ID. 
policy_ids : list @@ -999,7 +994,7 @@ async def set_role_policy(request, role_id: int, policy_ids: list, position: int Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'role_id': role_id, 'policy_ids': policy_ids, 'position': position} @@ -1010,15 +1005,15 @@ async def set_role_policy(request, role_id: int, policy_ids: list, position: int is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def remove_role_policy(request, role_id: int, policy_ids: list, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def remove_role_policy(role_id: int, policy_ids: list, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Delete a list of policies of a specified role. Parameters @@ -1035,7 +1030,7 @@ async def remove_role_policy(request, role_id: int, policy_ids: list, pretty: bo Returns ------- - web.Response + ConnexionResponse API response. """ if 'all' in policy_ids: @@ -1048,15 +1043,15 @@ async def remove_role_policy(request, role_id: int, policy_ids: list, pretty: bo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def set_role_rule(request, role_id: int, rule_ids: list, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def set_role_rule(role_id: int, rule_ids: list, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Add a list of rules to a specified role. Parameters @@ -1073,11 +1068,11 @@ async def set_role_rule(request, role_id: int, rule_ids: list, pretty: bool = Fa Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'role_id': role_id, 'rule_ids': rule_ids, - 'run_as': {'user': request['token_info']['sub'], 'run_as': request['token_info']['run_as']}} + 'run_as': {'user': request.context['token_info']['sub'], 'run_as': request.context['token_info']['run_as']}} dapi = DistributedAPI(f=security.set_role_rule, f_kwargs=remove_nones_to_dict(f_kwargs), @@ -1085,15 +1080,15 @@ async def set_role_rule(request, role_id: int, rule_ids: list, pretty: bool = Fa is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def remove_role_rule(request, role_id: int, rule_ids: list, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def remove_role_rule(role_id: int, rule_ids: list, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Delete a list of rules of a specified role. 
Parameters @@ -1110,7 +1105,7 @@ async def remove_role_rule(request, role_id: int, rule_ids: list, pretty: bool = Returns ------- - web.Response + ConnexionResponse API response. """ if 'all' in rule_ids: @@ -1123,14 +1118,14 @@ async def remove_role_rule(request, role_id: int, rule_ids: list, pretty: bool = is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_rbac_resources(resource: str = None, pretty: bool = False) -> web.Response: +async def get_rbac_resources(resource: str = None, pretty: bool = False) -> ConnexionResponse: """Gets all the current defined resources for RBAC. Parameters @@ -1142,7 +1137,7 @@ async def get_rbac_resources(resource: str = None, pretty: bool = False) -> web. Returns ------- - web.Response + ConnexionResponse API response with the RBAC resources. """ f_kwargs = {'resource': resource} @@ -1156,10 +1151,10 @@ async def get_rbac_resources(resource: str = None, pretty: bool = False) -> web. ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_rbac_actions(pretty: bool = False, endpoint: str = None) -> web.Response: +async def get_rbac_actions(pretty: bool = False, endpoint: str = None) -> ConnexionResponse: """Gets all the current defined actions for RBAC. Parameters @@ -1171,7 +1166,7 @@ async def get_rbac_actions(pretty: bool = False, endpoint: str = None) -> web.Re Returns ------- - web.Response + ConnexionResponse API response with the RBAC actions. """ f_kwargs = {'endpoint': endpoint} @@ -1185,10 +1180,10 @@ async def get_rbac_actions(pretty: bool = False, endpoint: str = None) -> web.Re ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def revoke_all_tokens(request, pretty: bool = False) -> web.Response: +async def revoke_all_tokens(pretty: bool = False) -> ConnexionResponse: """Revoke all tokens. Parameters @@ -1199,7 +1194,7 @@ async def revoke_all_tokens(request, pretty: bool = False) -> web.Response: Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -1215,17 +1210,17 @@ async def revoke_all_tokens(request, pretty: bool = False) -> web.Response: broadcasting=nodes is not None, wait_for_complete=True, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'], + rbac_permissions=request.context['token_info']['rbac_policies'], nodes=nodes ) data = raise_if_exc(await dapi.distribute_function()) if type(data) == AffectedItemsWazuhResult and len(data.affected_items) == 0: raise_if_exc(WazuhPermissionError(4000, data.message)) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_security_config(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def get_security_config(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Get active security configuration. 
Parameters @@ -1238,7 +1233,7 @@ async def get_security_config(request, pretty: bool = False, wait_for_complete: Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {} @@ -1249,11 +1244,11 @@ async def get_security_config(request, pretty: bool = False, wait_for_complete: is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) async def security_revoke_tokens(): @@ -1273,7 +1268,7 @@ async def security_revoke_tokens(): raise_if_exc(await dapi.distribute_function()) -async def put_security_config(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def put_security_config(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Update current security configuration with the given one Parameters @@ -1286,10 +1281,10 @@ async def put_security_config(request, pretty: bool = False, wait_for_complete: Returns ------- - web.Response + ConnexionResponse API response. """ - Body.validate_content_type(request, expected_content_type='application/json') + Body.validate_content_type(request, expected_content_type=JSON_CONTENT_TYPE) f_kwargs = {'updated_config': await SecurityConfigurationModel.get_kwargs(request)} dapi = DistributedAPI(f=security.update_security_config, @@ -1298,15 +1293,15 @@ async def put_security_config(request, pretty: bool = False, wait_for_complete: is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) await security_revoke_tokens() - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def delete_security_config(request, pretty: bool = False, wait_for_complete: bool = False) -> web.Response: +async def delete_security_config(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: """Restore default security configuration. Parameters @@ -1319,10 +1314,12 @@ async def delete_security_config(request, pretty: bool = False, wait_for_complet Returns ------- - web.Response + ConnexionResponse API response. 
""" - f_kwargs = {"updated_config": await SecurityConfigurationModel.get_kwargs(default_security_configuration)} + f_kwargs = {"updated_config": + await SecurityConfigurationModel.get_kwargs(request, + default_security_configuration)} dapi = DistributedAPI(f=security.update_security_config, f_kwargs=remove_nones_to_dict(f_kwargs), @@ -1330,9 +1327,9 @@ async def delete_security_config(request, pretty: bool = False, wait_for_complet is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) await security_revoke_tokens() - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/syscheck_controller.py b/api/api/controllers/syscheck_controller.py index 54ac99074ea..42b64a4dec5 100644 --- a/api/api/controllers/syscheck_controller.py +++ b/api/api/controllers/syscheck_controller.py @@ -4,9 +4,10 @@ import logging -from aiohttp import web +from connexion import request +from connexion.lifecycle import ConnexionResponse -from api.encoder import dumps, prettify +from api.controllers.util import json_response from api.util import remove_nones_to_dict, parse_api_param, raise_if_exc, deprecate_endpoint from wazuh.core.cluster.dapi.dapi import DistributedAPI from wazuh.syscheck import run, clear, files, last_scan @@ -14,13 +15,12 @@ logger = logging.getLogger('wazuh-api') -async def put_syscheck(request, agents_list: str = '*', pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def put_syscheck(agents_list: str = '*', pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Run a syscheck scan in the specified agents. Parameters ---------- - request : request.connexion agents_list : str List of agent ids. pretty : bool @@ -30,7 +30,7 @@ async def put_syscheck(request, agents_list: str = '*', pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'agent_list': agents_list} @@ -42,23 +42,22 @@ async def put_syscheck(request, agents_list: str = '*', pretty: bool = False, wait_for_complete=wait_for_complete, logger=logger, broadcasting=agents_list == '*', - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def get_syscheck_agent(request, agent_id: str, pretty: bool = False, wait_for_complete: bool = False, +async def get_syscheck_agent(agent_id: str, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, select: str = None, sort: str = None, search: str = None, distinct: bool = False, summary: bool = False, md5: str = None, - sha1: str = None, sha256: str = None, q: str = None, arch: str = None) -> web.Response: + sha1: str = None, sha256: str = None, q: str = None, arch: str = None) -> ConnexionResponse: """Get file integrity monitoring scan result from an agent. Parameters ---------- - request : request.connexion agent_id : str Agent ID. 
pretty : bool @@ -93,19 +92,19 @@ async def get_syscheck_agent(request, agent_id: str, pretty: bool = False, wait_ Returns ------- - web.Response + ConnexionResponse API response. """ # get type parameter from query - type_ = request.query.get('type', None) + type_ = request.query_params.get('type', None) # get hash parameter from query - hash_ = request.query.get('hash', None) + hash_ = request.query_params.get('hash', None) # get file parameter from query - file_ = request.query.get('file', None) + file_ = request.query_params.get('file', None) filters = {'type': type_, 'md5': md5, 'sha1': sha1, 'sha256': sha256, 'hash': hash_, 'file': file_, 'arch': arch, - 'value.name': request.query.get('value.name', None), 'value.type': request.query.get('value.type', None)} + 'value.name': request.query_params.get('value.name', None), 'value.type': request.query_params.get('value.type', None)} f_kwargs = {'agent_list': [agent_id], 'offset': offset, 'limit': limit, 'select': select, 'sort': parse_api_param(sort, 'sort'), 'search': parse_api_param(search, 'search'), @@ -117,21 +116,20 @@ async def get_syscheck_agent(request, agent_id: str, pretty: bool = False, wait_ is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def delete_syscheck_agent(request, agent_id: str = '*', pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def delete_syscheck_agent(agent_id: str = '*', pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Clear file integrity monitoring scan results for a specified agent. Parameters ---------- - request : request.connexion agent_id : str Agent ID. pretty : bool @@ -141,7 +139,7 @@ async def delete_syscheck_agent(request, agent_id: str = '*', pretty: bool = Fal Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'agent_list': [agent_id]} @@ -152,20 +150,19 @@ async def delete_syscheck_agent(request, agent_id: str = '*', pretty: bool = Fal is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) -async def get_last_scan_agent(request, agent_id: str, pretty: bool = False, - wait_for_complete: bool = False) -> web.Response: +async def get_last_scan_agent(agent_id: str, pretty: bool = False, + wait_for_complete: bool = False) -> ConnexionResponse: """Return when the last syscheck scan of a specified agent started and ended. Parameters ---------- - request : request.connexion agent_id : str Agent ID. pretty : bool @@ -175,7 +172,7 @@ async def get_last_scan_agent(request, agent_id: str, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'agent_list': [agent_id]} @@ -186,8 +183,8 @@ async def get_last_scan_agent(request, agent_id: str, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/syscollector_controller.py b/api/api/controllers/syscollector_controller.py index a54ab6b3702..c87480eb256 100644 --- a/api/api/controllers/syscollector_controller.py +++ b/api/api/controllers/syscollector_controller.py @@ -4,10 +4,11 @@ import logging -from aiohttp import web +from connexion import request +from connexion.lifecycle import ConnexionResponse import wazuh.syscollector as syscollector -from api.encoder import dumps, prettify +from api.controllers.util import json_response from api.util import remove_nones_to_dict, parse_api_param, raise_if_exc, deprecate_endpoint from wazuh.core.cluster.dapi.dapi import DistributedAPI @@ -15,13 +16,12 @@ @deprecate_endpoint() -async def get_hardware_info(request, agent_id: str, pretty: bool = False, wait_for_complete: bool = False, - select: str = None) -> web.Response: +async def get_hardware_info(agent_id: str, pretty: bool = False, wait_for_complete: bool = False, + select: str = None) -> ConnexionResponse: """Get hardware info of an agent. Parameters ---------- - request : request.connexion agent_id : str Agent ID. pretty : bool @@ -33,7 +33,7 @@ async def get_hardware_info(request, agent_id: str, pretty: bool = False, wait_f Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'agent_list': [agent_id], @@ -45,22 +45,21 @@ async def get_hardware_info(request, agent_id: str, pretty: bool = False, wait_f is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def get_hotfix_info(request, agent_id: str, pretty: bool = False, wait_for_complete: bool = False, +async def get_hotfix_info(agent_id: str, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, sort: str = None, search: str = None, select: str = None, - hotfix: str = None, q: str = None, distinct: bool = False) -> web.Response: + hotfix: str = None, q: str = None, distinct: bool = False) -> ConnexionResponse: """Get info about an agent's hotfixes. Parameters ---------- - request : request.connexion agent_id : str Agent ID. offset : int @@ -87,7 +86,7 @@ async def get_hotfix_info(request, agent_id: str, pretty: bool = False, wait_for Returns ------- - web.Response + ConnexionResponse API response. 
""" @@ -110,24 +109,23 @@ async def get_hotfix_info(request, agent_id: str, pretty: bool = False, wait_for is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def get_network_address_info(request, agent_id: str, pretty: bool = False, wait_for_complete: bool = False, +async def get_network_address_info(agent_id: str, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, select: str = None, sort: str = None, search: str = None, iface: str = None, proto: str = None, address: str = None, broadcast: str = None, netmask: str = None, q: str = None, - distinct: bool = False) -> web.Response: + distinct: bool = False) -> ConnexionResponse: """Get network address info of an agent. Parameters ---------- - request : request.connexion agent_id : str Agent ID. offset : int @@ -162,7 +160,7 @@ async def get_network_address_info(request, agent_id: str, pretty: bool = False, Returns ------- - web.Response + ConnexionResponse API response. """ filters = {'iface': iface, @@ -188,23 +186,22 @@ async def get_network_address_info(request, agent_id: str, pretty: bool = False, is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def get_network_interface_info(request, agent_id: str, pretty: bool = False, wait_for_complete: bool = False, +async def get_network_interface_info(agent_id: str, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, select: str = None, sort: str = None, search: str = None, name: str = None, adapter: str = None, state: str = None, - mtu: str = None, q: str = None, distinct: bool = False) -> web.Response: + mtu: str = None, q: str = None, distinct: bool = False) -> ConnexionResponse: """Get network interface info of an agent. Parameters ---------- - request : request.connexion agent_id : str Agent ID. offset : int @@ -237,18 +234,18 @@ async def get_network_interface_info(request, agent_id: str, pretty: bool = Fals Returns ------- - web.Response + ConnexionResponse API response. 
""" filters = {'adapter': adapter, - 'type': request.query.get('type', None), + 'type': request.query_params.get('type', None), 'state': state, 'name': name, 'mtu': mtu} # Add nested fields to kwargs filters nested = ['tx.packets', 'rx.packets', 'tx.bytes', 'rx.bytes', 'tx.errors', 'rx.errors', 'tx.dropped', 'rx.dropped'] for field in nested: - filters[field] = request.query.get(field, None) + filters[field] = request.query_params.get(field, None) f_kwargs = {'agent_list': [agent_id], 'offset': offset, @@ -267,23 +264,22 @@ async def get_network_interface_info(request, agent_id: str, pretty: bool = Fals is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def get_network_protocol_info(request, agent_id: str, pretty: bool = False, wait_for_complete: bool = False, +async def get_network_protocol_info(agent_id: str, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, select: str = None, sort: str = None, search: str = None, iface: str = None, gateway: str = None, dhcp: str = None, - q: str = None, distinct: bool = False) -> web.Response: + q: str = None, distinct: bool = False) -> ConnexionResponse: """Get network protocol info of an agent. Parameters ---------- - request : request.connexion agent_id : str Agent ID. offset : int @@ -314,11 +310,11 @@ async def get_network_protocol_info(request, agent_id: str, pretty: bool = False Returns ------- - web.Response + ConnexionResponse API response. """ filters = {'iface': iface, - 'type': request.query.get('type', None), + 'type': request.query_params.get('type', None), 'gateway': gateway, 'dhcp': dhcp} @@ -339,21 +335,20 @@ async def get_network_protocol_info(request, agent_id: str, pretty: bool = False is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def get_os_info(request, agent_id: str, pretty: bool = False, wait_for_complete: bool = False, - select: str = None) -> web.Response: +async def get_os_info(agent_id: str, pretty: bool = False, wait_for_complete: bool = False, + select: str = None) -> ConnexionResponse: """Get OS info of an agent. Parameters ---------- - request : request.connexion agent_id : str Agent ID. select : str @@ -365,7 +360,7 @@ async def get_os_info(request, agent_id: str, pretty: bool = False, wait_for_com Returns ------- - web.Response + ConnexionResponse API response. 
""" f_kwargs = {'agent_list': [agent_id], @@ -378,23 +373,22 @@ async def get_os_info(request, agent_id: str, pretty: bool = False, wait_for_com is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def get_packages_info(request, agent_id: str, pretty: bool = False, wait_for_complete: bool = False, +async def get_packages_info(agent_id: str, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, select: str = None, sort: str = None, search: str = None, vendor: str = None, name: str = None, architecture: str = None, - version: str = None, q: str = None, distinct: bool = False) -> web.Response: + version: str = None, q: str = None, distinct: bool = False) -> ConnexionResponse: """Get packages info of an agent. Parameters ---------- - request : request.connexion agent_id : str Agent ID. offset : int @@ -427,13 +421,13 @@ async def get_packages_info(request, agent_id: str, pretty: bool = False, wait_f Returns ------- - web.Response + ConnexionResponse API response. """ filters = {'vendor': vendor, 'name': name, 'architecture': architecture, - 'format': request.query.get('format', None), + 'format': request.query_params.get('format', None), 'version': version} f_kwargs = {'agent_list': [agent_id], @@ -453,23 +447,22 @@ async def get_packages_info(request, agent_id: str, pretty: bool = False, wait_f is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def get_ports_info(request, agent_id: str, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, +async def get_ports_info(agent_id: str, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, select: str = None, sort: str = None, search: str = None, pid: str = None, protocol: str = None, tx_queue: str = None, state: str = None, process: str = None, - q: str = None, distinct: bool = False) -> web.Response: + q: str = None, distinct: bool = False) -> ConnexionResponse: """Get ports info of an agent. Parameters ---------- - request : request.connexion agent_id : str Agent ID. offset : int @@ -504,7 +497,7 @@ async def get_ports_info(request, agent_id: str, pretty: bool = False, wait_for_ Returns ------- - web.Response + ConnexionResponse API response. 
""" filters = {'pid': pid, @@ -515,7 +508,7 @@ async def get_ports_info(request, agent_id: str, pretty: bool = False, wait_for_ # Add nested fields to kwargs filters nested = ['local.ip', 'local.port', 'remote.ip'] for field in nested: - filters[field] = request.query.get(field, None) + filters[field] = request.query_params.get(field, None) f_kwargs = {'agent_list': [agent_id], 'offset': offset, @@ -534,26 +527,25 @@ async def get_ports_info(request, agent_id: str, pretty: bool = False, wait_for_ is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) @deprecate_endpoint() -async def get_processes_info(request, agent_id: str, pretty: bool = False, wait_for_complete: bool = False, +async def get_processes_info(agent_id: str, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = None, select: str = None, sort: str = None, search: str = None, pid: str = None, state: str = None, ppid: str = None, egroup: str = None, euser: str = None, fgroup: str = None, name: str = None, nlwp: str = None, pgrp: str = None, priority: str = None, rgroup: str = None, ruser: str = None, sgroup: str = None, suser: str = None, q: str = None, - distinct: bool = False) -> web.Response: + distinct: bool = False) -> ConnexionResponse: """Get processes info an agent. Parameters ---------- - request : request.connexion agent_id : str Agent ID. offset : int @@ -606,7 +598,7 @@ async def get_processes_info(request, agent_id: str, pretty: bool = False, wait_ Returns ------- - web.Response + ConnexionResponse API response. 
""" filters = {'state': state, @@ -641,8 +633,8 @@ async def get_processes_info(request, agent_id: str, pretty: bool = False, wait_ is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/task_controller.py b/api/api/controllers/task_controller.py index 1854e20687b..9f5b2ad47f7 100755 --- a/api/api/controllers/task_controller.py +++ b/api/api/controllers/task_controller.py @@ -4,9 +4,10 @@ import logging -from aiohttp import web +from connexion import request +from connexion.lifecycle import ConnexionResponse -from api.encoder import dumps, prettify +from api.controllers.util import json_response from api.util import remove_nones_to_dict, parse_api_param, raise_if_exc from wazuh.core.cluster.dapi.dapi import DistributedAPI from wazuh.core.common import DATABASE_LIMIT @@ -15,10 +16,10 @@ logger = logging.getLogger('wazuh') -async def get_tasks_status(request, pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, +async def get_tasks_status(pretty: bool = False, wait_for_complete: bool = False, offset: int = 0, limit: int = DATABASE_LIMIT, tasks_list: list = None, agents_list: list = None, command: str = None, node: str = None, module: str = None, status: str = None, q: str = None, - search: str = None, select: str = None, sort: str = None) -> web.Response: + search: str = None, select: str = None, sort: str = None) -> ConnexionResponse: """Check the status of the specified tasks. Parameters @@ -56,7 +57,7 @@ async def get_tasks_status(request, pretty: bool = False, wait_for_complete: boo Returns ------- - web.Response + ConnexionResponse API response. """ f_kwargs = {'select': select, 'search': parse_api_param(search, 'search'), @@ -78,8 +79,8 @@ async def get_tasks_status(request, pretty: bool = False, wait_for_complete: boo is_async=False, wait_for_complete=wait_for_complete, logger=logger, - rbac_permissions=request['token_info']['rbac_policies'] + rbac_permissions=request.context['token_info']['rbac_policies'] ) data = raise_if_exc(await dapi.distribute_function()) - return web.json_response(data=data, status=200, dumps=prettify if pretty else dumps) + return json_response(data, pretty=pretty) diff --git a/api/api/controllers/test/conftest.py b/api/api/controllers/test/conftest.py new file mode 100644 index 00000000000..9b946c2f4fb --- /dev/null +++ b/api/api/controllers/test/conftest.py @@ -0,0 +1,21 @@ +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . 
+# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 + +from unittest.mock import MagicMock, patch +import pytest + +from connexion.testing import TestContext + + +@pytest.fixture +def mock_request(request): + """fixture to wrap functions with request.""" + controller_name = request.param + operation = MagicMock(name="operation") + operation.method = "post" + with TestContext(operation=operation): + with patch(f'api.controllers.{controller_name}.request') as m_req: + m_req.query_params.get = lambda key, default: None + m_req.context = {'token_info': {'rbac_policies': {}}} + yield m_req diff --git a/api/api/controllers/test/test_active_response_controller.py b/api/api/controllers/test/test_active_response_controller.py index 7acd98e89cf..3aa13bb9292 100644 --- a/api/api/controllers/test/test_active_response_controller.py +++ b/api/api/controllers/test/test_active_response_controller.py @@ -6,7 +6,7 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response +from connexion.lifecycle import ConnexionResponse from api.controllers.test.utils import CustomAffectedItems with patch('wazuh.common.wazuh_uid'): @@ -21,16 +21,17 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["active_response_controller"], indirect=True) @patch('api.controllers.active_response_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.active_response_controller.remove_nones_to_dict') @patch('api.controllers.active_response_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.active_response_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_run_command(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_run_command(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'run_command' endpoint is working as expected.""" with patch('api.controllers.active_response_controller.Body'): with patch('api.controllers.active_response_controller.ActiveResponseModel.get_kwargs', return_value=AsyncMock()) as mock_getkwargs: - result = await run_command(request=mock_request) + result = await run_command() mock_dapi.assert_called_once_with(f=active_response.run_command, f_kwargs=mock_remove.return_value, request_type='distributed_master', @@ -38,8 +39,8 @@ async def test_run_command(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(mock_getkwargs.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_agent_controller.py b/api/api/controllers/test/test_agent_controller.py index e0a85d96c98..293a86fad5e 100644 --- a/api/api/controllers/test/test_agent_controller.py +++ b/api/api/controllers/test/test_agent_controller.py @@ -6,7 +6,6 @@ from unittest.mock import ANY, AsyncMock, MagicMock, call, patch import pytest -from aiohttp import web_response from connexion.lifecycle import ConnexionResponse from api.controllers.test.utils import CustomAffectedItems @@ -38,16 +37,16 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) 
@patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_alist', ['001', 'all']) async def test_delete_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_alist, - mock_request=MagicMock()): + mock_request): """Verify 'delete_agents' endpoint is working as expected.""" - result = await delete_agents(request=mock_request, - agents_list=mock_alist) + result = await delete_agents(agents_list=mock_alist) if 'all' in mock_alist: mock_alist = None f_kwargs = {'agent_list': mock_alist, @@ -61,35 +60,36 @@ async def test_delete_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ 'node_name': None, 'name': None, 'ip': None, - 'registerIP': mock_request.query.get('registerIP', None) + 'registerIP': mock_request.query_params.get('registerIP', None) }, 'q': None } nested = ['os.version', 'os.name', 'os.platform'] for field in nested: - f_kwargs['filters'][field] = mock_request.query.get(field, None) - mock_dapi.assert_called_once_with(f=agent.delete_agents, - f_kwargs=mock_remove.return_value, - request_type='local_master', - is_async=False, - wait_for_complete=False, - logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] - ) + f_kwargs['filters'][field] = mock_request.query_params.get(field, None) + mock_dapi.awaited_once_with(f=agent.delete_agents, + f_kwargs=mock_remove.return_value, + request_type='local_master', + is_async=False, + wait_for_complete=False, + logger=ANY, + rbac_permissions=mock_request.context['token_info']['rbac_policies'] + ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_agents' endpoint is working as expected.""" - result = await get_agents(request=mock_request) + result = await get_agents() f_kwargs = {'agent_list': None, 'offset': 0, 'limit': DATABASE_LIMIT, @@ -105,7 +105,7 @@ async def test_get_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp 'node_name': None, 'name': None, 'ip': None, - 'registerIP': mock_request.query.get('registerIP', None), + 'registerIP': mock_request.query_params.get('registerIP', None), 'group_config_status': None }, 'q': None, @@ -113,54 +113,56 @@ async def test_get_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp } nested = ['os.version', 'os.name', 'os.platform'] for field in nested: - f_kwargs['filters'][field] = mock_request.query.get(field, None) + f_kwargs['filters'][field] = mock_request.query_params.get(field, None) 
mock_dapi.assert_called_once_with(f=agent.get_agents, f_kwargs=mock_remove.return_value, request_type='local_master', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_add_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_add_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'add_agent' endpoint is working as expected.""" with patch('api.controllers.agent_controller.Body.validate_content_type'): with patch('api.controllers.agent_controller.AgentAddedModel.get_kwargs', return_value=AsyncMock()) as mock_getkwargs: - result = await add_agent(request=mock_request) + result = await add_agent() mock_dapi.assert_called_once_with(f=agent.add_agent, f_kwargs=mock_remove.return_value, request_type='local_master', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(mock_getkwargs.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_reconnect_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_reconnect_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'reconnect_agents' endpoint is working as expected.""" - result = await reconnect_agents(request=mock_request) + result = await reconnect_agents() f_kwargs = {'agent_list': '*' } mock_dapi.assert_called_once_with(f=agent.reconnect_agents, @@ -170,22 +172,23 @@ async def test_reconnect_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mo wait_for_complete=False, broadcasting=True, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio 
+@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_restart_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_restart_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'restart_agents' endpoint is working as expected.""" - result = await restart_agents(request=mock_request) + result = await restart_agents() f_kwargs = {'agent_list': '*' } mock_dapi.assert_called_once_with(f=agent.restart_agents, @@ -195,23 +198,24 @@ async def test_restart_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock wait_for_complete=False, broadcasting=True, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_restart_agents_by_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_restart_agents_by_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'restart_agents_by_node' endpoint is working as expected.""" with patch('api.controllers.agent_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await restart_agents_by_node(request=mock_request, + result = await restart_agents_by_node( node_id='001') f_kwargs = {'node_id': '001', 'agent_list': '*' @@ -222,17 +226,18 @@ async def test_restart_agents_by_node(mock_exc, mock_dapi, mock_remove, mock_dfu is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @@ -240,12 +245,11 @@ async def test_restart_agents_by_node(mock_exc, mock_dapi, mock_remove, mock_dfu @patch('api.controllers.agent_controller.raise_if_exc', 
return_value=CustomAffectedItems()) @patch('api.controllers.agent_controller.check_component_configuration_pair') async def test_get_agent_config(mock_check_pair, mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, - mock_request=MagicMock()): + mock_request): """Verify 'get_agent_config' endpoint is working as expected.""" kwargs_param = {'configuration': 'configuration_value' } - result = await get_agent_config(request=mock_request, - **kwargs_param) + result = await get_agent_config(**kwargs_param) f_kwargs = {'agent_list': [None], 'component': None, 'config': kwargs_param.get('configuration', None) @@ -256,24 +260,24 @@ async def test_get_agent_config(mock_check_pair, mock_exc, mock_dapi, mock_remov is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) async def test_delete_single_agent_multiple_groups(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, - mock_request=MagicMock()): + mock_request): """Verify 'delete_single_agent_multiple_groups' endpoint is working as expected.""" - result = await delete_single_agent_multiple_groups(request=mock_request, - agent_id='001') + result = await delete_single_agent_multiple_groups(agent_id='001') f_kwargs = {'agent_list': ['001'], 'group_list': None } @@ -283,23 +287,23 @@ async def test_delete_single_agent_multiple_groups(mock_exc, mock_dapi, mock_rem is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_sync_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_sync_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_sync_agent' endpoint is working as expected.""" - result = await get_sync_agent(request=mock_request, - agent_id='001') + result = await get_sync_agent(agent_id='001') f_kwargs = {'agent_list': ['001'] } mock_dapi.assert_called_once_with(f=agent.get_agents_sync_group, @@ -308,25 +312,24 @@ async def 
test_get_sync_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) async def test_delete_single_agent_single_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, - mock_request=MagicMock()): + mock_request): """Verify 'delete_single_agent_single_group' endpoint is working as expected.""" - result = await delete_single_agent_single_group(request=mock_request, - agent_id='001', - group_id='001') + result = await delete_single_agent_single_group(agent_id='001', group_id='001') f_kwargs = {'agent_list': ['001'], 'group_list': ['001'] } @@ -336,24 +339,23 @@ async def test_delete_single_agent_single_group(mock_exc, mock_dapi, mock_remove is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_put_agent_single_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_put_agent_single_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'put_agent_single_group' endpoint is working as expected.""" - result = await put_agent_single_group(request=mock_request, - agent_id='001', - group_id='001') + result = await put_agent_single_group(agent_id='001', group_id='001') f_kwargs = {'agent_list': ['001'], 'group_list': ['001'], 'replace': False @@ -364,23 +366,23 @@ async def test_put_agent_single_group(mock_exc, mock_dapi, mock_remove, mock_dfu is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) 
@patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_agent_key(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_agent_key(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_agent_key' endpoint is working as expected.""" - result = await get_agent_key(request=mock_request, - agent_id='001') + result = await get_agent_key(agent_id='001') f_kwargs = {'agent_list': ['001'] } mock_dapi.assert_called_once_with(f=agent.get_agents_keys, @@ -389,22 +391,23 @@ async def test_get_agent_key(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_restart_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_restart_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'restart_agent' endpoint is working as expected.""" - result = await restart_agent(request=mock_request, + result = await restart_agent( agent_id='001') f_kwargs = {'agent_list': ['001'] } @@ -414,14 +417,15 @@ async def test_restart_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @pytest.mark.parametrize('agents_list', [ (['all']), (['001', '002']), @@ -431,10 +435,10 @@ async def test_restart_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_put_upgrade_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, agents_list): +async def test_put_upgrade_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, + agents_list, mock_request): """Verify 'put_upgrade_agents' endpoint is working as 
expected.""" - mock_request = MagicMock() - result = await put_upgrade_agents(request=mock_request, agents_list=agents_list) + result = await put_upgrade_agents(agents_list=agents_list) if 'all' in agents_list: agents_list = '*' @@ -451,14 +455,14 @@ async def test_put_upgrade_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, 'node_name': None, 'name': None, 'ip': None, - 'registerIP': mock_request.query.get('registerIP', None) + 'registerIP': mock_request.query_params.get('registerIP', None) }, 'q': None } nested = ['os.version', 'os.name', 'os.platform'] for field in nested: - f_kwargs['filters'][field] = mock_request.query.get(field, None) + f_kwargs['filters'][field] = mock_request.query_params.get(field, None) mock_dapi.assert_called_once_with(f=agent.upgrade_agents, f_kwargs=mock_remove.return_value, @@ -466,15 +470,16 @@ async def test_put_upgrade_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], broadcasting=agents_list == '*' ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @pytest.mark.parametrize('agents_list, file_path', [ (['all'], '/var/ossec/valid_file.wpk'), (['001', '002'], '/var/ossec/var/upgrade/valid_file.wpk'), @@ -485,10 +490,10 @@ async def test_put_upgrade_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_put_upgrade_custom_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, agents_list, file_path): +async def test_put_upgrade_custom_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, + agents_list, file_path, mock_request): """Verify 'put_upgrade_custom_agents' endpoint is working as expected.""" - mock_request = MagicMock() - result = await put_upgrade_custom_agents(request=mock_request, agents_list=agents_list, file_path=file_path) + result = await put_upgrade_custom_agents(agents_list=agents_list, file_path=file_path) if 'all' in agents_list: agents_list = '*' @@ -502,14 +507,14 @@ async def test_put_upgrade_custom_agents(mock_exc, mock_dapi, mock_remove, mock_ 'node_name': None, 'name': None, 'ip': None, - 'registerIP': mock_request.query.get('registerIP', None) + 'registerIP': mock_request.query_params.get('registerIP', None) }, 'q': None } nested = ['os.version', 'os.name', 'os.platform'] for field in nested: - f_kwargs['filters'][field] = mock_request.query.get(field, None) + f_kwargs['filters'][field] = mock_request.query_params.get(field, None) mock_dapi.assert_called_once_with(f=agent.upgrade_agents, f_kwargs=mock_remove.return_value, @@ -517,24 +522,23 @@ async def test_put_upgrade_custom_agents(mock_exc, mock_dapi, mock_remove, mock_ is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], broadcasting=agents_list == '*' ) mock_exc.assert_called_once_with(mock_dfunc.return_value) 
mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_daemon_stats(mock_exc, mock_dapi, mock_remove, mock_dfunc): +async def test_get_daemon_stats(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_daemon_stats' function is working as expected.""" - mock_request = MagicMock() - result = await get_daemon_stats(request=mock_request, - agent_id='001', + result = await get_daemon_stats(agent_id='001', daemons_list=['daemon_1', 'daemon_2']) f_kwargs = {'agent_list': ['001'], @@ -545,22 +549,23 @@ async def test_get_daemon_stats(mock_exc, mock_dapi, mock_remove, mock_dfunc): is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies']) + rbac_permissions=mock_request.context['token_info']['rbac_policies']) mock_remove.assert_called_once_with(f_kwargs) mock_exc.assert_called_once_with(mock_dfunc.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_component_stats(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_component_stats(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_component_stats' endpoint is working as expected.""" - result = await get_component_stats(request=mock_request) + result = await get_component_stats() f_kwargs = {'agent_list': [None], 'component': None } @@ -570,23 +575,24 @@ async def test_get_component_stats(mock_exc, mock_dapi, mock_remove, mock_dfunc, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_agent_upgrade(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp): +async 
def test_get_agent_upgrade(mock_exc, mock_dapi, mock_remove, mock_dfunc, + mock_exp, mock_request): """Verify 'get_agent_upgrade' endpoint is working as expected.""" - mock_request = MagicMock() - result = await get_agent_upgrade(request=mock_request) + result = await get_agent_upgrade() f_kwargs = {'agent_list': None, 'filters': { 'manager': None, @@ -595,7 +601,7 @@ async def test_get_agent_upgrade(mock_exc, mock_dapi, mock_remove, mock_dfunc, m 'node_name': None, 'name': None, 'ip': None, - 'registerIP': mock_request.query.get('registerIP', None) + 'registerIP': mock_request.query_params.get('registerIP', None) }, 'q': None } @@ -603,7 +609,7 @@ async def test_get_agent_upgrade(mock_exc, mock_dapi, mock_remove, mock_dfunc, m # Add nested fields to kwargs filters nested = ['os.version', 'os.name', 'os.platform'] for field in nested: - f_kwargs['filters'][field] = mock_request.query.get(field, None) + f_kwargs['filters'][field] = mock_request.query_params.get(field, None) mock_dapi.assert_called_once_with(f=agent.get_upgrade_result, f_kwargs=mock_remove.return_value, @@ -611,21 +617,22 @@ async def test_get_agent_upgrade(mock_exc, mock_dapi, mock_remove, mock_dfunc, m is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_post_new_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_post_new_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'post_new_agent' endpoint is working as expected.""" - result = await post_new_agent(request=mock_request, + result = await post_new_agent( agent_name='agent_name_value') # `ip` and `force` come from the API model f_kwargs = { @@ -646,14 +653,15 @@ async def test_post_new_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @@ -661,9 +669,9 @@ async def test_post_new_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_alist', ['001', 'all']) async def 
test_delete_multiple_agent_single_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_alist, - mock_request=MagicMock()): + mock_request): """Verify 'delete_multiple_agent_single_group' endpoint is working as expected.""" - result = await delete_multiple_agent_single_group(request=mock_request, + result = await delete_multiple_agent_single_group( agents_list=mock_alist, group_id='001') if 'all' in mock_alist: @@ -677,23 +685,24 @@ async def test_delete_multiple_agent_single_group(mock_exc, mock_dapi, mock_remo is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_put_multiple_agent_single_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, - mock_request=MagicMock()): +async def test_put_multiple_agent_single_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, + mock_exp, mock_request): """Verify 'put_multiple_agent_single_group' endpoint is working as expected.""" - result = await put_multiple_agent_single_group(request=mock_request, + result = await put_multiple_agent_single_group( group_id='001', agents_list='001') f_kwargs = {'agent_list': '001', @@ -706,14 +715,15 @@ async def test_put_multiple_agent_single_group(mock_exc, mock_dapi, mock_remove, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @@ -721,9 +731,9 @@ async def test_put_multiple_agent_single_group(mock_exc, mock_dapi, mock_remove, @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_alist', ['001', 'all']) async def test_delete_groups(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_alist, - mock_request=MagicMock()): + mock_request): """Verify 'delete_groups' endpoint is working as expected.""" - result = await delete_groups(request=mock_request, + result = await delete_groups( groups_list=mock_alist) if 'all' in mock_alist: mock_alist = None @@ -735,23 +745,24 @@ async def test_delete_groups(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + 
rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_list_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_list_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_list_group' endpoint is working as expected.""" - result = await get_list_group(request=mock_request) - hash_ = mock_request.query.get('hash', 'md5') + result = await get_list_group() + hash_ = mock_request.query_params.get('hash', 'md5') f_kwargs = {'offset': 0, 'limit': None, 'group_list': None, @@ -770,22 +781,23 @@ async def test_get_list_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_agents_in_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_agents_in_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_agents_in_group' endpoint is working as expected.""" - result = await get_agents_in_group(request=mock_request, + result = await get_agents_in_group( group_id='001') f_kwargs = {'group_list': ['001'], 'offset': 0, @@ -805,47 +817,49 @@ async def test_get_agents_in_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') 
@patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_post_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_post_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'post_group' endpoint is working as expected.""" with patch('api.controllers.agent_controller.Body.validate_content_type'): with patch('api.controllers.agent_controller.GroupAddedModel.get_kwargs', return_value=AsyncMock()) as mock_getkwargs: - result = await post_group(request=mock_request) + result = await post_group() mock_dapi.assert_called_once_with(f=agent.create_group, f_kwargs=mock_remove.return_value, request_type='local_master', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(mock_getkwargs.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_group_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_group_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_group_config' endpoint is working as expected.""" - result = await get_group_config(request=mock_request, + result = await get_group_config( group_id='001') f_kwargs = {'group_list': ['001'], 'offset': 0, @@ -857,24 +871,25 @@ async def test_get_group_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mo is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_put_group_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_put_group_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'put_group_config' endpoint is working as expected.""" with patch('api.controllers.agent_controller.Body.validate_content_type'): with 
patch('api.controllers.agent_controller.Body.decode_body') as mock_dbody: - result = await put_group_config(request=mock_request, + result = await put_group_config( group_id='001', body={}) f_kwargs = {'group_list': ['001'], @@ -886,24 +901,24 @@ async def test_put_group_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mo is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_group_files(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_group_files(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_group_files' endpoint is working as expected.""" - result = await get_group_files(request=mock_request, - group_id='001') - hash_ = mock_request.query.get('hash', 'md5') # Select algorithm to generate the returned checksums. + result = await get_group_files(group_id='001') + hash_ = mock_request.query_params.get('hash', 'md5') # Select algorithm to generate the returned checksums. 
f_kwargs = {'group_list': ['001'], 'offset': 0, 'limit': None, @@ -922,27 +937,26 @@ async def test_get_group_files(mock_exc, mock_dapi, mock_remove, mock_dfunc, moc is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_group_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_group_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_group_file' endpoint is working as expected.""" - result = await get_group_file(request=mock_request, - group_id='001', - file_name='filename_value') + result = await get_group_file(group_id='001', file_name='filename_value') f_kwargs = {'group_list': ['001'], 'filename': 'filename_value', - 'type_conf': mock_request.query.get('type', None), + 'type_conf': mock_request.query_params.get('type', None), 'raw': False } mock_dapi.assert_called_once_with(f=agent.get_file_conf, @@ -951,26 +965,26 @@ async def test_get_group_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) +@pytest.mark.parametrize('mock_alist', [CustomAffectedItems(empty=True), CustomAffectedItems()]) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.AffectedItemsWazuhResult', return_value={}) -@pytest.mark.parametrize('mock_alist', [CustomAffectedItems(empty=True), CustomAffectedItems()]) async def test_restart_agents_by_group(mock_aiwr, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_alist, - mock_request=MagicMock()): + mock_request): """Verify 'restart_agents_by_group' endpoint is working as expected.""" with patch('api.controllers.agent_controller.raise_if_exc', return_value=mock_alist) as mock_exc: - result = await restart_agents_by_group(request=mock_request, - group_id='001') + result = await restart_agents_by_group(group_id='001') f_kwargs = {'group_list': ['001'], 'select': ['id'], 'limit': None @@ -981,7 +995,7 @@ async def test_restart_agents_by_group(mock_aiwr, mock_dapi, mock_remove, mock_d 
is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) ] calls_restart_agents_by_group = [call(f=agent.restart_agents_by_group, @@ -990,7 +1004,7 @@ async def test_restart_agents_by_group(mock_aiwr, mock_dapi, mock_remove, mock_d is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) ] if not mock_alist.affected_items: @@ -1007,43 +1021,44 @@ async def test_restart_agents_by_group(mock_aiwr, mock_dapi, mock_remove, mock_d call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) +@patch('api.controllers.agent_controller.AgentInsertedModel.get_kwargs') +@patch('api.controllers.agent_controller.Body.validate_content_type') @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_insert_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_insert_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, + mock_valid, mock_getkwargs, mock_request): """Verify 'insert_agent' endpoint is working as expected.""" - with patch('api.controllers.agent_controller.Body.validate_content_type'): - with patch('api.controllers.agent_controller.AgentInsertedModel.get_kwargs', - return_value=AsyncMock()) as mock_getkwargs: - result = await insert_agent(request=mock_request) - mock_dapi.assert_called_once_with(f=agent.add_agent, - f_kwargs=mock_remove.return_value, - request_type='local_master', - is_async=False, - wait_for_complete=False, - logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] - ) - mock_exc.assert_called_once_with(mock_dfunc.return_value) - mock_remove.assert_called_once_with(mock_getkwargs.return_value) - assert isinstance(result, web_response.Response) + result = await insert_agent() + mock_dapi.assert_called_once_with(f=agent.add_agent, + f_kwargs=mock_remove.return_value, + request_type='local_master', + is_async=False, + wait_for_complete=False, + logger=ANY, + rbac_permissions=mock_request.context['token_info']['rbac_policies']) + mock_exc.assert_called_once_with(mock_dfunc.return_value) + mock_remove.assert_called_once_with(mock_getkwargs.return_value) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_agent_no_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, 
mock_request=MagicMock()): +async def test_get_agent_no_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_agent_no_group' endpoint is working as expected.""" - result = await get_agent_no_group(request=mock_request) + result = await get_agent_no_group() f_kwargs = {'offset': 0, 'limit': DATABASE_LIMIT, 'select': None, @@ -1057,22 +1072,23 @@ async def test_get_agent_no_group(mock_exc, mock_dapi, mock_remove, mock_dfunc, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_agent_outdated(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_agent_outdated(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_agent_outdated' endpoint is working as expected.""" - result = await get_agent_outdated(request=mock_request) + result = await get_agent_outdated() f_kwargs = {'offset': 0, 'limit': DATABASE_LIMIT, 'sort': None, @@ -1086,22 +1102,23 @@ async def test_get_agent_outdated(mock_exc, mock_dapi, mock_remove, mock_dfunc, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_agent_fields(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_agent_fields(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_agent_fields' endpoint is working as expected.""" - result = await get_agent_fields(request=mock_request) + result = await get_agent_fields() f_kwargs = {'offset': 0, 'limit': DATABASE_LIMIT, 'sort': None, @@ -1115,53 +1132,55 @@ async def test_get_agent_fields(mock_exc, mock_dapi, mock_remove, mock_dfunc, mo is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) 
mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) async def test_get_agent_summary_status(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, - mock_request=MagicMock()): + mock_request): """Verify 'get_agent_summary_status' endpoint is working as expected.""" - result = await get_agent_summary_status(request=mock_request) + result = await get_agent_summary_status() mock_dapi.assert_called_once_with(f=agent.get_agents_summary_status, f_kwargs=mock_remove.return_value, request_type='local_master', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["agent_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.agent_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.agent_controller.remove_nones_to_dict') @patch('api.controllers.agent_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.agent_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_agent_summary_os(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_agent_summary_os(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_agent_summary_os' endpoint is working as expected.""" - result = await get_agent_summary_os(request=mock_request) + result = await get_agent_summary_os() mock_dapi.assert_called_once_with(f=agent.get_agents_summary_os, f_kwargs=mock_remove.return_value, request_type='local_master', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_cdb_list_controller.py b/api/api/controllers/test/test_cdb_list_controller.py index f08cfdf1e8f..fbbb8ae22f7 100644 --- a/api/api/controllers/test/test_cdb_list_controller.py +++ b/api/api/controllers/test/test_cdb_list_controller.py @@ -6,9 +6,8 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response -from api.controllers.test.utils import CustomAffectedItems from connexion.lifecycle import ConnexionResponse +from api.controllers.test.utils import CustomAffectedItems with patch('wazuh.common.wazuh_uid'): with 
patch('wazuh.common.wazuh_gid'): @@ -25,13 +24,14 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cdb_list_controller"], indirect=True) @patch('api.controllers.cdb_list_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cdb_list_controller.remove_nones_to_dict') @patch('api.controllers.cdb_list_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cdb_list_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_lists(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_lists(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_lists' endpoint is working as expected.""" - result = await get_lists(request=mock_request) + result = await get_lists() f_kwargs = {'offset': 0, 'select': None, 'limit': None, @@ -50,23 +50,24 @@ async def test_get_lists(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_requ is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cdb_list_controller"], indirect=True) @patch('api.controllers.cdb_list_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cdb_list_controller.remove_nones_to_dict') @patch('api.controllers.cdb_list_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cdb_list_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_bool', [True, False]) -async def test_get_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_bool, mock_request=MagicMock()): +async def test_get_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_bool, mock_request): """Verify 'get_file' endpoint is working as expected.""" with patch('api.controllers.cdb_list_controller.isinstance', return_value=mock_bool) as mock_isinstance: - result = await get_file(request=mock_request) + result = await get_file() f_kwargs = {'filename': None, 'raw': False } @@ -76,26 +77,27 @@ async def test_get_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_bool, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) if mock_isinstance.return_value: - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) else: assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cdb_list_controller"], indirect=True) @patch('api.controllers.cdb_list_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cdb_list_controller.remove_nones_to_dict') @patch('api.controllers.cdb_list_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cdb_list_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_put_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def 
test_put_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'put_file' endpoint is working as expected.""" with patch('api.controllers.cdb_list_controller.Body.validate_content_type'): with patch('api.controllers.cdb_list_controller.Body.decode_body') as mock_dbody: - result = await put_file(request=mock_request, + result = await put_file( body={}) f_kwargs = {'filename': None, 'overwrite': False, @@ -107,21 +109,22 @@ async def test_put_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_reque is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cdb_list_controller"], indirect=True) @patch('api.controllers.cdb_list_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cdb_list_controller.remove_nones_to_dict') @patch('api.controllers.cdb_list_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cdb_list_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_delete_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_delete_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'delete_file' endpoint is working as expected.""" - result = await delete_file(request=mock_request) + result = await delete_file() f_kwargs = {'filename': None } mock_dapi.assert_called_once_with(f=cdb_list.delete_list_file, @@ -130,21 +133,22 @@ async def test_delete_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cdb_list_controller"], indirect=True) @patch('api.controllers.cdb_list_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cdb_list_controller.remove_nones_to_dict') @patch('api.controllers.cdb_list_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cdb_list_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_lists_files(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_lists_files(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_lists_files' endpoint is working as expected.""" - result = await get_lists_files(request=mock_request) + result = await get_lists_files() f_kwargs = {'offset': 0, 'limit': None, 'sort_by': ['relative_dirname', 'filename'], @@ -161,8 +165,8 @@ async def test_get_lists_files(mock_exc, mock_dapi, mock_remove, mock_dfunc, moc is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) 
mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_ciscat_controller.py b/api/api/controllers/test/test_ciscat_controller.py index eca91c7368f..a6ad9556254 100644 --- a/api/api/controllers/test/test_ciscat_controller.py +++ b/api/api/controllers/test/test_ciscat_controller.py @@ -6,7 +6,7 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response +from connexion.lifecycle import ConnexionResponse from api.controllers.test.utils import CustomAffectedItems with patch('wazuh.common.wazuh_uid'): @@ -21,13 +21,14 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["ciscat_controller"], indirect=True) @patch('api.controllers.ciscat_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.ciscat_controller.remove_nones_to_dict') @patch('api.controllers.ciscat_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.ciscat_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_agents_ciscat_results(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_agents_ciscat_results(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_agents_ciscat_results' endpoint is working as expected.""" - result = await get_agents_ciscat_results(request=mock_request, + result = await get_agents_ciscat_results( agent_id='001') f_kwargs = { 'agent_list': ['001'], @@ -39,7 +40,7 @@ async def test_get_agents_ciscat_results(mock_exc, mock_dapi, mock_remove, mock_ 'filters': { 'benchmark': None, 'profile': None, - 'pass': mock_request.query.get('pass', None), + 'pass': mock_request.query_params.get('pass', None), 'fail': None, 'error': None, 'notchecked': None, @@ -54,8 +55,8 @@ async def test_get_agents_ciscat_results(mock_exc, mock_dapi, mock_remove, mock_ is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_cluster_controller.py b/api/api/controllers/test/test_cluster_controller.py index 49f97bec6e9..200410f2010 100644 --- a/api/api/controllers/test/test_cluster_controller.py +++ b/api/api/controllers/test/test_cluster_controller.py @@ -6,7 +6,6 @@ from unittest.mock import ANY, AsyncMock, MagicMock, call, patch import pytest -from aiohttp import web_response from connexion.lifecycle import ConnexionResponse from api.controllers.test.utils import CustomAffectedItems @@ -30,46 +29,48 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_cluster_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_cluster_node(mock_exc, mock_dapi, 
mock_remove, mock_dfunc, mock_request): """Verify 'get_cluster_node' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_cluster_node(request=mock_request) + result = await get_cluster_node() mock_dapi.assert_called_once_with(f=cluster.get_node_wrapper, f_kwargs=mock_remove.return_value, request_type='local_any', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_cluster_nodes(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_cluster_nodes(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_cluster_nodes' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_cluster_nodes(request=mock_request) + result = await get_cluster_nodes() f_kwargs = {'filter_node': None, 'offset': 0, 'limit': None, 'sort': None, 'search': None, 'select': None, - 'filter_type': mock_request.query.get('type', 'all'), + 'filter_type': mock_request.query_params.get('type', 'all'), 'q': None, 'distinct': False } @@ -80,27 +81,27 @@ async def test_get_cluster_nodes(mock_exc, mock_dapi, mock_remove, mock_dfunc, m wait_for_complete=False, logger=ANY, local_client_arg='lc', - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_healthcheck(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_healthcheck(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_healthcheck' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await 
get_healthcheck(request=mock_request) - f_kwargs = {'filter_node': None - } + result = await get_healthcheck() + f_kwargs = {'filter_node': None} mock_dapi.assert_called_once_with(f=cluster.get_health_nodes, f_kwargs=mock_remove.return_value, request_type='local_master', @@ -108,25 +109,26 @@ async def test_get_healthcheck(mock_exc, mock_dapi, mock_remove, mock_dfunc, moc wait_for_complete=False, logger=ANY, local_client_arg='lc', - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_nodes_ruleset_sync_status(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_nodes_ruleset_sync_status(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_nodes_ruleset_sync_status' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_nodes_ruleset_sync_status(request=mock_request) + result = await get_nodes_ruleset_sync_status() f_kwargs = {'node_list': '*', 'master_md5': {'dikt_key': 'dikt_value'} } @@ -142,131 +144,131 @@ async def test_get_nodes_ruleset_sync_status(mock_exc, mock_dapi, mock_remove, m is_async=True, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value, broadcasting=True)]) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 3 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_status(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_status(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_status' endpoint is working as expected.""" - result = await get_status(request=mock_request) + result = await get_status() mock_dapi.assert_called_once_with(f=cluster.get_status_json, f_kwargs=mock_remove.return_value, request_type='local_master', is_async=False, wait_for_complete=False, logger=ANY, - 
rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_config' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_config(request=mock_request) + result = await get_config() mock_dapi.assert_called_once_with(f=cluster.read_config_wrapper, f_kwargs=mock_remove.return_value, request_type='local_any', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_status_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_status_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_status_node' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_status_node(request=mock_request, - node_id='001') - f_kwargs = {'node_id': '001' - } + result = await get_status_node(node_id='001') + f_kwargs = {'node_id': '001'} mock_dapi.assert_called_once_with(f=manager.get_status, f_kwargs=mock_remove.return_value, request_type='distributed_master', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) 
@patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_info_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_info_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_info_node' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_info_node(request=mock_request, - node_id='001') - f_kwargs = {'node_id': '001' - } + result = await get_info_node(node_id='001') + f_kwargs = {'node_id': '001'} mock_dapi.assert_called_once_with(f=manager.get_basic_info, f_kwargs=mock_remove.return_value, request_type='distributed_master', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_bool', [True, False]) async def test_get_configuration_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_bool, - mock_request=MagicMock()): + mock_request): """Verify 'get_configuration_node' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: with patch('api.controllers.cluster_controller.isinstance', return_value=mock_bool) as mock_isinstance: - result = await get_configuration_node(request=mock_request, - node_id='001') + result = await get_configuration_node(node_id='001') f_kwargs = {'node_id': '001', 'section': None, 'field': None, @@ -278,7 +280,7 @@ async def test_get_configuration_node(mock_exc, mock_dapi, mock_remove, mock_dfu is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), @@ -286,22 +288,21 @@ async def test_get_configuration_node(mock_exc, mock_dapi, mock_remove, mock_dfu assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) if mock_isinstance.return_value: - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) else: assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) 
@patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_daemon_stats_node(mock_exc, mock_dapi, mock_remove, mock_dfunc): +async def test_get_daemon_stats_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_daemon_stats_node' function is working as expected.""" - mock_request = MagicMock() with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_daemon_stats_node(request=mock_request, - node_id='worker1', + result = await get_daemon_stats_node(node_id='worker1', daemons_list=['daemon_1', 'daemon_2']) f_kwargs = {'node_id': 'worker1', @@ -312,27 +313,27 @@ async def test_get_daemon_stats_node(mock_exc, mock_dapi, mock_remove, mock_dfun is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value) mock_remove.assert_called_once_with(f_kwargs) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_date', [None, 'date_value']) -async def test_get_stats_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_date, mock_request=MagicMock()): +async def test_get_stats_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_date, mock_request): """Verify 'get_stats_node' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: with patch('api.controllers.cluster_controller.deserialize_date', return_value='desdate_value') as mock_desdate: - result = await get_stats_node(request=mock_request, - node_id='001', + result = await get_stats_node(node_id='001', date=mock_date) if not mock_date: date = ANY @@ -348,82 +349,80 @@ async def test_get_stats_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) 
@patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_stats_hourly_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_stats_hourly_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_stats_hourly_node' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_stats_hourly_node(request=mock_request, - node_id='001') - f_kwargs = {'node_id': '001' - } + result = await get_stats_hourly_node(node_id='001') + f_kwargs = {'node_id': '001'} mock_dapi.assert_called_once_with(f=stats.hourly, f_kwargs=mock_remove.return_value, request_type='distributed_master', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_stats_weekly_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_stats_weekly_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_stats_weekly_node' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_stats_weekly_node(request=mock_request, - node_id='001') - f_kwargs = {'node_id': '001' - } + result = await get_stats_weekly_node(node_id='001') + f_kwargs = {'node_id': '001'} mock_dapi.assert_called_once_with(f=stats.weekly, f_kwargs=mock_remove.return_value, request_type='distributed_master', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async 
def test_get_stats_analysisd_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_stats_analysisd_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_stats_analysisd_node' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_stats_analysisd_node(request=mock_request, - node_id='001') + result = await get_stats_analysisd_node(node_id='001') f_kwargs = {'node_id': '001', 'filename': common.ANALYSISD_STATS } @@ -433,26 +432,26 @@ async def test_get_stats_analysisd_node(mock_exc, mock_dapi, mock_remove, mock_d is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_stats_remoted_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_stats_remoted_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_stats_remoted_node' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_stats_remoted_node(request=mock_request, - node_id='001') + result = await get_stats_remoted_node(node_id='001') f_kwargs = {'node_id': '001', 'filename': common.REMOTED_STATS } @@ -462,26 +461,26 @@ async def test_get_stats_remoted_node(mock_exc, mock_dapi, mock_remove, mock_dfu is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_log_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_log_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_log_node' endpoint is working as expected.""" with 
patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_log_node(request=mock_request, - node_id='001') + result = await get_log_node(node_id='001') f_kwargs = {'node_id': '001', 'offset': 0, 'limit': None, @@ -501,55 +500,54 @@ async def test_get_log_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_r is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_log_summary_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_log_summary_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_log_summary_node' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_log_summary_node(request=mock_request, - node_id='001') - f_kwargs = {'node_id': '001' - } + result = await get_log_summary_node(node_id='001') + f_kwargs = {'node_id': '001'} mock_dapi.assert_called_once_with(f=manager.ossec_log_summary, f_kwargs=mock_remove.return_value, request_type='distributed_master', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_api_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_api_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_api_config' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_api_config(request=mock_request) - f_kwargs = {'node_list': '*' - } + result = await get_api_config() + f_kwargs = {'node_list': '*'} 
mock_dapi.assert_called_once_with(f=manager.get_api_config, f_kwargs=mock_remove.return_value, request_type='distributed_master', @@ -557,27 +555,27 @@ async def test_get_api_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_put_restart(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_put_restart(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'put_restart' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await put_restart(request=mock_request) - f_kwargs = {'node_list': '*' - } + result = await put_restart() + f_kwargs = {'node_list': '*'} mock_dapi.assert_called_once_with(f=manager.restart, f_kwargs=mock_remove.return_value, request_type='distributed_master', @@ -585,27 +583,27 @@ async def test_put_restart(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_conf_validation(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_conf_validation(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_conf_validation' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await get_conf_validation(request=mock_request) - f_kwargs = {'node_list': '*' - } + result = await get_conf_validation() + f_kwargs = {'node_list': '*'} mock_dapi.assert_called_once_with(f=manager.validation, f_kwargs=mock_remove.return_value, request_type='distributed_master', @@ -613,29 +611,28 @@ 
async def test_get_conf_validation(mock_exc, mock_dapi, mock_remove, mock_dfunc, wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) @patch('api.controllers.cluster_controller.check_component_configuration_pair') -async def test_get_node_config(mock_check_pair, mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_node_config(mock_check_pair, mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_node_config' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - kwargs_param = {'configuration': 'configuration_value' - } - result = await get_node_config(request=mock_request, - node_id='001', + kwargs_param = {'configuration': 'configuration_value'} + result = await get_node_config(node_id='001', component='component_value', **kwargs_param) f_kwargs = {'node_id': '001', @@ -649,7 +646,7 @@ async def test_get_node_config(mock_check_pair, mock_exc, mock_dapi, mock_remove is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), @@ -657,21 +654,21 @@ async def test_get_node_config(mock_check_pair, mock_exc, mock_dapi, mock_remove call(mock_dfunc.return_value)]) assert mock_exc.call_count == 3 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["cluster_controller"], indirect=True) @patch('api.controllers.cluster_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.cluster_controller.remove_nones_to_dict') @patch('api.controllers.cluster_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.cluster_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_update_configuration(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_update_configuration(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'update_configuration' endpoint is working as expected.""" with patch('api.controllers.cluster_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: with patch('api.controllers.cluster_controller.Body.validate_content_type'): with patch('api.controllers.cluster_controller.Body.decode_body') as mock_dbody: - result = await 
update_configuration(request=mock_request, - node_id='001', + result = await update_configuration(node_id='001', body={}) f_kwargs = {'node_id': '001', 'new_conf': mock_dbody.return_value @@ -682,11 +679,11 @@ async def test_update_configuration(mock_exc, mock_dapi, mock_remove, mock_dfunc is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_exc.return_value ) mock_exc.assert_has_calls([call(mock_snodes.return_value), call(mock_dfunc.return_value)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_controller_util.py b/api/api/controllers/test/test_controller_util.py new file mode 100644 index 00000000000..9e8cbc4cf25 --- /dev/null +++ b/api/api/controllers/test/test_controller_util.py @@ -0,0 +1,17 @@ +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 +"""api.controllers.util module unit tests.""" + +import pytest +from connexion.lifecycle import ConnexionResponse +from api.controllers.util import json_response + +@pytest.mark.parametrize('pretty, body', [(False, '{"a": "1", "b": "2"}'), + (True, '{\n "a": "1",\n "b": "2"\n}')]) +def test_json_response(pretty, body): + """Verify that the response body is converted to JSON and prettified.""" + data = {"a": "1", "b": "2"} + response = json_response(data=data, pretty=pretty) + assert isinstance(response, ConnexionResponse) + assert response.body == body diff --git a/api/api/controllers/test/test_decoder_controller.py b/api/api/controllers/test/test_decoder_controller.py index 599d78c08a8..fd283af4a8c 100644 --- a/api/api/controllers/test/test_decoder_controller.py +++ b/api/api/controllers/test/test_decoder_controller.py @@ -6,9 +6,8 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response -from api.controllers.test.utils import CustomAffectedItems from connexion.lifecycle import ConnexionResponse +from api.controllers.test.utils import CustomAffectedItems with patch('wazuh.common.wazuh_uid'): with patch('wazuh.common.wazuh_gid'): @@ -26,13 +25,14 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["decoder_controller"], indirect=True) @patch('api.controllers.decoder_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.decoder_controller.remove_nones_to_dict') @patch('api.controllers.decoder_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.decoder_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_decoders(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_decoders(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_decoders' endpoint is working as expected.""" - result = await get_decoders(request=mock_request) + result = await get_decoders() f_kwargs = {'names': None, 'offset': 0, 'limit': None, @@ -53,21 +53,22 @@ async def test_get_decoders(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_r is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] )
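The new api/api/controllers/test/test_controller_util.py above exercises the json_response helper that the migrated controllers now return instead of web_response.Response. Its implementation is outside this excerpt; a minimal sketch consistent with the test's expectations (the pretty indent width, status code and content type handling are assumptions) might be:

# Hypothetical sketch of api/controllers/util.py; the helper actually introduced
# by this change may differ in how it handles status codes and content types.
import json

from connexion.lifecycle import ConnexionResponse


def json_response(data, pretty: bool = False, status_code: int = 200) -> ConnexionResponse:
    """Serialize data to JSON, optionally indented, and wrap it in a ConnexionResponse."""
    body = json.dumps(data, indent=3 if pretty else None)  # indent width assumed
    return ConnexionResponse(body=body,
                             status_code=status_code,
                             content_type='application/json')

With pretty=False this yields the compact body asserted by the first parametrized case; with pretty=True it yields the indented variant asserted by the second.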
mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["decoder_controller"], indirect=True) @patch('api.controllers.decoder_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.decoder_controller.remove_nones_to_dict') @patch('api.controllers.decoder_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.decoder_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_decoders_files(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_decoders_files(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_decoders_files' endpoint is working as expected.""" - result = await get_decoders_files(request=mock_request) + result = await get_decoders_files() f_kwargs = {'offset': 0, 'limit': None, 'sort_by': ['filename'], @@ -87,21 +88,22 @@ async def test_get_decoders_files(mock_exc, mock_dapi, mock_remove, mock_dfunc, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["decoder_controller"], indirect=True) @patch('api.controllers.decoder_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.decoder_controller.remove_nones_to_dict') @patch('api.controllers.decoder_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.decoder_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_decoders_parents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_decoders_parents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_decoders_parents' endpoint is working as expected.""" - result = await get_decoders_parents(request=mock_request) + result = await get_decoders_parents() f_kwargs = {'offset': 0, 'limit': None, 'select': None, @@ -117,23 +119,24 @@ async def test_get_decoders_parents(mock_exc, mock_dapi, mock_remove, mock_dfunc is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["decoder_controller"], indirect=True) @patch('api.controllers.decoder_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.decoder_controller.remove_nones_to_dict') @patch('api.controllers.decoder_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.decoder_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_bool', [True, False]) -async def test_get_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_bool, 
mock_request=MagicMock()): +async def test_get_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_bool, mock_request): """Verify 'get_file' endpoint is working as expected.""" with patch('api.controllers.decoder_controller.isinstance', return_value=mock_bool) as mock_isinstance: - result = await get_file(request=mock_request) + result = await get_file() f_kwargs = {'filename': None, 'raw': False, 'relative_dirname': None @@ -144,26 +147,27 @@ async def test_get_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_bool, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) if mock_isinstance.return_value: - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) else: assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["decoder_controller"], indirect=True) @patch('api.controllers.decoder_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.decoder_controller.remove_nones_to_dict') @patch('api.controllers.decoder_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.decoder_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_put_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_put_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'put_file' endpoint is working as expected.""" with patch('api.controllers.decoder_controller.Body.validate_content_type'): with patch('api.controllers.decoder_controller.Body.decode_body') as mock_dbody: - result = await put_file(request=mock_request, + result = await put_file( body={}) f_kwargs = {'filename': None, 'relative_dirname': None, @@ -176,21 +180,22 @@ async def test_put_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_reque is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["decoder_controller"], indirect=True) @patch('api.controllers.decoder_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.decoder_controller.remove_nones_to_dict') @patch('api.controllers.decoder_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.decoder_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_delete_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_delete_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'delete_file' endpoint is working as expected.""" - result = await delete_file(request=mock_request) + result = await delete_file() f_kwargs = {'filename': None, 'relative_dirname': None} @@ -200,8 +205,8 @@ async def test_delete_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re is_async=False, wait_for_complete=False, logger=ANY, - 
rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_default_controller.py b/api/api/controllers/test/test_default_controller.py index 78f59a70fad..5cbab796684 100644 --- a/api/api/controllers/test/test_default_controller.py +++ b/api/api/controllers/test/test_default_controller.py @@ -6,15 +6,14 @@ from unittest.mock import MagicMock, patch import pytest -from aiohttp import web_response +from connexion.lifecycle import ConnexionResponse with patch('wazuh.common.wazuh_uid'): with patch('wazuh.common.wazuh_gid'): sys.modules['wazuh.rbac.orm'] = MagicMock() import wazuh.rbac.decorators from api.controllers.default_controller import (BasicInfo, DATE_FORMAT, - datetime, default_info, - socket) + default_info, socket) from wazuh.tests.util import RBAC_bypasser from wazuh.core.utils import get_utc_now wazuh.rbac.decorators.expose_resources = RBAC_bypasser @@ -38,4 +37,4 @@ async def test_default_info(mock_wresult, mock_lspec): } mock_lspec.assert_called_once_with() mock_wresult.assert_called_once_with({'data': BasicInfo.from_dict(data)}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_event_controller.py b/api/api/controllers/test/test_event_controller.py index 982e564f526..fa8045fcc21 100644 --- a/api/api/controllers/test/test_event_controller.py +++ b/api/api/controllers/test/test_event_controller.py @@ -6,7 +6,7 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response +from connexion.lifecycle import ConnexionResponse from api.controllers.test.utils import CustomAffectedItems @@ -24,19 +24,20 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["event_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.event_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.event_controller.remove_nones_to_dict') @patch('api.controllers.event_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.event_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_forward_event(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_forward_event(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'forward_event' endpoint is working as expected.""" with patch('api.controllers.event_controller.Body.validate_content_type'): with patch( 'api.controllers.event_controller.EventIngestModel.get_kwargs', return_value=AsyncMock() ) as mock_getkwargs: - result = await forward_event(request=mock_request) + result = await forward_event() mock_dapi.assert_called_once_with( f=send_event_to_analysisd, f_kwargs=mock_remove.return_value, @@ -44,8 +45,8 @@ async def test_forward_event(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(mock_getkwargs.return_value) - assert 
isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_experimental_controller.py b/api/api/controllers/test/test_experimental_controller.py index 00933d6f466..c03754b6d05 100644 --- a/api/api/controllers/test/test_experimental_controller.py +++ b/api/api/controllers/test/test_experimental_controller.py @@ -6,7 +6,7 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response +from connexion.lifecycle import ConnexionResponse from api.controllers.test.utils import CustomAffectedItems from wazuh.core.exception import WazuhResourceNotFound @@ -27,6 +27,7 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["experimental_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.experimental_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.experimental_controller.remove_nones_to_dict') @@ -34,9 +35,9 @@ @patch('api.controllers.experimental_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_alist', ['001', 'all']) async def test_clear_rootcheck_database(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, - mock_alist, mock_request=MagicMock()): + mock_alist, mock_request): """Verify 'clear_rootcheck_database' endpoint is working as expected.""" - result = await clear_rootcheck_database(request=mock_request, + result = await clear_rootcheck_database( agents_list=mock_alist) if 'all' in mock_alist: mock_alist = '*' @@ -49,14 +50,15 @@ async def test_clear_rootcheck_database(mock_exc, mock_dapi, mock_remove, mock_d wait_for_complete=False, logger=ANY, broadcasting=mock_alist == '*', - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["experimental_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.experimental_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.experimental_controller.remove_nones_to_dict') @@ -64,9 +66,9 @@ async def test_clear_rootcheck_database(mock_exc, mock_dapi, mock_remove, mock_d @patch('api.controllers.experimental_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_alist', ['001', 'all']) async def test_clear_syscheck_database(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, - mock_alist, mock_request=MagicMock()): + mock_alist, mock_request): """Verify 'clear_syscheck_database' endpoint is working as expected.""" - result = await clear_syscheck_database(request=mock_request, + result = await clear_syscheck_database( agents_list=mock_alist) if 'all' in mock_alist: mock_alist = '*' @@ -79,22 +81,23 @@ async def test_clear_syscheck_database(mock_exc, mock_dapi, mock_remove, mock_df wait_for_complete=False, logger=ANY, broadcasting=mock_alist == '*', - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + 
assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["experimental_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.experimental_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.experimental_controller.remove_nones_to_dict') @patch('api.controllers.experimental_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.experimental_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_cis_cat_results(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_cis_cat_results(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_cis_cat_results' endpoint is working as expected.""" - result = await get_cis_cat_results(request=mock_request) + result = await get_cis_cat_results() f_kwargs = {'agent_list': '*', 'offset': 0, 'limit': None, @@ -109,7 +112,7 @@ async def test_get_cis_cat_results(mock_exc, mock_dapi, mock_remove, mock_dfunc, 'notchecked': None, 'unknown': None, 'score': None, - 'pass': mock_request.query.get('pass', None) + 'pass': mock_request.query_params.get('pass', None) } } mock_dapi.assert_called_once_with(f=ciscat.get_ciscat_results, @@ -119,28 +122,29 @@ async def test_get_cis_cat_results(mock_exc, mock_dapi, mock_remove, mock_dfunc, wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["experimental_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.experimental_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.experimental_controller.remove_nones_to_dict') @patch('api.controllers.experimental_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.experimental_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_hardware_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_hardware_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_hardware_info' endpoint is working as expected.""" - result = await get_hardware_info(request=mock_request) + result = await get_hardware_info() filters = { 'board_serial': None } nested = ['ram.free', 'ram.total', 'cpu.cores', 'cpu.mhz', 'cpu.name'] for field in nested: - filters[field] = mock_request.query.get(field, None) + filters[field] = mock_request.query_params.get(field, None) f_kwargs = {'agent_list': '*', 'offset': 0, 'limit': None, @@ -157,23 +161,24 @@ async def test_get_hardware_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, m wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio 
+@pytest.mark.parametrize("mock_request", ["experimental_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.experimental_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.experimental_controller.remove_nones_to_dict') @patch('api.controllers.experimental_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.experimental_controller.raise_if_exc', return_value=CustomAffectedItems()) async def test_get_network_address_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, - mock_request=MagicMock()): + mock_request): """Verify 'get_network_address_info' endpoint is working as expected.""" - result = await get_network_address_info(request=mock_request) + result = await get_network_address_info() f_kwargs = {'agent_list': '*', 'offset': 0, 'limit': None, @@ -196,32 +201,33 @@ async def test_get_network_address_info(mock_exc, mock_dapi, mock_remove, mock_d wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["experimental_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.experimental_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.experimental_controller.remove_nones_to_dict') @patch('api.controllers.experimental_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.experimental_controller.raise_if_exc', return_value=CustomAffectedItems()) async def test_get_network_interface_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, - mock_request=MagicMock()): + mock_request): """Verify 'get_network_interface_info' endpoint is working as expected.""" - result = await get_network_interface_info(request=mock_request) + result = await get_network_interface_info() filters = { 'adapter': None, - 'type': mock_request.query.get('type', None), + 'type': mock_request.query_params.get('type', None), 'state': None, 'mtu': None } nested = ['tx.packets', 'rx.packets', 'tx.bytes', 'rx.bytes', 'tx.errors', 'rx.errors', 'tx.dropped', 'rx.dropped'] for field in nested: - filters[field] = mock_request.query.get(field, None) + filters[field] = mock_request.query_params.get(field, None) f_kwargs = {'agent_list': '*', 'offset': 0, 'limit': None, @@ -238,23 +244,24 @@ async def test_get_network_interface_info(mock_exc, mock_dapi, mock_remove, mock wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["experimental_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.experimental_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.experimental_controller.remove_nones_to_dict') 
@patch('api.controllers.experimental_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.experimental_controller.raise_if_exc', return_value=CustomAffectedItems()) async def test_get_network_protocol_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, - mock_request=MagicMock()): + mock_request): """Verify 'get_network_protocol_info' endpoint is working as expected.""" - result = await get_network_protocol_info(request=mock_request) + result = await get_network_protocol_info() f_kwargs = {'agent_list': '*', 'offset': 0, 'limit': None, @@ -263,7 +270,7 @@ async def test_get_network_protocol_info(mock_exc, mock_dapi, mock_remove, mock_ 'search': None, 'filters': { 'iface': None, - 'type': mock_request.query.get('type', None), + 'type': mock_request.query_params.get('type', None), 'gateway': None, 'dhcp': None }, @@ -276,22 +283,23 @@ async def test_get_network_protocol_info(mock_exc, mock_dapi, mock_remove, mock_ wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["experimental_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.experimental_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.experimental_controller.remove_nones_to_dict') @patch('api.controllers.experimental_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.experimental_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_os_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_os_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_os_info' endpoint is working as expected.""" - result = await get_os_info(request=mock_request) + result = await get_os_info() f_kwargs = {'agent_list': '*', 'offset': 0, 'limit': None, @@ -314,22 +322,23 @@ async def test_get_os_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ex wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["experimental_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.experimental_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.experimental_controller.remove_nones_to_dict') @patch('api.controllers.experimental_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.experimental_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_packages_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_packages_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_packages_info' 
endpoint is working as expected.""" - result = await get_packages_info(request=mock_request) + result = await get_packages_info() f_kwargs = {'agent_list': '*', 'offset': 0, 'limit': None, @@ -340,7 +349,7 @@ async def test_get_packages_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, m 'vendor': None, 'name': None, 'architecture': None, - 'format': mock_request.query.get('format', None), + 'format': mock_request.query_params.get('format', None), 'version': None }, 'element_type': 'packages' @@ -352,22 +361,23 @@ async def test_get_packages_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, m wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["experimental_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.experimental_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.experimental_controller.remove_nones_to_dict') @patch('api.controllers.experimental_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.experimental_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_ports_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_ports_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_ports_info' endpoint is working as expected.""" - result = await get_ports_info(request=mock_request) + result = await get_ports_info() filters = { 'pid': None, 'protocol': None, @@ -377,7 +387,7 @@ async def test_get_ports_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock } nested = ['local.ip', 'local.port', 'remote.ip'] for field in nested: - filters[field] = mock_request.query.get(field, None) + filters[field] = mock_request.query_params.get(field, None) f_kwargs = {'agent_list': '*', 'offset': 0, 'limit': None, @@ -394,22 +404,23 @@ async def test_get_ports_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["experimental_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.experimental_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.experimental_controller.remove_nones_to_dict') @patch('api.controllers.experimental_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.experimental_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_processes_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_processes_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_processes_info' endpoint is 
working as expected.""" - result = await get_processes_info(request=mock_request) + result = await get_processes_info() f_kwargs = {'agent_list': '*', 'offset': 0, 'limit': None, @@ -441,22 +452,23 @@ async def test_get_processes_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["experimental_controller"], indirect=True) @patch('api.configuration.api_conf') @patch('api.controllers.experimental_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.experimental_controller.remove_nones_to_dict') @patch('api.controllers.experimental_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.experimental_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_hotfixes_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request=MagicMock()): +async def test_get_hotfixes_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_exp, mock_request): """Verify 'get_hotfixes_info' endpoint is working as expected.""" - result = await get_hotfixes_info(request=mock_request) + result = await get_hotfixes_info() filters = {'hotfix': None } f_kwargs = {'agent_list': '*', @@ -475,20 +487,21 @@ async def test_get_hotfixes_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, m wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) +@pytest.mark.asyncio @patch('api.controllers.experimental_controller.raise_if_exc') -def test_check_experimental_feature_value(mock_exc): +async def test_check_experimental_feature_value(mock_exc): @check_experimental_feature_value - def func_(): + async def func_(): pass with patch('api.configuration.api_conf', new={'experimental_features': False}): - func_() + await func_() mock_exc.assert_called_once_with(WazuhResourceNotFound(1122)) with patch('api.configuration.api_conf', new={'experimental_features': True}): - func_() + await func_() diff --git a/api/api/controllers/test/test_manager_controller.py b/api/api/controllers/test/test_manager_controller.py index e8cd2e609a4..c34978e91cc 100644 --- a/api/api/controllers/test/test_manager_controller.py +++ b/api/api/controllers/test/test_manager_controller.py @@ -7,9 +7,10 @@ import pytest from aiohttp import web_response +from connexion.lifecycle import ConnexionResponse from api.constants import INSTALLATION_UID_KEY, UPDATE_INFORMATION_KEY from api.controllers.test.utils import CustomAffectedItems -from connexion.lifecycle import ConnexionResponse + with patch('wazuh.common.wazuh_uid'): with patch('wazuh.common.wazuh_gid'): @@ -31,57 +32,60 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) 
@patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_status(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_status(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_status' endpoint is working as expected.""" - result = await get_status(request=mock_request) + result = await get_status() mock_dapi.assert_called_once_with(f=manager.get_status, f_kwargs=mock_remove.return_value, request_type='local_any', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_info' endpoint is working as expected.""" - result = await get_info(request=mock_request) + result = await get_info() mock_dapi.assert_called_once_with(f=manager.get_basic_info, f_kwargs=mock_remove.return_value, request_type='local_any', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_bool', [True, False]) -async def test_get_configuration(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_bool, mock_request=MagicMock()): +async def test_get_configuration(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_bool, mock_request): """Verify 'get_configuration' endpoint is working as expected.""" with patch('api.controllers.manager_controller.isinstance', return_value=mock_bool) as mock_isinstance: - result = await get_configuration(request=mock_request) + result = await get_configuration() f_kwargs = {'section': None, 'field': None, 'raw': False, @@ -93,25 +97,25 @@ async def test_get_configuration(mock_exc, mock_dapi, mock_remove, mock_dfunc, m is_async=False, wait_for_complete=False, 
logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) if mock_isinstance.return_value: - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) else: assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_daemon_stats_node(mock_exc, mock_dapi, mock_remove, mock_dfunc): +async def test_get_daemon_stats_node(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_daemon_stats_node' function is working as expected.""" - mock_request = MagicMock() - result = await get_daemon_stats(request=mock_request, daemons_list=['daemon_1', 'daemon_2']) + result = await get_daemon_stats( daemons_list=['daemon_1', 'daemon_2']) f_kwargs = {'daemons_list': ['daemon_1', 'daemon_2']} mock_dapi.assert_called_once_with(f=stats.get_daemons_stats, @@ -120,22 +124,23 @@ async def test_get_daemon_stats_node(mock_exc, mock_dapi, mock_remove, mock_dfun is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies']) + rbac_permissions=mock_request.context['token_info']['rbac_policies']) mock_remove.assert_called_once_with(f_kwargs) mock_exc.assert_called_once_with(mock_dfunc.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_date', [None, 'date_value']) -async def test_get_stats(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_date, mock_request=MagicMock()): +async def test_get_stats(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_date, mock_request): """Verify 'get_stats' endpoint is working as expected.""" with patch('api.controllers.manager_controller.deserialize_date', return_value='desdate_value') as mock_desdate: - result = await get_stats(request=mock_request, + result = await get_stats( date=mock_date) if not mock_date: f_kwargs = {'date': ANY @@ -150,63 +155,66 @@ async def test_get_stats(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_date is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", 
["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_stats_hourly(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_stats_hourly(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_stats_hourly' endpoint is working as expected.""" - result = await get_stats_hourly(request=mock_request) + result = await get_stats_hourly() mock_dapi.assert_called_once_with(f=stats.hourly, f_kwargs=mock_remove.return_value, request_type='local_any', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_stats_weekly(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_stats_weekly(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_stats_weekly' endpoint is working as expected.""" - result = await get_stats_weekly(request=mock_request) + result = await get_stats_weekly() mock_dapi.assert_called_once_with(f=stats.weekly, f_kwargs=mock_remove.return_value, request_type='local_any', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_stats_analysisd(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_stats_analysisd(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_stats_analysisd' endpoint is working as expected.""" - result = await get_stats_analysisd(request=mock_request) + result = await get_stats_analysisd() f_kwargs = {'filename': common.ANALYSISD_STATS } mock_dapi.assert_called_once_with(f=stats.deprecated_get_daemons_stats, @@ -215,21 +223,22 @@ async def 
test_get_stats_analysisd(mock_exc, mock_dapi, mock_remove, mock_dfunc, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_stats_remoted(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_stats_remoted(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_stats_remoted' endpoint is working as expected.""" - result = await get_stats_remoted(request=mock_request) + result = await get_stats_remoted() f_kwargs = {'filename': common.REMOTED_STATS } mock_dapi.assert_called_once_with(f=stats.deprecated_get_daemons_stats, @@ -238,21 +247,22 @@ async def test_get_stats_remoted(mock_exc, mock_dapi, mock_remove, mock_dfunc, m is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_log(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_log(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_log' endpoint is working as expected.""" - result = await get_log(request=mock_request) + result = await get_log() f_kwargs = {'offset': 0, 'limit': None, 'sort_by': ['timestamp'], @@ -271,109 +281,113 @@ async def test_get_log(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_reques is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') 
@patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_log_summary(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_log_summary(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_log_summary' endpoint is working as expected.""" - result = await get_log_summary(request=mock_request) + result = await get_log_summary() mock_dapi.assert_called_once_with(f=manager.ossec_log_summary, f_kwargs=mock_remove.return_value, request_type='local_any', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_api_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_api_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_api_config' endpoint is working as expected.""" - result = await get_api_config(request=mock_request) + result = await get_api_config() mock_dapi.assert_called_once_with(f=manager.get_api_config, f_kwargs=mock_remove.return_value, request_type='local_any', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_put_restart(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_put_restart(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'put_restart' endpoint is working as expected.""" - result = await put_restart(request=mock_request) + result = await put_restart() mock_dapi.assert_called_once_with(f=manager.restart, f_kwargs=mock_remove.return_value, request_type='local_any', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with({}) - assert 
isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_conf_validation(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_conf_validation(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_conf_validation' endpoint is working as expected.""" - result = await get_conf_validation(request=mock_request) + result = await get_conf_validation() mock_dapi.assert_called_once_with(f=manager.validation, f_kwargs=mock_remove.return_value, request_type='local_any', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', return_value=CustomAffectedItems()) @patch('api.controllers.manager_controller.check_component_configuration_pair') -async def test_get_manager_config_ondemand(mock_check_pair, mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_manager_config_ondemand(mock_check_pair, mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_manager_config_ondemand' endpoint is working as expected.""" kwargs_param = {'configuration': 'configuration_value' } - result = await get_manager_config_ondemand(request=mock_request, - component='component1', + result = await get_manager_config_ondemand(component='component1', **kwargs_param) f_kwargs = {'component': 'component1', 'config': kwargs_param.get('configuration', None) @@ -384,24 +398,24 @@ async def test_get_manager_config_ondemand(mock_check_pair, mock_exc, mock_dapi, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.remove_nones_to_dict') @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.manager_controller.raise_if_exc', 
return_value=CustomAffectedItems()) -async def test_update_configuration(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_update_configuration(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'update_configuration' endpoint is working as expected.""" with patch('api.controllers.manager_controller.Body.validate_content_type'): with patch('api.controllers.manager_controller.Body.decode_body') as mock_dbody: - result = await update_configuration(request=mock_request, - body={}) + result = await update_configuration(body={}) f_kwargs = {'new_conf': mock_dbody.return_value} mock_dapi.assert_called_once_with(f=manager.update_ossec_conf, f_kwargs=mock_remove.return_value, @@ -409,17 +423,18 @@ async def test_update_configuration(mock_exc, mock_dapi, mock_remove, mock_dfunc is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.parametrize( "force_query,dapi_call_count,update_check", ([True, 2, True], [True, 1, False], [False, 1, True]) ) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.configuration.update_check_is_enabled') @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @@ -432,14 +447,14 @@ async def test_check_available_version( force_query, dapi_call_count, update_check, - mock_request=MagicMock() + mock_request ): """Verify 'check_available_version' endpoint is working as expected.""" app_context = {UPDATE_INFORMATION_KEY: {"foo": 1}, INSTALLATION_UID_KEY: "1234"} mock_request.app = app_context update_check_mock.return_value = update_check - result = await check_available_version(request=mock_request, force_query=force_query) + result = await check_available_version(force_query=force_query) assert mock_dapi.call_count == dapi_call_count if force_query and update_check: @@ -460,4 +475,4 @@ async def test_check_available_version( logger=ANY, ) mock_exc.assert_called_with(mock_dfunc.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_mitre_controller.py b/api/api/controllers/test/test_mitre_controller.py index 7e55932f111..537ce8a5658 100644 --- a/api/api/controllers/test/test_mitre_controller.py +++ b/api/api/controllers/test/test_mitre_controller.py @@ -6,7 +6,7 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response +from connexion.lifecycle import ConnexionResponse from api.controllers.test.utils import CustomAffectedItems with patch('wazuh.common.wazuh_uid'): @@ -26,33 +26,35 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["mitre_controller"], indirect=True) @patch('api.controllers.mitre_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.mitre_controller.remove_nones_to_dict') @patch('api.controllers.mitre_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.mitre_controller.raise_if_exc', 
return_value=CustomAffectedItems()) -async def test_get_metadata(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_metadata(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_metadata' endpoint is working as expected.""" - result = await get_metadata(request=mock_request) + result = await get_metadata() mock_dapi.assert_called_once_with(f=mitre.mitre_metadata, f_kwargs={}, request_type='local_any', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["mitre_controller"], indirect=True) @patch('api.controllers.mitre_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.mitre_controller.remove_nones_to_dict') @patch('api.controllers.mitre_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.mitre_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_groups(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_groups(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_groups' endpoint is working as expected.""" - result = await get_groups(request=mock_request) + result = await get_groups() f_kwargs = { 'filters': { 'id': None, @@ -73,21 +75,22 @@ async def test_get_groups(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_req is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["mitre_controller"], indirect=True) @patch('api.controllers.mitre_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.mitre_controller.remove_nones_to_dict') @patch('api.controllers.mitre_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.mitre_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_mitigations(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_mitigations(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_mitigations' endpoint is working as expected.""" - result = await get_mitigations(request=mock_request) + result = await get_mitigations() f_kwargs = { 'filters': { 'id': None, @@ -108,21 +111,22 @@ async def test_get_mitigations(mock_exc, mock_dapi, mock_remove, mock_dfunc, moc is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["mitre_controller"], indirect=True) 
@patch('api.controllers.mitre_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.mitre_controller.remove_nones_to_dict') @patch('api.controllers.mitre_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.mitre_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_references(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_references(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_references' endpoint is working as expected.""" - result = await get_references(request=mock_request) + result = await get_references() f_kwargs = { 'filters': { 'id': None, @@ -142,21 +146,22 @@ async def test_get_references(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["mitre_controller"], indirect=True) @patch('api.controllers.mitre_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.mitre_controller.remove_nones_to_dict') @patch('api.controllers.mitre_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.mitre_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_software(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_software(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_software' endpoint is working as expected.""" - result = await get_software(request=mock_request) + result = await get_software() f_kwargs = { 'filters': { 'id': None, @@ -177,21 +182,22 @@ async def test_get_software(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_r is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies']) + rbac_permissions=mock_request.context['token_info']['rbac_policies']) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["mitre_controller"], indirect=True) @patch('api.controllers.mitre_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.mitre_controller.remove_nones_to_dict') @patch('api.controllers.mitre_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.mitre_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_tactics(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_tactics(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_tactics' endpoint is working as expected.""" - result = await get_tactics(request=mock_request) + result = await get_tactics() f_kwargs = { 'filters': { 'id': None, @@ -212,21 +218,22 @@ async def test_get_tactics(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re is_async=False, wait_for_complete=False, logger=ANY, - 
rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["mitre_controller"], indirect=True) @patch('api.controllers.mitre_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.mitre_controller.remove_nones_to_dict') @patch('api.controllers.mitre_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.mitre_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_techniques(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_techniques(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_techniques' endpoint is working as expected.""" - result = await get_techniques(request=mock_request) + result = await get_techniques() f_kwargs = { 'filters': { 'id': None, @@ -247,8 +254,8 @@ async def test_get_techniques(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies']) + rbac_permissions=mock_request.context['token_info']['rbac_policies']) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_overview_controller.py b/api/api/controllers/test/test_overview_controller.py index d3296e3e376..7570cfc02c9 100644 --- a/api/api/controllers/test/test_overview_controller.py +++ b/api/api/controllers/test/test_overview_controller.py @@ -6,7 +6,7 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response +from connexion.lifecycle import ConnexionResponse from api.controllers.test.utils import CustomAffectedItems with patch('wazuh.common.wazuh_uid'): @@ -21,21 +21,22 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["overview_controller"], indirect=True) @patch('api.controllers.overview_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.overview_controller.remove_nones_to_dict') @patch('api.controllers.overview_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.overview_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_overview_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_overview_agents(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_overview_agents' endpoint is working as expected.""" - result = await get_overview_agents(request=mock_request) + result = await get_overview_agents() mock_dapi.assert_called_once_with(f=agent.get_full_overview, f_kwargs=mock_remove.return_value, request_type='local_master', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, 
ConnexionResponse) diff --git a/api/api/controllers/test/test_rootcheck_controller.py b/api/api/controllers/test/test_rootcheck_controller.py index 75489e1cc86..b9fb16f2948 100644 --- a/api/api/controllers/test/test_rootcheck_controller.py +++ b/api/api/controllers/test/test_rootcheck_controller.py @@ -6,7 +6,7 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response +from connexion.lifecycle import ConnexionResponse from api.controllers.test.utils import CustomAffectedItems with patch('wazuh.common.wazuh_uid'): @@ -24,13 +24,14 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["rootcheck_controller"], indirect=True) @patch('api.controllers.rootcheck_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.rootcheck_controller.remove_nones_to_dict') @patch('api.controllers.rootcheck_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.rootcheck_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_put_rootcheck(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_put_rootcheck(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'put_rootcheck' endpoint is working as expected.""" - result = await put_rootcheck(request=mock_request) + result = await put_rootcheck() f_kwargs = {'agent_list': '*' } mock_dapi.assert_called_once_with(f=rootcheck.run, @@ -40,21 +41,22 @@ async def test_put_rootcheck(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["rootcheck_controller"], indirect=True) @patch('api.controllers.rootcheck_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.rootcheck_controller.remove_nones_to_dict') @patch('api.controllers.rootcheck_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.rootcheck_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_delete_rootcheck(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_delete_rootcheck(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'delete_rootcheck' endpoint is working as expected.""" - result = await delete_rootcheck(request=mock_request) + result = await delete_rootcheck() f_kwargs = {'agent_list': [''] } mock_dapi.assert_called_once_with(f=rootcheck.clear, @@ -63,21 +65,22 @@ async def test_delete_rootcheck(mock_exc, mock_dapi, mock_remove, mock_dfunc, mo is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["rootcheck_controller"], indirect=True) 
@patch('api.controllers.rootcheck_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.rootcheck_controller.remove_nones_to_dict') @patch('api.controllers.rootcheck_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.rootcheck_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_rootcheck_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_rootcheck_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_rootcheck_agent' endpoint is working as expected.""" - result = await get_rootcheck_agent(request=mock_request) + result = await get_rootcheck_agent() f_kwargs = {'agent_list': [None], 'offset': 0, 'limit': None, @@ -98,21 +101,22 @@ async def test_get_rootcheck_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["rootcheck_controller"], indirect=True) @patch('api.controllers.rootcheck_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.rootcheck_controller.remove_nones_to_dict') @patch('api.controllers.rootcheck_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.rootcheck_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_last_scan_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_last_scan_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_last_scan_agent' endpoint is working as expected.""" - result = await get_last_scan_agent(request=mock_request) + result = await get_last_scan_agent() f_kwargs = {'agent_list': [None] } mock_dapi.assert_called_once_with(f=rootcheck.get_last_scan, @@ -121,8 +125,8 @@ async def test_get_last_scan_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_rule_controller.py b/api/api/controllers/test/test_rule_controller.py index 97e3fffd71d..b02f1460723 100644 --- a/api/api/controllers/test/test_rule_controller.py +++ b/api/api/controllers/test/test_rule_controller.py @@ -6,9 +6,9 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response -from api.controllers.test.utils import CustomAffectedItems from connexion.lifecycle import ConnexionResponse +from api.controllers.test.utils import CustomAffectedItems + with patch('wazuh.common.wazuh_uid'): with patch('wazuh.common.wazuh_gid'): @@ -28,13 +28,14 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["rule_controller"], indirect=True) @patch('api.controllers.rule_controller.DistributedAPI.distribute_function', 
return_value=AsyncMock()) @patch('api.controllers.rule_controller.remove_nones_to_dict') @patch('api.controllers.rule_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.rule_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_rules(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_rules(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_rules' endpoint is working as expected.""" - result = await get_rules(request=mock_request) + result = await get_rules() f_kwargs = {'rule_ids': None, 'offset': 0, 'limit': None, @@ -53,7 +54,7 @@ async def test_get_rules(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_requ 'gdpr': None, 'gpg13': None, 'hipaa': None, - 'nist_800_53': mock_request.query.get('nist-800-53', None), + 'nist_800_53': mock_request.query_params.get('nist-800-53', None), 'tsc': None, 'mitre': None, 'distinct': False @@ -64,21 +65,22 @@ async def test_get_rules(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_requ is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["rule_controller"], indirect=True) @patch('api.controllers.rule_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.rule_controller.remove_nones_to_dict') @patch('api.controllers.rule_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.rule_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_rules_groups(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_rules_groups(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_rules_groups' endpoint is working as expected.""" - result = await get_rules_groups(request=mock_request) + result = await get_rules_groups() f_kwargs = {'offset': 0, 'limit': None, 'sort_by': [''], @@ -92,21 +94,22 @@ async def test_get_rules_groups(mock_exc, mock_dapi, mock_remove, mock_dfunc, mo is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["rule_controller"], indirect=True) @patch('api.controllers.rule_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.rule_controller.remove_nones_to_dict') @patch('api.controllers.rule_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.rule_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_rules_requirement(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_rules_requirement(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_rules_requirement' endpoint is working as expected.""" - result = await 
get_rules_requirement(request=mock_request, + result = await get_rules_requirement( requirement='-') f_kwargs = {'requirement': '_', 'sort_by': [''], @@ -122,21 +125,22 @@ async def test_get_rules_requirement(mock_exc, mock_dapi, mock_remove, mock_dfun is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["rule_controller"], indirect=True) @patch('api.controllers.rule_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.rule_controller.remove_nones_to_dict') @patch('api.controllers.rule_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.rule_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_rules_files(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_rules_files(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_rules_files' endpoint is working as expected.""" - result = await get_rules_files(request=mock_request) + result = await get_rules_files() f_kwargs = {'offset': 0, 'limit': None, 'sort_by': ['filename'], @@ -156,23 +160,24 @@ async def test_get_rules_files(mock_exc, mock_dapi, mock_remove, mock_dfunc, moc is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["rule_controller"], indirect=True) @patch('api.controllers.rule_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.rule_controller.remove_nones_to_dict') @patch('api.controllers.rule_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.rule_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_bool', [True, False]) -async def test_get_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_bool, mock_request=MagicMock()): +async def test_get_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_bool, mock_request): """Verify 'get_file' endpoint is working as expected.""" with patch('api.controllers.rule_controller.isinstance', return_value=mock_bool) as mock_isinstance: - result = await get_file(request=mock_request) + result = await get_file() f_kwargs = {'filename': None, 'raw': False, 'relative_dirname': None @@ -183,26 +188,27 @@ async def test_get_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_bool, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) if mock_isinstance.return_value: - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) else: assert 
isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["rule_controller"], indirect=True) @patch('api.controllers.rule_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.rule_controller.remove_nones_to_dict') @patch('api.controllers.rule_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.rule_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_put_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_put_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'put_file' endpoint is working as expected.""" with patch('api.controllers.rule_controller.Body.validate_content_type'): with patch('api.controllers.rule_controller.Body.decode_body') as mock_dbody: - result = await put_file(request=mock_request, + result = await put_file( body={}) f_kwargs = {'filename': None, 'overwrite': False, @@ -215,21 +221,22 @@ async def test_put_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_reque is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["rule_controller"], indirect=True) @patch('api.controllers.rule_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.rule_controller.remove_nones_to_dict') @patch('api.controllers.rule_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.rule_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_delete_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_delete_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'delete_file' endpoint is working as expected.""" - result = await delete_file(request=mock_request) + result = await delete_file() f_kwargs = {'filename': None, 'relative_dirname': None} @@ -239,8 +246,8 @@ async def test_delete_file(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_sca_controller.py b/api/api/controllers/test/test_sca_controller.py index 5d0635ffe4c..937992a6ca3 100644 --- a/api/api/controllers/test/test_sca_controller.py +++ b/api/api/controllers/test/test_sca_controller.py @@ -6,7 +6,7 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response +from connexion.lifecycle import ConnexionResponse from api.controllers.test.utils import CustomAffectedItems with patch('wazuh.common.wazuh_uid'): @@ -23,13 +23,14 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["sca_controller"], indirect=True) @patch('api.controllers.sca_controller.DistributedAPI.distribute_function', 
return_value=AsyncMock()) @patch('api.controllers.sca_controller.remove_nones_to_dict') @patch('api.controllers.sca_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.sca_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_sca_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_sca_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_sca_agent' endpoint is working as expected.""" - result = await get_sca_agent(request=mock_request) + result = await get_sca_agent() filters = {'name': None, 'description': None, 'references': None @@ -50,21 +51,22 @@ async def test_get_sca_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["sca_controller"], indirect=True) @patch('api.controllers.sca_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.sca_controller.remove_nones_to_dict') @patch('api.controllers.sca_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.sca_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_sca_checks(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_sca_checks(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_sca_checks' endpoint is working as expected.""" - result = await get_sca_checks(request=mock_request) + result = await get_sca_checks() filters = {'title': None, 'description': None, 'rationale': None, @@ -96,8 +98,8 @@ async def test_get_sca_checks(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_security_controller.py b/api/api/controllers/test/test_security_controller.py index e5ec52a5af9..06da1892677 100644 --- a/api/api/controllers/test/test_security_controller.py +++ b/api/api/controllers/test/test_security_controller.py @@ -6,8 +6,10 @@ from unittest.mock import ANY, AsyncMock, MagicMock, call, patch import pytest -from aiohttp import web_response +from connexion.lifecycle import ConnexionResponse +from connexion.testing import TestContext +from api.controllers.util import JSON_CONTENT_TYPE from api.controllers.test.utils import CustomAffectedItems with patch('wazuh.common.wazuh_uid'): @@ -35,6 +37,24 @@ del sys.modules['wazuh.rbac.orm'] +@pytest.fixture +def mock_request(): + """fixture to wrap functions with request""" + + operation = MagicMock(name="operation") + operation.method = "post" + with TestContext(operation=operation): + with patch('api.controllers.security_controller.request', MagicMock) as m_req: + m_req.json = AsyncMock(side_effect=lambda: {'ctx': ''} ) + m_req.get = 
MagicMock(return_value=None) + m_req.query_params = MagicMock() + m_req.query_params.get = MagicMock(return_value=None) + m_req.context = { + 'token_info': {'sub': 'wazuh', 'run_as': 'manager', 'rbac_policies': {}} + } + yield m_req + + @pytest.mark.asyncio @pytest.mark.parametrize('raw', [True, False]) @patch('api.controllers.security_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @@ -42,12 +62,10 @@ @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) @patch('api.controllers.security_controller.generate_token', return_value='token') -async def test_login_user(mock_token, mock_exc, mock_dapi, mock_remove, mock_dfunc, raw): +async def test_login_user(mock_token, mock_exc, mock_dapi, mock_remove, mock_dfunc, raw, mock_request): """Verify 'login_user' endpoint is working as expected.""" - result = await login_user(user='001', - raw=raw) - f_kwargs = {'user_id': '001' - } + result = await login_user(user='001', raw=raw) + f_kwargs = {'user_id': '001'} mock_dapi.assert_called_once_with(f=preprocessor.get_permissions, f_kwargs=mock_remove.return_value, request_type='local_master', @@ -56,9 +74,10 @@ async def test_login_user(mock_token, mock_exc, mock_dapi, mock_remove, mock_dfu ) mock_remove.assert_called_once_with(f_kwargs) mock_exc.assert_called_once_with(mock_dfunc.return_value) - mock_token.assert_called_once_with(user_id=f_kwargs['user_id'], data=mock_exc.return_value.dikt) - assert isinstance(result, web_response.Response) - assert result.content_type == 'text/plain' if raw else result.content_type == 'application/json' + mock_token.assert_called_once_with(user_id=f_kwargs['user_id'], + data=mock_exc.return_value.dikt) + assert isinstance(result, ConnexionResponse) + assert result.content_type == 'text/plain' if raw else result.content_type == JSON_CONTENT_TYPE @pytest.mark.asyncio @@ -71,10 +90,8 @@ async def test_login_user(mock_token, mock_exc, mock_dapi, mock_remove, mock_dfu async def test_login_user_ko(mock_token, mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_bool): """Verify 'login_user' endpoint is handling WazuhException as expected.""" mock_token.side_effect = WazuhException(999) - result = await login_user(user='001', - raw=mock_bool) - f_kwargs = {'user_id': '001' - } + result = await login_user(user='001', raw=mock_bool) + f_kwargs = {'user_id': '001'} mock_dapi.assert_called_once_with(f=preprocessor.get_permissions, f_kwargs=mock_remove.return_value, request_type='local_master', @@ -84,7 +101,7 @@ async def test_login_user_ko(mock_token, mock_exc, mock_dapi, mock_remove, mock_ mock_exc.assert_has_calls([call(mock_dfunc.return_value), call(mock_token.side_effect)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -94,15 +111,12 @@ async def test_login_user_ko(mock_token, mock_exc, mock_dapi, mock_remove, mock_ @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) @patch('api.controllers.security_controller.generate_token', return_value='token') -async def test_run_as_login(mock_token, mock_exc, mock_dapi, mock_remove, mock_dfunc, raw, mock_request=AsyncMock()): +async def test_run_as_login(mock_token, mock_exc, mock_dapi, mock_remove, 
mock_dfunc, + raw, mock_request): """Verify 'run_as_login' endpoint is working as expected.""" - result = await run_as_login(request=mock_request, - user='001', - raw=raw) + result = await run_as_login(user='001', raw=raw) auth_context = await mock_request.json() - f_kwargs = {'user_id': '001', - 'auth_context': auth_context - } + f_kwargs = {'user_id': '001', 'auth_context': auth_context} mock_dapi.assert_called_once_with(f=preprocessor.get_permissions, f_kwargs=mock_remove.return_value, request_type='local_master', @@ -113,8 +127,8 @@ async def test_run_as_login(mock_token, mock_exc, mock_dapi, mock_remove, mock_d mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_token.assert_called_once_with(user_id=f_kwargs['user_id'], data=mock_exc.return_value.dikt, auth_context=auth_context) - assert isinstance(result, web_response.Response) - assert result.content_type == 'text/plain' if raw else result.content_type == 'application/json' + assert isinstance(result, ConnexionResponse) + assert result.content_type == 'text/plain' if raw else result.content_type == JSON_CONTENT_TYPE @pytest.mark.asyncio @@ -124,16 +138,12 @@ async def test_run_as_login(mock_token, mock_exc, mock_dapi, mock_remove, mock_d @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) @patch('api.controllers.security_controller.generate_token', return_value='token') @pytest.mark.parametrize('mock_bool', [True, False]) -async def test_run_as_login_ko(mock_token, mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_bool, - mock_request=AsyncMock()): +async def test_run_as_login_ko(mock_token, mock_exc, mock_dapi, mock_remove, mock_dfunc, + mock_bool, mock_request): """Verify 'run_as_login' endpoint is handling WazuhException as expected.""" mock_token.side_effect = WazuhException(999) - result = await run_as_login(request=mock_request, - user='001', - raw=mock_bool) - f_kwargs = {'user_id': '001', - 'auth_context': await mock_request.json() - } + result = await run_as_login(user='001', raw=mock_bool) + f_kwargs = {'user_id': '001', 'auth_context': await mock_request.json()} mock_dapi.assert_called_once_with(f=preprocessor.get_permissions, f_kwargs=mock_remove.return_value, request_type='local_master', @@ -143,7 +153,7 @@ async def test_run_as_login_ko(mock_token, mock_exc, mock_dapi, mock_remove, moc mock_exc.assert_has_calls([call(mock_dfunc.return_value), call(mock_token.side_effect)]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -151,10 +161,10 @@ async def test_run_as_login_ko(mock_token, mock_exc, mock_dapi, mock_remove, moc @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_user_me(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_user_me(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_user_me' endpoint is working as expected.""" - result = await get_user_me(request=mock_request) - f_kwargs = {'token': mock_request['token_info'] + result = await get_user_me() + f_kwargs = {'token': mock_request.context['token_info'] } mock_dapi.assert_called_once_with(f=security.get_user_me, f_kwargs=mock_remove.return_value, @@ -162,40 +172,40 @@ async 
def test_get_user_me(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re is_async=False, logger=ANY, wait_for_complete=False, - current_user=mock_request['token_info']['sub'], - rbac_permissions=mock_request['token_info']['rbac_policies'] + current_user=mock_request.context['token_info']['sub'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio -async def test_get_user_me_policies(mock_request=MagicMock()): +async def test_get_user_me_policies(mock_request): """Verify 'get_user_me_policies' endpoint is working as expected.""" with patch('api.controllers.security_controller.WazuhResult', return_value='mock_wr_result') as mock_wr: - result = await get_user_me_policies(request=mock_request) - mock_wr.assert_called_once_with({'data': mock_request['token_info']['rbac_policies'], + result = await get_user_me_policies() + mock_wr.assert_called_once_with({'data': mock_request.context['token_info']['rbac_policies'], 'message': "Current user processed policies information was returned"}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @patch('api.controllers.security_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_logout_user(mock_exc, mock_dapi, mock_dfunc, mock_request=MagicMock()): +async def test_logout_user(mock_exc, mock_dapi, mock_dfunc, mock_request): """Verify 'logout_user' endpoint is working as expected.""" - result = await logout_user(request=mock_request) + result = await logout_user() mock_dapi.assert_called_once_with(f=security.revoke_current_user_tokens, request_type='local_master', is_async=False, logger=ANY, wait_for_complete=False, - current_user=mock_request['token_info']['sub'] + current_user=mock_request.context['token_info']['sub'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -203,9 +213,9 @@ async def test_logout_user(mock_exc, mock_dapi, mock_dfunc, mock_request=MagicMo @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_users(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_users(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_users' endpoint is working as expected.""" - result = await get_users(request=mock_request) + result = await get_users() f_kwargs = {'user_ids': None, 'offset': 0, 'limit': None, @@ -223,11 +233,11 @@ async def test_get_users(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_requ is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, 
web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -235,11 +245,9 @@ async def test_get_users(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_requ @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_edit_run_as(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_edit_run_as(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'edit_run_as' endpoint is working as expected.""" - result = await edit_run_as(request=mock_request, - user_id='001', - allow_run_as=False) + result = await edit_run_as(user_id='001', allow_run_as=False) f_kwargs = {'user_id': '001', 'allow_run_as': False } @@ -249,12 +257,12 @@ async def test_edit_run_as(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re is_async=False, logger=ANY, wait_for_complete=False, - current_user=mock_request['token_info']['sub'], - rbac_permissions=mock_request['token_info']['rbac_policies'] + current_user=mock_request.context['token_info']['sub'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -262,23 +270,24 @@ async def test_edit_run_as(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_create_user(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_create_user(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'create_user' endpoint is working as expected.""" with patch('api.controllers.security_controller.Body.validate_content_type'): with patch('api.controllers.security_controller.CreateUserModel.get_kwargs', return_value=AsyncMock()) as mock_getkwargs: - result = await create_user(request=mock_request) - mock_dapi.assert_called_once_with(f=security.create_user, - f_kwargs=mock_remove.return_value, - request_type='local_master', - is_async=False, - logger=ANY, - wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] - ) + result = await create_user() + mock_dapi.assert_called_once_with( + f=security.create_user, + f_kwargs=mock_remove.return_value, + request_type='local_master', + is_async=False, + logger=ANY, + wait_for_complete=False, + rbac_permissions=mock_request.context['token_info']['rbac_policies'] + ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(mock_getkwargs.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -286,24 +295,24 @@ async def test_create_user(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def 
test_update_user(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_update_user(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'update_user' endpoint is working as expected.""" with patch('api.controllers.security_controller.Body.validate_content_type'): with patch('api.controllers.security_controller.CreateUserModel.get_kwargs', return_value=AsyncMock()) as mock_getkwargs: - result = await update_user(request=mock_request, - user_id='001') - mock_dapi.assert_called_once_with(f=security.update_user, - f_kwargs=mock_remove.return_value, - request_type='local_master', - is_async=False, - logger=ANY, - wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] - ) + result = await update_user(user_id='001') + mock_dapi.assert_called_once_with( + f=security.update_user, + f_kwargs=mock_remove.return_value, + request_type='local_master', + is_async=False, + logger=ANY, + wait_for_complete=False, + rbac_permissions=mock_request.context['token_info']['rbac_policies'] + ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(mock_getkwargs.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -312,26 +321,25 @@ async def test_update_user(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_uids', ['001', 'all']) -async def test_delete_users(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_uids, mock_request=MagicMock()): +async def test_delete_users(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_uids, mock_request): """Verify 'delete_users' endpoint is working as expected.""" - result = await delete_users(request=mock_request, - user_ids=mock_uids) + result = await delete_users(user_ids=mock_uids) if 'all' in mock_uids: mock_uids = None - f_kwargs = {'user_ids': mock_uids - } - mock_dapi.assert_called_once_with(f=security.remove_users, - f_kwargs=mock_remove.return_value, - request_type='local_master', - is_async=False, - logger=ANY, - wait_for_complete=False, - current_user=mock_request['token_info']['sub'], - rbac_permissions=mock_request['token_info']['rbac_policies'] - ) + f_kwargs = {'user_ids': mock_uids} + mock_dapi.assert_called_once_with( + f=security.remove_users, + f_kwargs=mock_remove.return_value, + request_type='local_master', + is_async=False, + logger=ANY, + wait_for_complete=False, + current_user=mock_request.context['token_info']['sub'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'] + ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -339,9 +347,9 @@ async def test_delete_users(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_u @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_roles(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_roles(mock_exc, mock_dapi, mock_remove, mock_dfunc, 
mock_request): """Verify 'get_roles' endpoint is working as expected.""" - result = await get_roles(request=mock_request) + result = await get_roles() f_kwargs = {'role_ids': None, 'offset': 0, 'limit': None, @@ -359,11 +367,11 @@ async def test_get_roles(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_requ is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -371,23 +379,23 @@ async def test_get_roles(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_requ @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_add_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_add_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'add_role' endpoint is working as expected.""" with patch('api.controllers.security_controller.Body.validate_content_type'): with patch('api.controllers.security_controller.RoleModel.get_kwargs', return_value=AsyncMock()) as mock_getkwargs: - result = await add_role(request=mock_request) + result = await add_role() mock_dapi.assert_called_once_with(f=security.add_role, f_kwargs=mock_remove.return_value, request_type='local_master', is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(mock_getkwargs.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -396,9 +404,9 @@ async def test_add_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_reque @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_uids', ['001', 'all']) -async def test_remove_roles(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_uids, mock_request=MagicMock()): +async def test_remove_roles(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_uids, mock_request): """Verify 'remove_roles' endpoint is working as expected.""" - result = await remove_roles(request=mock_request, + result = await remove_roles( role_ids=mock_uids) if 'all' in mock_uids: mock_uids = None @@ -410,11 +418,11 @@ async def test_remove_roles(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_u is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -422,12 +430,12 @@ async def test_remove_roles(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_u 
@patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_update_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_update_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'update_role' endpoint is working as expected.""" with patch('api.controllers.security_controller.Body.validate_content_type'): with patch('api.controllers.security_controller.RoleModel.get_kwargs', return_value=AsyncMock()) as mock_getkwargs: - result = await update_role(request=mock_request, + result = await update_role( role_id='001') mock_dapi.assert_called_once_with(f=security.update_role, f_kwargs=mock_remove.return_value, @@ -435,11 +443,11 @@ async def test_update_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(mock_getkwargs.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -447,9 +455,9 @@ async def test_update_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_rules(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_rules(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_rules' endpoint is working as expected.""" - result = await get_rules(request=mock_request) + result = await get_rules() f_kwargs = {'rule_ids': None, 'offset': 0, 'limit': None, @@ -467,11 +475,11 @@ async def test_get_rules(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_requ is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -479,23 +487,23 @@ async def test_get_rules(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_requ @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_add_rule(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_add_rule(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'add_rule' endpoint is working as expected.""" with patch('api.controllers.security_controller.Body.validate_content_type'): with patch('api.controllers.security_controller.RuleModel.get_kwargs', return_value=AsyncMock()) as mock_getkwargs: - result = await add_rule(request=mock_request) + result = await add_rule() 
mock_dapi.assert_called_once_with(f=security.add_rule, f_kwargs=mock_remove.return_value, request_type='local_master', is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(mock_getkwargs.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -503,12 +511,12 @@ async def test_add_rule(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_reque @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_update_rule(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_update_rule(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'update_rule' endpoint is working as expected.""" with patch('api.controllers.security_controller.Body.validate_content_type'): with patch('api.controllers.security_controller.RuleModel.get_kwargs', return_value=AsyncMock()) as mock_getkwargs: - result = await update_rule(request=mock_request, + result = await update_rule( rule_id='001') mock_dapi.assert_called_once_with(f=security.update_rule, f_kwargs=mock_remove.return_value, @@ -516,11 +524,11 @@ async def test_update_rule(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(mock_getkwargs.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -529,9 +537,9 @@ async def test_update_rule(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_rids', ['001', 'all']) -async def test_remove_rules(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_rids, mock_request=MagicMock()): +async def test_remove_rules(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_rids, mock_request): """Verify 'remove_rules' endpoint is working as expected.""" - result = await remove_rules(request=mock_request, + result = await remove_rules( rule_ids=mock_rids) if 'all' in mock_rids: mock_rids = None @@ -543,11 +551,11 @@ async def test_remove_rules(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_r is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -555,9 +563,9 @@ async def test_remove_rules(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_r @patch('api.controllers.security_controller.remove_nones_to_dict') 
@patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_policies(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_policies(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_policies' endpoint is working as expected.""" - result = await get_policies(request=mock_request) + result = await get_policies() f_kwargs = {'policy_ids': None, 'offset': 0, 'limit': None, @@ -575,11 +583,11 @@ async def test_get_policies(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_r is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -587,23 +595,23 @@ async def test_get_policies(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_r @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_add_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_add_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'add_policy' endpoint is working as expected.""" with patch('api.controllers.security_controller.Body.validate_content_type'): with patch('api.controllers.security_controller.PolicyModel.get_kwargs', return_value=AsyncMock()) as mock_getkwargs: - result = await add_policy(request=mock_request) + result = await add_policy() mock_dapi.assert_called_once_with(f=security.add_policy, f_kwargs=mock_remove.return_value, request_type='local_master', is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(mock_getkwargs.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -612,9 +620,9 @@ async def test_add_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_req @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_pids', ['001', 'all']) -async def test_remove_policies(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_pids, mock_request=MagicMock()): +async def test_remove_policies(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_pids, mock_request): """Verify 'remove_policies' endpoint is working as expected.""" - result = await remove_policies(request=mock_request, + result = await remove_policies( policy_ids=mock_pids) if 'all' in mock_pids: mock_pids = None @@ -625,11 +633,11 @@ async def test_remove_policies(mock_exc, mock_dapi, mock_remove, mock_dfunc, moc is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + 
rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -637,12 +645,12 @@ async def test_remove_policies(mock_exc, mock_dapi, mock_remove, mock_dfunc, moc @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_update_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_update_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'update_policy' endpoint is working as expected.""" with patch('api.controllers.security_controller.Body.validate_content_type'): with patch('api.controllers.security_controller.PolicyModel.get_kwargs', return_value=AsyncMock()) as mock_getkwargs: - result = await update_policy(request=mock_request, + result = await update_policy( policy_id='001') mock_dapi.assert_called_once_with(f=security.update_policy, f_kwargs=mock_remove.return_value, @@ -650,11 +658,11 @@ async def test_update_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(mock_getkwargs.return_value) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -662,9 +670,9 @@ async def test_update_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_set_user_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_set_user_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'set_user_role' endpoint is working as expected.""" - result = await set_user_role(request=mock_request, + result = await set_user_role( user_id='001', role_ids='001') f_kwargs = {'user_id': '001', @@ -677,11 +685,11 @@ async def test_set_user_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -690,9 +698,9 @@ async def test_set_user_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_rids', ['001', 'all']) -async def test_remove_user_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_rids, 
mock_request=MagicMock()): +async def test_remove_user_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_rids, mock_request): """Verify 'remove_user_role' endpoint is working as expected.""" - result = await remove_user_role(request=mock_request, + result = await remove_user_role( user_id='001', role_ids=mock_rids) if 'all' in mock_rids: @@ -706,11 +714,11 @@ async def test_remove_user_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mo is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -718,9 +726,9 @@ async def test_remove_user_role(mock_exc, mock_dapi, mock_remove, mock_dfunc, mo @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_set_role_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_set_role_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'set_role_policy' endpoint is working as expected.""" - result = await set_role_policy(request=mock_request, + result = await set_role_policy( role_id='001', policy_ids='001') f_kwargs = {'role_id': '001', @@ -733,11 +741,11 @@ async def test_set_role_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, moc is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -746,9 +754,9 @@ async def test_set_role_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, moc @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_rids', ['001', 'all']) -async def test_remove_role_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_rids, mock_request=MagicMock()): +async def test_remove_role_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_rids, mock_request): """Verify 'remove_role_policy' endpoint is working as expected.""" - result = await remove_role_policy(request=mock_request, + result = await remove_role_policy( role_id='001', policy_ids=mock_rids) if 'all' in mock_rids: @@ -762,11 +770,11 @@ async def test_remove_role_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -774,16 +782,16 @@ async def test_remove_role_policy(mock_exc, mock_dapi, mock_remove, mock_dfunc, 
@patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_set_role_rule(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_set_role_rule(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'set_role_rule' endpoint is working as expected.""" - result = await set_role_rule(request=mock_request, + result = await set_role_rule( role_id='001', rule_ids='001') f_kwargs = {'role_id': '001', 'rule_ids': '001', 'run_as': { - 'user': mock_request['token_info']['sub'], - 'run_as': mock_request['token_info']['run_as'] + 'user': mock_request.context['token_info']['sub'], + 'run_as': mock_request.context['token_info']['run_as'] } } mock_dapi.assert_called_once_with(f=security.set_role_rule, @@ -792,11 +800,11 @@ async def test_set_role_rule(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -805,9 +813,9 @@ async def test_set_role_rule(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_ @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) @pytest.mark.parametrize('mock_rids', ['001', 'all']) -async def test_remove_role_rule(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_rids, mock_request=MagicMock()): +async def test_remove_role_rule(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_rids, mock_request): """Verify 'remove_role_rule' endpoint is working as expected.""" - result = await remove_role_rule(request=mock_request, + result = await remove_role_rule( role_id='001', rule_ids=mock_rids) if 'all' in mock_rids: @@ -821,11 +829,11 @@ async def test_remove_role_rule(mock_exc, mock_dapi, mock_remove, mock_dfunc, mo is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -847,7 +855,7 @@ async def test_get_rbac_resources(mock_exc, mock_dapi, mock_remove, mock_dfunc): ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -869,7 +877,7 @@ async def test_get_rbac_actions(mock_exc, mock_dapi, mock_remove, mock_dfunc): ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -880,11 +888,11 @@ async def test_get_rbac_actions(mock_exc, mock_dapi, mock_remove, mock_dfunc): 
@patch('api.controllers.security_controller.isinstance') @pytest.mark.parametrize('mock_snodes', [None, AsyncMock()]) async def test_revoke_all_tokens(mock_isins, mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_snodes, - mock_request=MagicMock()): + mock_request): """Verify 'revoke_all_tokens' endpoint is working as expected.""" mock_isins.return_value = True if not mock_snodes else False with patch('api.controllers.security_controller.get_system_nodes', return_value=mock_snodes): - result = await revoke_all_tokens(request=mock_request) + result = await revoke_all_tokens() if not mock_snodes: mock_isins.assert_called_once() mock_dapi.assert_called_once_with(f=security.wrapper_revoke_tokens, @@ -894,12 +902,12 @@ async def test_revoke_all_tokens(mock_isins, mock_exc, mock_dapi, mock_remove, m broadcasting=mock_snodes is not None, logger=ANY, wait_for_complete=True, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_snodes ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -910,10 +918,10 @@ async def test_revoke_all_tokens(mock_isins, mock_exc, mock_dapi, mock_remove, m @patch('api.controllers.security_controller.type', return_value=AffectedItemsWazuhResult) @patch('api.controllers.security_controller.len', return_value=0) async def test_revoke_all_tokens_ko(mock_type, mock_len, mock_exc, mock_dapi, mock_remove, mock_dfunc, - mock_request=MagicMock()): + mock_request): """Verify 'revoke_all_tokens' endpoint is handling WazuhPermissionError as expected.""" with patch('api.controllers.security_controller.get_system_nodes', return_value=AsyncMock()) as mock_snodes: - result = await revoke_all_tokens(request=mock_request) + result = await revoke_all_tokens() mock_dapi.assert_called_once_with(f=security.wrapper_revoke_tokens, f_kwargs=mock_remove.return_value, request_type='distributed_master', @@ -921,14 +929,14 @@ async def test_revoke_all_tokens_ko(mock_type, mock_len, mock_exc, mock_dapi, mo broadcasting=True, logger=ANY, wait_for_complete=True, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], nodes=mock_snodes.return_value ) mock_exc.assert_has_calls([call(mock_dfunc.return_value), call(WazuhPermissionError(4000, mock_exc.return_value.message))]) assert mock_exc.call_count == 2 mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -936,20 +944,20 @@ async def test_revoke_all_tokens_ko(mock_type, mock_len, mock_exc, mock_dapi, mo @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_security_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_security_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_security_config' endpoint is working as expected.""" - result = await get_security_config(request=mock_request) + result = await get_security_config() mock_dapi.assert_called_once_with(f=security.get_security_config, 
f_kwargs=mock_remove.return_value, request_type='local_master', is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with({}) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -979,13 +987,13 @@ async def test_security_revoke_tokens(mock_isins, mock_exc, mock_dapi, mock_dfun @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_put_security_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_put_security_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'put_security_config' endpoint is working as expected.""" with patch('api.controllers.security_controller.Body.validate_content_type'): with patch('api.controllers.security_controller.SecurityConfigurationModel.get_kwargs', return_value=AsyncMock()) as mock_getkwargs: with patch('api.controllers.security_controller.security_revoke_tokens', return_value=AsyncMock()): - result = await put_security_config(request=mock_request) + result = await put_security_config() f_kwargs = {'updated_config': mock_getkwargs.return_value } mock_dapi.assert_called_once_with(f=security.update_security_config, @@ -994,11 +1002,11 @@ async def test_put_security_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'], + rbac_permissions=mock_request.context['token_info']['rbac_policies'], ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio @@ -1006,22 +1014,21 @@ async def test_put_security_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, @patch('api.controllers.security_controller.remove_nones_to_dict') @patch('api.controllers.security_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.security_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_delete_security_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_delete_security_config(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'delete_security_config' endpoint is working as expected.""" with patch('api.controllers.security_controller.SecurityConfigurationModel.get_kwargs', return_value=AsyncMock()) as mock_getkwargs: with patch('api.controllers.security_controller.security_revoke_tokens', return_value=AsyncMock()): - result = await delete_security_config(request=mock_request) - f_kwargs = {'updated_config': mock_getkwargs.return_value - } + result = await delete_security_config() + f_kwargs = {'updated_config': mock_getkwargs.return_value} mock_dapi.assert_called_once_with(f=security.update_security_config, f_kwargs=mock_remove.return_value, request_type='local_master', is_async=False, logger=ANY, wait_for_complete=False, - rbac_permissions=mock_request['token_info']['rbac_policies'] + 
rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_syscheck_controller.py b/api/api/controllers/test/test_syscheck_controller.py index d4064321e3d..abd39e9704f 100644 --- a/api/api/controllers/test/test_syscheck_controller.py +++ b/api/api/controllers/test/test_syscheck_controller.py @@ -6,7 +6,7 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response +from connexion.lifecycle import ConnexionResponse from api.controllers.test.utils import CustomAffectedItems with patch('wazuh.common.wazuh_uid'): @@ -24,15 +24,15 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["syscheck_controller"], indirect=True) @patch('api.controllers.syscheck_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.syscheck_controller.remove_nones_to_dict') @patch('api.controllers.syscheck_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.syscheck_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_put_syscheck(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_put_syscheck(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'put_syscheck' endpoint is working as expected.""" - result = await put_syscheck(request=mock_request) - f_kwargs = {'agent_list': '*' - } + result = await put_syscheck() + f_kwargs = {'agent_list': '*'} mock_dapi.assert_called_once_with(f=syscheck.run, f_kwargs=mock_remove.return_value, request_type='distributed_master', @@ -40,25 +40,25 @@ async def test_put_syscheck(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_r wait_for_complete=False, logger=ANY, broadcasting=True, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["syscheck_controller"], indirect=True) @patch('api.controllers.syscheck_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.syscheck_controller.remove_nones_to_dict') @patch('api.controllers.syscheck_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.syscheck_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_syscheck_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_syscheck_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_syscheck_agent' endpoint is working as expected.""" - result = await get_syscheck_agent(request=mock_request, - agent_id='001') - type_ = mock_request.query.get('type', None) - hash_ = mock_request.query.get('hash', None) - file_ = mock_request.query.get('file', None) + result = await get_syscheck_agent(agent_id='001') + type_ = mock_request.query_params.get('type', None) + hash_ = mock_request.query_params.get('hash', None) + file_ = mock_request.query_params.get('file', None) filters = {'type': type_, 'md5': None, 'sha1': None, @@ -66,8 +66,8 
@@ async def test_get_syscheck_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, 'hash': hash_, 'file': file_, 'arch': None, - 'value.name': mock_request.query.get('value.name', None), - 'value.type': mock_request.query.get('value.type', None) + 'value.name': mock_request.query_params.get('value.name', None), + 'value.type': mock_request.query_params.get('value.type', None) } f_kwargs = {'agent_list': ['001'], 'offset': 0, @@ -86,45 +86,45 @@ async def test_get_syscheck_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["syscheck_controller"], indirect=True) @patch('api.controllers.syscheck_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.syscheck_controller.remove_nones_to_dict') @patch('api.controllers.syscheck_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.syscheck_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_delete_syscheck_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_delete_syscheck_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'delete_syscheck_agent' endpoint is working as expected.""" - result = await delete_syscheck_agent(request=mock_request) - f_kwargs = {'agent_list': ['*'] - } + result = await delete_syscheck_agent() + f_kwargs = {'agent_list': ['*']} mock_dapi.assert_called_once_with(f=syscheck.clear, f_kwargs=mock_remove.return_value, request_type='distributed_master', is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["syscheck_controller"], indirect=True) @patch('api.controllers.syscheck_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.syscheck_controller.remove_nones_to_dict') @patch('api.controllers.syscheck_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.syscheck_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_last_scan_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_last_scan_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_last_scan_agent' endpoint is working as expected.""" - result = await get_last_scan_agent(request=mock_request, - agent_id='001') + result = await get_last_scan_agent(agent_id='001') f_kwargs = {'agent_list': ['001'] } mock_dapi.assert_called_once_with(f=syscheck.last_scan, @@ -133,8 +133,8 @@ async def test_get_last_scan_agent(mock_exc, mock_dapi, mock_remove, mock_dfunc, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + 
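# NOTE (editorial sketch, not part of the patch above): the new
# `@pytest.mark.parametrize("mock_request", ["syscheck_controller"], indirect=True)` markers
# suggest the shared `mock_request` fixture receives the controller module name through
# `request.param`. A hypothetical variant of the fixture sketched earlier:
import pytest
from unittest.mock import MagicMock


@pytest.fixture
def mock_request(request):
    """Per-controller request mock; `request.param` carries the module name (illustrative only)."""
    controller = getattr(request, 'param', None)  # e.g. 'syscheck_controller'
    req = MagicMock(name=f'{controller}_request')
    req.context = {'token_info': {'sub': 'wazuh', 'rbac_policies': {}}}
    req.query_params = {}
    return req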
rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_syscollector_controller.py b/api/api/controllers/test/test_syscollector_controller.py index cca66b57fa8..cc587292742 100644 --- a/api/api/controllers/test/test_syscollector_controller.py +++ b/api/api/controllers/test/test_syscollector_controller.py @@ -6,7 +6,7 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response +from connexion.lifecycle import ConnexionResponse from api.controllers.test.utils import CustomAffectedItems with patch('wazuh.common.wazuh_uid'): @@ -24,14 +24,14 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["syscollector_controller"], indirect=True) @patch('api.controllers.syscollector_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.syscollector_controller.remove_nones_to_dict') @patch('api.controllers.syscollector_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.syscollector_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_hardware_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_hardware_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_hardware_info' endpoint is working as expected.""" - result = await get_hardware_info(request=mock_request, - agent_id='001') + result = await get_hardware_info(agent_id='001') f_kwargs = {'agent_list': ['001'], 'select': None, 'element_type': 'hardware' @@ -42,22 +42,22 @@ async def test_get_hardware_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, m is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["syscollector_controller"], indirect=True) @patch('api.controllers.syscollector_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.syscollector_controller.remove_nones_to_dict') @patch('api.controllers.syscollector_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.syscollector_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_hotfix_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_hotfix_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_hotfix_info' endpoint is working as expected.""" - result = await get_hotfix_info(request=mock_request, - agent_id='001') + result = await get_hotfix_info(agent_id='001') filters = {'hotfix': None} f_kwargs = {'agent_list': ['001'], 'offset': 0, @@ -76,22 +76,22 @@ async def test_get_hotfix_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, moc is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) 
mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["syscollector_controller"], indirect=True) @patch('api.controllers.syscollector_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.syscollector_controller.remove_nones_to_dict') @patch('api.controllers.syscollector_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.syscollector_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_network_address_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_network_address_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_network_address_info' endpoint is working as expected.""" - result = await get_network_address_info(request=mock_request, - agent_id='001') + result = await get_network_address_info(agent_id='001') filters = {'iface': None, 'proto': None, 'address': None, @@ -115,31 +115,31 @@ async def test_get_network_address_info(mock_exc, mock_dapi, mock_remove, mock_d is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["syscollector_controller"], indirect=True) @patch('api.controllers.syscollector_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.syscollector_controller.remove_nones_to_dict') @patch('api.controllers.syscollector_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.syscollector_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_network_interface_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_network_interface_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_network_interface_info' endpoint is working as expected.""" - result = await get_network_interface_info(request=mock_request, - agent_id='001') + result = await get_network_interface_info(agent_id='001') filters = {'adapter': None, - 'type': mock_request.query.get('type', None), + 'type': mock_request.query_params.get('type', None), 'state': None, 'name': None, 'mtu': None } nested = ['tx.packets', 'rx.packets', 'tx.bytes', 'rx.bytes', 'tx.errors', 'rx.errors', 'tx.dropped', 'rx.dropped'] for field in nested: - filters[field] = mock_request.query.get(field, None) + filters[field] = mock_request.query_params.get(field, None) f_kwargs = {'agent_list': ['001'], 'offset': 0, 'limit': None, @@ -157,24 +157,24 @@ async def test_get_network_interface_info(mock_exc, mock_dapi, mock_remove, mock is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, 
ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["syscollector_controller"], indirect=True) @patch('api.controllers.syscollector_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.syscollector_controller.remove_nones_to_dict') @patch('api.controllers.syscollector_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.syscollector_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_network_protocol_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_network_protocol_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_network_protocol_info' endpoint is working as expected.""" - result = await get_network_protocol_info(request=mock_request, - agent_id='001') + result = await get_network_protocol_info(agent_id='001') filters = {'iface': None, - 'type': mock_request.query.get('type', None), + 'type': mock_request.query_params.get('type', None), 'gateway': None, 'dhcp': None } @@ -195,22 +195,22 @@ async def test_get_network_protocol_info(mock_exc, mock_dapi, mock_remove, mock_ is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["syscollector_controller"], indirect=True) @patch('api.controllers.syscollector_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.syscollector_controller.remove_nones_to_dict') @patch('api.controllers.syscollector_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.syscollector_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_os_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_os_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_os_info' endpoint is working as expected.""" - result = await get_os_info(request=mock_request, - agent_id='001') + result = await get_os_info(agent_id='001') f_kwargs = {'agent_list': ['001'], 'select': None, 'element_type': 'os' @@ -221,26 +221,26 @@ async def test_get_os_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_re is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["syscollector_controller"], indirect=True) @patch('api.controllers.syscollector_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.syscollector_controller.remove_nones_to_dict') @patch('api.controllers.syscollector_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.syscollector_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_packages_info(mock_exc, mock_dapi, mock_remove, 
mock_dfunc, mock_request=MagicMock()): +async def test_get_packages_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_packages_info' endpoint is working as expected.""" - result = await get_packages_info(request=mock_request, - agent_id='001') + result = await get_packages_info(agent_id='001') filters = {'vendor': None, 'name': None, 'architecture': None, - 'format': mock_request.query.get('format', None), + 'format': mock_request.query_params.get('format', None), 'version': None } f_kwargs = {'agent_list': ['001'], @@ -260,22 +260,22 @@ async def test_get_packages_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, m is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["syscollector_controller"], indirect=True) @patch('api.controllers.syscollector_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.syscollector_controller.remove_nones_to_dict') @patch('api.controllers.syscollector_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.syscollector_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_ports_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_ports_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_ports_info' endpoint is working as expected.""" - result = await get_ports_info(request=mock_request, - agent_id='001') + result = await get_ports_info(agent_id='001') filters = {'pid': None, 'protocol': None, 'tx_queue': None, @@ -284,7 +284,7 @@ async def test_get_ports_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock } nested = ['local.ip', 'local.port', 'remote.ip'] for field in nested: - filters[field] = mock_request.query.get(field, None) + filters[field] = mock_request.query_params.get(field, None) f_kwargs = {'agent_list': ['001'], 'offset': 0, 'limit': None, @@ -302,22 +302,22 @@ async def test_get_ports_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["syscollector_controller"], indirect=True) @patch('api.controllers.syscollector_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.syscollector_controller.remove_nones_to_dict') @patch('api.controllers.syscollector_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.syscollector_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_processes_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_processes_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_processes_info' endpoint is working as 
expected.""" - result = await get_processes_info(request=mock_request, - agent_id='001') + result = await get_processes_info(agent_id='001') filters = {'state': None, 'pid': None, 'ppid': None, @@ -350,8 +350,8 @@ async def test_get_processes_info(mock_exc, mock_dapi, mock_remove, mock_dfunc, is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/test_task_controller.py b/api/api/controllers/test/test_task_controller.py index 9fdcad78f00..f8c85a0b494 100644 --- a/api/api/controllers/test/test_task_controller.py +++ b/api/api/controllers/test/test_task_controller.py @@ -6,7 +6,7 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response +from connexion.lifecycle import ConnexionResponse from api.controllers.test.utils import CustomAffectedItems with patch('wazuh.common.wazuh_uid'): @@ -22,13 +22,14 @@ @pytest.mark.asyncio +@pytest.mark.parametrize("mock_request", ["task_controller"], indirect=True) @patch('api.controllers.task_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.task_controller.remove_nones_to_dict') @patch('api.controllers.task_controller.DistributedAPI.__init__', return_value=None) @patch('api.controllers.task_controller.raise_if_exc', return_value=CustomAffectedItems()) -async def test_get_tasks_status(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request=MagicMock()): +async def test_get_tasks_status(mock_exc, mock_dapi, mock_remove, mock_dfunc, mock_request): """Verify 'get_tasks_status' endpoint is working as expected.""" - result = await get_tasks_status(request=mock_request) + result = await get_tasks_status() f_kwargs = {'select': None, 'search': None, 'offset': 0, @@ -50,8 +51,8 @@ async def test_get_tasks_status(mock_exc, mock_dapi, mock_remove, mock_dfunc, mo is_async=False, wait_for_complete=False, logger=ANY, - rbac_permissions=mock_request['token_info']['rbac_policies'] + rbac_permissions=mock_request.context['token_info']['rbac_policies'] ) mock_exc.assert_called_once_with(mock_dfunc.return_value) mock_remove.assert_called_once_with(f_kwargs) - assert isinstance(result, web_response.Response) + assert isinstance(result, ConnexionResponse) diff --git a/api/api/controllers/test/utils.py b/api/api/controllers/test/utils.py index fd018c0f82e..cfae001a043 100644 --- a/api/api/controllers/test/utils.py +++ b/api/api/controllers/test/utils.py @@ -2,11 +2,7 @@ # Created by Wazuh, Inc. . # This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 -from unittest.mock import patch - -with patch('wazuh.common.wazuh_uid'): - with patch('wazuh.common.wazuh_gid'): - from wazuh.core.results import AffectedItemsWazuhResult +from wazuh.core.results import AffectedItemsWazuhResult class CustomAffectedItems(AffectedItemsWazuhResult): diff --git a/api/api/controllers/util.py b/api/api/controllers/util.py new file mode 100644 index 00000000000..1f0d80f29e2 --- /dev/null +++ b/api/api/controllers/util.py @@ -0,0 +1,29 @@ +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . 
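The migrated controller tests above no longer build an aiohttp request by hand; they rely on a shared `mock_request` fixture, parametrized indirectly with the controller module name, and they assert on `ConnexionResponse` instead of `web_response.Response`. A minimal sketch of what such a fixture could look like is shown next; the real fixture lives in the test suite's conftest and its exact behaviour may differ.

# Hypothetical approximation of the shared mock_request fixture used by the tests above.
from unittest.mock import MagicMock, patch

import pytest


@pytest.fixture
def mock_request(request):
    """Patch the module-level `request` object of the given controller with a mock."""
    controller = request.param  # e.g. "syscollector_controller" or "task_controller"
    mocked = MagicMock()
    mocked.query_params = {}
    mocked.context = {'token_info': {'rbac_policies': {}}}
    with patch(f'api.controllers.{controller}.request', mocked):
        yield mocked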
+# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 + +from connexion.lifecycle import ConnexionResponse +from api.encoder import dumps, prettify + +JSON_CONTENT_TYPE="application/json" +XML_CONTENT_TYPE="application/xml; charset=utf-8" + + +def json_response(data: dict, pretty: bool = False) -> ConnexionResponse: + """Generate a json Response from a dictionary. + + Parameters + ---------- + data: dict + Data dictionary to convert to json. + pretty: + Prettify the response to be human readable. + + Returns + ------- + Response + JSON response generated from the data. + """ + return ConnexionResponse(body=prettify(data) if pretty else dumps(data), + content_type=JSON_CONTENT_TYPE, + status_code=200) diff --git a/api/api/controllers/vulnerability_controller.py b/api/api/controllers/vulnerability_controller.py new file mode 100644 index 00000000000..073d3e63428 --- /dev/null +++ b/api/api/controllers/vulnerability_controller.py @@ -0,0 +1,214 @@ +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 + +import logging + +from connexion import request +from connexion.lifecycle import ConnexionResponse + +from api.controllers.util import json_response +from api.util import parse_api_param, remove_nones_to_dict, raise_if_exc, deprecate_endpoint +from wazuh import vulnerability, WazuhError +from wazuh.core.cluster.control import get_system_nodes +from wazuh.core.cluster.dapi.dapi import DistributedAPI +from wazuh.core.common import DATABASE_LIMIT + +logger = logging.getLogger('wazuh-api') + + +@deprecate_endpoint() +async def run_vulnerability_scan(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: + """Run a vulnerability detector scan in all nodes. + + Parameters + ---------- + pretty : bool + Show results in human-readable format. + wait_for_complete : bool + Disable timeout response. + + Returns + ------- + ConnexionResponse + API response. + """ + try: + nodes = await get_system_nodes() + dapi_extra_kwargs = {'f_kwargs': {'node_list': nodes}, + 'nodes': nodes, + 'remove_denied_nodes': True} \ + if not isinstance(nodes, WazuhError) else {} + except Exception as exc: + raise_if_exc(exc) + + dapi = DistributedAPI(f=vulnerability.run_vulnerability_scan, + request_type='distributed_master', + is_async=False, + wait_for_complete=wait_for_complete, + logger=logger, + rbac_permissions=request.context['token_info']['rbac_policies'], + **dapi_extra_kwargs) + data = raise_if_exc(await dapi.distribute_function()) + + return json_response(data, pretty=pretty) + + +@deprecate_endpoint() +async def get_vulnerability_agent(pretty: bool = False, wait_for_complete: bool = False, agent_id: str = None, + offset: int = 0, limit: int = None, sort: str = None, search: str = None, + select: str = None, q: str = '', distinct: str = None, architecture: str = None, + cve: str = None, name: str = None, version: str = None, type: str = None, + status: str = None, severity: str = None) -> ConnexionResponse: + """Get agents' vulnerabilities. + + Parameters + ---------- + request : request.connexion + pretty : bool + Show results in human-readable format. + wait_for_complete : bool + Disable timeout response. + agent_id : str + ID of the agent to retrieve CVE info. + offset : int + First element to return in the collection. + limit : int + Maximum number of elements to return. 
+ sort : str + Sort the collection by a field or fields (separated by comma). Use +/- at the beginning to list in + ascending or descending order. + search : str + Look for elements with the specified string. + select : str + Fields to return. + q : str + Query to filter results by. + distinct : bool + Look for distinct values. + architecture : str + Filter by architecture. + cve : str + Filter by CVE ID. + name : str + Filter by package ID. + version : str + Filter by version. + type : str + Filter by CVE type. + status : str + Filter by CVE status. + severity : str + Filter by CVE severity. + + Returns + ------- + ConnexionResponse + API response. + """ + f_kwargs = { + 'agent_list': [agent_id], + 'offset': offset, + 'limit': limit, + 'sort': parse_api_param(sort, 'sort'), + 'search': parse_api_param(search, 'search'), + 'select': select, + 'q': q, + 'distinct': distinct, + 'filters': { + 'architecture': architecture, + 'cve': cve, + 'name': name, + 'version': version, + 'status': status, + 'type': type, + 'severity': severity + } + } + + dapi = DistributedAPI(f=vulnerability.get_agent_cve, + f_kwargs=remove_nones_to_dict(f_kwargs), + request_type='distributed_master', + is_async=False, + wait_for_complete=wait_for_complete, + logger=logger, + rbac_permissions=request.context['token_info']['rbac_policies'] + ) + data = raise_if_exc(await dapi.distribute_function()) + + return json_response(data, pretty=pretty) + + +@deprecate_endpoint() +async def get_last_scan_agent(pretty: bool = False, wait_for_complete: bool = False, + agent_id: str = None) -> ConnexionResponse: + """Return when the last full and partial vulnerability scan of a specified agent ended. + + Parameters + ---------- + pretty : bool + Show results in human-readable format. + wait_for_complete : bool + Disable timeout response. + agent_id : str + ID of the agent to retrieve scans info. + + Returns + ------- + ConnexionResponse + API response. + """ + f_kwargs = {'agent_list': [agent_id]} + + dapi = DistributedAPI(f=vulnerability.last_scan, + f_kwargs=remove_nones_to_dict(f_kwargs), + request_type='distributed_master', + is_async=False, + wait_for_complete=wait_for_complete, + logger=logger, + rbac_permissions=request.context['token_info']['rbac_policies'] + ) + data = raise_if_exc(await dapi.distribute_function()) + + return json_response(data, pretty=pretty) + + +@deprecate_endpoint() +async def get_vulnerabilities_field_summary(pretty: bool = False, wait_for_complete: bool = False, + agent_id: str = None, field: str = None, + limit: int = DATABASE_LIMIT) -> ConnexionResponse: + """Return a summary of the vulnerabilities' field of a given agent. + + Parameters + ---------- + pretty : bool + Show results in human-readable format. + wait_for_complete : bool + Disable timeout response. + agent_id : str + ID of the agent to retrieve severity summary. + field : str + Field to obtain the summary from. + limit : int + Maximum number of elements to return. Default: DATABASE_LIMIT + + Returns + ------- + ConnexionResponse + API response. 
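The new vulnerability controller returns every payload through the `json_response` helper added in api/api/controllers/util.py. A small usage sketch, with a made-up handler and payload for illustration:

from api.controllers.util import json_response


async def example_handler(pretty: bool = False):
    # Hypothetical payload; real controllers pass the result of a DistributedAPI call.
    data = {'data': {'affected_items': [], 'total_affected_items': 0}}
    # Serialized with the API encoder (prettify/dumps) and wrapped in a
    # ConnexionResponse with content type "application/json" and status code 200.
    return json_response(data, pretty=pretty)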
+ """ + f_kwargs = {'agent_list': [agent_id], + 'field': field, + 'limit': limit} + + dapi = DistributedAPI(f=vulnerability.get_inventory_summary, + f_kwargs=remove_nones_to_dict(f_kwargs), + request_type='distributed_master', + is_async=False, + wait_for_complete=wait_for_complete, + logger=logger, + rbac_permissions=request.context['token_info']['rbac_policies'] + ) + data = raise_if_exc(await dapi.distribute_function()) + + return json_response(data, pretty=pretty) diff --git a/api/api/error_handler.py b/api/api/error_handler.py new file mode 100644 index 00000000000..6d95bd5b1f6 --- /dev/null +++ b/api/api/error_handler.py @@ -0,0 +1,295 @@ +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 + +import json +import time + +from connexion.lifecycle import ConnexionRequest, ConnexionResponse +from connexion import exceptions + +from jose import JWTError +from content_size_limit_asgi.errors import ContentSizeExceeded + +from api import configuration +from api.middlewares import ip_block, ip_stats, access_log, LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT +from api.api_exception import BlockedIPException +from wazuh.core.utils import get_utc_now + +ERROR_CONTENT_TYPE="application/problem+json; charset=utf-8" + + +def prevent_bruteforce_attack(request: ConnexionRequest, attempts: int = 5): + """Check that the IPs that are requesting an API token do not do so repeatedly. + + Parameters + ---------- + request : ConnexionRequest + HTTP request. + attempts : int + Number of attempts until an IP is blocked. + """ + + if request.scope['path'] in {LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT} and \ + request.method in {'GET', 'POST'}: + if request.client.host not in ip_stats: + ip_stats[request.client.host] = dict() + ip_stats[request.client.host]['attempts'] = 1 + ip_stats[request.client.host]['timestamp'] = get_utc_now().timestamp() + else: + ip_stats[request.client.host]['attempts'] += 1 + + if ip_stats[request.client.host]['attempts'] >= attempts: + ip_block.add(request.client.host) + + +def _cleanup_detail_field(detail: str) -> str: + """Replace double endlines with '. ' and simple endlines with ''. + + Parameters + ---------- + detail : str + String to be modified. + + Returns + ------- + str + New value for the detail field. + """ + return ' '.join(str(detail).replace("\n\n", ". ").replace("\n", "").split()) + + +async def unauthorized_error_handler(request: ConnexionRequest, + exc: exceptions.Unauthorized) -> ConnexionResponse: + """Unauthorized Exception Error handler. + + Parameters + ---------- + request : ConnexionRequest + Incomming request. + exc : Unauthorized + Raised exception. + + Returns + ------- + Response + HTTP Response returned to the client. 
+ """ + problem = { + "title": "Unauthorized", + } + if request.scope['path'] in {LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT} and \ + request.method in {'GET', 'POST'}: + problem["detail"] = "Invalid credentials" + + prevent_bruteforce_attack( + request=request, + attempts=configuration.api_conf['access']['max_login_attempts'] + ) + else: + problem.update({'detail': 'No authorization token provided'} \ + if 'token_info' not in request.context \ + else {}) + response = ConnexionResponse(status_code=exc.status_code, + body=json.dumps(problem), + content_type=ERROR_CONTENT_TYPE) + await access_log(request, response, time.time()) + return response + + +async def bad_request_error_handler(_: ConnexionRequest, exc: exceptions.BadRequestProblem) -> ConnexionResponse: + """Bad Request Exception Error handler. + + Parameters + ---------- + _ : ConnexionRequest + Incomming request. + Unnamed parameter not used. + exc : BadRequestProblem + Raised exception. + + Returns + ------- + Response + HTTP Response returned to the client. + """ + + problem = { + "title": 'Bad Request', + } + if exc.detail: + problem['detail'] = exc.detail + return ConnexionResponse(status_code=exc.status_code, + body=json.dumps(problem), + content_type=ERROR_CONTENT_TYPE) + + +async def http_error_handler(_: ConnexionRequest, exc: exceptions.HTTPException) -> ConnexionResponse: + """HTTPError Exception Error handler. + + Parameters + ---------- + _ : ConnexionRequest + Incomming request. + Unnamed parameter not used. + exc : HTTPException + Raised exception. + + Returns + ------- + Response + HTTP Response returned to the client. + """ + + problem = { + 'title': exc.detail, + "detail": f"{exc.status_code}: {exc.detail}", + } + return ConnexionResponse(status_code=exc.status_code, + body=json.dumps(problem), + content_type=ERROR_CONTENT_TYPE) + + +async def jwt_error_handler(_: ConnexionRequest, __: JWTError) -> ConnexionResponse: + """JWTException Error handler. + + Parameters + ---------- + _ : ConnexionRequest + Incomming request. + Unnamed parameter not used. + __ : JWTError + Raised exception. + Unnamed parameter not used. + + Returns + ------- + Response + HTTP Response returned to the client. + """ + problem = { + "title": "Unauthorized", + "detail": "No authorization token provided" + } + + return ConnexionResponse(status_code=401, + body=json.dumps(problem), + content_type=ERROR_CONTENT_TYPE) + + +async def problem_error_handler(_: ConnexionRequest, exc: exceptions.ProblemException) -> ConnexionResponse: + """ProblemException Error handler. + + Parameters + ---------- + _ : ConnexionRequest + Incomming request. + Unnamed parameter not used. + exc : ProblemException + Raised exception. + + Returns + ------- + Response + HTTP Response returned to the client. 
+    """
+    problem = {
+        "title": exc.title if exc.title else 'Bad Request',
+        "detail": exc.detail if isinstance(exc.detail, dict) \
+                    else _cleanup_detail_field(exc.detail)
+    }
+    problem.update({"type": exc.type} if exc.type else {})
+    problem.update(exc.ext if exc.ext else {})
+    if isinstance(problem['detail'], dict):
+        for field in ['status', 'type']:
+            if field in problem['detail']:
+                problem['detail'].pop(field)
+    if 'code' in problem:
+        problem['error'] = problem.pop('code')
+    if not problem['detail']:
+        del problem['detail']
+
+    return ConnexionResponse(body=json.dumps(problem),
+                             status_code=exc.__dict__['status'],
+                             content_type=ERROR_CONTENT_TYPE)
+
+
+async def content_size_handler(_: ConnexionRequest, exc: ContentSizeExceeded) -> ConnexionResponse:
+    """Content size error handler.
+
+    Parameters
+    ----------
+    _ : ConnexionRequest
+        Incoming request.
+        Unnamed parameter not used.
+    exc : ContentSizeExceeded
+        Raised exception.
+
+    Returns
+    -------
+    Response
+        Returns status code 413 if the maximum upload file size is exceeded.
+    """
+    problem = {
+        "title": "Content size exceeded.",
+        "detail": str(exc)
+    }
+
+    return ConnexionResponse(status_code=413,
+                             body=json.dumps(problem),
+                             content_type=ERROR_CONTENT_TYPE)
+
+
+async def exceeded_requests_handler(_: ConnexionRequest, exc: exceptions.ProblemException) -> ConnexionResponse:
+    """Exceeded requests error handler.
+
+    Parameters
+    ----------
+    _ : ConnexionRequest
+        Incoming request.
+        Unnamed parameter not used.
+    exc : ProblemException
+        Raised exception.
+
+    Returns
+    -------
+    Response
+        Returns status code 429 if the maximum number of requests per minute was exceeded.
+    """
+    problem = {
+        "title": exc.title,
+        "error": exc.detail,
+    }
+    response = ConnexionResponse(status_code=429,
+                                 body=json.dumps(problem),
+                                 content_type=ERROR_CONTENT_TYPE)
+    await access_log(exc.ext, response, time.time())
+    return response
+
+
+async def blocked_ip_handler(_: ConnexionRequest, exc: BlockedIPException) -> ConnexionResponse:
+    """Blocked IP error handler.
+
+    Parameters
+    ----------
+    _ : ConnexionRequest
+        Incoming request.
+        Unnamed parameter not used.
+    exc : BlockedIPException
+        Raised exception.
+
+    Returns
+    -------
+    Response
+        Returns status code 403 if the maximum number of failed logins was reached.
+    """
+    problem = {
+        "title": exc.title,
+        "detail": exc.detail,
+        "error": 6000
+    }
+    response = ConnexionResponse(status_code=403,
+                                 body=json.dumps(problem),
+                                 content_type=ERROR_CONTENT_TYPE)
+    await access_log(exc.ext, response, time.time())
+    return response
diff --git a/api/api/middlewares.py b/api/api/middlewares.py
index 1cb04a0a255..3a7716058c4 100644
--- a/api/api/middlewares.py
+++ b/api/api/middlewares.py
@@ -2,50 +2,50 @@
 # Created by Wazuh, Inc. .
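The handlers defined in the new api/api/error_handler.py module are plain coroutine functions; they only take effect once registered on the Connexion application. A hedged sketch of that registration follows; the actual wiring lives in the API server setup and may differ.

from connexion import AsyncApp
from connexion.exceptions import (BadRequestProblem, HTTPException, ProblemException,
                                  Unauthorized)
from content_size_limit_asgi.errors import ContentSizeExceeded
from jose import JWTError

from api import error_handler

app = AsyncApp(__name__)
# Map each exception type to the corresponding handler from error_handler.py.
app.add_error_handler(Unauthorized, error_handler.unauthorized_error_handler)
app.add_error_handler(BadRequestProblem, error_handler.bad_request_error_handler)
app.add_error_handler(JWTError, error_handler.jwt_error_handler)
app.add_error_handler(ProblemException, error_handler.problem_error_handler)
app.add_error_handler(HTTPException, error_handler.http_error_handler)
app.add_error_handler(ContentSizeExceeded, error_handler.content_size_handler)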
# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 -from json import JSONDecodeError -from logging import getLogger - -from aiohttp import web, web_request -from aiohttp.web_exceptions import HTTPException -from connexion.exceptions import OAuthProblem, ProblemException, Unauthorized -from connexion.problem import problem as connexion_problem -from secure import SecureHeaders -from wazuh.core.exception import WazuhPermissionError, WazuhTooManyRequests -from wazuh.core.utils import get_utc_now +import json +import hashlib +import time +import contextlib +import logging +import base64 +from jose import jwt -from api.configuration import api_conf -from api.util import raise_if_exc +from starlette.requests import Request +from starlette.responses import Response +from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint -MAX_REQUESTS_EVENTS_DEFAULT = 30 +from connexion import ConnexionMiddleware +from connexion.exceptions import ProblemException +from connexion.lifecycle import ConnexionRequest +from connexion.security import AbstractSecurityHandler -# API secure headers -secure_headers = SecureHeaders(server="Wazuh", csp="none", xfo="DENY") - -logger = getLogger('wazuh-api') +from secure import Secure, ContentSecurityPolicy, XFrameOptions, Server +from wazuh.core.utils import get_utc_now -def _cleanup_detail_field(detail: str) -> str: - """Replace double endlines with '. ' and simple endlines with ''. +from api import configuration +from api.alogging import custom_logging +from api.authentication import generate_keypair, JWT_ALGORITHM +from api.api_exception import BlockedIPException - Parameters - ---------- - detail : str - String to be modified. +# Default of the max event requests allowed per minute +MAX_REQUESTS_EVENTS_DEFAULT = 30 - Returns - ------- - str - New value for the detail field. - """ - return ' '.join(str(detail).replace("\n\n", ". ").replace("\n", "").split()) +# Variable used to specify an unknown user +UNKNOWN_USER_STRING = "unknown_user" +# Run_as login endpoint path +RUN_AS_LOGIN_ENDPOINT = "/security/user/authenticate/run_as" +LOGIN_ENDPOINT = '/security/user/authenticate' -@web.middleware -async def set_secure_headers(request, handler): - resp = await handler(request) - secure_headers.aiohttp(resp) - return resp +# API secure headers +server = Server().set("Wazuh") +csp = ContentSecurityPolicy().set('none') +xfo = XFrameOptions().deny() +secure_headers = Secure(server=server, csp=csp, xfo=xfo) +logger = logging.getLogger('wazuh-api') +start_stop_logger = logging.getLogger('start-stop-api') ip_stats = dict() ip_block = set() @@ -55,94 +55,114 @@ async def set_secure_headers(request, handler): events_current_time = None -async def unlock_ip(request: web_request.BaseRequest, block_time: int): - """This function blocks/unblocks the IPs that are requesting an API token. 
+async def access_log(request: ConnexionRequest, response: Response, prev_time: time): + """Generate Log message from the request.""" + + time_diff = time.time() - prev_time + + context = request.context if hasattr(request, 'context') else {} + headers = request.headers if hasattr(request, 'headers') else {} + path = request.scope.get('path', '') if hasattr(request, 'scope') else '' + host = request.client.host if hasattr(request, 'client') else '' + method = request.method if hasattr(request, 'method') else '' + query = dict(request.query_params) if hasattr(request, 'query_params') else {} + # If the request content is valid, the _json attribute is set when the + # first time the json function is awaited. This check avoids raising an + # exception when the request json content is invalid. + body = await request.json() if hasattr(request, '_json') else {} + + if 'password' in query: + query['password'] = '****' + if 'password' in body: + body['password'] = '****' + if 'key' in body and '/agents' in path: + body['key'] = '****' + + # Get the user name from the request. If it is not found in the context, + # try to get it from the headers using basic or bearer authentication methods. + user = UNKNOWN_USER_STRING + if headers and not (user:= context.get('user', None)): + auth_type, user_passw = AbstractSecurityHandler.get_auth_header_value(request) + if auth_type == 'basic': + user, _ = base64.b64decode(user_passw).decode("latin1").split(":", 1) + elif auth_type == 'bearer': + s = jwt.decode(user_passw, generate_keypair()[1], + algorithms=[JWT_ALGORITHM], + audience='Wazuh API REST', + options={'verify_exp': False}) + user = s['sub'] + + # Get or create authorization context hash + hash_auth_context = context.get('token_info', {}).get('hash_auth_context', '') + # Create hash if run_as login + + if not hash_auth_context and path == RUN_AS_LOGIN_ENDPOINT: + hash_auth_context = hashlib.blake2b(json.dumps(body).encode(), + digest_size=16).hexdigest() + + custom_logging(user, host, method, + path, query, body, time_diff, response.status_code, + hash_auth_context=hash_auth_context, headers=headers) + if response.status_code == 403 and \ + path in {LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT} and \ + method in {'GET', 'POST'}: + logger.warning(f'IP blocked due to exceeded number of logins attempts: {host}') + + +def check_blocked_ip(request: Request): + """Blocks/unblocks the IPs that are requesting an API token. Parameters ---------- - request : web_request.BaseRequest - API request. + request : Request + HTTP request. block_time : int Block time used to decide if the IP is going to be unlocked. + """ global ip_block, ip_stats + access_conf = configuration.api_conf['access'] + block_time=access_conf['block_time'] try: - if get_utc_now().timestamp() - block_time >= ip_stats[request.remote]['timestamp']: - del ip_stats[request.remote] - ip_block.remove(request.remote) + if get_utc_now().timestamp() - block_time >= ip_stats[request.client.host]['timestamp']: + del ip_stats[request.client.host] + ip_block.remove(request.client.host) except (KeyError, ValueError): pass + if request.client.host in ip_block: + raise BlockedIPException( + status=403, + title="Permission Denied", + detail="Limit of login attempts reached. 
The current IP has been blocked due "
+                   "to a high number of login attempts",
+            ext=ConnexionRequest.from_starlette_request(request))

-    if request.remote in ip_block:
-        logger.warning(f'IP blocked due to exceeded number of logins attempts: {request.remote}')
-        raise_if_exc(WazuhPermissionError(6000))
-
-
-async def prevent_bruteforce_attack(request: web_request.BaseRequest, attempts: int = 5):
-    """This function checks that the IPs that are requesting an API token do not do so repeatedly.
-
-    Parameters
-    ----------
-    request : web_request.BaseRequest
-        API request.
-    attempts : int
-        Number of attempts until an IP is blocked.
-    """
-    global ip_stats, ip_block
-    if request.path in {'/security/user/authenticate', '/security/user/authenticate/run_as'} and \
-            request.method in {'GET', 'POST'}:
-        if request.remote not in ip_stats.keys():
-            ip_stats[request.remote] = dict()
-            ip_stats[request.remote]['attempts'] = 1
-            ip_stats[request.remote]['timestamp'] = get_utc_now().timestamp()
-        else:
-            ip_stats[request.remote]['attempts'] += 1
-
-        if ip_stats[request.remote]['attempts'] >= attempts:
-            ip_block.add(request.remote)
-
-@web.middleware
-async def request_logging(request, handler):
-    """Add request info to logging."""
-    logger.debug2(f'Receiving headers {dict(request.headers)}')
-    try:
-        body = await request.json()
-        request['body'] = body
-    except JSONDecodeError:
-        pass
-
-    return await handler(request)
-
-
-@web.middleware
-async def check_rate_limit(
-    request: web_request.BaseRequest,
+def check_rate_limit(
     request_counter_key: str,
     current_time_key: str,
-    max_requests: int
-) -> None:
-    """This function checks that the maximum number of requests per minute passed in `max_requests` is not exceeded.
+    max_requests: int,
+    error_code: int
+) -> int:
+    """Check that the maximum number of requests per minute
+    passed in `max_requests` is not exceeded.

     Parameters
     ----------
-    request : web_request.BaseRequest
-        API request.
     request_counter_key : str
         Key of the request counter variable to get from globals() dict.
     current_time_key : str
         Key of the current time variable to get from globals() dict.
-    max_requests : int, optional
+    max_requests : int
         Maximum number of requests per minute permitted.
+    error_code : int
+        Error code to return if the counter is greater than max_requests.
+
+    Returns
+    -------
+    error_code if the counter is greater than max_requests,
+    else 0.
     """
-
-    error_code_mapping = {
-        'general_request_counter': {'code': 6001},
-        'events_request_counter': {
-            'code': 6005,
-            'extra_message': f'For POST /events endpoint the limit is set to {max_requests} requests.'
- } - } if not globals()[current_time_key]: globals()[current_time_key] = get_utc_now().timestamp() @@ -153,83 +173,102 @@ async def check_rate_limit( globals()[current_time_key] = get_utc_now().timestamp() if globals()[request_counter_key] > max_requests: - logger.debug(f'Request rejected due to high request per minute: Source IP: {request.remote}') - raise_if_exc(WazuhTooManyRequests(**error_code_mapping[request_counter_key])) + return error_code + return 0 -@web.middleware -async def security_middleware(request, handler): - access_conf = api_conf['access'] - max_request_per_minute = access_conf['max_request_per_minute'] - if max_request_per_minute > 0: - await check_rate_limit( - request, +class CheckRateLimitsMiddleware(BaseHTTPMiddleware): + """Rate Limits Middleware.""" + + async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response: + """"Check request limits per minute.""" + max_request_per_minute = configuration.api_conf['access']['max_request_per_minute'] + error_code = check_rate_limit( 'general_request_counter', 'general_current_time', - max_request_per_minute - ) + max_request_per_minute, + 6001) - if request.path == '/events': - await check_rate_limit( - request, + if request.url.path == '/events': + error_code = check_rate_limit( 'events_request_counter', 'events_current_time', - MAX_REQUESTS_EVENTS_DEFAULT - ) - - await unlock_ip(request, block_time=access_conf['block_time']) - - return await handler(request) - - -@web.middleware -async def response_postprocessing(request, handler): - """Remove unwanted fields from error responses like 400 or 403. - - Additionally, it cleans the output given by connexion's exceptions. If no exception is raised during the - 'await handler(request) it means the output will be a 200 response and no fields needs to be removed. 
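Unlike the old aiohttp middleware, the refactored `check_rate_limit` is now a plain synchronous helper that returns an error code instead of raising. A small illustration of how it can be driven (the counter and time keys must exist as module-level globals in api.middlewares, as they do in this module):

from api import middlewares

# First request of the minute: the counter is incremented and 0 is returned,
# since it is still below the permitted maximum.
assert middlewares.check_rate_limit('general_request_counter',
                                    'general_current_time',
                                    max_requests=300,
                                    error_code=6001) == 0
# Once more than max_requests calls land within the same minute, the
# configured error code (6001 for the general counter) is returned instead.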
- """ - - def remove_unwanted_fields(fields_to_remove=None): - fields_to_remove = fields_to_remove or ['status', 'type'] - for field in fields_to_remove: - if field in problem.body: - del problem.body[field] - if problem.body.get('detail') == '': - del problem.body['detail'] - if 'code' in problem.body: - problem.body['error'] = problem.body.pop('code') - - problem = None - - try: - return await handler(request) - - except ProblemException as ex: - problem = connexion_problem(status=ex.__dict__['status'], - title=ex.__dict__['title'] if ex.__dict__.get('title') else 'Bad Request', - type=ex.__dict__.get('type', 'about:blank'), - detail=_cleanup_detail_field(ex.__dict__['detail']) - if 'detail' in ex.__dict__ else '', - ext=ex.__dict__.get('ext')) - except HTTPException as ex: - problem = connexion_problem(ex.status, - ex.reason if ex.reason else '', - type=ex.reason if ex.reason else '', - detail=ex.text if ex.text else '') - except (OAuthProblem, Unauthorized) as auth_exception: - if request.path in {'/security/user/authenticate', '/security/user/authenticate/run_as'} and \ - request.method in {'GET', 'POST'}: - await prevent_bruteforce_attack(request=request, attempts=api_conf['access']['max_login_attempts']) - problem = connexion_problem(401, "Unauthorized", type="about:blank", detail="Invalid credentials") + MAX_REQUESTS_EVENTS_DEFAULT, + 6005) + + if error_code: + raise ProblemException( + status=429, + title="Maximum number of requests per minute reached", + detail=error_code, + ext=ConnexionRequest.from_starlette_request(request)) else: - if isinstance(auth_exception, OAuthProblem): - problem = connexion_problem(401, "Unauthorized", type="about:blank", - detail="No authorization token provided") - else: - problem = connexion_problem(401, "Unauthorized", type="about:blank", detail="Invalid token") - finally: - problem and remove_unwanted_fields() - - return problem + return await call_next(request) + + +class CheckBlockedIP(BaseHTTPMiddleware): + """Rate Limits Middleware.""" + + async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response: + """"Update and check if the client IP is locked.""" + + check_blocked_ip(request) + return await call_next(request) + + +class WazuhAccessLoggerMiddleware(BaseHTTPMiddleware): + """Middleware to log custom Access messages.""" + + async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response: + """Log Wazuh access information. + + Parameters + ---------- + request : Request + HTTP Request received. + call_next : RequestResponseEndpoint + Endpoint callable to be executed. + + Returns + ------- + Response + Returned response. + """ + prev_time = time.time() + response = await call_next(request) + await access_log(ConnexionRequest.from_starlette_request(request), response, prev_time) + return response + + +class SecureHeadersMiddleware(BaseHTTPMiddleware): + """Secure headers Middleware.""" + + async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response: + """Check and modifies the response headers with secure package. + + Parameters + ---------- + request : Request + HTTP Request received. + call_next : RequestResponseEndpoint + Endpoint callable to be executed. + + Returns + ------- + Response + Returned response. 
+ """ + resp = await call_next(request) + secure_headers.framework.starlette(resp) + return resp + + +@contextlib.asynccontextmanager +async def lifespan_handler(_: ConnexionMiddleware): + """Logs the API startup and shutdown messages.""" + + # Log the initial server startup message. + start_stop_logger.info(f'Listening on {configuration.api_conf["host"]}:{configuration.api_conf["port"]}.') + yield + start_stop_logger.info('Shutdown wazuh-apid server.') diff --git a/api/api/models/base_model_.py b/api/api/models/base_model_.py index 72f6384735e..1ffccc6d384 100755 --- a/api/api/models/base_model_.py +++ b/api/api/models/base_model_.py @@ -203,10 +203,12 @@ async def get_kwargs(cls, request, additional_kwargs: dict = None): except JSONDecodeError: raise_if_exc(WazuhError(1018)) - invalid = get_invalid_keys(dikt, f_kwargs) + if dikt: + invalid = get_invalid_keys(dikt, f_kwargs) - if invalid: - raise ProblemException(status=400, title='Bad Request', detail='Invalid field found {}'.format(invalid)) + if invalid: + raise ProblemException(status=400, title='Bad Request', + detail=f'Invalid field found {invalid}') if additional_kwargs is not None: f_kwargs.update(additional_kwargs) @@ -236,5 +238,5 @@ def decode_body(cls, body, unicode_error=None, attribute_error=None): @classmethod def validate_content_type(cls, request, expected_content_type): - if request.content_type != expected_content_type: + if request.mimetype != expected_content_type: raise_if_exc(WazuhNotAcceptable(6002)) diff --git a/api/api/models/test/test_model.py b/api/api/models/test/test_model.py index 29746606be0..c6a6afe6e5d 100644 --- a/api/api/models/test/test_model.py +++ b/api/api/models/test/test_model.py @@ -13,6 +13,7 @@ import pytest from connexion import ProblemException +from api.controllers.util import JSON_CONTENT_TYPE with patch('wazuh.core.common.wazuh_uid'): with patch('wazuh.core.common.wazuh_gid'): sys.modules['api.authentication'] = MagicMock() @@ -51,7 +52,7 @@ def __init__(self, content_type): self._content_type = content_type @property - def content_type(self): + def mimetype(self): return self._content_type @@ -248,7 +249,7 @@ def test_body_decode_body_ko(): def test_body_validate_content_type(): """Test class Body `validate_content_type` method.""" - content_type = 'application/json' + content_type = JSON_CONTENT_TYPE request = RequestMock(content_type) TestModel.validate_content_type(request, content_type) @@ -256,7 +257,7 @@ def test_body_validate_content_type(): def test_body_validate_content_type_ko(): """Test class Body `validate_content_type` method exceptions.""" - request = RequestMock('application/json') + request = RequestMock(JSON_CONTENT_TYPE) with pytest.raises(ProblemException) as exc: TestModel.validate_content_type(request, 'application/xml') diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index 917212f2838..e65d902bac8 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -641,6 +641,26 @@ components: unknown-node: error: "The group does not exist" + InvalidUriResponse: + description: "Uri not found" + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + example: + title: "Not Found" + detail: "404: Not Found" + + UnsupportedMediaTypeResponse: + description: "Unsupported media type" + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + example: + title: "Unsupported media type" + detail: "Invalid Content-type (application/json), expected ['application/octet-stream']" + schemas: ## Common models 
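The classes introduced in api/api/middlewares.py are standard Starlette BaseHTTPMiddleware implementations, so they can be attached to the Connexion application together with the lifespan handler. An illustrative sketch follows; the real application setup, including the exact middleware order and whether the app takes a lifespan argument, may differ.

from connexion import AsyncApp

from api.middlewares import (CheckBlockedIP, CheckRateLimitsMiddleware, SecureHeadersMiddleware,
                             WazuhAccessLoggerMiddleware, lifespan_handler)

# Start-up/shutdown logging is handled by the lifespan context manager (assumed here
# to be accepted by the application constructor).
app = AsyncApp(__name__, lifespan=lifespan_handler)

# The relative position of each middleware is an assumption for this sketch.
app.add_middleware(CheckRateLimitsMiddleware)
app.add_middleware(CheckBlockedIP)
app.add_middleware(WazuhAccessLoggerMiddleware)
app.add_middleware(SecureHeadersMiddleware)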
ApiResponse: @@ -11474,6 +11494,8 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' + '404': + $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': @@ -11524,6 +11546,8 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' + '404': + $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '413': @@ -11567,6 +11591,8 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' + '404': + $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': @@ -11977,6 +12003,8 @@ paths: $ref: '#/components/responses/WrongContentTypeResponse' '413': $ref: '#/components/responses/RequestTooLargeResponse' + '415': + $ref: '#/components/responses/UnsupportedMediaTypeResponse' '429': $ref: '#/components/responses/TooManyRequestsResponse' @@ -14241,6 +14269,8 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' + '404': + $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': @@ -14296,6 +14326,8 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' + '404': + $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '406': @@ -14344,6 +14376,8 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' + '404': + $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': @@ -15023,6 +15057,8 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' + '404': + $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': @@ -15077,6 +15113,8 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' + '404': + $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '406': @@ -15125,6 +15163,8 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' + '404': + $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': diff --git a/api/api/test/test_alogging.py b/api/api/test/test_alogging.py index 7e47491fd5e..b4b2260a7d9 100644 --- a/api/api/test/test_alogging.py +++ b/api/api/test/test_alogging.py @@ -2,278 +2,21 @@ # Created by Wazuh, Inc. . 
# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 - import json -import logging -import os -from unittest.mock import MagicMock, call, patch +from copy import copy +from unittest.mock import patch, call, MagicMock import pytest with patch('wazuh.core.common.wazuh_uid'): with patch('wazuh.core.common.wazuh_gid'): from api import alogging + from api.api_exception import APIError REQUEST_HEADERS_TEST = {'authorization': 'Basic d2F6dWg6cGFzc3dvcmQxMjM='} # wazuh:password123 AUTH_CONTEXT_TEST = {'auth_context': 'example'} HASH_AUTH_CONTEXT_TEST = '020efd3b53c1baf338cf143fad7131c3' -def test_accesslogger_log_credentials(): - """Check AccessLogger is hiding confidential data from logs""" - class MockedRequest(dict): - query = {'password': 'password_value' - } - path = '/agents' - remote = 'remote_value' - method = 'method_value' - - def __init__(self): - self['body'] = {'password': 'password_value', - 'key': 'key_value'} - self['user'] = 'wazuh' - - with patch('logging.Logger.info') as mock_logger_info: - test_access_logger = alogging.AccessLogger(logger=logging.getLogger('test'), log_format=MagicMock()) - test_access_logger.log(request=MockedRequest(), response=MagicMock(), time=0.0) - - assert mock_logger_info.call_count == 2 - - json_call = mock_logger_info.call_args_list[1][0][0] - log_call = mock_logger_info.call_args_list[0][0][0] - - assert json_call['parameters'] == {"password": "****"} - assert json_call['body'] == {"password": "****", "key": "****"} - assert 'parameters {"password": "****"} and body {"password": "****", "key": "****"}' in log_call - - assert mock_logger_info.call_args_list[1][1]['extra'] == {'log_type': 'json'} - assert mock_logger_info.call_args_list[0][1]['extra'] == {'log_type': 'log'} - - -@pytest.mark.parametrize('side_effect, user', [ - ('unknown', ''), - (None, ''), - (None, 'wazuh') -]) -@patch('api.alogging.json.dumps') -def test_accesslogger_log_user(mock_dumps, side_effect, user): - """Test that the user is logged properly when using log(). - - Parameters - ---------- - side_effect : function - Side effect used in the decode_token mock. - user : str - User returned by the request.get function of alogging.py, which is mocked using a class. 
- """ - - # Create a class with a mocked get method for request - class MockedRequest(MagicMock): - headers = REQUEST_HEADERS_TEST if side_effect is None else {} - - def get(self, *args, **kwargs): - return user - # Mock decode_token and logger.info - with patch('logging.Logger.info') as mock_logger_info: - - # Create an AccessLogger object and log a mocked call - test_access_logger = alogging.AccessLogger(logger=logging.getLogger('test'), log_format=MagicMock()) - test_access_logger.log(request=MockedRequest(), response=MagicMock(), time=0.0) - - json_call = mock_logger_info.call_args_list[1][0][0] - log_call = mock_logger_info.call_args_list[0][0][0] - - # If not user, decode_token must be called to get the user and logger.info must be called with the user - # if we have token_info or UNKNOWN_USER if not - if not user: - expected_user = 'wazuh' if side_effect is None else alogging.UNKNOWN_USER_STRING - assert json_call['user'] == expected_user - assert log_call.split(" ")[0] == expected_user - # If user, logger.info must be called with the user - else: - assert json_call['user'] == user - assert log_call.split(" ")[0] == user - - -@pytest.mark.parametrize('request_path, token_info, request_body', [ - ('/agents', {'hash_auth_context': HASH_AUTH_CONTEXT_TEST}, {}), # Test a normal request logs the auth context hash - ('/security/user/authenticate/run_as', {'other_key': 'other_value'}, - AUTH_CONTEXT_TEST), # Test a login request generates and logs the auth context hash - ('/security/user/authenticate', None, {}) # Test any other call without auth context does not log the hash -]) -def test_accesslogger_log_hash_auth_context(request_path, token_info, request_body): - """Test that the authorization context hash is logged properly when using log(). - - Parameters - ---------- - request_path : str - Path used in the custom request. - token_info : dict - Dictionary corresponding to the token information. If token_info is None, we simulate that no token was given. - request_body : dict - Request body used in the custom request. 
- """ - - # Create a class with custom methods for request - class CustomRequest: - def __init__(self): - self.request_dict = {'token_info': token_info} if token_info else {} - self.path = request_path - self.body = request_body - self.query = {'q': 'test'} - self.remote = 'test' - self.method = 'test' - self.user = 'test' - - def __contains__(self, key): - return key in self.request_dict - - def __getitem__(self, key): - return self.request_dict[key] - - def get(self, *args, **kwargs): - return getattr(self, args[0]) if args[0] in self.__dict__.keys() else args[1] - - # Mock logger.info - with patch('logging.Logger.info') as mock_logger_info: - # Create an AccessLogger object and log a mocked call - request = CustomRequest() - test_access_logger = alogging.AccessLogger(logger=logging.getLogger('test'), log_format=MagicMock()) - test_access_logger.log(request=request, response=MagicMock(), time=0.0) - - message_api_log = mock_logger_info.call_args_list[0][0][0].split(" ") - message_api_json = mock_logger_info.call_args_list[1][0][0] - - # Test authorization context hash is being logged - if (token_info and token_info.get('hash_auth_context')) or \ - (request_path == "/security/user/authenticate/run_as" and request_body): - assert message_api_log[1] == f"({HASH_AUTH_CONTEXT_TEST})" - assert message_api_json.get('hash_auth_context') == HASH_AUTH_CONTEXT_TEST - else: - assert message_api_log[1] == request.remote - assert 'hash_auth_context' not in message_api_json - - -@pytest.mark.parametrize('request_path,request_body,log_level,log_key,json_key', [ - ('/events', {"events": ["foo", "bar"]}, 20, 'body', 'body'), - ('/events', {"events": ["foo", "bar"]}, 5, 'body', 'body'), - ('/agents', {}, 20, 'body', 'body'), - ('/agents', {}, 5, 'body', 'body') -]) -@patch('logging.Logger.info') -@patch('logging.Logger.debug') -def test_accesslogger_log_events_correctly( - mock_logger_debug, mock_logger_info, request_path, request_body, log_level, log_key, json_key -): - """Test that the authorization context hash is logged properly when using log(). - - Parameters - ---------- - request_path : str - Path used in the custom request. - request_body : dict - Request body used in the custom request. - log_level: int - Log level used un the custom request. 
- """ - - # Create a class with custom methods for request - class CustomRequest: - def __init__(self): - self.request_dict = {} - self.path = request_path - self.body = request_body - self.query = {} - self.remote = 'test' - self.method = 'test' - self.user = 'test' - - def __contains__(self, key): - return key in self.request_dict - - def __getitem__(self, key): - return self.request_dict[key] - - def get(self, *args, **kwargs): - return getattr(self, args[0]) if args[0] in self.__dict__.keys() else args[1] - - # Create an AccessLogger object and log a mocked call - request = CustomRequest() - test_access_logger = alogging.AccessLogger(logger=logging.getLogger('test'), log_format=MagicMock()) - test_access_logger.logger.setLevel(log_level) - test_access_logger.log(request=request, response=MagicMock(), time=0.0) - - message_api_log = mock_logger_info.call_args_list[0][0][0] - message_api_json = mock_logger_info.call_args_list[1][0][0] - - assert log_key in message_api_log - assert json_key in message_api_json - - if log_level >= 20 and request_path == '/events': - formatted_log = {"events": len(request_body["events"])} - assert json.dumps(formatted_log) in message_api_log - assert formatted_log == message_api_json[json_key] - else: - assert json.dumps(request_body) in message_api_log - assert request_body == message_api_json[json_key] - - -@pytest.mark.parametrize('json_log', [ - False, - True -]) -@patch('wazuh.core.wlogging.WazuhLogger.__init__') -def test_apilogger_init(mock_wazuhlogger, json_log): - """Check parameters are as expected when calling __init__ method. - - Parameters - ---------- - json_log : boolean - Boolean used to define the log file format. - """ - log_name = 'testing.json' if json_log else 'testing.log' - current_logger_path = os.path.join(os.path.dirname(__file__), log_name) - alogging.APILogger(log_path=current_logger_path, foreground_mode=False, debug_level='info', - logger_name='wazuh') - - assert mock_wazuhlogger.call_args.kwargs['log_path'] == current_logger_path - assert not mock_wazuhlogger.call_args.kwargs['foreground_mode'] - assert mock_wazuhlogger.call_args.kwargs['debug_level'] == 'info' - assert mock_wazuhlogger.call_args.kwargs['logger_name'] == 'wazuh' - if json_log: - assert mock_wazuhlogger.call_args.kwargs['custom_formatter'] == alogging.WazuhJsonFormatter - else: - assert mock_wazuhlogger.call_args.kwargs['custom_formatter'] is None - - os.path.exists(current_logger_path) and os.remove(current_logger_path) - - -@pytest.mark.parametrize('debug_level, expected_level', [ - ('info', logging.INFO), - ('debug2', 5), - ('debug', logging.DEBUG), - ('critical', logging.CRITICAL), - ('error', logging.ERROR), - ('warning', logging.WARNING), -]) -@patch('api.alogging.logging.Logger.setLevel') -def test_apilogger_setup_logger(mock_logger, debug_level, expected_level): - """Check loggin level is as expected. - - Parameters - ---------- - debug_level : str - Value used to configure the debug level of the logger. - expected_level : int - Expeced value of the debug level. 
- """ - current_logger_path = os.path.join(os.path.dirname(__file__), 'testing') - logger = alogging.APILogger(log_path=current_logger_path, foreground_mode=False, debug_level=debug_level, - logger_name='wazuh') - logger.setup_logger() - assert mock_logger.call_args == call(expected_level) - - os.path.exists(current_logger_path) and os.remove(current_logger_path) - @pytest.mark.parametrize('message, dkt', [ (None, {'k1': 'v1'}), @@ -295,9 +38,9 @@ def test_wazuhjsonformatter(message, dkt): wjf = alogging.WazuhJsonFormatter() log_record = {} wjf.add_fields(log_record, mock_record, dkt) - assert 'timestamp' in log_record.keys() - assert 'data' in log_record.keys() - assert 'levelname' in log_record.keys() + assert 'timestamp' in log_record + assert 'data' in log_record + assert 'levelname' in log_record tb = dkt.get('exc_info') if tb is not None: assert log_record['data']['payload'] == f'{message}. {tb}' @@ -306,3 +49,77 @@ def test_wazuhjsonformatter(message, dkt): else: assert log_record['data']['payload'] == message assert isinstance(log_record, dict) + + +@pytest.mark.parametrize("size_input, expected_size", [ + ("1m", 1024 * 1024), + ("1M", 1024 * 1024), + ("1024k", 1024 * 1024), + ("1024K", 1024 * 1024), + ("5m", 5 * 1024 * 1024) +]) +def test_api_logger_size(size_input, expected_size): + """Assert `APILoggerSize` class returns the correct number of bytes depending on the given unit. + + Parameters + ---------- + size_input : str + Input for the class constructor. + expected_size : int + Expected number of bytes after translating the input. + """ + assert alogging.APILoggerSize(size_input).size == expected_size + + +def test_api_logger_size_exceptions(): + """Assert `APILoggerSize` class returns the correct exceptions when the given size is not valid.""" + # Test invalid units + with pytest.raises(APIError, match="2011.*expected format.*"): + alogging.APILoggerSize("3435j") + + # Test min value + with pytest.raises(APIError, match="2011.*Minimum value.*"): + alogging.APILoggerSize("1k") + + +@pytest.mark.parametrize("path, hash_auth_context, body, loggerlevel", [ + ("/agents", '', {'bodyfield': 1}, 1), + ("/agents", 'hashauthcontext', {'bodyfield': 1}, 21), + ("/events", '', {'bodyfield': 1, 'events' : [{'a': 1, 'b': 2 }]}, 1), + ("/events", 'hashauthcontext', {'bodyfield': 1, 'events' : [{'a': 1, 'b': 2 }]}, 22), +]) +def test_custom_logging(path, hash_auth_context, body, loggerlevel): + """Test custom access logging calls.""" + user, remote, method = ('wazuh', '1.1.1.1', 'POST') + query, elapsed_time, status, headers = {'pretty': True}, 1.01, 200, {'content-type': 'xml'} + json_info = { + 'user': user, + 'ip': remote, + 'http_method': method, + 'uri': f'{method} {path}', + 'parameters': query, + 'body': body, + 'time': f'{elapsed_time:.3f}s', + 'status_code': status + } + + log_info = f'{user} ({hash_auth_context}) {remote} "{method} {path}" ' if hash_auth_context \ + else f'{user} ({hash_auth_context}) {remote} "{method} {path}" ' + json_info.update({'hash_auth_context' : hash_auth_context} if hash_auth_context else {}) + with patch('api.alogging.logger') as log_info_mock: + log_info_mock.info = MagicMock() + log_info_mock.debug2 = MagicMock() + log_info_mock.level = loggerlevel + alogging.custom_logging(user=user, remote=remote, method=method, path=path, query=query, + body=copy(body), elapsed_time=elapsed_time, status=status, + hash_auth_context=hash_auth_context, headers=headers) + + if path == '/events' and loggerlevel >= 20: + events = body.get('events', []) + body = {'events': 
len(events)} + json_info['body'] = body + log_info += f'with parameters {json.dumps(query)} and body'\ + f' {json.dumps(body)} done in {elapsed_time:.3f}s: {status}' + log_info_mock.info.has_calls([call(log_info, {'log_type': 'log'}), + call(json_info, {'log_type': 'json'})]) + log_info_mock.debug2.assert_called_with(f'Receiving headers {headers}') diff --git a/api/api/test/test_authentication.py b/api/api/test/test_authentication.py index b1819b4123b..e263ba653ca 100644 --- a/api/api/test/test_authentication.py +++ b/api/api/test/test_authentication.py @@ -9,7 +9,7 @@ from copy import deepcopy from unittest.mock import patch, MagicMock, ANY, call -from werkzeug.exceptions import Unauthorized +from connexion.exceptions import Unauthorized with patch('wazuh.core.common.wazuh_uid'): with patch('wazuh.core.common.wazuh_gid'): diff --git a/api/api/test/test_error_handler.py b/api/api/test/test_error_handler.py new file mode 100644 index 00000000000..0e6b34d1eeb --- /dev/null +++ b/api/api/test/test_error_handler.py @@ -0,0 +1,257 @@ +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 + +import json +from datetime import datetime +from unittest.mock import patch, MagicMock +from copy import copy +import pytest + +from freezegun import freeze_time + +from connexion.exceptions import HTTPException, ProblemException, BadRequestProblem, Unauthorized +from api.error_handler import _cleanup_detail_field, prevent_bruteforce_attack, jwt_error_handler, \ + http_error_handler, problem_error_handler, bad_request_error_handler, unauthorized_error_handler, \ + exceeded_requests_handler, blocked_ip_handler, ERROR_CONTENT_TYPE +from api.middlewares import LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT + + +@pytest.fixture +def request_info(request): + """Return the dictionary of the parametrize""" + return request.param if 'prevent_bruteforce_attack' in request.node.name else None + + +@pytest.fixture +def mock_request(request, request_info): + """fixture to wrap functions with request""" + req = MagicMock() + req.client.host = 'ip' + if 'prevent_bruteforce_attack' in request.node.name: + for clave, valor in request_info.items(): + setattr(req, clave, valor) + + return req + + +def test_cleanup_detail_field(): + """Test `_cleanup_detail_field` function.""" + detail = """Testing + + Details field. + """ + + assert _cleanup_detail_field(detail) == "Testing. Details field." + + +@pytest.mark.parametrize('stats', [ + {}, + {'ip': {'attempts': 4}}, +]) +@pytest.mark.parametrize('request_info', [ + {'path': LOGIN_ENDPOINT, 'method': 'GET'}, + {'path': LOGIN_ENDPOINT, 'method': 'POST'}, + {'path': RUN_AS_LOGIN_ENDPOINT, 'method': 'POST'}, +], indirect=True) +def test_middlewares_prevent_bruteforce_attack(stats, request_info, mock_request): + """Test `prevent_bruteforce_attack` blocks IPs when reaching max number of attempts.""" + mock_request.configure_mock(scope={'path': request_info['path']}) + mock_request.method = request_info['method'] + with patch("api.error_handler.ip_stats", new=copy(stats)) as ip_stats, \ + patch("api.error_handler.ip_block", new=set()) as ip_block: + previous_attempts = ip_stats['ip']['attempts'] if 'ip' in ip_stats else 0 + prevent_bruteforce_attack(mock_request, attempts=5) + if stats: + # There were previous attempts. 
This one reached the limit + assert ip_stats['ip']['attempts'] == previous_attempts + 1 + assert 'ip' in ip_block + else: + # There were not previous attempts + assert ip_stats['ip']['attempts'] == 1 + assert 'ip' not in ip_block + + +@pytest.mark.asyncio +@freeze_time(datetime(1970, 1, 1, 0, 0, 10)) +@pytest.mark.parametrize('path, method, token_info', [ + (LOGIN_ENDPOINT, 'GET', True), + (LOGIN_ENDPOINT, 'POST', False), + (RUN_AS_LOGIN_ENDPOINT, 'POST', True), + ('/agents', 'POST', False), +]) +async def test_unauthorized_error_handler(path, method, token_info, mock_request): + """Test unauthorized error handler.""" + problem = { + "title": "Unauthorized", + } + mock_request.configure_mock(scope={'path': path}) + mock_request.method = method + if path in {LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT} \ + and method in {'GET', 'POST'}: + problem['detail'] = "Invalid credentials" + else: + if token_info: + mock_request.context = {'token_info': ''} + else: + problem['detail'] = 'No authorization token provided' + mock_request.context = {} + + exc = Unauthorized() + with patch('api.error_handler.prevent_bruteforce_attack') as mock_pbfa, \ + patch('api.configuration.api_conf', new={'access': {'max_login_attempts': 1000}}), \ + patch('api.error_handler.access_log') as access_log_mock: + response = await unauthorized_error_handler(mock_request, exc) + if path in {LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT} \ + and method in {'GET', 'POST'}: + mock_pbfa.assert_called_once_with(request=mock_request, attempts=1000) + expected_time = datetime(1970, 1, 1, 0, 0, 10).timestamp() + access_log_mock.assert_awaited_once_with(mock_request, response, expected_time) + body = json.loads(response.body) + assert body == problem + assert response.status_code == exc.status_code + assert response.content_type == ERROR_CONTENT_TYPE + + +@pytest.mark.asyncio +async def test_jwt_error_handler(): + """Test jwt error handler.""" + problem = { + "title": "Unauthorized", + "detail": "No authorization token provided" + } + response = await jwt_error_handler(None, None) + + body = json.loads(response.body) + assert body == problem + assert response.status_code == 401 + assert response.content_type == ERROR_CONTENT_TYPE + + +@pytest.mark.asyncio +@pytest.mark.parametrize('detail', [None, 'Custom detail']) +async def test_http_error_handler(detail): + """Test http error handler.""" + exc = HTTPException(status_code=401, detail=detail) + problem = { + "title": exc.detail, + 'detail': f"{exc.status_code}: {exc.detail}" + } + response = await http_error_handler(None, exc) + + body = json.loads(response.body) + assert body == problem + assert response.status_code == 401 + assert response.content_type == ERROR_CONTENT_TYPE + + +@pytest.mark.asyncio +@pytest.mark.parametrize('title, detail, ext, error_type', [ + ('title', 'detail \n detail\n', {}, None), + ('', 'detail', {}, None), + ('', '', {}, None), + ('', 'detail', {'status': 'status'}, None), + ('', 'detail', {'type': 'type'}, None), + ('', 'detail', {'code': 3005}, None), + ('', 'detail', {'code': 3005}, None), + ('', 'detail', {'code': 3005}, 'type'), + ('', {'detail_1':'detail_1'}, {'code': 3005}, 'type'), + ('', {}, {'code': 3005}, 'type'), + ('', {}, {'status': 'status'}, 'type'), + ('', {}, {'type': 'type'}, 'type'), + ('', {}, {'type': 'type', 'more': 'more'}, 'type'), +]) +async def test_problem_error_handler(title, detail, ext, error_type): + """Test problem error handler.""" + exc = ProblemException(status=400, title=title, detail=detail, ext=ext, type=error_type) + response = 
await problem_error_handler(None, exc) + body = json.loads(response.body) + + if isinstance(detail, dict): + if 'type' in detail: + detail.pop('type') + if 'status' in detail: + detail.pop('status') + elif isinstance(detail, str): + detail = _cleanup_detail_field(detail) + problem = {} + problem.update({'title': title} if title else {'title': 'Bad Request'}) + problem.update({'type': error_type} if error_type else {}) + problem.update({'detail': detail} if detail else {}) + problem.update(ext if ext else {}) + problem.update({'error': problem.pop('code')} if 'code' in problem else {}) + + assert response.status_code == 400 + assert response.content_type == ERROR_CONTENT_TYPE + assert body == problem + + +@pytest.mark.asyncio +@pytest.mark.parametrize('detail', [None, 'detail']) +async def test_bad_request_error_handler(detail): + """Test bad request error handler.""" + problem = { + "title": 'Bad Request', + } + problem.update({'detail': detail} if detail else {}) + + exc = BadRequestProblem(detail=detail) + response = await bad_request_error_handler(None, exc) + body = json.loads(response.body) + assert body == problem + assert response.status_code == exc.status_code + assert response.content_type == ERROR_CONTENT_TYPE + + +@pytest.mark.asyncio +@freeze_time(datetime(1970, 1, 1, 0, 0, 10)) +@pytest.mark.parametrize('error_code', [6001, 6005]) +async def test_bad_exceeded_request_handler(error_code, mock_request): + """Test exceeded request error handler.""" + problem = { + "title": 'Maximum number of requests per minute reached', + "error": error_code, + } + + exc = ProblemException( + status=429, + title="Maximum number of requests per minute reached", + detail=error_code, + ext=mock_request) + with patch('api.error_handler.access_log') as access_log_mock: + response = await exceeded_requests_handler(mock_request, exc) + expected_time = datetime(1970, 1, 1, 0, 0, 10).timestamp() + access_log_mock.assert_awaited_once_with(mock_request, response, expected_time) + + body = json.loads(response.body) + assert body == problem + assert response.status_code == exc.status_code + assert response.content_type == ERROR_CONTENT_TYPE + + +@pytest.mark.asyncio +@freeze_time(datetime(1970, 1, 1, 0, 0, 10)) +async def test_blocked_ip_handler(mock_request): + """Test blocked ip error handler.""" + + exc = ProblemException( + status=403, + title="Permission Denied", + detail="Limit of login attempts reached. The current IP has been blocked due " + "to a high number of login attempts", + ext=mock_request + ) + problem = { + "title": exc.title, + "detail": exc.detail, + "error": 6000 + } + with patch('api.error_handler.access_log') as access_log_mock: + response = await blocked_ip_handler(mock_request, exc) + expected_time = datetime(1970, 1, 1, 0, 0, 10).timestamp() + access_log_mock.assert_awaited_once_with(mock_request, response, expected_time) + + body = json.loads(response.body) + assert body == problem + assert response.status_code == exc.status_code + assert response.content_type == ERROR_CONTENT_TYPE diff --git a/api/api/test/test_middlewares.py b/api/api/test/test_middlewares.py index 59b2c31fd57..f1e82902abb 100644 --- a/api/api/test/test_middlewares.py +++ b/api/api/test/test_middlewares.py @@ -2,164 +2,290 @@ # Created by Wazuh, Inc. . 
# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 -from copy import copy from datetime import datetime -from unittest.mock import AsyncMock, patch - +from unittest.mock import patch, MagicMock, AsyncMock, call import pytest -from freezegun import freeze_time -from wazuh.core.exception import WazuhPermissionError, WazuhTooManyRequests - -from api.middlewares import ( - MAX_REQUESTS_EVENTS_DEFAULT, - _cleanup_detail_field, - check_rate_limit, - prevent_bruteforce_attack, - security_middleware, - unlock_ip, -) +from connexion import AsyncApp +from connexion.testing import TestContext +from connexion.exceptions import ProblemException -class DummyRequest: - def __init__(self, data: dict): - self.data = data +from freezegun import freeze_time - # Set properties - for k, v in data.items(): - setattr(self, k, v) +from api.middlewares import check_rate_limit, check_blocked_ip, MAX_REQUESTS_EVENTS_DEFAULT, UNKNOWN_USER_STRING, \ + LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT, CheckRateLimitsMiddleware, WazuhAccessLoggerMiddleware, CheckBlockedIP, \ + SecureHeadersMiddleware, secure_headers, access_log - def __contains__(self, item): - return item in self.data +@pytest.fixture +def request_info(request): + """Return the dictionary of the parametrize""" + return request.param if 'prevent_bruteforce_attack' in request.node.name else None - def __getitem__(self, key): - return self.data[key] +@pytest.fixture +def mock_req(request, request_info): + """fixture to wrap functions with request""" + req = MagicMock() + req.client.host = 'ip' + if 'prevent_bruteforce_attack' in request.node.name: + for clave, valor in request_info.items(): + setattr(req, clave, valor) + req.json = AsyncMock(side_effect=lambda: {'ctx': ''} ) + req.context = MagicMock() + req.context.get = MagicMock(return_value={}) - def __setitem__(self, key, value): - self.data[key] = value + return req - def json(self): - return self.data +@freeze_time(datetime(1970, 1, 1, 0, 0, 10)) +async def test_middlewares_check_blocked_ip(mock_req): + """Test check_blocked_ip function. + Check if the ip_block is emptied when the blocking period has finished.""" + with patch("api.middlewares.ip_stats", new={'ip': {'timestamp': -300}}) as mock_ip_stats, \ + patch("api.middlewares.ip_block", new={"ip"}) as mock_ip_block: + check_blocked_ip(mock_req) + # Assert that under these conditions, they have been emptied + assert not mock_ip_stats and not mock_ip_block -handler_mock = AsyncMock() +@patch("api.middlewares.ip_stats", new={"ip": {'timestamp': 5}}) +@patch("api.middlewares.ip_block", new={"ip"}) +@freeze_time(datetime(1970, 1, 1)) +@pytest.mark.asyncio +async def test_middlewares_check_blocked_ip_ko(mock_req): + """Test if `check_blocked_ip` raises an exception if the IP is still blocked.""" + with pytest.raises(ProblemException) as exc_info, \ + patch('api.middlewares.ConnexionRequest.from_starlette_request', returns_value=mock_req): + check_blocked_ip(mock_req) + assert exc_info.value.status == 403 + assert exc_info.value.title == "Permission Denied" + assert exc_info.value.detail == ( + "Limit of login attempts reached. The current IP has been blocked due " + "to a high number of login attempts" + ) + assert exc_info.ext == mock_req -def test_cleanup_detail_field(): - """Test `_cleanup_detail_field` function.""" - detail = """Testing - Details field. 
- """ +@freeze_time(datetime(1970, 1, 1)) +@pytest.mark.parametrize("current_time,max_requests,current_time_key, current_counter_key,expected_error_code", [ + (-80, 300, 'events_current_time', 'events_request_counter', 0), + (-80, 300, 'general_current_time', 'general_request_counter', 0), + (0, 0, 'events_current_time', 'events_request_counter', 6005), + (0, 0, 'general_current_time', 'general_request_counter', 6001), +]) +def test_middlewares_check_rate_limit( + current_time, max_requests, current_time_key, current_counter_key, + expected_error_code, mock_req): + """Test if the rate limit mechanism triggers when the `max_requests` are reached.""" - assert _cleanup_detail_field(detail) == "Testing. Details field." + with patch(f"api.middlewares.{current_time_key}", new=current_time): + code = check_rate_limit( + current_time_key=current_time_key, + request_counter_key=current_counter_key, + max_requests=max_requests, + error_code=expected_error_code) + assert code == expected_error_code -@patch("api.middlewares.ip_stats", new={'ip': {'timestamp': 5}}) -@patch("api.middlewares.ip_block", new={"ip"}) -@freeze_time(datetime(1970, 1, 1, 0, 0, 10)) @pytest.mark.asyncio -async def test_middlewares_unlock_ip(): - from api.middlewares import ip_block, ip_stats - - # Assert they are not empty - assert ip_stats and ip_block - await unlock_ip(DummyRequest({'remote': "ip"}), 5) - # Assert that under these conditions, they have been emptied - assert not ip_stats and not ip_block +@pytest.mark.parametrize("endpoint", ['/agents', '/events']) +async def test_check_rate_limits_middleware(endpoint, mock_req): + """Test limits middleware.""" + response = MagicMock() + dispatch_mock = AsyncMock(return_value=response) + middleware = CheckRateLimitsMiddleware(AsyncApp(__name__)) + operation = MagicMock(name="operation") + operation.method = "post" + mock_req.url = MagicMock() + mock_req.url.path = endpoint + rq_x_min = 10000 + api_conf = {'access': { 'max_request_per_minute': rq_x_min }} + with TestContext(operation=operation), \ + patch('api.middlewares.check_rate_limit', return_value=0) as mock_check, \ + patch('api.middlewares.configuration.api_conf', new=api_conf): + await middleware.dispatch(request=mock_req, call_next=dispatch_mock) + if endpoint == '/events': + mock_check.assert_has_calls([ + call('general_request_counter', 'general_current_time', rq_x_min, 6001), + call('events_request_counter', 'events_current_time', MAX_REQUESTS_EVENTS_DEFAULT, 6005), + ], any_order=False) + else: + mock_check.assert_called_once_with( + 'general_request_counter', 'general_current_time', rq_x_min, 6001) + dispatch_mock.assert_awaited() -@patch("api.middlewares.ip_stats", new={"ip": {'timestamp': 5}}) -@patch("api.middlewares.ip_block", new={"ip"}) -@freeze_time(datetime(1970, 1, 1)) @pytest.mark.asyncio -async def test_middlewares_unlock_ip_ko(): - """Test if `unlock_ip` raises an exception if the IP is still blocked.""" - with patch("api.middlewares.raise_if_exc") as raise_mock: - await unlock_ip(DummyRequest({'remote': "ip"}), 5) - raise_mock.assert_called_once_with(WazuhPermissionError(6000)) +@pytest.mark.parametrize("endpoint, return_code_general, return_code_events", [ + ('/agents', 6001, 0), + ('/events', 0, 6005), + ('/events', 6001, 6005), +]) +async def test_check_rate_limits_middleware_ko( + endpoint, return_code_general, return_code_events, mock_req): + """Test limits middleware.""" + return_value_sequence = [return_code_general, return_code_events] + def check_rate_limit_side_effect(*_): + """Side effect 
function.""" + return return_value_sequence.pop(0) + + dispatch_mock = AsyncMock() + middleware = CheckRateLimitsMiddleware(AsyncApp(__name__)) + operation = MagicMock(name="operation") + operation.method = "post" + mock_req.url = MagicMock() + mock_req.url.path = endpoint + rq_x_min = 10000 + api_conf = {'access': { 'max_request_per_minute': rq_x_min }} + with TestContext(operation=operation), \ + patch('api.middlewares.ConnexionRequest.from_starlette_request', + return_value=mock_req) as mock_from, \ + patch('api.middlewares.configuration.api_conf', api_conf), \ + patch('api.middlewares.check_rate_limit', side_effect=check_rate_limit_side_effect), \ + pytest.raises(ProblemException) as exc_info: + await middleware.dispatch(request=mock_req, call_next=dispatch_mock) + mock_from.assert_called_once_with(mock_req) + dispatch_mock.assert_not_awaited() + assert exc_info.value.status == 429 + assert exc_info.value.title == "Permission Denied" + assert exc_info.value.detail == return_code_general if endpoint == 'event' else return_code_events + assert exc_info.ext == mock_req -@pytest.mark.parametrize('request_info', [ - {'path': '/security/user/authenticate', 'method': 'GET', 'remote': 'ip'}, - {'path': '/security/user/authenticate', 'method': 'POST', 'remote': 'ip'}, - {'path': '/security/user/authenticate/run_as', 'method': 'POST', 'remote': 'ip'}, -]) -@pytest.mark.parametrize('stats', [ - {}, - {'ip': {'attempts': 4}}, +@pytest.mark.asyncio +@freeze_time(datetime(1970, 1, 1, 0, 0, 10)) +@pytest.mark.parametrize("json_body, q_password, b_password, b_key, c_user, hash, sec_header, endpoint, method, status_code", [ + (True, None, None, None, None, 'hash', ('basic', 'wazuh:pwd'), '/agents', 'GET', 200), + (False, 'q_pass', 'b_pass', 'b_key', 'wazuh', '', ('basic', 'wazuh:pwd'), LOGIN_ENDPOINT, 'GET', 200), + (False, None, 'b_pass', 'b_key', 'wazuh', '', ('bearer', {'sub':'wazuh'}), RUN_AS_LOGIN_ENDPOINT, 'POST', 403), + (False, 'q_pass', None, 'b_key', 'wazuh', '', ('bearer', {'sub':'wazuh'}), RUN_AS_LOGIN_ENDPOINT, 'POST', 403), + (False, 'q_pass', None, 'b_key', 'wazuh', '', ('other', ''), RUN_AS_LOGIN_ENDPOINT, 'POST', 403), ]) +async def test_access_log(json_body, q_password, b_password, b_key, c_user, + hash, sec_header, endpoint, method, status_code, mock_req): + """Test access_log function.""" + JWT_ALGORITHM = 'ES512' + response = MagicMock() + response.status_code = status_code + + operation = MagicMock(name="operation") + operation.method = "post" + + body = {} + body.update({'password': 'b_password'} if b_password else {}) + body.update({'key': b_key} if b_key else {}) + if json_body: + mock_req._json = MagicMock() + mock_req.json = AsyncMock(return_value=body) + mock_req.query_params = {'password': q_password} if q_password else {} + mock_req.method = method + mock_req.context = { + 'token_info': {'hash_auth_context': hash} if hash else {}, + } + mock_req.context.update({'user': c_user} if c_user else {}) + mock_req.scope = {'path': endpoint} + mock_req.headers = {'content-type': 'None'} + mock_blacke2b = MagicMock() + mock_blacke2b.return_value.hexdigest.return_value = f"blackeb2 {hash}" + with TestContext(operation=operation), \ + patch('api.middlewares.custom_logging') as mock_custom_logging, \ + patch('hashlib.blake2b', mock_blacke2b), \ + patch('api.middlewares.base64.b64decode', return_value=sec_header[1].encode("latin1") \ + if isinstance(sec_header[1], str) else '') as mock_b64decode, \ + patch('api.middlewares.jwt.decode', + return_value=sec_header[1]) as mock_jwt_decode, \ 
+ patch('api.middlewares.generate_keypair', + return_value=(None, None)) as mock_generate_keypair, \ + patch('api.middlewares.logger.warning', + return_value=(None, None)) as mock_log_warning, \ + patch('api.middlewares.AbstractSecurityHandler.get_auth_header_value', + return_value=sec_header) as mock_get_headers: + expected_time = datetime(1970, 1, 1, 0, 0, 10).timestamp() + await access_log(request=mock_req, response=response, prev_time=expected_time) + if json_body: + mock_req.json.assert_awaited_once() + expected_user = UNKNOWN_USER_STRING if not c_user and not sec_header[0] else 'wazuh' + if not c_user: + mock_get_headers.assert_called_once_with(mock_req) + if sec_header[0] == 'basic': + mock_b64decode.assert_called_once_with(sec_header[1]) + elif sec_header[0] == 'bearer': + mock_generate_keypair.assert_called_once() + mock_jwt_decode.assert_called_once_with(sec_header[1], None, [JWT_ALGORITHM]) + + if not hash and endpoint == RUN_AS_LOGIN_ENDPOINT: + mock_blacke2b.assert_called_once() + hash = f"blackeb2 {hash}" + mock_req.query_params.update({'password': '****'} if q_password else {}) + body.update({'password': '****'} if b_key else {}) + body.update({'key': '****'} if b_key and endpoint == '/agents' else {}) + mock_custom_logging.assert_called_once_with( + expected_user, mock_req.client.host, mock_req.method, + endpoint, mock_req.query_params, body, 0.0, response.status_code, + hash_auth_context=hash, headers=mock_req.headers + ) + if status_code == 403 and \ + endpoint in {LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT} and \ + method in {'GET', 'POST'}: + mock_log_warning.assert_called_once_with( + f"IP blocked due to exceeded number of logins attempts: {mock_req.client.host}") + + @pytest.mark.asyncio -async def test_middlewares_prevent_bruteforce_attack(request_info, stats): - """Test `prevent_bruteforce_attack` blocks IPs when reaching max number of attempts.""" - with patch("api.middlewares.ip_stats", new=copy(stats)): - from api.middlewares import ip_block, ip_stats - previous_attempts = ip_stats['ip']['attempts'] if 'ip' in ip_stats else 0 - await prevent_bruteforce_attack(DummyRequest(request_info), - attempts=5) - if stats: - # There were previous attempts. 
This one reached the limit - assert ip_stats['ip']['attempts'] == previous_attempts + 1 - assert 'ip' in ip_block - else: - # There were not previous attempts - assert ip_stats['ip']['attempts'] == 1 - assert 'ip' not in ip_block +@freeze_time(datetime(1970, 1, 1, 0, 0, 10)) +async def test_wazuh_access_logger_middleware(mock_req): + """Test access logger middleware.""" + response = MagicMock() + response.status_code = 200 + dispatch_mock = AsyncMock(return_value=response) + + middleware = WazuhAccessLoggerMiddleware(AsyncApp(__name__), dispatch=dispatch_mock) + operation = MagicMock(name="operation") + operation.method = "post" + + with TestContext(operation=operation), \ + patch('api.middlewares.access_log') as mock_access_log, \ + patch('api.middlewares.ConnexionRequest.from_starlette_request', + return_value=mock_req) as mock_from: + expected_time = datetime(1970, 1, 1, 0, 0, 10).timestamp() + resp = await middleware.dispatch(request=mock_req, call_next=dispatch_mock) + mock_from.assert_called_once_with(mock_req) + mock_access_log.assert_called_once_with(mock_req, response, expected_time) + dispatch_mock.assert_awaited_once_with(mock_req) + assert resp == response -@freeze_time(datetime(1970, 1, 1)) -@pytest.mark.parametrize("current_time,max_requests,current_time_key, current_counter_key,expected_error_args", [ - (-80, 300, 'events_current_time', 'events_request_counter', {}), - (-80, 300, 'general_current_time', 'general_request_counter', {}), - (0, 0, 'events_current_time', 'events_request_counter', { - 'code': 6005, - 'extra_message': 'For POST /events endpoint the limit is set to 0 requests.' - }), - (0, 0, 'general_current_time', 'general_request_counter', {'code': 6001}), -]) @pytest.mark.asyncio -async def test_middlewares_check_rate_limit( - current_time, max_requests, current_time_key, current_counter_key, expected_error_args -): - """Test if the rate limit mechanism triggers when the `max_requests` are reached.""" +async def test_secure_headers_middleware(mock_req): + """Test access logging.""" + response = MagicMock() + dispatch_mock = AsyncMock(return_value=response) + + middleware = SecureHeadersMiddleware(AsyncApp(__name__)) + operation = MagicMock(name="operation") + operation.method = "post" + + with TestContext(operation=operation), patch('api.middlewares.secure_headers') as mock_secure: + secure_headers.framework.starlette = MagicMock() + ret_response = await middleware.dispatch(request=mock_req, call_next=dispatch_mock) + mock_secure.framework.starlette.assert_called_once_with(response) + dispatch_mock.assert_awaited_once_with(mock_req) + assert ret_response == response + - with patch(f"api.middlewares.{current_time_key}", new=current_time): - with patch("api.middlewares.raise_if_exc") as raise_mock: - await check_rate_limit( - DummyRequest({'remote': 'ip'}), - current_time_key=current_time_key, - request_counter_key=current_counter_key, - max_requests=max_requests) - if max_requests == 0: - raise_mock.assert_called_once_with(WazuhTooManyRequests(**expected_error_args)) - - -@patch("api.middlewares.unlock_ip") -@patch("api.middlewares.check_rate_limit") -@pytest.mark.parametrize( - "request_body,expected_calls,call_args", - [ - ({"path": "/events"}, 2, ['events_request_counter', 'events_current_time', 5]), - ({"path": "some_path"}, 1, ['general_request_counter', 'general_current_time', 5]) - ] -) @pytest.mark.asyncio -async def test_middlewares_security_middleware( - rate_limit_mock, unlock_mock, request_body, expected_calls, call_args -): - """Test that all 
security middlewares are correctly set following the API configuration.""" - max_req = 5 - block_time = 10 - request = DummyRequest(request_body) - - with patch( - "api.middlewares.api_conf", - new={'access': {'max_request_per_minute': max_req, 'block_time': block_time}} - ): - with patch("api.middlewares.MAX_REQUESTS_EVENTS_DEFAULT", max_req): - - await security_middleware(request, handler_mock) - - assert rate_limit_mock.call_count == expected_calls - rate_limit_mock.assert_called_with(request, *call_args) - - unlock_mock.assert_called_once_with(request, block_time=block_time) +async def test_check_block_ip_middleware(mock_req): + """Test access logging.""" + response = MagicMock() + dispatch_mock = AsyncMock(return_value=response) + + middleware = CheckBlockedIP(AsyncApp(__name__)) + operation = MagicMock(name="operation") + operation.method = "post" + + with TestContext(operation=operation), \ + patch('api.middlewares.check_blocked_ip') as mock_block_ip: + secure_headers.framework.starlette = MagicMock() + ret_response = await middleware.dispatch(request=mock_req, call_next=dispatch_mock) + mock_block_ip.assert_called_once_with(mock_req) + dispatch_mock.assert_awaited_once_with(mock_req) + assert ret_response == response diff --git a/api/api/test/test_uri_parser.py b/api/api/test/test_uri_parser.py index 6c2586e7126..0d6c384c4c9 100644 --- a/api/api/test/test_uri_parser.py +++ b/api/api/test/test_uri_parser.py @@ -2,57 +2,23 @@ # Created by Wazuh, Inc. . # This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 -from unittest.mock import MagicMock, patch - +from unittest.mock import patch import pytest -from connexion.lifecycle import ConnexionRequest -from api.api_exception import APIError +from api.uri_parser import APIUriParser, LOWER_FIELDS -with patch('wazuh.common.wazuh_uid'): - with patch('wazuh.common.wazuh_gid'): - from api.uri_parser import APIUriParser -query_dict = {'component': 'VaLuE', - 'configuration': 'VaLuE', - 'hash': 'VaLuE', - 'requirement': 'VaLuE', - 'status': 'VaLuE', - 'type': 'VaLuE', - 'section': 'VaLuE', - 'tag': 'VaLuE', - 'level': 'VaLuE', - 'resource': 'VaLuE' - } -LOWER_FIELDS = ['component', 'configuration', 'hash', 'requirement', 'status', 'type', 'section', 'tag', - 'level', 'resource'] +@pytest.mark.parametrize('query_parm', + LOWER_FIELDS) +def test_apiuriparser_resolve_params(query_parm): + """Test Parameter Sanitization.""" -@pytest.mark.parametrize('q_value', - [ - '', - 'q=value', - 'q=;' - ] - ) -def test_apiuriparser_call(q_value): - query_dict.update({'q': q_value}) uri_parser = APIUriParser({}, {}) - function = MagicMock() - request = ConnexionRequest(url=q_value, - method='method_value', - query=query_dict - ) - expected_request = ConnexionRequest(url=q_value, - method='method_value', - query={k: v.lower() if k in LOWER_FIELDS else v for k, v in query_dict.items()} - ) - # uri_parser(function)(request): - # It's calling the __call__ class method. - # The wrapper is being parametrized by the second parameter between brackets. 
- if ';' in q_value: - with pytest.raises(APIError, match='2009 .*'): - uri_parser(function)(request) - else: - uri_parser(function)(request) - assert request.query == expected_request.query + with patch('connexion.uri_parsing.AbstractURIParser.resolve_params') as resolv_mock: + def side_effect_resolve_param(params, _in): + return params + + resolv_mock.side_effect = side_effect_resolve_param + parm = uri_parser.resolve_params({query_parm: 'ValuE'}, 'query') + resolv_mock.assert_called_once_with(parm, 'query') diff --git a/api/api/test/test_util.py b/api/api/test/test_util.py index d21ed764088..5b01d7e58fc 100644 --- a/api/api/test/test_util.py +++ b/api/api/test/test_util.py @@ -4,19 +4,21 @@ import asyncio from datetime import datetime, date -from unittest.mock import patch, ANY +from unittest.mock import patch, ANY, call import pytest from connexion import ProblemException from api import util -from api.api_exception import APIError -from wazuh.core.exception import WazuhError, WazuhPermissionError, WazuhResourceNotFound, WazuhInternalError +from api import alogging +from wazuh.core.exception import WazuhError, WazuhPermissionError, WazuhResourceNotFound, \ + WazuhInternalError class TestClass: + """Mock swagger type.""" __test__ = False - + def __init__(self, origin=None): self.swagger_types = { 'api_response': 'test_api_response', @@ -30,37 +32,6 @@ def __init__(self, origin=None): self.__origin__ = origin -@pytest.mark.parametrize("size_input, expected_size", [ - ("1m", 1024 * 1024), - ("1M", 1024 * 1024), - ("1024k", 1024 * 1024), - ("1024K", 1024 * 1024), - ("5m", 5 * 1024 * 1024) -]) -def test_APILoggerSize(size_input, expected_size): - """Assert `APILoggerSize` class returns the correct number of bytes depending on the given unit. - - Parameters - ---------- - size_input : str - Input for the class constructor. - expected_size : int - Expected number of bytes after translating the input. - """ - assert util.APILoggerSize(size_input).size == expected_size - - -def test_APILoggerSize_exceptions(): - """Assert `APILoggerSize` class returns the correct exceptions when the given size is not valid.""" - # Test invalid units - with pytest.raises(APIError, match="2011.*expected format.*"): - util.APILoggerSize("3435j") - - # Test min value - with pytest.raises(APIError, match="2011.*Minimum value.*"): - util.APILoggerSize("1k") - - @pytest.mark.parametrize('item, is_transformed', [ (date.today(), False), (datetime.today(), True) diff --git a/api/api/uri_parser.py b/api/api/uri_parser.py index 8e3f91d920a..7d085aaab2d 100644 --- a/api/api/uri_parser.py +++ b/api/api/uri_parser.py @@ -2,53 +2,21 @@ # Created by Wazuh, Inc. . 
# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 -import functools +from connexion.uri_parsing import OpenAPIURIParser -import connexion +LOWER_FIELDS = ('component', 'configuration', 'hash', 'requirement', 'status', 'type', + 'section', 'tag', 'level', 'resource') -from api.api_exception import APIError -from api.util import parse_api_param, raise_if_exc +class APIUriParser(OpenAPIURIParser): + """Sanitize parameters class.""" + def resolve_params(self, params, _in): + """Sanitizes the lower_fields parameters converting keys and values to lowercase.""" -class APIUriParser(connexion.decorators.uri_parsing.OpenAPIURIParser): - def __call__(self, function): - """ - :type function: types.FunctionType - :rtype: types.FunctionType - """ + # Transform to lowercase the values for query parameter's spec.yaml enums + params.update( + {k.lower(): [list_item.lower() for list_item in v] if isinstance(v, list) else v.lower() + for k, v in params.items() if k in LOWER_FIELDS} + ) - @functools.wraps(function) - def wrapper(request): - def coerce_dict(md): - """ MultiDict -> dict of lists - """ - try: - return md.to_dict(flat=False) - except AttributeError: - return dict(md.items()) - - # Raise exception if semicolon is used in q parameter - if 'q' in request.query.keys(): - q = parse_api_param(request.url, 'q') - if q: - if ';' in q: - raise_if_exc(APIError(2009)) - - # Transform to lowercase the values for query parameter's spec.yaml enums - lower_fields = ['component', 'configuration', 'hash', 'requirement', 'status', 'type', 'section', 'tag', - 'level', 'resource'] - request.query.update( - {k.lower(): [list_item.lower() for list_item in v] if isinstance(v, list) else v.lower() - for k, v in request.query.items() if k in lower_fields}) - - query = coerce_dict(request.query) - path_params = coerce_dict(request.path_params) - form = coerce_dict(request.form) - - request.query = self.resolve_query(query) - request.path_params = self.resolve_path(path_params) - request.form = self.resolve_form(form) - response = function(request) - return response - - return wrapper + return super().resolve_params(params, _in) diff --git a/api/api/util.py b/api/api/util.py index 87e80fcab68..9b122b95436 100644 --- a/api/api/util.py +++ b/api/api/util.py @@ -2,9 +2,9 @@ # Created by Wazuh, Inc. . # This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 +import logging import datetime import os -import re import typing from functools import wraps from typing import Union @@ -12,29 +12,10 @@ import six from connexion import ProblemException -from api.api_exception import APIError from wazuh.core import common, exception from wazuh.core.cluster.utils import running_in_master_node - -class APILoggerSize: - size_regex = re.compile(r"(\d+)([KM])") - unit_conversion = { - 'K': 1024, - 'M': 1024 ** 2 - } - - def __init__(self, size_string: str): - size_string = size_string.upper() - try: - size, unit = self.size_regex.match(size_string).groups() - except AttributeError: - raise APIError(2011, details="Size value does not match the expected format: (Available" - " units: K (kilobytes), M (megabytes). For instance: 45M") from None - - self.size = int(size) * self.unit_conversion[unit] - if self.size < self.unit_conversion['M']: - raise APIError(2011, details=f"Minimum value for size is 1M. 
Current: {size_string}") +logger = logging.getLogger('wazuh-api') def serialize(item: object) -> object: @@ -359,8 +340,8 @@ def _create_problem(exc: Exception, code: int = None): Parameters ---------- exc : Exception - If `exc` is an instance of `WazuhException` it will be casted into a ProblemException, otherwise it will be - raised. + If `exc` is an instance of `WazuhException` it will be casted into a ProblemException, + otherwise it will be raised. code : int HTTP status code for this response. diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index 8a225da4083..734f7b07388 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -10,10 +10,6 @@ import sys import warnings -from api.constants import API_LOG_PATH -from wazuh.core.wlogging import TimeBasedFileRotatingHandler, SizeBasedFileRotatingHandler -from wazuh.core import pyDaemonModule - SSL_DEPRECATED_MESSAGE = 'The `{ssl_protocol}` SSL protocol is deprecated.' API_MAIN_PROCESS = 'wazuh-apid' @@ -21,6 +17,8 @@ API_AUTHENTICATION_PROCESS = 'wazuh-apid_auth' API_SECURITY_EVENTS_PROCESS = 'wazuh-apid_events' +logger = None + def spawn_process_pool(): """Spawn general process pool child.""" @@ -49,81 +47,190 @@ def spawn_authentication_pool(): signal.signal(signal.SIGINT, signal.SIG_IGN) -def start(): +def assign_wazuh_ownership(filepath: str): + """Create a file if it doesn't exist and assign ownership. + + Parameters + ---------- + filepath : str + File to assign ownership. + """ + if not os.path.isfile(filepath): + f = open(filepath, "w") + f.close() + if os.stat(filepath).st_gid != common.wazuh_gid() or \ + os.stat(filepath).st_uid != common.wazuh_uid(): + os.chown(filepath, common.wazuh_uid(), common.wazuh_gid()) + + +def configure_ssl(params): + """Configure https files and permission, and set the uvicorn dictionary configuration keys. + + Parameters + ---------- + uvicorn_params : dict + uvicorn parameter configuration dictionary. + """ + from api.constants import CONFIG_FILE_PATH + + try: + # Generate SSL if it does not exist and HTTPS is enabled + if not os.path.exists(api_conf['https']['key']) \ + or not os.path.exists(api_conf['https']['cert']): + logger.info('HTTPS is enabled but cannot find the private key and/or certificate. 
' + 'Attempting to generate them') + private_key = generate_private_key(api_conf['https']['key']) + logger.info( + f"Generated private key file in WAZUH_PATH/{to_relative_path(api_conf['https']['key'])}") + generate_self_signed_certificate(private_key, api_conf['https']['cert']) + logger.info( + f"Generated certificate file in WAZUH_PATH/{to_relative_path(api_conf['https']['cert'])}") + + # Load SSL context + allowed_ssl_protocols = { + 'tls': ssl.PROTOCOL_TLS, + 'tlsv1': ssl.PROTOCOL_TLSv1, + 'tlsv1.1': ssl.PROTOCOL_TLSv1_1, + 'tlsv1.2': ssl.PROTOCOL_TLSv1_2, + 'auto': ssl.PROTOCOL_TLS_SERVER + } + + config_ssl_protocol = api_conf['https']['ssl_protocol'] + ssl_protocol = allowed_ssl_protocols[config_ssl_protocol.lower()] + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=DeprecationWarning) + if ssl_protocol in (ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1): + logger.warning(SSL_DEPRECATED_MESSAGE.format(ssl_protocol=config_ssl_protocol)) + + # Check and assign ownership to wazuh user for server.key and server.crt files + assign_wazuh_ownership(api_conf['https']['key']) + assign_wazuh_ownership(api_conf['https']['cert']) + + params['ssl_version'] = ssl.PROTOCOL_TLS_SERVER + + if api_conf['https']['use_ca']: + params['ssl_cert_reqs'] = ssl.CERT_REQUIRED + params['ssl_ca_certs'] = api_conf['https']['ca'] + + params['ssl_certfile'] = api_conf['https']['cert'] + params['ssl_keyfile'] = api_conf['https']['key'] + + # Load SSL ciphers if any has been specified + if api_conf['https']['ssl_ciphers']: + params['ssl_ciphers'] = api_conf['https']['ssl_ciphers'].upper() + + except ssl.SSLError as exc: + error = APIError( + 2003, details='Private key does not match with the certificate') + logger.error(error) + raise error from exc + except IOError as exc: + if exc.errno == 22: + error = APIError(2003, details='PEM phrase is not correct') + logger.error(error) + raise error from exc + elif exc.errno == 13: + error = APIError(2003, + details='Ensure the certificates have the correct permissions') + logger.error(error) + raise error from exc + else: + msg = f'Wazuh API SSL ERROR. Please, ensure ' \ + f'if path to certificates is correct in the configuration ' \ + f'file WAZUH_PATH/{to_relative_path(CONFIG_FILE_PATH)}' + print(msg) + logger.error(msg) + raise exc from exc + + +def start(params: dict): """Run the Wazuh API. - If another Wazuh API is running, this function fails. - This function exits with 0 if successful or 1 if failed because the API was already running. + If another Wazuh API is running, this function will fail because uvicorn server will + not be able to create server processes in the same port. + The function creates the pool processes, the AsyncApp instance, setups the API spec.yaml, + the middleware classes, the error_handlers, the lifespan, and runs the uvicorn ASGI server. + + Parameters + ---------- + params : dict + uvicorn parameter configuration dictionary. 
""" try: check_database_integrity() except Exception as db_integrity_exc: - raise APIError(2012, details=str(db_integrity_exc)) + raise APIError(2012, details=str(db_integrity_exc)) from db_integrity_exc # Spawn child processes with their own needed imports if 'thread_pool' not in common.mp_pools.get(): loop = asyncio.get_event_loop() loop.run_until_complete( - asyncio.wait([loop.run_in_executor(pool, getattr(sys.modules[__name__], f'spawn_{name}')) + asyncio.wait([loop.run_in_executor(pool, + getattr(sys.modules[__name__], f'spawn_{name}')) for name, pool in common.mp_pools.get().items()])) # Set up API - asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) - app = connexion.AioHttpApp(__name__, host=api_conf['host'], - port=api_conf['port'], - specification_dir=os.path.join(api_path[0], 'spec'), - options={"swagger_ui": False, 'uri_parser_class': APIUriParser}, - only_one_api=True - ) + app = AsyncApp( + __name__, + specification_dir=os.path.join(api_path[0], 'spec'), + swagger_ui_options=SwaggerUIOptions(swagger_ui=False), + pythonic_params=True, + lifespan=lifespan_handler, + uri_parser_class=APIUriParser + ) app.add_api('spec.yaml', - arguments={'title': 'Wazuh API', - 'protocol': 'https' if api_conf['https']['enabled'] else 'http', - 'host': api_conf['host'], - 'port': api_conf['port'] - }, + arguments={ + 'title': 'Wazuh API', + 'protocol': 'https' if api_conf['https']['enabled'] else 'http', + 'host': params['host'], + 'port': params['port']}, strict_validation=True, - validate_responses=False, - pass_context_arg_name='request', - options={"middlewares": [response_postprocessing, security_middleware, request_logging, - set_secure_headers]}) + validate_responses=False + ) # Maximum body size that the API can accept (bytes) - app.app._client_max_size = configuration.api_conf['max_upload_size'] + if api_conf['access']['max_request_per_minute'] > 0: + app.add_middleware(CheckRateLimitsMiddleware, MiddlewarePosition.BEFORE_SECURITY) + app.add_middleware(CheckBlockedIP, MiddlewarePosition.BEFORE_SECURITY) + app.add_middleware(WazuhAccessLoggerMiddleware) + app.add_middleware(SecureHeadersMiddleware) + if api_conf['max_upload_size']: + app.add_middleware(ContentSizeLimitMiddleware, max_content_size=api_conf['max_upload_size']) + app.add_error_handler(ContentSizeExceeded, error_handler.content_size_handler) # Enable CORS if api_conf['cors']['enabled']: - import aiohttp_cors - cors = aiohttp_cors.setup(app.app, defaults={ - api_conf['cors']['source_route']: aiohttp_cors.ResourceOptions( - expose_headers=api_conf['cors']['expose_headers'], - allow_headers=api_conf['cors']['allow_headers'], - allow_credentials=api_conf['cors']['allow_credentials'] - ) - }) - # Configure CORS on all endpoints. 
- for route in list(app.app.router.routes()): - cors.add(route) - - # Enable cache plugin - if api_conf['cache']['enabled']: - setup_cache(app.app) - - # Add application signals - app.app.on_response_prepare.append(modify_response_headers) - app.app.cleanup_ctx.append(register_background_tasks) + app.add_middleware( + CORSMiddleware, + position=MiddlewarePosition.BEFORE_EXCEPTION, + allow_origins=api_conf['cors']['source_route'], + expose_headers=api_conf['cors']['expose_headers'], + allow_headers=api_conf['cors']['allow_headers'], + allow_credentials=api_conf['cors']['allow_credentials'], + ) + + # Add error handlers to format exceptions + app.add_error_handler(JWTError, error_handler.jwt_error_handler) + app.add_error_handler(Unauthorized, error_handler.unauthorized_error_handler) + app.add_error_handler(HTTPException, error_handler.http_error_handler) + app.add_error_handler(429, error_handler.exceeded_requests_handler) + app.add_error_handler(error_handler.BlockedIPException, error_handler.blocked_ip_handler) + app.add_error_handler(ProblemException, error_handler.problem_error_handler) + + # Add application signals TO BE MODIFIED AFTER IMPLEMENTING CTI IN CONNEXION 3.0 + # app.app.on_response_prepare.append(modify_response_headers) + # app.app.cleanup_ctx.append(register_background_tasks) # API configuration logging logger.debug(f'Loaded API configuration: {api_conf}') logger.debug(f'Loaded security API configuration: {security_conf}') - # Start API + # Start uvicorn server + try: - app.run(port=api_conf['port'], - host=api_conf['host'], - ssl_context=ssl_context, - access_log_class=alogging.AccessLogger, - use_default_access_log=True - ) + uvicorn.run(app, **params) + except OSError as exc: if exc.errno == 98: error = APIError(2010) @@ -136,7 +243,7 @@ def start(): def print_version(): from wazuh.core.cluster import __version__, __author__, __wazuh_name__, __licence__ - print("\n{} {} - {}\n\n{}".format(__wazuh_name__, __version__, __author__, __licence__)) + print('\n{} {} - {}\n\n{}'.format(__wazuh_name__, __version__, __author__, __licence__)) def test_config(config_file: str): @@ -148,7 +255,6 @@ def test_config(config_file: str): Path of the file """ try: - from api.configuration import read_yaml_config read_yaml_config(config_file=config_file) except Exception as exc: print(f"Configuration not valid. 
ERROR: {exc}") @@ -169,19 +275,47 @@ def exit_handler(signum, frame): pyDaemonModule.delete_pid(API_MAIN_PROCESS, api_pid) +def add_debug2_log_level_and_error(): + """Add a new debug level used by wazuh api and framework.""" + + logging.DEBUG2 = 6 + + def debug2(self, message, *args, **kws): + if self.isEnabledFor(logging.DEBUG2): + self._log(logging.DEBUG2, message, args, **kws) + + def error(self, msg, *args, **kws): + if self.isEnabledFor(logging.ERROR): + if 'exc_info' not in kws: + kws['exc_info'] = self.isEnabledFor(logging.DEBUG2) + self._log(logging.ERROR, msg, args, **kws) + + logging.addLevelName(logging.DEBUG2, "DEBUG2") + + logging.Logger.debug2 = debug2 + logging.Logger.error = error + + if __name__ == '__main__': parser = argparse.ArgumentParser() - #################################################################################################################### - parser.add_argument('-f', help="Run in foreground", action='store_true', dest='foreground') - parser.add_argument('-V', help="Print version", action='store_true', dest="version") - parser.add_argument('-t', help="Test configuration", action='store_true', dest='test_config') - parser.add_argument('-r', help="Run as root", action='store_true', dest='root') - parser.add_argument('-c', help="Configuration file to use", type=str, metavar='config', dest='config_file') - parser.add_argument('-d', help="Enable debug messages. Use twice to increase verbosity.", action='count', + ######################################################################################### + parser.add_argument('-f', help="Run in foreground", + action='store_true', dest='foreground') + parser.add_argument('-V', help="Print version", + action='store_true', dest="version") + parser.add_argument('-t', help="Test configuration", + action='store_true', dest='test_config') + parser.add_argument('-r', help="Run as root", + action='store_true', dest='root') + parser.add_argument('-c', help="Configuration file to use", + type=str, metavar='config', dest='config_file') + parser.add_argument('-d', help="Enable debug messages. Use twice to increase verbosity.", + action='count', dest='debug_level') args = parser.parse_args() + from api.configuration import read_yaml_config if args.version: version() sys.exit(0) @@ -190,166 +324,86 @@ def exit_handler(signum, frame): test_config(args.config_file) sys.exit(0) + import asyncio import logging + import logging.config + import ssl + import uvicorn + + from connexion import AsyncApp + from connexion.options import SwaggerUIOptions + from connexion.exceptions import Unauthorized, HTTPException, ProblemException + from connexion.middleware import MiddlewarePosition + + from starlette.middleware.cors import CORSMiddleware + + from content_size_limit_asgi import ContentSizeLimitMiddleware + from content_size_limit_asgi.errors import ContentSizeExceeded + + from jose import JWTError + + from api import error_handler, __path__ as api_path from api.api_exception import APIError - from wazuh.core import common - from api import alogging, configuration - from api.api_exception import APIError - from api.util import APILoggerSize, to_relative_path - - from wazuh.core import common, utils - - - def set_logging(log_path=f'{API_LOG_PATH}.log', foreground_mode=False, debug_mode='info'): - """Set up logging for the API. - - Parameters - ---------- - log_path : str - Path of the log file. - foreground_mode : bool - If True, the log will be printed to stdout. - debug_mode : str - Debug level. 
Possible values: disabled, info, warning, error, debug, debug2. - """ - if not api_conf['logs']['max_size']['enabled']: - custom_handler = TimeBasedFileRotatingHandler(filename=log_path, when='midnight') - else: - max_size = APILoggerSize(api_conf['logs']['max_size']['size']).size - custom_handler = SizeBasedFileRotatingHandler(filename=log_path, maxBytes=max_size, backupCount=1) - - for logger_name in ('connexion.aiohttp_app', 'connexion.apis.aiohttp_api', 'wazuh-api'): - api_logger = alogging.APILogger( - log_path=log_path, foreground_mode=foreground_mode, logger_name=logger_name, - debug_level='info' if logger_name != 'wazuh-api' and debug_mode != 'debug2' else debug_mode - ) - api_logger.setup_logger(custom_handler) - if os.path.exists(log_path): - os.chown(log_path, common.wazuh_uid(), common.wazuh_gid()) - os.chmod(log_path, 0o660) + from api.configuration import api_conf, security_conf, generate_private_key, \ + generate_self_signed_certificate + from api.middlewares import SecureHeadersMiddleware, CheckRateLimitsMiddleware, \ + CheckBlockedIP, WazuhAccessLoggerMiddleware, lifespan_handler + from api.util import to_relative_path + from api.uri_parser import APIUriParser + from api.constants import API_LOG_PATH + from api.alogging import set_logging - try: - from wazuh.core import utils - from api import alogging, configuration + from wazuh.rbac.orm import check_database_integrity + from wazuh.core import pyDaemonModule, common, utils + try: if args.config_file is not None: - configuration.api_conf.update(configuration.read_yaml_config(config_file=args.config_file)) - api_conf = configuration.api_conf - security_conf = configuration.security_conf + api_conf.update(read_yaml_config(config_file=args.config_file)) except APIError as e: print(f"Error when trying to start the Wazuh API. {e}") sys.exit(1) - # Set up logger + # Configure uvicorn parameters dictionary + uvicorn_params = {} + uvicorn_params['host'] = api_conf['host'] + uvicorn_params['port'] = api_conf['port'] + uvicorn_params['loop'] = 'uvloop' + + # Set up logger file try: - plain_log = 'plain' in api_conf['logs']['format'] - json_log = 'json' in api_conf['logs']['format'] - - if plain_log: - set_logging(log_path=f'{API_LOG_PATH}.log', debug_mode=api_conf['logs']['level'], - foreground_mode=args.foreground) - if json_log: - set_logging(log_path=f'{API_LOG_PATH}.json', debug_mode=api_conf['logs']['level'], - foreground_mode=args.foreground and not plain_log) - except APIError as api_log_error: - print(f"Error when trying to start the Wazuh API. 
{api_log_error}") + uvicorn_params['log_config'] = set_logging(log_filepath=API_LOG_PATH, + log_level=api_conf['logs']['level'].upper(), + foreground_mode=args.foreground) + except APIError as e: + print(f"Configuration error in the API log format: {api_conf['logs']['format']}.") sys.exit(1) - logger = logging.getLogger('wazuh-api') + # set permission on log files + for handler in uvicorn_params['log_config']['handlers'].values(): + if 'filename' in handler: + assign_wazuh_ownership(handler['filename']) + os.chmod(handler['filename'], 0o660) - import asyncio - import ssl - - import connexion - import uvloop - from aiohttp_cache import setup_cache - from api import __path__ as api_path - # noinspection PyUnresolvedReferences - from api.constants import CONFIG_FILE_PATH - from api.middlewares import security_middleware, response_postprocessing, request_logging, set_secure_headers - from api.signals import modify_response_headers, register_background_tasks - from api.uri_parser import APIUriParser - from api.util import to_relative_path - from wazuh.rbac.orm import check_database_integrity + # Configure and create the wazuh-api logger + logging.config.dictConfig(uvicorn_params['log_config']) + add_debug2_log_level_and_error() + logger = logging.getLogger('wazuh-api') # Check deprecated options. To delete after expected versions if 'use_only_authd' in api_conf: del api_conf['use_only_authd'] - logger.warning("'use_only_authd' option was deprecated on v4.3.0. Wazuh Authd will always be used") + logger.warning( + "'use_only_authd' option was deprecated on v4.3.0. Wazuh Authd will always be used") if 'path' in api_conf['logs']: del api_conf['logs']['path'] - logger.warning("Log 'path' option was deprecated on v4.3.0. Default path will always be used: " - f"{API_LOG_PATH}.") + logger.warning( + "Log 'path' option was deprecated on v4.3.0. Default path will always be used: " + f"{API_LOG_PATH}.") - # Configure https - ssl_context = None + # Configure ssl files if api_conf['https']['enabled']: - try: - # Generate SSL if it does not exist and HTTPS is enabled - if not os.path.exists(api_conf['https']['key']) or not os.path.exists(api_conf['https']['cert']): - logger.info('HTTPS is enabled but cannot find the private key and/or certificate. 
' - 'Attempting to generate them') - private_key = configuration.generate_private_key(api_conf['https']['key']) - logger.info( - f"Generated private key file in WAZUH_PATH/{to_relative_path(api_conf['https']['key'])}") - configuration.generate_self_signed_certificate(private_key, api_conf['https']['cert']) - logger.info( - f"Generated certificate file in WAZUH_PATH/{to_relative_path(api_conf['https']['cert'])}") - - # Load SSL context - allowed_ssl_protocols = { - 'tls': ssl.PROTOCOL_TLS, - 'tlsv1': ssl.PROTOCOL_TLSv1, - 'tlsv1.1': ssl.PROTOCOL_TLSv1_1, - 'tlsv1.2': ssl.PROTOCOL_TLSv1_2, - 'auto': ssl.PROTOCOL_TLS_SERVER - } - - config_ssl_protocol = api_conf['https']['ssl_protocol'] - ssl_protocol = allowed_ssl_protocols[config_ssl_protocol.lower()] - - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) - if ssl_protocol in (ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1): - logger.warning(SSL_DEPRECATED_MESSAGE.format(ssl_protocol=config_ssl_protocol)) - ssl_context = ssl.SSLContext(protocol=ssl_protocol) - - if api_conf['https']['use_ca']: - ssl_context.verify_mode = ssl.CERT_REQUIRED - ssl_context.load_verify_locations(api_conf['https']['ca']) - - ssl_context.load_cert_chain(certfile=api_conf['https']['cert'], keyfile=api_conf['https']['key']) - - # Load SSL ciphers if any has been specified - if api_conf['https']['ssl_ciphers']: - ssl_ciphers = api_conf['https']['ssl_ciphers'].upper() - try: - ssl_context.set_ciphers(ssl_ciphers) - except ssl.SSLError: - error = APIError(2003, details='SSL ciphers cannot be selected') - logger.error(error) - raise error - - except ssl.SSLError: - error = APIError(2003, details='Private key does not match with the certificate') - logger.error(error) - raise error - except IOError as exc: - if exc.errno == 22: - error = APIError(2003, details='PEM phrase is not correct') - logger.error(error) - raise error - elif exc.errno == 13: - error = APIError(2003, details='Ensure the certificates have the correct permissions') - logger.error(error) - raise error - else: - msg = f'Wazuh API SSL ERROR. Please, ensure if path to certificates is correct in the configuration ' \ - f'file WAZUH_PATH/{to_relative_path(CONFIG_FILE_PATH)}' - print(msg) - logger.error(msg) - raise exc + configure_ssl(uvicorn_params) # Check for unused PID files utils.clean_pid_files(API_MAIN_PROCESS) @@ -358,7 +412,7 @@ def set_logging(log_path=f'{API_LOG_PATH}.log', foreground_mode=False, debug_mod if not args.foreground: pyDaemonModule.pyDaemon() else: - print('Starting API in foreground') + logger.info('Starting API in foreground') # Drop privileges to wazuh if not args.root: @@ -366,15 +420,14 @@ def set_logging(log_path=f'{API_LOG_PATH}.log', foreground_mode=False, debug_mod os.setgid(common.wazuh_gid()) os.setuid(common.wazuh_uid()) else: - print('Starting API as root') + logger.info('Starting API as root') pid = os.getpid() pyDaemonModule.create_pid(API_MAIN_PROCESS, pid) signal.signal(signal.SIGTERM, exit_handler) - try: - start() + start(uvicorn_params) except APIError as e: print(f"Error when trying to start the Wazuh API. 
{e}") sys.exit(1) diff --git a/api/test/integration/env/base/manager/manager.Dockerfile b/api/test/integration/env/base/manager/manager.Dockerfile index 8e65bcd3e43..a750589786e 100644 --- a/api/test/integration/env/base/manager/manager.Dockerfile +++ b/api/test/integration/env/base/manager/manager.Dockerfile @@ -12,7 +12,9 @@ ADD base/manager/supervisord.conf /etc/supervisor/conf.d/ RUN mkdir wazuh && curl -sL https://github.com/wazuh/wazuh/tarball/${WAZUH_BRANCH} | tar zx --strip-components=1 -C wazuh COPY base/manager/preloaded-vars.conf /wazuh/etc/preloaded-vars.conf RUN /wazuh/install.sh - +# START TO BE DELETED after a the new wazuh embedded version includes all the required dependencies. +RUN /var/ossec/framework/python/bin/pip3 install -r /wazuh/framework/requirements.txt +# END TO BE DELETED COPY base/manager/entrypoint.sh /scripts/entrypoint.sh # HEALTHCHECK diff --git a/api/test/integration/env/docker-compose.yml b/api/test/integration/env/docker-compose.yml index d400ea982c3..1e4e08889e7 100644 --- a/api/test/integration/env/docker-compose.yml +++ b/api/test/integration/env/docker-compose.yml @@ -13,6 +13,11 @@ services: volumes: - ./configurations/tmp/manager:/tmp_volume - ./tools/:/tools + # Folders to be used in local development environments + # - ${WAZUH_LOCAL_PATH}/framework/wazuh:/var/ossec/framework/python/lib/python3.10/site-packages/wazuh + # - ${WAZUH_LOCAL_PATH}/api/api:/var/ossec/framework/python/lib/python3.10/site-packages/api + # - ${WAZUH_LOCAL_PATH}/framework/scripts:/var/ossec/framework/scripts + # - ${WAZUH_LOCAL_PATH}/api/scripts/wazuh-apid.py:/var/ossec/api/scripts/wazuh-apid.py entrypoint: - /scripts/entrypoint.sh - wazuh-master @@ -28,6 +33,11 @@ services: volumes: - ./configurations/tmp/manager:/tmp_volume - ./tools/:/tools + # Folders to be used in local development environments + # - ${WAZUH_LOCAL_PATH}/framework/wazuh:/var/ossec/framework/python/lib/python3.10/site-packages/wazuh + # - ${WAZUH_LOCAL_PATH}/api/api:/var/ossec/framework/python/lib/python3.10/site-packages/api + # - ${WAZUH_LOCAL_PATH}/framework/scripts:/var/ossec/framework/scripts + # - ${WAZUH_LOCAL_PATH}/api/scripts/wazuh-apid.py:/var/ossec/api/scripts/wazuh-apid.py entrypoint: - /scripts/entrypoint.sh - wazuh-master @@ -44,6 +54,11 @@ services: volumes: - ./configurations/tmp/manager:/tmp_volume - ./tools/:/tools + # Folders to be used in local development environments + # - ${WAZUH_LOCAL_PATH}/framework/wazuh:/var/ossec/framework/python/lib/python3.10/site-packages/wazuh + # - ${WAZUH_LOCAL_PATH}/api/api:/var/ossec/framework/python/lib/python3.10/site-packages/api + # - ${WAZUH_LOCAL_PATH}/framework/scripts:/var/ossec/framework/scripts + # - ${WAZUH_LOCAL_PATH}/api/scripts/wazuh-apid.py:/var/ossec/api/scripts/wazuh-apid.py entrypoint: - /scripts/entrypoint.sh - wazuh-master diff --git a/api/test/integration/test_cdb_list_endpoints.tavern.yaml b/api/test/integration/test_cdb_list_endpoints.tavern.yaml index 5a6a80a1abe..51f1a81c934 100644 --- a/api/test/integration/test_cdb_list_endpoints.tavern.yaml +++ b/api/test/integration/test_cdb_list_endpoints.tavern.yaml @@ -579,7 +579,7 @@ stages: headers: Authorization: "Bearer {test_login_token}" response: - status_code: 400 + status_code: 404 # GET /lists/files/{filename} - name: Try to get CDB list (invalid file 2) @@ -732,7 +732,7 @@ stages: headers: Authorization: "Bearer {test_login_token}" response: - status_code: 400 + status_code: 404 --- test_name: DELETE /lists/files/{filename} @@ -789,4 +789,4 @@ stages: headers: Authorization: 
"Bearer {test_login_token}" response: - status_code: 400 + status_code: 404 diff --git a/api/test/integration/test_manager_endpoints.tavern.yaml b/api/test/integration/test_manager_endpoints.tavern.yaml index 5081aa0fcd1..776272e06bd 100644 --- a/api/test/integration/test_manager_endpoints.tavern.yaml +++ b/api/test/integration/test_manager_endpoints.tavern.yaml @@ -678,6 +678,47 @@ stages: total_affected_items: 1 total_failed_items: 0 +--- +test_name: GET /manager/validation (KO) + +stages: + + #### Upload corrupted rules file + # PUT /rules/files + - name: Upload corrupted + request: + verify: False + url: "{protocol:s}://{host:s}:{port:d}/rules/files/new-rules_corrupted.xml" + method: PUT + data: "{corrupted_rules_file}" + headers: + Authorization: "Bearer {test_login_token}" + content-type: application/octet-stream + response: + status_code: 200 + + # GET /manager/configuration/validation + - name: Request validation + request: + verify: False + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/validation" + method: GET + headers: + Authorization: "Bearer {test_login_token}" + response: + status_code: 200 + json: + error: 1 + data: + affected_items: [] + failed_items: + - error: + code: 1908 + id: + - !anystr + total_affected_items: 0 + total_failed_items: 1 + --- test_name: GET /manager/configuration/{component}/{configuration} @@ -1196,9 +1237,9 @@ stages: Authorization: "Bearer {test_login_token}" content-type: application/json response: - status_code: 406 + status_code: 415 json: - error: 6002 + title: Unsupported Media Type # GET /manager/configuration/ - name: Ensure the config didn't change diff --git a/framework/requirements-dev.txt b/framework/requirements-dev.txt index 5046aa73f94..aa679d80d23 100644 --- a/framework/requirements-dev.txt +++ b/framework/requirements-dev.txt @@ -3,8 +3,7 @@ freezegun==0.3.15 py==1.11.0 pytest==7.3.1 -pytest-aiohttp==1.0.4 pytest-asyncio==0.18.1 pytest-html==2.1.1 pytest-trio==0.8.0 -tavern==1.23.5 \ No newline at end of file +tavern==1.23.5 diff --git a/framework/requirements.txt b/framework/requirements.txt index b3b30e16295..38f49aeeb6d 100644 --- a/framework/requirements.txt +++ b/framework/requirements.txt @@ -1,12 +1,8 @@ -aiohttp==3.9.3 -aiohttp-cache==2.2.0 -aiohttp-cors==0.7.0 -aiohttp-jinja2==1.5.1 -aioredis==1.3.1 -aiosignal==1.2.0 +# TO BE DELETED AFTER IMPLEMENTING CTI IN CONNEXION 3.0 +aiohttp==3.9.1 +# END TO BE DELETED asn1crypto==1.3.0 -async-timeout==4.0.2 -attrs==20.3.0 +attrs==23.1.0 azure-common==1.1.25 azure-storage-blob==2.1.0 azure-storage-common==2.1.0 @@ -19,15 +15,15 @@ chardet==3.0.4 charset-normalizer==2.0.4 click==8.1.3 clickclick==20.10.2 -connexion==2.14.2 +connexion==3.0.5 cryptography==42.0.4 -Cython==0.29.21 +Cython==0.29.36 defusedxml==0.6.0 docker==6.0.0 docker-pycreds==0.4.0 docutils==0.15.2 +ecdsa==0.16.1 envparse==0.2.0 -Flask==2.2.5 frozenlist==1.2.0 future==0.18.3 google-api-core==1.30.0 @@ -41,21 +37,20 @@ google-resumable-media==1.3.1 greenlet==2.0.2 grpc-google-iam-v1==0.12.3 grpcio==1.58.0 -hiredis==2.2.3 idna==2.9 -importlib-metadata==3.10.1 +importlib-metadata==6.8.0 inflection==0.3.1 itsdangerous==2.0.0 Jinja2==3.1.3 jmespath==0.9.5 -jsonschema==2.6.0 +jsonschema==4.20.0 libcst==0.3.20 MarkupSafe==2.1.2 more-itertools==8.2.0 multidict==5.2.0 mypy-extensions==0.4.3 numpy==1.26.0 -openapi-spec-validator==0.2.6 +openapi-spec-validator==0.7.1 packaging==20.9 pathlib==1.0.1 protobuf==3.19.6 @@ -74,7 +69,7 @@ PyYAML==5.4.1 requests==2.31.0 rsa==4.7.2 s3transfer==0.4.2 -secure==0.2.1 
+secure==0.3.0 six==1.16.0 SQLAlchemy==2.0.23 tabulate==0.8.9 @@ -87,3 +82,6 @@ Werkzeug==2.2.3 xmltodict==0.12.0 yarl==1.7.0 zipp==3.3.2 +content_size_limit_asgi +uvicorn==0.24.0.post1 +content_size_limit_asgi==0.1.5 diff --git a/tests/integration/test_api/test_middlewares/data/test_cases/cases_set_secure_headers.yaml b/tests/integration/test_api/test_middlewares/data/test_cases/cases_set_secure_headers.yaml index 274dd8ba968..5ad05e3df21 100644 --- a/tests/integration/test_api/test_middlewares/data/test_cases/cases_set_secure_headers.yaml +++ b/tests/integration/test_api/test_middlewares/data/test_cases/cases_set_secure_headers.yaml @@ -4,7 +4,7 @@ MAX_REQUEST_PER_MINUTE: 15 metadata: security_headers: - Cache-control: no-store - Content-Security-Policy: none - X-Content-Type-Options: nosniff - X-Frame-Options: DENY + cache-control: no-store + content-security-policy: none + x-content-type-options: nosniff + x-frame-options: deny From b8c0d0fb1ce16157a2c33bffc08d536d6fdb8565 Mon Sep 17 00:00:00 2001 From: Marcelo Ariel Hamra Date: Wed, 14 Feb 2024 09:14:33 -0300 Subject: [PATCH 181/419] Control JOSEError decoding the token --- api/api/middlewares.py | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/api/api/middlewares.py b/api/api/middlewares.py index 3a7716058c4..cf94d97acc9 100644 --- a/api/api/middlewares.py +++ b/api/api/middlewares.py @@ -8,7 +8,7 @@ import contextlib import logging import base64 -from jose import jwt +from jose import jwt, JOSEError from starlette.requests import Request from starlette.responses import Response @@ -78,19 +78,22 @@ async def access_log(request: ConnexionRequest, response: Response, prev_time: t if 'key' in body and '/agents' in path: body['key'] = '****' - # Get the user name from the request. If it is not found in the context, - # try to get it from the headers using basic or bearer authentication methods. + # Get the username from the request. If it is not found in the context, try + # to get it from the headers using basic or bearer authentication methods. 
user = UNKNOWN_USER_STRING - if headers and not (user:= context.get('user', None)): + if headers and not (user := context.get('user', None)): auth_type, user_passw = AbstractSecurityHandler.get_auth_header_value(request) if auth_type == 'basic': user, _ = base64.b64decode(user_passw).decode("latin1").split(":", 1) elif auth_type == 'bearer': - s = jwt.decode(user_passw, generate_keypair()[1], - algorithms=[JWT_ALGORITHM], - audience='Wazuh API REST', - options={'verify_exp': False}) - user = s['sub'] + try: + s = jwt.decode(user_passw, generate_keypair()[1], + algorithms=[JWT_ALGORITHM], + audience='Wazuh API REST', + options={'verify_exp': False}) + user = s['sub'] + except JOSEError: + pass # Get or create authorization context hash hash_auth_context = context.get('token_info', {}).get('hash_auth_context', '') @@ -100,9 +103,8 @@ async def access_log(request: ConnexionRequest, response: Response, prev_time: t hash_auth_context = hashlib.blake2b(json.dumps(body).encode(), digest_size=16).hexdigest() - custom_logging(user, host, method, - path, query, body, time_diff, response.status_code, - hash_auth_context=hash_auth_context, headers=headers) + custom_logging(user, host, method, path, query, body, time_diff, response.status_code, + hash_auth_context=hash_auth_context, headers=headers) if response.status_code == 403 and \ path in {LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT} and \ method in {'GET', 'POST'}: @@ -122,7 +124,7 @@ def check_blocked_ip(request: Request): """ global ip_block, ip_stats access_conf = configuration.api_conf['access'] - block_time=access_conf['block_time'] + block_time = access_conf['block_time'] try: if get_utc_now().timestamp() - block_time >= ip_stats[request.client.host]['timestamp']: del ip_stats[request.client.host] From 6610ec312099958d62804d5c59def51f5e7513e1 Mon Sep 17 00:00:00 2001 From: Marcelo Ariel Hamra Date: Wed, 14 Feb 2024 09:16:47 -0300 Subject: [PATCH 182/419] Minor fixes: cosmetic, imports, constants --- api/api/authentication.py | 3 ++- api/api/controllers/agent_controller.py | 3 +-- api/scripts/wazuh_apid.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/api/api/authentication.py b/api/api/authentication.py index d574b79b440..854be52483a 100755 --- a/api/api/authentication.py +++ b/api/api/authentication.py @@ -30,6 +30,7 @@ from wazuh.rbac.preprocessor import optimize_resources INVALID_TOKEN = "Invalid token" +EXPIRED_TOKEN = "Token expired" pool = ThreadPoolExecutor(max_workers=1) @@ -293,7 +294,7 @@ def decode_token(token: str) -> dict: current_expiration_time = result['auth_token_exp_timeout'] if payload['rbac_policies']['rbac_mode'] != current_rbac_mode \ or (payload['exp'] - payload['nbf']) != current_expiration_time: - raise Unauthorized("Token Expired") + raise Unauthorized(EXPIRED_TOKEN) return payload except JWTError as exc: diff --git a/api/api/controllers/agent_controller.py b/api/api/controllers/agent_controller.py index a50bfff989b..dba4c2c92ec 100755 --- a/api/api/controllers/agent_controller.py +++ b/api/api/controllers/agent_controller.py @@ -16,12 +16,11 @@ from api.models.base_model_ import Body from api.util import deprecate_endpoint, parse_api_param, raise_if_exc, remove_nones_to_dict from api.validator import check_component_configuration_pair -from wazuh import agent +from wazuh import agent, stats from wazuh.core.cluster.control import get_system_nodes from wazuh.core.cluster.dapi.dapi import DistributedAPI from wazuh.core.common import DATABASE_LIMIT from wazuh.core.results import 
AffectedItemsWazuhResult -from wazuh import stats logger = logging.getLogger('wazuh-api') diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index 734f7b07388..4e39ef01615 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -372,8 +372,8 @@ def error(self, msg, *args, **kws): # Set up logger file try: uvicorn_params['log_config'] = set_logging(log_filepath=API_LOG_PATH, - log_level=api_conf['logs']['level'].upper(), - foreground_mode=args.foreground) + log_level=api_conf['logs']['level'].upper(), + foreground_mode=args.foreground) except APIError as e: print(f"Configuration error in the API log format: {api_conf['logs']['format']}.") sys.exit(1) From 4981eb3dbd04a54a97c0c279b4b56c00ecfac441 Mon Sep 17 00:00:00 2001 From: Marcelo Ariel Hamra Date: Wed, 14 Feb 2024 09:17:49 -0300 Subject: [PATCH 183/419] Allow logging to files in foreground mode --- api/api/alogging.py | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/api/api/alogging.py b/api/api/alogging.py index 04019eab0b2..fe960c513f8 100644 --- a/api/api/alogging.py +++ b/api/api/alogging.py @@ -117,19 +117,19 @@ def set_logging(log_filepath, log_level='INFO', foreground_mode=False) -> dict: } if foreground_mode: handlers.update({'console': {}}) - else: - if 'json' in api_conf['logs']['format']: - handlers["jsonfile"] = { - 'filename': f"{log_filepath}.json", - 'formatter': 'json', - 'filters': ['json-filter'], - } - if 'plain' in api_conf['logs']['format']: - handlers["plainfile"] = { - 'filename': f"{log_filepath}.log", - 'formatter': 'log', - 'filters': ['plain-filter'], - } + + if 'json' in api_conf['logs']['format']: + handlers["jsonfile"] = { + 'filename': f"{log_filepath}.json", + 'formatter': 'json', + 'filters': ['json-filter'], + } + if 'plain' in api_conf['logs']['format']: + handlers["plainfile"] = { + 'filename': f"{log_filepath}.log", + 'formatter': 'log', + 'filters': ['plain-filter'], + } hdls = [k for k, v in handlers.items() if isinstance(v, dict)] if not hdls: @@ -151,7 +151,7 @@ def set_logging(log_filepath, log_level='INFO', foreground_mode=False) -> dict: "log": { "()": "uvicorn.logging.DefaultFormatter", "fmt": "%(asctime)s %(levelname)s: %(message)s", - "datefmt": "%Y-%m-%d %H:%M:%S", + "datefmt": "%Y/%m/%d %H:%M:%S", "use_colors": None, }, "json" : { @@ -266,8 +266,8 @@ def custom_logging(user, remote, method, path, query, body = {'events': len(events)} json_info['body'] = body - log_info += f'with parameters {json.dumps(query)} and body'\ - f' {json.dumps(body)} done in {elapsed_time:.3f}s: {status}' + log_info += f'with parameters {json.dumps(query)} and body '\ + f'{json.dumps(body)} done in {elapsed_time:.3f}s: {status}' logger.info(log_info, extra={'log_type': 'log'}) logger.info(json_info, extra={'log_type': 'json'}) From 7069274d148c054c5f3d7de9b2d5098f5e4d06ee Mon Sep 17 00:00:00 2001 From: Marcelo Ariel Hamra Date: Wed, 14 Feb 2024 16:12:39 -0300 Subject: [PATCH 184/419] Create BlockedIpException for DOS max requests. Check blockedIP only for authentication endpoints. 
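
For context, a minimal sketch (illustrative only, not part of this patch) of the
routing rule the blocked-IP check is expected to follow after this change: only
the two authentication endpoints are checked against the blocked-IP list, and
every other request goes straight to the next handler. The endpoint paths and
HTTP methods mirror the diff below; the standalone helper itself is hypothetical.

    # Hypothetical helper; paths and methods are taken from the diff below.
    AUTH_ENDPOINTS = {'/security/user/authenticate', '/security/user/authenticate/run_as'}

    def should_check_blocked_ip(path: str, method: str) -> bool:
        """Return True only for login/run_as requests, the only ones that can block an IP."""
        return path in AUTH_ENDPOINTS and method in {'GET', 'POST'}

    if __name__ == '__main__':
        # Authentication attempts are subject to the blocked-IP check...
        assert should_check_blocked_ip('/security/user/authenticate', 'POST')
        # ...while regular API traffic is not.
        assert not should_check_blocked_ip('/agents', 'GET')
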
--- api/api/api_exception.py | 9 +++- api/api/error_handler.py | 97 ++++++++++++++++++++------------------- api/api/middlewares.py | 12 ++--- api/api/spec/spec.yaml | 2 +- api/scripts/wazuh_apid.py | 2 +- 5 files changed, 66 insertions(+), 56 deletions(-) diff --git a/api/api/api_exception.py b/api/api/api_exception.py index 4d544788c69..bcaa15d9eb6 100644 --- a/api/api/api_exception.py +++ b/api/api/api_exception.py @@ -4,7 +4,7 @@ from connexion.exceptions import ProblemException from api.constants import RELATIVE_CONFIG_FILE_PATH, RELATIVE_SECURITY_PATH -from wazuh.core.exception import DOCU_VERSION +from wazuh.core.exception import DOCU_VERSION, WazuhTooManyRequests class APIException(Exception): @@ -68,3 +68,10 @@ class APIError(APIException): class BlockedIPException(ProblemException): """Bocked IP Exception Class.""" +class MaxRequestsException(ProblemException): + """Bocked IP Exception Class.""" + def __init__(self, code): + exc = WazuhTooManyRequests(code=code) + ext = {"code": exc.code} + ext.update({"remediation": exc.remediation} if hasattr(exc, 'remediation') else {}) + super().__init__(status=429, title=exc.title, detail=exc.message, type=exc.type, ext=ext) diff --git a/api/api/error_handler.py b/api/api/error_handler.py index 6d95bd5b1f6..3af10919a20 100644 --- a/api/api/error_handler.py +++ b/api/api/error_handler.py @@ -13,7 +13,7 @@ from api import configuration from api.middlewares import ip_block, ip_stats, access_log, LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT -from api.api_exception import BlockedIPException +from api.api_exception import BlockedIPException, MaxRequestsException from wazuh.core.utils import get_utc_now ERROR_CONTENT_TYPE="application/problem+json; charset=utf-8" @@ -97,14 +97,14 @@ async def unauthorized_error_handler(request: ConnexionRequest, return response -async def bad_request_error_handler(_: ConnexionRequest, exc: exceptions.BadRequestProblem) -> ConnexionResponse: +async def bad_request_error_handler(request: ConnexionRequest, + exc: exceptions.BadRequestProblem) -> ConnexionResponse: """Bad Request Exception Error handler. Parameters ---------- - _ : ConnexionRequest + request : ConnexionRequest Incomming request. - Unnamed parameter not used. exc : BadRequestProblem Raised exception. @@ -119,19 +119,21 @@ async def bad_request_error_handler(_: ConnexionRequest, exc: exceptions.BadRequ } if exc.detail: problem['detail'] = exc.detail - return ConnexionResponse(status_code=exc.status_code, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) + response = ConnexionResponse(status_code=exc.status_code, + body=json.dumps(problem), + content_type=ERROR_CONTENT_TYPE) + await access_log(request, response, time.time()) + return response -async def http_error_handler(_: ConnexionRequest, exc: exceptions.HTTPException) -> ConnexionResponse: +async def http_error_handler(request: ConnexionRequest, + exc: exceptions.HTTPException) -> ConnexionResponse: """HTTPError Exception Error handler. Parameters ---------- - _ : ConnexionRequest + request : ConnexionRequest Incomming request. - Unnamed parameter not used. exc : HTTPException Raised exception. 
@@ -145,20 +147,21 @@ async def http_error_handler(_: ConnexionRequest, exc: exceptions.HTTPException) 'title': exc.detail, "detail": f"{exc.status_code}: {exc.detail}", } - return ConnexionResponse(status_code=exc.status_code, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) + response = ConnexionResponse(status_code=exc.status_code, + body=json.dumps(problem), + content_type=ERROR_CONTENT_TYPE) + await access_log(request, response, time.time()) + return response -async def jwt_error_handler(_: ConnexionRequest, __: JWTError) -> ConnexionResponse: +async def jwt_error_handler(request: ConnexionRequest, _: JWTError) -> ConnexionResponse: """JWTException Error handler. Parameters ---------- - _ : ConnexionRequest + request : ConnexionRequest Incomming request. - Unnamed parameter not used. - __ : JWTError + _ : JWTError Raised exception. Unnamed parameter not used. @@ -171,20 +174,20 @@ async def jwt_error_handler(_: ConnexionRequest, __: JWTError) -> ConnexionRespo "title": "Unauthorized", "detail": "No authorization token provided" } - - return ConnexionResponse(status_code=401, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) + response = ConnexionResponse(status_code=401, + body=json.dumps(problem), + content_type=ERROR_CONTENT_TYPE) + await access_log(request, response, time.time()) + return response -async def problem_error_handler(_: ConnexionRequest, exc: exceptions.ProblemException) -> ConnexionResponse: +async def problem_error_handler(request: ConnexionRequest, exc: exceptions.ProblemException) -> ConnexionResponse: """ProblemException Error handler. Parameters ---------- - _ : ConnexionRequest + request : ConnexionRequest Incomming request. - Unnamed parameter not used. exc : ProblemException Raised exception. @@ -209,19 +212,20 @@ async def problem_error_handler(_: ConnexionRequest, exc: exceptions.ProblemExce if not problem['detail']: del problem['detail'] - return ConnexionResponse(body=json.dumps(problem), - status_code=exc.__dict__['status'], - content_type=ERROR_CONTENT_TYPE) + response = ConnexionResponse(body=json.dumps(problem), + status_code=exc.__dict__['status'], + content_type=ERROR_CONTENT_TYPE) + await access_log(request, response, time.time()) + return response -async def content_size_handler(_: ConnexionRequest, exc: ContentSizeExceeded) -> ConnexionResponse: +async def content_size_handler(request: ConnexionRequest, exc: ContentSizeExceeded) -> ConnexionResponse: """Content size error handler. Parameters ---------- - _ : ConnexionRequest + request : ConnexionRequest Incomming request. - Unnamed parameter not used. exc : ContentSizeExceeded Raised exception. @@ -234,47 +238,48 @@ async def content_size_handler(_: ConnexionRequest, exc: ContentSizeExceeded) -> "title": "Content size exceeded.", "detail": str(exc) } - - return ConnexionResponse(status_code=413, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) + response = ConnexionResponse(status_code=413, + body=json.dumps(problem), + content_type=ERROR_CONTENT_TYPE) + await access_log(request, response, time.time()) + return response -async def exceeded_requests_handler(_: ConnexionRequest, exc: exceptions.ProblemException) -> ConnexionResponse: +async def exceeded_requests_handler(request: ConnexionRequest, exc: MaxRequestsException) -> ConnexionResponse: """Exceeded requests error handler. Parameters ---------- - _ : ConnexionRequest + request : ConnexionRequest Incomming request. - Unnamed parameter not used. 
- exc : ProblemException + exc : MaxRequestsException Raised exception. Returns ------- Response - Returns status code 429 if the maximum requests per minutes was exceeded. + Returns status code 429 - maximum requests per minutes was exceeded. """ problem = { "title": exc.title, - "error": exc.detail, + "detail": exc.detail, + "error": exc.ext['code'], + "remediation": exc.ext['remediation'] } - response = ConnexionResponse(status_code=429, + response = ConnexionResponse(status_code=exc.status, body=json.dumps(problem), content_type=ERROR_CONTENT_TYPE) - await access_log(exc.ext, response, time.time()) + await access_log(request, response, time.time()) return response -async def blocked_ip_handler(_: ConnexionRequest, exc: BlockedIPException) -> ConnexionResponse: +async def blocked_ip_handler(request: ConnexionRequest, exc: BlockedIPException) -> ConnexionResponse: """Content size error handler. Parameters ---------- - _ : ConnexionRequest + request : ConnexionRequest Incomming request. - Unnamed parameter not used. exc : ProblemException Raised exception. @@ -291,5 +296,5 @@ async def blocked_ip_handler(_: ConnexionRequest, exc: BlockedIPException) -> Co response = ConnexionResponse(status_code=403, body=json.dumps(problem), content_type=ERROR_CONTENT_TYPE) - await access_log(exc.ext, response, time.time()) + await access_log(request, response, time.time()) return response diff --git a/api/api/middlewares.py b/api/api/middlewares.py index cf94d97acc9..93cb8a74a18 100644 --- a/api/api/middlewares.py +++ b/api/api/middlewares.py @@ -26,7 +26,7 @@ from api import configuration from api.alogging import custom_logging from api.authentication import generate_keypair, JWT_ALGORITHM -from api.api_exception import BlockedIPException +from api.api_exception import BlockedIPException, MaxRequestsException # Default of the max event requests allowed per minute MAX_REQUESTS_EVENTS_DEFAULT = 30 @@ -200,11 +200,7 @@ async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) - 6005) if error_code: - raise ProblemException( - status=429, - title="Maximum number of requests per minute reached", - detail=error_code, - ext=ConnexionRequest.from_starlette_request(request)) + raise MaxRequestsException(code=error_code) else: return await call_next(request) @@ -215,7 +211,9 @@ class CheckBlockedIP(BaseHTTPMiddleware): async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response: """"Update and check if the client IP is locked.""" - check_blocked_ip(request) + if request.url.path in {'/security/user/authenticate', '/security/user/authenticate/run_as'} \ + and request.method in {'GET', 'POST'}: + check_blocked_ip(request) return await call_next(request) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index e65d902bac8..265a20569ba 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -11495,7 +11495,7 @@ paths: '403': $ref: '#/components/responses/PermissionDeniedResponse' '404': - $ref: '#/components/responses/InvalidUriResponse' + $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index 4e39ef01615..320f1b893a5 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -214,7 +214,7 @@ def start(params: dict): app.add_error_handler(JWTError, error_handler.jwt_error_handler) app.add_error_handler(Unauthorized, error_handler.unauthorized_error_handler) 
app.add_error_handler(HTTPException, error_handler.http_error_handler) - app.add_error_handler(429, error_handler.exceeded_requests_handler) + app.add_error_handler(error_handler.MaxRequestsException, error_handler.exceeded_requests_handler) app.add_error_handler(error_handler.BlockedIPException, error_handler.blocked_ip_handler) app.add_error_handler(ProblemException, error_handler.problem_error_handler) From 8eb815df3d1c50a90af1c5cddcc5f040e8321498 Mon Sep 17 00:00:00 2001 From: Marcelo Ariel Hamra Date: Thu, 15 Feb 2024 11:07:26 -0300 Subject: [PATCH 185/419] Move AccessloggerMiddleware before exceptions. --- api/api/alogging.py | 3 -- api/api/error_handler.py | 63 +++++++++++------------------- api/api/middlewares.py | 4 +- api/api/test/test_error_handler.py | 29 +++++--------- api/api/test/test_middlewares.py | 22 +++++++++-- api/scripts/wazuh_apid.py | 3 +- 6 files changed, 54 insertions(+), 70 deletions(-) diff --git a/api/api/alogging.py b/api/api/alogging.py index fe960c513f8..3db482c686c 100644 --- a/api/api/alogging.py +++ b/api/api/alogging.py @@ -19,9 +19,6 @@ # Variable used to specify an unknown user UNKNOWN_USER_STRING = "unknown_user" -# Run_as login endpoint path -RUN_AS_LOGIN_ENDPOINT = "/security/user/authenticate/run_as" - class APILoggerSize: size_regex = re.compile(r"(\d+)([KM])") diff --git a/api/api/error_handler.py b/api/api/error_handler.py index 3af10919a20..cc77a21a49c 100644 --- a/api/api/error_handler.py +++ b/api/api/error_handler.py @@ -3,7 +3,6 @@ # This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 import json -import time from connexion.lifecycle import ConnexionRequest, ConnexionResponse from connexion import exceptions @@ -12,7 +11,7 @@ from content_size_limit_asgi.errors import ContentSizeExceeded from api import configuration -from api.middlewares import ip_block, ip_stats, access_log, LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT +from api.middlewares import ip_block, ip_stats, LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT from api.api_exception import BlockedIPException, MaxRequestsException from wazuh.core.utils import get_utc_now @@ -90,11 +89,9 @@ async def unauthorized_error_handler(request: ConnexionRequest, problem.update({'detail': 'No authorization token provided'} \ if 'token_info' not in request.context \ else {}) - response = ConnexionResponse(status_code=exc.status_code, + return ConnexionResponse(status_code=exc.status_code, body=json.dumps(problem), content_type=ERROR_CONTENT_TYPE) - await access_log(request, response, time.time()) - return response async def bad_request_error_handler(request: ConnexionRequest, @@ -119,11 +116,9 @@ async def bad_request_error_handler(request: ConnexionRequest, } if exc.detail: problem['detail'] = exc.detail - response = ConnexionResponse(status_code=exc.status_code, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) - await access_log(request, response, time.time()) - return response + return ConnexionResponse(status_code=exc.status_code, + body=json.dumps(problem), + content_type=ERROR_CONTENT_TYPE) async def http_error_handler(request: ConnexionRequest, @@ -147,11 +142,9 @@ async def http_error_handler(request: ConnexionRequest, 'title': exc.detail, "detail": f"{exc.status_code}: {exc.detail}", } - response = ConnexionResponse(status_code=exc.status_code, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) - await access_log(request, response, time.time()) - return response + return ConnexionResponse(status_code=exc.status_code, + 
body=json.dumps(problem), + content_type=ERROR_CONTENT_TYPE) async def jwt_error_handler(request: ConnexionRequest, _: JWTError) -> ConnexionResponse: @@ -174,11 +167,9 @@ async def jwt_error_handler(request: ConnexionRequest, _: JWTError) -> Connexion "title": "Unauthorized", "detail": "No authorization token provided" } - response = ConnexionResponse(status_code=401, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) - await access_log(request, response, time.time()) - return response + return ConnexionResponse(status_code=401, + body=json.dumps(problem), + content_type=ERROR_CONTENT_TYPE) async def problem_error_handler(request: ConnexionRequest, exc: exceptions.ProblemException) -> ConnexionResponse: @@ -212,11 +203,9 @@ async def problem_error_handler(request: ConnexionRequest, exc: exceptions.Probl if not problem['detail']: del problem['detail'] - response = ConnexionResponse(body=json.dumps(problem), - status_code=exc.__dict__['status'], - content_type=ERROR_CONTENT_TYPE) - await access_log(request, response, time.time()) - return response + return ConnexionResponse(body=json.dumps(problem), + status_code=exc.__dict__['status'], + content_type=ERROR_CONTENT_TYPE) async def content_size_handler(request: ConnexionRequest, exc: ContentSizeExceeded) -> ConnexionResponse: @@ -238,11 +227,9 @@ async def content_size_handler(request: ConnexionRequest, exc: ContentSizeExceed "title": "Content size exceeded.", "detail": str(exc) } - response = ConnexionResponse(status_code=413, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) - await access_log(request, response, time.time()) - return response + return ConnexionResponse(status_code=413, + body=json.dumps(problem), + content_type=ERROR_CONTENT_TYPE) async def exceeded_requests_handler(request: ConnexionRequest, exc: MaxRequestsException) -> ConnexionResponse: @@ -266,11 +253,9 @@ async def exceeded_requests_handler(request: ConnexionRequest, exc: MaxRequestsE "error": exc.ext['code'], "remediation": exc.ext['remediation'] } - response = ConnexionResponse(status_code=exc.status, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) - await access_log(request, response, time.time()) - return response + return ConnexionResponse(status_code=exc.status, + body=json.dumps(problem), + content_type=ERROR_CONTENT_TYPE) async def blocked_ip_handler(request: ConnexionRequest, exc: BlockedIPException) -> ConnexionResponse: @@ -293,8 +278,6 @@ async def blocked_ip_handler(request: ConnexionRequest, exc: BlockedIPException) "detail": exc.detail, "error": 6000 } - response = ConnexionResponse(status_code=403, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) - await access_log(request, response, time.time()) - return response + return ConnexionResponse(status_code=403, + body=json.dumps(problem), + content_type=ERROR_CONTENT_TYPE) diff --git a/api/api/middlewares.py b/api/api/middlewares.py index 93cb8a74a18..4a3ed40ac8d 100644 --- a/api/api/middlewares.py +++ b/api/api/middlewares.py @@ -15,7 +15,6 @@ from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint from connexion import ConnexionMiddleware -from connexion.exceptions import ProblemException from connexion.lifecycle import ConnexionRequest from connexion.security import AbstractSecurityHandler @@ -210,8 +209,7 @@ class CheckBlockedIP(BaseHTTPMiddleware): async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response: """"Update and check if the client IP is locked.""" - - if request.url.path in 
{'/security/user/authenticate', '/security/user/authenticate/run_as'} \ + if request.url.path in {LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT} \ and request.method in {'GET', 'POST'}: check_blocked_ip(request) return await call_next(request) diff --git a/api/api/test/test_error_handler.py b/api/api/test/test_error_handler.py index 0e6b34d1eeb..fea20398ed4 100644 --- a/api/api/test/test_error_handler.py +++ b/api/api/test/test_error_handler.py @@ -15,6 +15,7 @@ http_error_handler, problem_error_handler, bad_request_error_handler, unauthorized_error_handler, \ exceeded_requests_handler, blocked_ip_handler, ERROR_CONTENT_TYPE from api.middlewares import LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT +from api.api_exception import MaxRequestsException @pytest.fixture @@ -99,14 +100,12 @@ async def test_unauthorized_error_handler(path, method, token_info, mock_request exc = Unauthorized() with patch('api.error_handler.prevent_bruteforce_attack') as mock_pbfa, \ - patch('api.configuration.api_conf', new={'access': {'max_login_attempts': 1000}}), \ - patch('api.error_handler.access_log') as access_log_mock: + patch('api.configuration.api_conf', new={'access': {'max_login_attempts': 1000}}): response = await unauthorized_error_handler(mock_request, exc) if path in {LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT} \ and method in {'GET', 'POST'}: mock_pbfa.assert_called_once_with(request=mock_request, attempts=1000) expected_time = datetime(1970, 1, 1, 0, 0, 10).timestamp() - access_log_mock.assert_awaited_once_with(mock_request, response, expected_time) body = json.loads(response.body) assert body == problem assert response.status_code == exc.status_code @@ -208,21 +207,15 @@ async def test_bad_request_error_handler(detail): @pytest.mark.parametrize('error_code', [6001, 6005]) async def test_bad_exceeded_request_handler(error_code, mock_request): """Test exceeded request error handler.""" + exc = MaxRequestsException(error_code) problem = { - "title": 'Maximum number of requests per minute reached', - "error": error_code, + "title": exc.title, + "detail": exc.detail, + "error": exc.ext['code'], + "remediation": exc.ext['remediation'] } - exc = ProblemException( - status=429, - title="Maximum number of requests per minute reached", - detail=error_code, - ext=mock_request) - with patch('api.error_handler.access_log') as access_log_mock: - response = await exceeded_requests_handler(mock_request, exc) - expected_time = datetime(1970, 1, 1, 0, 0, 10).timestamp() - access_log_mock.assert_awaited_once_with(mock_request, response, expected_time) - + response = await exceeded_requests_handler(mock_request, exc) body = json.loads(response.body) assert body == problem assert response.status_code == exc.status_code @@ -246,11 +239,7 @@ async def test_blocked_ip_handler(mock_request): "detail": exc.detail, "error": 6000 } - with patch('api.error_handler.access_log') as access_log_mock: - response = await blocked_ip_handler(mock_request, exc) - expected_time = datetime(1970, 1, 1, 0, 0, 10).timestamp() - access_log_mock.assert_awaited_once_with(mock_request, response, expected_time) - + response = await blocked_ip_handler(mock_request, exc) body = json.loads(response.body) assert body == problem assert response.status_code == exc.status_code diff --git a/api/api/test/test_middlewares.py b/api/api/test/test_middlewares.py index f1e82902abb..bc37e067400 100644 --- a/api/api/test/test_middlewares.py +++ b/api/api/test/test_middlewares.py @@ -273,19 +273,35 @@ async def test_secure_headers_middleware(mock_req): @pytest.mark.asyncio -async 
def test_check_block_ip_middleware(mock_req): +@pytest.mark.parametrize("endpoint, method, call_check", [ + (LOGIN_ENDPOINT, 'POST', True), + (RUN_AS_LOGIN_ENDPOINT, 'POST', True), + (LOGIN_ENDPOINT, 'GET', True), + (RUN_AS_LOGIN_ENDPOINT, 'GET', True), + (LOGIN_ENDPOINT, 'DELETE', False), + (RUN_AS_LOGIN_ENDPOINT, 'DELETE', False), + ('/agents', 'POST', False), + ('/agents', 'GET', False), + ('/agents', 'DELETE', False), +]) +async def test_check_block_ip_middleware(endpoint, method, call_check, mock_req): """Test access logging.""" response = MagicMock() dispatch_mock = AsyncMock(return_value=response) middleware = CheckBlockedIP(AsyncApp(__name__)) operation = MagicMock(name="operation") - operation.method = "post" + operation.method = method + mock_req.url.path = endpoint + mock_req.method = method with TestContext(operation=operation), \ patch('api.middlewares.check_blocked_ip') as mock_block_ip: secure_headers.framework.starlette = MagicMock() ret_response = await middleware.dispatch(request=mock_req, call_next=dispatch_mock) - mock_block_ip.assert_called_once_with(mock_req) + if call_check: + mock_block_ip.assert_called_once_with(mock_req) + else: + mock_block_ip.assert_not_called() dispatch_mock.assert_awaited_once_with(mock_req) assert ret_response == response diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index 320f1b893a5..77be3ada987 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -193,7 +193,7 @@ def start(params: dict): if api_conf['access']['max_request_per_minute'] > 0: app.add_middleware(CheckRateLimitsMiddleware, MiddlewarePosition.BEFORE_SECURITY) app.add_middleware(CheckBlockedIP, MiddlewarePosition.BEFORE_SECURITY) - app.add_middleware(WazuhAccessLoggerMiddleware) + app.add_middleware(WazuhAccessLoggerMiddleware, MiddlewarePosition.BEFORE_EXCEPTION) app.add_middleware(SecureHeadersMiddleware) if api_conf['max_upload_size']: app.add_middleware(ContentSizeLimitMiddleware, max_content_size=api_conf['max_upload_size']) @@ -217,6 +217,7 @@ def start(params: dict): app.add_error_handler(error_handler.MaxRequestsException, error_handler.exceeded_requests_handler) app.add_error_handler(error_handler.BlockedIPException, error_handler.blocked_ip_handler) app.add_error_handler(ProblemException, error_handler.problem_error_handler) + app.add_error_handler(403, error_handler.problem_error_handler) # Add application signals TO BE MODIFIED AFTER IMPLEMENTING CTI IN CONNEXION 3.0 # app.app.on_response_prepare.append(modify_response_headers) From eca7d21a6ebe7b59b3fc415198e16d4e6c70bc84 Mon Sep 17 00:00:00 2001 From: Marcelo Ariel Hamra Date: Thu, 15 Feb 2024 12:13:33 -0300 Subject: [PATCH 186/419] Fix log in debug mode. --- api/scripts/wazuh_apid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index 77be3ada987..bce9abf6755 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -386,8 +386,8 @@ def error(self, msg, *args, **kws): os.chmod(handler['filename'], 0o660) # Configure and create the wazuh-api logger - logging.config.dictConfig(uvicorn_params['log_config']) add_debug2_log_level_and_error() + logging.config.dictConfig(uvicorn_params['log_config']) logger = logging.getLogger('wazuh-api') # Check deprecated options. 
To delete after expected versions From 6efda8c6c1e47db5383fc62b11a86e3edab1748f Mon Sep 17 00:00:00 2001 From: Marcelo Ariel Hamra Date: Mon, 19 Feb 2024 13:44:42 -0300 Subject: [PATCH 187/419] Remove WAZUH_VERSION import --- api/api/controllers/security_controller.py | 1 - 1 file changed, 1 deletion(-) diff --git a/api/api/controllers/security_controller.py b/api/api/controllers/security_controller.py index be154aed6f6..23d226c1950 100644 --- a/api/api/controllers/security_controller.py +++ b/api/api/controllers/security_controller.py @@ -22,7 +22,6 @@ from wazuh import security, __version__ from wazuh.core.cluster.control import get_system_nodes from wazuh.core.cluster.dapi.dapi import DistributedAPI -from wazuh.core.common import WAZUH_VERSION from wazuh.core.exception import WazuhException, WazuhPermissionError from wazuh.core.results import AffectedItemsWazuhResult, WazuhResult from wazuh.core.security import revoke_tokens From e8e1eae09893b5be3c2540f292345e474498e602 Mon Sep 17 00:00:00 2001 From: Marcelo Ariel Hamra Date: Mon, 19 Feb 2024 16:24:03 -0300 Subject: [PATCH 188/419] Remove jose references. --- api/api/authentication.py | 3 +-- api/api/error_handler.py | 4 ++-- api/api/middlewares.py | 4 ++-- api/scripts/wazuh_apid.py | 4 ++-- 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/api/api/authentication.py b/api/api/authentication.py index 854be52483a..004da54dcdc 100755 --- a/api/api/authentication.py +++ b/api/api/authentication.py @@ -13,7 +13,6 @@ from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric import ec -from jose import JWTError, jwt from connexion.exceptions import Unauthorized import api.configuration as conf @@ -297,5 +296,5 @@ def decode_token(token: str) -> dict: raise Unauthorized(EXPIRED_TOKEN) return payload - except JWTError as exc: + except jwt.exceptions.PyJWTError as exc: raise Unauthorized(INVALID_TOKEN) from exc diff --git a/api/api/error_handler.py b/api/api/error_handler.py index cc77a21a49c..ca198044faf 100644 --- a/api/api/error_handler.py +++ b/api/api/error_handler.py @@ -7,7 +7,7 @@ from connexion.lifecycle import ConnexionRequest, ConnexionResponse from connexion import exceptions -from jose import JWTError +import jwt from content_size_limit_asgi.errors import ContentSizeExceeded from api import configuration @@ -147,7 +147,7 @@ async def http_error_handler(request: ConnexionRequest, content_type=ERROR_CONTENT_TYPE) -async def jwt_error_handler(request: ConnexionRequest, _: JWTError) -> ConnexionResponse: +async def jwt_error_handler(request: ConnexionRequest, _: jwt.exceptions.PyJWTError) -> ConnexionResponse: """JWTException Error handler. 
Parameters diff --git a/api/api/middlewares.py b/api/api/middlewares.py index 4a3ed40ac8d..127f2dccaac 100644 --- a/api/api/middlewares.py +++ b/api/api/middlewares.py @@ -8,7 +8,7 @@ import contextlib import logging import base64 -from jose import jwt, JOSEError +import jwt from starlette.requests import Request from starlette.responses import Response @@ -91,7 +91,7 @@ async def access_log(request: ConnexionRequest, response: Response, prev_time: t audience='Wazuh API REST', options={'verify_exp': False}) user = s['sub'] - except JOSEError: + except jwt.exceptions.PyJWTError: pass # Get or create authorization context hash diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index bce9abf6755..ecd1526c9b8 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -211,7 +211,7 @@ def start(params: dict): ) # Add error handlers to format exceptions - app.add_error_handler(JWTError, error_handler.jwt_error_handler) + app.add_error_handler(jwt.exceptions.PyJWTError, error_handler.jwt_error_handler) app.add_error_handler(Unauthorized, error_handler.unauthorized_error_handler) app.add_error_handler(HTTPException, error_handler.http_error_handler) app.add_error_handler(error_handler.MaxRequestsException, error_handler.exceeded_requests_handler) @@ -341,7 +341,7 @@ def error(self, msg, *args, **kws): from content_size_limit_asgi import ContentSizeLimitMiddleware from content_size_limit_asgi.errors import ContentSizeExceeded - from jose import JWTError + import jwt from api import error_handler, __path__ as api_path from api.api_exception import APIError From 2988f06adf89280075c7ee45e885ccb8dfc0b67c Mon Sep 17 00:00:00 2001 From: Marcelo Ariel Hamra Date: Mon, 19 Feb 2024 18:22:07 -0300 Subject: [PATCH 189/419] Implement pretty parameter in the error handlers. --- api/api/controllers/util.py | 15 ++++++++---- api/api/error_handler.py | 47 +++++++++++++++---------------------- 2 files changed, 29 insertions(+), 33 deletions(-) diff --git a/api/api/controllers/util.py b/api/api/controllers/util.py index 1f0d80f29e2..6949d982bc1 100644 --- a/api/api/controllers/util.py +++ b/api/api/controllers/util.py @@ -7,17 +7,22 @@ JSON_CONTENT_TYPE="application/json" XML_CONTENT_TYPE="application/xml; charset=utf-8" +ERROR_CONTENT_TYPE="application/problem+json; charset=utf-8" -def json_response(data: dict, pretty: bool = False) -> ConnexionResponse: +def json_response(data: dict, pretty: bool = False, status_code: int = 200, content_type: str = JSON_CONTENT_TYPE) -> ConnexionResponse: """Generate a json Response from a dictionary. Parameters ---------- - data: dict + data : dict Data dictionary to convert to json. - pretty: + pretty : bool Prettify the response to be human readable. + status_code : int + HTTP status code to return. Default 200. + content_type : str + Content type to return. Default JSON_CONTENT_TYPE Returns ------- @@ -25,5 +30,5 @@ def json_response(data: dict, pretty: bool = False) -> ConnexionResponse: JSON response generated from the data. """ return ConnexionResponse(body=prettify(data) if pretty else dumps(data), - content_type=JSON_CONTENT_TYPE, - status_code=200) + content_type=content_type, + status_code=status_code) diff --git a/api/api/error_handler.py b/api/api/error_handler.py index ca198044faf..5baccf6fadc 100644 --- a/api/api/error_handler.py +++ b/api/api/error_handler.py @@ -2,8 +2,6 @@ # Created by Wazuh, Inc. . 
# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 -import json - from connexion.lifecycle import ConnexionRequest, ConnexionResponse from connexion import exceptions @@ -13,10 +11,9 @@ from api import configuration from api.middlewares import ip_block, ip_stats, LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT from api.api_exception import BlockedIPException, MaxRequestsException +from api.controllers.util import json_response, ERROR_CONTENT_TYPE from wazuh.core.utils import get_utc_now -ERROR_CONTENT_TYPE="application/problem+json; charset=utf-8" - def prevent_bruteforce_attack(request: ConnexionRequest, attempts: int = 5): """Check that the IPs that are requesting an API token do not do so repeatedly. @@ -89,9 +86,8 @@ async def unauthorized_error_handler(request: ConnexionRequest, problem.update({'detail': 'No authorization token provided'} \ if 'token_info' not in request.context \ else {}) - return ConnexionResponse(status_code=exc.status_code, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) + return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', + status_code=exc.status_code, content_type=ERROR_CONTENT_TYPE) async def bad_request_error_handler(request: ConnexionRequest, @@ -116,9 +112,9 @@ async def bad_request_error_handler(request: ConnexionRequest, } if exc.detail: problem['detail'] = exc.detail - return ConnexionResponse(status_code=exc.status_code, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) + + return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', + status_code=exc.status_code, content_type=ERROR_CONTENT_TYPE) async def http_error_handler(request: ConnexionRequest, @@ -142,9 +138,8 @@ async def http_error_handler(request: ConnexionRequest, 'title': exc.detail, "detail": f"{exc.status_code}: {exc.detail}", } - return ConnexionResponse(status_code=exc.status_code, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) + return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', + status_code=exc.status_code, content_type=ERROR_CONTENT_TYPE) async def jwt_error_handler(request: ConnexionRequest, _: jwt.exceptions.PyJWTError) -> ConnexionResponse: @@ -167,9 +162,8 @@ async def jwt_error_handler(request: ConnexionRequest, _: jwt.exceptions.PyJWTEr "title": "Unauthorized", "detail": "No authorization token provided" } - return ConnexionResponse(status_code=401, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) + return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', + status_code=401, content_type=ERROR_CONTENT_TYPE) async def problem_error_handler(request: ConnexionRequest, exc: exceptions.ProblemException) -> ConnexionResponse: @@ -203,9 +197,8 @@ async def problem_error_handler(request: ConnexionRequest, exc: exceptions.Probl if not problem['detail']: del problem['detail'] - return ConnexionResponse(body=json.dumps(problem), - status_code=exc.__dict__['status'], - content_type=ERROR_CONTENT_TYPE) + return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', + status_code=exc.__dict__['status'], content_type=ERROR_CONTENT_TYPE) async def content_size_handler(request: ConnexionRequest, exc: ContentSizeExceeded) -> ConnexionResponse: @@ -227,9 +220,9 @@ async def content_size_handler(request: ConnexionRequest, exc: ContentSizeExceed "title": "Content size exceeded.", "detail": str(exc) } - 
return ConnexionResponse(status_code=413, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) + return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', + status_code=413, content_type=ERROR_CONTENT_TYPE) + async def exceeded_requests_handler(request: ConnexionRequest, exc: MaxRequestsException) -> ConnexionResponse: @@ -253,9 +246,8 @@ async def exceeded_requests_handler(request: ConnexionRequest, exc: MaxRequestsE "error": exc.ext['code'], "remediation": exc.ext['remediation'] } - return ConnexionResponse(status_code=exc.status, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) + return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', + status_code=exc.status, content_type=ERROR_CONTENT_TYPE) async def blocked_ip_handler(request: ConnexionRequest, exc: BlockedIPException) -> ConnexionResponse: @@ -278,6 +270,5 @@ async def blocked_ip_handler(request: ConnexionRequest, exc: BlockedIPException) "detail": exc.detail, "error": 6000 } - return ConnexionResponse(status_code=403, - body=json.dumps(problem), - content_type=ERROR_CONTENT_TYPE) + return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', + status_code=403, content_type=ERROR_CONTENT_TYPE) From 23ec81c5d9cce660490b6da64393d34c35ba439e Mon Sep 17 00:00:00 2001 From: Marcelo Ariel Hamra Date: Mon, 19 Feb 2024 18:45:26 -0300 Subject: [PATCH 190/419] Update error handler UTs after including the pretty parameter. --- api/api/test/test_error_handler.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/api/api/test/test_error_handler.py b/api/api/test/test_error_handler.py index fea20398ed4..5a39d88d403 100644 --- a/api/api/test/test_error_handler.py +++ b/api/api/test/test_error_handler.py @@ -29,6 +29,7 @@ def mock_request(request, request_info): """fixture to wrap functions with request""" req = MagicMock() req.client.host = 'ip' + mock_request.query_param = {} if 'prevent_bruteforce_attack' in request.node.name: for clave, valor in request_info.items(): setattr(req, clave, valor) @@ -51,14 +52,15 @@ def test_cleanup_detail_field(): {'ip': {'attempts': 4}}, ]) @pytest.mark.parametrize('request_info', [ - {'path': LOGIN_ENDPOINT, 'method': 'GET'}, - {'path': LOGIN_ENDPOINT, 'method': 'POST'}, + {'path': LOGIN_ENDPOINT, 'method': 'GET', 'pretty': 'true'}, + {'path': LOGIN_ENDPOINT, 'method': 'POST', 'pretty': 'false'}, {'path': RUN_AS_LOGIN_ENDPOINT, 'method': 'POST'}, ], indirect=True) def test_middlewares_prevent_bruteforce_attack(stats, request_info, mock_request): """Test `prevent_bruteforce_attack` blocks IPs when reaching max number of attempts.""" mock_request.configure_mock(scope={'path': request_info['path']}) mock_request.method = request_info['method'] + mock_request.query_param['pretty'] = request_info.get('pretty', 'false') with patch("api.error_handler.ip_stats", new=copy(stats)) as ip_stats, \ patch("api.error_handler.ip_block", new=set()) as ip_block: previous_attempts = ip_stats['ip']['attempts'] if 'ip' in ip_stats else 0 @@ -113,13 +115,13 @@ async def test_unauthorized_error_handler(path, method, token_info, mock_request @pytest.mark.asyncio -async def test_jwt_error_handler(): +async def test_jwt_error_handler(mock_request): """Test jwt error handler.""" problem = { "title": "Unauthorized", "detail": "No authorization token provided" } - response = await jwt_error_handler(None, None) + response = await jwt_error_handler(mock_request, 
None) body = json.loads(response.body) assert body == problem @@ -129,14 +131,14 @@ async def test_jwt_error_handler(): @pytest.mark.asyncio @pytest.mark.parametrize('detail', [None, 'Custom detail']) -async def test_http_error_handler(detail): +async def test_http_error_handler(detail, mock_request): """Test http error handler.""" exc = HTTPException(status_code=401, detail=detail) problem = { "title": exc.detail, 'detail': f"{exc.status_code}: {exc.detail}" } - response = await http_error_handler(None, exc) + response = await http_error_handler(mock_request, exc) body = json.loads(response.body) assert body == problem @@ -160,10 +162,10 @@ async def test_http_error_handler(detail): ('', {}, {'type': 'type'}, 'type'), ('', {}, {'type': 'type', 'more': 'more'}, 'type'), ]) -async def test_problem_error_handler(title, detail, ext, error_type): +async def test_problem_error_handler(title, detail, ext, error_type, mock_request): """Test problem error handler.""" exc = ProblemException(status=400, title=title, detail=detail, ext=ext, type=error_type) - response = await problem_error_handler(None, exc) + response = await problem_error_handler(mock_request, exc) body = json.loads(response.body) if isinstance(detail, dict): @@ -187,7 +189,7 @@ async def test_problem_error_handler(title, detail, ext, error_type): @pytest.mark.asyncio @pytest.mark.parametrize('detail', [None, 'detail']) -async def test_bad_request_error_handler(detail): +async def test_bad_request_error_handler(detail, mock_request): """Test bad request error handler.""" problem = { "title": 'Bad Request', @@ -195,7 +197,7 @@ async def test_bad_request_error_handler(detail): problem.update({'detail': detail} if detail else {}) exc = BadRequestProblem(detail=detail) - response = await bad_request_error_handler(None, exc) + response = await bad_request_error_handler(mock_request, exc) body = json.loads(response.body) assert body == problem assert response.status_code == exc.status_code From 30f72a1c89bac983ed4371ca5b5d1b3a8e2d8fd0 Mon Sep 17 00:00:00 2001 From: Marcelo Ariel Hamra Date: Mon, 19 Feb 2024 18:50:53 -0300 Subject: [PATCH 191/419] Update json_response UT --- api/api/controllers/test/test_controller_util.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/api/api/controllers/test/test_controller_util.py b/api/api/controllers/test/test_controller_util.py index 9e8cbc4cf25..a2cf59c8019 100644 --- a/api/api/controllers/test/test_controller_util.py +++ b/api/api/controllers/test/test_controller_util.py @@ -7,11 +7,15 @@ from connexion.lifecycle import ConnexionResponse from api.controllers.util import json_response -@pytest.mark.parametrize('pretty, body', [(False, '{"a": "1", "b": "2"}'), - (True, '{\n "a": "1",\n "b": "2"\n}')]) -def test_json_response(pretty, body): +@pytest.mark.parametrize('pretty, body, status_code, content_type', + [(False, '{"a": "1", "b": "2"}', 200, 'application/json'), + (True, '{\n "a": "1",\n "b": "2"\n}', 401, 'application/json') + ]) +def test_json_response(pretty, body, status_code, content_type): """Veryfy if the response body is converted to json and prettyfied.""" data = {"a": "1", "b": "2"} - response = json_response(data=data, pretty=pretty) + response = json_response(data=data, pretty=pretty, content_type=content_type, status_code=status_code) assert isinstance(response, ConnexionResponse) assert response.body == body + assert response.status_code == status_code + assert response.content_type == content_type From 248ff604b514ece3ac8873794957f1b0c3dca75c Mon 
Sep 17 00:00:00 2001
From: Marcelo Ariel Hamra
Date: Tue, 20 Feb 2024 16:14:49 -0300
Subject: [PATCH 192/419] Removed error handler for MaxRequests and BlockedIP exceptions.

---
 api/api/api_exception.py           |  3 ++
 api/api/error_handler.py           | 50 -------
 api/api/middlewares.py             |  3 +-
 api/api/test/test_error_handler.py | 47 +---
 api/scripts/wazuh_apid.py          |  2 --
 5 files changed, 5 insertions(+), 100 deletions(-)

diff --git a/api/api/api_exception.py b/api/api/api_exception.py
index bcaa15d9eb6..f60d280ee76 100644
--- a/api/api/api_exception.py
+++ b/api/api/api_exception.py
@@ -67,6 +67,9 @@ class APIError(APIException):
 
 class BlockedIPException(ProblemException):
     """Bocked IP Exception Class."""
+    def __init__(self, *, status=500, title=None, detail=None):
+        ext = {"code": 6000}
+        super().__init__(status=status, title=title, detail=detail, ext=ext)
 
 class MaxRequestsException(ProblemException):
     """Bocked IP Exception Class."""
diff --git a/api/api/error_handler.py b/api/api/error_handler.py
index 5baccf6fadc..33d555ecfed 100644
--- a/api/api/error_handler.py
+++ b/api/api/error_handler.py
@@ -222,53 +222,3 @@ async def content_size_handler(request: ConnexionRequest, exc: ContentSizeExceed
     }
     return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true',
                          status_code=413, content_type=ERROR_CONTENT_TYPE)
-
-
-
-async def exceeded_requests_handler(request: ConnexionRequest, exc: MaxRequestsException) -> ConnexionResponse:
-    """Exceeded requests error handler.
-
-    Parameters
-    ----------
-    request : ConnexionRequest
-        Incomming request.
-    exc : MaxRequestsException
-        Raised exception.
-
-    Returns
-    -------
-    Response
-        Returns status code 429 - maximum requests per minutes was exceeded.
-    """
-    problem = {
-        "title": exc.title,
-        "detail": exc.detail,
-        "error": exc.ext['code'],
-        "remediation": exc.ext['remediation']
-    }
-    return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true',
-                         status_code=exc.status, content_type=ERROR_CONTENT_TYPE)
-
-
-async def blocked_ip_handler(request: ConnexionRequest, exc: BlockedIPException) -> ConnexionResponse:
-    """Content size error handler.
-
-    Parameters
-    ----------
-    request : ConnexionRequest
-        Incomming request.
-    exc : ProblemException
-        Raised exception.
-
-    Returns
-    -------
-    Response
-        Returns status code 403 if the maximum number of failed logins was reached.
-    """
-    problem = {
-        "title": exc.title,
-        "detail": exc.detail,
-        "error": 6000
-    }
-    return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true',
-                         status_code=403, content_type=ERROR_CONTENT_TYPE)
diff --git a/api/api/middlewares.py b/api/api/middlewares.py
index 127f2dccaac..dabd7f1cde1 100644
--- a/api/api/middlewares.py
+++ b/api/api/middlewares.py
@@ -135,8 +135,7 @@ def check_blocked_ip(request: Request):
             status=403,
             title="Permission Denied",
             detail="Limit of login attempts reached. 
The current IP has been blocked due " - "to a high number of login attempts", - ext=ConnexionRequest.from_starlette_request(request)) + "to a high number of login attempts") def check_rate_limit( diff --git a/api/api/test/test_error_handler.py b/api/api/test/test_error_handler.py index 5a39d88d403..fcbd191d6d7 100644 --- a/api/api/test/test_error_handler.py +++ b/api/api/test/test_error_handler.py @@ -13,9 +13,8 @@ from connexion.exceptions import HTTPException, ProblemException, BadRequestProblem, Unauthorized from api.error_handler import _cleanup_detail_field, prevent_bruteforce_attack, jwt_error_handler, \ http_error_handler, problem_error_handler, bad_request_error_handler, unauthorized_error_handler, \ - exceeded_requests_handler, blocked_ip_handler, ERROR_CONTENT_TYPE + ERROR_CONTENT_TYPE from api.middlewares import LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT -from api.api_exception import MaxRequestsException @pytest.fixture @@ -202,47 +201,3 @@ async def test_bad_request_error_handler(detail, mock_request): assert body == problem assert response.status_code == exc.status_code assert response.content_type == ERROR_CONTENT_TYPE - - -@pytest.mark.asyncio -@freeze_time(datetime(1970, 1, 1, 0, 0, 10)) -@pytest.mark.parametrize('error_code', [6001, 6005]) -async def test_bad_exceeded_request_handler(error_code, mock_request): - """Test exceeded request error handler.""" - exc = MaxRequestsException(error_code) - problem = { - "title": exc.title, - "detail": exc.detail, - "error": exc.ext['code'], - "remediation": exc.ext['remediation'] - } - - response = await exceeded_requests_handler(mock_request, exc) - body = json.loads(response.body) - assert body == problem - assert response.status_code == exc.status_code - assert response.content_type == ERROR_CONTENT_TYPE - - -@pytest.mark.asyncio -@freeze_time(datetime(1970, 1, 1, 0, 0, 10)) -async def test_blocked_ip_handler(mock_request): - """Test blocked ip error handler.""" - - exc = ProblemException( - status=403, - title="Permission Denied", - detail="Limit of login attempts reached. The current IP has been blocked due " - "to a high number of login attempts", - ext=mock_request - ) - problem = { - "title": exc.title, - "detail": exc.detail, - "error": 6000 - } - response = await blocked_ip_handler(mock_request, exc) - body = json.loads(response.body) - assert body == problem - assert response.status_code == exc.status_code - assert response.content_type == ERROR_CONTENT_TYPE diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index ecd1526c9b8..17fd85417ef 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -214,8 +214,6 @@ def start(params: dict): app.add_error_handler(jwt.exceptions.PyJWTError, error_handler.jwt_error_handler) app.add_error_handler(Unauthorized, error_handler.unauthorized_error_handler) app.add_error_handler(HTTPException, error_handler.http_error_handler) - app.add_error_handler(error_handler.MaxRequestsException, error_handler.exceeded_requests_handler) - app.add_error_handler(error_handler.BlockedIPException, error_handler.blocked_ip_handler) app.add_error_handler(ProblemException, error_handler.problem_error_handler) app.add_error_handler(403, error_handler.problem_error_handler) From eb62daad72d1a0fa2e573df4c1d3301e89d7ecb0 Mon Sep 17 00:00:00 2001 From: Marcelo Ariel Hamra Date: Thu, 22 Feb 2024 15:31:36 -0300 Subject: [PATCH 193/419] Revert '404' response. 
--- api/api/spec/spec.yaml | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index 265a20569ba..087fb054835 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -641,16 +641,6 @@ components: unknown-node: error: "The group does not exist" - InvalidUriResponse: - description: "Uri not found" - content: - application/json: - schema: - $ref: '#/components/schemas/ApiError' - example: - title: "Not Found" - detail: "404: Not Found" - UnsupportedMediaTypeResponse: description: "Unsupported media type" content: @@ -11494,8 +11484,6 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' - '404': - $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': @@ -11546,8 +11534,6 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' - '404': - $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '413': @@ -11591,8 +11577,6 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' - '404': - $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': @@ -14269,8 +14253,6 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' - '404': - $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': @@ -14326,8 +14308,6 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' - '404': - $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '406': @@ -14376,8 +14356,6 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' - '404': - $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': @@ -15057,8 +15035,6 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' - '404': - $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': @@ -15113,8 +15089,6 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' - '404': - $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '406': @@ -15163,8 +15137,6 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' - '404': - $ref: '#/components/responses/InvalidUriResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': From a5f011ab1f0a632855b61adfe304bd37807697fd Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Wed, 28 Feb 2024 14:50:56 -0300 Subject: [PATCH 194/419] Migrate request to httpx --- framework/wazuh/core/manager.py | 49 ++++++++++++++------------------- 1 file changed, 20 insertions(+), 29 deletions(-) diff --git a/framework/wazuh/core/manager.py b/framework/wazuh/core/manager.py index 35fb0270521..6426b563275 100644 --- 
a/framework/wazuh/core/manager.py +++ b/framework/wazuh/core/manager.py @@ -14,8 +14,8 @@ from os.path import exists from typing import Dict, Optional, Union -import aiohttp import certifi +import httpx import wazuh from api import configuration from wazuh import WazuhError, WazuhException, WazuhInternalError @@ -268,7 +268,7 @@ def get_api_conf() -> dict: return copy.deepcopy(configuration.api_conf) -def _get_connector() -> aiohttp.TCPConnector: +def _get_ssl_context() -> ssl.SSLContext: """Return a TCPConnector with default ssl context. Returns @@ -276,8 +276,7 @@ def _get_connector() -> aiohttp.TCPConnector: aiohttp.TCPConnector Instance with default ssl connector. """ - ssl_context = ssl.create_default_context(cafile=certifi.where()) - return aiohttp.TCPConnector(ssl=ssl_context) + return ssl.create_default_context(cafile=certifi.where()) def get_update_information_template( @@ -328,36 +327,28 @@ async def query_update_check_service(installation_uid: str) -> dict: headers = {WAZUH_UID_KEY: installation_uid, WAZUH_TAG_KEY: current_version} update_information = get_update_information_template( - update_check=True, - current_version=current_version, - last_check_date=get_utc_now() + update_check=True, current_version=current_version, last_check_date=get_utc_now() ) update_information['uuid'] = installation_uid - async with aiohttp.ClientSession(connector=_get_connector()) as session: + async with httpx.AsyncClient(verify=_get_ssl_context()) as client: try: - async with session.get(RELEASE_UPDATES_URL, headers=headers) as response: - response_data = await response.json() - - update_information['status_code'] = response.status - - if response.status == 200: - if len(response_data['data']['major']): - update_information['last_available_major'].update( - **response_data['data']['major'][-1] - ) - if len(response_data['data']['minor']): - update_information['last_available_minor'].update( - **response_data['data']['minor'][-1] - ) - if len(response_data['data']['patch']): - update_information['last_available_patch'].update( - **response_data['data']['patch'][-1] - ) - else: - update_information['message'] = response_data['errors']['detail'] - except aiohttp.ClientError as err: + response = await client.get(RELEASE_UPDATES_URL, headers=headers) + response_data = response.json() + + update_information['status_code'] = response.status_code + + if response.status_code == 200: + if len(response_data['data']['major']): + update_information['last_available_major'].update(**response_data['data']['major'][-1]) + if len(response_data['data']['minor']): + update_information['last_available_minor'].update(**response_data['data']['minor'][-1]) + if len(response_data['data']['patch']): + update_information['last_available_patch'].update(**response_data['data']['patch'][-1]) + else: + update_information['message'] = response_data['errors']['detail'] + except httpx.RequestError as err: update_information.update({'message': str(err), 'status_code': 500}) except Exception as err: update_information.update({'message': str(err), 'status_code': 500}) From 94c52dd17c1a935b55ce23135393127a9f258fff Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Thu, 29 Feb 2024 15:28:20 -0300 Subject: [PATCH 195/419] Migrate CTI background tasks to connexion 3 --- api/api/controllers/manager_controller.py | 11 ++-- api/api/middlewares.py | 18 ++----- api/api/signals.py | 61 +++++++++++------------ api/api/util.py | 4 +- api/scripts/wazuh_apid.py | 10 ++-- 5 files changed, 44 insertions(+), 60 deletions(-) diff --git 
a/api/api/controllers/manager_controller.py b/api/api/controllers/manager_controller.py index 9be383cd927..c17b7344026 100644 --- a/api/api/controllers/manager_controller.py +++ b/api/api/controllers/manager_controller.py @@ -4,22 +4,20 @@ import datetime import logging -from typing import Union -from aiohttp import web from connexion import request from connexion.lifecycle import ConnexionResponse import wazuh.manager as manager import wazuh.stats as stats from api.constants import INSTALLATION_UID_KEY, UPDATE_INFORMATION_KEY -from api.encoder import dumps, prettify from api.controllers.util import json_response, XML_CONTENT_TYPE from api.models.base_model_ import Body from api.util import ( deprecate_endpoint, deserialize_date, only_master_endpoint, parse_api_param, raise_if_exc, remove_nones_to_dict ) from api.validator import check_component_configuration_pair +from api.signals import cti_context from wazuh.core import common from wazuh.core import configuration from wazuh.core.cluster.dapi.dapi import DistributedAPI @@ -610,23 +608,22 @@ async def check_available_version(pretty: bool = False, force_query: bool = Fals web.Response API response. """ - if force_query and configuration.update_check_is_enabled(): logger.debug('Forcing query to the update check service...') dapi = DistributedAPI(f=query_update_check_service, f_kwargs={ - INSTALLATION_UID_KEY: request.app[INSTALLATION_UID_KEY] + INSTALLATION_UID_KEY: cti_context[INSTALLATION_UID_KEY] }, request_type='local_master', is_async=True, logger=logger ) update_information = raise_if_exc(await dapi.distribute_function()) - request.app[UPDATE_INFORMATION_KEY] = update_information.dikt + cti_context[UPDATE_INFORMATION_KEY] = update_information.dikt dapi = DistributedAPI(f=manager.get_update_information, f_kwargs={ - UPDATE_INFORMATION_KEY: request.app.get(UPDATE_INFORMATION_KEY, {}) + UPDATE_INFORMATION_KEY: cti_context.get(UPDATE_INFORMATION_KEY, {}) }, request_type='local_master', is_async=False, diff --git a/api/api/middlewares.py b/api/api/middlewares.py index dabd7f1cde1..1c57084e94a 100644 --- a/api/api/middlewares.py +++ b/api/api/middlewares.py @@ -5,7 +5,6 @@ import json import hashlib import time -import contextlib import logging import base64 import jwt @@ -14,7 +13,6 @@ from starlette.responses import Response from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint -from connexion import ConnexionMiddleware from connexion.lifecycle import ConnexionRequest from connexion.security import AbstractSecurityHandler @@ -157,11 +155,11 @@ def check_rate_limit( Maximum number of requests per minute permitted. error_code : int error code to return if the counter is greater than max_requests. - + Return ------ 0 if the counter is greater than max_requests - else error_code. + else error_code. """ if not globals()[current_time_key]: globals()[current_time_key] = get_utc_now().timestamp() @@ -243,7 +241,7 @@ class SecureHeadersMiddleware(BaseHTTPMiddleware): async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response: """Check and modifies the response headers with secure package. 
- + Parameters ---------- request : Request @@ -259,13 +257,3 @@ async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) - resp = await call_next(request) secure_headers.framework.starlette(resp) return resp - - -@contextlib.asynccontextmanager -async def lifespan_handler(_: ConnexionMiddleware): - """Logs the API startup and shutdown messages.""" - - # Log the initial server startup message. - start_stop_logger.info(f'Listening on {configuration.api_conf["host"]}:{configuration.api_conf["port"]}.') - yield - start_stop_logger.info('Shutdown wazuh-apid server.') diff --git a/api/api/signals.py b/api/api/signals.py index dd1e8ad3cdf..188e2b0b962 100644 --- a/api/api/signals.py +++ b/api/api/signals.py @@ -2,24 +2,32 @@ # Created by Wazuh, Inc. . # This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 import asyncio +import contextlib import logging import os import uuid from functools import wraps -from typing import AsyncGenerator, Callable +from typing import Callable -from aiohttp import web - -from api.constants import INSTALLATION_UID_KEY, INSTALLATION_UID_PATH, UPDATE_INFORMATION_KEY +from connexion import ConnexionMiddleware from wazuh.core import common from wazuh.core.cluster.utils import running_in_master_node from wazuh.core.configuration import update_check_is_enabled from wazuh.core.manager import query_update_check_service +from api import configuration +from api.constants import ( + INSTALLATION_UID_KEY, + INSTALLATION_UID_PATH, + UPDATE_INFORMATION_KEY, +) ONE_DAY_SLEEP = 60*60*24 logger = logging.getLogger('wazuh-api') +start_stop_logger = logging.getLogger('start-stop-api') + +cti_context = {} def cancel_signal_handler(func: Callable) -> Callable: @@ -51,14 +59,10 @@ async def modify_response_headers(request, response): @cancel_signal_handler -async def check_installation_uid(app: web.Application) -> None: - """Check if the installation UID exists, populate it if not and inject it into the application context. +async def check_installation_uid() -> None: + """Check if the installation UID exists, populate it if not and inject it into the global cti context.""" - Parameters - ---------- - app : web.Application - Application context to inject the installation UID - """ + global cti_context if os.path.exists(INSTALLATION_UID_PATH): logger.info("Getting installation UID...") with open(INSTALLATION_UID_PATH, 'r') as file: @@ -70,42 +74,37 @@ async def check_installation_uid(app: web.Application) -> None: file.write(installation_uid) os.chown(file.name, common.wazuh_uid(), common.wazuh_gid()) os.chmod(file.name, 0o660) - app[INSTALLATION_UID_KEY] = installation_uid + cti_context[INSTALLATION_UID_KEY] = installation_uid @cancel_signal_handler -async def get_update_information(app: web.Application) -> None: - """Get updates information from Update Check Service and inject into the application context. - - Parameters - ---------- - app : web.Application - Application context to inject the update information. 
- """ +async def get_update_information() -> None: + """Get updates information from Update Check Service and inject into the global cti context.""" + global cti_context while True: logger.info('Getting updates information...') - app[UPDATE_INFORMATION_KEY] = await query_update_check_service(app[INSTALLATION_UID_KEY]) - + cti_context[UPDATE_INFORMATION_KEY] = await query_update_check_service(cti_context[INSTALLATION_UID_KEY]) await asyncio.sleep(ONE_DAY_SLEEP) -async def register_background_tasks(app: web.Application) -> AsyncGenerator: - """Cleanup context to handle background tasks. +@contextlib.asynccontextmanager +async def lifespan_handler(_: ConnexionMiddleware): + """Logs the API startup/shutdown messages and register background tasks.""" - Parameters - ---------- - app : web.Application - Application context to pass to tasks. - """ tasks: list[asyncio.Task] = [] if running_in_master_node() and update_check_is_enabled(): - tasks.append(asyncio.create_task(check_installation_uid(app))) - tasks.append(asyncio.create_task(get_update_information(app))) + tasks.append(asyncio.create_task(check_installation_uid())) + tasks.append(asyncio.create_task(get_update_information())) + + # Log the initial server startup message. + start_stop_logger.info(f'Listening on {configuration.api_conf["host"]}:{configuration.api_conf["port"]}.') yield for task in tasks: task.cancel() await task + + start_stop_logger.info('Shutdown wazuh-apid server.') diff --git a/api/api/util.py b/api/api/util.py index 9b122b95436..ccca39c8753 100644 --- a/api/api/util.py +++ b/api/api/util.py @@ -453,10 +453,10 @@ def only_master_endpoint(func): """Decorator used to restrict endpoints only on master node.""" @wraps(func) - def wrapper(*args, **kwargs): + async def wrapper(*args, **kwargs): if not running_in_master_node(): raise_if_exc(exception.WazuhResourceNotFound(902)) else: - return func(*args, **kwargs) + return (await func(*args, **kwargs)) return wrapper diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index 17fd85417ef..a91b2ce4097 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -149,7 +149,7 @@ def start(params: dict): If another Wazuh API is running, this function will fail because uvicorn server will not be able to create server processes in the same port. - The function creates the pool processes, the AsyncApp instance, setups the API spec.yaml, + The function creates the pool processes, the AsyncApp instance, setups the API spec.yaml, the middleware classes, the error_handlers, the lifespan, and runs the uvicorn ASGI server. Parameters @@ -217,9 +217,8 @@ def start(params: dict): app.add_error_handler(ProblemException, error_handler.problem_error_handler) app.add_error_handler(403, error_handler.problem_error_handler) - # Add application signals TO BE MODIFIED AFTER IMPLEMENTING CTI IN CONNEXION 3.0 + # TO BE MODIFIED AFTER IMPLEMENTING CTI IN CONNEXION 3.0 # app.app.on_response_prepare.append(modify_response_headers) - # app.app.cleanup_ctx.append(register_background_tasks) # API configuration logging logger.debug(f'Loaded API configuration: {api_conf}') @@ -309,7 +308,7 @@ def error(self, msg, *args, **kws): action='store_true', dest='root') parser.add_argument('-c', help="Configuration file to use", type=str, metavar='config', dest='config_file') - parser.add_argument('-d', help="Enable debug messages. Use twice to increase verbosity.", + parser.add_argument('-d', help="Enable debug messages. 
Use twice to increase verbosity.", action='count', dest='debug_level') args = parser.parse_args() @@ -346,11 +345,12 @@ def error(self, msg, *args, **kws): from api.configuration import api_conf, security_conf, generate_private_key, \ generate_self_signed_certificate from api.middlewares import SecureHeadersMiddleware, CheckRateLimitsMiddleware, \ - CheckBlockedIP, WazuhAccessLoggerMiddleware, lifespan_handler + CheckBlockedIP, WazuhAccessLoggerMiddleware from api.util import to_relative_path from api.uri_parser import APIUriParser from api.constants import API_LOG_PATH from api.alogging import set_logging + from api.signals import lifespan_handler from wazuh.rbac.orm import check_database_integrity from wazuh.core import pyDaemonModule, common, utils From f94afd064acb5d8a909492357a27321373d22e15 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Thu, 29 Feb 2024 17:19:16 -0300 Subject: [PATCH 196/419] Fix API unittests --- .../test/test_manager_controller.py | 42 ++++++++------- api/api/test/test_signals.py | 51 ++++++++----------- api/api/test/test_util.py | 10 ++-- 3 files changed, 45 insertions(+), 58 deletions(-) diff --git a/api/api/controllers/test/test_manager_controller.py b/api/api/controllers/test/test_manager_controller.py index c34978e91cc..3ab480a37e2 100644 --- a/api/api/controllers/test/test_manager_controller.py +++ b/api/api/controllers/test/test_manager_controller.py @@ -6,7 +6,6 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -from aiohttp import web_response from connexion.lifecycle import ConnexionResponse from api.constants import INSTALLATION_UID_KEY, UPDATE_INFORMATION_KEY from api.controllers.test.utils import CustomAffectedItems @@ -434,7 +433,6 @@ async def test_update_configuration(mock_exc, mock_dapi, mock_remove, mock_dfunc "force_query,dapi_call_count,update_check", ([True, 2, True], [True, 1, False], [False, 1, True]) ) @pytest.mark.asyncio -@pytest.mark.parametrize("mock_request", ["manager_controller"], indirect=True) @patch('api.controllers.manager_controller.configuration.update_check_is_enabled') @patch('api.controllers.manager_controller.DistributedAPI.distribute_function', return_value=AsyncMock()) @patch('api.controllers.manager_controller.DistributedAPI.__init__', return_value=None) @@ -447,32 +445,32 @@ async def test_check_available_version( force_query, dapi_call_count, update_check, - mock_request ): """Verify 'check_available_version' endpoint is working as expected.""" - app_context = {UPDATE_INFORMATION_KEY: {"foo": 1}, INSTALLATION_UID_KEY: "1234"} - mock_request.app = app_context + cti_context = {UPDATE_INFORMATION_KEY: {"foo": 1}, INSTALLATION_UID_KEY: "1234"} update_check_mock.return_value = update_check - result = await check_available_version(force_query=force_query) - assert mock_dapi.call_count == dapi_call_count + with patch('api.controllers.manager_controller.cti_context', new=cti_context): + + result = await check_available_version(force_query=force_query) + assert mock_dapi.call_count == dapi_call_count - if force_query and update_check: - mock_dapi.assert_any_call( - f=query_update_check_service, - f_kwargs={INSTALLATION_UID_KEY: app_context[INSTALLATION_UID_KEY]}, + if force_query and update_check: + mock_dapi.assert_any_call( + f=query_update_check_service, + f_kwargs={INSTALLATION_UID_KEY: cti_context[INSTALLATION_UID_KEY]}, + request_type='local_master', + is_async=True, + logger=ANY, + ) + mock_exc.assert_any_call(mock_dfunc.return_value) + + mock_dapi.assert_called_with( + 
f=manager.get_update_information, + f_kwargs={UPDATE_INFORMATION_KEY: cti_context[UPDATE_INFORMATION_KEY]}, request_type='local_master', - is_async=True, + is_async=False, logger=ANY, ) - mock_exc.assert_any_call(mock_dfunc.return_value) - - mock_dapi.assert_called_with( - f=manager.get_update_information, - f_kwargs={UPDATE_INFORMATION_KEY: app_context[UPDATE_INFORMATION_KEY]}, - request_type='local_master', - is_async=False, - logger=ANY, - ) - mock_exc.assert_called_with(mock_dfunc.return_value) + mock_exc.assert_called_with(mock_dfunc.return_value) assert isinstance(result, ConnexionResponse) diff --git a/api/api/test/test_signals.py b/api/api/test/test_signals.py index f5b13f0eafb..ca6987e4694 100644 --- a/api/api/test/test_signals.py +++ b/api/api/test/test_signals.py @@ -4,6 +4,8 @@ from uuid import uuid4 import pytest +from starlette.applications import Starlette +from starlette.testclient import TestClient from api.constants import INSTALLATION_UID_KEY, UPDATE_INFORMATION_KEY from api.signals import ( @@ -11,23 +13,11 @@ cancel_signal_handler, check_installation_uid, get_update_information, - register_background_tasks, + lifespan_handler, + cti_context ) # Fixtures - - -@pytest.fixture -def application_mock(): - return {} - - -@pytest.fixture -def application_mock_with_installation_uid(application_mock): - application_mock[INSTALLATION_UID_KEY] = str(uuid4()) - return application_mock - - @pytest.fixture def installation_uid_mock(): with patch( @@ -61,37 +51,35 @@ async def test_cancel_signal_handler_catch_cancelled_error_and_dont_rise(): @patch('api.signals.common.wazuh_uid') @pytest.mark.asyncio async def test_check_installation_uid_populate_uid_if_not_exists( - uid_mock, gid_mock, chown_mock, chmod_mock, installation_uid_mock, application_mock + uid_mock, gid_mock, chown_mock, chmod_mock, installation_uid_mock ): uid = gid = 999 uid_mock.return_value = uid gid_mock.return_value = gid - await check_installation_uid(application_mock) + await check_installation_uid() assert os.path.exists(installation_uid_mock) with open(installation_uid_mock) as file: - assert application_mock[INSTALLATION_UID_KEY] == file.readline() + assert cti_context[INSTALLATION_UID_KEY] == file.readline() chown_mock.assert_called_with(file.name, uid, gid) chmod_mock.assert_called_with(file.name, 0o660) @pytest.mark.asyncio -async def test_check_installation_uid_get_uid_from_file( - installation_uid_mock, application_mock -): +async def test_check_installation_uid_get_uid_from_file(installation_uid_mock): installation_uid = str(uuid4()) with open(installation_uid_mock, 'w') as file: file.write(installation_uid) - await check_installation_uid(application_mock) + await check_installation_uid() - assert application_mock[INSTALLATION_UID_KEY] == installation_uid + assert cti_context[INSTALLATION_UID_KEY] == installation_uid @pytest.mark.asyncio async def test_get_update_information_injects_correct_data_into_app_context( - application_mock_with_installation_uid, query_update_check_service_mock + query_update_check_service_mock ): response_data = { 'last_check_date': '2023-10-11T16:47:13.066946+00:00', @@ -122,24 +110,24 @@ async def test_get_update_information_injects_correct_data_into_app_context( } query_update_check_service_mock.return_value = response_data + cti_context[INSTALLATION_UID_KEY] = str(uuid4()) task = asyncio.create_task( - get_update_information(application_mock_with_installation_uid) + get_update_information() ) await asyncio.sleep(1) task.cancel() query_update_check_service_mock.assert_called() 
- assert application_mock_with_installation_uid[UPDATE_INFORMATION_KEY] == response_data + assert cti_context[UPDATE_INFORMATION_KEY] == response_data @pytest.mark.asyncio -async def test_get_update_information_schedule( - application_mock_with_installation_uid, query_update_check_service_mock -): +async def test_get_update_information_schedule(query_update_check_service_mock): + cti_context[INSTALLATION_UID_KEY] = str(uuid4()) with patch('api.signals.asyncio') as sleep_mock: task = asyncio.create_task( - get_update_information(application_mock_with_installation_uid) + get_update_information() ) await asyncio.sleep(1) task.cancel() @@ -182,9 +170,10 @@ def __await__(self): with patch('api.signals.asyncio') as create_task_mock: create_task_mock.create_task.return_value = AwaitableMock(spec=asyncio.Task) create_task_mock.create_task.return_value.cancel = AsyncMock() - [_ async for _ in register_background_tasks({})] - assert create_task_mock.create_task.call_count == registered_tasks + with TestClient(Starlette(lifespan=lifespan_handler)): + assert create_task_mock.create_task.call_count == registered_tasks + assert ( create_task_mock.create_task.return_value.cancel.call_count == registered_tasks diff --git a/api/api/test/test_util.py b/api/api/test/test_util.py index 5b01d7e58fc..442e87156c4 100644 --- a/api/api/test/test_util.py +++ b/api/api/test/test_util.py @@ -10,7 +10,6 @@ from connexion import ProblemException from api import util -from api import alogging from wazuh.core.exception import WazuhError, WazuhPermissionError, WazuhResourceNotFound, \ WazuhInternalError @@ -279,17 +278,18 @@ def dummy_func(): @patch('api.util.raise_if_exc') -def test_only_master_endpoint(mock_exc): +@pytest.mark.asyncio +async def test_only_master_endpoint(mock_exc): """Test that only_master_endpoint decorator raise the correct exception when running_in_master_node is False.""" @util.only_master_endpoint - def func_(): + async def func_(): return ret_val ret_val = 'foo' with patch('api.util.running_in_master_node', return_value=False): - func_() + await func_() mock_exc.assert_called_once_with(WazuhResourceNotFound(902)) with patch('api.util.running_in_master_node', return_value=True): - assert func_() == ret_val + assert await func_() == ret_val From b1c1eb811d43e6c7d0bccb59eba82f56a299a055 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Thu, 29 Feb 2024 18:28:22 -0300 Subject: [PATCH 197/419] Fix Framework unittests --- framework/requirements.txt | 2 -- framework/wazuh/core/manager.py | 8 +------- framework/wazuh/core/tests/test_manager.py | 16 +++++----------- 3 files changed, 6 insertions(+), 20 deletions(-) diff --git a/framework/requirements.txt b/framework/requirements.txt index 38f49aeeb6d..e3745be950e 100644 --- a/framework/requirements.txt +++ b/framework/requirements.txt @@ -1,5 +1,3 @@ -# TO BE DELETED AFTER IMPLEMENTING CTI IN CONNEXION 3.0 -aiohttp==3.9.1 # END TO BE DELETED asn1crypto==1.3.0 attrs==23.1.0 diff --git a/framework/wazuh/core/manager.py b/framework/wazuh/core/manager.py index 6426b563275..cfed13d5f6a 100644 --- a/framework/wazuh/core/manager.py +++ b/framework/wazuh/core/manager.py @@ -269,13 +269,7 @@ def get_api_conf() -> dict: def _get_ssl_context() -> ssl.SSLContext: - """Return a TCPConnector with default ssl context. - - Returns - ------- - aiohttp.TCPConnector - Instance with default ssl connector. 
- """ + """Return a default ssl context.""" return ssl.create_default_context(cafile=certifi.where()) diff --git a/framework/wazuh/core/tests/test_manager.py b/framework/wazuh/core/tests/test_manager.py index c558a38af61..df7a6ef9308 100644 --- a/framework/wazuh/core/tests/test_manager.py +++ b/framework/wazuh/core/tests/test_manager.py @@ -8,8 +8,8 @@ from unittest.mock import patch from uuid import uuid4 +import httpx import pytest -from aiohttp import ClientError with patch('wazuh.core.common.wazuh_uid'): with patch('wazuh.core.common.wazuh_gid'): @@ -46,7 +46,7 @@ def test_manager(): @pytest.fixture def client_session_get_mock(): - with patch('aiohttp.ClientSession.get') as get_mock: + with patch('httpx.AsyncClient.get') as get_mock: yield get_mock @@ -259,7 +259,7 @@ async def test_query_update_check_service_catch_exceptions_and_dont_raise( ): """Test that the query_update_check_service function handle errors correctly.""" message_error = 'Some client error' - client_session_get_mock.side_effect = ClientError(message_error) + client_session_get_mock.side_effect = httpx.RequestError(message_error) update_information = await query_update_check_service(installation_uid) client_session_get_mock.assert_called() @@ -317,10 +317,7 @@ def _build_release_info(semvers: list[str]) -> list: } status = 200 - client_session_get_mock.return_value.__aenter__.return_value.status = status - client_session_get_mock.return_value.__aenter__.return_value.json.return_value = ( - response_data - ) + client_session_get_mock.return_value = httpx.Response(status_code=status, json=response_data) update_information = await query_update_check_service(installation_uid) @@ -362,10 +359,7 @@ async def test_query_update_check_service_returns_correct_data_on_error( response_data = {'errors': {'detail': 'Unauthorized'}} status = 403 - client_session_get_mock.return_value.__aenter__.return_value.status = status - client_session_get_mock.return_value.__aenter__.return_value.json.return_value = ( - response_data - ) + client_session_get_mock.return_value = httpx.Response(status_code=status, json=response_data) update_information = await query_update_check_service(installation_uid) From 94b5925324a9d908d1ddac85f587492555d58771 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Mon, 4 Mar 2024 11:33:03 -0300 Subject: [PATCH 198/419] Add follow redirect --- framework/requirements.txt | 1 - framework/wazuh/core/manager.py | 2 +- framework/wazuh/core/tests/test_manager.py | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) diff --git a/framework/requirements.txt b/framework/requirements.txt index e3745be950e..dff00bc36cd 100644 --- a/framework/requirements.txt +++ b/framework/requirements.txt @@ -1,4 +1,3 @@ -# END TO BE DELETED asn1crypto==1.3.0 attrs==23.1.0 azure-common==1.1.25 diff --git a/framework/wazuh/core/manager.py b/framework/wazuh/core/manager.py index cfed13d5f6a..bc02eea7168 100644 --- a/framework/wazuh/core/manager.py +++ b/framework/wazuh/core/manager.py @@ -328,7 +328,7 @@ async def query_update_check_service(installation_uid: str) -> dict: async with httpx.AsyncClient(verify=_get_ssl_context()) as client: try: - response = await client.get(RELEASE_UPDATES_URL, headers=headers) + response = await client.get(RELEASE_UPDATES_URL, headers=headers, follow_redirects=True) response_data = response.json() update_information['status_code'] = response.status_code diff --git a/framework/wazuh/core/tests/test_manager.py b/framework/wazuh/core/tests/test_manager.py index df7a6ef9308..e1f3d0b1b1c 100644 --- 
a/framework/wazuh/core/tests/test_manager.py +++ b/framework/wazuh/core/tests/test_manager.py @@ -387,4 +387,5 @@ async def test_query_update_check_service_request( WAZUH_UID_KEY: installation_uid, WAZUH_TAG_KEY: f'v{version}', }, + follow_redirects=True ) From 839da2471b5c1982b4639b955009cbc26cdaf178 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Wed, 6 Mar 2024 09:38:51 -0300 Subject: [PATCH 199/419] Use base logger --- api/api/signals.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/api/api/signals.py b/api/api/signals.py index 188e2b0b962..ced93677737 100644 --- a/api/api/signals.py +++ b/api/api/signals.py @@ -25,7 +25,6 @@ ONE_DAY_SLEEP = 60*60*24 logger = logging.getLogger('wazuh-api') -start_stop_logger = logging.getLogger('start-stop-api') cti_context = {} @@ -99,7 +98,7 @@ async def lifespan_handler(_: ConnexionMiddleware): tasks.append(asyncio.create_task(get_update_information())) # Log the initial server startup message. - start_stop_logger.info(f'Listening on {configuration.api_conf["host"]}:{configuration.api_conf["port"]}.') + logger.info(f'Listening on {configuration.api_conf["host"]}:{configuration.api_conf["port"]}.') yield @@ -107,4 +106,4 @@ async def lifespan_handler(_: ConnexionMiddleware): task.cancel() await task - start_stop_logger.info('Shutdown wazuh-apid server.') + logger.info('Shutdown wazuh-apid server.') From 21608a74f2c2d53ae11428859b0da66c93217c6b Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Tue, 5 Mar 2024 17:02:57 -0300 Subject: [PATCH 200/419] Remove uvicorn server header --- api/api/signals.py | 5 ---- api/scripts/wazuh_apid.py | 48 +++++++++++++++++++-------------------- 2 files changed, 23 insertions(+), 30 deletions(-) diff --git a/api/api/signals.py b/api/api/signals.py index ced93677737..adcff3242be 100644 --- a/api/api/signals.py +++ b/api/api/signals.py @@ -52,11 +52,6 @@ async def wrapper(*args, **kwargs): return wrapper -async def modify_response_headers(request, response): - # Delete 'Server' entry - response.headers.pop('Server', None) - - @cancel_signal_handler async def check_installation_uid() -> None: """Check if the installation UID exists, populate it if not and inject it into the global cti context.""" diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index a91b2ce4097..4009c8ff72b 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -71,7 +71,7 @@ def configure_ssl(params): uvicorn_params : dict uvicorn parameter configuration dictionary. 
""" - from api.constants import CONFIG_FILE_PATH + from api.constants import CONFIG_FILE_PATH try: # Generate SSL if it does not exist and HTTPS is enabled @@ -217,9 +217,6 @@ def start(params: dict): app.add_error_handler(ProblemException, error_handler.problem_error_handler) app.add_error_handler(403, error_handler.problem_error_handler) - # TO BE MODIFIED AFTER IMPLEMENTING CTI IN CONNEXION 3.0 - # app.app.on_response_prepare.append(modify_response_headers) - # API configuration logging logger.debug(f'Loaded API configuration: {api_conf}') logger.debug(f'Loaded security API configuration: {security_conf}') @@ -240,7 +237,7 @@ def start(params: dict): def print_version(): - from wazuh.core.cluster import __version__, __author__, __wazuh_name__, __licence__ + from wazuh.core.cluster import __author__, __licence__, __version__, __wazuh_name__ print('\n{} {} - {}\n\n{}'.format(__wazuh_name__, __version__, __author__, __licence__)) @@ -326,34 +323,34 @@ def error(self, msg, *args, **kws): import logging import logging.config import ssl - import uvicorn + import jwt + import uvicorn from connexion import AsyncApp - from connexion.options import SwaggerUIOptions - from connexion.exceptions import Unauthorized, HTTPException, ProblemException + from connexion.exceptions import HTTPException, ProblemException, Unauthorized from connexion.middleware import MiddlewarePosition - - from starlette.middleware.cors import CORSMiddleware - + from connexion.options import SwaggerUIOptions from content_size_limit_asgi import ContentSizeLimitMiddleware from content_size_limit_asgi.errors import ContentSizeExceeded + from starlette.middleware.cors import CORSMiddleware + from wazuh.core import common, pyDaemonModule, utils + from wazuh.rbac.orm import check_database_integrity - import jwt - - from api import error_handler, __path__ as api_path + from api import __path__ as api_path + from api import error_handler + from api.alogging import set_logging from api.api_exception import APIError - from api.configuration import api_conf, security_conf, generate_private_key, \ - generate_self_signed_certificate - from api.middlewares import SecureHeadersMiddleware, CheckRateLimitsMiddleware, \ - CheckBlockedIP, WazuhAccessLoggerMiddleware - from api.util import to_relative_path - from api.uri_parser import APIUriParser + from api.configuration import api_conf, generate_private_key, generate_self_signed_certificate, security_conf from api.constants import API_LOG_PATH - from api.alogging import set_logging + from api.middlewares import ( + CheckBlockedIP, + CheckRateLimitsMiddleware, + SecureHeadersMiddleware, + WazuhAccessLoggerMiddleware, + ) from api.signals import lifespan_handler - - from wazuh.rbac.orm import check_database_integrity - from wazuh.core import pyDaemonModule, common, utils + from api.uri_parser import APIUriParser + from api.util import to_relative_path try: if args.config_file is not None: @@ -367,13 +364,14 @@ def error(self, msg, *args, **kws): uvicorn_params['host'] = api_conf['host'] uvicorn_params['port'] = api_conf['port'] uvicorn_params['loop'] = 'uvloop' + uvicorn_params['server_header'] = False # Set up logger file try: uvicorn_params['log_config'] = set_logging(log_filepath=API_LOG_PATH, log_level=api_conf['logs']['level'].upper(), foreground_mode=args.foreground) - except APIError as e: + except APIError: print(f"Configuration error in the API log format: {api_conf['logs']['format']}.") sys.exit(1) From 9967dc7b665b1b4fb21b33719f69e04398f8fb82 Mon Sep 17 00:00:00 2001 From: RamosFe 
Date: Thu, 7 Mar 2024 16:22:54 -0300 Subject: [PATCH 201/419] Fixed jsonschema warnings. --- api/api/test/test_validator.py | 6 ++-- api/api/validator.py | 62 +++++++++++++++++----------------- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/api/api/test/test_validator.py b/api/api/test/test_validator.py index d192a10f56a..9c3ae7c1873 100644 --- a/api/api/test/test_validator.py +++ b/api/api/test/test_validator.py @@ -253,7 +253,7 @@ def test_validation_json_ok(value, format): """Verify that each value is of the indicated format.""" assert (js.validate({"key": value}, schema={'type': 'object', 'properties': {'key': {'type': 'string', 'format': format}}}, - format_checker=js.draft4_format_checker) is None) + format_checker=js.Draft4Validator.FORMAT_CHECKER) is None) @pytest.mark.parametrize('value, format', [ @@ -292,7 +292,7 @@ def test_validation_json_ko(value, format): js.validate({"key": value}, schema={'type': 'object', 'properties': {'key': {'type': 'string', 'format': format}}}, - format_checker=js.draft4_format_checker) + format_checker=js.Draft4Validator.FORMAT_CHECKER) @pytest.mark.parametrize("component, configuration, expected_response", [ @@ -307,4 +307,4 @@ def test_check_component_configuration_pair(component, configuration, expected_r assert isinstance(response, expected_response.__class__) assert response.code == expected_response.code else: - assert response is expected_response + assert response is expected_response \ No newline at end of file diff --git a/api/api/validator.py b/api/api/validator.py index 5061e7d72ac..9765fe48ee3 100644 --- a/api/api/validator.py +++ b/api/api/validator.py @@ -8,7 +8,7 @@ from typing import Dict, List from defusedxml import ElementTree as ET -from jsonschema import draft4_format_checker +from jsonschema import Draft4Validator from wazuh.core import common from wazuh.core.exception import WazuhError @@ -349,22 +349,22 @@ def check_component_configuration_pair(component: str, configuration: str) -> Wa f"{WAZUH_COMPONENT_CONFIGURATION_MAPPING[component]}") -@draft4_format_checker.checks("alphanumeric") +@Draft4Validator.FORMAT_CHECKER.checks("alphanumeric") def format_alphanumeric(value): return check_exp(value, _alphanumeric_param) -@draft4_format_checker.checks("alphanumeric_symbols") +@Draft4Validator.FORMAT_CHECKER.checks("alphanumeric_symbols") def format_alphanumeric_symbols(value): return check_exp(value, _symbols_alphanumeric_param) -@draft4_format_checker.checks("base64") +@Draft4Validator.FORMAT_CHECKER.checks("base64") def format_base64(value): return check_exp(value, _base64) -@draft4_format_checker.checks("get_dirnames_path") +@Draft4Validator.FORMAT_CHECKER.checks("get_dirnames_path") def format_get_dirnames_path(relative_path): if not is_safe_path(relative_path): return False @@ -372,132 +372,132 @@ def format_get_dirnames_path(relative_path): return check_exp(relative_path, _get_dirnames_path) -@draft4_format_checker.checks("hash") +@Draft4Validator.FORMAT_CHECKER.checks("hash") def format_hash(value): return check_exp(value, _hashes) -@draft4_format_checker.checks("names") +@Draft4Validator.FORMAT_CHECKER.checks("names") def format_names(value): return check_exp(value, _names) -@draft4_format_checker.checks("numbers") +@Draft4Validator.FORMAT_CHECKER.checks("numbers") def format_numbers(value): return check_exp(value, _numbers) -@draft4_format_checker.checks("numbers_or_all") +@Draft4Validator.FORMAT_CHECKER.checks("numbers_or_all") def format_numbers_or_all(value): return check_exp(value, _numbers_or_all) 
-@draft4_format_checker.checks("cdb_filename_path") +@Draft4Validator.FORMAT_CHECKER.checks("cdb_filename_path") def format_cdb_filename_path(value): return check_exp(value, _cdb_filename_path) -@draft4_format_checker.checks("xml_filename") +@Draft4Validator.FORMAT_CHECKER.checks("xml_filename") def format_xml_filename(value): return check_exp(value, _xml_filename) -@draft4_format_checker.checks("xml_filename_path") +@Draft4Validator.FORMAT_CHECKER.checks("xml_filename_path") def format_xml_filename_path(value): return check_exp(value, _xml_filename_path) -@draft4_format_checker.checks("path") +@Draft4Validator.FORMAT_CHECKER.checks("path") def format_path(value): if not is_safe_path(value): return False return check_exp(value, _paths) -@draft4_format_checker.checks("wpk_path") +@Draft4Validator.FORMAT_CHECKER.checks("wpk_path") def format_wpk_path(value): if not is_safe_path(value, relative=False): return False return check_exp(value, _wpk_path) -@draft4_format_checker.checks("active_response_command") +@Draft4Validator.FORMAT_CHECKER.checks("active_response_command") def format_active_response_command(command): if not is_safe_path(command): return False return check_exp(command, _active_response_command) -@draft4_format_checker.checks("query") +@Draft4Validator.FORMAT_CHECKER.checks("query") def format_query(value): return check_exp(value, _query_param) -@draft4_format_checker.checks("range") +@Draft4Validator.FORMAT_CHECKER.checks("range") def format_range(value): return check_exp(value, _ranges) -@draft4_format_checker.checks("search") +@Draft4Validator.FORMAT_CHECKER.checks("search") def format_search(value): return check_exp(value, _search_param) -@draft4_format_checker.checks("sort") +@Draft4Validator.FORMAT_CHECKER.checks("sort") def format_sort(value): return check_exp(value, _sort_param) -@draft4_format_checker.checks("timeframe") +@Draft4Validator.FORMAT_CHECKER.checks("timeframe") def format_timeframe(value): return check_exp(value, _timeframe_type) -@draft4_format_checker.checks("wazuh_key") +@Draft4Validator.FORMAT_CHECKER.checks("wazuh_key") def format_wazuh_key(value): return check_exp(value, _wazuh_key) -@draft4_format_checker.checks("wazuh_version") +@Draft4Validator.FORMAT_CHECKER.checks("wazuh_version") def format_wazuh_version(value): return check_exp(value, _wazuh_version) -@draft4_format_checker.checks("date") +@Draft4Validator.FORMAT_CHECKER.checks("date") def format_date(value): return check_exp(value, _iso8601_date) -@draft4_format_checker.checks("date-time") +@Draft4Validator.FORMAT_CHECKER.checks("date-time") def format_datetime(value): return check_exp(value, _iso8601_date_time) -@draft4_format_checker.checks("hash_or_empty") +@Draft4Validator.FORMAT_CHECKER.checks("hash_or_empty") def format_hash_or_empty(value): return True if value == "" else format_hash(value) -@draft4_format_checker.checks("names_or_empty") +@Draft4Validator.FORMAT_CHECKER.checks("names_or_empty") def format_names_or_empty(value): return True if value == "" else format_names(value) -@draft4_format_checker.checks("numbers_or_empty") +@Draft4Validator.FORMAT_CHECKER.checks("numbers_or_empty") def format_numbers_or_empty(value): return True if value == "" else format_numbers(value) -@draft4_format_checker.checks("date-time_or_empty") +@Draft4Validator.FORMAT_CHECKER.checks("date-time_or_empty") def format_datetime_or_empty(value): return True if value == "" else format_datetime(value) -@draft4_format_checker.checks("group_names") +@Draft4Validator.FORMAT_CHECKER.checks("group_names") def 
format_group_names(value): return check_exp(value, _group_names) -@draft4_format_checker.checks("group_names_or_all") +@Draft4Validator.FORMAT_CHECKER.checks("group_names_or_all") def format_group_names_or_all(value): - return check_exp(value, _group_names_or_all) + return check_exp(value, _group_names_or_all) \ No newline at end of file From 492040535048881683e0218f876d5f5ac0518704 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Mon, 11 Mar 2024 16:28:24 -0300 Subject: [PATCH 202/419] Added EOF lines. --- api/api/test/test_validator.py | 2 +- api/api/validator.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/api/api/test/test_validator.py b/api/api/test/test_validator.py index 9c3ae7c1873..11a2be3c986 100644 --- a/api/api/test/test_validator.py +++ b/api/api/test/test_validator.py @@ -307,4 +307,4 @@ def test_check_component_configuration_pair(component, configuration, expected_r assert isinstance(response, expected_response.__class__) assert response.code == expected_response.code else: - assert response is expected_response \ No newline at end of file + assert response is expected_response diff --git a/api/api/validator.py b/api/api/validator.py index 9765fe48ee3..d5474cfbf6b 100644 --- a/api/api/validator.py +++ b/api/api/validator.py @@ -500,4 +500,4 @@ def format_group_names(value): @Draft4Validator.FORMAT_CHECKER.checks("group_names_or_all") def format_group_names_or_all(value): - return check_exp(value, _group_names_or_all) \ No newline at end of file + return check_exp(value, _group_names_or_all) From 10d6eedd1a79ffab25520421319a04cb73049252 Mon Sep 17 00:00:00 2001 From: javier Date: Tue, 2 Apr 2024 18:44:32 +0200 Subject: [PATCH 203/419] Add middleware expect --- api/api/api_exception.py | 7 ++++++ api/api/error_handler.py | 24 +++++++++++++++++- api/api/middlewares.py | 39 +++++++++++++++++++++++++++++- api/api/test/test_error_handler.py | 22 ++++++++++++++++- api/api/test/test_middlewares.py | 26 +++++++++++++++++++- api/scripts/wazuh_apid.py | 5 +++- 6 files changed, 118 insertions(+), 5 deletions(-) diff --git a/api/api/api_exception.py b/api/api/api_exception.py index f60d280ee76..885d1017a67 100644 --- a/api/api/api_exception.py +++ b/api/api/api_exception.py @@ -78,3 +78,10 @@ def __init__(self, code): ext = {"code": exc.code} ext.update({"remediation": exc.remediation} if hasattr(exc, 'remediation') else {}) super().__init__(status=429, title=exc.title, detail=exc.message, type=exc.type, ext=ext) + +class ExpectFailedException(ProblemException): + """Exception for failed expectation (status code 417).""" + + def __init__(self, *, status=417, title=None, detail=None): + ext = {"code": 417} + super().__init__(status=status, title=title, detail=detail, ext=ext) \ No newline at end of file diff --git a/api/api/error_handler.py b/api/api/error_handler.py index 33d555ecfed..d5e5452cb35 100644 --- a/api/api/error_handler.py +++ b/api/api/error_handler.py @@ -10,7 +10,7 @@ from api import configuration from api.middlewares import ip_block, ip_stats, LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT -from api.api_exception import BlockedIPException, MaxRequestsException +from api.api_exception import BlockedIPException, MaxRequestsException, ExpectFailedException from api.controllers.util import json_response, ERROR_CONTENT_TYPE from wazuh.core.utils import get_utc_now @@ -55,6 +55,28 @@ def _cleanup_detail_field(detail: str) -> str: return ' '.join(str(detail).replace("\n\n", ". 
").replace("\n", "").split()) +async def expect_failed_error_handler(request: ConnexionRequest, exc: ExpectFailedException) -> ConnexionResponse: + """Handler for the 'Expect' HTTP header. + + Parameters + ---------- + request : ConnexionRequest + Incoming request. + + Returns + ------- + Response + HTTP Response returned to the client. + """ + problem = { + "title": "Expectation failed", + "detail": "Unknown Expect", + "error": 417 + } + return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', + status_code=417, content_type=ERROR_CONTENT_TYPE) + + async def unauthorized_error_handler(request: ConnexionRequest, exc: exceptions.Unauthorized) -> ConnexionResponse: """Unauthorized Exception Error handler. diff --git a/api/api/middlewares.py b/api/api/middlewares.py index 1c57084e94a..96920118b97 100644 --- a/api/api/middlewares.py +++ b/api/api/middlewares.py @@ -23,7 +23,8 @@ from api import configuration from api.alogging import custom_logging from api.authentication import generate_keypair, JWT_ALGORITHM -from api.api_exception import BlockedIPException, MaxRequestsException +from api.api_exception import BlockedIPException, MaxRequestsException, ExpectFailedException +from api.configuration import default_api_configuration # Default of the max event requests allowed per minute MAX_REQUESTS_EVENTS_DEFAULT = 30 @@ -257,3 +258,39 @@ async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) - resp = await call_next(request) secure_headers.framework.starlette(resp) return resp + +class CheckExpectHeaderMiddleware(BaseHTTPMiddleware): + """Middleware to check for the 'Expect' header in incoming requests.""" + + async def dispatch(self, request: ConnexionRequest, call_next: RequestResponseEndpoint) -> Response: + """Check for specific request headers and generate error 417 if conditions are not met. + + Parameters + ---------- + request : Request + HTTP Request received. + call_next : RequestResponseEndpoint + Endpoint callable to be executed. + + Returns + ------- + Returned response. 
+ """ + + if 'Expect' not in request.headers: + response = await call_next(request) + return response + else: + expect_value = request.headers["Expect"].lower() + + if expect_value != '100-continue': + raise ExpectFailedException(status=417, title="Expectation failed", detail="Unknown Expect") + + if 'Content-Length' in request.headers: + content_length = int(request.headers["Content-Length"]) + max_upload_size = default_api_configuration["max_upload_size"] + if content_length > max_upload_size: + raise ExpectFailedException(status=417, title="Expectation failed", detail="Unknown Expect") + + response = await call_next(request) + return response \ No newline at end of file diff --git a/api/api/test/test_error_handler.py b/api/api/test/test_error_handler.py index fcbd191d6d7..d590a7abe35 100644 --- a/api/api/test/test_error_handler.py +++ b/api/api/test/test_error_handler.py @@ -13,7 +13,7 @@ from connexion.exceptions import HTTPException, ProblemException, BadRequestProblem, Unauthorized from api.error_handler import _cleanup_detail_field, prevent_bruteforce_attack, jwt_error_handler, \ http_error_handler, problem_error_handler, bad_request_error_handler, unauthorized_error_handler, \ - ERROR_CONTENT_TYPE + expect_failed_error_handler, ERROR_CONTENT_TYPE from api.middlewares import LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT @@ -201,3 +201,23 @@ async def test_bad_request_error_handler(detail, mock_request): assert body == problem assert response.status_code == exc.status_code assert response.content_type == ERROR_CONTENT_TYPE + + +@pytest.mark.asyncio +@pytest.mark.parametrize('query_param_pretty, expected_detail', [ + ('true', "Unknown Expect"), + ('false', "Unknown Expect"), +]) +async def test_expect_failed_error_handler(query_param_pretty, expected_detail): + """Test expect failed error handler.""" + request = MagicMock() + request.query_params = {'pretty': query_param_pretty} + response = await expect_failed_error_handler(request, None) + + assert response.status_code == 417 + assert response.content_type == "application/problem+json; charset=utf-8" + + body = json.loads(response.body) + assert body["title"] == "Expectation failed" + assert body["detail"] == expected_detail + assert body["error"] == 417 diff --git a/api/api/test/test_middlewares.py b/api/api/test/test_middlewares.py index bc37e067400..c52bb2ffe39 100644 --- a/api/api/test/test_middlewares.py +++ b/api/api/test/test_middlewares.py @@ -6,6 +6,8 @@ from unittest.mock import patch, MagicMock, AsyncMock, call import pytest +from starlette.responses import Response + from connexion import AsyncApp from connexion.testing import TestContext from connexion.exceptions import ProblemException @@ -14,7 +16,8 @@ from api.middlewares import check_rate_limit, check_blocked_ip, MAX_REQUESTS_EVENTS_DEFAULT, UNKNOWN_USER_STRING, \ LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT, CheckRateLimitsMiddleware, WazuhAccessLoggerMiddleware, CheckBlockedIP, \ - SecureHeadersMiddleware, secure_headers, access_log + SecureHeadersMiddleware, CheckExpectHeaderMiddleware, secure_headers, access_log +from api.api_exception import ExpectFailedException @pytest.fixture def request_info(request): @@ -305,3 +308,24 @@ async def test_check_block_ip_middleware(endpoint, method, call_check, mock_req) mock_block_ip.assert_not_called() dispatch_mock.assert_awaited_once_with(mock_req) assert ret_response == response + +@pytest.mark.asyncio +@pytest.mark.parametrize("expect_value", ['test-value', '100-continue']) +async def test_check_expect_header_middleware(expect_value): 
+ """Test expect header.""" + middleware = CheckExpectHeaderMiddleware(AsyncApp(__name__)) + + mock_request = MagicMock(headers={'Expect': expect_value}) + + response = Response("Success") + + call_next_mock = AsyncMock(return_value=response) + + if expect_value != '100-continue': + with pytest.raises(ExpectFailedException): + await middleware.dispatch(mock_request, call_next_mock) + call_next_mock.assert_not_called() + else: + returned_response = await middleware.dispatch(mock_request, call_next_mock) + call_next_mock.assert_called_once_with(mock_request) + assert returned_response == response \ No newline at end of file diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index 4009c8ff72b..5f9df3a584f 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -192,6 +192,7 @@ def start(params: dict): # Maximum body size that the API can accept (bytes) if api_conf['access']['max_request_per_minute'] > 0: app.add_middleware(CheckRateLimitsMiddleware, MiddlewarePosition.BEFORE_SECURITY) + app.add_middleware(CheckExpectHeaderMiddleware) app.add_middleware(CheckBlockedIP, MiddlewarePosition.BEFORE_SECURITY) app.add_middleware(WazuhAccessLoggerMiddleware, MiddlewarePosition.BEFORE_EXCEPTION) app.add_middleware(SecureHeadersMiddleware) @@ -211,6 +212,7 @@ def start(params: dict): ) # Add error handlers to format exceptions + app.add_error_handler(ExpectFailedException, error_handler.expect_failed_error_handler) app.add_error_handler(jwt.exceptions.PyJWTError, error_handler.jwt_error_handler) app.add_error_handler(Unauthorized, error_handler.unauthorized_error_handler) app.add_error_handler(HTTPException, error_handler.http_error_handler) @@ -339,7 +341,7 @@ def error(self, msg, *args, **kws): from api import __path__ as api_path from api import error_handler from api.alogging import set_logging - from api.api_exception import APIError + from api.api_exception import APIError, ExpectFailedException from api.configuration import api_conf, generate_private_key, generate_self_signed_certificate, security_conf from api.constants import API_LOG_PATH from api.middlewares import ( @@ -347,6 +349,7 @@ def error(self, msg, *args, **kws): CheckRateLimitsMiddleware, SecureHeadersMiddleware, WazuhAccessLoggerMiddleware, + CheckExpectHeaderMiddleware, ) from api.signals import lifespan_handler from api.uri_parser import APIUriParser From 81e6b77cf68b62461acc1a2b2c1fcff1cf5d3901 Mon Sep 17 00:00:00 2001 From: javier Date: Wed, 3 Apr 2024 18:40:39 +0200 Subject: [PATCH 204/419] Add output changes expect --- api/api/api_exception.py | 4 +++- api/api/error_handler.py | 7 ++++--- api/api/middlewares.py | 7 +++++-- api/api/test/test_error_handler.py | 12 +++++++----- api/api/test/test_middlewares.py | 3 ++- 5 files changed, 21 insertions(+), 12 deletions(-) diff --git a/api/api/api_exception.py b/api/api/api_exception.py index 885d1017a67..15a1dcedf73 100644 --- a/api/api/api_exception.py +++ b/api/api/api_exception.py @@ -71,6 +71,7 @@ def __init__(self, *, status=500, title=None, detail=None): ext = {"code": 6000} super().__init__(status=status, title=title, detail=detail, ext=ext) + class MaxRequestsException(ProblemException): """Bocked IP Exception Class.""" def __init__(self, code): @@ -79,9 +80,10 @@ def __init__(self, code): ext.update({"remediation": exc.remediation} if hasattr(exc, 'remediation') else {}) super().__init__(status=429, title=exc.title, detail=exc.message, type=exc.type, ext=ext) + class ExpectFailedException(ProblemException): """Exception for failed 
expectation (status code 417).""" def __init__(self, *, status=417, title=None, detail=None): ext = {"code": 417} - super().__init__(status=status, title=title, detail=detail, ext=ext) \ No newline at end of file + super().__init__(status=status, title=title, detail=detail, ext=ext) diff --git a/api/api/error_handler.py b/api/api/error_handler.py index d5e5452cb35..4f2ed39d701 100644 --- a/api/api/error_handler.py +++ b/api/api/error_handler.py @@ -70,11 +70,12 @@ async def expect_failed_error_handler(request: ConnexionRequest, exc: ExpectFail """ problem = { "title": "Expectation failed", - "detail": "Unknown Expect", - "error": 417 } + if exc.detail: + problem['detail'] = exc.detail + return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', - status_code=417, content_type=ERROR_CONTENT_TYPE) + status_code=exc.status, content_type=ERROR_CONTENT_TYPE) async def unauthorized_error_handler(request: ConnexionRequest, diff --git a/api/api/middlewares.py b/api/api/middlewares.py index 96920118b97..a195f840256 100644 --- a/api/api/middlewares.py +++ b/api/api/middlewares.py @@ -290,7 +290,10 @@ async def dispatch(self, request: ConnexionRequest, call_next: RequestResponseEn content_length = int(request.headers["Content-Length"]) max_upload_size = default_api_configuration["max_upload_size"] if content_length > max_upload_size: - raise ExpectFailedException(status=417, title="Expectation failed", detail="Unknown Expect") + raise ExpectFailedException(status=417, title="Expectation failed", + detail=f"Maximum content size limit ({max_upload_size}) exceeded " + f"({content_length} bytes read)") response = await call_next(request) - return response \ No newline at end of file + return response + \ No newline at end of file diff --git a/api/api/test/test_error_handler.py b/api/api/test/test_error_handler.py index d590a7abe35..650a41f1515 100644 --- a/api/api/test/test_error_handler.py +++ b/api/api/test/test_error_handler.py @@ -15,6 +15,7 @@ http_error_handler, problem_error_handler, bad_request_error_handler, unauthorized_error_handler, \ expect_failed_error_handler, ERROR_CONTENT_TYPE from api.middlewares import LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT +from api.api_exception import ExpectFailedException @pytest.fixture @@ -212,12 +213,13 @@ async def test_expect_failed_error_handler(query_param_pretty, expected_detail): """Test expect failed error handler.""" request = MagicMock() request.query_params = {'pretty': query_param_pretty} - response = await expect_failed_error_handler(request, None) - + response = await expect_failed_error_handler(request, ExpectFailedException(detail=expected_detail) if expected_detail else None) + assert response.status_code == 417 assert response.content_type == "application/problem+json; charset=utf-8" - + body = json.loads(response.body) assert body["title"] == "Expectation failed" - assert body["detail"] == expected_detail - assert body["error"] == 417 + if expected_detail: + assert body["detail"] == expected_detail + diff --git a/api/api/test/test_middlewares.py b/api/api/test/test_middlewares.py index c52bb2ffe39..c38f4a5a5de 100644 --- a/api/api/test/test_middlewares.py +++ b/api/api/test/test_middlewares.py @@ -328,4 +328,5 @@ async def test_check_expect_header_middleware(expect_value): else: returned_response = await middleware.dispatch(mock_request, call_next_mock) call_next_mock.assert_called_once_with(mock_request) - assert returned_response == response \ No newline at end of file + assert returned_response == 
response + \ No newline at end of file From 9174b3dad7534a9eb893517a6d745e0938078cba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Fri, 5 Apr 2024 10:02:16 -0300 Subject: [PATCH 205/419] Capture authorization header exceptions --- api/api/error_handler.py | 4 +++- api/api/middlewares.py | 20 ++++++++++---------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/api/api/error_handler.py b/api/api/error_handler.py index 4f2ed39d701..3e7c807193a 100644 --- a/api/api/error_handler.py +++ b/api/api/error_handler.py @@ -9,7 +9,8 @@ from content_size_limit_asgi.errors import ContentSizeExceeded from api import configuration -from api.middlewares import ip_block, ip_stats, LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT +from api.middlewares import ip_block, ip_stats, LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT, UNKNOWN_USER_STRING +from api.alogging import custom_logging from api.api_exception import BlockedIPException, MaxRequestsException, ExpectFailedException from api.controllers.util import json_response, ERROR_CONTENT_TYPE from wazuh.core.utils import get_utc_now @@ -109,6 +110,7 @@ async def unauthorized_error_handler(request: ConnexionRequest, problem.update({'detail': 'No authorization token provided'} \ if 'token_info' not in request.context \ else {}) + return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', status_code=exc.status_code, content_type=ERROR_CONTENT_TYPE) diff --git a/api/api/middlewares.py b/api/api/middlewares.py index a195f840256..7c88997892c 100644 --- a/api/api/middlewares.py +++ b/api/api/middlewares.py @@ -2,6 +2,7 @@ # Created by Wazuh, Inc. . # This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 +import binascii import json import hashlib import time @@ -13,6 +14,7 @@ from starlette.responses import Response from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint +from connexion.exceptions import OAuthProblem from connexion.lifecycle import ConnexionRequest from connexion.security import AbstractSecurityHandler @@ -78,25 +80,23 @@ async def access_log(request: ConnexionRequest, response: Response, prev_time: t # Get the username from the request. If it is not found in the context, try # to get it from the headers using basic or bearer authentication methods. 
- user = UNKNOWN_USER_STRING - if headers and not (user := context.get('user', None)): - auth_type, user_passw = AbstractSecurityHandler.get_auth_header_value(request) - if auth_type == 'basic': - user, _ = base64.b64decode(user_passw).decode("latin1").split(":", 1) - elif auth_type == 'bearer': - try: + if not (user := context.get('user', None)): + try: + auth_type, user_passw = AbstractSecurityHandler.get_auth_header_value(request) + if auth_type == 'basic': + user, _ = base64.b64decode(user_passw).decode("latin1").split(":", 1) + elif auth_type == 'bearer': s = jwt.decode(user_passw, generate_keypair()[1], algorithms=[JWT_ALGORITHM], audience='Wazuh API REST', options={'verify_exp': False}) user = s['sub'] - except jwt.exceptions.PyJWTError: - pass + except (KeyError, IndexError, binascii.Error, jwt.exceptions.PyJWTError, OAuthProblem): + user = UNKNOWN_USER_STRING # Get or create authorization context hash hash_auth_context = context.get('token_info', {}).get('hash_auth_context', '') # Create hash if run_as login - if not hash_auth_context and path == RUN_AS_LOGIN_ENDPOINT: hash_auth_context = hashlib.blake2b(json.dumps(body).encode(), digest_size=16).hexdigest() From b36fb655900214baea2d899fa3a7bfd53dd0231f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Fri, 5 Apr 2024 10:07:27 -0300 Subject: [PATCH 206/419] Add unit test --- api/api/error_handler.py | 4 +-- api/api/test/test_middlewares.py | 48 ++++++++++++++++++++++++++++++-- 2 files changed, 47 insertions(+), 5 deletions(-) diff --git a/api/api/error_handler.py b/api/api/error_handler.py index 3e7c807193a..4f2ed39d701 100644 --- a/api/api/error_handler.py +++ b/api/api/error_handler.py @@ -9,8 +9,7 @@ from content_size_limit_asgi.errors import ContentSizeExceeded from api import configuration -from api.middlewares import ip_block, ip_stats, LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT, UNKNOWN_USER_STRING -from api.alogging import custom_logging +from api.middlewares import ip_block, ip_stats, LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT from api.api_exception import BlockedIPException, MaxRequestsException, ExpectFailedException from api.controllers.util import json_response, ERROR_CONTENT_TYPE from wazuh.core.utils import get_utc_now @@ -110,7 +109,6 @@ async def unauthorized_error_handler(request: ConnexionRequest, problem.update({'detail': 'No authorization token provided'} \ if 'token_info' not in request.context \ else {}) - return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', status_code=exc.status_code, content_type=ERROR_CONTENT_TYPE) diff --git a/api/api/test/test_middlewares.py b/api/api/test/test_middlewares.py index c38f4a5a5de..05b55dfb4f5 100644 --- a/api/api/test/test_middlewares.py +++ b/api/api/test/test_middlewares.py @@ -4,13 +4,15 @@ from datetime import datetime from unittest.mock import patch, MagicMock, AsyncMock, call +import binascii +import jwt import pytest from starlette.responses import Response from connexion import AsyncApp from connexion.testing import TestContext -from connexion.exceptions import ProblemException +from connexion.exceptions import ProblemException, OAuthProblem from freezegun import freeze_time @@ -163,7 +165,7 @@ def check_rate_limit_side_effect(*_): (False, 'q_pass', None, 'b_key', 'wazuh', '', ('bearer', {'sub':'wazuh'}), RUN_AS_LOGIN_ENDPOINT, 'POST', 403), (False, 'q_pass', None, 'b_key', 'wazuh', '', ('other', ''), RUN_AS_LOGIN_ENDPOINT, 'POST', 403), ]) -async def test_access_log(json_body, q_password, b_password, b_key, 
c_user, +async def test_access_log(json_body, q_password, b_password, b_key, c_user, hash, sec_header, endpoint, method, status_code, mock_req): """Test access_log function.""" JWT_ALGORITHM = 'ES512' @@ -233,6 +235,48 @@ async def test_access_log(json_body, q_password, b_password, b_key, c_user, f"IP blocked due to exceeded number of logins attempts: {mock_req.client.host}") +@freeze_time(datetime(1970, 1, 1, 0, 0, 0)) +@pytest.mark.asyncio +@pytest.mark.parametrize("exception", [ + (OAuthProblem), + (jwt.exceptions.PyJWTError), + (KeyError), + (IndexError), + (binascii.Error) +]) +async def test_access_log_ko(mock_req, exception): + """Test access_log authorization header decoding exceptions.""" + user = UNKNOWN_USER_STRING + endpoint = LOGIN_ENDPOINT + method = 'GET' + status_code = 401 + + response = MagicMock() + response.status_code = status_code + + operation = MagicMock(name="operation") + operation.method = "post" + + body = {} + mock_req.json = AsyncMock(return_value=body) + mock_req.query_params = {'password': '****'} + mock_req.method = method + mock_req.context.update({'user': user}) + mock_req.scope = {'path': endpoint} + mock_req.headers = {'content-type': 'None'} + + with TestContext(operation=operation), \ + patch('api.middlewares.custom_logging') as mock_custom_logging, \ + patch('api.middlewares.AbstractSecurityHandler.get_auth_header_value', side_effect=exception): + expected_time = datetime(1970, 1, 1, 0, 0, 0).timestamp() + await access_log(request=mock_req, response=response, prev_time=expected_time) + mock_custom_logging.assert_called_once_with( + user, mock_req.client.host, mock_req.method, + endpoint, mock_req.query_params, body, 0.0, response.status_code, + hash_auth_context='', headers=mock_req.headers + ) + + @pytest.mark.asyncio @freeze_time(datetime(1970, 1, 1, 0, 0, 10)) async def test_wazuh_access_logger_middleware(mock_req): From b35100cf6932c248aa903fe90725419ac8785cd2 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Mon, 25 Mar 2024 16:41:09 -0300 Subject: [PATCH 207/419] Modified requirements.txt deleting unused dependencies. --- framework/requirements.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/framework/requirements.txt b/framework/requirements.txt index dff00bc36cd..7382131269a 100644 --- a/framework/requirements.txt +++ b/framework/requirements.txt @@ -1,4 +1,3 @@ -asn1crypto==1.3.0 attrs==23.1.0 azure-common==1.1.25 azure-storage-blob==2.1.0 @@ -20,7 +19,6 @@ docker==6.0.0 docker-pycreds==0.4.0 docutils==0.15.2 ecdsa==0.16.1 -envparse==0.2.0 frozenlist==1.2.0 future==0.18.3 google-api-core==1.30.0 @@ -37,7 +35,6 @@ grpcio==1.58.0 idna==2.9 importlib-metadata==6.8.0 inflection==0.3.1 -itsdangerous==2.0.0 Jinja2==3.1.3 jmespath==0.9.5 jsonschema==4.20.0 From 87b374d9c2122e4e07e3541a0b54050dc2c3be87 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Wed, 27 Mar 2024 10:55:23 -0300 Subject: [PATCH 208/419] Upgrades Werkzeug to 3.0.1 --- framework/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/framework/requirements.txt b/framework/requirements.txt index 7382131269a..4e0d4e18414 100644 --- a/framework/requirements.txt +++ b/framework/requirements.txt @@ -72,7 +72,7 @@ typing-inspect==0.7.1 urllib3==1.26.18 uvloop==0.17.0 websocket-client==0.57.0 -Werkzeug==2.2.3 +Werkzeug==3.0.1 xmltodict==0.12.0 yarl==1.7.0 zipp==3.3.2 From bd82b4a0d2562fc07a6d97ff019b5b1070d4556b Mon Sep 17 00:00:00 2001 From: RamosFe Date: Wed, 27 Mar 2024 12:41:33 -0300 Subject: [PATCH 209/419] Changed DEPS_VERSION. 
--- src/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index 0e08b412bf8..f2491dddcbd 100644 --- a/src/Makefile +++ b/src/Makefile @@ -1341,7 +1341,7 @@ TAR := tar -xf GUNZIP := gunzip GZIP := gzip CURL := curl -so -DEPS_VERSION = 25 +DEPS_VERSION = 25-20795 RESOURCES_URL_BASE := https://packages.wazuh.com/deps/ RESOURCES_URL := $(RESOURCES_URL_BASE)$(DEPS_VERSION) CPYTHON := cpython From e2cb42d7b96efc243af0ef7c489e0ed785e0aab8 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Fri, 5 Apr 2024 18:27:24 -0300 Subject: [PATCH 210/419] Added all new packages to the requirements.txt. --- framework/requirements.txt | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/framework/requirements.txt b/framework/requirements.txt index 4e0d4e18414..4357422eda6 100644 --- a/framework/requirements.txt +++ b/framework/requirements.txt @@ -1,3 +1,5 @@ +anyio==4.1.0 +asgiref==3.7.2 attrs==23.1.0 azure-common==1.1.25 azure-storage-blob==2.1.0 @@ -12,6 +14,7 @@ charset-normalizer==2.0.4 click==8.1.3 clickclick==20.10.2 connexion==3.0.5 +content-size-limit-asgi==0.1.5 cryptography==42.0.4 Cython==0.29.36 defusedxml==0.6.0 @@ -19,6 +22,7 @@ docker==6.0.0 docker-pycreds==0.4.0 docutils==0.15.2 ecdsa==0.16.1 +exceptiongroup==1.2.0 frozenlist==1.2.0 future==0.18.3 google-api-core==1.30.0 @@ -32,20 +36,28 @@ google-resumable-media==1.3.1 greenlet==2.0.2 grpc-google-iam-v1==0.12.3 grpcio==1.58.0 +httpcore==1.0.2 +httpx==0.26.0 +h11==0.14.0 idna==2.9 importlib-metadata==6.8.0 inflection==0.3.1 Jinja2==3.1.3 jmespath==0.9.5 jsonschema==4.20.0 +jsonschema-path==0.3.2 +jsonschema-specifications==2023.11.2 +lazy-object-proxy==1.10.0 libcst==0.3.20 MarkupSafe==2.1.2 more-itertools==8.2.0 multidict==5.2.0 mypy-extensions==0.4.3 numpy==1.26.0 +openapi-schema-validator==0.6.2 openapi-spec-validator==0.7.1 packaging==20.9 +pathable==0.4.3 pathlib==1.0.1 protobuf==3.19.6 proto-plus==1.19.0 @@ -58,19 +70,26 @@ PyJWT==2.8.0 pyparsing==2.4.7 python-dateutil==2.8.1 python-json-logger==2.0.2 +python-multipart==0.0.6 pytz==2020.1 PyYAML==5.4.1 +referencing==0.31.1 requests==2.31.0 +rfc3339-validator==0.1.4 +rpds-py==0.15.2 rsa==4.7.2 s3transfer==0.4.2 secure==0.3.0 six==1.16.0 +sniffio==1.3.0 SQLAlchemy==2.0.23 +starlette==0.32.0.post1 tabulate==0.8.9 typing-extensions==4.5.0 typing-inspect==0.7.1 urllib3==1.26.18 uvloop==0.17.0 +uvicorn==0.24.0.post1 websocket-client==0.57.0 Werkzeug==3.0.1 xmltodict==0.12.0 From c00196b05d097c067878de3c6b39e2714137de7c Mon Sep 17 00:00:00 2001 From: RamosFe Date: Fri, 5 Apr 2024 18:28:37 -0300 Subject: [PATCH 211/419] Deleted extra step in manager.Dockerfile for API IT. --- api/test/integration/env/base/manager/manager.Dockerfile | 3 --- 1 file changed, 3 deletions(-) diff --git a/api/test/integration/env/base/manager/manager.Dockerfile b/api/test/integration/env/base/manager/manager.Dockerfile index a750589786e..38f82e30d6e 100644 --- a/api/test/integration/env/base/manager/manager.Dockerfile +++ b/api/test/integration/env/base/manager/manager.Dockerfile @@ -12,9 +12,6 @@ ADD base/manager/supervisord.conf /etc/supervisor/conf.d/ RUN mkdir wazuh && curl -sL https://github.com/wazuh/wazuh/tarball/${WAZUH_BRANCH} | tar zx --strip-components=1 -C wazuh COPY base/manager/preloaded-vars.conf /wazuh/etc/preloaded-vars.conf RUN /wazuh/install.sh -# START TO BE DELETED after a the new wazuh embedded version includes all the required dependencies. 
-RUN /var/ossec/framework/python/bin/pip3 install -r /wazuh/framework/requirements.txt -# END TO BE DELETED COPY base/manager/entrypoint.sh /scripts/entrypoint.sh # HEALTHCHECK From 88a91a9ad7f3aa7ee88603c997f45ae5468422ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Fri, 12 Apr 2024 09:33:26 -0300 Subject: [PATCH 212/419] Remove duplicated dependencies --- framework/requirements.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/framework/requirements.txt b/framework/requirements.txt index 4357422eda6..cc34e7d3b01 100644 --- a/framework/requirements.txt +++ b/framework/requirements.txt @@ -95,6 +95,3 @@ Werkzeug==3.0.1 xmltodict==0.12.0 yarl==1.7.0 zipp==3.3.2 -content_size_limit_asgi -uvicorn==0.24.0.post1 -content_size_limit_asgi==0.1.5 From b8faedad761570910214e524ebf8d5dbb4292c13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Fri, 12 Apr 2024 09:12:14 -0300 Subject: [PATCH 213/419] Update fields format --- api/api/spec/spec.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index 087fb054835..a298dbe93b8 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -5145,6 +5145,7 @@ components: format: int32 node_name: type: string + format: alphanumeric_symbols groups: type: array description: "Recount of the number of Wazuh agents group by Wazuh groups" From adafeab3a884d511fddcf38c7002545e2bd1dda4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Thu, 11 Apr 2024 10:53:09 -0300 Subject: [PATCH 214/419] Update error code --- api/test/integration/test_agent_PUT_endpoints.tavern.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/test/integration/test_agent_PUT_endpoints.tavern.yaml b/api/test/integration/test_agent_PUT_endpoints.tavern.yaml index eaf24c37cc3..8331b0d1b03 100644 --- a/api/test/integration/test_agent_PUT_endpoints.tavern.yaml +++ b/api/test/integration/test_agent_PUT_endpoints.tavern.yaml @@ -287,7 +287,7 @@ stages: response: status_code: 400 json: - error: 1112 + error: 1912 # PUT /groups/group1/configuration - name: Try to update configuration using an invalid configuration file From 677a746dcbe4ea7b4dd8b453b3011fae4daeaf07 Mon Sep 17 00:00:00 2001 From: Marcelo Ariel Hamra Date: Wed, 6 Dec 2023 03:22:45 -0300 Subject: [PATCH 215/419] Upgrade connexion package to version 3.0 --- api/api/controllers/security_controller.py | 1 + 1 file changed, 1 insertion(+) diff --git a/api/api/controllers/security_controller.py b/api/api/controllers/security_controller.py index 23d226c1950..be154aed6f6 100644 --- a/api/api/controllers/security_controller.py +++ b/api/api/controllers/security_controller.py @@ -22,6 +22,7 @@ from wazuh import security, __version__ from wazuh.core.cluster.control import get_system_nodes from wazuh.core.cluster.dapi.dapi import DistributedAPI +from wazuh.core.common import WAZUH_VERSION from wazuh.core.exception import WazuhException, WazuhPermissionError from wazuh.core.results import AffectedItemsWazuhResult, WazuhResult from wazuh.core.security import revoke_tokens From 0b6e608bedad33deb8592812e3d4a28c4d1a7acb Mon Sep 17 00:00:00 2001 From: RamosFe Date: Thu, 7 Mar 2024 14:36:47 -0300 Subject: [PATCH 216/419] Deleted functionality of the cache API config. 
--- api/api/configuration.py | 4 --- api/api/configuration/api.yaml | 5 ---- api/api/spec/spec.yaml | 26 ------------------- api/api/test/test_configuration.py | 7 ----- api/scripts/wazuh_apid.py | 6 +++++ .../manager/config/api/configuration/api.yaml | 5 ---- .../test_cluster_endpoints.tavern.yaml | 3 --- .../test_manager_endpoints.tavern.yaml | 3 --- 8 files changed, 6 insertions(+), 53 deletions(-) diff --git a/api/api/configuration.py b/api/api/configuration.py index 7d219e06155..d845611797f 100644 --- a/api/api/configuration.py +++ b/api/api/configuration.py @@ -63,10 +63,6 @@ "allow_headers": "*", "allow_credentials": False, }, - "cache": { - "enabled": True, - "time": 0.750 - }, "access": { "max_login_attempts": 50, "block_time": 300, diff --git a/api/api/configuration/api.yaml b/api/api/configuration/api.yaml index d981823861d..c0283399fcc 100644 --- a/api/api/configuration/api.yaml +++ b/api/api/configuration/api.yaml @@ -37,11 +37,6 @@ # allow_headers: "*" # allow_credentials: no -# Cache (time in seconds) -# cache: -# enabled: yes -# time: 0.750 - # Access parameters # access: # max_login_attempts: 50 diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index a298dbe93b8..71192859fad 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -4183,20 +4183,6 @@ components: default: info type: string enum: [disabled, info, warning, error, debug, debug2] - cache: - type: object - additionalProperties: false - properties: - enabled: - description: "Enable cache" - type: boolean - default: true - time: - description: "Cache expiration time in seconds" - type: number - format: double - minimum: 0 - example: 0.75 cors: type: object additionalProperties: false @@ -9975,9 +9961,6 @@ paths: expose_headers: "*" allow_headers: "*" allow_credentials: false - cache: - enabled: true - time: 0.75 access: max_login_attempts: 50 block_time: 300 @@ -10012,9 +9995,6 @@ paths: source_route: "*" allow_headers: "*" allow_credentials: false - cache: - enabled: true - time: 0.75 access: max_login_attempts: 50 block_time: 300 @@ -10049,9 +10029,6 @@ paths: source_route: "*" allow_headers: "*" allow_credentials: false - cache: - enabled: true - time: 0.75 access: max_login_attempts: 50 block_time: 300 @@ -12907,9 +12884,6 @@ paths: source_route: "*" allow_headers: "*" allow_credentials: false - cache: - enabled: true - time: 0.75 drop_privileges: true experimental_features: false max_upload_size: 10485760 diff --git a/api/api/test/test_configuration.py b/api/api/test/test_configuration.py index 81faf424052..9098949df5a 100644 --- a/api/api/test/test_configuration.py +++ b/api/api/test/test_configuration.py @@ -36,10 +36,6 @@ "allow_headers": "*", "allow_credentials": False, }, - "cache": { - "enabled": True, - "time": 0.750 - }, "access": { "max_login_attempts": 50, "block_time": 300, @@ -131,9 +127,6 @@ def test_read_configuration(mock_open, mock_exists, read_config): {'cors': {'allow_headers': 12345}}, {'cors': {'allow_credentials': 12345}}, {'cors': {'invalid_subkey': 'value'}}, - {'cache': {'enabled': 'invalid_type'}}, - {'cache': {'time': 'invalid_type'}}, - {'cache': {'invalid_subkey': 'value'}}, {'access': {'max_login_attempts': 'invalid_type'}}, {'access': {'block_time': 'invalid_type'}}, {'access': {'max_request_per_minute': 'invalid_type'}}, diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index 5f9df3a584f..ee50218b049 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -11,6 +11,8 @@ import warnings SSL_DEPRECATED_MESSAGE = 'The 
`{ssl_protocol}` SSL protocol is deprecated.' +CACHE_DELETED_MESSAGE = 'The `cache` API configuration option no longer take effect since {release} and will ' \ + 'be completely removed in the next major release.' API_MAIN_PROCESS = 'wazuh-apid' API_LOCAL_REQUEST_PROCESS = 'wazuh-apid_exec' @@ -120,6 +122,10 @@ def configure_ssl(params): if api_conf['https']['ssl_ciphers']: params['ssl_ciphers'] = api_conf['https']['ssl_ciphers'].upper() + if api_conf.get('cache', {})('enabled', {}): + logger.warning(CACHE_DELETED_MESSAGE.format(release="4.9.0")) + + except ssl.SSLError as exc: error = APIError( 2003, details='Private key does not match with the certificate') diff --git a/api/test/integration/env/configurations/base/manager/config/api/configuration/api.yaml b/api/test/integration/env/configurations/base/manager/config/api/configuration/api.yaml index 0912f87340e..9996502b766 100644 --- a/api/test/integration/env/configurations/base/manager/config/api/configuration/api.yaml +++ b/api/test/integration/env/configurations/base/manager/config/api/configuration/api.yaml @@ -24,11 +24,6 @@ cors: allow_headers: "*" allow_credentials: no -# Cache (time in seconds) -cache: - enabled: yes - time: 0.750 - # Access parameters access: max_login_attempts: 10000 diff --git a/api/test/integration/test_cluster_endpoints.tavern.yaml b/api/test/integration/test_cluster_endpoints.tavern.yaml index f6a2050f186..7e47bbaa6fb 100644 --- a/api/test/integration/test_cluster_endpoints.tavern.yaml +++ b/api/test/integration/test_cluster_endpoints.tavern.yaml @@ -880,9 +880,6 @@ stages: expose_headers: !anystr allow_headers: !anystr allow_credentials: !anybool - cache: - enabled: !anybool - time: !anything access: max_login_attempts: !anyint block_time: !anyint diff --git a/api/test/integration/test_manager_endpoints.tavern.yaml b/api/test/integration/test_manager_endpoints.tavern.yaml index 776272e06bd..3dc10999f55 100644 --- a/api/test/integration/test_manager_endpoints.tavern.yaml +++ b/api/test/integration/test_manager_endpoints.tavern.yaml @@ -624,9 +624,6 @@ stages: expose_headers: !anystr allow_headers: !anystr allow_credentials: !anybool - cache: - enabled: !anybool - time: !anything access: max_login_attempts: !anyint block_time: !anyint From 4bc22ec710b57cafa34f8310fc18fd5315aad80b Mon Sep 17 00:00:00 2001 From: RamosFe Date: Thu, 7 Mar 2024 14:49:00 -0300 Subject: [PATCH 217/419] Changed where the config is parsed for the warning. 
--- api/scripts/wazuh_apid.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index ee50218b049..9563d68046e 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -122,10 +122,6 @@ def configure_ssl(params): if api_conf['https']['ssl_ciphers']: params['ssl_ciphers'] = api_conf['https']['ssl_ciphers'].upper() - if api_conf.get('cache', {})('enabled', {}): - logger.warning(CACHE_DELETED_MESSAGE.format(release="4.9.0")) - - except ssl.SSLError as exc: error = APIError( 2003, details='Private key does not match with the certificate') @@ -217,6 +213,10 @@ def start(params: dict): allow_credentials=api_conf['cors']['allow_credentials'], ) + # Display warning if using deprecated cache API configuration + if api_conf.get('cache', {})('enabled', {}): + logger.warning(CACHE_DELETED_MESSAGE.format(release="4.9.0")) + # Add error handlers to format exceptions app.add_error_handler(ExpectFailedException, error_handler.expect_failed_error_handler) app.add_error_handler(jwt.exceptions.PyJWTError, error_handler.jwt_error_handler) From d91c68a65719c45f91422f28b1e790072ea7ecba Mon Sep 17 00:00:00 2001 From: RamosFe Date: Mon, 18 Mar 2024 11:01:05 -0300 Subject: [PATCH 218/419] Fixed typo in wazuh-apid.py. --- api/scripts/wazuh_apid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index 9563d68046e..7c601f0e843 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -214,7 +214,7 @@ def start(params: dict): ) # Display warning if using deprecated cache API configuration - if api_conf.get('cache', {})('enabled', {}): + if api_conf.get('cache', {}).get('enabled', {}): logger.warning(CACHE_DELETED_MESSAGE.format(release="4.9.0")) # Add error handlers to format exceptions From 69e73b75da5c6fa7f39dd275339e5e7e91f6a448 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Wed, 10 Apr 2024 16:03:40 -0300 Subject: [PATCH 219/419] Fixed import error added in rebase. --- api/api/controllers/security_controller.py | 1 - 1 file changed, 1 deletion(-) diff --git a/api/api/controllers/security_controller.py b/api/api/controllers/security_controller.py index be154aed6f6..23d226c1950 100644 --- a/api/api/controllers/security_controller.py +++ b/api/api/controllers/security_controller.py @@ -22,7 +22,6 @@ from wazuh import security, __version__ from wazuh.core.cluster.control import get_system_nodes from wazuh.core.cluster.dapi.dapi import DistributedAPI -from wazuh.core.common import WAZUH_VERSION from wazuh.core.exception import WazuhException, WazuhPermissionError from wazuh.core.results import AffectedItemsWazuhResult, WazuhResult from wazuh.core.security import revoke_tokens From 4c9ad7a7843dca6320ef97184723cdb7e934e03b Mon Sep 17 00:00:00 2001 From: RamosFe Date: Thu, 11 Apr 2024 16:16:45 -0300 Subject: [PATCH 220/419] Fixed typo in cache message. --- api/scripts/wazuh_apid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index 7c601f0e843..a8f750d61db 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -11,7 +11,7 @@ import warnings SSL_DEPRECATED_MESSAGE = 'The `{ssl_protocol}` SSL protocol is deprecated.' 
-CACHE_DELETED_MESSAGE = 'The `cache` API configuration option no longer take effect since {release} and will ' \ +CACHE_DELETED_MESSAGE = 'The `cache` API configuration option no longer takes effect since {release} and will ' \ 'be completely removed in the next major release.' API_MAIN_PROCESS = 'wazuh-apid' From 20a7bb5a79d854af4d2021016a3836b941fab948 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Mon, 15 Apr 2024 09:58:32 -0300 Subject: [PATCH 221/419] Deleted test_cache ITs. --- .../test_config/test_cache/__init__.py | 12 -- .../configuration_cache.yaml | 4 - .../data/test_cases/cases_cache.yaml | 15 -- .../test_config/test_cache/test_cache.py | 194 ------------------ 4 files changed, 225 deletions(-) delete mode 100644 tests/integration/test_api/test_config/test_cache/__init__.py delete mode 100644 tests/integration/test_api/test_config/test_cache/data/configuration_templates/configuration_cache.yaml delete mode 100644 tests/integration/test_api/test_config/test_cache/data/test_cases/cases_cache.yaml delete mode 100644 tests/integration/test_api/test_config/test_cache/test_cache.py diff --git a/tests/integration/test_api/test_config/test_cache/__init__.py b/tests/integration/test_api/test_config/test_cache/__init__.py deleted file mode 100644 index eb7412a443f..00000000000 --- a/tests/integration/test_api/test_config/test_cache/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -""" -Copyright (C) 2015-2024, Wazuh Inc. -Created by Wazuh, Inc. . -This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 -""" -from pathlib import Path - - -# Constants & base paths -TEST_DATA_PATH = Path(Path(__file__).parent, 'data') -TEST_CASES_FOLDER_PATH = Path(TEST_DATA_PATH, 'test_cases') -CONFIGURATIONS_FOLDER_PATH = Path(TEST_DATA_PATH, 'configuration_templates') diff --git a/tests/integration/test_api/test_config/test_cache/data/configuration_templates/configuration_cache.yaml b/tests/integration/test_api/test_config/test_cache/data/configuration_templates/configuration_cache.yaml deleted file mode 100644 index 3bdefb882cd..00000000000 --- a/tests/integration/test_api/test_config/test_cache/data/configuration_templates/configuration_cache.yaml +++ /dev/null @@ -1,4 +0,0 @@ -- blocks: - cache: - enabled: ENABLED - time: TIME diff --git a/tests/integration/test_api/test_config/test_cache/data/test_cases/cases_cache.yaml b/tests/integration/test_api/test_config/test_cache/data/test_cases/cases_cache.yaml deleted file mode 100644 index f0ccd6812e3..00000000000 --- a/tests/integration/test_api/test_config/test_cache/data/test_cases/cases_cache.yaml +++ /dev/null @@ -1,15 +0,0 @@ -- name: CACHE_ENABLED - description: | - Check that the response is the same after querying the API twice within `TIME` seconds and check if it is disabled - after `TIME` seconds. - configuration_parameters: - ENABLED: yes - TIME: 10 - metadata: null - -- name: CACHE_DISABLED - description: Check that the response is the different after querying the API twice within `TIME` seconds. - configuration_parameters: - ENABLED: no - TIME: 10 - metadata: null diff --git a/tests/integration/test_api/test_config/test_cache/test_cache.py b/tests/integration/test_api/test_config/test_cache/test_cache.py deleted file mode 100644 index 55195c390cc..00000000000 --- a/tests/integration/test_api/test_config/test_cache/test_cache.py +++ /dev/null @@ -1,194 +0,0 @@ -""" -copyright: Copyright (C) 2015-2024, Wazuh Inc. - - Created by Wazuh, Inc. . 
- - This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 - -type: integration - -brief: These tests will check if the cache feature of the API handled by the 'wazuh-apid' daemon - is working properly. The Wazuh API is an open source 'RESTful' API that allows for interaction - with the Wazuh manager from a web browser, command line tool like 'cURL' or any script - or program that can make web requests. - -components: - - api - -suite: config - -targets: - - manager - -daemons: - - wazuh-apid - - wazuh-modulesd - - wazuh-analysisd - - wazuh-execd - - wazuh-db - - wazuh-remoted - -os_platform: - - linux - -os_version: - - Arch Linux - - Amazon Linux 2 - - Amazon Linux 1 - - CentOS 8 - - CentOS 7 - - Debian Buster - - Red Hat 8 - - Ubuntu Focal - - Ubuntu Bionic - -references: - - https://documentation.wazuh.com/current/user-manual/api/getting-started.html - - https://documentation.wazuh.com/current/user-manual/api/configuration.html#cache - -tags: - - api -""" -import time -import pytest -import requests -from pathlib import Path - -from . import CONFIGURATIONS_FOLDER_PATH, TEST_CASES_FOLDER_PATH -from wazuh_testing.constants.api import RULES_FILES_ROUTE, CONFIGURATION_TYPES -from wazuh_testing.constants.daemons import API_DAEMONS_REQUIREMENTS -from wazuh_testing.constants.paths.ruleset import DEFAULT_RULES_PATH -from wazuh_testing.modules.api.utils import get_base_url, login -from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template -from wazuh_testing.utils.file import write_file - - -# Marks -pytestmark = pytest.mark.server - -# Variables -test_file = Path(DEFAULT_RULES_PATH, 'api_test.xml') -# Used by add_configuration to select the target configuration file -configuration_type = CONFIGURATION_TYPES[0] - -# Paths -test_configuration_path = Path(CONFIGURATIONS_FOLDER_PATH, 'configuration_cache.yaml') -test_cases_path = Path(TEST_CASES_FOLDER_PATH, 'cases_cache.yaml') - -# Configurations -test_configuration, test_metadata, test_cases_ids = get_test_cases_data(test_cases_path) -test_configuration = load_configuration_template(test_configuration_path, test_configuration, test_metadata) -daemons_handler_configuration = {'daemons': API_DAEMONS_REQUIREMENTS} - - -# Tests -@pytest.mark.tier(level=0) -@pytest.mark.parametrize('test_configuration,test_metadata', zip(test_configuration, test_metadata), ids=test_cases_ids) -def test_cache(test_configuration, test_metadata, add_configuration, truncate_monitored_files, daemons_handler, - wait_for_api_start, remove_test_file): - """ - description: Check if the stored response is returned when the cache is enabled. - Calls to rules endpoints can be cached. This test verifies if the result - of the first call to the rule endpoint is equal to the second call within - a period established in the configuration, even though a new file - has been created during the process. 
- - wazuh_min_version: 4.2.0 - - test_phases: - - setup: - - Append configuration to the target configuration files (defined by configuration_type) - - Truncate the log files - - Restart daemons defined in `daemons_handler_configuration` in this module - - Wait until the API is ready to receive requests - - test: - - Request rules files before creating a new one - - Create a new file inside DEFAULT_RULES_PATH - - Request rules files again - - Check if the API's behavior is the expected when the cache is enabled/disabled - - teardown: - - Remove configuration and restore backup configuration - - Truncate the log files - - Stop daemons defined in `daemons_handler_configuration` in this module - - tier: 0 - - parameters: - - test_configuration: - type: dict - brief: Configuration data from the test case. - - test_metadata: - type: dict - brief: Metadata from the test case. - - add_configuration: - type: fixture - brief: Add configuration to the Wazuh API configuration files. - - truncate_monitored_files: - type: fixture - brief: Truncate all the log files and json alerts files before and after the test execution. - - daemons_handler: - type: fixture - brief: Wrapper of a helper function to handle Wazuh daemons. - - wait_for_api_start: - type: fixture - brief: Monitor the API log file to detect whether it has been started or not. - - remove_test_file: - type: fixture - brief: Remove the file before and after the test execution. - - assertions: - - Verify that the number of files is the same in the first and second response when `cache` is enabled. - - Verify that the number of files is updated when a new file is added and the cache has expired. - - Verify that the number of files is greater than before adding a new file when cache is disabled. - - input_description: Different test cases are in the `cases_cache.yaml` file which includes API configuration - parameters that will be replaced in the configuration template file. - - expected_output: - - Number of rule files (if caching is enabled). - - Number of rule files + 1 (if caching is disabled). - - tags: - - cache - """ - cache = test_configuration['blocks']['cache']['enabled'] - cache_expiration_time = test_configuration['blocks']['cache']['time'] - url = get_base_url() + RULES_FILES_ROUTE - authentication_headers, _ = login() - - # Request rules files before creating a new one - rule_files = requests.get(url, headers=authentication_headers, verify=False) - # Get the number of files in total - first_quantity = rule_files.json()['data']['total_affected_items'] - - # Create a new file inside DEFAULT_RULES_PATH - write_file(file_path=test_file, data='') - - # Request rules files again - rule_files = requests.get(url, headers=authentication_headers, verify=False) - # Get the number of files in total after creating a new file - second_quantity = rule_files.json()['data']['total_affected_items'] - - # If cache is enabled, number of files should be the same in the first and second response even with a new one. - expected_files_without_cache = first_quantity + 1 - if cache is True: - assert first_quantity == second_quantity, 'The new file was included. 
This is not correct because ' \ - 'cache is enabled, the quantity must be the same.\n' \ - f"Expected quantity: {first_quantity}\n" \ - f"Files in the second request: {second_quantity}" - # Wait until cache expires (10 seconds) - time.sleep(cache_expiration_time + 1) - # Get a new response after cache expiration - rule_files = requests.get(url, headers=authentication_headers, verify=False) - third_quantity = rule_files.json()['data']['total_affected_items'] - - assert third_quantity == expected_files_without_cache, 'The new file was not included after the ' \ - 'cache had expired.' \ - f"Expected quantity: {expected_files_without_cache}\n" \ - f"Files in the second request: {second_quantity}" - else: - # Verify that the second response has updated data when cache is disabled. - assert expected_files_without_cache == second_quantity, 'The new file was not included even though the ' \ - 'cache is disabled, the data must be updated.\n' \ - f"Expected quantity: {expected_files_without_cache}\n" \ - f"Files in the second request: {second_quantity}" From 9074226c8cac33d7e741351b5520a6b985176dee Mon Sep 17 00:00:00 2001 From: RamosFe Date: Mon, 15 Apr 2024 09:59:33 -0300 Subject: [PATCH 222/419] Deleted configuration_model.py --- api/api/models/configuration_model.py | 359 -------------------------- 1 file changed, 359 deletions(-) delete mode 100644 api/api/models/configuration_model.py diff --git a/api/api/models/configuration_model.py b/api/api/models/configuration_model.py deleted file mode 100644 index 7f4e8a0c524..00000000000 --- a/api/api/models/configuration_model.py +++ /dev/null @@ -1,359 +0,0 @@ -# coding: utf-8 - -# Copyright (C) 2015, Wazuh Inc. -# Created by Wazuh, Inc. . -# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 - - -from __future__ import absolute_import - -from api.models.base_model_ import Body, Model - - -class HTTPSModel(Model): - def __init__(self, enabled=None, key=None, cert=None, use_ca=None, ca=None): - self.swagger_types = { - 'enabled': bool, - 'key': str, - 'cert': str, - 'use_ca': bool, - 'ca': str - } - - self.attribute_map = { - 'enabled': 'enabled', - 'key': 'key', - 'cert': 'cert', - 'use_ca': 'use_ca', - 'ca': 'ca' - } - - self._enabled = enabled - self._key = key - self._cert = cert - self._use_ca = use_ca - self._ca = ca - - @property - def enabled(self): - return self._enabled - - @enabled.setter - def enabled(self, enabled): - self._enabled = enabled - - @property - def key(self): - return self._key - - @key.setter - def key(self, key): - self._key = key - - @property - def cert(self): - return self._cert - - @cert.setter - def cert(self, cert): - self._cert = cert - - @property - def use_ca(self): - return self._use_ca - - @use_ca.setter - def use_ca(self, use_ca): - self._use_ca = use_ca - - @property - def ca(self): - return self._ca - - @ca.setter - def ca(self, ca): - self._ca = ca - - -class LogsModel(Model): - def __init__(self, level=None): - self.swagger_types = { - 'level': str, - } - - self.attribute_map = { - 'level': 'level', - } - - self._level = level - - @property - def level(self): - return self._level - - @level.setter - def level(self, level): - self._level = level - - -class CORSModel(Model): - def __init__(self, enabled=None, source_route=None, expose_headers=None, allow_headers=None, allow_credentials=None): - self.swagger_types = { - 'enabled': bool, - 'source_route': str, - 'expose_headers': str, - 'allow_headers': bool, - 'allow_credentials': bool - } - - self.attribute_map = 
{ - 'enabled': 'enabled', - 'source_route': 'source_route', - 'expose_headers': 'expose_headers', - 'allow_headers': 'allow_headers', - 'allow_credentials': 'allow_credentials' - } - - self._enabled = enabled - self._source_route = source_route - self._expose_headers = expose_headers - self._allow_headers = allow_headers - self._allow_credentials = allow_credentials - - @property - def enabled(self): - return self._enabled - - @enabled.setter - def enabled(self, enabled): - self._enabled = enabled - - @property - def source_route(self): - return self._source_route - - @source_route.setter - def source_route(self, source_route): - self._source_route = source_route - - @property - def expose_headers(self): - return self._expose_headers - - @expose_headers.setter - def expose_headers(self, expose_headers): - self._expose_headers = expose_headers - - @property - def allow_headers(self): - return self._allow_headers - - @allow_headers.setter - def allow_headers(self, allow_headers): - self._allow_headers = allow_headers - - @property - def allow_credentials(self): - return self._allow_credentials - - @allow_credentials.setter - def allow_credentials(self, allow_credentials): - self._allow_credentials = allow_credentials - - -class CacheModel(Model): - def __init__(self, enabled=None, time=None): - self.swagger_types = { - 'enabled': bool, - 'time': float - } - - self.attribute_map = { - 'enabled': 'enabled', - 'time': 'time' - } - - self._enabled = enabled - self._time = time - - @property - def enabled(self): - return self._enabled - - @enabled.setter - def enabled(self, enabled): - self._enabled = enabled - - @property - def time(self): - return self._time - - @time.setter - def time(self, time): - self._time = time - - -class AccessModel(Model): - def __init__(self, max_login_attempts=None, block_time=None, max_request_per_minute=None): - self.swagger_types = { - 'max_login_attempts': int, - 'block_time': int, - 'max_request_per_minute': int - } - - self.attribute_map = { - 'max_login_attempts': 'max_login_attempts', - 'block_time': 'block_time', - 'max_request_per_minute': 'max_request_per_minute' - } - - self._max_login_attempts = max_login_attempts - self._block_time = block_time - self._max_request_per_minute = max_request_per_minute - - @property - def max_login_attempts(self): - return self._max_login_attempts - - @max_login_attempts.setter - def max_login_attempts(self, max_login_attempts): - self._max_login_attempts = max_login_attempts - - @property - def block_time(self): - return self._block_time - - @block_time.setter - def block_time(self, block_time): - self._block_time = block_time - - @property - def max_request_per_minute(self): - return self._max_request_per_minute - - @max_request_per_minute.setter - def max_request_per_minute(self, max_request_per_minute): - self._max_request_per_minute = max_request_per_minute - - -class APIConfigurationModel(Body): - """API configuration model. 
Deprecated since v4.0.4, we maintain Model""" - def __init__(self, https=None, logs=None, cors=None, cache=None, drop_privileges=None, - experimental_features=None, access=None): - self.swagger_types = { - 'https': HTTPSModel, - 'logs': LogsModel, - 'cors': CORSModel, - 'cache': CacheModel, - 'drop_privileges': bool, - 'experimental_features': bool, - 'access': AccessModel - } - - self.attribute_map = { - 'https': 'https', - 'logs': 'logs', - 'cors': 'cors', - 'cache': 'cache', - 'drop_privileges': 'drop_privileges', - 'experimental_features': 'experimental_features', - 'access': 'access' - } - - self._https = https - self._logs = logs - self._cors = cors - self._cache = cache - self._drop_privileges = drop_privileges - self._experimental_features = experimental_features - self._access = access - - @property - def https(self): - return self._https - - @https.setter - def https(self, https): - self._https = https - - @property - def logs(self): - return self._logs - - @logs.setter - def logs(self, logs): - self._logs = logs - - @property - def cors(self): - return self._cors - - @cors.setter - def cors(self, cors): - self._cors = cors - - @property - def cache(self): - return self._cache - - @cache.setter - def cache(self, cache): - self._cache = cache - - @property - def drop_privileges(self): - return self._drop_privileges - - @drop_privileges.setter - def drop_privileges(self, drop_privileges): - self._drop_privileges = drop_privileges - - @property - def experimental_features(self): - return self._experimental_features - - @experimental_features.setter - def experimental_features(self, experimental_features): - self._experimental_features = experimental_features - - @property - def access(self): - return self._access - - @access.setter - def access(self, access): - self._access = access - - -class SecurityConfigurationModel(Body): - """Security configuration model.""" - - def __init__(self, auth_token_exp_timeout: int = None, rbac_mode: str = None): - self.swagger_types = { - 'auth_token_exp_timeout': int, - 'rbac_mode': str - } - - self.attribute_map = { - 'auth_token_exp_timeout': 'auth_token_exp_timeout', - 'rbac_mode': 'rbac_mode' - } - - self._auth_token_exp_timeout = auth_token_exp_timeout - self._rbac_mode = rbac_mode - - @property - def auth_token_exp_timeout(self): - return self._auth_token_exp_timeout - - @auth_token_exp_timeout.setter - def auth_token_exp_timeout(self, auth_token_exp_timeout): - self._auth_token_exp_timeout = auth_token_exp_timeout - - @property - def rbac_mode(self): - return self._rbac_mode - - @rbac_mode.setter - def rbac_mode(self, rbac_mode): - self._rbac_mode = rbac_mode From 244861ce22a2d9b8fdfba258b353f437678d1ad6 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Tue, 16 Apr 2024 13:45:53 -0300 Subject: [PATCH 223/419] Added the necessary models in configuration_model.py. --- api/api/models/configuration_model.py | 109 ++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 api/api/models/configuration_model.py diff --git a/api/api/models/configuration_model.py b/api/api/models/configuration_model.py new file mode 100644 index 00000000000..32da2ef8c68 --- /dev/null +++ b/api/api/models/configuration_model.py @@ -0,0 +1,109 @@ +# coding: utf-8 + +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . 
+# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 + + +from __future__ import absolute_import + +from api.models.base_model_ import Body, Model + + +class HTTPSModel(Model): + def __init__(self, enabled=None, key=None, cert=None, use_ca=None, ca=None): + self.swagger_types = { + 'enabled': bool, + 'key': str, + 'cert': str, + 'use_ca': bool, + 'ca': str + } + + self.attribute_map = { + 'enabled': 'enabled', + 'key': 'key', + 'cert': 'cert', + 'use_ca': 'use_ca', + 'ca': 'ca' + } + + self._enabled = enabled + self._key = key + self._cert = cert + self._use_ca = use_ca + self._ca = ca + + @property + def enabled(self): + return self._enabled + + @enabled.setter + def enabled(self, enabled): + self._enabled = enabled + + @property + def key(self): + return self._key + + @key.setter + def key(self, key): + self._key = key + + @property + def cert(self): + return self._cert + + @cert.setter + def cert(self, cert): + self._cert = cert + + @property + def use_ca(self): + return self._use_ca + + @use_ca.setter + def use_ca(self, use_ca): + self._use_ca = use_ca + + @property + def ca(self): + return self._ca + + @ca.setter + def ca(self, ca): + self._ca = ca + + +class SecurityConfigurationModel(Body): + """Security configuration model.""" + + def __init__(self, auth_token_exp_timeout: int = None, rbac_mode: str = None): + self.swagger_types = { + 'auth_token_exp_timeout': int, + 'rbac_mode': str + } + + self.attribute_map = { + 'auth_token_exp_timeout': 'auth_token_exp_timeout', + 'rbac_mode': 'rbac_mode' + } + + self._auth_token_exp_timeout = auth_token_exp_timeout + self._rbac_mode = rbac_mode + + @property + def auth_token_exp_timeout(self): + return self._auth_token_exp_timeout + + @auth_token_exp_timeout.setter + def auth_token_exp_timeout(self, auth_token_exp_timeout): + self._auth_token_exp_timeout = auth_token_exp_timeout + + @property + def rbac_mode(self): + return self._rbac_mode + + @rbac_mode.setter + def rbac_mode(self, rbac_mode): + self._rbac_mode = rbac_mode From 505a90c038f9fc721e7585d70f73dd1693b9b22b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Wed, 10 Apr 2024 14:35:20 -0300 Subject: [PATCH 224/419] Remove unused error handler --- api/api/error_handler.py | 27 --------------------------- api/api/test/test_error_handler.py | 19 +------------------ 2 files changed, 1 insertion(+), 45 deletions(-) diff --git a/api/api/error_handler.py b/api/api/error_handler.py index 4f2ed39d701..3f43e8a7359 100644 --- a/api/api/error_handler.py +++ b/api/api/error_handler.py @@ -113,33 +113,6 @@ async def unauthorized_error_handler(request: ConnexionRequest, status_code=exc.status_code, content_type=ERROR_CONTENT_TYPE) -async def bad_request_error_handler(request: ConnexionRequest, - exc: exceptions.BadRequestProblem) -> ConnexionResponse: - """Bad Request Exception Error handler. - - Parameters - ---------- - request : ConnexionRequest - Incomming request. - exc : BadRequestProblem - Raised exception. - - Returns - ------- - Response - HTTP Response returned to the client. - """ - - problem = { - "title": 'Bad Request', - } - if exc.detail: - problem['detail'] = exc.detail - - return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', - status_code=exc.status_code, content_type=ERROR_CONTENT_TYPE) - - async def http_error_handler(request: ConnexionRequest, exc: exceptions.HTTPException) -> ConnexionResponse: """HTTPError Exception Error handler. 
diff --git a/api/api/test/test_error_handler.py b/api/api/test/test_error_handler.py index 650a41f1515..f65fa9e0974 100644 --- a/api/api/test/test_error_handler.py +++ b/api/api/test/test_error_handler.py @@ -12,7 +12,7 @@ from connexion.exceptions import HTTPException, ProblemException, BadRequestProblem, Unauthorized from api.error_handler import _cleanup_detail_field, prevent_bruteforce_attack, jwt_error_handler, \ - http_error_handler, problem_error_handler, bad_request_error_handler, unauthorized_error_handler, \ + http_error_handler, problem_error_handler, unauthorized_error_handler, \ expect_failed_error_handler, ERROR_CONTENT_TYPE from api.middlewares import LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT from api.api_exception import ExpectFailedException @@ -187,23 +187,6 @@ async def test_problem_error_handler(title, detail, ext, error_type, mock_reques assert body == problem -@pytest.mark.asyncio -@pytest.mark.parametrize('detail', [None, 'detail']) -async def test_bad_request_error_handler(detail, mock_request): - """Test bad request error handler.""" - problem = { - "title": 'Bad Request', - } - problem.update({'detail': detail} if detail else {}) - - exc = BadRequestProblem(detail=detail) - response = await bad_request_error_handler(mock_request, exc) - body = json.loads(response.body) - assert body == problem - assert response.status_code == exc.status_code - assert response.content_type == ERROR_CONTENT_TYPE - - @pytest.mark.asyncio @pytest.mark.parametrize('query_param_pretty, expected_detail', [ ('true', "Unknown Expect"), From 31a2fcae5b3e5332af6407ab3c5c09ed8c578067 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Wed, 10 Apr 2024 15:31:21 -0300 Subject: [PATCH 225/419] Add application/xml response content-type --- api/api/spec/spec.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index 71192859fad..e2030f4a5b7 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -8781,6 +8781,15 @@ paths: - type: object description: "The output format depends on the type of file that has been requested: rootkit file, rootkit trojans or rcl" + application/xml: + schema: + properties: + data: + oneOf: + - type: array + - type: object + description: "The output format depends on the type of file that has been requested: rootkit + file, rootkit trojans or rcl" example: data: vars: None @@ -11871,6 +11880,14 @@ paths: properties: data: $ref: '#/components/schemas/WazuhManagerConfiguration' + application/xml: + schema: + allOf: + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/WazuhManagerConfiguration' example: data: affected_items: From 96ad981925967d571e9d658af85cdfdc52d1089d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Wed, 10 Apr 2024 15:31:34 -0300 Subject: [PATCH 226/419] Remove wrong test Remove wrong test --- .../test_manager_endpoints.tavern.yaml | 41 ------------------- framework/requirements.txt | 3 ++ 2 files changed, 3 insertions(+), 41 deletions(-) diff --git a/api/test/integration/test_manager_endpoints.tavern.yaml b/api/test/integration/test_manager_endpoints.tavern.yaml index 3dc10999f55..5937ff9f4cc 100644 --- a/api/test/integration/test_manager_endpoints.tavern.yaml +++ b/api/test/integration/test_manager_endpoints.tavern.yaml @@ -675,47 +675,6 @@ stages: total_affected_items: 1 total_failed_items: 0 ---- -test_name: GET /manager/validation (KO) - -stages: - - #### Upload 
corrupted rules file - # PUT /rules/files - - name: Upload corrupted - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/rules/files/new-rules_corrupted.xml" - method: PUT - data: "{corrupted_rules_file}" - headers: - Authorization: "Bearer {test_login_token}" - content-type: application/octet-stream - response: - status_code: 200 - - # GET /manager/configuration/validation - - name: Request validation - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/validation" - method: GET - headers: - Authorization: "Bearer {test_login_token}" - response: - status_code: 200 - json: - error: 1 - data: - affected_items: [] - failed_items: - - error: - code: 1908 - id: - - !anystr - total_affected_items: 0 - total_failed_items: 1 - --- test_name: GET /manager/configuration/{component}/{configuration} diff --git a/framework/requirements.txt b/framework/requirements.txt index cc34e7d3b01..4357422eda6 100644 --- a/framework/requirements.txt +++ b/framework/requirements.txt @@ -95,3 +95,6 @@ Werkzeug==3.0.1 xmltodict==0.12.0 yarl==1.7.0 zipp==3.3.2 +content_size_limit_asgi +uvicorn==0.24.0.post1 +content_size_limit_asgi==0.1.5 From 06c4470703d14442f0a29da1107a7069302615e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Tue, 16 Apr 2024 10:12:28 -0300 Subject: [PATCH 227/419] Use variable instead of hardcoded string --- api/api/test/test_error_handler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/api/test/test_error_handler.py b/api/api/test/test_error_handler.py index f65fa9e0974..100397f1db0 100644 --- a/api/api/test/test_error_handler.py +++ b/api/api/test/test_error_handler.py @@ -199,7 +199,7 @@ async def test_expect_failed_error_handler(query_param_pretty, expected_detail): response = await expect_failed_error_handler(request, ExpectFailedException(detail=expected_detail) if expected_detail else None) assert response.status_code == 417 - assert response.content_type == "application/problem+json; charset=utf-8" + assert response.content_type == ERROR_CONTENT_TYPE body = json.loads(response.body) assert body["title"] == "Expectation failed" From ca80b1b168490591524c4e458245778234896408 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Tue, 16 Apr 2024 12:27:22 -0300 Subject: [PATCH 228/419] Remove JWT error handler --- api/api/error_handler.py | 26 +------------------------- api/api/test/test_error_handler.py | 22 ++++------------------ api/scripts/wazuh_apid.py | 1 - 3 files changed, 5 insertions(+), 44 deletions(-) diff --git a/api/api/error_handler.py b/api/api/error_handler.py index 3f43e8a7359..aab5fd64550 100644 --- a/api/api/error_handler.py +++ b/api/api/error_handler.py @@ -106,7 +106,7 @@ async def unauthorized_error_handler(request: ConnexionRequest, attempts=configuration.api_conf['access']['max_login_attempts'] ) else: - problem.update({'detail': 'No authorization token provided'} \ + problem.update({'detail': exc.detail} \ if 'token_info' not in request.context \ else {}) return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', @@ -138,30 +138,6 @@ async def http_error_handler(request: ConnexionRequest, status_code=exc.status_code, content_type=ERROR_CONTENT_TYPE) -async def jwt_error_handler(request: ConnexionRequest, _: jwt.exceptions.PyJWTError) -> ConnexionResponse: - """JWTException Error handler. - - Parameters - ---------- - request : ConnexionRequest - Incomming request. - _ : JWTError - Raised exception. 
- Unnamed parameter not used. - - Returns - ------- - Response - HTTP Response returned to the client. - """ - problem = { - "title": "Unauthorized", - "detail": "No authorization token provided" - } - return json_response(data=problem, pretty=request.query_params.get('pretty', 'false') == 'true', - status_code=401, content_type=ERROR_CONTENT_TYPE) - - async def problem_error_handler(request: ConnexionRequest, exc: exceptions.ProblemException) -> ConnexionResponse: """ProblemException Error handler. diff --git a/api/api/test/test_error_handler.py b/api/api/test/test_error_handler.py index 100397f1db0..2158f262b42 100644 --- a/api/api/test/test_error_handler.py +++ b/api/api/test/test_error_handler.py @@ -11,7 +11,7 @@ from freezegun import freeze_time from connexion.exceptions import HTTPException, ProblemException, BadRequestProblem, Unauthorized -from api.error_handler import _cleanup_detail_field, prevent_bruteforce_attack, jwt_error_handler, \ +from api.error_handler import _cleanup_detail_field, prevent_bruteforce_attack, \ http_error_handler, problem_error_handler, unauthorized_error_handler, \ expect_failed_error_handler, ERROR_CONTENT_TYPE from api.middlewares import LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT @@ -88,6 +88,8 @@ async def test_unauthorized_error_handler(path, method, token_info, mock_request problem = { "title": "Unauthorized", } + detail = 'test' + exc = Unauthorized(detail) mock_request.configure_mock(scope={'path': path}) mock_request.method = method if path in {LOGIN_ENDPOINT, RUN_AS_LOGIN_ENDPOINT} \ @@ -97,10 +99,9 @@ async def test_unauthorized_error_handler(path, method, token_info, mock_request if token_info: mock_request.context = {'token_info': ''} else: - problem['detail'] = 'No authorization token provided' + problem['detail'] = detail mock_request.context = {} - exc = Unauthorized() with patch('api.error_handler.prevent_bruteforce_attack') as mock_pbfa, \ patch('api.configuration.api_conf', new={'access': {'max_login_attempts': 1000}}): response = await unauthorized_error_handler(mock_request, exc) @@ -114,21 +115,6 @@ async def test_unauthorized_error_handler(path, method, token_info, mock_request assert response.content_type == ERROR_CONTENT_TYPE -@pytest.mark.asyncio -async def test_jwt_error_handler(mock_request): - """Test jwt error handler.""" - problem = { - "title": "Unauthorized", - "detail": "No authorization token provided" - } - response = await jwt_error_handler(mock_request, None) - - body = json.loads(response.body) - assert body == problem - assert response.status_code == 401 - assert response.content_type == ERROR_CONTENT_TYPE - - @pytest.mark.asyncio @pytest.mark.parametrize('detail', [None, 'Custom detail']) async def test_http_error_handler(detail, mock_request): diff --git a/api/scripts/wazuh_apid.py b/api/scripts/wazuh_apid.py index a8f750d61db..1bfcc8ac2d8 100755 --- a/api/scripts/wazuh_apid.py +++ b/api/scripts/wazuh_apid.py @@ -219,7 +219,6 @@ def start(params: dict): # Add error handlers to format exceptions app.add_error_handler(ExpectFailedException, error_handler.expect_failed_error_handler) - app.add_error_handler(jwt.exceptions.PyJWTError, error_handler.jwt_error_handler) app.add_error_handler(Unauthorized, error_handler.unauthorized_error_handler) app.add_error_handler(HTTPException, error_handler.http_error_handler) app.add_error_handler(ProblemException, error_handler.problem_error_handler) From 2b5e800df88fb76a7298ff3fdb78f42759cab9e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Thu, 18 Apr 2024 
11:25:27 -0300 Subject: [PATCH 229/419] Remove wait_for_api fixture --- .../test_api/test_config/test_logs/test_logs_format.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/integration/test_api/test_config/test_logs/test_logs_format.py b/tests/integration/test_api/test_config/test_logs/test_logs_format.py index 504232f17ae..02534c1aa4d 100644 --- a/tests/integration/test_api/test_config/test_logs/test_logs_format.py +++ b/tests/integration/test_api/test_config/test_logs/test_logs_format.py @@ -87,8 +87,7 @@ # Tests @pytest.mark.tier(level=1) @pytest.mark.parametrize('test_configuration,test_metadata', zip(test_configuration, test_metadata), ids=test_cases_ids) -def test_logs_formats(test_configuration, test_metadata, add_configuration, truncate_monitored_files, daemons_handler, - wait_for_api_start): +def test_logs_formats(test_configuration, test_metadata, add_configuration, truncate_monitored_files, daemons_handler): """ description: Check if the logs of the API are stored in the specified formats and the content of the log files are the expected. @@ -127,9 +126,6 @@ def test_logs_formats(test_configuration, test_metadata, add_configuration, trun - daemons_handler: type: fixture brief: Wrapper of a helper function to handle Wazuh daemons. - - wait_for_api_start: - type: fixture - brief: Monitor the API log file to detect whether it has been started or not. assertions: - Verify that the response status code is the expected one. From b5fad399d94ed384737efb77cf222c6a8bb2bfe4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Fri, 19 Apr 2024 14:39:04 -0300 Subject: [PATCH 230/419] Set timeout and login attempts --- .../test_api/test_config/test_logs/test_logs_format.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_api/test_config/test_logs/test_logs_format.py b/tests/integration/test_api/test_config/test_logs/test_logs_format.py index 02534c1aa4d..1320d8227a1 100644 --- a/tests/integration/test_api/test_config/test_logs/test_logs_format.py +++ b/tests/integration/test_api/test_config/test_logs/test_logs_format.py @@ -148,10 +148,10 @@ def test_logs_formats(test_configuration, test_metadata, add_configuration, trun if current_level == 'error': with pytest.raises(RuntimeError) as exception: - login() + login(timeout=10, login_attempts=5) response = exception.value.args[1] else: - _, response = login() + _, response = login(timeout=10, login_attempts=5) assert response.status_code == expected_code, f"The status code was {response.status_code}." \ f"\nExpected: {expected_code}." From 8be4db1be0f1442de08875017e811d64e3d7b6b0 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Fri, 19 Apr 2024 17:03:45 -0300 Subject: [PATCH 231/419] Removed HTTPSModel. 
--- api/api/models/configuration_model.py | 65 --------------------------- api/api/test/test_encoder.py | 6 +-- 2 files changed, 2 insertions(+), 69 deletions(-) diff --git a/api/api/models/configuration_model.py b/api/api/models/configuration_model.py index 32da2ef8c68..bcab5649387 100644 --- a/api/api/models/configuration_model.py +++ b/api/api/models/configuration_model.py @@ -10,71 +10,6 @@ from api.models.base_model_ import Body, Model -class HTTPSModel(Model): - def __init__(self, enabled=None, key=None, cert=None, use_ca=None, ca=None): - self.swagger_types = { - 'enabled': bool, - 'key': str, - 'cert': str, - 'use_ca': bool, - 'ca': str - } - - self.attribute_map = { - 'enabled': 'enabled', - 'key': 'key', - 'cert': 'cert', - 'use_ca': 'use_ca', - 'ca': 'ca' - } - - self._enabled = enabled - self._key = key - self._cert = cert - self._use_ca = use_ca - self._ca = ca - - @property - def enabled(self): - return self._enabled - - @enabled.setter - def enabled(self, enabled): - self._enabled = enabled - - @property - def key(self): - return self._key - - @key.setter - def key(self, key): - self._key = key - - @property - def cert(self): - return self._cert - - @cert.setter - def cert(self, cert): - self._cert = cert - - @property - def use_ca(self): - return self._use_ca - - @use_ca.setter - def use_ca(self, use_ca): - self._use_ca = use_ca - - @property - def ca(self): - return self._ca - - @ca.setter - def ca(self, ca): - self._ca = ca - - class SecurityConfigurationModel(Body): """Security configuration model.""" diff --git a/api/api/test/test_encoder.py b/api/api/test/test_encoder.py index 6aec208fc50..25f32861045 100644 --- a/api/api/test/test_encoder.py +++ b/api/api/test/test_encoder.py @@ -7,8 +7,6 @@ import pytest -from api.models.configuration_model import HTTPSModel - with patch('wazuh.common.wazuh_uid'): with patch('wazuh.common.wazuh_gid'): from api.encoder import prettify, dumps @@ -17,14 +15,14 @@ def custom_hook(dct): if 'key' in dct: - return HTTPSModel.from_dict(dct) + return {'key': dct['key']} elif 'error' in dct: return WazuhResult.decode_json({'result': dct, 'str_priority': 'v2'}) else: return dct -@pytest.mark.parametrize('o', [HTTPSModel(key='v1'), +@pytest.mark.parametrize('o', [{'key': 'v1'}, WazuhResult({'k1': 'v1'}, str_priority='v2') ] ) From 412560368a8f39fd9af92d86b0c17d4e27ee69c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Mon, 15 Apr 2024 16:07:22 -0300 Subject: [PATCH 232/419] Remove unused parameter --- api/api/models/active_response_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/api/models/active_response_model.py b/api/api/models/active_response_model.py index 9e05a80ffad..334bedb9e84 100644 --- a/api/api/models/active_response_model.py +++ b/api/api/models/active_response_model.py @@ -14,7 +14,7 @@ class ActiveResponseModel(Body): - def __init__(self, command: str = None, custom: bool = None, arguments: list = None, alert: dict = None): + def __init__(self, command: str = None, arguments: list = None, alert: dict = None): """ActiveResponseModel body model. 
Parameters From 71b682e8c2852aa99439fec8b94f060c8ca1bebe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Mon, 15 Apr 2024 16:07:28 -0300 Subject: [PATCH 233/419] Update API spec --- api/api/spec/spec.yaml | 3043 ++++++++++++++++++++-------------------- 1 file changed, 1521 insertions(+), 1522 deletions(-) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index e2030f4a5b7..5762af341ae 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -1,18 +1,18 @@ openapi: '3.0.0' info: description: | - The Wazuh API is an open source RESTful API that allows for interaction with the Wazuh manager from a web browser, - command line tool like cURL or any script or program that can make web requests. The Wazuh WUI relies on this + The Wazuh API is an open-source RESTful API that allows for interaction with the Wazuh manager from a web browser, + command line tools like cURL or any script or program that can make web requests. The Wazuh WUI relies on this heavily and Wazuh’s goal is to accommodate complete remote management of the Wazuh infrastructure via the Wazuh WUI. Use the Wazuh API to easily perform everyday actions like adding an agent, restarting the manager(s) or agent(s) or looking up syscheck details. # Authentication - Wazuh API endpoints require authentication in order to be used. Therefore, all calls must include a JSON Web Token. + Wazuh API endpoints require authentication to be used. Therefore, all calls must include a JSON Web Token. JWT is an open standard (RFC 7519) that defines a compact and self-contained way for securely transmitting information between parties as a JSON object. Perform a call with `basicAuth` to `POST /security/user/authenticate` - and obtain a JWT token in order to run any endpoint. + and obtain a JWT token to run any endpoint. JWT tokens have a default duration of 900 seconds. To change this value, you must perform a call with a valid JWT token to `PUT /security/config`. 
After this change, you will need to get a new JWT token as all previously @@ -29,14 +29,14 @@ info: "error": 0 } ``` - Use the token from previous response to perform any endpoint request: + Use the token from the previous response to perform any endpoint request: `curl -k -X "https://:55000/" -H "Authorization: Bearer "` Change the token base duration: `curl -k -X PUT "https://:55000/security/config" -H "Authorization: Bearer " - -d '{"auth_token_exp_timeout":}'` + -d '{"auth_token_exp_timeout": }'` @@ -313,7 +313,7 @@ x-rbac-catalog: resources: ['decoder:file:local_decoder.xml'] effect: "allow" 'lists:read': - description: "Read cdb lists files" + description: "Read CDB lists files" resources: - $ref: '#/x-rbac-catalog/resources/list:file' example: @@ -321,7 +321,7 @@ x-rbac-catalog: resources: ['list:file:audit-keys'] effect: "deny" 'lists:update': - description: "Update or upload cdb lists files" + description: "Update or upload CDB lists files" resources: - $ref: '#/x-rbac-catalog/resources/*:*' example: @@ -329,7 +329,7 @@ x-rbac-catalog: resources: ['*:*:*'] effect: "allow" 'lists:delete': - description: "Delete cdb lists files" + description: "Delete CDB lists files" resources: - $ref: '#/x-rbac-catalog/resources/list:file' example: @@ -816,7 +816,6 @@ components: - $ref: '#/components/schemas/WazuhDBStatsItem' - $ref: '#/components/schemas/AllItemsResponse' - AllItemsResponseWazuhDaemonStatsAgents: allOf: - type: object @@ -1172,6 +1171,19 @@ components: items: $ref: '#/components/schemas/UsersResponse' + AllItemsResponseGroupFiles: + allOf: + - $ref: '#/components/schemas/AllItemsResponse' + - type: object + required: + - affected_items + properties: + affected_items: + type: array + description: "Items that successfully applied the API call action" + items: + $ref: '#/components/schemas/GroupFiles' + AllItemsResponseCiscatResult: allOf: - $ref: '#/components/schemas/AllItemsResponse' @@ -1321,7 +1333,6 @@ components: type: string ## Ruleset models - RulesetFile: type: object properties: @@ -1353,7 +1364,7 @@ components: items: type: string command: - description: "Command running in the agent. If this value starts by `!`, then it refers to a script name + description: "Command running in the agent. 
If this value starts with `!`, then it refers to a script name instead of a command name" type: string format: active_response_command @@ -1361,7 +1372,7 @@ components: type: object properties: data: - description: "Alert data depending on the AR executed" + description: "Alert data depending on the active response executed" type: object required: - command @@ -1377,7 +1388,7 @@ components: $ref: '#/components/schemas/AgentID' name: type: string - description: "Agent name used at registration process" + description: "Agent name used at the registration process" status: $ref: '#/components/schemas/AgentStatus' configSum: @@ -1397,13 +1408,13 @@ components: will be the same as registerIP field" registerIP: type: string - description: "IP used at agent registration process" + description: "IP used at agent the registration process" manager: type: string - description: "Hostname of the manager where the agent is reporting to" + description: "Hostname of the manager the agent is reporting to" node_name: type: string - description: "Node ID where the agent is reporting to" + description: "ID of the node the agent is reporting to" dateAdd: type: string description: "Date when the agent was registered" @@ -1437,6 +1448,24 @@ components: default: 0 minimum: 0 maximum: 5 + group_config_status: + type: string + description: "Agent groups configuration sync status" + + AgentAddBody: + type: object + properties: + name: + description: "Agent name" + type: string + format: names + ip: + description: "If this is not included, the API will get the IP automatically. Allowed values: + IP, IP/NET, ANY" + type: string + format: alphanumeric + required: + - name AgentGroup: type: object @@ -1476,6 +1505,32 @@ components: key: type: string format: base64 + + AgentInsertBody: + type: object + properties: + id: + $ref: '#/components/schemas/AgentID' + key: + type: string + maxLength: 64 + minLength: 64 + format: wazuh_key + description: "Key to use when communicating with the manager. The agent must have the same key on + its `client.keys` file" + name: + description: "Agent name" + type: string + format: names + ip: + description: "If this is not included, the API will get the IP automatically. Allowed values: + IP, IP/NET, ANY" + type: string + format: alphanumeric + force: + $ref: '#/components/schemas/AgentInsertForce' + required: + - name AgentStatus: type: string @@ -1557,44 +1612,11 @@ components: description: "Node ID" format: string - GroupID: - type: string - description: "Group name" - format: group_names - - GroupIDListAll: - type: string - minLength: 1 - description: "Group name|all" - format: group_names_or_all - AgentConfiguration: type: object description: "Current agent's configuration. The output varies with requested component and the agent configuration" - GroupConfiguration: - type: object - required: - - filters - - config - properties: - filters: - type: object - properties: - os: - type: string - description: "OS family where the configuration is being applied" - name: - type: string - description: "The name of the agent where that configuration is being applied" - profile: - type: string - description: "Profile name. Any agent configured to use the defined profile may use the block" - config: - type: object - description: "Group configuration. 
The fields on this object depend on the actual group configuration" - AgentInsertForce: type: object description: "Remove the old agent with the same name, ID or IP if the configuration is matched" @@ -1819,39 +1841,43 @@ components: office365: type: integer format: int32 - description: "Events coming from Office365 module (this agent)" + description: "Events coming from the Office365 module (this agent)" ms-graph: type: integer format: int32 - description: "Events coming from ms-graph module (this agent)" + description: "Events coming from the ms-graph module (this agent)" oscap: type: integer format: int32 - description: "Events coming from OSCAP module (this agent)" + description: "Events coming from the OSCAP module (this agent)" osquery: type: integer format: int32 - description: "Events coming from OSQuery module (this agent)" + description: "Events coming from the OSQuery module (this agent)" rootcheck: type: integer format: int32 - description: "Events coming from rootcheck (syscheckd) (this agent)" + description: "Events coming from the rootcheck (syscheckd) (this agent)" sca: type: integer format: int32 - description: "Events coming from SCA module (this agent)" + description: "Events coming from the SCA module (this agent)" syscheck: type: integer format: int32 - description: "Events coming from syscheckd (this agent)" + description: "Events coming from the syscheckd (this agent)" syscollector: type: integer format: int32 - description: "Events coming from syscollector module (this agent)" + description: "Events coming from the syscollector module (this agent)" upgrade: type: integer format: int32 - description: "Events coming from upgrade agent module (this agent)" + description: "Events coming from the upgrade agent module (this agent)" + vulnerability: + type: integer + format: int32 + description: "Events coming from the vulnerability detector module (this agent)" monitor: type: integer format: int32 @@ -1876,6 +1902,66 @@ components: format: int32 description: "Alerts written in firewall log file (this agent)" + ## Group models + GroupID: + type: string + description: "Group name" + format: group_names + + GroupIDListAll: + type: string + minLength: 1 + description: "Group name|all" + format: group_names_or_all + + GroupConfiguration: + type: object + properties: + filters: + type: object + properties: + os: + type: string + description: "OS family where the configuration is being applied" + name: + type: string + description: "The name of the agent where that configuration is being applied" + profile: + type: string + description: "Profile name. Any agent configured to use the defined profile may use the block" + config: + type: object + description: "Group configuration. The fields on this object depend on the actual group configuration" + required: + - filters + - config + + CreateGroupBody: + type: object + properties: + group_id: + description: "Group name. It can contain any of the characters between a-z, A-Z, 0-9, '_', '-' and '.'. Names '.' and '..' are restricted." 
+ type: string + format: group_names + maxLength: 128 + required: + - group_id + example: + group_id: NewGroup_1 + + GroupFiles: + type: object + properties: + filename: + type: string + description: "File name" + hash: + type: string + description: "File content hash" + example: + filename: agent.conf + hash: ab73af41699f13fdd81903b5f23d8d00 + ## CisCat models CiscatResults: type: object @@ -2043,6 +2129,78 @@ components: - stopped - running + ClusterLocalInfo: + type: object + properties: + data: + properties: + node: + description: "Node name" + type: string + cluster: + description: "Cluster name the node belongs to" + type: string + type: + description: "Node type" + type: string + + ClusterLocalConfig: + type: object + properties: + data: + properties: + name: + description: "Cluster name" + type: string + node_name: + description: "Node name" + type: string + node_type: + description: "Node type" + type: string + enum: + - master + - worker + key: + description: "Cluster key used to encrypt messages" + type: string + port: + description: "Port used by the **master** node to communicate with workers" + type: integer + bind_addr: + description: "Network interface used by the **master** to listen to incoming connections" + type: string + nodes: + description: "List of cluster master nodes. This list is used by **worker** nodes to connect + to the master" + type: array + items: + type: string + hidden: + description: "Whether to hide the cluster information in the alerts" + type: string + disabled: + description: "Whether the cluster is enabled or not" + type: boolean + + ClusterStatus: + type: object + properties: + data: + properties: + enabled: + description: "Whether the cluster is enabled in the Wazuh configuration" + type: string + enum: + - "yes" + - "no" + running: + description: "Whether the cluster daemon is running" + type: string + enum: + - "yes" + - "no" + # Logtest models LogtestRequest: type: object @@ -2404,7 +2562,7 @@ components: size: type: integer format: int32 - description: "Size of received messages queue" + description: "Received messages queue size" usage: type: integer format: int32 @@ -2749,7 +2907,7 @@ components: size: type: integer format: int32 - description: "Size of alerts messages queue" + description: "Alerts messages queue size" usage: type: integer format: int32 @@ -2760,7 +2918,7 @@ components: size: type: integer format: int32 - description: "Size of archives messages queue" + description: "Archives messages queue size" usage: type: integer format: int32 @@ -2771,7 +2929,7 @@ components: size: type: integer format: int32 - description: "Size of dbsync messages queue" + description: "DBsync messages queue size" usage: type: integer format: int32 @@ -2782,7 +2940,7 @@ components: size: type: integer format: int32 - description: "Size of eventchannel messages queue" + description: "EventChannel messages queue size" usage: type: integer format: int32 @@ -2793,7 +2951,7 @@ components: size: type: integer format: int32 - description: "Size of firewall messages queue" + description: "Firewall messages queue size" usage: type: integer format: int32 @@ -2804,7 +2962,7 @@ components: size: type: integer format: int32 - description: "Size of FTS messages queue" + description: "FTS messages queue size" usage: type: integer format: int32 @@ -2815,7 +2973,7 @@ components: size: type: integer format: int32 - description: "Size of hostinfo messages queue" + description: "Hostinfo messages queue size" usage: type: integer format: int32 @@ -2826,7 +2984,7 @@ 
components: size: type: integer format: int32 - description: "Size of other events messages queue" + description: "Other events messages queue size" usage: type: integer format: int32 @@ -2837,7 +2995,7 @@ components: size: type: integer format: int32 - description: "Size of processed messages queue" + description: "Processed messages queue size" usage: type: integer format: int32 @@ -2848,7 +3006,7 @@ components: size: type: integer format: int32 - description: "Size of rootcheck messages queue" + description: "Rootcheck messages queue size" usage: type: integer format: int32 @@ -2859,7 +3017,7 @@ components: size: type: integer format: int32 - description: "Size of SCA messages queue" + description: "SCA messages queue size" usage: type: integer format: int32 @@ -2870,7 +3028,7 @@ components: size: type: integer format: int32 - description: "Size of stats messages queue" + description: "Stats messages queue size" usage: type: integer format: int32 @@ -2881,7 +3039,7 @@ components: size: type: integer format: int32 - description: "Size of syscheck messages queue" + description: "Syscheck messages queue size" usage: type: integer format: int32 @@ -2892,7 +3050,7 @@ components: size: type: integer format: int32 - description: "Size of syscollector messages queue" + description: "Syscollector messages queue size" usage: type: integer format: int32 @@ -2903,7 +3061,7 @@ components: size: type: integer format: int32 - description: "Size of upgrade messages queue" + description: "Upgrade messages queue size" usage: type: integer format: int32 @@ -2934,7 +3092,7 @@ components: received: type: integer format: int32 - description: "Total of queries through WazuhDB socket" + description: "Total number of queries through WazuhDB socket" received_breakdown: type: object properties: @@ -3817,7 +3975,7 @@ components: alerts_written: type: number format: float - description: "Total of alerts written in disk during the last 5 seconds" + description: "Total number of alerts written in disk during the last 5 seconds" archives_queue_size: type: number format: float @@ -3829,7 +3987,7 @@ components: event_queue_size: type: number format: float - description: "Non catalogued events queue size" + description: "Non-catalogued events queue size" event_queue_usage: type: number format: float @@ -3842,7 +4000,7 @@ components: events_processed: type: number format: float - description: "Total of events processed (i.e. matched against Wazuh ruleset) in the last 5 seconds" + description: "Total number of events processed (i.e. 
matched against Wazuh ruleset) in the last 5 seconds" events_received: type: number format: float @@ -3858,11 +4016,11 @@ components: firewall_written: type: number format: float - description: "Same as `alerts_written` but focusing in firewall alerts" + description: "Same as `alerts_written` but focusing on firewall alerts" fts_written: type: number format: float - description: "Same as `alerts_written` but focusing in [FTS alerts] + description: "Same as `alerts_written` but focusing on [FTS alerts] (https://documentation.wazuh.com/4.9/user-manual/ruleset/ruleset-xml-syntax/decoders.html?highlight=fts #fts)" hostinfo_queue_size: @@ -3876,7 +4034,7 @@ components: other_events_decoded: type: number format: float - description: "Same as `total_events_decoded` but for non catalogued events" + description: "Same as `total_events_decoded` but for non-catalogued events" rootcheck_events_decoded: type: number format: float @@ -7073,7 +7231,7 @@ paths: - $ref: '#/components/parameters/pretty' responses: '200': - description: "API default info" + description: "API default information" content: application/json: schema: @@ -7143,7 +7301,7 @@ paths: /agents: delete: tags: - - Agents + - Agents summary: "Delete agents" description: "Delete all agents or a list of them based on optional criteria" operationId: api.controllers.agent_controller.delete_agents @@ -7203,7 +7361,7 @@ paths: $ref: '#/components/responses/TooManyRequestsResponse' get: tags: - - Agents + - Agents summary: "List agents" description: "Return information about all available agents or a list of them" operationId: api.controllers.agent_controller.get_agents @@ -7264,14 +7422,14 @@ paths: configSum: ab73af41699f13fdd81903b5f23d8d00 manager: wazuh-worker2 group: [default] - registerIP: Any + registerIP: any ip: 172.25.0.6 name: ac7cb188d538 status: active mergedSum: 9a016508cea1e997ab8569f5cfab30f5 version: Wazuh v4.3.0 node_name: worker2 - group_config_status: "synced" + group_config_status: 'synced' status_code: 0 - os: arch: x86_64 @@ -7289,14 +7447,14 @@ paths: configSum: ab73af41699f13fdd81903b5f23d8d00 manager: wazuh-worker2 group: [default] - registerIP: Any + registerIP: any ip: 172.25.0.11 name: 91642a418627 status: active mergedSum: 9a016508cea1e997ab8569f5cfab30f5 version: Wazuh v4.3.0 node_name: worker2 - group_config_status: "synced" + group_config_status: 'synced' status_code: 0 - os: arch: x86_64 @@ -7314,14 +7472,14 @@ paths: configSum: ab73af41699f13fdd81903b5f23d8d00 manager: wazuh-worker2 group: [default] - registerIP: Any + registerIP: any ip: 172.25.0.10 name: a442e15bc84e status: active mergedSum: 9a016508cea1e997ab8569f5cfab30f5 version: Wazuh v4.3.0 node_name: worker1 - group_config_status: "not synced" + group_config_status: 'not synced' status_code: 0 total_affected_items: 3 total_failed_items: 0 @@ -7340,7 +7498,7 @@ paths: $ref: '#/components/responses/TooManyRequestsResponse' post: tags: - - Agents + - Agents summary: "Add agent" description: "Add a new agent" operationId: api.controllers.agent_controller.add_agent @@ -7352,23 +7510,11 @@ paths: requestBody: content: application/json: - schema: - type: object - properties: - name: - description: "Agent name" - type: string - format: names - ip: - description: "If this is not included, the API will get the IP automatically. 
Allowed values: - IP, IP/NET, ANY" - type: string - format: alphanumeric - required: - - name - example: - name: NewHost_2 - ip: 10.0.10.11 + schema: + $ref: '#/components/schemas/AgentAddBody' + example: + name: NewHost_2 + ip: 10.0.10.11 responses: '200': description: "Add an agent" @@ -7386,7 +7532,6 @@ paths: id: "009" key: "MDA5IE5ld0hvc3RfMiAxMC4wLjEwLjExIDhkNjllMzRiYTM2ZjcwMzgzZmVkNTIyNzk4MzFiZDg2NWQzZGRkZjJhMTkwZWR jMjY5YmRhOTY1ODU4M2JkMjg=" - message: "User was successfully created" error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -7724,7 +7869,7 @@ paths: total_affected_items: 1 total_failed_items: 0 failed_items: [] - message: "Restart command sent to all agents" + message: "Restart command was sent to all agents" error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -7824,6 +7969,7 @@ paths: syscheck: 0 syscollector: 0 upgrade: 0 + vulnerability: 0 monitor: 0 remote: 0 written_breakdown: @@ -8138,12 +8284,12 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - allOf: - - $ref: '#/components/schemas/AllItemsResponseAgentIDs' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + allOf: + - $ref: '#/components/schemas/AllItemsResponseAgentIDs' example: data: affected_items: @@ -8190,12 +8336,12 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - allOf: - - $ref: '#/components/schemas/AllItemsResponseAgentIDs' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + allOf: + - $ref: '#/components/schemas/AllItemsResponseAgentIDs' example: data: affected_items: @@ -8259,7 +8405,7 @@ paths: total_affected_items: 3 total_failed_items: 0 failed_items: [] - message: "Restart command sent to all agents" + message: "Restart command was sent to all agents" error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -8274,42 +8420,53 @@ paths: '429': $ref: '#/components/responses/TooManyRequestsResponse' - /groups: - delete: + /agents/insert: + post: tags: - - Groups - summary: "Delete groups" - description: "Delete all groups or a list of them" - operationId: api.controllers.agent_controller.delete_groups + - Agents + summary: "Add agent full" + description: "Add an agent specifying its name, ID and IP. 
If an agent with the same name, the same ID or the + same IP already exists, replace it using the `force` parameter" + operationId: api.controllers.agent_controller.insert_agent x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/group:delete' + - $ref: '#/x-rbac-catalog/actions/agent:create' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' - - $ref: '#/components/parameters/groups_list_all' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/AgentInsertBody' + example: + name: NewHost_2 + ip: 10.0.10.11 + id: "123" + key: 1abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghi64 + force: + enabled: True + disconnected_time: + enabled: True + value: "30m" + after_registration_time: "2h" responses: '200': - description: "Remove multiple group of multiple agents" + description: "Insert new agent" content: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - allOf: - - $ref: '#/components/schemas/AgentGroupDeleted' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AgentIdKey' example: data: - affected_items: - - webserver - - dataserver - total_affected_items: 2 - total_failed_items: 0 - failed_items: [] - message: "All selected groups were deleted" - error: 0 + id: "010" + key: "MDEwIHRlc3RfYWdlbnRfd2F6dWhfIDE3Mi4xNi4xNi4xNyBmZTNhYzUyYmUxMDc2MDgwOGI4Y2JiZTczNzFjZDIzMWJjYjF + jZjUyMDVjYjMzNTI1YzhjZDEwZGIwYTkyM2E4" + error: 0 '400': $ref: '#/components/responses/ResponseError' '401': @@ -8318,61 +8475,116 @@ paths: $ref: '#/components/responses/PermissionDeniedResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' + '406': + $ref: '#/components/responses/WrongContentTypeResponse' + '413': + $ref: '#/components/responses/RequestTooLargeResponse' '429': $ref: '#/components/responses/TooManyRequestsResponse' - get: + + /agents/insert/quick: + post: tags: - - Groups - summary: "Get groups" - description: "Get information about all groups or a list of them. Returns a list containing basic information - about each group such as number of agents belonging to the group and the checksums of the configuration and - shared files" - operationId: api.controllers.agent_controller.get_list_group + - Agents + summary: "Add agent quick" + description: "Add a new agent with name `agent_name`. 
This agent will use `any` as IP" + operationId: api.controllers.agent_controller.post_new_agent x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/group:read' + - $ref: '#/x-rbac-catalog/actions/agent:create' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' - - $ref: '#/components/parameters/groups_list' - - $ref: '#/components/parameters/offset' - - $ref: '#/components/parameters/limit' - - $ref: '#/components/parameters/sort' - - $ref: '#/components/parameters/search' - - $ref: '#/components/parameters/hash' - - $ref: '#/components/parameters/query' + - $ref: '#/components/parameters/agent_name' + responses: + '200': + description: "Agent added" + content: + application/json: + schema: + allOf: + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AgentIdKey' + example: + data: + id: "011" + key: "MDExIHRlc3Rfd2F6dWhfcXVpY2sgYW55IDc3ZjNhY2RmMzBhN2ZlNzY0ODVmZTQ3YjUzMzc5ZWQ3ZDA1NDVhYzIyZDQ1NGY + 4YzcyZTZhNDU1MjVjYzY3OTM=" + error: 0 + '400': + $ref: '#/components/responses/ResponseError' + '401': + $ref: '#/components/responses/UnauthorizedResponse' + '403': + $ref: '#/components/responses/PermissionDeniedResponse' + '405': + $ref: '#/components/responses/InvalidHTTPMethodResponse' + '429': + $ref: '#/components/responses/TooManyRequestsResponse' + + /agents/no_group: + get: + tags: + - Agents + summary: "List agents without group" + description: "Return a list with all the available agents without an assigned group" + operationId: api.controllers.agent_controller.get_agent_no_group + x-rbac-actions: + - $ref: '#/x-rbac-catalog/actions/agent:read' + parameters: + - $ref: '#/components/parameters/pretty' + - $ref: '#/components/parameters/wait_for_complete' + - $ref: '#/components/parameters/offset' + - $ref: '#/components/parameters/limit' - $ref: '#/components/parameters/select' - - $ref: '#/components/parameters/distinct' + - $ref: '#/components/parameters/sort' + - $ref: '#/components/parameters/search' + - $ref: '#/components/parameters/query' responses: '200': - description: "List all groups" + description: "Get agents without group" content: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseGroups' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseAgents' example: data: affected_items: - - name: default - count: 2 - mergedSum: f8d49771911ed9d5c45b03a40babd065 - configSum: ab73af41699f13fdd81903b5f23d8d00 - - name: dmz - count: 3 - mergedSum: 220d6c5fc253f251827ee7487341c0fc - configSum: cfbae9ecc10eb15f1b4fc736de6758cc - - name: pciserver - count: 0 - mergedSum: 220d6c5fc253f251827ee7487341c0fc - configSum: ab73af41699f13fdd81903b5f23d8d00 + - node_name: unknown + ip: 172.16.16.16 + id: 009 + name: test_agent_wazuh + status: never_connected + dateAdd: 2021-05-27T10:06:13Z + registerIP: 172.16.16.16 + status_code: 0 + - node_name: unknown + ip: 172.16.16.17 + id: 010 + name: test_agent_wazuh2 + status: never_connected + dateAdd: 2021-05-27T10:06:27Z + registerIP: 172.16.16.17 + status_code: 0 + - node_name: unknown + ip: any + id: 011 + name: test_wazuh_quick + status: never_connected + dateAdd: 2021-05-27T10:07:37Z + registerIP: any + status_code: 0 total_affected_items: 3 total_failed_items: 0 failed_items: [] - message: "All selected groups information was returned" + message: 
All selected agents information was returned error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -8384,41 +8596,43 @@ paths: $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': $ref: '#/components/responses/TooManyRequestsResponse' - post: + + /agents/node/{node_id}/restart: + put: tags: - - Groups - summary: "Create a group" - description: "Create a new group" - operationId: api.controllers.agent_controller.post_group + - Agents + summary: "Restart agents in node" + description: "Restart all agents which belong to a specific given node" + operationId: api.controllers.agent_controller.restart_agents_by_node x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/group:create' + - $ref: '#/x-rbac-catalog/actions/cluster:read' + - $ref: '#/x-rbac-catalog/actions/agent:restart' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' - requestBody: - content: - application/json: - schema: - type: object - properties: - group_id: - description: "Group name. It can contain any of the characters between a-z, A-Z, 0-9, '_', '-' and '.'. Names '.' and '..' are restricted." - type: string - format: group_names - maxLength: 128 - required: - - group_id - example: - group_id: NewGroup_1 + - $ref: '#/components/parameters/node_id' responses: '200': - description: "Add new agent" + description: 'Agents restarted' content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' + allOf: + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseAgentIDs' example: - message: "Group 'pciserver' created" + data: + affected_items: + - "002" + - "006" + - "008" + total_affected_items: 3 + total_failed_items: 0 + failed_items: [] + message: "Restart command was sent to all agents" error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -8431,122 +8645,93 @@ paths: '429': $ref: '#/components/responses/TooManyRequestsResponse' - /groups/{group_id}/agents: + /agents/outdated: get: tags: - - Groups - summary: "Get agents in a group" - description: "Return the list of agents that belong to the specified group" - operationId: api.controllers.agent_controller.get_agents_in_group + - Agents + summary: "List outdated agents" + description: "Return the list of outdated agents" + operationId: api.controllers.agent_controller.get_agent_outdated x-rbac-actions: - $ref: '#/x-rbac-catalog/actions/agent:read' - - $ref: '#/x-rbac-catalog/actions/group:read' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' - - $ref: '#/components/parameters/group_id' - $ref: '#/components/parameters/offset' - $ref: '#/components/parameters/limit' - - $ref: '#/components/parameters/select' - $ref: '#/components/parameters/sort' - $ref: '#/components/parameters/search' - - $ref: '#/components/parameters/statusAgentParam' + - $ref: '#/components/parameters/select' - $ref: '#/components/parameters/query' - - $ref: '#/components/parameters/distinct' responses: '200': - description: "List of agents or error description" + description: "Get outdated agents" content: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseAgents' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseAgents' example: data: affected_items: - os: arch: x86_64 codename: Focal 
Fossa - major: 20 - minor: 04 + major: '20' + minor: '04' name: Ubuntu platform: ubuntu - uname: Linux |b2497efbf876 |5.8.0-45-generic |#51~20.04.1-Ubuntu SMP Tue Feb + uname: Linux |ac7cb188d538 |5.8.0-45-generic |#51~20.04.1-Ubuntu SMP Tue Feb 23 13:46:31 UTC 2021 |x86_64 version: 20.04.2 LTS - mergedSum: 2c769b2ea138d472ee8f1ba23412b5d4 - node_name: worker1 - ip: 172.20.0.7 - id: 004 - manager: wazuh-worker1 - group: - - default - - group1 - name: b2497efbf876 - configSum: 052374472f3a0d5c8508241dcc455ea7 - status: active - dateAdd: 2021-05-27T09:14:19Z + lastKeepAlive: '2024-02-26T12:40:40Z' + id: '001' + dateAdd: 2024-02-26T12:40:08Z + configSum: ab73af41699f13fdd81903b5f23d8d00 + manager: wazuh-worker2 + group: [default] registerIP: any - lastKeepAlive: 2021-05-27T09:23:59Z - version: Wazuh v4.3.0 + ip: 172.25.0.6 + name: ac7cb188d538 + status: active + mergedSum: 9a016508cea1e997ab8569f5cfab30f5 + version: Wazuh v3.0.0 + node_name: worker2 + group_config_status: "synced" + status_code: 0 - os: arch: x86_64 codename: Focal Fossa - major: 20 - minor: 04 + major: '20' + minor: '04' name: Ubuntu platform: ubuntu - uname: Linux |600e27371700 |5.8.0-45-generic |#51~20.04.1-Ubuntu SMP Tue Feb + uname: Linux |ac7cb188d538 |5.8.0-45-generic |#51~20.04.1-Ubuntu SMP Tue Feb 23 13:46:31 UTC 2021 |x86_64 version: 20.04.2 LTS - mergedSum: 9a016508cea1e997ab8569f5cfab30f5 - node_name: worker1 - ip: 172.20.0.9 - id: 005 - manager: wazuh-worker1 - group: - - default - - group2 - name: 600e27371700 + lastKeepAlive: '2024-02-26T12:40:40Z' + id: '002' + dateAdd: 2024-02-26T12:40:10Z configSum: ab73af41699f13fdd81903b5f23d8d00 - status: active - dateAdd: 2021-05-27T09:14:19Z + manager: wazuh-worker2 + group: [default] registerIP: any - lastKeepAlive: 2021-05-27T09:23:52Z - version: Wazuh v4.3.0 - - os: - arch: x86_64 - codename: Focal Fossa - major: 20 - minor: 04 - name: Ubuntu - platform: ubuntu - uname: Linux |4bdac19ce5e3 |5.8.0-45-generic |#51~20.04.1-Ubuntu SMP Tue Feb - 23 13:46:31 UTC 2021 |x86_64 - version: 20.04.2 LTS + ip: 172.25.0.11 + name: 91642a418627 + status: active mergedSum: 9a016508cea1e997ab8569f5cfab30f5 + version: Wazuh v3.0.0 node_name: worker2 - ip: 172.20.0.10 - id: 006 - manager: wazuh-worker2 - group: - - default - - group3 - name: 4bdac19ce5e3 - configSum: ab73af41699f13fdd81903b5f23d8d00 - status: active - dateAdd: 2021-05-27T09:14:19Z - registerIP: any - lastKeepAlive: 2021-05-27T09:23:52Z - version: Wazuh v4.3.0 - total_affected_items: 3 + group_config_status: "synced" + status_code: 0 + total_affected_items: 2 total_failed_items: 0 failed_items: [] - message: "All selected agents information was returned" + message: 'All selected agents information was returned' error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -8554,154 +8739,144 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' - '404': - $ref: '#/components/responses/ResourceNotFoundResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': $ref: '#/components/responses/TooManyRequestsResponse' - /groups/{group_id}/configuration: - get: + /agents/reconnect: + put: tags: - - Groups - summary: "Get group configuration" - description: "Return the group configuration defined in the `agent.conf` file" - operationId: api.controllers.agent_controller.get_group_config + - Agents + summary: "Force reconnect agents" + description: "Force reconnect all agents or a list of them" + operationId: 
api.controllers.agent_controller.reconnect_agents x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/group:read' + - $ref: '#/x-rbac-catalog/actions/agent:reconnect' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' - - $ref: '#/components/parameters/group_id' - - $ref: '#/components/parameters/offset' - - $ref: '#/components/parameters/limit' + - $ref: '#/components/parameters/agents_list' responses: '200': - description: "Get group configuration" + description: "Agents reconnected" content: application/json: schema: - type: object - properties: - data: - type: object + allOf: + - $ref: '#/components/schemas/ApiResponse' + - type: object properties: - affected_items: - type: array - items: - $ref: '#/components/schemas/GroupConfiguration' - total_affected_items: - type: integer - format: int32 - example: - data: - total_affected_items: 3 - affected_items: - - filters: - name: agent_name - config: - localfile: - - location: "/var/log/my.log" - log_format: syslog - - filters: - os: Linux - config: - localfile: - - location: "/var/log/linux.log" - log_format: syslog - - filters: - profile: database - config: - localfile: - - location: "/var/log/database.log" - log_format: syslog - error: 0 + data: + $ref: '#/components/schemas/AllItemsResponseAgentIDs' + example: + data: + affected_items: + - '001' + - '002' + - '003' + total_affected_items: 3 + total_failed_items: 0 + failed_items: [] + message: "Force reconnect command was sent to all agents" + error: 0 '400': $ref: '#/components/responses/ResponseError' '401': $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' - '404': - $ref: '#/components/responses/ResourceNotFoundResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': $ref: '#/components/responses/TooManyRequestsResponse' + + /agents/restart: put: tags: - - Groups - summary: "Update group configuration" - description: "Update an specified group's configuration. This API call expects a full valid XML file with the - shared configuration tags/syntax" - operationId: api.controllers.agent_controller.put_group_config + - Agents + summary: "Restart agents" + description: "Restart all agents or a list of them" + operationId: api.controllers.agent_controller.restart_agents x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/group:update_config' + - $ref: '#/x-rbac-catalog/actions/agent:restart' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' - - $ref: '#/components/parameters/group_id' - requestBody: - required: true - content: - application/xml: - schema: - properties: - tmp_file: - description: "File to update" - type: string + - $ref: '#/components/parameters/agents_list' responses: '200': - description: "Upload configuration" + description: "Agents restarted" content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - example: - message: Agent configuration was successfully updated - error: 0 + allOf: + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseAgentIDs' + example: + data: + affected_items: + - '001' + - '002' + - '003' + - '004' + - '005' + - '006' + - '007' + - '008' + total_affected_items: 8 + total_failed_items: 3 + failed_items: + - error: + code: 1707 + message: "Cannot send request, agent is not active" + remediation: "Please, check non-active agents connection and try again. 
+ Visit https://documentation.wazuh.com/4.9/user-manual/registering/index.html + and https://documentation.wazuh.com/4.9/user-manual/agents/agent-connection.html + to obtain more information on registering and connecting agents" + id: + - '009' + - '010' + - '011' + message: "Restart command was not sent to some agents" + error: 2 + '400': $ref: '#/components/responses/ResponseError' '401': $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' - '404': - $ref: '#/components/responses/ResourceNotFoundResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' - '406': - $ref: '#/components/responses/WrongContentTypeResponse' - '413': - $ref: '#/components/responses/RequestTooLargeResponse' '429': $ref: '#/components/responses/TooManyRequestsResponse' - /groups/{group_id}/files: + /agents/stats/distinct: get: tags: - - Groups - summary: "Get group files" - description: "Return the files placed under the group directory" - operationId: api.controllers.agent_controller.get_group_files + - Agents + summary: "List agents distinct" + description: "Return all the different combinations that agents have for the selected fields. It also indicates + the total number of agents that have each combination" + operationId: api.controllers.agent_controller.get_agent_fields x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/group:read' + - $ref: '#/x-rbac-catalog/actions/agent:read' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' - - $ref: '#/components/parameters/group_id' + - $ref: '#/components/parameters/fields' - $ref: '#/components/parameters/offset' - $ref: '#/components/parameters/limit' - $ref: '#/components/parameters/sort' - $ref: '#/components/parameters/search' - - $ref: '#/components/parameters/hash' - $ref: '#/components/parameters/query' - - $ref: '#/components/parameters/select' - - $ref: '#/components/parameters/distinct' responses: '200': - description: "Get group files" + description: "Get fields in agents" content: application/json: schema: @@ -8710,34 +8885,20 @@ paths: - type: object properties: data: - $ref: '#/components/schemas/AllItemsResponse' + $ref: '#/components/schemas/AllItemsResponseAgentsDistinct' example: data: affected_items: - - filename: agent.conf - hash: ab73af41699f13fdd81903b5f23d8d00 - - filename: ar.conf - hash: 76d8be9b97d8eae4c239e530ee7e71c8 - - filename: merged.mg - hash: f8d49771911ed9d5c45b03a40babd065 - - filename: rootkit_files.txt - hash: e5ddcac443143cef6237d5f9b8d48585 - - filename: rootkit_trojans.txt - hash: 6bcf7016d3e6b4c7faa62cf265c24dcc - - filename: system_audit_rcl.txt - hash: be69b84dd5ee73200bb903a46270e18c - - filename: system_audit_ssh.txt - hash: 407c1f5e103f0cb58249eb7252a84797 - - filename: win_applications_rcl.txt - hash: 0a4ad12c8145aca8a28d31de5c448b48 - - filename: win_audit_rcl.txt - hash: 92d8011facc8b921ece301ea4ce6a616 - - filename: win_malware_rcl.txt - hash: f5e0305e248bd00e05445f329bd1dd5e - total_affected_items: 10 + - os: + version: "20.04.2 LTS" + count: 2 + - os: + version: "N/A" + count: 1 + total_affected_items: 3 total_failed_items: 0 failed_items: [] - message: 'All selected groups files were returned' + message: 'All selected agents information was returned' error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -8745,70 +8906,40 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' - '404': - $ref: 
'#/components/responses/ResourceNotFoundResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': $ref: '#/components/responses/TooManyRequestsResponse' - /groups/{group_id}/files/{file_name}: + /agents/summary/os: get: tags: - - Groups - summary: "Get a file in group" - description: "Return the content of the specified group file" - operationId: api.controllers.agent_controller.get_group_file + - Agents + summary: "Summarize agents OS" + description: "Return a summary of the OS of available agents" + operationId: api.controllers.agent_controller.get_agent_summary_os x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/group:read' + - $ref: '#/x-rbac-catalog/actions/agent:read' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' - - $ref: '#/components/parameters/group_id' - - $ref: '#/components/parameters/file_name' - - $ref: '#/components/parameters/type_agents' - - $ref: '#/components/parameters/raw' responses: '200': - description: "Get group file in json format" + description: "Get summary of agents OS" content: application/json: schema: - properties: - data: - oneOf: - - type: array - - type: object - description: "The output format depends on the type of file that has been requested: rootkit - file, rootkit trojans or rcl" - application/xml: - schema: - properties: - data: - oneOf: - - type: array - - type: object - description: "The output format depends on the type of file that has been requested: rootkit - file, rootkit trojans or rcl" + allOf: + - $ref: '#/components/schemas/ApiResponse' example: data: - vars: None - controls: - - name: "CIS - Testing against the CIS Debian Linux Benchmark v1." - cis: [] - pci: [] - condition: "all required" - reference: "CIS_Debian_Benchmark_v1.0pdf" - checks: - - "f:/etc/debian_version;" - - "f:/proc/sys/kernel/ostype -> Linux;" - - name: "CIS - Debian Linux - 1.4 - Robust partition scheme - /tmp is not on its own partition" - cis: [] - pci: [] - condition: "any" - reference: "https://benchmarks.cisecurity.org/tools2/linux/CIS_Debian_Benchmark_v1.0.pdf" - checks: - - "f:/etc/fstab -> !r:/tmp;" + affected_items: + - ubuntu + totalItems: 1 + total_affected_items: 1 + total_failed_items: 0 + failed_items: [] + message: "Showing the operative system of all specified agents" error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -8816,83 +8947,47 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' - '404': - $ref: '#/components/responses/ResourceNotFoundResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': $ref: '#/components/responses/TooManyRequestsResponse' - /agents/insert: - post: + /agents/summary/status: + get: tags: - Agents - summary: "Add agent full" - description: "Add an agent specifying its name, ID and IP. 
If an agent with the same name, the same ID or the - same IP already exists, replace it using the `force` parameter" - operationId: api.controllers.agent_controller.insert_agent + summary: "Summarize agents status" + description: "Return a summary of the connection and groups configuration synchronization statuses of available agents" + operationId: api.controllers.agent_controller.get_agent_summary_status x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/agent:create' + - $ref: '#/x-rbac-catalog/actions/agent:read' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' - requestBody: - content: - application/json: - schema: - type: object - properties: - id: - $ref: '#/components/schemas/AgentID' - key: - type: string - maxLength: 64 - minLength: 64 - format: wazuh_key - description: "Key to use when communicating with the manager. The agent must have the same key on - its `client.keys` file" - name: - description: "Agent name" - type: string - format: names - ip: - description: "If this is not included, the API will get the IP automatically. Allowed values: - IP, IP/NET, ANY" - type: string - format: alphanumeric - force: - $ref: '#/components/schemas/AgentInsertForce' - required: - - name - example: - name: NewHost_2 - ip: 10.0.10.11 - id: "123" - key: 1abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghi64 - force: - enabled: True - disconnected_time: - enabled: True - value: "30m" - after_registration_time: "2h" - responses: '200': - description: "Insert new agent" + description: "Get summary of agents connection and group configuration statuses" content: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AgentIdKey' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AgentsSummaryStatus' example: data: - id: "010" - key: "MDEwIHRlc3RfYWdlbnRfd2F6dWhfIDE3Mi4xNi4xNi4xNyBmZTNhYzUyYmUxMDc2MDgwOGI4Y2JiZTczNzFjZDIzMWJjYjF - jZjUyMDVjYjMzNTI1YzhjZDEwZGIwYTkyM2E4" + connection: + active: 8 + disconnected: 0 + never_connected: 3 + pending: 0 + total: 11 + configuration: + synced: 8 + not_synced: 3 + total: 11 error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -8902,44 +8997,45 @@ paths: $ref: '#/components/responses/PermissionDeniedResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' - '406': - $ref: '#/components/responses/WrongContentTypeResponse' - '413': - $ref: '#/components/responses/RequestTooLargeResponse' '429': $ref: '#/components/responses/TooManyRequestsResponse' - /agents/insert/quick: - post: + /groups: + delete: tags: - - Agents - summary: "Add agent quick" - description: "Add a new agent with name `agent_name`. 
This agent will use `any` as IP" - operationId: api.controllers.agent_controller.post_new_agent + - Groups + summary: "Delete groups" + description: "Delete all groups or a list of them" + operationId: api.controllers.agent_controller.delete_groups x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/agent:create' + - $ref: '#/x-rbac-catalog/actions/group:delete' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' - - $ref: '#/components/parameters/agent_name' + - $ref: '#/components/parameters/groups_list_all' responses: '200': - description: "Agent added" + description: "Remove multiple group of multiple agents" content: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AgentIdKey' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + allOf: + - $ref: '#/components/schemas/AgentGroupDeleted' example: data: - id: "011" - key: "MDExIHRlc3Rfd2F6dWhfcXVpY2sgYW55IDc3ZjNhY2RmMzBhN2ZlNzY0ODVmZTQ3YjUzMzc5ZWQ3ZDA1NDVhYzIyZDQ1NGY - 4YzcyZTZhNDU1MjVjYzY3OTM=" - error: 0 + affected_items: + - webserver + - dataserver + total_affected_items: 2 + total_failed_items: 0 + failed_items: [] + message: "All selected groups were deleted" + error: 0 '400': $ref: '#/components/responses/ResponseError' '401': @@ -8950,65 +9046,95 @@ paths: $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': $ref: '#/components/responses/TooManyRequestsResponse' - - /agents/no_group: get: tags: - - Agents - summary: "List agents without group" - description: "Return a list with all the available agents without an assigned group" - operationId: api.controllers.agent_controller.get_agent_no_group + - Groups + summary: "Get groups" + description: "Get information about all groups or a list of them. 
Returns a list containing basic information + about each group such as number of agents belonging to the group and the checksums of the configuration and + shared files" + operationId: api.controllers.agent_controller.get_list_group x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/agent:read' + - $ref: '#/x-rbac-catalog/actions/group:read' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' + - $ref: '#/components/parameters/groups_list' - $ref: '#/components/parameters/offset' - $ref: '#/components/parameters/limit' - - $ref: '#/components/parameters/select' - $ref: '#/components/parameters/sort' - $ref: '#/components/parameters/search' + - $ref: '#/components/parameters/hash' - $ref: '#/components/parameters/query' + - $ref: '#/components/parameters/select' + - $ref: '#/components/parameters/distinct' responses: '200': - description: "Get agents without group" + description: "List all groups" content: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseAgents' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseGroups' example: data: affected_items: - - node_name: unknown - ip: 172.16.16.16 - id: 009 - name: test_agent_wazuh - status: never_connected - dateAdd: 2021-05-27T10:06:13Z - registerIP: 172.16.16.16 - - node_name: unknown - ip: 172.16.16.17 - id: 010 - name: test_agent_wazuh2 - status: never_connected - dateAdd: 2021-05-27T10:06:27Z - registerIP: 172.16.16.17 - - node_name: unknown - ip: any - id: 011 - name: test_wazuh_quick - status: never_connected - dateAdd: 2021-05-27T10:07:37Z - registerIP: any + - name: default + count: 2 + mergedSum: f8d49771911ed9d5c45b03a40babd065 + configSum: ab73af41699f13fdd81903b5f23d8d00 + - name: dmz + count: 3 + mergedSum: 220d6c5fc253f251827ee7487341c0fc + configSum: cfbae9ecc10eb15f1b4fc736de6758cc + - name: pciserver + count: 0 + mergedSum: 220d6c5fc253f251827ee7487341c0fc + configSum: ab73af41699f13fdd81903b5f23d8d00 total_affected_items: 3 total_failed_items: 0 failed_items: [] - message: All selected agents information was returned + message: "All selected groups information was returned" + error: 0 + '400': + $ref: '#/components/responses/ResponseError' + '401': + $ref: '#/components/responses/UnauthorizedResponse' + '403': + $ref: '#/components/responses/PermissionDeniedResponse' + '405': + $ref: '#/components/responses/InvalidHTTPMethodResponse' + '429': + $ref: '#/components/responses/TooManyRequestsResponse' + post: + tags: + - Groups + summary: "Create a group" + description: "Create a new group" + operationId: api.controllers.agent_controller.post_group + x-rbac-actions: + - $ref: '#/x-rbac-catalog/actions/group:create' + parameters: + - $ref: '#/components/parameters/pretty' + - $ref: '#/components/parameters/wait_for_complete' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateGroupBody' + responses: + '200': + description: "Add new agent" + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + example: + message: "Group 'NewGroup_1' created" error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -9021,75 +9147,31 @@ paths: '429': $ref: '#/components/responses/TooManyRequestsResponse' - /agents/node/{node_id}/restart: - put: - tags: - - Agents - summary: "Restart agents in node" - description: "Restart all 
agents which belong to a specific given node" - operationId: api.controllers.agent_controller.restart_agents_by_node - x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/cluster:read' - - $ref: '#/x-rbac-catalog/actions/agent:restart' - parameters: - - $ref: '#/components/parameters/pretty' - - $ref: '#/components/parameters/wait_for_complete' - - $ref: '#/components/parameters/node_id' - responses: - '200': - description: 'Agents restarted' - content: - application/json: - schema: - allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseAgentIDs' - example: - data: - affected_items: - - "002" - - "006" - - "008" - total_affected_items: 3 - total_failed_items: 0 - failed_items: [] - message: "Restart command was sent to all agents" - error: 0 - '400': - $ref: '#/components/responses/ResponseError' - '401': - $ref: '#/components/responses/UnauthorizedResponse' - '403': - $ref: '#/components/responses/PermissionDeniedResponse' - '405': - $ref: '#/components/responses/InvalidHTTPMethodResponse' - '429': - $ref: '#/components/responses/TooManyRequestsResponse' - - /agents/outdated: + /groups/{group_id}/agents: get: tags: - - Agents - summary: "List outdated agents" - description: "Return the list of outdated agents" - operationId: api.controllers.agent_controller.get_agent_outdated + - Groups + summary: "Get agents in a group" + description: "Return the list of agents that belong to the specified group" + operationId: api.controllers.agent_controller.get_agents_in_group x-rbac-actions: - $ref: '#/x-rbac-catalog/actions/agent:read' + - $ref: '#/x-rbac-catalog/actions/group:read' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' + - $ref: '#/components/parameters/group_id' - $ref: '#/components/parameters/offset' - $ref: '#/components/parameters/limit' + - $ref: '#/components/parameters/select' - $ref: '#/components/parameters/sort' - $ref: '#/components/parameters/search' - - $ref: '#/components/parameters/select' + - $ref: '#/components/parameters/statusAgentParam' - $ref: '#/components/parameters/query' + - $ref: '#/components/parameters/distinct' responses: '200': - description: "Get outdated agents" + description: "List of agents or error description" content: application/json: schema: @@ -9105,57 +9187,85 @@ paths: - os: arch: x86_64 codename: Focal Fossa - major: '20' - minor: '04' + major: 20 + minor: 04 name: Ubuntu platform: ubuntu - uname: Linux |ac7cb188d538 |5.8.0-45-generic |#51~20.04.1-Ubuntu SMP Tue Feb + uname: Linux |b2497efbf876 |5.8.0-45-generic |#51~20.04.1-Ubuntu SMP Tue Feb 23 13:46:31 UTC 2021 |x86_64 version: 20.04.2 LTS - lastKeepAlive: '2024-02-26T12:40:40Z' - id: '001' - dateAdd: 2024-02-26T12:40:08Z - configSum: ab73af41699f13fdd81903b5f23d8d00 - manager: wazuh-worker2 - group: [default] - registerIP: Any - ip: 172.25.0.6 - name: ac7cb188d538 + mergedSum: 2c769b2ea138d472ee8f1ba23412b5d4 + node_name: worker1 + ip: 172.20.0.7 + id: 004 + manager: wazuh-worker1 + group: + - default + - group1 + name: b2497efbf876 + configSum: 052374472f3a0d5c8508241dcc455ea7 status: active - mergedSum: 9a016508cea1e997ab8569f5cfab30f5 - version: Wazuh v3.0.0 - node_name: worker2 - group_config_status: "synced" + dateAdd: 2021-05-27T09:14:19Z + registerIP: any + lastKeepAlive: 2021-05-27T09:23:59Z + version: Wazuh v4.3.0 status_code: 0 - os: arch: x86_64 codename: Focal Fossa - major: '20' - minor: '04' + major: 20 + minor: 04 name: Ubuntu platform: 
ubuntu - uname: Linux |ac7cb188d538 |5.8.0-45-generic |#51~20.04.1-Ubuntu SMP Tue Feb + uname: Linux |600e27371700 |5.8.0-45-generic |#51~20.04.1-Ubuntu SMP Tue Feb 23 13:46:31 UTC 2021 |x86_64 version: 20.04.2 LTS - lastKeepAlive: '2024-02-26T12:40:40Z' - id: '002' - dateAdd: 2024-02-26T12:40:10Z + mergedSum: 9a016508cea1e997ab8569f5cfab30f5 + node_name: worker1 + ip: 172.20.0.9 + id: 005 + manager: wazuh-worker1 + group: + - default + - group2 + name: 600e27371700 configSum: ab73af41699f13fdd81903b5f23d8d00 - manager: wazuh-worker2 - group: [default] - registerIP: Any - ip: 172.25.0.11 - name: 91642a418627 status: active + dateAdd: 2021-05-27T09:14:19Z + registerIP: any + lastKeepAlive: 2021-05-27T09:23:52Z + version: Wazuh v4.3.0 + status_code: 0 + - os: + arch: x86_64 + codename: Focal Fossa + major: 20 + minor: 04 + name: Ubuntu + platform: ubuntu + uname: Linux |4bdac19ce5e3 |5.8.0-45-generic |#51~20.04.1-Ubuntu SMP Tue Feb + 23 13:46:31 UTC 2021 |x86_64 + version: 20.04.2 LTS mergedSum: 9a016508cea1e997ab8569f5cfab30f5 - version: Wazuh v3.0.0 node_name: worker2 - group_config_status: "synced" + ip: 172.20.0.10 + id: 006 + manager: wazuh-worker2 + group: + - default + - group3 + name: 4bdac19ce5e3 + configSum: ab73af41699f13fdd81903b5f23d8d00 + status: active + dateAdd: 2021-05-27T09:14:19Z + registerIP: any + lastKeepAlive: 2021-05-27T09:23:52Z + version: Wazuh v4.3.0 status_code: 0 - total_affected_items: 2 + total_affected_items: 3 total_failed_items: 0 failed_items: [] - message: 'All selected agents information was returned' + message: "All selected agents information was returned" error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -9163,144 +9273,154 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' + '404': + $ref: '#/components/responses/ResourceNotFoundResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': $ref: '#/components/responses/TooManyRequestsResponse' - /agents/reconnect: - put: + /groups/{group_id}/configuration: + get: tags: - - Agents - summary: "Force reconnect agents" - description: "Force reconnect all agents or a list of them" - operationId: api.controllers.agent_controller.reconnect_agents + - Groups + summary: "Get group configuration" + description: "Return the group configuration defined in the `agent.conf` file" + operationId: api.controllers.agent_controller.get_group_config x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/agent:reconnect' + - $ref: '#/x-rbac-catalog/actions/group:read' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' - - $ref: '#/components/parameters/agents_list' + - $ref: '#/components/parameters/group_id' + - $ref: '#/components/parameters/offset' + - $ref: '#/components/parameters/limit' responses: '200': - description: "Agents reconnected" + description: "Get group configuration" content: application/json: schema: - allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseAgentIDs' - example: + type: object + properties: data: - affected_items: - - '001' - - '002' - - '003' - total_affected_items: 3 - total_failed_items: 0 - failed_items: [] - message: "Force reconnect command was sent to all agents" - error: 0 + type: object + properties: + affected_items: + type: array + items: + $ref: '#/components/schemas/GroupConfiguration' + total_affected_items: + type: integer + format: 
int32 + example: + data: + total_affected_items: 3 + affected_items: + - filters: + name: agent_name + config: + localfile: + - location: "/var/log/my.log" + log_format: syslog + - filters: + os: Linux + config: + localfile: + - location: "/var/log/linux.log" + log_format: syslog + - filters: + profile: database + config: + localfile: + - location: "/var/log/database.log" + log_format: syslog + error: 0 '400': $ref: '#/components/responses/ResponseError' '401': $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' + '404': + $ref: '#/components/responses/ResourceNotFoundResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': $ref: '#/components/responses/TooManyRequestsResponse' - - /agents/restart: put: tags: - - Agents - summary: "Restart agents" - description: "Restart all agents or a list of them" - operationId: api.controllers.agent_controller.restart_agents + - Groups + summary: "Update group configuration" + description: "Update an specified group's configuration. This API call expects a full valid XML file with the + shared configuration tags/syntax" + operationId: api.controllers.agent_controller.put_group_config x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/agent:restart' + - $ref: '#/x-rbac-catalog/actions/group:update_config' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' - - $ref: '#/components/parameters/agents_list' + - $ref: '#/components/parameters/group_id' + requestBody: + required: true + content: + application/xml: + schema: + properties: + tmp_file: + description: "File to update" + type: string responses: '200': - description: "Agents restarted" + description: "Upload configuration" content: application/json: schema: - allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseAgentIDs' - example: - data: - affected_items: - - '001' - - '002' - - '003' - - '004' - - '005' - - '006' - - '007' - - '008' - total_affected_items: 8 - total_failed_items: 3 - failed_items: - - error: - code: 1707 - message: "Cannot send request, agent is not active" - remediation: "Please, check non-active agents connection and try again. - Visit https://documentation.wazuh.com/4.9/user-manual/registering/index.html - and https://documentation.wazuh.com/4.9/user-manual/agents/agent-connection.html - to obtain more information on registering and connecting agents" - id: - - '009' - - '010' - - '011' - message: "Restart command was not sent to some agents" - error: 2 - + $ref: '#/components/schemas/ApiResponse' + example: + message: Agent configuration was successfully updated + error: 0 '400': $ref: '#/components/responses/ResponseError' '401': $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' + '404': + $ref: '#/components/responses/ResourceNotFoundResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' + '406': + $ref: '#/components/responses/WrongContentTypeResponse' + '413': + $ref: '#/components/responses/RequestTooLargeResponse' '429': $ref: '#/components/responses/TooManyRequestsResponse' - /agents/stats/distinct: + /groups/{group_id}/files: get: tags: - - Agents - summary: "List agents distinct" - description: "Return all the different combinations that agents have for the selected fields. 
It also indicates - the total number of agents that have each combination" - operationId: api.controllers.agent_controller.get_agent_fields + - Groups + summary: "Get group files" + description: "Return the files placed under the group directory" + operationId: api.controllers.agent_controller.get_group_files x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/agent:read' + - $ref: '#/x-rbac-catalog/actions/group:read' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' - - $ref: '#/components/parameters/fields' + - $ref: '#/components/parameters/group_id' - $ref: '#/components/parameters/offset' - $ref: '#/components/parameters/limit' - $ref: '#/components/parameters/sort' - $ref: '#/components/parameters/search' + - $ref: '#/components/parameters/hash' - $ref: '#/components/parameters/query' + - $ref: '#/components/parameters/select' + - $ref: '#/components/parameters/distinct' responses: '200': - description: "Get fields in agents" + description: "Get group files" content: application/json: schema: @@ -9309,20 +9429,34 @@ paths: - type: object properties: data: - $ref: '#/components/schemas/AllItemsResponseAgentsDistinct' + $ref: '#/components/schemas/AllItemsResponseGroupFiles' example: data: affected_items: - - os: - version: "20.04.2 LTS" - count: 2 - - os: - version: "N/A" - count: 1 - total_affected_items: 3 + - filename: agent.conf + hash: ab73af41699f13fdd81903b5f23d8d00 + - filename: ar.conf + hash: 76d8be9b97d8eae4c239e530ee7e71c8 + - filename: merged.mg + hash: f8d49771911ed9d5c45b03a40babd065 + - filename: rootkit_files.txt + hash: e5ddcac443143cef6237d5f9b8d48585 + - filename: rootkit_trojans.txt + hash: 6bcf7016d3e6b4c7faa62cf265c24dcc + - filename: system_audit_rcl.txt + hash: be69b84dd5ee73200bb903a46270e18c + - filename: system_audit_ssh.txt + hash: 407c1f5e103f0cb58249eb7252a84797 + - filename: win_applications_rcl.txt + hash: 0a4ad12c8145aca8a28d31de5c448b48 + - filename: win_audit_rcl.txt + hash: 92d8011facc8b921ece301ea4ce6a616 + - filename: win_malware_rcl.txt + hash: f5e0305e248bd00e05445f329bd1dd5e + total_affected_items: 10 total_failed_items: 0 failed_items: [] - message: 'All selected agents information was returned' + message: 'All selected groups files were returned' error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -9330,88 +9464,71 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' + '404': + $ref: '#/components/responses/ResourceNotFoundResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': $ref: '#/components/responses/TooManyRequestsResponse' - /agents/summary/os: + /groups/{group_id}/files/{file_name}: get: tags: - - Agents - summary: "Summarize agents OS" - description: "Return a summary of the OS of available agents" - operationId: api.controllers.agent_controller.get_agent_summary_os + - Groups + summary: "Get a file in group" + description: "Return the content of the specified group file" + operationId: api.controllers.agent_controller.get_group_file x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/agent:read' + - $ref: '#/x-rbac-catalog/actions/group:read' parameters: - $ref: '#/components/parameters/pretty' - $ref: '#/components/parameters/wait_for_complete' + - $ref: '#/components/parameters/group_id' + - $ref: '#/components/parameters/file_name' + - $ref: '#/components/parameters/type_agents' + - $ref: '#/components/parameters/raw' responses: '200': - description: "Get summary 
of agents OS" + description: "Get group file in json format" content: application/json: schema: - allOf: - - $ref: '#/components/schemas/ApiResponse' - example: - data: - affected_items: - - ubuntu - totalItems: 1 - total_affected_items: 1 - total_failed_items: 0 - failed_items: [] - message: "Showing the operative system of all specified agents" - error: 0 - '400': - $ref: '#/components/responses/ResponseError' - '401': - $ref: '#/components/responses/UnauthorizedResponse' - '403': - $ref: '#/components/responses/PermissionDeniedResponse' - '405': - $ref: '#/components/responses/InvalidHTTPMethodResponse' - '429': - $ref: '#/components/responses/TooManyRequestsResponse' - - /agents/summary/status: - get: - tags: - - Agents - summary: "Summarize agents status" - description: "Return a summary of the connection and groups configuration synchronization statuses of available agents" - operationId: api.controllers.agent_controller.get_agent_summary_status - x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/agent:read' - parameters: - - $ref: '#/components/parameters/pretty' - - $ref: '#/components/parameters/wait_for_complete' - responses: - '200': - description: "Get summary of agents connection and group configuration statuses" - content: - application/json: + properties: + data: + oneOf: + - type: array + - type: object + description: "The output format depends on the type of file that has been requested: rootkit + file, rootkit trojans or rcl" + application/xml: schema: - allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AgentsSummaryStatus' + properties: + data: + oneOf: + - type: array + - type: object + description: "The output format depends on the type of file that has been requested: rootkit + file, rootkit trojans or rcl" example: data: - connection: - active: 8 - disconnected: 0 - never_connected: 3 - pending: 0 - total: 11 - configuration: - synced: 8 - not_synced: 3 - total: 11 + vars: + web_dirs: "/var/www,/var/htdocs,/home/httpd,/usr/local/apache,/usr/local/apache2,/usr/local/www;" + controls: + - name: "CIS - Testing against the CIS Debian Linux Benchmark v1." 
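      # Illustrative usage sketch (editor's annotation, not part of the example data): fetching a
      # file from a group's shared directory with Python `requests`. The API address, JWT token
      # placeholder, group name 'default' and file name 'agent.conf' are assumptions for the sketch;
      # the 'raw' parameter referenced above presumably returns the file as plain text.
      #
      #   import requests
      #
      #   r = requests.get("https://localhost:55000/groups/default/files/agent.conf",
      #                    params={"raw": "true"},
      #                    headers={"Authorization": "Bearer <jwt>"},
      #                    verify=False)  # self-signed certificate assumed
      #   print(r.text)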
+ cis: [] + pci: [] + condition: "all required" + reference: "CIS_Debian_Benchmark_v1.0pdf" + checks: + - "f:/etc/debian_version;" + - "f:/proc/sys/kernel/ostype -> Linux;" + - name: "CIS - Debian Linux - 1.4 - Robust partition scheme - /tmp is not on its own partition" + cis: [] + pci: [] + condition: "any" + reference: "https://benchmarks.cisecurity.org/tools2/linux/CIS_Debian_Benchmark_v1.0.pdf" + checks: + - "f:/etc/fstab -> !r:/tmp;" error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -9419,6 +9536,8 @@ paths: $ref: '#/components/responses/UnauthorizedResponse' '403': $ref: '#/components/responses/PermissionDeniedResponse' + '404': + $ref: '#/components/responses/ResourceNotFoundResponse' '405': $ref: '#/components/responses/InvalidHTTPMethodResponse' '429': @@ -9427,7 +9546,7 @@ paths: /ciscat/{agent_id}/results: get: tags: - - Ciscat + - Ciscat summary: "Get results" description: "Return the agent's ciscat results info" operationId: api.controllers.ciscat_controller.get_agents_ciscat_results @@ -9458,11 +9577,11 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseCiscatResult' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseCiscatResult' example: data: affected_items: @@ -9496,7 +9615,7 @@ paths: /cluster/local/info: get: tags: - - Cluster + - Cluster summary: "Get local node info" description: "Return basic information about the cluster node receiving the request" operationId: api.controllers.cluster_controller.get_cluster_node @@ -9512,20 +9631,8 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - properties: - node: - description: "Node name" - type: string - cluster: - description: "Cluster name the node belongs to" - type: string - type: - description: "Node type" - type: string + - $ref: '#/components/schemas/ApiResponse' + - $ref: '#/components/schemas/ClusterLocalInfo' example: data: affected_items: @@ -9548,10 +9655,60 @@ paths: '429': $ref: '#/components/responses/TooManyRequestsResponse' + /cluster/local/config: + get: + tags: + - Cluster + summary: "Get local node config" + description: "Return the current node cluster configuration" + operationId: api.controllers.cluster_controller.get_config + x-rbac-actions: + - $ref: '#/x-rbac-catalog/actions/cluster:read' + parameters: + - $ref: '#/components/parameters/pretty' + - $ref: '#/components/parameters/wait_for_complete' + responses: + '200': + description: "Cluster configuration" + content: + application/json: + schema: + allOf: + - $ref: '#/components/schemas/ApiResponse' + - $ref: '#/components/schemas/ClusterLocalConfig' + example: + data: + affected_items: + - name: wazuh + node_name: master-node + node_type: master + key: 9d273b53510fef702b54a92e9cffc82e + port: 1516 + bind_addr: 0.0.0.0 + nodes: + - wazuh-master + hidden: "no" + disabled: false + total_affected_items: 1 + total_failed_items: 0 + failed_items: [] + message: "All selected information was returned" + error: 0 + '400': + $ref: '#/components/responses/ResponseError' + '401': + $ref: '#/components/responses/UnauthorizedResponse' + '403': + $ref: '#/components/responses/PermissionDeniedResponse' + '405': + $ref: '#/components/responses/InvalidHTTPMethodResponse' + '429': + $ref: '#/components/responses/TooManyRequestsResponse' + /cluster/nodes: get: tags: - - Cluster + 
- Cluster summary: "Get nodes info" description: "Get information about all nodes in the cluster or a list of them" operationId: api.controllers.cluster_controller.get_cluster_nodes @@ -9576,11 +9733,11 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseClusterNodes' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseClusterNodes' example: data: affected_items: @@ -9615,7 +9772,7 @@ paths: /cluster/healthcheck: get: tags: - - Cluster + - Cluster summary: "Get nodes healthcheck" description: "Return cluster healthcheck information for all nodes or a list of them. Such information includes last keep alive, last synchronization time and number of agents reporting on each node" @@ -9736,7 +9893,7 @@ paths: /cluster/ruleset/synchronization: get: tags: - - Cluster + - Cluster summary: "Get cluster nodes ruleset synchronization status" description: "Return ruleset synchronization status for all nodes or a list of them. This synchronization only covers the user custom ruleset" @@ -9787,7 +9944,7 @@ paths: /cluster/status: get: tags: - - Cluster + - Cluster summary: "Get cluster status" description: "Return information about the cluster status" operationId: api.controllers.cluster_controller.get_status @@ -9803,23 +9960,8 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - properties: - enabled: - description: "Whether the cluster is enabled in the Wazuh configuration" - type: string - enum: - - "yes" - - "no" - running: - description: "Whether the cluster daemon is running" - type: string - enum: - - "yes" - - "no" + - $ref: '#/components/schemas/ApiResponse' + - $ref: '#/components/schemas/ClusterStatus' example: data: enabled: "yes" @@ -9836,92 +9978,6 @@ paths: '429': $ref: '#/components/responses/TooManyRequestsResponse' - /cluster/local/config: - get: - tags: - - Cluster - summary: "Get local node config" - description: "Return the current node cluster configuration" - operationId: api.controllers.cluster_controller.get_config - x-rbac-actions: - - $ref: '#/x-rbac-catalog/actions/cluster:read' - parameters: - - $ref: '#/components/parameters/pretty' - - $ref: '#/components/parameters/wait_for_complete' - responses: - '200': - description: "Cluster configuration" - content: - application/json: - schema: - allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - properties: - name: - description: "Cluster name" - type: string - node_name: - description: "Node name" - type: string - node_type: - description: "Node type" - type: string - enum: - - master - - worker - key: - description: "Cluster key used to encrypt messages" - type: string - port: - description: "Port used by the **master** node to communicate with workers" - type: integer - bind_addr: - description: "Network interface used by the **master** to listen to incoming connections" - type: string - nodes: - description: "List of cluster master nodes. 
This list is used by **worker** nodes to connect - to the master" - type: array - items: - type: string - hidden: - description: "Whether to hide the cluster information in the alerts" - type: string - disabled: - description: "Whether the cluster is enabled or not" - type: boolean - example: - data: - affected_items: - - name: wazuh - node_name: master-node - node_type: master - key: 9d273b53510fef702b54a92e9cffc82e - port: 1516 - bind_addr: 0.0.0.0 - nodes: - - wazuh-master - hidden: no - disabled: false - total_affected_items: 1 - total_failed_items: 0 - failed_items: [] - message: "All selected information was returned" - error: 0 - '400': - $ref: '#/components/responses/ResponseError' - '401': - $ref: '#/components/responses/UnauthorizedResponse' - '403': - $ref: '#/components/responses/PermissionDeniedResponse' - '405': - $ref: '#/components/responses/InvalidHTTPMethodResponse' - '429': - $ref: '#/components/responses/TooManyRequestsResponse' - /cluster/api/config: get: tags: @@ -9953,6 +10009,8 @@ paths: drop_privileges: true experimental_features: false max_upload_size: 10485760 + intervals: + request_timeout: 10 https: enabled: true key: "/var/ossec/api/configuration/ssl/server.key" @@ -9963,7 +10021,10 @@ paths: ssl_ciphers: "" logs: level: info - path: /var/ossec/logs/api.log + format: plain + max_size: + enabled: false + size: 1M cors: enabled: false source_route: "*" @@ -9974,13 +10035,19 @@ paths: max_login_attempts: 50 block_time: 300 max_request_per_minute: 300 - remote_commands: - localfile: - enabled: True - exceptions: [] - woodle_commands: - enabled: True - exceptions: [] + upload_configuration: + remote_commands: + localfile: + enabled: true + exceptions: [] + wodle_commands: + enabled: true + exceptions: [] + limits: + eps: + allow: true + agents: + allow_higher_versions: true - node_name: "worker1" node_api_config: host: 0.0.0.0 @@ -9988,6 +10055,8 @@ paths: drop_privileges: true experimental_features: false max_upload_size: 10485760 + intervals: + request_timeout: 10 https: enabled: true key: "/var/ossec/api/configuration/ssl/server.key" @@ -9997,8 +10066,11 @@ paths: ssl_protocol: "auto" ssl_ciphers: "" logs: - path: /var/ossec/logs/api.log level: info + format: plain + max_size: + enabled: false + size: 1M cors: enabled: false source_route: "*" @@ -10008,12 +10080,19 @@ paths: max_login_attempts: 50 block_time: 300 max_request_per_minute: 300 - remote_commands: - localfile: - enabled: True - exceptions: [] - woodle_commands: - enabled: True + upload_configuration: + remote_commands: + localfile: + enabled: true + exceptions: [] + wodle_commands: + enabled: true + exceptions: [] + limits: + eps: + allow: true + agents: + allow_higher_versions: true exceptions: [] - node_name: "worker2" node_api_config: @@ -10022,6 +10101,8 @@ paths: drop_privileges: true experimental_features: false max_upload_size: 10485760 + intervals: + request_timeout: 10 https: enabled: true key: "/var/ossec/api/configuration/ssl/server.key" @@ -10031,8 +10112,11 @@ paths: ssl_protocol: "auto" ssl_ciphers: "" logs: - path: /var/ossec/logs/api.log level: info + format: plain + max_size: + enabled: false + size: 1M cors: enabled: false source_route: "*" @@ -10042,13 +10126,19 @@ paths: max_login_attempts: 50 block_time: 300 max_request_per_minute: 300 - remote_commands: - localfile: - enabled: True - exceptions: [] - woodle_commands: - enabled: True - exceptions: [] + upload_configuration: + remote_commands: + localfile: + enabled: true + exceptions: [] + wodle_commands: + enabled: true + 
exceptions: [] + limits: + eps: + allow: true + agents: + allow_higher_versions: true total_affected_items: 3 total_failed_items: 0 failed_items: [] @@ -10068,7 +10158,7 @@ paths: /cluster/{node_id}/status: get: tags: - - Cluster + - Cluster summary: "Get node status" description: "Return the status of all Wazuh daemons in node node_id" operationId: api.controllers.cluster_controller.get_status_node @@ -10085,11 +10175,11 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/WazuhDaemonsStatus' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/WazuhDaemonsStatus' example: data: affected_items: @@ -10129,7 +10219,7 @@ paths: /cluster/{node_id}/info: get: tags: - - Cluster + - Cluster summary: "Get node info" description: "Return basic information about a specified node such as version, compilation date, installation path" @@ -10147,11 +10237,11 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/WazuhInfo' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/WazuhInfo' example: data: affected_items: @@ -10181,9 +10271,10 @@ paths: /cluster/{node_id}/configuration: get: tags: - - Cluster + - Cluster summary: "Get node config" - description: "Return wazuh configuration used in node {node_id}. The 'section' and 'field' parameters will be ignored if 'raw' parameter is provided." + description: "Return wazuh configuration used in node {node_id}. The 'section' and 'field' parameters will be + ignored if 'raw' parameter is provided." 
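      # Illustrative usage sketch (editor's annotation, not part of the schema): reading a single
      # section of a node's configuration with Python `requests`. The API address, JWT token
      # placeholder and node name 'master-node' are assumptions; 'auth' is one of the sections
      # shown in the example response below. As the description states, 'section' and 'field' are
      # ignored whenever 'raw' is provided.
      #
      #   import requests
      #
      #   r = requests.get("https://localhost:55000/cluster/master-node/configuration",
      #                    params={"section": "auth"},
      #                    headers={"Authorization": "Bearer <jwt>"},
      #                    verify=False)  # self-signed certificate assumed
      #   print(r.json()["data"]["affected_items"])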
operationId: api.controllers.cluster_controller.get_configuration_node x-rbac-actions: - $ref: '#/x-rbac-catalog/actions/cluster:read' @@ -10201,11 +10292,11 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/WazuhManagerConfiguration' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/WazuhManagerConfiguration' example: data: affected_items: @@ -10222,6 +10313,7 @@ paths: email_log_source: alerts.log agents_disconnection_time: "10m" agents_disconnection_alert_time: 0 + update_check: yes white_list: - 127.0.0.1 - ^localhost.localdomain$ @@ -10276,7 +10368,8 @@ paths: all: no item: yes processes: yes - synchronization: "\n " + synchronization: + max_eps: 10 sca: enabled: yes scan_on_start: yes @@ -10289,7 +10382,15 @@ paths: indexer: enabled: yes hosts: - host: http://127.0.0.1:9200 + - http://127.0.0.1:9200 + ssl: + certificate_authorities: + ca: + - /etc/filebeat/certs/root-ca.pem + certificate: + - /etc/filebeat/certs/filebeat.pem + key: + - /etc/filebeat/certs/filebeat-key.pem syscheck: disabled: no frequency: 43200 @@ -10389,13 +10490,6 @@ paths: disabled: no port: "1515" use_source_ip: "no" - force: - enabled: "yes" - key_mismatch: "yes" - disconnected_time: - enabled: "yes" - value: "1h" - after_registration_time: "1h" purge: "yes" use_password: "no" ciphers: "HIGH:!ADH:!EXP:!MD5:!RC4:!3DES:!CAMELLIA:@STRENGTH" @@ -10412,8 +10506,8 @@ paths: bind_addr: "0.0.0.0" nodes: - "wazuh-master" - hidden: no - disabled: no + hidden: "no" + disabled: "no" total_affected_items: 1 total_failed_items: 0 failed_items: [] @@ -10431,7 +10525,7 @@ paths: $ref: '#/components/responses/TooManyRequestsResponse' put: tags: - - Cluster + - Cluster summary: "Update node configuration" description: "Replace wazuh configuration for the given node with the data contained in the API request" operationId: api.controllers.cluster_controller.update_configuration @@ -10509,33 +10603,35 @@ paths: timestamp: 2022-07-21T10:48:32+00:00 name: wazuh-remoted metrics: - tcp_sessions: 0 - received_bytes: 0 - messages_received_breakdown: - event_messages: 0 - control_messages: 0 - control_breakdown: - request_messages: 0 - startup_messages: 0 - shutdown_messages: 0 - keepalive_messages: 0 - ping_messages: 0 - unknown_messages: 0 - dequeued_after_close_messages: 0 - discarded_messages: 0 - sent_bytes: 0 - messages_sent_breakdown: - ack_messages: 0 - shared_file_messages: 0 - ar_messages: 0 - cfga_messages: 0 - request_messages: 0 - discarded_messages: 0 - queue_status: - receive_queue_usage: 0 - receive_queue_size: 131072 + bytes: + received: 0 + sent: 0 keys_reload_count: 0 - update_shared_files_count: 42 + messages: + received_breakdown: + control: 0 + control_breakdown: + keepalive: 0 + request: 0 + shutdown: 0 + startup: 0 + dequeued_after: 0 + discarded: 0 + event: 0 + ping: 0 + unknown: 0 + sent_breakdown: + ack: 0 + ar: 0 + discarded: 0 + request: 0 + sca: 0 + shared: 0 + queues: + received: + size: 131072 + usage: 0 + tcp_sessions: 0 total_affected_items: 1 failed_items: [] total_failed_items: 0 @@ -10554,7 +10650,7 @@ paths: /cluster/{node_id}/stats: get: tags: - - Cluster + - Cluster summary: "Get node stats" description: "Return Wazuh statistical information in node {node_id} for the current or specified date" operationId: api.controllers.cluster_controller.get_stats_node @@ -10572,11 +10668,11 @@ paths: application/json: schema: allOf: - - $ref: 
'#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseWazuhStats' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseWazuhStats' example: data: affected_items: @@ -10638,7 +10734,7 @@ paths: /cluster/{node_id}/stats/hourly: get: tags: - - Cluster + - Cluster summary: "Get node stats hour" description: "Return Wazuh statistical information in node {node_id} per hour. Each number in the averages field represents the average of alerts per hour" @@ -10656,11 +10752,11 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseWazuhStats' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseWazuhStats' example: data: affected_items: @@ -10709,7 +10805,7 @@ paths: /cluster/{node_id}/stats/weekly: get: tags: - - Cluster + - Cluster summary: "Get node stats week" description: "Return Wazuh statistical information in node {node_id} per week. Each number in the averages field represents the average of alerts per hour for that specific day" @@ -10727,11 +10823,11 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseWazuhStats' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseWazuhStats' example: data: affected_items: @@ -10943,7 +11039,7 @@ paths: /cluster/{node_id}/stats/analysisd: get: tags: - - Cluster + - Cluster deprecated: true summary: "Get node stats analysisd" description: "Return Wazuh analysisd statistical information in node {node_id}" @@ -10959,53 +11055,58 @@ paths: description: "Wazuh node analysisd stats" content: application/json: - schema: - allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseWazuhStats' + schema: + allOf: + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseWazuhStats' example: data: affected_items: - - total_events_decoded: 5 - syscheck_events_decoded: 0 - syscollector_events_decoded: 0 - rootcheck_events_decoded: 0 - sca_events_decoded: 0 - winevt_events_decoded: 0 - other_events_decoded: 5 - events_processed: 5 - events_received: 5 - events_dropped: 0 - alerts_written: 0 - firewall_written: 0 - fts_written: 0 - syscheck_queue_usage: 0 - syscheck_queue_size: 16384 - syscollector_queue_usage: 0 - syscollector_queue_size: 16384 - rootcheck_queue_usage: 0 - rootcheck_queue_size: 16384 - sca_queue_usage: 0 - sca_queue_size: 16384 - hostinfo_queue_usage: 0 - hostinfo_queue_size: 16384 - winevt_queue_usage: 0 - winevt_queue_size: 16384 - event_queue_usage: 0 - event_queue_size: 16384 - rule_matching_queue_usage: 0 - rule_matching_queue_size: 16384 - alerts_queue_usage: 0 - alerts_queue_size: 16384 - firewall_queue_usage: 0 - firewall_queue_size: 16384 - statistical_queue_usage: 0 - statistical_queue_size: 16384 - archives_queue_usage: 0 - archives_queue_size: 16384 + - total_events_decoded: 113.0 + syscheck_events_decoded: 2.0 + syscollector_events_decoded: 0.0 + rootcheck_events_decoded: 4.0 + sca_events_decoded: 2.0 + winevt_events_decoded: 0.0 + dbsync_messages_dispatched: 55.0 + 
other_events_decoded: 50.0 + events_processed: 57.0 + events_received: 114.0 + events_dropped: 0.0 + alerts_written: 3.0 + firewall_written: 0.0 + fts_written: 0.0 + syscheck_queue_usage: 0.0 + syscheck_queue_size: 16384.0 + syscollector_queue_usage: 0.0 + syscollector_queue_size: 16384.0 + rootcheck_queue_usage: 0.0 + rootcheck_queue_size: 16384.0 + sca_queue_usage: 0.0 + sca_queue_size: 16384.0 + hostinfo_queue_usage: 0.0 + hostinfo_queue_size: 16384.0 + winevt_queue_usage: 0.0 + winevt_queue_size: 16384.0 + dbsync_queue_usage: 0.0 + dbsync_queue_size: 16384.0 + upgrade_queue_usage: 0.0 + upgrade_queue_size: 16384.0 + event_queue_usage: 0.0 + event_queue_size: 16384.0 + rule_matching_queue_usage: 0.0 + rule_matching_queue_size: 16384.0 + alerts_queue_usage: 0.0 + alerts_queue_size: 16384.0 + firewall_queue_usage: 0.0 + firewall_queue_size: 16384.0 + statistical_queue_usage: 0.0 + statistical_queue_size: 16384.0 + archives_queue_usage: 0.0 + archives_queue_size: 16384.0 total_affected_items: 1 total_failed_items: 0 failed_items: [] @@ -11025,7 +11126,7 @@ paths: /cluster/{node_id}/stats/remoted: get: tags: - - Cluster + - Cluster deprecated: true summary: "Get node stats remoted" description: "Return Wazuh remoted statistical information in node {node_id}" @@ -11043,20 +11144,20 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseWazuhStats' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseWazuhStats' example: data: affected_items: - - queue_size: 0 - total_queue_size: 131072 + - queue_size: 0.0 + total_queue_size: 131072.0 tcp_sessions: 4.0 evt_count: 9089.0 ctrl_msg_count: 2099.0 - discarded_count: 0 + discarded_count: 0.0 sent_bytes: 784598.0 recv_bytes: 4541779.0 dequeued_after_close: 0.0 @@ -11079,7 +11180,7 @@ paths: /cluster/{node_id}/logs: get: tags: - - Cluster + - Cluster summary: "Get node logs" description: "Return the last 2000 wazuh log entries in the specified node" operationId: api.controllers.cluster_controller.get_log_node @@ -11145,7 +11246,7 @@ paths: /cluster/{node_id}/logs/summary: get: tags: - - Cluster + - Cluster summary: "Get node logs summary" description: "Return a summary of the last 2000 wazuh log entries in the specified node" operationId: api.controllers.cluster_controller.get_log_summary_node @@ -11162,31 +11263,52 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/WazuhLogsSummary' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/WazuhLogsSummary' example: data: affected_items: - - wazuh-db: - all: 356 + - indexer-connector: + all: 24 info: 0 error: 0 critical: 0 + warning: 24 + debug: 0 + - wazuh-db: + info: 1 + all: 4 + critical: 0 + debug: 0 + error: 3 + warning: 0 + - wazuh-modulesd:content_manager: + info: 1 + all: 1 + critical: 0 + debug: 0 + error: 0 + warning: 0 + - wazuh-modulesd:router: + info: 1 + all: 1 + critical: 0 + debug: 0 + error: 0 warning: 0 - debug: 356 - wazuh-remoted: - all: 842 - info: 0 - error: 0 + info: 744 + all: 0 critical: 0 + debug: 744 + error: 0 warning: 0 - debug: 842 - total_affected_items: 2 - total_failed_items: 0 + total_affected_items: 5 failed_items: [] + total_failed_items: 0 message: "Log was successfully summarized in specified node" error: 0 
'400': @@ -11203,7 +11325,7 @@ paths: /cluster/restart: put: tags: - - Cluster + - Cluster summary: "Restart nodes" description: "Restart all nodes in the cluster or a list of them" operationId: api.controllers.cluster_controller.put_restart @@ -11235,7 +11357,7 @@ paths: total_affected_items: 3 total_failed_items: 0 failed_items: [] - message: "Restart request sent to all specified nodes" + message: "Restart request sent to all specified nodes" error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -11251,7 +11373,7 @@ paths: /cluster/configuration/validation: get: tags: - - Cluster + - Cluster summary: "Check nodes config" description: "Return whether the Wazuh configuration is correct or not in all cluster nodes or a list of them" operationId: api.controllers.cluster_controller.get_conf_validation @@ -11319,21 +11441,21 @@ paths: content: application/json: schema: - allOf: - - $ref: '#/components/schemas/ApiResponse' + properties: + data: + oneOf: + - type: array + - type: object + description: "The output format depends on the configuration and component requested" example: data: affected_items: - - name: "wazuh" - node_name: "master-node" - node_type: "master" - key: "9d273b53510fef702b54a92e9cffc82e" - port: 1516 - bind_addr: "0.0.0.0" - nodes: - - "wazuh-master" - hidden: "no" - disabled: "false" + - wdb: + backup: + database: global + enabled: true + interval: 86400 + max_files: 3 total_affected_items: 1 total_failed_items: 0 failed_items: [] @@ -11353,7 +11475,7 @@ paths: /lists: get: tags: - - Lists + - Lists summary: "Get CDB lists info" description: "Return the contents of all CDB lists. Optionally, the result can be filtered by several criteria. See available parameters for more details" @@ -11572,7 +11694,7 @@ paths: /lists/files: get: tags: - - Lists + - Lists summary: "Get CDB lists files" description: "Return the path from all CDB lists. 
Use this method to know all the CDB lists and their location in the filesystem relative to Wazuh installation folder" @@ -11724,7 +11846,8 @@ paths: example: error: 0 data: - messages: ["INFO: (7206): The session '573ed2ca' was closed successfully"] + messages: + - "INFO: (7206): The session '573ed2ca' was closed successfully" codemsg: 0 '400': @@ -11745,7 +11868,7 @@ paths: /manager/status: get: tags: - - Manager + - Manager summary: "Get status" description: "Return the status of all Wazuh daemons" operationId: api.controllers.manager_controller.get_status @@ -11761,11 +11884,11 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/WazuhDaemonsStatus' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/WazuhDaemonsStatus' example: data: affected_items: @@ -11805,7 +11928,7 @@ paths: /manager/info: get: tags: - - Manager + - Manager summary: "Get information" description: "Return basic information such as version, compilation date, installation path" operationId: api.controllers.manager_controller.get_info @@ -11821,11 +11944,11 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/WazuhInfo' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/WazuhInfo' example: data: affected_items: @@ -11855,7 +11978,7 @@ paths: /manager/configuration: get: tags: - - Manager + - Manager summary: "Get configuration" description: "Return wazuh configuration used. The 'section' and 'field' parameters will be ignored if 'raw' parameter is provided." 
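      # Illustrative usage sketch (editor's annotation, not part of the schema): reading one field
      # of the running manager configuration with Python `requests`. The API address and JWT token
      # placeholder are assumptions; 'global' and 'email_log_source' appear in the example response
      # below. As the description states, 'section' and 'field' are ignored if 'raw' is provided.
      #
      #   import requests
      #
      #   r = requests.get("https://localhost:55000/manager/configuration",
      #                    params={"section": "global", "field": "email_log_source"},
      #                    headers={"Authorization": "Bearer <jwt>"},
      #                    verify=False)  # self-signed certificate assumed
      #   print(r.json()["data"]["affected_items"])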
operationId: api.controllers.manager_controller.get_configuration @@ -11875,19 +11998,19 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/WazuhManagerConfiguration' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/WazuhManagerConfiguration' application/xml: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/WazuhManagerConfiguration' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/WazuhManagerConfiguration' example: data: affected_items: @@ -11903,6 +12026,7 @@ paths: email_maxperhour: 12 email_log_source: alerts.log queue_size: 131072 + update_check: yes white_list: - 127.0.0.1 - ^localhost.localdomain$ @@ -11941,7 +12065,7 @@ paths: $ref: '#/components/responses/TooManyRequestsResponse' put: tags: - - Manager + - Manager summary: "Update Wazuh configuration" description: "Replace Wazuh configuration with the data contained in the API request" operationId: api.controllers.manager_controller.update_configuration @@ -11990,7 +12114,7 @@ paths: /manager/daemons/stats: get: tags: - - Manager + - Manager summary: "Get Wazuh daemon stats" description: "Return Wazuh statistical information from specified daemons" operationId: api.controllers.manager_controller.get_daemon_stats @@ -12007,216 +12131,47 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseWazuhDaemonStats' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseWazuhDaemonStats' example: data: affected_items: - uptime: 2022-07-21T10:09:20+00:00 - timestamp: 2022-07-21T10:47:59+00:00 - name: wazuh-db + timestamp: 2022-07-21T10:48:32+00:00 + name: wazuh-remoted metrics: - queries_total: 1514 - queries_breakdown: - wazuhdb_queries: 0 - wazuhdb_queries_breakdown: - remove_queries: 0 - unknown_queries: 0 - agent_queries: 73 - agent_queries_breakdown: - sql_queries: 0 - remove_queries: 0 - begin_queries: 0 - commit_queries: 0 - close_queries: 0 - syscheck_queries: - syscheck_queries: 2 - fim_file_queries: 2 - fim_registry_queries: 0 - fim_registry_key_queries: 0 - fim_registry_value_queries: 0 - rootcheck_queries: - rootcheck_queries: 2 - sca_queries: - sca_queries: 6 - ciscat_queries: - ciscat_queries: 0 - syscollector_queries: - syscollector_processes_queries: 36 - syscollector_packages_queries: 1 - syscollector_hotfixes_queries: 0 - syscollector_ports_queries: 16 - syscollector_network_protocol_queries: 1 - syscollector_network_address_queries: 1 - syscollector_network_iface_queries: 2 - syscollector_hwinfo_queries: 2 - syscollector_osinfo_queries: 2 - process_queries: 0 - package_queries: 0 - hotfix_queries: 0 - port_queries: 0 - netproto_queries: 0 - netaddr_queries: 0 - netinfo_queries: 0 - hardware_queries: 0 - osinfo_queries: 0 - dbsync_queries: 0 - unknown_queries: 0 - global_queries: 161 - global_queries_breakdown: - sql_queries: 0 - backup_queries: 0 - agent_queries: - insert-agent_queries: 1 - update-agent-data_queries: 1 - update-agent-name_queries: 1 - update-keepalive_queries: 0 - update-connection-status_queries: 0 - reset-agents-connection_queries: 1 - delete-agent_queries: 0 - select-agent-name_queries: 2 - 
select-agent-group_queries: 1 - select-keepalive_queries: 0 - find-agent_queries: 0 - get-agent-info_queries: 39 - get-all-agents_queries: 40 - get-agents-by-connection-status_queries: 0 - disconnect-agents_queries: 0 - sync-agent-info-get_queries: 0 - sync-agent-info-set_queries: 37 - sync-agent-groups-get_queries: 36 - set-agent-groups_queries: 0 - get-groups-integrity_queries: 0 - group_queries: - insert-agent-group_queries: 0 - delete-group_queries: 0 - select-groups_queries: 1 - find-group_queries: 1 - belongs_queries: - delete-agent-belong_queries: 0 - select-group-belong_queries: 0 - get-group-agents_queries: 0 - labels_queries: - set-labels_queries: 0 - get-labels_queries: 0 - unknown_queries: 0 - task_queries: 3 - task_queries_breakdown: - sql_queries: 0 - upgrade_queries: - upgrade_queries: 0 - upgrade_custom_queries: 0 - upgrade_get_status_queries: 0 - upgrade_update_status_queries: 0 - upgrade_result_queries: 0 - upgrade_cancel_tasks_queries: 1 - set_timeout_queries: 1 - delete_old_queries: 1 - unknown_queries: 0 - mitre_queries: 1277 - mitre_queries_breakdown: - sql_queries: 1277 - unknown_queries: 0 - unknown_queries: 0 - queries_time_total: 493 - queries_time_breakdown: - wazuhdb_time: 0 - wazuhdb_time_breakdown: - remove_time: 0 - agent_time: 351 - agent_time_breakdown: - sql_time: 0 - remove_time: 0 - begin_time: 0 - commit_time: 0 - close_time: 0 - syscheck_time: - syscheck_time: 49 - fim_file_time: 30 - fim_registry_time: 0 - fim_registry_key_time: 0 - fim_registry_value_time: 0 - rootcheck_time: - rootcheck_time: 47 - sca_time: - sca_time: 0 - ciscat_time: - ciscat_time: 0 - syscollector_time: - syscollector_processes_time: 17 - syscollector_packages_time: 33 - syscollector_hotfixes_time: 0 - syscollector_ports_time: 47 - syscollector_network_protocol_time: 27 - syscollector_network_address_time: 26 - syscollector_network_iface_time: 21 - syscollector_hwinfo_time: 20 - syscollector_osinfo_time: 27 - process_time: 0 - package_time: 0 - hotfix_time: 0 - port_time: 0 - netproto_time: 0 - netaddr_time: 0 - netinfo_time: 0 - hardware_time: 0 - osinfo_time: 0 - dbsync_time: 0 - global_time: 14 - global_time_breakdown: - sql_time: 0 - backup_time: 0 - agent_time: - insert-agent_time: 0 - update-agent-data_time: 0 - update-agent-name_time: 0 - update-keepalive_time: 0 - update-connection-status_time: 0 - reset-agents-connection_time: 0 - delete-agent_time: 0 - select-agent-name_time: 0 - select-agent-group_time: 0 - select-keepalive_time: 0 - find-agent_time: 0 - get-agent-info_time: 2 - get-all-agents_time: 1 - get-agents-by-connection-status_time: 0 - disconnect-agents_time: 0 - sync-agent-info-get_time: 0 - sync-agent-info-set_time: 6 - sync-agent-groups-get_time: 2 - set-agent-groups_time: 0 - get-groups-integrity_time: 0 - group_time: - insert-agent-group_time: 0 - delete-group_time: 0 - select-groups_time: 0 - find-group_time: 0 - belongs_time: - delete-agent-belong_time: 0 - select-group-belong_time: 0 - get-group-agents_time: 0 - labels_time: - set-labels_time: 0 - get-labels_time: 0 - task_time: 0 - task_time_breakdown: - sql_time: 0 - upgrade_time: - upgrade_time: 0 - upgrade_custom_time: 0 - upgrade_get_status_time: 0 - upgrade_update_status_time: 0 - upgrade_result_time: 0 - upgrade_cancel_tasks_time: 0 - set_timeout_time: 0 - delete_old_time: 0 - mitre_time: 128 - mitre_time_breakdown: - sql_time: 128 + bytes: + received: 0 + sent: 0 + keys_reload_count: 0 + messages: + received_breakdown: + control: 0 + control_breakdown: + keepalive: 0 + request: 0 + shutdown: 0 
+ startup: 0 + dequeued_after: 0 + discarded: 0 + event: 0 + ping: 0 + unknown: 0 + sent_breakdown: + ack: 0 + ar: 0 + discarded: 0 + request: 0 + sca: 0 + shared: 0 + queues: + received: + size: 131072 + usage: 0 + tcp_sessions: 0 total_affected_items: 1 failed_items: [] total_failed_items: 0 @@ -12234,7 +12189,7 @@ paths: /manager/stats: get: tags: - - Manager + - Manager summary: "Get stats" description: "Return Wazuh statistical information for the current or specified date" operationId: api.controllers.manager_controller.get_stats @@ -12251,11 +12206,11 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseWazuhStats' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseWazuhStats' example: data: affected_items: @@ -12302,7 +12257,7 @@ paths: /manager/stats/hourly: get: tags: - - Manager + - Manager summary: "Get stats hour" description: "Return Wazuh statistical information per hour. Each number in the averages field represents the average of alerts per hour" @@ -12319,11 +12274,11 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseWazuhStats' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseWazuhStats' example: data: affected_items: @@ -12370,7 +12325,7 @@ paths: /manager/stats/weekly: get: tags: - - Manager + - Manager summary: "Get stats week" description: "Return Wazuh statistical information per week. Each number in the averages field represents the average of alerts per hour for that specific day" @@ -12387,11 +12342,11 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseWazuhStats' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseWazuhStats' example: data: affected_items: @@ -12601,7 +12556,7 @@ paths: /manager/stats/analysisd: get: tags: - - Manager + - Manager deprecated: true summary: "Get stats analysisd" description: "Return Wazuh analysisd statistical information" @@ -12626,43 +12581,48 @@ paths: example: data: affected_items: - - total_events_decoded: 5 - syscheck_events_decoded: 0 - syscollector_events_decoded: 0 - rootcheck_events_decoded: 0 - sca_events_decoded: 0 - winevt_events_decoded: 0 - other_events_decoded: 5 - events_processed: 5 - events_received: 5 - events_dropped: 0 - alerts_written: 0 - firewall_written: 0 - fts_written: 0 - syscheck_queue_usage: 0 - syscheck_queue_size: 16384 - syscollector_queue_usage: 0 - syscollector_queue_size: 16384 - rootcheck_queue_usage: 0 - rootcheck_queue_size: 16384 - sca_queue_usage: 0 - sca_queue_size: 16384 - hostinfo_queue_usage: 0 - hostinfo_queue_size: 16384 - winevt_queue_usage: 0 - winevt_queue_size: 16384 - event_queue_usage: 0 - event_queue_size: 16384 - rule_matching_queue_usage: 0 - rule_matching_queue_size: 16384 - alerts_queue_usage: 0 - alerts_queue_size: 16384 - firewall_queue_usage: 0 - firewall_queue_size: 16384 - statistical_queue_usage: 0 - statistical_queue_size: 16384 - archives_queue_usage: 0 - archives_queue_size: 16384 + - total_events_decoded: 113.0 + syscheck_events_decoded: 2.0 + syscollector_events_decoded: 
0.0 + rootcheck_events_decoded: 4.0 + sca_events_decoded: 2.0 + winevt_events_decoded: 0.0 + dbsync_messages_dispatched: 55.0 + other_events_decoded: 50.0 + events_processed: 57.0 + events_received: 114.0 + events_dropped: 0.0 + alerts_written: 3.0 + firewall_written: 0.0 + fts_written: 0.0 + syscheck_queue_usage: 0.0 + syscheck_queue_size: 16384.0 + syscollector_queue_usage: 0.0 + syscollector_queue_size: 16384.0 + rootcheck_queue_usage: 0.0 + rootcheck_queue_size: 16384.0 + sca_queue_usage: 0.0 + sca_queue_size: 16384.0 + hostinfo_queue_usage: 0.0 + hostinfo_queue_size: 16384.0 + winevt_queue_usage: 0.0 + winevt_queue_size: 16384.0 + dbsync_queue_usage: 0.0 + dbsync_queue_size: 16384.0 + upgrade_queue_usage: 0.0 + upgrade_queue_size: 16384.0 + event_queue_usage: 0.0 + event_queue_size: 16384.0 + rule_matching_queue_usage: 0.0 + rule_matching_queue_size: 16384.0 + alerts_queue_usage: 0.0 + alerts_queue_size: 16384.0 + firewall_queue_usage: 0.0 + firewall_queue_size: 16384.0 + statistical_queue_usage: 0.0 + statistical_queue_size: 16384.0 + archives_queue_usage: 0.0 + archives_queue_size: 16384.0 total_affected_items: 1 failed_items: [] total_failed_items: 0 @@ -12680,7 +12640,7 @@ paths: /manager/stats/remoted: get: tags: - - Manager + - Manager deprecated: true summary: "Get stats remoted" description: "Return Wazuh remoted statistical information" @@ -12697,22 +12657,23 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseWazuhStats' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseWazuhStats' example: data: affected_items: - - queue_size: 0 - total_queue_size: 131072 - tcp_sessions: 0 - evt_count: 0 - ctrl_msg_count: 0 - discarded_count: 0 - sent_bytes: 0 - recv_bytes: 0 + - queue_size: 0.0 + total_queue_size: 131072.0 + tcp_sessions: 4.0 + evt_count: 9089.0 + ctrl_msg_count: 2099.0 + discarded_count: 0.0 + sent_bytes: 784598.0 + recv_bytes: 4541779.0 + dequeued_after_close: 0.0 total_affected_items: 0 failed_items: [] total_failed_items: 0 @@ -12730,7 +12691,7 @@ paths: /manager/logs: get: tags: - - Manager + - Manager summary: "Get logs" description: "Return the last 2000 wazuh log entries" operationId: api.controllers.manager_controller.get_log @@ -12794,7 +12755,7 @@ paths: /manager/logs/summary: get: tags: - - Manager + - Manager summary: "Get logs summary" description: "Return a summary of the last 2000 wazuh log entries" operationId: api.controllers.manager_controller.get_log_summary @@ -12810,21 +12771,21 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/WazuhLogsSummary' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/WazuhLogsSummary' example: data: affected_items: - - wazuh-modulesd: - info: 2 - all: 2 + - indexer-connector: + all: 24 + info: 0 + error: 0 critical: 0 + warning: 24 debug: 0 - error: 0 - warning: 0 - wazuh-db: info: 1 all: 4 @@ -12832,17 +12793,31 @@ paths: debug: 0 error: 3 warning: 0 - - wazuh-rootcheck: - info: 8 - all: 8 + - wazuh-modulesd:content_manager: + info: 1 + all: 1 critical: 0 debug: 0 error: 0 warning: 0 - total_affected_items: 3 + - wazuh-modulesd:router: + info: 1 + all: 1 + critical: 0 + debug: 0 + error: 0 + warning: 0 + - wazuh-remoted: + info: 744 + all: 0 + critical: 0 + 
debug: 744 + error: 0 + warning: 0 + total_affected_items: 5 failed_items: [] total_failed_items: 0 - message: "Log was successfully summarized" + message: "Log was successfully summarized in specified node" error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -12881,6 +12856,11 @@ paths: node_api_config: host: 0.0.0.0 port: 55000 + drop_privileges: true + experimental_features: false + max_upload_size: 10485760 + intervals: + request_timeout: 10 https: enabled: true key: "/var/ossec/api/configuration/ssl/server.key" @@ -12889,21 +12869,38 @@ paths: ca: "/var/ossec/api/configuration/ssl/ca.crt" ssl_protocol: "auto" ssl_ciphers: "" - access: - max_login_attempts: 50 - block_time: 300 - max_request_per_minute: 300 logs: - path: /var/ossec/logs/api.log level: info + format: plain + max_size: + enabled: false + size: 1M cors: enabled: false source_route: "*" + expose_headers: "*" allow_headers: "*" allow_credentials: false - drop_privileges: true - experimental_features: false - max_upload_size: 10485760 + cache: + enabled: true + time: 0.75 + access: + max_login_attempts: 50 + block_time: 300 + max_request_per_minute: 300 + upload_configuration: + remote_commands: + localfile: + enabled: true + exceptions: [] + wodle_commands: + enabled: true + exceptions: [] + limits: + eps: + allow: true + agents: + allow_higher_versions: true total_affected_items: 1 total_failed_items: 0 failed_items: [] @@ -12923,7 +12920,7 @@ paths: /manager/restart: put: tags: - - Manager + - Manager summary: "Restart manager" description: "Restart the wazuh manager" operationId: api.controllers.manager_controller.put_restart @@ -12963,7 +12960,7 @@ paths: /manager/configuration/validation: get: tags: - - Manager + - Manager summary: "Check config" description: "Return whether the Wazuh configuration is correct" operationId: api.controllers.manager_controller.get_conf_validation @@ -13025,26 +13022,16 @@ paths: example: data: affected_items: - - global: - email_notification: "no" - logall: "no" - logall_json: "no" - integrity_checking: 8 - rootkit_detection: 8 - host_information: 8 - prelude_output: "no" - zeromq_output: "no" - jsonout_output: "yes" - alerts_log: "yes" - stats: 4 - memory_size: 8192 - white_list: - - "127.0.0.1" - - "80.58.61.250" - - "80.58.61.254" - - "localhost.localdomain" - rotate_interval: 0 - max_output_size: 0 + - name: wazuh + node_name: master-node + node_type: master + key: 9d273b53510fef702b54a92e9cffc82e + port: 1516 + bind_addr: "0.0.0.0" + nodes: + - wazuh-master + hidden: no + disabled: false total_affected_items: 1 total_failed_items: 0 failed_items: [] @@ -13064,7 +13051,7 @@ paths: /manager/version/check: get: tags: - - Manager + - Manager summary: "Check available updates" description: "Return if there is any available update" operationId: api.controllers.manager_controller.check_available_version @@ -13207,9 +13194,9 @@ paths: - url: "https://www.us-cert.gov/ncas/alerts/TA17-164A" description: "US-CERT. (2017, June 13). Alert (TA17-164A) HIDDEN COBRA – North Korea’s DDoS Botnet Infrastructure. Retrieved July 13, 2017." 
source: "US-CERT HIDDEN COBRA June 2017" - - url: "https://attack.mitre.org/groups/G0082" - external_id: "G0082" - source: "mitre-attack" + url: "https://attack.mitre.org/groups/G0082" + external_id: G0082 + source: mitre-attack total_affected_items: 111 total_failed_items: 0 failed_items: [] @@ -13309,18 +13296,18 @@ paths: data: affected_items: - mitre_version: "1.0" - deprecated: 1 - description: "Prevent files from having a trailing space after the extension." - name: "Space after Filename Mitigation" - id: "course-of-action--02f0f92a-0a51-4c94-9bda-6437b9a93f22" - modified_time: '2019-07-25T11:46:32.010000Z' - created_time: '2018-10-17T00:14:20.652000Z' + deprecated: 0 + description: "Use intrusion detection signatures to block traffic at network boundaries." + name: "Network Intrusion Prevention" + id: "course-of-action--12241367-a8b7-49b4-b86e-2236901ba50c" + modified_time: '2019-06-10T20:46:02.263000Z' + created_time: '2019-06-10T20:46:02.263000Z' techniques: - - "attack-pattern--e2907cea-4b43-4ed7-a570-0fdf0fbeea00" - references: - - url: "https://attack.mitre.org/mitigations/T1151" - source: "mitre-attack" - external_id": "T1151" + - "attack-pattern--bf176076-b789-408e-8cba-7275e81c0ada" + references: [] + url: "https://attack.mitre.org/mitigations/M1031" + source: mitre-attack + external_id: M1031 total_affected_items: 266 total_failed_items: 0 failed_items: [] @@ -13445,9 +13432,9 @@ paths: - url: "https://media.kasperskycontenthub.com/wp-content/uploads/sites/43/2018/03/07205555/TheNaikonAPT-MsnMM1.pdf" description: "Baumgartner, K., Golovkin, M.. (2015, May). The MsnMM Campaigns: The Earliest Naikon APT Campaigns. Retrieved April 10, 2019." source: "Baumgartner Naikon 2015" - - url: "https://attack.mitre.org/software/S0061" - source: "mitre-attack" - external_id: "S0061" + url: "https://attack.mitre.org/software/S0061" + source: mitre-attack + external_id: S0061 total_affected_items: 444 total_failed_items: 0 failed_items: [] @@ -13529,10 +13516,10 @@ paths: - "attack-pattern--e624264c-033a-424d-9fd7-fc9c3bbdb03e" - "attack-pattern--eb062747-2193-45de-8fa2-e62549c37ddf" - "attack-pattern--f005e783-57d4-4837-88ad-dbe7faee1c51" - references: - - url: "https://attack.mitre.org/tactics/TA0008" - source: "mitre-attack" - external_id: "TA0008" + references: [] + url: "https://attack.mitre.org/tactics/TA0008" + source: mitre-attack + external_id: TA0008 total_affected_items: 14 total_failed_items: 0 failed_items: [] @@ -13616,9 +13603,9 @@ paths: - source: "WHOIS" description: "NTT America. (n.d.). Whois Lookup. Retrieved October 20, 2020." 
url: "https://www.whois.net/" - - source: "mitre-attack" - external_id: "T1590" - url: "https://attack.mitre.org/techniques/T1590" + source: mitre-attack + external_id: T1590 + url: "https://attack.mitre.org/techniques/T1590" total_affected_items: 1 total_failed_items: 0 failed_items: [] @@ -13686,7 +13673,7 @@ paths: /rootcheck/{agent_id}: get: tags: - - Rootcheck + - Rootcheck summary: "Get results" description: "Return the rootcheck database of an agent" operationId: api.controllers.rootcheck_controller.get_rootcheck_agent @@ -13791,7 +13778,7 @@ paths: /rootcheck/{agent_id}/last_scan: get: tags: - - Rootcheck + - Rootcheck summary: "Get last scan datetime" description: "Return the timestamp of the last rootcheck scan of an agent" operationId: api.controllers.rootcheck_controller.get_last_scan_agent @@ -13837,7 +13824,7 @@ paths: /rules: get: tags: - - Rules + - Rules summary: "List rules" description: "Return a list containing information about each rule such as file where it's defined, description, rule group, status, etc" @@ -13911,7 +13898,7 @@ paths: - syslog - errors description: "File missing. Root access unrestricted." - total_affected_items: 707 + total_affected_items: 1 total_failed_items: 0 failed_items: [] message: "All selected rules were returned" @@ -13930,7 +13917,7 @@ paths: /rules/groups: get: tags: - - Rules + - Rules summary: "Get groups" description: "Return a list containing all rule groups names" operationId: api.controllers.rule_controller.get_rules_groups @@ -13983,7 +13970,7 @@ paths: /rules/requirement/{requirement}: get: tags: - - Rules + - Rules summary: "Get requirements" description: "Return all specified requirement names defined in the Wazuh ruleset" operationId: api.controllers.rule_controller.get_rules_requirement @@ -14012,14 +13999,11 @@ paths: example: data: affected_items: - - 10.2.6 - - 10.2.7 - - 10.5.2 - - 10.5.5 - - 10.6.1 - - 11.4 - - 11.5 - total_affected_items: 7 + - A1.2 + - CC6.1 + - CC6.2 + - CC6.3 + total_affected_items: 4 total_failed_items: 0 failed_items: [] message: "All selected rules were returned" @@ -14038,7 +14022,7 @@ paths: /rules/files: get: tags: - - Rules + - Rules summary: "Get files" description: "Return a list containing all files used to define rules and their status" operationId: api.controllers.rule_controller.get_rules_files @@ -14131,7 +14115,7 @@ paths: data: affected_items: - group: - - "@name": syslog + - "@name": syslog, rule: "@id": '1' "@level": '0' @@ -14145,41 +14129,41 @@ paths: "@noalert": '1' category: firewall description: "Generic template for all firewall rules." - - "@name": ids + - "@name": ids, rule: "@id": '3' "@level": '0' "@noalert": '1' category: ids description: "Generic template for all ids rules." - - "@name": web-log + - "@name": web-log, rule: "@id": '4' "@level": '0' "@noalert": '1' category: web-log description: "Generic template for all web rules." - - "@name": squid + - "@name": squid, rule: "@id": '5' "@level": '0' "@noalert": '1' category: squid description: "Generic template for all web proxy rules." - - "@name": windows + - "@name": windows, rule: "@id": '6' "@level": '0' "@noalert": '1' category: windows description: "Generic template for all windows rules." - - "@name": ossec + - "@name": ossec, rule: "@id": '7' "@level": '0' "@noalert": '1' category: ossec - description: "Generic template for all ossec rules." + description: "Generic template for all wazuh rules." 
total_affected_items: 1 total_failed_items: 0 failed_items: [] @@ -14310,7 +14294,7 @@ paths: $ref: '#/components/responses/TooManyRequestsResponse' delete: tags: - - Rules + - Rules summary: "Delete rules file" description: "Delete a specified rule file" operationId: api.controllers.rule_controller.delete_file @@ -14356,7 +14340,7 @@ paths: /sca/{agent_id}: get: tags: - - SCA + - SCA summary: "Get results" description: "Return the security SCA database of an agent" operationId: api.controllers.sca_controller.get_sca_agent @@ -14383,11 +14367,11 @@ paths: application/json: schema: allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - data: - $ref: '#/components/schemas/AllItemsResponseSCADatabase' + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + data: + $ref: '#/components/schemas/AllItemsResponseSCADatabase' example: data: affected_items: @@ -14407,7 +14391,7 @@ paths: total_affected_items: 1 total_failed_items: 0 failed_items: [] - message: "All selected sca information was returned" + message: "All selected SCA information was returned" error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -14423,7 +14407,7 @@ paths: /sca/{agent_id}/checks/{policy_id}: get: tags: - - SCA + - SCA summary: "Get policy checks" description: "Return the policy monitoring alerts for a given policy" operationId: api.controllers.sca_controller.get_sca_checks @@ -14500,7 +14484,7 @@ paths: total_affected_items: 191 total_failed_items: 0 failed_items: [] - message: "All selected sca/policy information was returned" + message: "All selected SCA/policy information was returned" error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -14516,7 +14500,7 @@ paths: /syscheck: put: tags: - - Syscheck + - Syscheck summary: "Run scan" description: "Run FIM scan in all agents" operationId: api.controllers.syscheck_controller.put_syscheck @@ -14564,7 +14548,7 @@ paths: /syscheck/{agent_id}: get: tags: - - Syscheck + - Syscheck deprecated: true summary: "Get results" description: "Return FIM findings in the specified agent" @@ -14622,6 +14606,7 @@ paths: type: file uid: '0' uname: root + attributes: "" - changes: 1 date: '2019-11-22T10:24:56Z' file: "/etc/sgml/xml-core.cat" @@ -14637,6 +14622,7 @@ paths: type: file uid: '0' uname: root + attributes: "" total_affected_items: 1433 total_failed_items: 0 failed_items: [] @@ -14701,7 +14687,7 @@ paths: /syscheck/{agent_id}/last_scan: get: tags: - - Syscheck + - Syscheck summary: "Get last scan datetime" description: "Return when the last syscheck scan started and ended. If the scan is still in progress the end date will be unknown" @@ -14806,7 +14792,7 @@ paths: pattern: "^ '(\\S+)'." 
offset: after_prematch order: level - total_affected_items: 1140 + total_affected_items: 2 total_failed_items: 0 failed_items: [] message: "All selected decoders were returned" @@ -15091,7 +15077,7 @@ paths: $ref: '#/components/responses/TooManyRequestsResponse' delete: tags: - - Decoders + - Decoders summary: "Delete decoders file" description: "Delete a specified decoder file" operationId: api.controllers.decoder_controller.delete_file @@ -16423,7 +16409,9 @@ paths: example: data: affected_items: - - address: 172.26.0.7 + - scan: + id: 0 + address: 172.26.0.7 iface: eth0 netmask: 255.255.0.0 broadcast: 172.26.255.255 @@ -16713,6 +16701,8 @@ paths: section: libs priority: optional agent_id: '002' + location: "" + install_time: "" - scan: id: 0 time: '2021-05-28T11:16:15Z' @@ -16728,6 +16718,8 @@ paths: section: net priority: standard agent_id: '002' + location: "" + install_time: "" - scan: id: 0 time: '2021-05-28T11:16:16Z' @@ -16742,6 +16734,8 @@ paths: section: python priority: optional agent_id: '002' + location: "" + install_time: "" total_affected_items: 307 total_failed_items: 0 failed_items: [] @@ -16817,6 +16811,7 @@ paths: protocol: tcp pid: 0 tx_queue: 0 + process: "" agent_id: '001' - local: ip: 0.0.0.0 @@ -16833,6 +16828,7 @@ paths: protocol: tcp pid: 0 tx_queue: 0 + process: "" agent_id: '001' - local: ip: 0.0.0.0 @@ -16848,6 +16844,7 @@ paths: rx_queue: 0 protocol: tcp tx_queue: 0 + process: "" agent_id: '001' total_affected_items: 3 total_failed_items: 0 @@ -16947,7 +16944,7 @@ paths: fgroup: "root" ruser: "root" agent_id: "001" - total_affected_items: 2 + total_affected_items: 1 total_failed_items: 0 failed_items: [] message: "All specified syscollector information was returned" @@ -17076,7 +17073,7 @@ paths: /security/user/authenticate/run_as: post: tags: - - Security + - Security summary: "Login auth_context" description: "This method should be called to get an API token using an authorization context body. This token will expire after auth_token_exp_timeout seconds (default: 900). This value can be changed using PUT @@ -17238,7 +17235,7 @@ paths: /security/user/revoke: put: tags: - - Security + - Security summary: "Revoke JWT tokens" description: "This method should be called to revoke all active JWT tokens" operationId: api.controllers.security_controller.revoke_all_tokens @@ -17895,7 +17892,7 @@ paths: total_affected_items: 1 total_failed_items: 0 failed_items: [] - message: All specified roles were deleted + message: "All specified roles were deleted" error: 0 '400': @@ -18854,6 +18851,8 @@ paths: id: "004" mergedSum: "9a016508cea1e997ab8569f5cfab30f5" version: "Wazuh v4.3.0" + status_code: 0 + group_config_status: active error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -19022,7 +19021,7 @@ paths: /events: post: tags: - - Events + - Events summary: "Ingest events" description: |- Send security events to analysisd. 
@@ -19084,5 +19083,5 @@ paths: $ref: '#/components/responses/TooManyRequestsResponse' externalDocs: - description: "Find more about Wazuh API usage" + description: "Learn more about the Wazuh API" url: 'https://documentation.wazuh.com/4.9/user-manual/api/index.html' From 09c7d82781028292f3f70855fec8e4aceb32cb12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Tue, 16 Apr 2024 15:48:18 -0300 Subject: [PATCH 234/419] Remove extra empty space --- framework/wazuh/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/framework/wazuh/manager.py b/framework/wazuh/manager.py index 2c1a1040888..28802831034 100755 --- a/framework/wazuh/manager.py +++ b/framework/wazuh/manager.py @@ -176,7 +176,7 @@ def get_api_config() -> AffectedItemsWazuhResult: } _restart_default_result_kwargs = { - 'all_msg': f"Restart request sent to {' all specified nodes' if node_id != ' manager' else ''}", + 'all_msg': f"Restart request sent to {'all specified nodes' if node_id != 'manager' else ''}", 'some_msg': "Could not send restart request to some specified nodes", 'none_msg': "Could not send restart request to any node", 'sort_casting': ['str'] From 6eb29a4331ad3d46ca6d8f5b330ab49879952fbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Wed, 17 Apr 2024 12:57:10 -0300 Subject: [PATCH 235/419] Typo fix --- framework/wazuh/mitre.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/framework/wazuh/mitre.py b/framework/wazuh/mitre.py index e3b27661edc..eaa43394fb9 100644 --- a/framework/wazuh/mitre.py +++ b/framework/wazuh/mitre.py @@ -17,7 +17,7 @@ def mitre_metadata() -> AffectedItemsWazuhResult: Metadata of MITRE's db. """ result = AffectedItemsWazuhResult(none_msg='No MITRE metadata information was returned', - all_msg='MITRE Metadata information was returned') + all_msg='MITRE metadata information was returned') with mitre.WazuhDBQueryMitreMetadata() as db_query: data = db_query.run() From 3f0774683e25b76d520f117d0b25c4e6a309aea7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Wed, 17 Apr 2024 13:23:13 -0300 Subject: [PATCH 236/419] Use uppercases for acronyms --- framework/wazuh/sca.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/framework/wazuh/sca.py b/framework/wazuh/sca.py index 8ad80d08d37..529097dea92 100755 --- a/framework/wazuh/sca.py +++ b/framework/wazuh/sca.py @@ -46,9 +46,9 @@ def get_sca_list(agent_list: list = None, q: str = "", offset: int = 0, limit: i AffectedItemsWazuhResult Affected items. """ - result = AffectedItemsWazuhResult(all_msg='All selected sca information was returned', - some_msg='Some sca information was not returned', - none_msg='No sca information was returned' + result = AffectedItemsWazuhResult(all_msg='All selected SCA information was returned', + some_msg='Some SCA information was not returned', + none_msg='No SCA information was returned' ) if len(agent_list) != 0: @@ -106,9 +106,9 @@ def get_sca_checks(policy_id: str = None, agent_list: list = None, q: str = "", AffectedItemsWazuhResult Affected items. 
""" - result = AffectedItemsWazuhResult(all_msg='All selected sca/policy information was returned', - some_msg='Some sca/policy information was not returned', - none_msg='No sca/policy information was returned' + result = AffectedItemsWazuhResult(all_msg='All selected SCA/policy information was returned', + some_msg='Some SCA/policy information was not returned', + none_msg='No SCA/policy information was returned' ) if len(agent_list) != 0: if agent_list[0] in get_agents_info(): From 98d3a95c2210c82e41101f5b310be300fe44c643 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Wed, 17 Apr 2024 14:53:08 -0300 Subject: [PATCH 237/419] Update test --- framework/wazuh/tests/data/security/rbac_catalog.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/framework/wazuh/tests/data/security/rbac_catalog.yml b/framework/wazuh/tests/data/security/rbac_catalog.yml index abf4337e980..199c9545683 100644 --- a/framework/wazuh/tests/data/security/rbac_catalog.yml +++ b/framework/wazuh/tests/data/security/rbac_catalog.yml @@ -98,12 +98,12 @@ get_rbac_actions: - GET /agents/{agent_id}/key - GET /agents/{agent_id}/daemons/stats - GET /agents/{agent_id}/stats/{component} - - GET /groups/{group_id}/agents - GET /agents/no_group - GET /agents/outdated - GET /agents/stats/distinct - GET /agents/summary/os - GET /agents/summary/status + - GET /groups/{group_id}/agents - GET /overview/agents agent:create: description: Create new agents @@ -267,10 +267,10 @@ get_rbac_actions: related_endpoints: - PUT /agents/node/{node_id}/restart - GET /cluster/local/info + - GET /cluster/local/config - GET /cluster/nodes - GET /cluster/healthcheck - GET /cluster/ruleset/synchronization - - GET /cluster/local/config - GET /cluster/{node_id}/status - GET /cluster/{node_id}/info - GET /cluster/{node_id}/configuration @@ -351,7 +351,7 @@ get_rbac_actions: related_endpoints: - PUT /cluster/restart lists:read: - description: Read cdb lists files + description: Read CDB lists files resources: - list:file example: @@ -365,7 +365,7 @@ get_rbac_actions: - GET /lists/files/{filename} - GET /lists/files lists:update: - description: Update or upload cdb lists files + description: Update or upload CDB lists files resources: - "*:*" example: @@ -377,7 +377,7 @@ get_rbac_actions: related_endpoints: - PUT /lists/files/{filename} lists:delete: - description: Delete cdb lists files + description: Delete CDB lists files resources: - list:file example: From 70a7b43112931f6601649dd283caa65be8d7d673 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Thu, 18 Apr 2024 09:43:28 -0300 Subject: [PATCH 238/419] Minor corrections --- api/api/spec/spec.yaml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index 5762af341ae..b036ef24248 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -1372,7 +1372,7 @@ components: type: object properties: data: - description: "Alert data depending on the active response executed" + description: "Alert data depending on the active response command executed" type: object required: - command @@ -8584,7 +8584,7 @@ paths: total_affected_items: 3 total_failed_items: 0 failed_items: [] - message: All selected agents information was returned + message: "All selected agents information was returned" error: 0 '400': $ref: '#/components/responses/ResponseError' @@ -9187,8 +9187,8 @@ paths: - os: arch: x86_64 codename: Focal Fossa - major: 20 - minor: 04 + major: 
'20' + minor: '04' name: Ubuntu platform: ubuntu uname: Linux |b2497efbf876 |5.8.0-45-generic |#51~20.04.1-Ubuntu SMP Tue Feb @@ -9213,8 +9213,8 @@ paths: - os: arch: x86_64 codename: Focal Fossa - major: 20 - minor: 04 + major: '20' + minor: '04' name: Ubuntu platform: ubuntu uname: Linux |600e27371700 |5.8.0-45-generic |#51~20.04.1-Ubuntu SMP Tue Feb @@ -9239,8 +9239,8 @@ paths: - os: arch: x86_64 codename: Focal Fossa - major: 20 - minor: 04 + major: '20' + minor: '04' name: Ubuntu platform: ubuntu uname: Linux |4bdac19ce5e3 |5.8.0-45-generic |#51~20.04.1-Ubuntu SMP Tue Feb @@ -18830,8 +18830,8 @@ paths: - os: arch: "x86_64" codename: "Focal Fossa" - major: 20 - minor: 04 + major: '20' + minor: '04' name: Ubuntu platform: ubuntu uname: "Linux |77000bae7bd0 |5.8.0-45-generic |#51~20.04.1-Ubuntu SMP Tue Feb From ed65a67e9a7a86a3e8ae2ed0259e68a7b0a1cc69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Thu, 18 Apr 2024 12:27:37 -0300 Subject: [PATCH 239/419] Update IP field format --- api/api/spec/spec.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index b036ef24248..a40fd5368fb 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -1463,7 +1463,7 @@ components: description: "If this is not included, the API will get the IP automatically. Allowed values: IP, IP/NET, ANY" type: string - format: alphanumeric + format: alphanumeric_symbols required: - name @@ -1526,7 +1526,7 @@ components: description: "If this is not included, the API will get the IP automatically. Allowed values: IP, IP/NET, ANY" type: string - format: alphanumeric + format: alphanumeric_symbols force: $ref: '#/components/schemas/AgentInsertForce' required: From 3e8bc74ed3d78f2412b1fc829e6348707b397f16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Fri, 19 Apr 2024 09:17:40 -0300 Subject: [PATCH 240/419] Small corrections --- api/api/spec/spec.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index a40fd5368fb..e0c3719705f 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -8565,6 +8565,7 @@ paths: dateAdd: 2021-05-27T10:06:13Z registerIP: 172.16.16.16 status_code: 0 + group_config_status: not synced - node_name: unknown ip: 172.16.16.17 id: 010 @@ -8573,6 +8574,7 @@ paths: dateAdd: 2021-05-27T10:06:27Z registerIP: 172.16.16.17 status_code: 0 + group_config_status: not synced - node_name: unknown ip: any id: 011 @@ -8581,6 +8583,7 @@ paths: dateAdd: 2021-05-27T10:07:37Z registerIP: any status_code: 0 + group_config_status: not synced total_affected_items: 3 total_failed_items: 0 failed_items: [] @@ -8935,7 +8938,6 @@ paths: data: affected_items: - ubuntu - totalItems: 1 total_affected_items: 1 total_failed_items: 0 failed_items: [] @@ -9015,7 +9017,7 @@ paths: - $ref: '#/components/parameters/groups_list_all' responses: '200': - description: "Remove multiple group of multiple agents" + description: "Remove multiple group from multiple agents" content: application/json: schema: From eb9e6d975ab382d73e34433f3beaf68051370d86 Mon Sep 17 00:00:00 2001 From: GGP1 Date: Fri, 3 May 2024 12:49:41 -0300 Subject: [PATCH 241/419] Specify content_type instead of mimetype --- api/api/controllers/agent_controller.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/api/api/controllers/agent_controller.py b/api/api/controllers/agent_controller.py index dba4c2c92ec..e2adee0df41 100755 
--- a/api/api/controllers/agent_controller.py +++ b/api/api/controllers/agent_controller.py @@ -1376,13 +1376,13 @@ async def get_group_file(group_id: str, file_name: str, raw: bool = False, prett data = raise_if_exc(await dapi.distribute_function()) if raw: - mimetype, _ = mimetypes.guess_type(file_name) - if mimetype is None: - mimetype = 'text/plain' + content_type, _ = mimetypes.guess_type(file_name) + if content_type is None: + content_type = 'text/plain' if file_name == 'agent.conf': - mimetype = 'application/xml' + content_type = 'application/xml' - return ConnexionResponse(body=data['data'], mimetype=mimetype) + return ConnexionResponse(body=data['data'], content_type=content_type) return json_response(data, pretty=pretty) From 4f6ad4ae14522c9d6717b7339395bdafaffc024c Mon Sep 17 00:00:00 2001 From: GGP1 Date: Fri, 3 May 2024 13:06:53 -0300 Subject: [PATCH 242/419] Add character encoding --- api/api/controllers/agent_controller.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/api/api/controllers/agent_controller.py b/api/api/controllers/agent_controller.py index e2adee0df41..41b849b91cd 100755 --- a/api/api/controllers/agent_controller.py +++ b/api/api/controllers/agent_controller.py @@ -1376,13 +1376,13 @@ async def get_group_file(group_id: str, file_name: str, raw: bool = False, prett data = raise_if_exc(await dapi.distribute_function()) if raw: - content_type, _ = mimetypes.guess_type(file_name) - if content_type is None: - content_type = 'text/plain' + mimetype, _ = mimetypes.guess_type(file_name) + if mimetype is None: + mimetype = 'text/plain' if file_name == 'agent.conf': - content_type = 'application/xml' + mimetype = 'application/xml' - return ConnexionResponse(body=data['data'], content_type=content_type) + return ConnexionResponse(body=data['data'], content_type=mimetype+'; charset=utf-8') return json_response(data, pretty=pretty) From 77fdeaf021c7907c520532da2db1dadb8e395d15 Mon Sep 17 00:00:00 2001 From: GGP1 Date: Tue, 7 May 2024 08:51:08 -0300 Subject: [PATCH 243/419] Add text/plain content-type --- api/api/spec/spec.yaml | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index e0c3719705f..7d10b1292ff 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -9502,15 +9502,6 @@ paths: - type: object description: "The output format depends on the type of file that has been requested: rootkit file, rootkit trojans or rcl" - application/xml: - schema: - properties: - data: - oneOf: - - type: array - - type: object - description: "The output format depends on the type of file that has been requested: rootkit - file, rootkit trojans or rcl" example: data: vars: @@ -9532,6 +9523,12 @@ paths: checks: - "f:/etc/fstab -> !r:/tmp;" error: 0 + application/xml: + schema: + type: string + text/plain: + schema: + type: string '400': $ref: '#/components/responses/ResponseError' '401': From 76d86867f903618ef0dcb01033071b1d4eea6de5 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Thu, 9 May 2024 09:22:53 -0300 Subject: [PATCH 244/419] Upgrades idna to 3.7.0. 
--- framework/requirements.txt | 2 +- src/Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/framework/requirements.txt b/framework/requirements.txt index 4357422eda6..99fec234db3 100644 --- a/framework/requirements.txt +++ b/framework/requirements.txt @@ -39,7 +39,7 @@ grpcio==1.58.0 httpcore==1.0.2 httpx==0.26.0 h11==0.14.0 -idna==2.9 +idna==3.7.0 importlib-metadata==6.8.0 inflection==0.3.1 Jinja2==3.1.3 diff --git a/src/Makefile b/src/Makefile index f2491dddcbd..5a7c94d5b2e 100644 --- a/src/Makefile +++ b/src/Makefile @@ -1341,7 +1341,7 @@ TAR := tar -xf GUNZIP := gunzip GZIP := gzip CURL := curl -so -DEPS_VERSION = 25-20795 +DEPS_VERSION = 25-23136 RESOURCES_URL_BASE := https://packages.wazuh.com/deps/ RESOURCES_URL := $(RESOURCES_URL_BASE)$(DEPS_VERSION) CPYTHON := cpython From 3766a313bb0cf838b90fe2573df2f7965024d1ae Mon Sep 17 00:00:00 2001 From: Selutario Date: Mon, 20 May 2024 13:38:41 +0200 Subject: [PATCH 245/419] Update deps_version, fix requirements.txt --- api/test/integration/env/docker-compose.yml | 15 --------------- framework/requirements.txt | 2 -- src/Makefile | 2 +- 3 files changed, 1 insertion(+), 18 deletions(-) diff --git a/api/test/integration/env/docker-compose.yml b/api/test/integration/env/docker-compose.yml index 1e4e08889e7..d400ea982c3 100644 --- a/api/test/integration/env/docker-compose.yml +++ b/api/test/integration/env/docker-compose.yml @@ -13,11 +13,6 @@ services: volumes: - ./configurations/tmp/manager:/tmp_volume - ./tools/:/tools - # Folders to be used in local development environments - # - ${WAZUH_LOCAL_PATH}/framework/wazuh:/var/ossec/framework/python/lib/python3.10/site-packages/wazuh - # - ${WAZUH_LOCAL_PATH}/api/api:/var/ossec/framework/python/lib/python3.10/site-packages/api - # - ${WAZUH_LOCAL_PATH}/framework/scripts:/var/ossec/framework/scripts - # - ${WAZUH_LOCAL_PATH}/api/scripts/wazuh-apid.py:/var/ossec/api/scripts/wazuh-apid.py entrypoint: - /scripts/entrypoint.sh - wazuh-master @@ -33,11 +28,6 @@ services: volumes: - ./configurations/tmp/manager:/tmp_volume - ./tools/:/tools - # Folders to be used in local development environments - # - ${WAZUH_LOCAL_PATH}/framework/wazuh:/var/ossec/framework/python/lib/python3.10/site-packages/wazuh - # - ${WAZUH_LOCAL_PATH}/api/api:/var/ossec/framework/python/lib/python3.10/site-packages/api - # - ${WAZUH_LOCAL_PATH}/framework/scripts:/var/ossec/framework/scripts - # - ${WAZUH_LOCAL_PATH}/api/scripts/wazuh-apid.py:/var/ossec/api/scripts/wazuh-apid.py entrypoint: - /scripts/entrypoint.sh - wazuh-master @@ -54,11 +44,6 @@ services: volumes: - ./configurations/tmp/manager:/tmp_volume - ./tools/:/tools - # Folders to be used in local development environments - # - ${WAZUH_LOCAL_PATH}/framework/wazuh:/var/ossec/framework/python/lib/python3.10/site-packages/wazuh - # - ${WAZUH_LOCAL_PATH}/api/api:/var/ossec/framework/python/lib/python3.10/site-packages/api - # - ${WAZUH_LOCAL_PATH}/framework/scripts:/var/ossec/framework/scripts - # - ${WAZUH_LOCAL_PATH}/api/scripts/wazuh-apid.py:/var/ossec/api/scripts/wazuh-apid.py entrypoint: - /scripts/entrypoint.sh - wazuh-master diff --git a/framework/requirements.txt b/framework/requirements.txt index 99fec234db3..72860bed2c5 100644 --- a/framework/requirements.txt +++ b/framework/requirements.txt @@ -95,6 +95,4 @@ Werkzeug==3.0.1 xmltodict==0.12.0 yarl==1.7.0 zipp==3.3.2 -content_size_limit_asgi -uvicorn==0.24.0.post1 content_size_limit_asgi==0.1.5 diff --git a/src/Makefile b/src/Makefile index 5a7c94d5b2e..f556ffeb6b6 100644 --- 
a/src/Makefile +++ b/src/Makefile @@ -1341,7 +1341,7 @@ TAR := tar -xf GUNZIP := gunzip GZIP := gzip CURL := curl -so -DEPS_VERSION = 25-23136 +DEPS_VERSION = 30 RESOURCES_URL_BASE := https://packages.wazuh.com/deps/ RESOURCES_URL := $(RESOURCES_URL_BASE)$(DEPS_VERSION) CPYTHON := cpython From dd645b1c22a0bd79cc4237246a3d3904d6eecc43 Mon Sep 17 00:00:00 2001 From: Selutario Date: Mon, 20 May 2024 14:11:43 +0200 Subject: [PATCH 246/419] Remove old vulnerability_controller file --- .../controllers/vulnerability_controller.py | 214 ------------------ 1 file changed, 214 deletions(-) delete mode 100644 api/api/controllers/vulnerability_controller.py diff --git a/api/api/controllers/vulnerability_controller.py b/api/api/controllers/vulnerability_controller.py deleted file mode 100644 index 073d3e63428..00000000000 --- a/api/api/controllers/vulnerability_controller.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright (C) 2015, Wazuh Inc. -# Created by Wazuh, Inc. . -# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 - -import logging - -from connexion import request -from connexion.lifecycle import ConnexionResponse - -from api.controllers.util import json_response -from api.util import parse_api_param, remove_nones_to_dict, raise_if_exc, deprecate_endpoint -from wazuh import vulnerability, WazuhError -from wazuh.core.cluster.control import get_system_nodes -from wazuh.core.cluster.dapi.dapi import DistributedAPI -from wazuh.core.common import DATABASE_LIMIT - -logger = logging.getLogger('wazuh-api') - - -@deprecate_endpoint() -async def run_vulnerability_scan(pretty: bool = False, wait_for_complete: bool = False) -> ConnexionResponse: - """Run a vulnerability detector scan in all nodes. - - Parameters - ---------- - pretty : bool - Show results in human-readable format. - wait_for_complete : bool - Disable timeout response. - - Returns - ------- - ConnexionResponse - API response. - """ - try: - nodes = await get_system_nodes() - dapi_extra_kwargs = {'f_kwargs': {'node_list': nodes}, - 'nodes': nodes, - 'remove_denied_nodes': True} \ - if not isinstance(nodes, WazuhError) else {} - except Exception as exc: - raise_if_exc(exc) - - dapi = DistributedAPI(f=vulnerability.run_vulnerability_scan, - request_type='distributed_master', - is_async=False, - wait_for_complete=wait_for_complete, - logger=logger, - rbac_permissions=request.context['token_info']['rbac_policies'], - **dapi_extra_kwargs) - data = raise_if_exc(await dapi.distribute_function()) - - return json_response(data, pretty=pretty) - - -@deprecate_endpoint() -async def get_vulnerability_agent(pretty: bool = False, wait_for_complete: bool = False, agent_id: str = None, - offset: int = 0, limit: int = None, sort: str = None, search: str = None, - select: str = None, q: str = '', distinct: str = None, architecture: str = None, - cve: str = None, name: str = None, version: str = None, type: str = None, - status: str = None, severity: str = None) -> ConnexionResponse: - """Get agents' vulnerabilities. - - Parameters - ---------- - request : request.connexion - pretty : bool - Show results in human-readable format. - wait_for_complete : bool - Disable timeout response. - agent_id : str - ID of the agent to retrieve CVE info. - offset : int - First element to return in the collection. - limit : int - Maximum number of elements to return. - sort : str - Sort the collection by a field or fields (separated by comma). Use +/- at the beginning to list in - ascending or descending order. 
- search : str - Look for elements with the specified string. - select : str - Fields to return. - q : str - Query to filter results by. - distinct : bool - Look for distinct values. - architecture : str - Filter by architecture. - cve : str - Filter by CVE ID. - name : str - Filter by package ID. - version : str - Filter by version. - type : str - Filter by CVE type. - status : str - Filter by CVE status. - severity : str - Filter by CVE severity. - - Returns - ------- - ConnexionResponse - API response. - """ - f_kwargs = { - 'agent_list': [agent_id], - 'offset': offset, - 'limit': limit, - 'sort': parse_api_param(sort, 'sort'), - 'search': parse_api_param(search, 'search'), - 'select': select, - 'q': q, - 'distinct': distinct, - 'filters': { - 'architecture': architecture, - 'cve': cve, - 'name': name, - 'version': version, - 'status': status, - 'type': type, - 'severity': severity - } - } - - dapi = DistributedAPI(f=vulnerability.get_agent_cve, - f_kwargs=remove_nones_to_dict(f_kwargs), - request_type='distributed_master', - is_async=False, - wait_for_complete=wait_for_complete, - logger=logger, - rbac_permissions=request.context['token_info']['rbac_policies'] - ) - data = raise_if_exc(await dapi.distribute_function()) - - return json_response(data, pretty=pretty) - - -@deprecate_endpoint() -async def get_last_scan_agent(pretty: bool = False, wait_for_complete: bool = False, - agent_id: str = None) -> ConnexionResponse: - """Return when the last full and partial vulnerability scan of a specified agent ended. - - Parameters - ---------- - pretty : bool - Show results in human-readable format. - wait_for_complete : bool - Disable timeout response. - agent_id : str - ID of the agent to retrieve scans info. - - Returns - ------- - ConnexionResponse - API response. - """ - f_kwargs = {'agent_list': [agent_id]} - - dapi = DistributedAPI(f=vulnerability.last_scan, - f_kwargs=remove_nones_to_dict(f_kwargs), - request_type='distributed_master', - is_async=False, - wait_for_complete=wait_for_complete, - logger=logger, - rbac_permissions=request.context['token_info']['rbac_policies'] - ) - data = raise_if_exc(await dapi.distribute_function()) - - return json_response(data, pretty=pretty) - - -@deprecate_endpoint() -async def get_vulnerabilities_field_summary(pretty: bool = False, wait_for_complete: bool = False, - agent_id: str = None, field: str = None, - limit: int = DATABASE_LIMIT) -> ConnexionResponse: - """Return a summary of the vulnerabilities' field of a given agent. - - Parameters - ---------- - pretty : bool - Show results in human-readable format. - wait_for_complete : bool - Disable timeout response. - agent_id : str - ID of the agent to retrieve severity summary. - field : str - Field to obtain the summary from. - limit : int - Maximum number of elements to return. Default: DATABASE_LIMIT - - Returns - ------- - ConnexionResponse - API response. 
- """ - f_kwargs = {'agent_list': [agent_id], - 'field': field, - 'limit': limit} - - dapi = DistributedAPI(f=vulnerability.get_inventory_summary, - f_kwargs=remove_nones_to_dict(f_kwargs), - request_type='distributed_master', - is_async=False, - wait_for_complete=wait_for_complete, - logger=logger, - rbac_permissions=request.context['token_info']['rbac_policies'] - ) - data = raise_if_exc(await dapi.distribute_function()) - - return json_response(data, pretty=pretty) From ebd4f4e90c629bc3edd4b67f9848eb935778aa6b Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Tue, 21 May 2024 17:54:38 +0200 Subject: [PATCH 247/419] Fix bug related to error type handling --- api/api/error_handler.py | 5 ++--- api/api/test/test_error_handler.py | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/api/api/error_handler.py b/api/api/error_handler.py index aab5fd64550..c7f5c1a732f 100644 --- a/api/api/error_handler.py +++ b/api/api/error_handler.py @@ -155,10 +155,9 @@ async def problem_error_handler(request: ConnexionRequest, exc: exceptions.Probl """ problem = { "title": exc.title if exc.title else 'Bad Request', - "detail": exc.detail if isinstance(exc.detail, dict) \ - else _cleanup_detail_field(exc.detail) + "detail": exc.detail if isinstance(exc.detail, dict) else _cleanup_detail_field(exc.detail) } - problem.update({"type": exc.type} if exc.type else {}) + problem.update({"type": exc.type} if (exc.type and exc.type != 'about:blank') else {}) problem.update(exc.ext if exc.ext else {}) if isinstance(problem['detail'], dict): for field in ['status', 'type']: diff --git a/api/api/test/test_error_handler.py b/api/api/test/test_error_handler.py index 2158f262b42..3cb6f248d44 100644 --- a/api/api/test/test_error_handler.py +++ b/api/api/test/test_error_handler.py @@ -163,7 +163,7 @@ async def test_problem_error_handler(title, detail, ext, error_type, mock_reques detail = _cleanup_detail_field(detail) problem = {} problem.update({'title': title} if title else {'title': 'Bad Request'}) - problem.update({'type': error_type} if error_type else {}) + problem.update({'type': error_type} if (error_type and error_type != 'about:blank') else {}) problem.update({'detail': detail} if detail else {}) problem.update(ext if ext else {}) problem.update({'error': problem.pop('code')} if 'code' in problem else {}) From 06f6e5ea504be315b99c0373b6bf9debe0a3a05c Mon Sep 17 00:00:00 2001 From: GGP1 Date: Thu, 23 May 2024 15:31:41 -0300 Subject: [PATCH 248/419] Catch psutil.NoSuchProcess exception --- framework/wazuh/core/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/framework/wazuh/core/utils.py b/framework/wazuh/core/utils.py index b9cb252ba31..88c4aaad650 100755 --- a/framework/wazuh/core/utils.py +++ b/framework/wazuh/core/utils.py @@ -63,7 +63,7 @@ def clean_pid_files(daemon: str) -> None: else: print(f"{daemon}: Process {pid} does not belong to {daemon}, removing from {common.WAZUH_PATH}/var/run...") - except OSError: + except (OSError, psutil.NoSuchProcess): print(f'{daemon}: Non existent process {pid}, removing from {common.WAZUH_PATH}/var/run...') finally: os.remove(path.join(common.OSSEC_PIDFILE_PATH, pid_file)) From daeaf5c86f59a157af7bbac8d7e9e4ff0a362ba2 Mon Sep 17 00:00:00 2001 From: GGP1 Date: Fri, 17 May 2024 12:43:47 -0300 Subject: [PATCH 249/419] Disable wazuh-modulesd debug mode --- api/test/integration/env/base/manager/entrypoint.sh | 3 --- api/test/integration/test_cluster_endpoints.tavern.yaml | 8 ++++---- 2 files changed, 4 insertions(+), 7 deletions(-) diff 
--git a/api/test/integration/env/base/manager/entrypoint.sh b/api/test/integration/env/base/manager/entrypoint.sh index 663c527c54a..8d0900505d0 100755 --- a/api/test/integration/env/base/manager/entrypoint.sh +++ b/api/test/integration/env/base/manager/entrypoint.sh @@ -1,8 +1,5 @@ #!/usr/bin/env bash -# Enable debug mode for the modulesd daemon -echo 'wazuh_modules.debug=2' >> /var/ossec/etc/local_internal_options.conf - # Apply API configuration cp -rf /tmp_volume/config/* /var/ossec/ && chown -R wazuh:wazuh /var/ossec/api diff --git a/api/test/integration/test_cluster_endpoints.tavern.yaml b/api/test/integration/test_cluster_endpoints.tavern.yaml index 7e47bbaa6fb..8f0003eaec4 100644 --- a/api/test/integration/test_cluster_endpoints.tavern.yaml +++ b/api/test/integration/test_cluster_endpoints.tavern.yaml @@ -2036,12 +2036,12 @@ stages: total_affected_items: !anyint total_failed_items: 0 - - name: Read logs with filters -> tag=wazuh-modulesd, limit=2 {node_id} + - name: Read logs with filters -> tag=wazuh-syscheckd, limit=2 {node_id} request: verify: False <<: *get_cluster_logs params: - tag: wazuh-modulesd + tag: wazuh-syscheckd limit: 2 response: status_code: 200 @@ -2050,9 +2050,9 @@ stages: data: affected_items: - <<: *cluster_log - tag: wazuh-modulesd + tag: wazuh-syscheckd - <<: *cluster_log - tag: wazuh-modulesd + tag: wazuh-syscheckd failed_items: [] total_affected_items: !anyint total_failed_items: 0 From e0b612758a6d9a8a0325bbf7d74340c1bc05e53d Mon Sep 17 00:00:00 2001 From: GGP1 Date: Fri, 17 May 2024 12:43:57 -0300 Subject: [PATCH 250/419] Download VD content at compile time --- api/test/integration/env/base/manager/preloaded-vars.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/api/test/integration/env/base/manager/preloaded-vars.conf b/api/test/integration/env/base/manager/preloaded-vars.conf index cddf6fa2ab5..a3063b04627 100644 --- a/api/test/integration/env/base/manager/preloaded-vars.conf +++ b/api/test/integration/env/base/manager/preloaded-vars.conf @@ -10,5 +10,6 @@ USER_WHITE_LIST="n" USER_ENABLE_SYSLOG="y" USER_ENABLE_AUTHD="y" USER_ENABLE_UPDATE_CHECK="n" +DOWNLOAD_CONTENT="y" USER_AUTO_START="n" From 365148d404a7937196eb50f0c4491cecf12b65df Mon Sep 17 00:00:00 2001 From: GGP1 Date: Fri, 17 May 2024 16:39:13 -0300 Subject: [PATCH 251/419] Fix agent RBAC tests --- .../test_rbac_black_agent_endpoints.tavern.yaml | 9 +++------ .../test_rbac_white_agent_endpoints.tavern.yaml | 10 ++++------ 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/api/test/integration/test_rbac_black_agent_endpoints.tavern.yaml b/api/test/integration/test_rbac_black_agent_endpoints.tavern.yaml index 1287bb6616c..4198fc1b1e8 100644 --- a/api/test/integration/test_rbac_black_agent_endpoints.tavern.yaml +++ b/api/test/integration/test_rbac_black_agent_endpoints.tavern.yaml @@ -1406,12 +1406,9 @@ stages: json: error: 0 data: - affected_items: - - '003' - - '005' - - '007' + affected_items: !anything failed_items: [] - total_affected_items: 3 + total_affected_items: !anyint total_failed_items: 0 delay_after: !float "{reconnect_delay}" @@ -1420,7 +1417,7 @@ test_name: PUT /agents/restart stages: - - name: Try to restart agent 001 (Denied)default,group1,group3,pepito + - name: Try to restart agent 001 (Denied) request: verify: False url: "{protocol:s}://{host:s}:{port:d}/agents/restart" diff --git a/api/test/integration/test_rbac_white_agent_endpoints.tavern.yaml b/api/test/integration/test_rbac_white_agent_endpoints.tavern.yaml index 0e709d629d1..f1f25eec2dc 100644 --- 
a/api/test/integration/test_rbac_white_agent_endpoints.tavern.yaml +++ b/api/test/integration/test_rbac_white_agent_endpoints.tavern.yaml @@ -1470,10 +1470,9 @@ stages: json: error: 0 data: - affected_items: - - '004' + affected_items: !anything failed_items: [] - total_affected_items: 1 + total_affected_items: !anyint total_failed_items: 0 delay_after: !float "{reconnect_delay}" @@ -1570,10 +1569,9 @@ stages: json: error: 0 data: - affected_items: - - '004' + affected_items: !anything failed_items: [] - total_affected_items: 1 + total_affected_items: !anyint total_failed_items: 0 --- From 8832b19f83107c795dc3e07db481958d6a95531f Mon Sep 17 00:00:00 2001 From: GGP1 Date: Mon, 20 May 2024 12:04:19 -0300 Subject: [PATCH 252/419] Move last scan validation to the top --- .../test_rootcheck_endpoints.tavern.yaml | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/api/test/integration/test_rootcheck_endpoints.tavern.yaml b/api/test/integration/test_rootcheck_endpoints.tavern.yaml index 0e8b24f5f9d..c6c56a65cdf 100644 --- a/api/test/integration/test_rootcheck_endpoints.tavern.yaml +++ b/api/test/integration/test_rootcheck_endpoints.tavern.yaml @@ -1,3 +1,28 @@ +--- +test_name: GET /rootcheck/001/last_scan + +stages: + + # GET /rootcheck/001/last_scan + - name: Get when the last scan for agent 001 started and ended + request: + verify: False + method: GET + url: "{protocol:s}://{host:s}:{port:d}/rootcheck/001/last_scan" + headers: + Authorization: "Bearer {test_login_token}" + response: + status_code: 200 + json: + error: 0 + data: + affected_items: + - start: !anystr + end: !anystr + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 + --- test_name: GET /rootcheck/001 @@ -449,31 +474,6 @@ stages: json: error: 2007 ---- -test_name: GET /rootcheck/001/last_scan - -stages: - - # GET /rootcheck/001/last_scan - - name: Get when the last scan for agent 001 started and ended - request: - verify: False - method: GET - url: "{protocol:s}://{host:s}:{port:d}/rootcheck/001/last_scan" - headers: - Authorization: "Bearer {test_login_token}" - response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - end: !anything - start: !anystr - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 - --- test_name: PUT /rootcheck From c4d3c9ebfbc576889e066a95292b5117fcc0e018 Mon Sep 17 00:00:00 2001 From: GGP1 Date: Fri, 24 May 2024 15:17:15 -0300 Subject: [PATCH 253/419] Revert "Fix agent RBAC tests" This reverts commit 5dd01c9d4ea55f65d050f226864abe0c5b537315. 
--- .../test_rbac_black_agent_endpoints.tavern.yaml | 9 ++++++--- .../test_rbac_white_agent_endpoints.tavern.yaml | 10 ++++++---- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/api/test/integration/test_rbac_black_agent_endpoints.tavern.yaml b/api/test/integration/test_rbac_black_agent_endpoints.tavern.yaml index 4198fc1b1e8..1287bb6616c 100644 --- a/api/test/integration/test_rbac_black_agent_endpoints.tavern.yaml +++ b/api/test/integration/test_rbac_black_agent_endpoints.tavern.yaml @@ -1406,9 +1406,12 @@ stages: json: error: 0 data: - affected_items: !anything + affected_items: + - '003' + - '005' + - '007' failed_items: [] - total_affected_items: !anyint + total_affected_items: 3 total_failed_items: 0 delay_after: !float "{reconnect_delay}" @@ -1417,7 +1420,7 @@ test_name: PUT /agents/restart stages: - - name: Try to restart agent 001 (Denied) + - name: Try to restart agent 001 (Denied)default,group1,group3,pepito request: verify: False url: "{protocol:s}://{host:s}:{port:d}/agents/restart" diff --git a/api/test/integration/test_rbac_white_agent_endpoints.tavern.yaml b/api/test/integration/test_rbac_white_agent_endpoints.tavern.yaml index f1f25eec2dc..0e709d629d1 100644 --- a/api/test/integration/test_rbac_white_agent_endpoints.tavern.yaml +++ b/api/test/integration/test_rbac_white_agent_endpoints.tavern.yaml @@ -1470,9 +1470,10 @@ stages: json: error: 0 data: - affected_items: !anything + affected_items: + - '004' failed_items: [] - total_affected_items: !anyint + total_affected_items: 1 total_failed_items: 0 delay_after: !float "{reconnect_delay}" @@ -1569,9 +1570,10 @@ stages: json: error: 0 data: - affected_items: !anything + affected_items: + - '004' failed_items: [] - total_affected_items: !anyint + total_affected_items: 1 total_failed_items: 0 --- From 2a9e5da2bf44c394636b7b9bb11d9303cafa185d Mon Sep 17 00:00:00 2001 From: GGP1 Date: Fri, 24 May 2024 15:54:08 -0300 Subject: [PATCH 254/419] Skip file instead of removing it --- api/test/integration/env/base/agent/entrypoint.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/api/test/integration/env/base/agent/entrypoint.sh b/api/test/integration/env/base/agent/entrypoint.sh index 595398f4a59..96c0d159359 100755 --- a/api/test/integration/env/base/agent/entrypoint.sh +++ b/api/test/integration/env/base/agent/entrypoint.sh @@ -6,13 +6,13 @@ echo 'wazuh_modules.debug=2' >> /var/ossec/etc/local_internal_options.conf # Apply test.keys cp /tmp_volume/configuration_files/test.keys /var/ossec/etc/test.keys -# Remove ossec_4.x in agents with version 3.x -if [ "$3" == "agent_old" ]; then - rm /tmp_volume/configuration_files/ossec_4.x.conf -fi - # Modify ossec.conf for conf_file in /tmp_volume/configuration_files/*.conf; do + # Do not apply 4.x configuration changes to agents with version 3.x + if [ "$3" == "agent_old" ] && [ $conf_file == "/tmp_volume/configuration_files/ossec_4.x.conf" ]; then + continue + fi + python3 /tools/xml_parser.py /var/ossec/etc/ossec.conf $conf_file done From 985dde191cd4e84d925b502ba2bf6c9bbc6e8af0 Mon Sep 17 00:00:00 2001 From: GGP1 Date: Fri, 24 May 2024 15:54:21 -0300 Subject: [PATCH 255/419] Use shutil to copy directories --- api/test/integration/conftest.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/api/test/integration/conftest.py b/api/test/integration/conftest.py index 704690c7a8b..b96bb1bb895 100644 --- a/api/test/integration/conftest.py +++ b/api/test/integration/conftest.py @@ -200,12 +200,11 @@ def general_procedure(module: 
str): module : str Name of the tested module. """ - base_content = os.path.join(env_path, 'configurations', 'base', '*') - module_content = os.path.join(env_path, 'configurations', module, '*') + base_content = os.path.join(env_path, 'configurations', 'base') + module_content = os.path.join(env_path, 'configurations', module) tmp_content = os.path.join(env_path, 'configurations', 'tmp') - os.makedirs(tmp_content, exist_ok=True) - os.popen(f'cp -rf {base_content} {tmp_content}').close() - os.popen(f'cp -rf {module_content} {tmp_content}').close() + shutil.copytree(base_content, tmp_content, dirs_exist_ok=True) + shutil.copytree(module_content, tmp_content, dirs_exist_ok=True) def change_rbac_mode(rbac_mode: str = 'white'): From 72f402b3dbafaf6de158c521d5aef75ed909f6ca Mon Sep 17 00:00:00 2001 From: Selutario Date: Mon, 27 May 2024 14:45:31 +0200 Subject: [PATCH 256/419] Suppress FileNotFoundError in AIT conftest --- api/test/integration/conftest.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/api/test/integration/conftest.py b/api/test/integration/conftest.py index b96bb1bb895..b2fa8ee9269 100644 --- a/api/test/integration/conftest.py +++ b/api/test/integration/conftest.py @@ -3,6 +3,7 @@ # This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 +import contextlib import json import os import re @@ -203,8 +204,10 @@ def general_procedure(module: str): base_content = os.path.join(env_path, 'configurations', 'base') module_content = os.path.join(env_path, 'configurations', module) tmp_content = os.path.join(env_path, 'configurations', 'tmp') - shutil.copytree(base_content, tmp_content, dirs_exist_ok=True) - shutil.copytree(module_content, tmp_content, dirs_exist_ok=True) + with contextlib.suppress(FileNotFoundError): + shutil.copytree(base_content, tmp_content, dirs_exist_ok=True) + with contextlib.suppress(FileNotFoundError): + shutil.copytree(module_content, tmp_content, dirs_exist_ok=True) def change_rbac_mode(rbac_mode: str = 'white'): From 5ad203648b1767ff79767ff069f96a3d83896f73 Mon Sep 17 00:00:00 2001 From: Eduardo Leon Wazuh Date: Wed, 19 Jul 2023 08:05:36 -0300 Subject: [PATCH 257/419] Migrate AWS ITs from wazuh-qa --- tests/integration/conftest.py | 110 +- tests/integration/test_aws/README.md | 180 ++++ tests/integration/test_aws/__init__.py | 0 tests/integration/test_aws/conftest.py | 162 +++ .../bucket_configuration_defaults.yaml | 15 + .../cloudwatch_configuration_defaults.yaml | 17 + .../inspector_configuration_defaults.yaml | 15 + .../configuration_discard_regex.yaml | 23 + .../configuration_log_groups.yaml | 19 + ...et_configuration_with_only_logs_after.yaml | 19 + ...configuration_without_only_logs_after.yaml | 17 + ...ch_configuration_with_only_logs_after.yaml | 19 + ...or_configuration_with_only_logs_after.yaml | 17 + ...configuration_without_only_logs_after.yaml | 17 + ...figuration_bucket_and_service_missing.yaml | 7 + ...tion_multiple_bucket_and_service_tags.yaml | 47 + .../configuration_type_missing_in_bucket.yaml | 13 + ...configuration_type_missing_in_service.yaml | 13 + .../configuration_values_in_bucket.yaml | 25 + .../configuration_values_in_service.yaml | 21 + .../configuration_path_suffix.yaml | 19 + .../path_test_module/configuration_path.yaml | 19 + .../bucket_configuration_regions.yaml | 19 + .../cloudwatch_configuration_regions.yaml | 19 + .../inspector_configuration_regions.yaml | 17 + .../configuration_remove_from_bucket.yaml | 19 + .../configuration_remove_log_stream.yaml | 19 + 
.../cases_bucket_defaults.yaml | 125 +++ .../cases_cloudwatch_defaults.yaml | 8 + .../cases_inspector_defaults.yaml | 6 + .../cases_discard_regex.yaml | 238 +++++ .../cases_log_groups.yaml | 21 + .../cases_bucket_multiple_calls.yaml | 98 ++ .../cases_bucket_with_only_logs_after.yaml | 183 ++++ .../cases_bucket_without_only_logs_after.yaml | 155 +++ .../cases_cloudwatch_multiple_calls.yaml | 6 + ...cases_cloudwatch_with_only_logs_after.yaml | 12 + .../cases_inspector_multiple_calls.yaml | 5 + .../cases_inspector_with_only_logs_after.yaml | 9 + ...cases_service_without_only_logs_after.yaml | 9 + .../cases_bucket_and_service_missing.yaml | 4 + .../cases_empty_values_in_bucket.yaml | 71 ++ .../cases_empty_values_in_service.yaml | 39 + .../cases_invalid_values_in_bucket.yaml | 83 ++ .../cases_invalid_values_in_service.yaml | 49 + ...ases_multiple_bucket_and_service_tags.yaml | 4 + .../cases_type_missing_in_bucket.yaml | 4 + .../cases_type_missing_in_service.yaml | 4 + .../cases_path_suffix.yaml | 116 +++ .../path_test_module/cases_path.yaml | 548 ++++++++++ .../cases_bucket_regions.yaml | 233 +++++ .../cases_cloudwatch_regions.yaml | 38 + .../cases_inspector_regions.yaml | 32 + .../cases_remove_from_bucket.yaml | 140 +++ .../cases_remove_log_streams.yaml | 8 + tests/integration/test_aws/test_basic.py | 329 ++++++ .../test_aws/test_discard_regex.py | 153 +++ tests/integration/test_aws/test_log_groups.py | 164 +++ .../test_aws/test_only_logs_after.py | 963 ++++++++++++++++++ tests/integration/test_aws/test_parser.py | 600 +++++++++++ tests/integration/test_aws/test_path.py | 166 +++ .../integration/test_aws/test_path_suffix.py | 167 +++ tests/integration/test_aws/test_regions.py | 466 +++++++++ .../test_aws/test_remove_from_bucket.py | 246 +++++ tests/integration/test_aws/utils.py | 33 + 65 files changed, 6414 insertions(+), 8 deletions(-) create mode 100644 tests/integration/test_aws/README.md create mode 100644 tests/integration/test_aws/__init__.py create mode 100644 tests/integration/test_aws/conftest.py create mode 100644 tests/integration/test_aws/data/configuration_template/basic_test_module/bucket_configuration_defaults.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/basic_test_module/cloudwatch_configuration_defaults.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/basic_test_module/inspector_configuration_defaults.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_discard_regex.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/log_groups_test_module/configuration_log_groups.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_with_only_logs_after.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_without_only_logs_after.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/cloudwatch_configuration_with_only_logs_after.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/inspector_configuration_with_only_logs_after.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/service_configuration_without_only_logs_after.yaml create mode 100644 
tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_bucket_and_service_missing.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_multiple_bucket_and_service_tags.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_bucket.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_service.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_bucket.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_service.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/path_suffix_test_module/configuration_path_suffix.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/path_test_module/configuration_path.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/regions_test_module/bucket_configuration_regions.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/regions_test_module/cloudwatch_configuration_regions.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/regions_test_module/inspector_configuration_regions.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_from_bucket.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_log_stream.yaml create mode 100644 tests/integration/test_aws/data/test_cases/basic_test_module/cases_bucket_defaults.yaml create mode 100644 tests/integration/test_aws/data/test_cases/basic_test_module/cases_cloudwatch_defaults.yaml create mode 100644 tests/integration/test_aws/data/test_cases/basic_test_module/cases_inspector_defaults.yaml create mode 100644 tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_discard_regex.yaml create mode 100644 tests/integration/test_aws/data/test_cases/log_groups_test_module/cases_log_groups.yaml create mode 100644 tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_multiple_calls.yaml create mode 100644 tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_with_only_logs_after.yaml create mode 100644 tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_without_only_logs_after.yaml create mode 100644 tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_multiple_calls.yaml create mode 100644 tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_with_only_logs_after.yaml create mode 100644 tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_inspector_multiple_calls.yaml create mode 100644 tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_inspector_with_only_logs_after.yaml create mode 100644 tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_service_without_only_logs_after.yaml create mode 100644 tests/integration/test_aws/data/test_cases/parser_test_module/cases_bucket_and_service_missing.yaml create mode 100644 tests/integration/test_aws/data/test_cases/parser_test_module/cases_empty_values_in_bucket.yaml create mode 100644 
tests/integration/test_aws/data/test_cases/parser_test_module/cases_empty_values_in_service.yaml create mode 100644 tests/integration/test_aws/data/test_cases/parser_test_module/cases_invalid_values_in_bucket.yaml create mode 100644 tests/integration/test_aws/data/test_cases/parser_test_module/cases_invalid_values_in_service.yaml create mode 100644 tests/integration/test_aws/data/test_cases/parser_test_module/cases_multiple_bucket_and_service_tags.yaml create mode 100644 tests/integration/test_aws/data/test_cases/parser_test_module/cases_type_missing_in_bucket.yaml create mode 100644 tests/integration/test_aws/data/test_cases/parser_test_module/cases_type_missing_in_service.yaml create mode 100644 tests/integration/test_aws/data/test_cases/path_suffix_test_module/cases_path_suffix.yaml create mode 100644 tests/integration/test_aws/data/test_cases/path_test_module/cases_path.yaml create mode 100644 tests/integration/test_aws/data/test_cases/regions_test_module/cases_bucket_regions.yaml create mode 100644 tests/integration/test_aws/data/test_cases/regions_test_module/cases_cloudwatch_regions.yaml create mode 100644 tests/integration/test_aws/data/test_cases/regions_test_module/cases_inspector_regions.yaml create mode 100644 tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_from_bucket.yaml create mode 100644 tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_log_streams.yaml create mode 100644 tests/integration/test_aws/test_basic.py create mode 100644 tests/integration/test_aws/test_discard_regex.py create mode 100644 tests/integration/test_aws/test_log_groups.py create mode 100644 tests/integration/test_aws/test_only_logs_after.py create mode 100644 tests/integration/test_aws/test_parser.py create mode 100644 tests/integration/test_aws/test_path.py create mode 100644 tests/integration/test_aws/test_path_suffix.py create mode 100644 tests/integration/test_aws/test_regions.py create mode 100644 tests/integration/test_aws/test_remove_from_bucket.py create mode 100644 tests/integration/test_aws/utils.py diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index bf044fcda4e..504b1abb618 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -21,15 +21,18 @@ from wazuh_testing.logger import logger from wazuh_testing.tools import socket_controller from wazuh_testing.tools.monitors import queue_monitor +from wazuh_testing.tools.monitors.file_monitor import FileMonitor from wazuh_testing.tools.simulators.agent_simulator import create_agents, connect from wazuh_testing.tools.simulators.authd_simulator import AuthdSimulator from wazuh_testing.tools.simulators.remoted_simulator import RemotedSimulator from wazuh_testing.utils import configuration, database, file, mocking, services -from wazuh_testing.utils.file import remove_file +from wazuh_testing.utils.file import remove_file, truncate_file from wazuh_testing.utils.manage_agents import remove_agents +import wazuh_testing.utils.configuration as wazuh_configuration from wazuh_testing.utils.services import control_service -#- - - - - - - - - - - - - - - - - - - - - - - - -Pytest configuration - - - - - - - - - - - - - - - - - - - - - - - - + +# - - - - - - - - - - - - - - - - - - - - - - - - -Pytest configuration - - - - - - - - - - - - - - - - - - - - - - - def pytest_addoption(parser: pytest.Parser) -> None: @@ -151,16 +154,16 @@ def set_wazuh_configuration(test_configuration: dict) -> None: """Set wazuh configuration Args: - 
test_configuration (dict): Configuration template data to write in the ossec.conf
+        configuration (dict): Configuration template data to write in the ossec.conf
     """
     # Save current configuration
-    backup_config = configuration.get_wazuh_conf()
+    backup_config = wazuh_configuration.get_wazuh_conf()
 
     # Configuration for testing
-    test_config = configuration.set_section_wazuh_conf(test_configuration.get('sections'))
+    test_config = wazuh_configuration.set_section_wazuh_conf(configuration.get('sections'))
 
     # Set new configuration
-    configuration.write_wazuh_conf(test_config)
+    wazuh_configuration.write_wazuh_conf(test_config)
 
     # Set current configuration
     session_parameters.current_configuration = test_config
 
@@ -168,7 +171,98 @@ def set_wazuh_configuration(test_configuration: dict) -> None:
     yield
 
     # Restore previous configuration
-    configuration.write_wazuh_conf(backup_config)
+    wazuh_configuration.write_wazuh_conf(backup_config)
+
+
+@pytest.fixture()
+def configure_local_internal_options_function(request):
+    """Fixture to configure the local internal options file.
+
+    It uses the test variable local_internal_options. This should be
+    a dictionary whose keys and values correspond to the internal option configuration. For example:
+    local_internal_options = {'monitord.rotate_log': '0', 'syscheck.debug': '0' }
+    """
+    try:
+        local_internal_options = request.param
+    except AttributeError:
+        try:
+            local_internal_options = getattr(request.module, 'local_internal_options')
+        except AttributeError:
+            logger.debug('local_internal_options is not set')
+            raise AttributeError('Error when using the fixture "configure_local_internal_options_function", no '
+                                 'parameter has been passed explicitly, nor is the variable local_internal_options '
+                                 'found in the module.') from AttributeError
+
+    backup_local_internal_options = wazuh_configuration.get_local_internal_options_dict()
+
+    logger.debug(f"Set local_internal_option to {str(local_internal_options)}")
+    wazuh_configuration.set_local_internal_options_dict(local_internal_options)
+
+    yield
+
+    logger.debug(f"Restore local_internal_option to {str(backup_local_internal_options)}")
+    wazuh_configuration.set_local_internal_options_dict(backup_local_internal_options)
+
+
+@pytest.fixture(scope='function')
+def restart_wazuh_function(request):
+    """Restart Wazuh before starting a test, and stop it after finishing.
+
+    Args:
+        request (fixture): Provide information on the executing test function.
+    """
+    # If there is a list of required daemons defined in the test module, restart them, else restart all daemons.
+    try:
+        daemons = request.module.REQUIRED_DAEMONS
+    except AttributeError:
+        daemons = []
+
+    if len(daemons) == 0:
+        logger.debug("Restarting all daemons")
+        control_service('restart')
+    else:
+        for daemon in daemons:
+            logger.debug(f"Restarting {daemon}")
+            # Restart the daemon instead of starting it due to a legacy fixture used in the test suite.
+            control_service('restart', daemon=daemon)
+
+    yield
+
+    # Stop all daemons by default (daemons = None)
+    if len(daemons) == 0:
+        logger.debug("Stopping all daemons")
+        control_service('stop')
+    else:
+        # Stop the list of daemons in order (as Wazuh does)
+        daemons.reverse()
+        for daemon in daemons:
+            logger.debug(f"Stopping {daemon}")
+            control_service('stop', daemon=daemon)
+
+
+@pytest.fixture(scope='function')
+def file_monitoring(request):
+    """Fixture to handle the monitoring of a specified file.
+
+    It uses the variable `file_to_monitor` to determine the file to monitor. Default: `WAZUH_LOG_PATH`.
+
+    Args:
+        request (fixture): Provide information on the executing test function.
+    """
+    if hasattr(request.module, 'file_to_monitor'):
+        file_to_monitor = getattr(request.module, 'file_to_monitor')
+    else:
+        file_to_monitor = WAZUH_LOG_PATH
+
+    logger.debug(f"Initializing file to monitor to {file_to_monitor}")
+
+    file_monitor = FileMonitor(file_to_monitor)
+    setattr(request.module, 'log_monitor', file_monitor)
+
+    yield
+
+    truncate_file(file_to_monitor)
+    logger.debug(f"Truncated {file_to_monitor}")
 
 
 def truncate_monitored_files_implementation() -> None:
@@ -264,7 +358,7 @@ def daemons_handler_implementation(request: pytest.FixtureRequest) -> None:
             logger.debug('Stopping wazuh using wazuh-control')
             services.control_service('stop')
         else:
-            if daemons == API_DAEMONS_REQUIREMENTS: daemons.reverse() # Stop in reverse, otherwise the next start will fail
+            if daemons == API_DAEMONS_REQUIREMENTS: daemons.reverse()  # Stop in reverse, otherwise the next start will fail
             for daemon in daemons:
                 logger.debug(f"Stopping {daemon}")
                 services.control_service('stop', daemon=daemon)
diff --git a/tests/integration/test_aws/README.md b/tests/integration/test_aws/README.md
new file mode 100644
index 00000000000..f06aac5e543
--- /dev/null
+++ b/tests/integration/test_aws/README.md
@@ -0,0 +1,180 @@
+# AWS Integration
+
+## Description
+
+It is a _wodle based_ module capable of pulling logs from several AWS services.
+
+## Tests directory structure
+
+```bash
+wazuh-qa/tests/integration/test_aws
+├── conftest.py
+├── data
+│   ├── configuration_template
+│   │   ├── basic_test_module
+│   │   ├── discard_regex_test_module
+│   │   ├── only_logs_after_test_module
+│   │   ├── path_suffix_test_module
+│   │   ├── path_test_module
+│   │   ├── regions_test_module
+│   │   └── remove_from_bucket_test_module
+│   └── test_cases
+│       ├── basic_test_module
+│       ├── discard_regex_test_module
+│       ├── only_logs_after_test_module
+│       ├── path_suffix_test_module
+│       ├── path_test_module
+│       ├── regions_test_module
+│       └── remove_from_bucket_test_module
+├── README.md
+├── test_basic.py
+├── test_discard_regex.py
+├── test_only_logs_after.py
+├── test_path.py
+├── test_path_suffix.py
+├── test_regions.py
+└── test_remove_from_bucket.py
+```
+
+## Deps directory structure
+
+```bash
+wazuh-qa/deps/wazuh_testing/wazuh_testing/modules/aws
+├── cli_utils.py
+├── constants.py
+├── data_generator.py
+├── db_utils.py
+├── event_monitor.py
+├── __init__.py
+└── s3_utils.py
+```
+
+## Requirements
+
+- The only extra dependency is `boto3`.
+- The module assumes that buckets, log groups and an Inspector assessment with test data already exist in AWS.
+
+## Configuration settings
+
+- **credentials**
+  Set the credentials at `$HOME/.aws/credentials` (`HOME` being the home directory of the user who runs the tests; more information [here](https://documentation.wazuh.com/current/amazon/services/prerequisites/credentials.html#profiles)) with the content:
+
+```ini
+[qa]
+aws_access_key_id = 
+aws_secret_access_key = 
+```
+
+## Setting up a test environment
+
+You will need a proper environment to run the integration tests. You can use any virtual machine you wish.
+If you already have one, go to the [integration tests section](#integration-tests).
+
+If you use [Vagrant](https://www.vagrantup.com/downloads.html)
+or [VirtualBox](https://www.virtualbox.org/wiki/Downloads), it is important to install the `vbguest` plugin, since some
+tests modify the system date and there could be synchronization issues.
+
+This guide will cover the following platforms: [Linux](#linux).
+
+You can run these tests on a manager or an agent. If you are using an agent, please remember to register it and use
+the correct version (Wazuh branch).
+
+_We are skipping Wazuh installation steps. For further information,
+check the [Wazuh documentation](https://documentation.wazuh.com/current/installation-guide/index.html)._
+
+### Linux
+
+_We are using **Ubuntu 22.04** for this example:_
+
+- Install **Wazuh**
+
+- Install the Python test dependencies:
+
+  ```shell script
+  # Install pip
+  apt install python3-pip
+
+  # Clone your `wazuh-qa` repository within your testing environment
+  cd wazuh-qa
+
+  # Install Python libraries
+  python3 -m pip install -r requirements.txt
+
+  # Install test dependencies
+  python3 -m pip install deps/wazuh-testing
+  ```
+
+
+## Integration tests
+
+**DISCLAIMER:** this guide assumes you have a proper testing environment. If you do not, please check
+our [testing environment guide](#setting-up-a-test-environment).
+
+### Pytest
+
+We use [pytest](https://docs.pytest.org/en/latest/contents.html) to run our integration tests. Pytest will recursively
+look for the closest `conftest` to import all the variables and fixtures needed for every test. If something is lacking
+from the closest one, it will look for the next one (if possible) until reaching the current directory. This means we
+need to run every test from the following path, where the general _conftest_ is:
+
+```shell script
+cd wazuh-qa/tests/integration
+```
+
+To run any test, we just need to call `pytest` from `python3` using the following line:
+
+```shell script
+python3 -m pytest [options] [file_or_dir] [file_or_dir] [...]
+```
+
+**Options:**
+
+- `v`: verbosity level (-v or -vv. Highly recommended to use -vv when tests are failing)
+- `s`: shortcut for --capture=no. This will show the output in real time
+- `x`: instantly exit after the first error. Very helpful when truncating logs, since it keeps the last failed
+  result
+- `k`: only run tests which match the given substring expression (-k EXPRESSION)
+- `m`: only run tests matching the given expression (-m MARKEXPR)
+- `--tier`: only run tests with the given tier (ex. --tier 2)
+- `--html`: generates an HTML report for the test results (ex. --html=report.html)
+- `--default-timeout`: overwrites the default timeout (in seconds). This value is used to make a test fail if a
+  condition is not met before the given time elapses. Some tests make use of this value, while others have a fixed
+  timeout that cannot be modified.
+
+_Use `-h` to see the rest or check the [pytest documentation](https://docs.pytest.org/en/latest/usage.html)._
+
+Also, these integration tests are heavily based on [fixtures](https://docs.pytest.org/en/latest/fixture.html), so please
+check their documentation for further information.
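+
+Because the suite talks to real AWS resources, a quick pre-flight check that `boto3` resolves the `qa` profile (see the configuration settings above) can save a failed run. The snippet below is a minimal sanity-check sketch, not part of the test suite; the script name is illustrative, and the bucket checked is just one of the buckets referenced by the test cases:
+
+```python
+# check_aws_profile.py - hypothetical helper, not shipped with the suite.
+import boto3
+
+# Resolve the same named profile that the integration tests use.
+session = boto3.Session(profile_name='qa')
+
+# Confirm the credentials map to a valid identity.
+identity = session.client('sts').get_caller_identity()
+print(f"Authenticated as {identity['Arn']} (account {identity['Account']})")
+
+# Confirm one of the expected test buckets is reachable with these credentials.
+session.client('s3').head_bucket(Bucket='wazuh-cloudtrail-integration-tests')
+print('Bucket wazuh-cloudtrail-integration-tests is reachable')
+```
+
+If either call raises an error, review the credentials file and the bucket permissions before running the tests.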
+ +#### AWS integration tests example + +```bash +# python3 -m pytest -vvx test_aws/ -k cloudtrail +=========================================================== test session starts ====================================================== +platform linux -- Python 3.10.6, pytest-7.1.2, pluggy-1.0.0 -- /usr/bin/python3 +cachedir: .pytest_cache +metadata: {'Python': '3.10.6', 'Platform': 'Linux-5.15.0-58-generic-x86_64-with-glibc2.35', +'Packages': {'pytest': '7.1.2', 'py': '1.10.0', 'pluggy': '1.0.0'}, +'Plugins': {'metadata': '2.0.2', 'html': '3.1.1', 'testinfra': '5.0.0'}} +rootdir: /home/vagrant/qa/tests/integration, configfile: pytest.ini +plugins: metadata-2.0.2, html-3.1.1, testinfra-5.0.0 +collected 15 items + +test_aws/test_basic.py::test_defaults[cloudtrail_defaults] PASSED [ 6%] +test_aws/test_discard_regex.py::test_discard_regex[cloudtrail_discard_regex] PASSED [ 13%] +test_aws/test_only_logs_after.py::test_without_only_logs_after[cloudtrail_without_only_logs_after] PASSED [ 20%] +test_aws/test_only_logs_after.py::test_with_only_logs_after[cloudtrail_with_only_logs_after] PASSED [ 26%] +test_aws/test_only_logs_after.py::test_multiple_calls[cloudtrail_only_logs_after_multiple_calls] PASSED [ 33%] +test_aws/test_path.py::test_path[cloudtrail_path_with_data] PASSED [ 40%] +test_aws/test_path.py::test_path[cloudtrail_path_without_data] PASSED [ 46%] +test_aws/test_path.py::test_path[cloudtrail_inexistent_path] PASSED [ 53%] +test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_path_suffix_with_data] PASSED [ 60%] +test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_path_suffix_without_data] PASSED [ 66%] +test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_inexistent_path_suffix] PASSED [ 73%] +test_aws/test_regions.py::test_regions[cloudtrail_region_with_data] PASSED [ 80%] +test_aws/test_regions.py::test_regions[cloudtrail_regions_with_data] PASSED [ 86%] +test_aws/test_regions.py::test_regions[cloudtrail_inexistent_region] PASSED [ 93%] +test_aws/test_remove_from_bucket.py::test_remove_from_bucket[cloudtrail_remove_from_bucket] PASSED [100%] + +=============================================== 15 passed, 2 warnings in 332.67s (0:05:32) =========================================== +``` diff --git a/tests/integration/test_aws/__init__.py b/tests/integration/test_aws/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py new file mode 100644 index 00000000000..2dfb6bde491 --- /dev/null +++ b/tests/integration/test_aws/conftest.py @@ -0,0 +1,162 @@ +import pytest +from wazuh_testing.logger import logger +from wazuh_testing.modules.aws import ( + FAKE_CLOUDWATCH_LOG_GROUP, + PERMANENT_CLOUDWATCH_LOG_GROUP, +) +from wazuh_testing.modules.aws.cloudwatch_utils import ( + create_log_events, + create_log_group, + create_log_stream, + delete_log_group, + delete_log_stream, +) +from wazuh_testing.modules.aws.db_utils import delete_s3_db, delete_services_db +from wazuh_testing.modules.aws.s3_utils import delete_file, file_exists, upload_file +from wazuh_testing.utils.services import control_service + + +@pytest.fixture +def mark_cases_as_skipped(metadata): + if metadata['name'] in ['alb_remove_from_bucket', 'clb_remove_from_bucket', 'nlb_remove_from_bucket']: + pytest.skip(reason='ALB, CLB and NLB integrations are removing older logs from other region') + + +@pytest.fixture +def restart_wazuh_function_without_exception(daemon=None): + """Restart all Wazuh daemons.""" + try: + 
control_service("start", daemon=daemon) + except ValueError: + pass + + yield + + control_service('stop', daemon=daemon) + + +# S3 fixtures + +@pytest.fixture +def upload_and_delete_file_to_s3(metadata): + """Upload a file to S3 bucket and delete after the test ends. + + Args: + metadata (dict): Metadata to get the parameters. + """ + bucket_name = metadata['bucket_name'] + filename = upload_file(bucket_type=metadata['bucket_type'], bucket_name=metadata['bucket_name']) + if filename != '': + logger.debug('Uploaded file: %s to bucket "%s"', filename, bucket_name) + metadata['uploaded_file'] = filename + + yield + + if file_exists(filename=filename, bucket_name=bucket_name): + delete_file(filename=filename, bucket_name=bucket_name) + logger.debug('Deleted file: %s from bucket %s', filename, bucket_name) + + +@pytest.fixture +def delete_file_from_s3(metadata): + """Delete a file from S3 bucket after the test ends. + + Args: + metadata (dict): Metadata to get the parameters. + """ + yield + + bucket_name = metadata['bucket_name'] + filename = metadata.get('filename') + if filename is not None: + delete_file(filename=filename, bucket_name=bucket_name) + logger.debug('Deleted file: %s from bucket %s', filename, bucket_name) + + +# CloudWatch fixtures + +@pytest.fixture(name='create_log_stream') +def fixture_create_log_stream(metadata): + """Create a log stream with events and delete after the execution. + + Args: + metadata (dict): Metadata to get the parameters. + """ + SKIP_LOG_GROUP_CREATION = [PERMANENT_CLOUDWATCH_LOG_GROUP, FAKE_CLOUDWATCH_LOG_GROUP] + log_group_names = [item.strip() for item in metadata['log_group_name'].split(',')] + for log_group_name in log_group_names: + if log_group_name in SKIP_LOG_GROUP_CREATION: + continue + + logger.debug('Creating log group: %s', log_group_name) + create_log_group(log_group_name) + log_stream = create_log_stream(log_group_name) + logger.debug('Created log stream "%s" within log group "%s"', log_stream, log_group_name) + create_log_events( + log_stream=log_stream, log_group=log_group_name, event_number=metadata.get('expected_results', 1) + ) + logger.debug('Created log events') + metadata['log_stream'] = log_stream + + yield + + for log_group_name in log_group_names: + if log_group_name in SKIP_LOG_GROUP_CREATION: + continue + delete_log_group(log_group_name) + logger.debug('Deleted log group: %s', log_group_name) + + +@pytest.fixture +def create_log_stream_in_existent_group(metadata): + """Create a log stream with events and delete after the execution. + + Args: + metadata (dict): Metadata to get the parameters. + """ + log_group_name = metadata['log_group_name'] + log_stream = create_log_stream(log_group_name) + logger.debug('Created log stream "%s" within log group "%s"', log_stream, log_group_name) + create_log_events(log_stream=log_stream, log_group=log_group_name) + logger.debug('Created log events') + metadata['log_stream'] = log_stream + + yield + + delete_log_stream(log_stream=log_stream, log_group=log_group_name) + logger.debug('Deleted log stream: %s', log_stream) + + +@pytest.fixture(name='delete_log_stream') +def fixture_delete_log_stream(metadata): + """Create a log stream with events and delete after the execution. + + Args: + metadata (dict): Metadata to get the parameters. 
+ """ + yield + log_stream = metadata['log_stream'] + delete_log_stream(log_stream=log_stream) + logger.debug('Deleted log stream: %s', log_stream) + +# DB fixtures + + +@pytest.fixture +def clean_s3_cloudtrail_db(): + """Delete the DB file before and after the test execution""" + delete_s3_db() + + yield + + delete_s3_db() + + +@pytest.fixture +def clean_aws_services_db(): + """Delete the DB file before and after the test execution.""" + delete_services_db() + + yield + + delete_services_db() diff --git a/tests/integration/test_aws/data/configuration_template/basic_test_module/bucket_configuration_defaults.yaml b/tests/integration/test_aws/data/configuration_template/basic_test_module/bucket_configuration_defaults.yaml new file mode 100644 index 00000000000..507a734e36b --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/basic_test_module/bucket_configuration_defaults.yaml @@ -0,0 +1,15 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - bucket: + attributes: + - type: BUCKET_TYPE + elements: + - aws_profile: + value: qa + - name: + value: BUCKET_NAME diff --git a/tests/integration/test_aws/data/configuration_template/basic_test_module/cloudwatch_configuration_defaults.yaml b/tests/integration/test_aws/data/configuration_template/basic_test_module/cloudwatch_configuration_defaults.yaml new file mode 100644 index 00000000000..6fc76e6537a --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/basic_test_module/cloudwatch_configuration_defaults.yaml @@ -0,0 +1,17 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - service: + attributes: + - type: SERVICE_TYPE + elements: + - aws_profile: + value: qa + - aws_log_groups: + value: LOG_GROUP_NAME + - regions: + value: us-east-1 diff --git a/tests/integration/test_aws/data/configuration_template/basic_test_module/inspector_configuration_defaults.yaml b/tests/integration/test_aws/data/configuration_template/basic_test_module/inspector_configuration_defaults.yaml new file mode 100644 index 00000000000..2b6c6bd0430 --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/basic_test_module/inspector_configuration_defaults.yaml @@ -0,0 +1,15 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - service: + attributes: + - type: SERVICE_TYPE + elements: + - aws_profile: + value: qa + - regions: + value: us-east-1 diff --git a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_discard_regex.yaml b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_discard_regex.yaml new file mode 100644 index 00000000000..cd7e6175e8c --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_discard_regex.yaml @@ -0,0 +1,23 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - bucket: + attributes: + - type: BUCKET_TYPE + elements: + - aws_profile: + value: qa + - name: + value: BUCKET_NAME + - only_logs_after: + value: 2022-NOV-20 + - path: + value: PATH + - discard_regex: + attributes: + - field: DISCARD_FIELD + value: DISCARD_REGEX diff --git a/tests/integration/test_aws/data/configuration_template/log_groups_test_module/configuration_log_groups.yaml 
b/tests/integration/test_aws/data/configuration_template/log_groups_test_module/configuration_log_groups.yaml new file mode 100644 index 00000000000..17585a7f2d1 --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/log_groups_test_module/configuration_log_groups.yaml @@ -0,0 +1,19 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - service: + attributes: + - type: SERVICE_TYPE + elements: + - aws_profile: + value: qa + - aws_log_groups: + value: LOG_GROUP_NAME + - only_logs_after: + value: 2023-JAN-12 + - regions: + value: us-east-1 diff --git a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_with_only_logs_after.yaml b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_with_only_logs_after.yaml new file mode 100644 index 00000000000..1ccc03ee926 --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_with_only_logs_after.yaml @@ -0,0 +1,19 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: "no" + - bucket: + attributes: + - type: BUCKET_TYPE + elements: + - aws_profile: + value: qa + - name: + value: BUCKET_NAME + - only_logs_after: + value: ONLY_LOGS_AFTER + - path: + value: PATH diff --git a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_without_only_logs_after.yaml b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_without_only_logs_after.yaml new file mode 100644 index 00000000000..0821bb0b8f2 --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_without_only_logs_after.yaml @@ -0,0 +1,17 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: "no" + - bucket: + attributes: + - type: BUCKET_TYPE + elements: + - aws_profile: + value: qa + - name: + value: BUCKET_NAME + - path: + value: PATH diff --git a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/cloudwatch_configuration_with_only_logs_after.yaml b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/cloudwatch_configuration_with_only_logs_after.yaml new file mode 100644 index 00000000000..5eacd69893b --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/cloudwatch_configuration_with_only_logs_after.yaml @@ -0,0 +1,19 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: "no" + - service: + attributes: + - type: SERVICE_TYPE + elements: + - aws_profile: + value: qa + - aws_log_groups: + value: LOG_GROUP_NAME + - only_logs_after: + value: ONLY_LOGS_AFTER + - regions: + value: us-east-1 diff --git a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/inspector_configuration_with_only_logs_after.yaml b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/inspector_configuration_with_only_logs_after.yaml new file mode 100644 index 00000000000..d88be0bb12f --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/inspector_configuration_with_only_logs_after.yaml @@ -0,0 +1,17 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + 
elements: + - disabled: + value: "no" + - service: + attributes: + - type: SERVICE_TYPE + elements: + - aws_profile: + value: qa + - only_logs_after: + value: ONLY_LOGS_AFTER + - regions: + value: us-east-1 diff --git a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/service_configuration_without_only_logs_after.yaml b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/service_configuration_without_only_logs_after.yaml new file mode 100644 index 00000000000..c16c07ec92f --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/service_configuration_without_only_logs_after.yaml @@ -0,0 +1,17 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: "no" + - service: + attributes: + - type: SERVICE_TYPE + elements: + - aws_profile: + value: qa + - aws_log_groups: + value: LOG_GROUP_NAME + - regions: + value: us-east-1 diff --git a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_bucket_and_service_missing.yaml b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_bucket_and_service_missing.yaml new file mode 100644 index 00000000000..818a192d1af --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_bucket_and_service_missing.yaml @@ -0,0 +1,7 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' diff --git a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_multiple_bucket_and_service_tags.yaml b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_multiple_bucket_and_service_tags.yaml new file mode 100644 index 00000000000..645fed742cd --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_multiple_bucket_and_service_tags.yaml @@ -0,0 +1,47 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - bucket: + attributes: + - type: cloudtrail + elements: + - aws_profile: + value: qa + - name: + value: wazuh-cloudtrail-integration-tests + - regions: + value: us-east-1 + - bucket: + attributes: + - type: cloudtrail + elements: + - aws_profile: + value: qa + - name: + value: wazuh-cloudtrail-integration-tests + - regions: + value: us-east-2 + - service: + attributes: + - type: cloudwatchlogs + elements: + - aws_profile: + value: qa + - aws_log_groups: + value: wazuh-cloudwatchlogs-integration-tests + - regions: + value: us-east-1 + - service: + attributes: + - type: cloudwatchlogs + elements: + - aws_profile: + value: qa + - aws_log_groups: + value: wazuh-cloudwatchlogs-integration-tests + - regions: + value: us-east-2 diff --git a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_bucket.yaml b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_bucket.yaml new file mode 100644 index 00000000000..36cb9d2c6d6 --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_bucket.yaml @@ -0,0 +1,13 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - bucket: + elements: + - aws_profile: + value: qa + - name: + value: wazuh-cloudtrail-integration-tests diff --git 
a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_service.yaml b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_service.yaml new file mode 100644 index 00000000000..2a5e06849bd --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_service.yaml @@ -0,0 +1,13 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - service: + elements: + - aws_profile: + value: qa + - name: + aws_log_groups: wazuh-cloudwatch-integration-tests diff --git a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_bucket.yaml b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_bucket.yaml new file mode 100644 index 00000000000..5f433bbfa3f --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_bucket.yaml @@ -0,0 +1,25 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - bucket: + attributes: + - type: BUCKET_TYPE + elements: + - aws_profile: + value: qa + - name: + value: BUCKET_NAME + - only_logs_after: + value: ONLY_LOGS_AFTER + - regions: + value: REGIONS + - path: + value: PATH + - path_suffix: + value: PATH_SUFFIX + - remove_from_bucket: + value: REMOVE_FROM_BUCKET diff --git a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_service.yaml b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_service.yaml new file mode 100644 index 00000000000..1d8c91c7d36 --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_service.yaml @@ -0,0 +1,21 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - service: + attributes: + - type: SERVICE_TYPE + elements: + - aws_profile: + value: qa + - aws_log_groups: + value: LOG_GROUPS + - only_logs_after: + value: ONLY_LOGS_AFTER + - regions: + value: REGIONS + - remove_log_streams: + value: REMOVE_LOG_STREAMS diff --git a/tests/integration/test_aws/data/configuration_template/path_suffix_test_module/configuration_path_suffix.yaml b/tests/integration/test_aws/data/configuration_template/path_suffix_test_module/configuration_path_suffix.yaml new file mode 100644 index 00000000000..7b76a2dc222 --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/path_suffix_test_module/configuration_path_suffix.yaml @@ -0,0 +1,19 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - bucket: + attributes: + - type: BUCKET_TYPE + elements: + - aws_profile: + value: qa + - name: + value: BUCKET_NAME + - only_logs_after: + value: 2022-NOV-20 + - path_suffix: + value: PATH_SUFFIX diff --git a/tests/integration/test_aws/data/configuration_template/path_test_module/configuration_path.yaml b/tests/integration/test_aws/data/configuration_template/path_test_module/configuration_path.yaml new file mode 100644 index 00000000000..62b5aaff26f --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/path_test_module/configuration_path.yaml @@ -0,0 +1,19 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - bucket: 
+ attributes: + - type: BUCKET_TYPE + elements: + - aws_profile: + value: qa + - name: + value: BUCKET_NAME + - only_logs_after: + value: 2022-NOV-20 + - path: + value: PATH diff --git a/tests/integration/test_aws/data/configuration_template/regions_test_module/bucket_configuration_regions.yaml b/tests/integration/test_aws/data/configuration_template/regions_test_module/bucket_configuration_regions.yaml new file mode 100644 index 00000000000..45b8fcb4046 --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/regions_test_module/bucket_configuration_regions.yaml @@ -0,0 +1,19 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - bucket: + attributes: + - type: BUCKET_TYPE + elements: + - aws_profile: + value: qa + - name: + value: BUCKET_NAME + - only_logs_after: + value: 2022-NOV-20 + - regions: + value: REGIONS diff --git a/tests/integration/test_aws/data/configuration_template/regions_test_module/cloudwatch_configuration_regions.yaml b/tests/integration/test_aws/data/configuration_template/regions_test_module/cloudwatch_configuration_regions.yaml new file mode 100644 index 00000000000..f6c2b339acc --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/regions_test_module/cloudwatch_configuration_regions.yaml @@ -0,0 +1,19 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - service: + attributes: + - type: SERVICE_TYPE + elements: + - aws_profile: + value: qa + - aws_log_groups: + value: LOG_GROUP_NAME + - only_logs_after: + value: 2023-JAN-12 + - regions: + value: REGIONS diff --git a/tests/integration/test_aws/data/configuration_template/regions_test_module/inspector_configuration_regions.yaml b/tests/integration/test_aws/data/configuration_template/regions_test_module/inspector_configuration_regions.yaml new file mode 100644 index 00000000000..ca344fac2ec --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/regions_test_module/inspector_configuration_regions.yaml @@ -0,0 +1,17 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - service: + attributes: + - type: SERVICE_TYPE + elements: + - aws_profile: + value: qa + - only_logs_after: + value: 2023-JAN-12 + - regions: + value: REGIONS diff --git a/tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_from_bucket.yaml b/tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_from_bucket.yaml new file mode 100644 index 00000000000..5c8d3f24a74 --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_from_bucket.yaml @@ -0,0 +1,19 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - bucket: + attributes: + - type: BUCKET_TYPE + elements: + - aws_profile: + value: qa + - name: + value: BUCKET_NAME + - remove_from_bucket: + value: 'yes' + - path: + value: PATH diff --git a/tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_log_stream.yaml b/tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_log_stream.yaml new file mode 100644 index 00000000000..2fbdbf07379 --- /dev/null +++ 
b/tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_log_stream.yaml @@ -0,0 +1,19 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - service: + attributes: + - type: SERVICE_TYPE + elements: + - aws_profile: + value: qa + - aws_log_groups: + value: LOG_GROUP_NAME + - remove_log_streams: + value: 'yes' + - regions: + value: us-east-1 diff --git a/tests/integration/test_aws/data/test_cases/basic_test_module/cases_bucket_defaults.yaml b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_bucket_defaults.yaml new file mode 100644 index 00000000000..cedf6c83b23 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_bucket_defaults.yaml @@ -0,0 +1,125 @@ +- name: cloudtrail_defaults + description: CloudTrail default configurations + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + +- name: vpc_defaults + description: VPC default configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + +- name: config_defaults + description: Config default configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: wazuh-config-integration-tests + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + +- name: alb_defaults + description: ALB default configurations + configuration_parameters: + BUCKET_TYPE: alb + BUCKET_NAME: wazuh-alb-integration-tests + metadata: + bucket_type: alb + bucket_name: wazuh-alb-integration-tests + +- name: clb_defaults + description: CLB default configurations + configuration_parameters: + BUCKET_TYPE: clb + BUCKET_NAME: wazuh-clb-integration-tests + metadata: + bucket_type: clb + bucket_name: wazuh-clb-integration-tests + +- name: nlb_defaults + description: NLB default configurations + configuration_parameters: + BUCKET_TYPE: nlb + BUCKET_NAME: wazuh-nlb-integration-tests + metadata: + bucket_type: nlb + bucket_name: wazuh-nlb-integration-tests + +- name: kms_defaults + description: KMS default configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-kms-integration-tests + metadata: + bucket_type: custom + bucket_name: wazuh-kms-integration-tests + +- name: macie_defaults + description: CloudTrail default configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-macie-integration-tests + metadata: + bucket_type: custom + bucket_name: wazuh-macie-integration-tests + +- name: trusted_advisor_defaults + description: Trusted Advisor default configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-trusted-advisor-integration-tests + metadata: + bucket_type: custom + bucket_name: wazuh-trusted-advisor-integration-tests + +- name: guardduty_defaults + description: GuardDuty default configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-guardduty-integration-tests + metadata: + bucket_type: guardduty + bucket_name: wazuh-guardduty-integration-tests + +- name: native_guardduty_defaults + description: Native GuardDuty default configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-native-guardduty-integration-tests + metadata: + bucket_type: guardduty + 
bucket_name: wazuh-native-guardduty-integration-tests + +- name: waf_defaults + description: WAF default configurations + configuration_parameters: + BUCKET_TYPE: waf + BUCKET_NAME: wazuh-waf-integration-tests + metadata: + bucket_type: waf + bucket_name: wazuh-waf-integration-tests + +- name: server_access_defaults + description: Server Access default configurations + configuration_parameters: + BUCKET_TYPE: server_access + BUCKET_NAME: wazuh-server-access-integration-tests + metadata: + bucket_type: server_access + bucket_name: wazuh-server-access-integration-tests + +- name: cisco_umbrella_defaults + description: Umbrella default configurations + configuration_parameters: + BUCKET_TYPE: cisco_umbrella + BUCKET_NAME: wazuh-umbrella-integration-tests + metadata: + bucket_type: cisco_umbrella + bucket_name: wazuh-umbrella-integration-tests diff --git a/tests/integration/test_aws/data/test_cases/basic_test_module/cases_cloudwatch_defaults.yaml b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_cloudwatch_defaults.yaml new file mode 100644 index 00000000000..ca0c2e7adac --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_cloudwatch_defaults.yaml @@ -0,0 +1,8 @@ +- name: cloudwatchlogs_defaults + description: CloudWatch default configurations + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests + metadata: + service_type: cloudwatchlogs + log_group_name: wazuh-cloudwatchlogs-integration-tests diff --git a/tests/integration/test_aws/data/test_cases/basic_test_module/cases_inspector_defaults.yaml b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_inspector_defaults.yaml new file mode 100644 index 00000000000..094da2a5390 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_inspector_defaults.yaml @@ -0,0 +1,6 @@ +- name: inspector_defaults + description: Inspector default configurations + configuration_parameters: + SERVICE_TYPE: inspector + metadata: + service_type: inspector diff --git a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_discard_regex.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_discard_regex.yaml new file mode 100644 index 00000000000..7c37319fb6e --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_discard_regex.yaml @@ -0,0 +1,238 @@ +- name: cloudtrail_discard_regex + description: CloudTrail discard regex configurations + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + DISCARD_FIELD: eventSource + DISCARD_REGEX: .*ec2.amazonaws.com.* + PATH: '' + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: eventSource + discard_regex: .*ec2.amazonaws.com.* + found_logs: 5 + skipped_logs: 1 + +- name: vpc_discard_regex + description: VPC discard regex configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + DISCARD_FIELD: srcport + DISCARD_REGEX: "5319" + PATH: '' + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: srcport + discard_regex: "5319" + found_logs: 5 + skipped_logs: 1 + +- name: config_discard_regex + description: Config discard regex configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: 
wazuh-config-integration-tests + DISCARD_FIELD: configuration.complianceType + DISCARD_REGEX: .*COMPLIANT.* + PATH: '' + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: configuration.complianceType + discard_regex: .*COMPLIANT.* + found_logs: 5 + skipped_logs: 1 + +- name: alb_discard_regex + description: ALB discard regex configurations + configuration_parameters: + BUCKET_TYPE: alb + BUCKET_NAME: wazuh-alb-integration-tests + DISCARD_FIELD: elb_status_code + DISCARD_REGEX: '401' + PATH: '' + metadata: + bucket_type: alb + bucket_name: wazuh-alb-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: elb_status_code + discard_regex: '401' + found_logs: 5 + skipped_logs: 1 + +- name: clb_discard_regex + description: CLB discard regex configurations + configuration_parameters: + BUCKET_TYPE: clb + BUCKET_NAME: wazuh-clb-integration-tests + DISCARD_FIELD: elb_status_code + DISCARD_REGEX: '401' + PATH: '' + metadata: + bucket_type: clb + bucket_name: wazuh-clb-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: elb_status_code + discard_regex: '401' + found_logs: 5 + skipped_logs: 1 + +- name: nlb_discard_regex + description: NLB discard regex configurations + configuration_parameters: + BUCKET_TYPE: nlb + BUCKET_NAME: wazuh-nlb-integration-tests + DISCARD_FIELD: listener + DISCARD_REGEX: 0CMK2UAG108C7AXK + PATH: '' + metadata: + bucket_type: nlb + bucket_name: wazuh-nlb-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: listener + discard_regex: 0CMK2UAG108C7AXK + found_logs: 5 + skipped_logs: 1 + +- name: kms_discard_regex + description: KMS discard regex configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-kms-integration-tests + DISCARD_FIELD: eventName + DISCARD_REGEX: MatchDataKey + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-kms-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: eventName + discard_regex: MatchDataKey + found_logs: 3 + skipped_logs: 1 + +- name: macie_discard_regex + description: Macie discard regex configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-macie-integration-tests + DISCARD_FIELD: severity + DISCARD_REGEX: LOW + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-macie-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: severity + discard_regex: LOW + found_logs: 3 + skipped_logs: 1 + +- name: trusted_advisor_discard_regex + description: Trusted Advisor discard regex configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-trusted-advisor-integration-tests + DISCARD_FIELD: status + DISCARD_REGEX: ERROR + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-trusted-advisor-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: status + discard_regex: ERROR + found_logs: 3 + skipped_logs: 1 + +- name: guardduty_discard_regex + description: GuardDuty discard regex configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-guardduty-integration-tests + DISCARD_FIELD: partition + DISCARD_REGEX: aws-test + PATH: '' + metadata: + bucket_type: guardduty + bucket_name: wazuh-guardduty-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: partition + discard_regex: aws-test + found_logs: 3 + skipped_logs: 1 + +- name: native_guardduty_discard_regex + description: Native GuardDuty discard regex configurations + configuration_parameters: + 
BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-native-guardduty-integration-tests + DISCARD_FIELD: partition + DISCARD_REGEX: aws-test + PATH: '' + metadata: + bucket_type: guardduty + bucket_name: wazuh-native-guardduty-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: partition + discard_regex: aws-test + found_logs: 3 + skipped_logs: 1 + +- name: waf_discard_regex + description: WAF discard regex configurations + configuration_parameters: + BUCKET_TYPE: waf + BUCKET_NAME: wazuh-waf-integration-tests + DISCARD_FIELD: action + DISCARD_REGEX: ALLOW + PATH: '' + metadata: + bucket_type: waf + bucket_name: wazuh-waf-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: action + discard_regex: ALLOW + found_logs: 3 + skipped_logs: 1 + +- name: server_access_discard_regex + description: Server Access discard regex configurations + configuration_parameters: + BUCKET_TYPE: server_access + BUCKET_NAME: wazuh-server-access-integration-tests + DISCARD_FIELD: http_status + DISCARD_REGEX: '200' + PATH: '' + metadata: + bucket_type: server_access + bucket_name: wazuh-server-access-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: http_status + discard_regex: '200' + found_logs: 3 + skipped_logs: 1 + +- name: cisco_umbrella_discard_regex + description: CloudTrail discard regex configurations + configuration_parameters: + BUCKET_TYPE: cisco_umbrella + BUCKET_NAME: wazuh-umbrella-integration-tests + DISCARD_FIELD: action + DISCARD_REGEX: Blocked + PATH: dnslogs + metadata: + bucket_type: cisco_umbrella + bucket_name: wazuh-umbrella-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: action + discard_regex: Blocked + found_logs: 3 + skipped_logs: 1 + path: dnslogs diff --git a/tests/integration/test_aws/data/test_cases/log_groups_test_module/cases_log_groups.yaml b/tests/integration/test_aws/data/test_cases/log_groups_test_module/cases_log_groups.yaml new file mode 100644 index 00000000000..aefd5f59f79 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/log_groups_test_module/cases_log_groups.yaml @@ -0,0 +1,21 @@ +- name: cloudwatchlogs_log_groups_with_data + description: CloudWatch log groups configurations + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests,temporary-log-group + metadata: + service_type: cloudwatchlogs + log_group_name: wazuh-cloudwatchlogs-integration-tests,temporary-log-group + only_logs_after: 2023-JAN-12 + expected_results: 3 + +- name: cloudwatchlogs_inexistent_log_group + description: CloudWatch log group configurations + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUP_NAME: fake-log-group + metadata: + service_type: cloudwatchlogs + log_group_name: fake-log-group + only_logs_after: 2023-JAN-12 + expected_results: 0 diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_multiple_calls.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_multiple_calls.yaml new file mode 100644 index 00000000000..12b19b6226c --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_multiple_calls.yaml @@ -0,0 +1,98 @@ +- name: cloudtrail_only_logs_after_multiple_calls + description: CloudTrail only_logs_after multiple calls configurations + configuration_parameters: + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + +- name: vpc_only_logs_after_multiple_calls + description: VPC 
only_logs_after multiple calls configurations + configuration_parameters: + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + +- name: config_only_logs_after_multiple_calls + description: Config only_logs_after multiple calls configurations + configuration_parameters: + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + +- name: alb_only_logs_after_multiple_calls + description: ALB only_logs_after multiple calls configurations + configuration_parameters: + metadata: + bucket_type: alb + bucket_name: wazuh-alb-integration-tests + +- name: clb_only_logs_after_multiple_calls + description: CLB only_logs_after multiple calls configurations + configuration_parameters: + metadata: + bucket_type: clb + bucket_name: wazuh-clb-integration-tests + +- name: nlb_only_logs_after_multiple_calls + description: NLB only_logs_after multiple calls configurations + configuration_parameters: + metadata: + bucket_type: nlb + bucket_name: wazuh-nlb-integration-tests + +- name: kms_only_logs_after_multiple_calls + description: KMS only_logs_after multiple calls configurations + configuration_parameters: + metadata: + bucket_type: custom + bucket_name: wazuh-kms-integration-tests + +- name: macie_only_logs_after_multiple_calls + description: Macie only_logs_after multiple calls configurations + configuration_parameters: + metadata: + bucket_type: custom + bucket_name: wazuh-macie-integration-tests + +- name: trusted_advisor_only_logs_after_multiple_calls + description: Trusted Advisor only_logs_after multiple calls configurations + configuration_parameters: + metadata: + bucket_type: custom + bucket_name: wazuh-trusted-advisor-integration-tests + +- name: guardduty_only_logs_after_multiple_calls + description: GuardDuty only_logs_after multiple calls configurations + configuration_parameters: + metadata: + bucket_type: guardduty + bucket_name: wazuh-guardduty-integration-tests + +- name: native_guardduty_only_logs_after_multiple_calls + description: Native GuardDuty only_logs_after multiple calls configurations + configuration_parameters: + metadata: + bucket_type: guardduty + bucket_name: wazuh-native-guardduty-integration-tests + +- name: waf_only_logs_after_multiple_calls + description: WAF only_logs_after multiple calls configurations + configuration_parameters: + metadata: + bucket_type: waf + bucket_name: wazuh-waf-integration-tests + +- name: server_access_only_logs_after_multiple_calls + description: Server Access only_logs_after multiple calls configurations + configuration_parameters: + metadata: + bucket_type: server_access + bucket_name: wazuh-server-access-integration-tests + +- name: cisco_umbrella_only_logs_after_multiple_calls + description: Umbrella only_logs_after multiple calls configurations + configuration_parameters: + metadata: + bucket_type: cisco_umbrella + bucket_name: wazuh-umbrella-integration-tests + path: dnslogs diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_with_only_logs_after.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_with_only_logs_after.yaml new file mode 100644 index 00000000000..a466dcbf445 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_with_only_logs_after.yaml @@ -0,0 +1,183 @@ +- name: cloudtrail_with_only_logs_after + description: CloudTrail only logs after configurations + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: 
wazuh-cloudtrail-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + PATH: '' + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 5 + +- name: vpc_with_only_logs_after + description: VPC only logs after configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + PATH: '' + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 3 + +- name: config_with_only_logs_after + description: Config only logs after configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: wazuh-config-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + PATH: '' + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 5 + +- name: alb_with_only_logs_after + description: ALB only logs after configurations + configuration_parameters: + BUCKET_TYPE: alb + BUCKET_NAME: wazuh-alb-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + PATH: '' + metadata: + bucket_type: alb + bucket_name: wazuh-alb-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 5 + +- name: clb_with_only_logs_after + description: CLB only logs after configurations + configuration_parameters: + BUCKET_TYPE: clb + BUCKET_NAME: wazuh-clb-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + PATH: '' + metadata: + bucket_type: clb + bucket_name: wazuh-clb-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 5 + +- name: nlb_with_only_logs_after + description: NLB only logs after configurations + configuration_parameters: + BUCKET_TYPE: nlb + BUCKET_NAME: wazuh-nlb-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + PATH: '' + metadata: + bucket_type: nlb + bucket_name: wazuh-nlb-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 5 + +- name: kms_with_only_logs_after + description: KMS only logs after configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-kms-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-kms-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 3 + +- name: macie_with_only_logs_after + description: Macie only logs after configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-macie-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-macie-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 3 + +- name: trusted_avisor_with_only_logs_after + description: Trusted Advisor only logs after configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-trusted-advisor-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-trusted-advisor-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 3 + +- name: guardduty_with_only_logs_after + description: GuardDuty only logs after configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-guardduty-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + PATH: '' + metadata: + bucket_type: guardduty + bucket_name: wazuh-guardduty-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 3 + +- name: native_guardduty_with_only_logs_after + description: Native GuardDuty only 
logs after configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-native-guardduty-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + PATH: '' + metadata: + bucket_type: guardduty + bucket_name: wazuh-native-guardduty-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 3 + +- name: waf_with_only_logs_after + description: WAF only logs after configurations + configuration_parameters: + BUCKET_TYPE: waf + BUCKET_NAME: wazuh-waf-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + PATH: '' + metadata: + bucket_type: waf + bucket_name: wazuh-waf-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 3 + +- name: server_access_with_only_logs_after + description: Server Access only logs after configurations + configuration_parameters: + BUCKET_TYPE: server_access + BUCKET_NAME: wazuh-server-access-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + PATH: '' + metadata: + bucket_type: server_access + bucket_name: wazuh-server-access-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 3 + table_name: s3_server_access + +- name: cisco_umbrella_with_only_logs_after + description: Umbrella only logs after configurations + configuration_parameters: + BUCKET_TYPE: cisco_umbrella + BUCKET_NAME: wazuh-umbrella-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + PATH: dnslogs + metadata: + bucket_type: cisco_umbrella + bucket_name: wazuh-umbrella-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 3 + path: dnslogs diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_without_only_logs_after.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_without_only_logs_after.yaml new file mode 100644 index 00000000000..8b622f44f84 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_without_only_logs_after.yaml @@ -0,0 +1,155 @@ +- name: cloudtrail_without_only_logs_after + description: CloudTrail only logs after configurations + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + PATH: '' + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + expected_results: 1 + +- name: vpc_without_only_logs_after + description: VPC only logs after configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + PATH: '' + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + expected_results: 1 + +- name: config_without_only_logs_after + description: Config only logs after configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: wazuh-config-integration-tests + PATH: '' + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + expected_results: 1 + +- name: alb_without_only_logs_after + description: ALB only logs after configurations + configuration_parameters: + BUCKET_TYPE: alb + BUCKET_NAME: wazuh-alb-integration-tests + PATH: '' + metadata: + bucket_type: alb + bucket_name: wazuh-alb-integration-tests + expected_results: 1 + +- name: clb_without_only_logs_after + description: CLB only logs after configurations + configuration_parameters: + BUCKET_TYPE: clb + BUCKET_NAME: wazuh-clb-integration-tests + PATH: '' + metadata: + bucket_type: clb + bucket_name: wazuh-clb-integration-tests + expected_results: 1 + +- name: nlb_without_only_logs_after + description: NLB only logs after configurations 
+ configuration_parameters: + BUCKET_TYPE: nlb + BUCKET_NAME: wazuh-nlb-integration-tests + PATH: '' + metadata: + bucket_type: nlb + bucket_name: wazuh-nlb-integration-tests + expected_results: 1 + +- name: kms_without_only_logs_after + description: KMS only logs after configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-kms-integration-tests + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-kms-integration-tests + expected_results: 1 + +- name: macie_without_only_logs_after + description: Macie only logs after configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-macie-integration-tests + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-macie-integration-tests + expected_results: 1 + +- name: trusted_advisor_without_only_logs_after + description: Trusted Advisor only logs after configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-trusted-advisor-integration-tests + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-trusted-advisor-integration-tests + expected_results: 1 + +- name: guardduty_without_only_logs_after + description: GuardDuty only logs after configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-guardduty-integration-tests + PATH: '' + metadata: + bucket_type: guardduty + bucket_name: wazuh-guardduty-integration-tests + expected_results: 1 + +- name: native_guardduty_without_only_logs_after + description: Native GuardDuty only logs after configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-native-guardduty-integration-tests + PATH: '' + metadata: + bucket_type: guardduty + bucket_name: wazuh-native-guardduty-integration-tests + expected_results: 1 + +- name: waf_without_only_logs_after + description: WAF only logs after configurations + configuration_parameters: + BUCKET_TYPE: waf + BUCKET_NAME: wazuh-waf-integration-tests + PATH: '' + metadata: + bucket_type: waf + bucket_name: wazuh-waf-integration-tests + expected_results: 1 + +- name: server_access_without_only_logs_after + description: Server Access only logs after configurations + configuration_parameters: + BUCKET_TYPE: server_access + BUCKET_NAME: wazuh-server-access-integration-tests + PATH: '' + metadata: + bucket_type: server_access + bucket_name: wazuh-server-access-integration-tests + expected_results: 1 + table_name: s3_server_access + +- name: cisco_umbrella_without_only_logs_after + description: Umbrella only logs after configurations + configuration_parameters: + BUCKET_TYPE: cisco_umbrella + BUCKET_NAME: wazuh-umbrella-integration-tests + PATH: dnslogs + metadata: + bucket_type: cisco_umbrella + bucket_name: wazuh-umbrella-integration-tests + expected_results: 1 + path: dnslogs diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_multiple_calls.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_multiple_calls.yaml new file mode 100644 index 00000000000..808e86d5114 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_multiple_calls.yaml @@ -0,0 +1,6 @@ +- name: cloudwatchlogs_only_logs_after_multiple_calls + description: CloudWatch only_logs_after multiple calls configurations + configuration_parameters: + metadata: + service_type: cloudwatchlogs + log_group_name: wazuh-cloudwatchlogs-integration-tests diff --git 
a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_with_only_logs_after.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_with_only_logs_after.yaml new file mode 100644 index 00000000000..6fc8afb1571 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_with_only_logs_after.yaml @@ -0,0 +1,12 @@ +- name: cloudwatchlogs_with_only_logs_after + description: CloudWatch only logs after configurations + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests + ONLY_LOGS_AFTER: 2022-NOV-20 + metadata: + service_type: cloudwatchlogs + log_group_name: wazuh-cloudwatchlogs-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 3 + log_stream: permanent-logs diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_inspector_multiple_calls.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_inspector_multiple_calls.yaml new file mode 100644 index 00000000000..738095e8610 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_inspector_multiple_calls.yaml @@ -0,0 +1,5 @@ +- name: inspector_only_logs_after_multiple_calls + description: Inspector only_logs_after multiple calls configurations + configuration_parameters: + metadata: + service_type: inspector diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_inspector_with_only_logs_after.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_inspector_with_only_logs_after.yaml new file mode 100644 index 00000000000..860c37e7976 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_inspector_with_only_logs_after.yaml @@ -0,0 +1,9 @@ +- name: inspector_with_only_logs_after + description: Inspector only logs after configurations + configuration_parameters: + SERVICE_TYPE: inspector + ONLY_LOGS_AFTER: 2023-JAN-30 + metadata: + service_type: inspector + only_logs_after: 2023-JAN-30 + expected_results: 4 diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_service_without_only_logs_after.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_service_without_only_logs_after.yaml new file mode 100644 index 00000000000..40414b43d60 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_service_without_only_logs_after.yaml @@ -0,0 +1,9 @@ +- name: cloudwatchlogs_without_only_logs_after + description: CloudWatch only logs after configurations + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests + metadata: + service_type: cloudwatchlogs + log_group_name: wazuh-cloudwatchlogs-integration-tests + expected_results: 1 diff --git a/tests/integration/test_aws/data/test_cases/parser_test_module/cases_bucket_and_service_missing.yaml b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_bucket_and_service_missing.yaml new file mode 100644 index 00000000000..a1b300a75a0 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_bucket_and_service_missing.yaml @@ -0,0 +1,4 @@ +- name: parser_bucket_and_service_missing + description: Parser bucket and service missing configurations + configuration_parameters: [] + metadata: [] diff --git 
a/tests/integration/test_aws/data/test_cases/parser_test_module/cases_empty_values_in_bucket.yaml b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_empty_values_in_bucket.yaml new file mode 100644 index 00000000000..f230042ece3 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_empty_values_in_bucket.yaml @@ -0,0 +1,71 @@ +- name: parser_empty_type_in_bucket + description: Parser empty type in bucket + configuration_parameters: + BUCKET_TYPE: '' + BUCKET_NAME: wazuh-cloudtrail-integration-tests + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: us-east-1 + PATH: test_prefix + PATH_SUFFIX: test_suffix + REMOVE_FROM_BUCKET: 'no' + metadata: [] + +- name: parser_empty_name_in_bucket + description: Parser empty name in bucket + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: '' + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: us-east-1 + PATH: test_prefix + PATH_SUFFIX: test_suffix + REMOVE_FROM_BUCKET: 'no' + metadata: [] + +- name: parser_empty_only_logs_after_in_bucket + description: Parser empty only_logs_after in bucket + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + ONLY_LOGS_AFTER: '' + REGIONS: us-east-1 + PATH: test_prefix + PATH_SUFFIX: test_suffix + REMOVE_FROM_BUCKET: 'no' + metadata: [] + +- name: parser_empty_regions_in_bucket + description: Parser empty regions in bucket + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: '' + PATH: test_prefix + PATH_SUFFIX: test_suffix + REMOVE_FROM_BUCKET: 'no' + metadata: [] + +- name: parser_empty_path_in_bucket + description: Parser empty path in bucket + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: us-east-1 + PATH: '' + PATH_SUFFIX: test_suffix + REMOVE_FROM_BUCKET: 'no' + metadata: [] + +- name: parser_empty_path_suffix_in_bucket + description: Parser empty path_suffix in bucket + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: us-east-1 + PATH: test_prefix + PATH_SUFFIX: '' + REMOVE_FROM_BUCKET: 'no' + metadata: [] diff --git a/tests/integration/test_aws/data/test_cases/parser_test_module/cases_empty_values_in_service.yaml b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_empty_values_in_service.yaml new file mode 100644 index 00000000000..d252f271d01 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_empty_values_in_service.yaml @@ -0,0 +1,39 @@ +- name: parser_empty_type_in_service + description: Parser empty type in service + configuration_parameters: + SERVICE_TYPE: '' + LOG_GROUPS: wazuh-cloudwatch-integration-tests + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: us-east-1 + REMOVE_FROM_BUCKET: 'no' + metadata: [] + +- name: parser_empty_log_groups_in_service + description: Parser empty log_groups in service + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUPS: '' + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: us-east-1 + REMOVE_FROM_BUCKET: 'no' + metadata: [] + +- name: parser_empty_only_logs_after_in_service + description: Parser empty only_logs_after in service + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUPS: wazuh-cloudwatchlogs-integration-tests + ONLY_LOGS_AFTER: '' + REGIONS: us-east-1 + REMOVE_FROM_BUCKET: 'no' + metadata: [] + +- 
name: parser_empty_regions_in_service + description: Parser empty regions in service + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUPS: wazuh-cloudwatchlogs-integration-tests + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: '' + REMOVE_FROM_BUCKET: 'no' + metadata: [] diff --git a/tests/integration/test_aws/data/test_cases/parser_test_module/cases_invalid_values_in_bucket.yaml b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_invalid_values_in_bucket.yaml new file mode 100644 index 00000000000..9edda13e2a2 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_invalid_values_in_bucket.yaml @@ -0,0 +1,83 @@ +- name: parser_invalid_type_in_bucket + description: Parser invalid type in bucket + configuration_parameters: + BUCKET_TYPE: invalid_value + BUCKET_NAME: wazuh-cloudtrail-integration-tests + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: us-east-1 + PATH: test_prefix + PATH_SUFFIX: test_suffix + REMOVE_FROM_BUCKET: 'no' + metadata: [] + +- name: parser_invalid_name_in_bucket + description: Parser invalid name in bucket + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: 1 + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: us-east-1 + PATH: test_prefix + PATH_SUFFIX: test_suffix + REMOVE_FROM_BUCKET: 'no' + metadata: [] + +- name: parser_invalid_only_logs_after_in_bucket + description: Parser invalid only_logs_after in bucket + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + ONLY_LOGS_AFTER: JAN-31 + REGIONS: us-east-1 + PATH: test_prefix + PATH_SUFFIX: test_suffix + REMOVE_FROM_BUCKET: 'no' + metadata: [] + +- name: parser_invalid_regions_in_bucket + description: Parser invalid regions in bucket + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: 1 + PATH: test_prefix + PATH_SUFFIX: test_suffix + REMOVE_FROM_BUCKET: 'no' + metadata: [] + +- name: parser_invalid_path_in_bucket + description: Parser invalid path in bucket + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: us-east-1 + PATH: test-prefix> + PATH_SUFFIX: test_suffix + REMOVE_FROM_BUCKET: 'no' + metadata: [] + +- name: parser_invalid_path_suffix_in_bucket + description: Parser invalid path_suffix in bucket + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: us-east-1 + PATH: test_prefix + PATH_SUFFIX: test-suffix> + REMOVE_FROM_BUCKET: 'no' + metadata: [] + +- name: parser_invalid_remove_from_bucket_in_bucket + description: Parser invalid remove_from_bucket in bucket + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: us-east-1 + PATH: test_prefix + PATH_SUFFIX: test-suffix + REMOVE_FROM_BUCKET: nein + metadata: [] diff --git a/tests/integration/test_aws/data/test_cases/parser_test_module/cases_invalid_values_in_service.yaml b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_invalid_values_in_service.yaml new file mode 100644 index 00000000000..fb739358fcd --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_invalid_values_in_service.yaml @@ -0,0 +1,49 @@ +- name: parser_invalid_type_in_service + description: Parser invalid type in service + configuration_parameters: + 
SERVICE_TYPE: fakeservice + LOG_GROUPS: wazuh-cloudwatch-integration-tests + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: us-east-1 + REMOVE_LOG_STREAMS: 'no' + metadata: [] + +- name: parser_invalid_log_groups_in_service + description: Parser invalid log_groups in service + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUPS: invalid_log_group> + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: us-east-1 + REMOVE_LOG_STREAMS: 'no' + metadata: [] + +- name: parser_invalid_only_logs_after_in_service + description: Parser invalid only_logs_after in service + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUPS: wazuh-cloudwatchlogs-integration-tests + ONLY_LOGS_AFTER: JAN-31 + REGIONS: us-east-1 + REMOVE_LOG_STREAMS: 'no' + metadata: [] + +- name: parser_invalid_regions_in_service + description: Parser invalid regions in service + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUPS: wazuh-cloudwatchlogs-integration-tests + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: 1 + REMOVE_LOG_STREAMS: 'no' + metadata: [] + +- name: parser_invalid_remove_log_stream_in_service + description: Parser invalid remove_log_stream in service + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUPS: wazuh-cloudwatchlogs-integration-tests + ONLY_LOGS_AFTER: 2023-JAN-31 + REGIONS: 1 + REMOVE_LOG_STREAMS: nein + metadata: [] diff --git a/tests/integration/test_aws/data/test_cases/parser_test_module/cases_multiple_bucket_and_service_tags.yaml b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_multiple_bucket_and_service_tags.yaml new file mode 100644 index 00000000000..6b07c332e6f --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_multiple_bucket_and_service_tags.yaml @@ -0,0 +1,4 @@ +- name: parser_mutiple_bucket_and_service_tags + description: Parser multiple bucket and service tags configurations + configuration_parameters: [] + metadata: [] diff --git a/tests/integration/test_aws/data/test_cases/parser_test_module/cases_type_missing_in_bucket.yaml b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_type_missing_in_bucket.yaml new file mode 100644 index 00000000000..44cef7e1dd7 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_type_missing_in_bucket.yaml @@ -0,0 +1,4 @@ +- name: parser_type_missing_in_bucket + description: Parser type missing in bucket configurations + configuration_parameters: [] + metadata: [] diff --git a/tests/integration/test_aws/data/test_cases/parser_test_module/cases_type_missing_in_service.yaml b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_type_missing_in_service.yaml new file mode 100644 index 00000000000..d8ba3d2b205 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_type_missing_in_service.yaml @@ -0,0 +1,4 @@ +- name: parser_type_missing_in_service + description: Parser type missing in service configurations + configuration_parameters: [] + metadata: [] diff --git a/tests/integration/test_aws/data/test_cases/path_suffix_test_module/cases_path_suffix.yaml b/tests/integration/test_aws/data/test_cases/path_suffix_test_module/cases_path_suffix.yaml new file mode 100644 index 00000000000..b95f1695a62 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/path_suffix_test_module/cases_path_suffix.yaml @@ -0,0 +1,116 @@ +- name: cloudtrail_path_suffix_with_data + description: CloudTrail path_suffix configurations + configuration_parameters: + BUCKET_TYPE: 
cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + PATH_SUFFIX: test_suffix + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + only_logs_after: 2022-NOV-20 + path_suffix: test_suffix + expected_results: 1 + +- name: cloudtrail_path_suffix_without_data + description: CloudTrail path_suffix configurations + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + PATH_SUFFIX: empty_suffix + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + only_logs_after: 2022-NOV-20 + path_suffix: empty_suffix + expected_results: 0 + +- name: cloudtrail_inexistent_path_suffix + description: CloudTrail path_suffix configurations + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + PATH_SUFFIX: inexistent_suffix + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + only_logs_after: 2022-NOV-20 + path_suffix: inexistent_suffix + expected_results: 0 + +- name: vpc_path_suffix_with_data + description: VPC path_suffix configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + PATH_SUFFIX: test_suffix + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + only_logs_after: 2022-NOV-20 + path_suffix: test_suffix + expected_results: 1 + +- name: config_path_suffix_with_data + description: Config path_suffix configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: wazuh-config-integration-tests + PATH_SUFFIX: test_suffix + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + only_logs_after: 2022-NOV-20 + path_suffix: test_suffix + expected_results: 1 + +- name: vpc_path_suffix_without_data + description: VPC path_suffix configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + PATH_SUFFIX: empty_suffix + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + only_logs_after: 2022-NOV-20 + path_suffix: empty_suffix + expected_results: 0 + +- name: config_path_suffix_without_data + description: Config path_suffix configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: wazuh-config-integration-tests + PATH_SUFFIX: empty_suffix + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + only_logs_after: 2022-NOV-20 + path_suffix: empty_suffix + expected_results: 0 + +- name: vpc_inexistent_path_suffix + description: VPC path_suffix configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + PATH_SUFFIX: inexistent_suffix + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + only_logs_after: 2022-NOV-20 + path_suffix: inexistent_suffix + expected_results: 0 + +- name: config_inexistent_path_suffix + description: Config path_suffix configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: wazuh-config-integration-tests + PATH_SUFFIX: inexistent_suffix + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + only_logs_after: 2022-NOV-20 + path_suffix: inexistent_suffix + expected_results: 0 diff --git a/tests/integration/test_aws/data/test_cases/path_test_module/cases_path.yaml b/tests/integration/test_aws/data/test_cases/path_test_module/cases_path.yaml new file mode 100644 index 00000000000..86b04ea2aa6 --- 
/dev/null +++ b/tests/integration/test_aws/data/test_cases/path_test_module/cases_path.yaml @@ -0,0 +1,548 @@ +- name: cloudtrail_path_with_data + description: CloudTrail path configurations + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + PATH: test_prefix + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + only_logs_after: 2022-NOV-20 + path: test_prefix + expected_results: 1 + +- name: cloudtrail_path_without_data + description: CloudTrail path configurations + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + PATH: empty_prefix + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + only_logs_after: 2022-NOV-20 + path: empty_prefix + expected_results: 0 + +- name: cloudtrail_inexistent_path + description: CloudTrail path configurations + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + PATH: inexistent_prefix + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + only_logs_after: 2022-NOV-20 + path: inexistent_prefix + expected_results: 0 + +- name: vpc_path_with_data + description: VPC path configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + PATH: test_prefix + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + only_logs_after: 2022-NOV-20 + path: test_prefix + expected_results: 1 + +- name: vpc_path_without_data + description: VPC path configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + PATH: empty_prefix + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + only_logs_after: 2022-NOV-20 + path: empty_prefix + expected_results: 0 + +- name: cisco_umbrella_path_with_data + description: Umbrella path configurations + configuration_parameters: + BUCKET_TYPE: cisco_umbrella + BUCKET_NAME: wazuh-umbrella-integration-tests + PATH: test_prefix/dnslogs + metadata: + bucket_type: cisco_umbrella + bucket_name: wazuh-umbrella-integration-tests + only_logs_after: 2022-NOV-20 + path: test_prefix/dnslogs + expected_results: 1 + +- name: cisco_umbrella_path_without_data + description: Umbrella path configurations + configuration_parameters: + BUCKET_TYPE: cisco_umbrella + BUCKET_NAME: wazuh-umbrella-integration-tests + PATH: empty_prefix + metadata: + bucket_type: cisco_umbrella + bucket_name: wazuh-umbrella-integration-tests + only_logs_after: 2022-NOV-20 + path: empty_prefix + expected_results: 0 + +- name: vpc_inexistent_path + description: VPC path configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + PATH: inexistent_prefix + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + only_logs_after: 2022-NOV-20 + path: inexistent_prefix + expected_results: 0 + +- name: config_path_with_data + description: Config path configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: wazuh-config-integration-tests + PATH: test_prefix + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + only_logs_after: 2022-NOV-20 + path: test_prefix + expected_results: 1 + +- name: config_path_without_data + description: Config path configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME:
wazuh-config-integration-tests + PATH: empty_prefix + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + only_logs_after: 2022-NOV-20 + path: empty_prefix + expected_results: 0 + +- name: config_inexistent_path + description: Config path configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: wazuh-config-integration-tests + PATH: inexistent_prefix + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + only_logs_after: 2022-NOV-20 + path: inexistent_prefix + expected_results: 0 + +- name: alb_path_with_data + description: ALB path configurations + configuration_parameters: + BUCKET_TYPE: alb + BUCKET_NAME: wazuh-alb-integration-tests + PATH: test_prefix + metadata: + bucket_type: alb + bucket_name: wazuh-alb-integration-tests + only_logs_after: 2022-NOV-20 + path: test_prefix + expected_results: 1 + +- name: alb_path_without_data + description: ALB path configurations + configuration_parameters: + BUCKET_TYPE: alb + BUCKET_NAME: wazuh-alb-integration-tests + PATH: empty_prefix + metadata: + bucket_type: alb + bucket_name: wazuh-alb-integration-tests + only_logs_after: 2022-NOV-20 + path: empty_prefix + expected_results: 0 + +- name: alb_inexistent_path + description: ALB path configurations + configuration_parameters: + BUCKET_TYPE: alb + BUCKET_NAME: wazuh-alb-integration-tests + PATH: inexistent_prefix + metadata: + bucket_type: alb + bucket_name: wazuh-alb-integration-tests + only_logs_after: 2022-NOV-20 + path: inexistent_prefix + expected_results: 0 + +- name: clb_path_with_data + description: CLB path configurations + configuration_parameters: + BUCKET_TYPE: clb + BUCKET_NAME: wazuh-clb-integration-tests + PATH: test_prefix + metadata: + bucket_type: clb + bucket_name: wazuh-clb-integration-tests + only_logs_after: 2022-NOV-20 + path: test_prefix + expected_results: 1 + +- name: clb_path_without_data + description: CLB path configurations + configuration_parameters: + BUCKET_TYPE: clb + BUCKET_NAME: wazuh-clb-integration-tests + PATH: empty_prefix + metadata: + bucket_type: clb + bucket_name: wazuh-clb-integration-tests + only_logs_after: 2022-NOV-20 + path: empty_prefix + expected_results: 0 + +- name: clb_inexistent_path + description: CLB path configurations + configuration_parameters: + BUCKET_TYPE: clb + BUCKET_NAME: wazuh-clb-integration-tests + PATH: inexistent_prefix + metadata: + bucket_type: clb + bucket_name: wazuh-clb-integration-tests + only_logs_after: 2022-NOV-20 + path: inexistent_prefix + expected_results: 0 + +- name: nlb_path_with_data + description: NLB path configurations + configuration_parameters: + BUCKET_TYPE: nlb + BUCKET_NAME: wazuh-nlb-integration-tests + PATH: test_prefix + metadata: + bucket_type: nlb + bucket_name: wazuh-nlb-integration-tests + only_logs_after: 2022-NOV-20 + path: test_prefix + expected_results: 1 + +- name: nlb_path_without_data + description: NLB path configurations + configuration_parameters: + BUCKET_TYPE: nlb + BUCKET_NAME: wazuh-nlb-integration-tests + PATH: empty_prefix + metadata: + bucket_type: nlb + bucket_name: wazuh-nlb-integration-tests + only_logs_after: 2022-NOV-20 + path: empty_prefix + expected_results: 0 + +- name: nlb_inexistent_path + description: NLB path configurations + configuration_parameters: + BUCKET_TYPE: nlb + BUCKET_NAME: wazuh-nlb-integration-tests + PATH: inexistent_prefix + metadata: + bucket_type: nlb + bucket_name: wazuh-nlb-integration-tests + only_logs_after: 2022-NOV-20 + path: inexistent_prefix + expected_results: 0 + 
+- name: kms_path_with_data + description: KMS path configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-kms-integration-tests + PATH: test_prefix + metadata: + bucket_type: custom + bucket_name: wazuh-kms-integration-tests + only_logs_after: 2022-NOV-20 + path: test_prefix + expected_results: 1 + +- name: kms_path_without_data + description: KMS path configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-kms-integration-tests + PATH: empty_prefix + metadata: + bucket_type: custom + bucket_name: wazuh-kms-integration-tests + only_logs_after: 2022-NOV-20 + path: empty_prefix + expected_results: 0 + +- name: kms_inexistent_path + description: KMS path configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-kms-integration-tests + PATH: inexistent_prefix + metadata: + bucket_type: custom + bucket_name: wazuh-kms-integration-tests + only_logs_after: 2022-NOV-20 + path: inexistent_prefix + expected_results: 0 + +- name: macie_path_with_data + description: Macie path configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-macie-integration-tests + PATH: test_prefix + metadata: + bucket_type: custom + bucket_name: wazuh-macie-integration-tests + only_logs_after: 2022-NOV-20 + path: test_prefix + expected_results: 1 + +- name: macie_path_without_data + description: Macie path configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-macie-integration-tests + PATH: empty_prefix + metadata: + bucket_type: custom + bucket_name: wazuh-macie-integration-tests + only_logs_after: 2022-NOV-20 + path: empty_prefix + expected_results: 0 + +- name: macie_inexistent_path + description: Macie path configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-macie-integration-tests + PATH: inexistent_prefix + metadata: + bucket_type: custom + bucket_name: wazuh-macie-integration-tests + only_logs_after: 2022-NOV-20 + path: inexistent_prefix + expected_results: 0 + +- name: trusted_advisor_path_with_data + description: Trusted Advisor path configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-trusted-advisor-integration-tests + PATH: test_prefix + metadata: + bucket_type: custom + bucket_name: wazuh-trusted-advisor-integration-tests + only_logs_after: 2022-NOV-20 + path: test_prefix + expected_results: 1 + +- name: trusted_advisor_path_without_data + description: Trusted Advisor path configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-trusted-advisor-integration-tests + PATH: empty_prefix + metadata: + bucket_type: custom + bucket_name: wazuh-trusted-advisor-integration-tests + only_logs_after: 2022-NOV-20 + path: empty_prefix + expected_results: 0 + +- name: trusted_advisor_inexistent_path + description: Trusted Advisor path configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-trusted-advisor-integration-tests + PATH: inexistent_prefix + metadata: + bucket_type: custom + bucket_name: wazuh-trusted-advisor-integration-tests + only_logs_after: 2022-NOV-20 + path: inexistent_prefix + expected_results: 0 + +- name: guardduty_path_with_data + description: GuardDuty path configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-guardduty-integration-tests + PATH: test_prefix + metadata: + bucket_type: guardduty + bucket_name: wazuh-guardduty-integration-tests + only_logs_after: 2022-NOV-20 + path: test_prefix + 
expected_results: 1 + +- name: guardduty_path_without_data + description: GuardDuty path configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-guardduty-integration-tests + PATH: empty_prefix + metadata: + bucket_type: guardduty + bucket_name: wazuh-guardduty-integration-tests + only_logs_after: 2022-NOV-20 + path: empty_prefix + expected_results: 0 + +- name: guardduty_inexistent_path + description: GuardDuty path configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-guardduty-integration-tests + PATH: inexistent_prefix + metadata: + bucket_type: guardduty + bucket_name: wazuh-guardduty-integration-tests + only_logs_after: 2022-NOV-20 + path: inexistent_prefix + expected_results: 0 + +- name: native_guardduty_path_with_data + description: Native GuardDuty path configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-native-guardduty-integration-tests + PATH: test_prefix + metadata: + bucket_type: guardduty + bucket_name: wazuh-native-guardduty-integration-tests + only_logs_after: 2022-NOV-20 + path: test_prefix + expected_results: 1 + +- name: native_guardduty_path_without_data + description: Native GuardDuty path configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-native-guardduty-integration-tests + PATH: empty_prefix + metadata: + bucket_type: guardduty + bucket_name: wazuh-native-guardduty-integration-tests + only_logs_after: 2022-NOV-20 + path: empty_prefix + expected_results: 0 + +- name: native_guardduty_inexistent_path + description: Native GuardDuty path configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-native-guardduty-integration-tests + PATH: inexistent_prefix + metadata: + bucket_type: guardduty + bucket_name: wazuh-native-guardduty-integration-tests + only_logs_after: 2022-NOV-20 + path: inexistent_prefix + expected_results: 0 + +- name: waf_path_with_data + description: WAF path configurations + configuration_parameters: + BUCKET_TYPE: waf + BUCKET_NAME: wazuh-waf-integration-tests + PATH: test_prefix + metadata: + bucket_type: waf + bucket_name: wazuh-waf-integration-tests + only_logs_after: 2022-NOV-20 + path: test_prefix + expected_results: 1 + +- name: waf_path_without_data + description: WAF path configurations + configuration_parameters: + BUCKET_TYPE: waf + BUCKET_NAME: wazuh-waf-integration-tests + PATH: empty_prefix + metadata: + bucket_type: waf + bucket_name: wazuh-waf-integration-tests + only_logs_after: 2022-NOV-20 + path: empty_prefix + expected_results: 0 + +- name: waf_inexistent_path + description: WAF path configurations + configuration_parameters: + BUCKET_TYPE: waf + BUCKET_NAME: wazuh-waf-integration-tests + PATH: inexistent_prefix + metadata: + bucket_type: waf + bucket_name: wazuh-waf-integration-tests + only_logs_after: 2022-NOV-20 + path: inexistent_prefix + expected_results: 0 + +- name: server_access_path_with_data + description: Server Access path configurations + configuration_parameters: + BUCKET_TYPE: server_access + BUCKET_NAME: wazuh-server-access-integration-tests + PATH: test_prefix + metadata: + bucket_type: server_access + bucket_name: wazuh-server-access-integration-tests + only_logs_after: 2022-NOV-20 + path: test_prefix + expected_results: 1 + table_name: s3_server_access + +- name: server_access_path_without_data + description: Server Access path configurations + configuration_parameters: + BUCKET_TYPE: server_access + BUCKET_NAME: 
wazuh-server-access-integration-tests + PATH: empty_prefix + metadata: + bucket_type: server_access + bucket_name: wazuh-server-access-integration-tests + only_logs_after: 2022-NOV-20 + path: empty_prefix + expected_results: 0 + table_name: s3_server_access + +- name: server_access_inexistent_path + description: Server Access path configurations + configuration_parameters: + BUCKET_TYPE: server_access + BUCKET_NAME: wazuh-server-access-integration-tests + PATH: inexistent_prefix + metadata: + bucket_type: server_access + bucket_name: wazuh-server-access-integration-tests + only_logs_after: 2022-NOV-20 + path: inexistent_prefix + expected_results: 0 + table_name: s3_server_access + +- name: cisco_umbrella_inexistent_path + description: Umbrella path configurations + configuration_parameters: + BUCKET_TYPE: cisco_umbrella + BUCKET_NAME: wazuh-umbrella-integration-tests + PATH: inexistent_prefix + metadata: + bucket_type: cisco_umbrella + bucket_name: wazuh-umbrella-integration-tests + only_logs_after: 2022-NOV-20 + path: inexistent_prefix + expected_results: 0 diff --git a/tests/integration/test_aws/data/test_cases/regions_test_module/cases_bucket_regions.yaml b/tests/integration/test_aws/data/test_cases/regions_test_module/cases_bucket_regions.yaml new file mode 100644 index 00000000000..07231f255a7 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/regions_test_module/cases_bucket_regions.yaml @@ -0,0 +1,233 @@ +- name: cloudtrail_region_with_data + description: CloudTrail regions configurations + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + REGIONS: us-east-1 + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-east-1 + expected_results: 3 + +- name: cloudtrail_regions_with_data + description: CloudTrail regions configurations + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + REGIONS: us-east-1,us-east-2 + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-east-1,us-east-2 + expected_results: 5 + +- name: cloudtrail_inexistent_region + description: CloudTrail regions configurations + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + REGIONS: us-fake-1 + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-fake-1 + expected_results: 0 + +- name: vpc_region_with_data + description: VPC regions configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + REGIONS: us-east-1 + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-east-1 + expected_results: 3 + +- name: config_region_with_data + description: Config regions configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: wazuh-config-integration-tests + REGIONS: us-east-1 + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-east-1 + expected_results: 3 + +- name: alb_region_with_data + description: ALB regions configurations + configuration_parameters: + BUCKET_TYPE: alb + BUCKET_NAME: wazuh-alb-integration-tests + REGIONS: us-east-1 + metadata: + bucket_type: alb + bucket_name: wazuh-alb-integration-tests + 
only_logs_after: 2022-NOV-20 + regions: us-east-1 + expected_results: 3 + +- name: vpc_regions_with_data + description: VPC regions configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + REGIONS: us-east-1,us-east-2 + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-east-1,us-east-2 + expected_results: 5 + +- name: config_regions_with_data + description: Config regions configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: wazuh-config-integration-tests + REGIONS: us-east-1,us-east-2 + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-east-1,us-east-2 + expected_results: 5 + +- name: alb_regions_with_data + description: ALB regions configurations + configuration_parameters: + BUCKET_TYPE: alb + BUCKET_NAME: wazuh-alb-integration-tests + REGIONS: us-east-1,us-east-2 + metadata: + bucket_type: alb + bucket_name: wazuh-alb-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-east-1,us-east-2 + expected_results: 5 + +- name: vpc_inexistent_region + description: VPC regions configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + REGIONS: us-fake-1 + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-fake-1 + expected_results: 0 + +- name: config_inexistent_region + description: Config regions configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: wazuh-config-integration-tests + REGIONS: us-fake-1 + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-fake-1 + expected_results: 0 + +- name: alb_inexistent_region + description: ALB regions configurations + configuration_parameters: + BUCKET_TYPE: alb + BUCKET_NAME: wazuh-alb-integration-tests + REGIONS: us-fake-1 + metadata: + bucket_type: alb + bucket_name: wazuh-alb-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-fake-1 + expected_results: 0 + +- name: clb_region_with_data + description: CLB regions configurations + configuration_parameters: + BUCKET_TYPE: clb + BUCKET_NAME: wazuh-clb-integration-tests + REGIONS: us-east-1 + metadata: + bucket_type: clb + bucket_name: wazuh-clb-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-east-1 + expected_results: 3 + +- name: clb_regions_with_data + description: CLB regions configurations + configuration_parameters: + BUCKET_TYPE: clb + BUCKET_NAME: wazuh-clb-integration-tests + REGIONS: us-east-1,us-east-2 + metadata: + bucket_type: clb + bucket_name: wazuh-clb-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-east-1,us-east-2 + expected_results: 5 + +- name: clb_inexistent_region + description: CLB regions configurations + configuration_parameters: + BUCKET_TYPE: clb + BUCKET_NAME: wazuh-clb-integration-tests + REGIONS: us-fake-1 + metadata: + bucket_type: clb + bucket_name: wazuh-clb-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-fake-1 + expected_results: 0 + +- name: nlb_region_with_data + description: NLB regions configurations + configuration_parameters: + BUCKET_TYPE: nlb + BUCKET_NAME: wazuh-nlb-integration-tests + REGIONS: us-east-1 + metadata: + bucket_type: nlb + bucket_name: wazuh-nlb-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-east-1 + 
expected_results: 3 + +- name: nlb_regions_with_data + description: NLB regions configurations + configuration_parameters: + BUCKET_TYPE: nlb + BUCKET_NAME: wazuh-nlb-integration-tests + REGIONS: us-east-1,us-east-2 + metadata: + bucket_type: nlb + bucket_name: wazuh-nlb-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-east-1,us-east-2 + expected_results: 5 + +- name: nlb_inexistent_region + description: NLB regions configurations + configuration_parameters: + BUCKET_TYPE: nlb + BUCKET_NAME: wazuh-nlb-integration-tests + REGIONS: us-fake-1 + metadata: + bucket_type: nlb + bucket_name: wazuh-nlb-integration-tests + only_logs_after: 2022-NOV-20 + regions: us-fake-1 + expected_results: 0 diff --git a/tests/integration/test_aws/data/test_cases/regions_test_module/cases_cloudwatch_regions.yaml b/tests/integration/test_aws/data/test_cases/regions_test_module/cases_cloudwatch_regions.yaml new file mode 100644 index 00000000000..d5f4c3b8e45 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/regions_test_module/cases_cloudwatch_regions.yaml @@ -0,0 +1,38 @@ +- name: cloudwatchlogs_region_with_data + description: CloudWatch regions configurations + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests + REGIONS: us-east-1 + metadata: + service_type: cloudwatchlogs + log_group_name: wazuh-cloudwatchlogs-integration-tests + only_logs_after: 2023-JAN-12 + regions: us-east-1 + expected_results: 3 + +- name: cloudwatchlogs_regions_with_data + description: CloudWatch regions configurations + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests + REGIONS: us-east-1,us-east-2 + metadata: + service_type: cloudwatchlogs + log_group_name: wazuh-cloudwatchlogs-integration-tests + only_logs_after: 2023-JAN-12 + regions: us-east-1,us-east-2 + expected_results: 3 + +- name: cloudwatchlogs_inexistent_region + description: CloudWatch regions configurations + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests + REGIONS: us-fake-1 + metadata: + service_type: cloudwatchlogs + log_group_name: wazuh-cloudwatchlogs-integration-tests + only_logs_after: 2023-JAN-12 + regions: us-fake-1 + expected_results: 0 diff --git a/tests/integration/test_aws/data/test_cases/regions_test_module/cases_inspector_regions.yaml b/tests/integration/test_aws/data/test_cases/regions_test_module/cases_inspector_regions.yaml new file mode 100644 index 00000000000..fc1fbbd39b6 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/regions_test_module/cases_inspector_regions.yaml @@ -0,0 +1,32 @@ +- name: inspector_region_with_data + description: Inspector regions configurations + configuration_parameters: + SERVICE_TYPE: inspector + REGIONS: us-east-1 + metadata: + service_type: inspector + only_logs_after: 2023-JAN-12 + regions: us-east-1 + expected_results: 4 + +- name: inspector_regions_with_data + description: Inspector regions configurations + configuration_parameters: + SERVICE_TYPE: inspector + REGIONS: us-east-1,us-east-2 + metadata: + service_type: inspector + only_logs_after: 2023-JAN-12 + regions: us-east-1,us-east-2 + expected_results: 4 + +- name: inspector_inexistent_region + description: Inspector regions configurations + configuration_parameters: + SERVICE_TYPE: inspector + REGIONS: us-fake-1 + metadata: + service_type: inspector + only_logs_after: 2023-JAN-12 + regions: us-fake-1 + expected_results: 0 diff --git 
a/tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_from_bucket.yaml b/tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_from_bucket.yaml new file mode 100644 index 00000000000..eb26143283f --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_from_bucket.yaml @@ -0,0 +1,140 @@ +- name: cloudtrail_remove_from_bucket + description: CloudTrail remove from bucket configurations + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + PATH: '' + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + +- name: vpc_remove_from_bucket + description: VPC remove from bucket configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + PATH: '' + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + +- name: config_remove_from_bucket + description: Config remove from bucket configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: wazuh-config-integration-tests + PATH: '' + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + +- name: alb_remove_from_bucket + description: ALB remove from bucket configurations + configuration_parameters: + BUCKET_TYPE: alb + BUCKET_NAME: wazuh-alb-integration-tests + PATH: '' + metadata: + bucket_type: alb + bucket_name: wazuh-alb-integration-tests + +- name: clb_remove_from_bucket + description: CLB remove from bucket configurations + configuration_parameters: + BUCKET_TYPE: clb + BUCKET_NAME: wazuh-clb-integration-tests + PATH: '' + metadata: + bucket_type: clb + bucket_name: wazuh-clb-integration-tests + +- name: nlb_remove_from_bucket + description: NLB remove from bucket configurations + configuration_parameters: + BUCKET_TYPE: nlb + BUCKET_NAME: wazuh-nlb-integration-tests + PATH: '' + metadata: + bucket_type: nlb + bucket_name: wazuh-nlb-integration-tests + +- name: kms_remove_from_bucket + description: KMS remove from bucket configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-kms-integration-tests + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-kms-integration-tests + +- name: macie_remove_from_bucket + description: Macie remove from bucket configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-macie-integration-tests + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-macie-integration-tests + +- name: trusted_advisor_remove_from_bucket + description: Trusted Advisor remove from bucket configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-trusted-advisor-integration-tests + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-trusted-advisor-integration-tests + +- name: guardduty_remove_from_bucket + description: GuardDuty remove from bucket configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-guardduty-integration-tests + PATH: '' + metadata: + bucket_type: guardduty + bucket_name: wazuh-guardduty-integration-tests + +- name: native_guardduty_remove_from_bucket + description: Native GuardDuty remove from bucket configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-native-guardduty-integration-tests + PATH: '' + metadata: + bucket_type: guardduty + bucket_name: wazuh-native-guardduty-integration-tests + +- name: 
waf_remove_from_bucket + description: WAF remove from bucket configurations + configuration_parameters: + BUCKET_TYPE: waf + BUCKET_NAME: wazuh-waf-integration-tests + PATH: '' + metadata: + bucket_type: waf + bucket_name: wazuh-waf-integration-tests + +- name: server_access_remove_from_bucket + description: Server Access remove from bucket configurations + configuration_parameters: + BUCKET_TYPE: server_access + BUCKET_NAME: wazuh-server-access-integration-tests + PATH: '' + metadata: + bucket_type: server_access + bucket_name: wazuh-server-access-integration-tests + +- name: cisco_umbrella_remove_from_bucket + description: Umbrella remove from bucket configurations + configuration_parameters: + BUCKET_TYPE: cisco_umbrella + BUCKET_NAME: wazuh-umbrella-integration-tests + PATH: dnslogs + metadata: + bucket_type: cisco_umbrella + bucket_name: wazuh-umbrella-integration-tests + path: dnslogs diff --git a/tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_log_streams.yaml b/tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_log_streams.yaml new file mode 100644 index 00000000000..dfc941327c0 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_log_streams.yaml @@ -0,0 +1,8 @@ +- name: cloudwatchlogs_remove_from_bucket + description: CloudWatch remove log streams configurations + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUP_NAME: temporary-log-group + metadata: + service_type: cloudwatchlogs + log_group_name: temporary-log-group diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py new file mode 100644 index 00000000000..f6707aa0947 --- /dev/null +++ b/tests/integration/test_aws/test_basic.py @@ -0,0 +1,329 @@ +import os +import pytest + +# qa-integration-framework imports +from wazuh_testing import session_parameters +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) + +# Local module imports +from .utils import ERROR_MESSAGES + +pytestmark = [pytest.mark.server] + + +# Generic vars +MODULE = 'basic_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) + +# -------------------------------------------- TEST_BUCKET_DEFAULTS ---------------------------------------------------- +# Configuration and cases +t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_defaults.yaml') +t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_defaults.yaml') + +# Enabled test configurations +t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) +t1_configurations = load_configuration_template( + t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +def test_bucket_defaults( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function,
truncate_monitored_files, restart_wazuh_function, file_monitoring +): + """ + description: The module is invoked with the expected parameters and no error occurs. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check in the ossec.log that no errors occur. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_s3_cloudtrail_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check in the log that no errors occur. + input_description: + - The `configuration_defaults` file provides the module configuration for this test. + - The `cases_defaults` file provides the test cases.
+ """ + parameters = [ + 'wodles/aws/aws-s3', + '--bucket', metadata['bucket_name'], + '--aws_profile', 'qa', + '--type', metadata['bucket_type'], + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + + +# -------------------------------------------- TEST_CLOUDWATCH_DEFAULTS ------------------------------------------------ +# Configuration and cases data +t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'cloudwatch_configuration_defaults.yaml') +t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_defaults.yaml') + +# Enabled test configurations +t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) +configurations = load_configuration_template( + t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(configurations, t2_configuration_metadata), ids=t2_case_ids) +def test_service_defaults( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring +): + """ + description: The module is invoked with the expected parameters and no error occurs. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check in the ossec.log that no errors occur. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file.
+ assertions: + - Check in the log that the module was called with correct parameters. + - Check in the log that no errors occur. + input_description: + - The `cloudwatch_configuration_defaults` file provides the module configuration for this test. + - The `cases_cloudwatch_defaults` file provides the test cases. + """ + log_groups = metadata.get('log_group_name') + + parameters = [ + 'wodles/aws/aws-s3', + '--service', metadata['service_type'], + '--aws_profile', 'qa', + '--regions', 'us-east-1', + '--aws_log_groups', log_groups, + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + + +# ------------------------------------------ TEST_INSPECTOR_DEFAULTS --------------------------------------------------- +# Configuration and cases data +t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'inspector_configuration_defaults.yaml') +t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_defaults.yaml') + +# Enabled test configurations +t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) +t3_configurations = load_configuration_template( + t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t3_configurations, t3_configuration_metadata), ids=t3_case_ids) +def test_inspector_defaults( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring +): + """ + description: The module is invoked with the expected parameters and no error occurs. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check in the ossec.log that no errors occur. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution.
+ - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check in the log that no errors occur. + input_description: + - The `inspector_configuration_defaults` file provides the module configuration for this test. + - The `cases_inspector_defaults` file provides the test cases. + """ + + parameters = [ + 'wodles/aws/aws-s3', + '--service', metadata['service_type'], + '--aws_profile', 'qa', + '--regions', 'us-east-1', + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] diff --git a/tests/integration/test_aws/test_discard_regex.py b/tests/integration/test_aws/test_discard_regex.py new file mode 100644 index 00000000000..54cd7f27d9b --- /dev/null +++ b/tests/integration/test_aws/test_discard_regex.py @@ -0,0 +1,153 @@ +import os +import pytest + +# qa-integration-framework imports +from wazuh_testing import session_parameters +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) +from wazuh_testing.modules.aws.db_utils import s3_db_exists + +# Local module imports +from .utils import ERROR_MESSAGES, TIMEOUTS + +pytestmark = [pytest.mark.server] + + +# Generic vars +MODULE = 'discard_regex_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) + +# ----------------------------------------------------- TEST_DISCARD_REGEX --------------------------------------------- +configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_discard_regex.yaml') +cases_path = os.path.join(TEST_CASES_PATH, 'cases_discard_regex.yaml') + +configuration_parameters, configuration_metadata, case_ids = get_test_cases_data(cases_path) +configurations = load_configuration_template( + configurations_path, configuration_parameters, configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(configurations, configuration_metadata), ids=case_ids) +def test_discard_regex( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function,
truncate_monitored_files, restart_wazuh_function, file_monitoring, +): + """ + description: Fetch logs excluding the ones that match the regex. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check the expected number of events were forwarded to analysisd, only logs stored in the bucket, skipping + the ones that match the regex. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_s3_cloudtrail_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check the expected number of events were forwarded to analysisd. + - Check the database was created and updated accordingly. + input_description: + - The `configuration_discard_regex` file provides the module configuration for this test. + - The `cases_discard_regex` file provides the test cases. + """ + bucket_name = metadata['bucket_name'] + bucket_type = metadata['bucket_type'] + only_logs_after = metadata['only_logs_after'] + discard_field = metadata['discard_field'] + discard_regex = metadata['discard_regex'] + found_logs = metadata['found_logs'] + skipped_logs = metadata['skipped_logs'] + path = metadata['path'] if 'path' in metadata else None + + pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field. The event will be skipped.'
+ + parameters = [ + 'wodles/aws/aws-s3', + '--bucket', bucket_name, + '--aws_profile', 'qa', + '--only_logs_after', only_logs_after, + '--discard-field', discard_field, + '--discard-regex', discard_regex, + '--type', bucket_type, + '--debug', '2' + ] + + if path is not None: + parameters.insert(5, path) + parameters.insert(5, '--trail_prefix') + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + log_monitor.start( + timeout=TIMEOUTS[20], + callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), + accumulations=found_logs + skipped_logs + ) + + assert s3_db_exists() + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] diff --git a/tests/integration/test_aws/test_log_groups.py b/tests/integration/test_aws/test_log_groups.py new file mode 100644 index 00000000000..b806bfd186c --- /dev/null +++ b/tests/integration/test_aws/test_log_groups.py @@ -0,0 +1,164 @@ +import os +import pytest + +# qa-integration-framework imports +from wazuh_testing import session_parameters +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) +from wazuh_testing.modules.aws.db_utils import ( + get_multiple_service_db_row, + services_db_exists, + table_exists, +) + +# Local module imports +from .utils import ERROR_MESSAGES, TIMEOUTS + +pytestmark = [pytest.mark.server] + + +# Generic vars +MODULE = 'log_groups_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) + +# ----------------------------------------------- TEST_AWS_LOG_GROUPS -------------------------------------------------- +t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_log_groups.yaml') +t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_log_groups.yaml') + +t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) +t1_configurations = load_configuration_template( + t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +def test_log_groups( + configuration, metadata, create_log_stream, load_wazuh_basic_configuration, set_wazuh_configuration, + clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, + file_monitoring +): + """ + description: Only the events for the specified log_group are processed. + test_phases: + - setup: + - Load Wazuh light configuration. 
+ - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - If a log group that does not exist was specified, make sure that a message is displayed in the ossec.log + warning the user. + - Check the expected number of events were forwarded to analysisd, only the events + for the specified log groups. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - create_log_stream: + type: fixture + brief: Create a log stream with events for the day of execution. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check the expected number of events were forwarded to analysisd. + - Check the database was created and updated accordingly, using the correct log group for each entry. + input_description: + - The `configuration_log_groups` file provides the module configuration for this test. + - The `cases_log_groups` file provides the test cases.
+ """ + service_type = metadata['service_type'] + log_group_names = metadata['log_group_name'] + expected_results = metadata['expected_results'] + + parameters = [ + 'wodles/aws/aws-s3', + '--service', service_type, + '--aws_profile', 'qa', + '--only_logs_after', '2023-JAN-12', + '--regions', 'us-east-1', + '--aws_log_groups', log_group_names, + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + if expected_results: + log_monitor.start( + timeout=TIMEOUTS[20], + callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), + accumulations=len(log_group_names.split(',')) + ) + else: + log_monitor.start( + timeout=TIMEOUTS[10], + callback=event_monitor.make_aws_callback(r'.*The specified log group does not exist.'), + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_no_existent_log_group'] + + assert services_db_exists() + + if expected_results: + log_group_list = log_group_names.split(",") + for row in get_multiple_service_db_row(table_name='cloudwatch_logs'): + assert row.aws_log_group in log_group_list + else: + assert not table_exists(table_name='cloudwatch_logs') + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] diff --git a/tests/integration/test_aws/test_only_logs_after.py b/tests/integration/test_aws/test_only_logs_after.py new file mode 100644 index 00000000000..42a117f735a --- /dev/null +++ b/tests/integration/test_aws/test_only_logs_after.py @@ -0,0 +1,963 @@ +import os +import pytest +from datetime import datetime + +# qa-integration-framework imports +from wazuh_testing import session_parameters +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules import aws as cons +from wazuh_testing.modules.aws import ONLY_LOGS_AFTER_PARAM, event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.modules.aws.cli_utils import call_aws_module +from wazuh_testing.modules.aws.cloudwatch_utils import ( + create_log_events, + create_log_stream, +) +from wazuh_testing.modules.aws.db_utils import ( + get_multiple_s3_db_row, + get_service_db_row, + s3_db_exists, + services_db_exists, + get_s3_db_row, +) +from wazuh_testing.modules.aws.s3_utils import get_last_file_key, upload_file +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) + +from .utils import ERROR_MESSAGES, TIMEOUTS + +pytestmark = [pytest.mark.server] + + +# Generic vars +MODULE = 'only_logs_after_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) + +# --------------------------------------------- TEST_BUCKET_WITHOUT_ONLY_LOGS_AFTER ------------------------------------ +t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_without_only_logs_after.yaml') 
+t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_without_only_logs_after.yaml') + +t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) +t1_configurations = load_configuration_template( + t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +def test_bucket_without_only_logs_after( + configuration, metadata, upload_and_delete_file_to_s3, load_wazuh_basic_configuration, set_wazuh_configuration, + clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, + file_monitoring +): + """ + description: Only the log uploaded during execution is processed. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check the expected number of events were sent to analysisd. Only the logs whose timestamp is greater than + the date specified in the configuration should be processed. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - upload_and_delete_file_to_s3: + type: fixture + brief: Upload a file for the day of the execution and delete after the test. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_s3_cloudtrail_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check in the bucket that the uploaded log was removed. + input_description: + - The `configuration_defaults` file provides the module configuration for this test. + - The `cases_defaults` file provides the test cases. 
+ """ + bucket_name = metadata['bucket_name'] + bucket_type = metadata['bucket_type'] + expected_results = metadata['expected_results'] + table_name = metadata.get('table_name', bucket_type) + path = metadata.get('path') + + parameters = [ + 'wodles/aws/aws-s3', + '--bucket', bucket_name, + '--aws_profile', 'qa', + '--type', bucket_type, + '--debug', '2' + ] + + if path is not None: + parameters.insert(5, path) + parameters.insert(5, '--trail_prefix') + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_event_processed, + accumulations=expected_results + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + + assert s3_db_exists() + + data = get_s3_db_row(table_name=table_name) + + assert bucket_name in data.bucket_path + assert metadata['uploaded_file'] == data.log_key + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + + +# -------------------------------------------- TEST_SERVICE_WITHOUT_ONLY_LOGS_AFTER ------------------------------------ +t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'service_configuration_without_only_logs_after.yaml') +t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_service_without_only_logs_after.yaml') + +t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) +t2_configurations = load_configuration_template( + t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) +def test_service_without_only_logs_after( + configuration, metadata, create_log_stream_in_existent_group, load_wazuh_basic_configuration, + set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, + restart_wazuh_function, file_monitoring +): + """ + description: Only the event created during execution is processed. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check the expected number of events were sent to analysisd. Only the logs whose timestamp is greater than + the date specified in the configuration should be processed. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file. 
+ wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - create_log_stream_in_existent_group: + type: fixture + brief: Create a log stream with events for the day of execution. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check in the bucket that the uploaded log was removed. + input_description: + - The `configuration_defaults` file provides the module configuration for this test. + - The `cases_defaults` file provides the test cases. + """ + service_type = metadata['service_type'] + log_group_name = metadata['log_group_name'] + expected_results = metadata['expected_results'] + + parameters = [ + 'wodles/aws/aws-s3', + '--service', service_type, + '--aws_profile', 'qa', + '--regions', 'us-east-1', + '--aws_log_groups', log_group_name, + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + assert services_db_exists() + + data = get_service_db_row(table_name="cloudwatch_logs") + + assert log_group_name == data.aws_log_group + + assert metadata['log_stream'] == data.aws_log_stream + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + + +# --------------------------------------------- TEST_BUCKET_WITH_ONLY_LOGS_AFTER --------------------------------------- +t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_with_only_logs_after.yaml') +t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_with_only_logs_after.yaml') + +t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) +t3_configurations = load_configuration_template( + t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t3_configurations, t3_configuration_metadata), ids=t3_case_ids) +def test_bucket_with_only_logs_after( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, 
file_monitoring +): + """ + description: All logs with a timestamp greater than the only_logs_after value are processed. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check the expected number of events were sent to analysisd. Only the logs whose timestamp is greater than + the date specified in the configuration should be processed. + - Check the database was created and updated accordingly + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_s3_cloudtrail_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check in the bucket that the uploaded log was removed. + input_description: + - The `configuration_defaults` file provides the module configuration for this test. + - The `cases_defaults` file provides the test cases. 
+ """ + bucket_name = metadata['bucket_name'] + bucket_type = metadata['bucket_type'] + only_logs_after = metadata['only_logs_after'] + expected_results = metadata['expected_results'] + table_name = metadata.get('table_name', bucket_type) + path = metadata.get('path') + + parameters = [ + 'wodles/aws/aws-s3', + '--bucket', bucket_name, + '--aws_profile', 'qa', + '--only_logs_after', only_logs_after, + '--type', bucket_type, + '--debug', '2' + ] + + if path is not None: + parameters.insert(5, path) + parameters.insert(5, '--trail_prefix') + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + log_monitor.start( + timeout=TIMEOUTS[20], + callback=event_monitor.callback_detect_event_processed, + accumulations=expected_results + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + + assert s3_db_exists() + + for row in get_multiple_s3_db_row(table_name=table_name): + assert bucket_name in row.bucket_path + assert ( + datetime.strptime(only_logs_after, '%Y-%b-%d') < datetime.strptime(str(row.created_date), '%Y%m%d') + ) + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + + +# --------------------------------------------TEST_CLOUDWATCH_WITH_ONLY_LOGS_AFTER ------------------------------------- +t4_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'cloudwatch_configuration_with_only_logs_after.yaml') +t4_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_with_only_logs_after.yaml') + +t4_configuration_parameters, t4_configuration_metadata, t4_case_ids = get_test_cases_data(t4_cases_path) +t4_configurations = load_configuration_template( + t4_configurations_path, t4_configuration_parameters, t4_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t4_configurations, t4_configuration_metadata), ids=t4_case_ids) +def test_cloudwatch_with_only_logs_after( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring +): + """ + description: All events with a timestamp greater than the only_logs_after value are processed. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check the expected number of events were sent to analysisd. Only the logs whose timestamp is greater than + the date specified in the configuration should be processed. + - Check the database was created and updated accordingly. 
+ - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check in the bucket that the uploaded log was removed. + input_description: + - The `configuration_defaults` file provides the module configuration for this test. + - The `cases_defaults` file provides the test cases. + """ + table_name_map = { + 'inspector': 'aws_services', + 'cloudwatchlogs': 'cloudwatch_logs' + } + + service_type = metadata['service_type'] + log_group_name = metadata.get('log_group_name') + only_logs_after = metadata['only_logs_after'] + expected_results = metadata['expected_results'] + + parameters = [ + 'wodles/aws/aws-s3', + '--service', service_type, + '--aws_profile', 'qa', + '--only_logs_after', only_logs_after, + '--regions', 'us-east-1', + '--aws_log_groups', log_group_name, + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + log_monitor.start( + timeout=TIMEOUTS[10], + callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + + assert services_db_exists() + + data = get_service_db_row(table_name=table_name_map[service_type]) + + assert log_group_name == data.aws_log_group + assert metadata['log_stream'] == data.aws_log_stream + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + + +# ------------------------------------------ TEST_INSPECTOR_WITH_ONLY_LOGS_AFTER --------------------------------------- +t5_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'inspector_configuration_with_only_logs_after.yaml') +t5_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_with_only_logs_after.yaml') + +t5_configuration_parameters, t5_configuration_metadata, t5_case_ids = get_test_cases_data(t5_cases_path) +t5_configurations = load_configuration_template( + 
t5_configurations_path, t5_configuration_parameters, t5_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t5_configurations, t5_configuration_metadata), ids=t5_case_ids) +def test_inspector_with_only_logs_after( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring +): + """ + description: All events with a timestamp greater than the only_logs_after value are processed. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check the expected number of events were sent to analysisd. Only the logs whose timestamp is greater than + the date specified in the configuration should be processed. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check in the bucket that the uploaded log was removed. + input_description: + - The `configuration_defaults` file provides the module configuration for this test. + - The `cases_defaults` file provides the test cases. 
+ """ + table_name_map = { + 'inspector': 'aws_services', + 'cloudwatchlogs': 'cloudwatch_logs' + } + + service_type = metadata['service_type'] + only_logs_after = metadata['only_logs_after'] + expected_results = metadata['expected_results'] + + parameters = [ + 'wodles/aws/aws-s3', + '--service', service_type, + '--aws_profile', 'qa', + '--only_logs_after', only_logs_after, + '--regions', 'us-east-1', + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + log_monitor.start( + timeout=TIMEOUTS[10], + callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + + assert services_db_exists() + + data = get_service_db_row(table_name=table_name_map[service_type]) + + assert data.service == service_type + assert ( + datetime.strptime(data.timestamp, '%Y-%m-%d %H:%M:%S.%f') == datetime.strptime(only_logs_after, '%Y-%b-%d') + ) + + +# ---------------------------------------------------- TEST_MULTIPLE_CALLS --------------------------------------------- +t5_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_multiple_calls.yaml') + +_, t5_configuration_metadata, t5_case_ids = get_test_cases_data(t5_cases_path) + + +@pytest.mark.tier(level=1) +@pytest.mark.parametrize('metadata', t5_configuration_metadata, ids=t5_case_ids) +def test_bucket_multiple_calls( + metadata, clean_s3_cloudtrail_db, load_wazuh_basic_configuration, restart_wazuh_function, delete_file_from_s3 +): + """ + description: Call the AWS module multiple times with different only_logs_after values. + test_phases: + - setup: + - Delete the `s3_cloudtrail.db`. + + - test: + - Call the module without only_logs_after and check that no logs were processed. + - Upload a log file for the day of the test execution and call the module with the same parameters as + before, and check that the uploaded logs were processed. + - Call the module with the same parameters and check that no logs were processed and there were no duplicates. + - Call the module with only_logs_after set in the past and check that the expected number of logs were + processed. + - Call the module with the same parameters again and check that there were no duplicates. + - Call the module with only_logs_after set to an older date and check that old logs were processed without + duplicates. + - Call the module with only_logs_after set to an earlier date than the one previously used and check that + no logs were processed and there were no duplicates. + + - teardown: + - Delete the `s3_cloudtrail.db`. + - Delete the uploaded files. + wazuh_min_version: 4.6.0 + parameters: + - metadata: + type: dict + brief: Get metadata from the module. + - clean_s3_cloudtrail_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - restart_wazuh_function: + type: fixture + brief: Restart the wazuh service.
+ - delete_file_from_s3: + type: fixture + brief: Delete the file after the test execution. + input_description: + - The `cases_multiple_calls` file provides the test cases. + """ + + bucket_type = metadata['bucket_type'] + bucket_name = metadata['bucket_name'] + path = metadata.get('path') + + base_parameters = [ + '--bucket', bucket_name, + '--type', bucket_type, + '--regions', 'us-east-1', + '--aws_profile', 'qa', + '--debug', '2' + ] + + if path is not None: + base_parameters.extend(['--trail_prefix', path]) + + # Call the module without only_logs_after and check that no logs were processed + last_marker_key = datetime.utcnow().strftime(cons.PATH_DATE_FORMAT) + + event_monitor.check_non_processed_logs_from_output( + command_output=call_aws_module(*base_parameters), + bucket_type=bucket_type + ) + + # Call the module with only_logs_after set in the past and check that the expected number of logs were + # processed + event_monitor.check_processed_logs_from_output( + command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-20'), + expected_results=3 + ) + + # Call the module with the same parameters again and check that there were no duplicates + expected_skipped_logs_step_3 = metadata.get('expected_skipped_logs_step_3', 1) + event_monitor.check_non_processed_logs_from_output( + command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-20'), + bucket_type=bucket_type, + expected_results=expected_skipped_logs_step_3 + ) + + # Call the module with only_logs_after set to an earlier date than the one previously used and check that no logs + # were processed and there were no duplicates + event_monitor.check_non_processed_logs_from_output( + command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-22'), + bucket_type=bucket_type, + expected_results=expected_skipped_logs_step_3 - 1 if expected_skipped_logs_step_3 > 1 else 1 + ) + + # Upload a log file for the day of the test execution and call the module without only_logs_after and check that + # only the uploaded logs were processed and the last marker is specified in the DB. + last_marker_key = get_last_file_key(bucket_type, bucket_name, datetime.utcnow()) + metadata['filename'] = upload_file(bucket_type, bucket_name) + + event_monitor.check_marker_from_output( + command_output=call_aws_module(*base_parameters), + file_key=last_marker_key + ) + + +# -------------------------------------------- TEST_INSPECTOR_MULTIPLE_CALLS ------------------------------------------- +t6_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_multiple_calls.yaml') + +_, t6_configuration_metadata, t6_case_ids = get_test_cases_data(t6_cases_path) + + +@pytest.mark.tier(level=1) +@pytest.mark.parametrize('metadata', t6_configuration_metadata, ids=t6_case_ids) +@pytest.mark.xfail +def test_inspector_multiple_calls( + metadata, clean_aws_services_db, load_wazuh_basic_configuration, restart_wazuh_function +): + """ + description: Call the AWS module multiple times with different only_logs_after values. + test_phases: + - setup: + - Delete the `aws_services.db`. + - test: + - Call the module without only_logs_after and check that no logs were processed. + - Call the module with only_logs_after set in the past and check that the expected number of logs were + processed. + - Call the module with the same parameters again and check that there were no duplicates. + - Call the module with only_logs_after set to an earlier date than the one previously used and check that + no logs were processed and there were no duplicates.
+ - teardown: + - Delete the `aws_services.db`. + wazuh_min_version: 4.6.0 + parameters: + - metadata: + type: dict + brief: Get metadata from the module. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - restart_wazuh_function: + type: fixture + brief: Restart the wazuh service. + input_description: + - The `cases_multiple_calls` file provides the test cases. + """ + + service_type = metadata['service_type'] + + base_parameters = [ + '--service', service_type, + '--regions', 'us-east-1', + '--aws_profile', 'qa', + '--debug', '2' + ] + + # Call the module without only_logs_after and check that no logs were processed + event_monitor.check_service_non_processed_logs_from_output( + command_output=call_aws_module(*base_parameters), service_type=service_type, expected_results=1 + ) + + # Call the module with only_logs_after set in the past and check that the expected number of logs were + # processed + event_monitor.check_service_processed_logs_from_output( + command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-30'), + service_type=service_type, + events_sent=4 + ) + + # Call the module with the same parameters again and check that there were no duplicates + event_monitor.check_service_non_processed_logs_from_output( + command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-30'), + service_type=service_type, + expected_results=1 + ) + + # Call the module with only_logs_after set to an earlier date than the one previously used and check that no logs + # were processed and there were no duplicates + event_monitor.check_service_non_processed_logs_from_output( + command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-31'), + service_type=service_type, + expected_results=1 + ) + + +# ----------------------------------------- TEST_CLOUDWATCH_MULTIPLE_CALLS --------------------------------------------- +t7_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_multiple_calls.yaml') + +_, t7_configuration_metadata, t7_case_ids = get_test_cases_data(t7_cases_path) + + +@pytest.mark.tier(level=1) +@pytest.mark.parametrize('metadata', t7_configuration_metadata, ids=t7_case_ids) +def test_cloudwatch_multiple_calls( + metadata, clean_aws_services_db, load_wazuh_basic_configuration, restart_wazuh_function, delete_log_stream +): + """ + description: Call the AWS module multiple times with different only_logs_after values. + test_phases: + - setup: + - Delete the `aws_services.db`. + - test: + - Call the module without only_logs_after and check that no logs were processed. + - Upload a log file for the day of the test execution and call the module with the same parameters as + before, and check that the uploaded logs were processed. + - Call the module with the same parameters and check that no logs were processed and there were no duplicates. + - Call the module with only_logs_after set in the past and check that the expected number of logs were + processed. + - Call the module with the same parameters again and check that there were no duplicates. + - Call the module with only_logs_after set to an older date and check that old logs were processed without + duplicates. + - Call the module with only_logs_after set to an earlier date than the one previously used and check that + no logs were processed and there were no duplicates. + - teardown: + - Delete the `aws_services.db`. + - Delete the uploaded files.
+ wazuh_min_version: 4.6.0 + parameters: + - metadata: + type: dict + brief: Get metadata from the module. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - restart_wazuh_function: + type: fixture + brief: Restart the wazuh service. + - delete_log_stream: + type: fixture + brief: Delete the log stream after the test execution. + input_description: + - The `cases_multiple_calls` file provides the test cases. + """ + + service_type = metadata['service_type'] + log_group_name = metadata['log_group_name'] + + base_parameters = [ + '--service', service_type, + '--aws_log_groups', log_group_name, + '--regions', 'us-east-1', + '--aws_profile', 'qa', + '--debug', '2' + ] + + # Call the module without only_logs_after and check that no logs were processed + event_monitor.check_service_non_processed_logs_from_output( + command_output=call_aws_module(*base_parameters), service_type=service_type, expected_results=0 + ) + + # Call the module with only_logs_after set in the past and check that the expected number of logs were + # processed + event_monitor.check_service_processed_logs_from_output( + command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-12'), + service_type=service_type, + events_sent=3 + ) + + # Call the module with the same parameters again and check that there were no duplicates + event_monitor.check_service_non_processed_logs_from_output( + command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-12'), + service_type=service_type, + expected_results=0 + ) + + # Call the module with only_logs_after set to an earlier date than the one previously used and check that no logs + # were processed and there were no duplicates + event_monitor.check_service_non_processed_logs_from_output( + command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-15'), + service_type=service_type, + expected_results=0 + ) + + # Create a log stream with events for the day of the test execution and call the module without only_logs_after + # and check that only the new events were processed.
+ log_stream = create_log_stream() + metadata['log_stream'] = log_stream + create_log_events(log_stream) + event_monitor.check_service_processed_logs_from_output( + command_output=call_aws_module(*base_parameters), service_type=service_type, events_sent=1 + ) diff --git a/tests/integration/test_aws/test_parser.py b/tests/integration/test_aws/test_parser.py new file mode 100644 index 00000000000..e46feeda3cb --- /dev/null +++ b/tests/integration/test_aws/test_parser.py @@ -0,0 +1,600 @@ +import os +import pytest + +# qa-integration-framework imports +from wazuh_testing import session_parameters +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) + +# Local module imports +from .utils import ERROR_MESSAGES, TIMEOUTS + +pytestmark = [pytest.mark.server] + + +# Generic vars +MODULE = 'parser_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) + +# --------------------------------------------TEST_BUCKET_AND_SERVICE_MISSING ------------------------------------------ +# Configuration and cases data +t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_bucket_and_service_missing.yaml') +t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_and_service_missing.yaml') + +# Enabled test configurations +t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) +t1_configurations = load_configuration_template( + t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +def test_bucket_and_service_missing( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring +): + """ + description: Command for bucket and service weren't invoked. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has not appeared calling the module with correct parameters. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. 
+ - restart_wazuh_function_without_exception: + type: fixture + brief: Restart the wazuh service catching the exception. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was not called. + + input_description: + - The `configuration_bucket_and_service_missing` file provides the configuration for this test. + """ + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_warning, + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_warning'] + + +# -------------------------------------------- TEST_TYPE_MISSING_IN_BUCKET --------------------------------------------- +# Configuration and cases data +t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_type_missing_in_bucket.yaml') +t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_type_missing_in_bucket.yaml') + +# Enabled test configurations +t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) +t2_configurations = load_configuration_template( + t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) +def test_type_missing_in_bucket( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring +): + """ + description: A warning occurs and was displayed in `ossec.log`. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has not appeared calling the module with correct parameters. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_function_without_exception: + type: fixture + brief: Restart the wazuh service catching the exception. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module displays the message about missing attributes. + input_description: + - The `configuration_type_missing_in_bucket` file provides the configuration for this test. 
+ """ + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_legacy_module_warning, + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_legacy_warning'] + + +# -------------------------------------------- TEST_TYPE_MISSING_IN_SERVICE -------------------------------------------- +# Configuration and cases data +t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_type_missing_in_service.yaml') +t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_type_missing_in_service.yaml') + +# Enabled test configurations +t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) +t3_configurations = load_configuration_template( + t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t3_configurations, t3_configuration_metadata), ids=t3_case_ids) +def test_type_missing_in_service( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring +): + """ + description: An error occurs and was displayed in `ossec.log`. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has not appeared calling the module with correct parameters. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_function_without_exception: + type: fixture + brief: Restart the wazuh service catching the exception. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module displays the message about missing attributes. + + input_description: + - The `configuration_type_missing_in_service` file provides the configuration for this test. 
+ """ + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_error_for_missing_type, + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_error_message'] + +# -------------------------------------------- TEST_EMPTY_VALUES_IN_BUCKET --------------------------------------------- +# Configuration and cases data +t4_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_bucket.yaml') +t4_cases_path = os.path.join(TEST_CASES_PATH, 'cases_empty_values_in_bucket.yaml') + +# Enabled test configurations +t4_configuration_parameters, t4_configuration_metadata, t4_case_ids = get_test_cases_data(t4_cases_path) +t4_configurations = load_configuration_template( + t4_configurations_path, t4_configuration_parameters, t4_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t4_configurations, t4_configuration_metadata), ids=t4_case_ids) +def test_empty_values_in_bucket( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring +): + """ + description: An error occurs and was displayed in `ossec.log`. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has not appeared calling the module with correct parameters. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_function_without_exception: + type: fixture + brief: Restart the wazuh service catching the exception. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module displays the message about an empty value. + input_description: + - The `configuration_values_in_bucket` file provides the configuration for this test. 
+ """ + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_empty_value, + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_value_message'] + +# -------------------------------------------- TEST_EMPTY_VALUES_IN_SERVICE -------------------------------------------- +# Configuration and cases data +t5_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_service.yaml') +t5_cases_path = os.path.join(TEST_CASES_PATH, 'cases_empty_values_in_service.yaml') + +# Enabled test configurations +t5_configuration_parameters, t5_configuration_metadata, t5_case_ids = get_test_cases_data(t5_cases_path) +t5_configurations = load_configuration_template( + t5_configurations_path, t5_configuration_parameters, t5_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t5_configurations, t5_configuration_metadata), ids=t5_case_ids) +def test_empty_values_in_service( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring +): + """ + description: An error occurs and was displayed in `ossec.log`. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has not appeared calling the module with correct parameters. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_function_without_exception: + type: fixture + brief: Restart the wazuh service catching the exception. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module displays the message about an empty value. + + input_description: + - The `configuration_values_in_service` file provides the configuration for this test. 
+ """ + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_empty_value, + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_value_message'] + + +# ------------------------------------------ TEST_INVALID_VALUES_IN_BUCKET --------------------------------------------- +# Configuration and cases data +t6_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_bucket.yaml') +t6_cases_path = os.path.join(TEST_CASES_PATH, 'cases_invalid_values_in_bucket.yaml') + +# Enabled test configurations +t6_configuration_parameters, t6_configuration_metadata, t6_case_ids = get_test_cases_data(t6_cases_path) +t6_configurations = load_configuration_template( + t6_configurations_path, t6_configuration_parameters, t6_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t6_configurations, t6_configuration_metadata), ids=t6_case_ids) +def test_invalid_values_in_bucket( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring +): + """ + description: An error occurs and was displayed in `ossec.log`. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has not appeared calling the module with correct parameters. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_function_without_exception: + type: fixture + brief: Restart the wazuh service catching the exception. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module displays the message about an invalid value. + input_description: + - The `configuration_values_in_bucket` file provides the configuration for this test. 
+ """ + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_invalid_value, + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_invalid_value_message'] + + +# ------------------------------------------ TEST_INVALID_VALUES_IN_BUCKET --------------------------------------------- +# Configuration and cases data +t7_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_service.yaml') +t7_cases_path = os.path.join(TEST_CASES_PATH, 'cases_invalid_values_in_service.yaml') + +# Enabled test configurations +t7_configuration_parameters, t7_configuration_metadata, t7_case_ids = get_test_cases_data(t7_cases_path) +t7_configurations = load_configuration_template( + t7_configurations_path, t7_configuration_parameters, t7_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t7_configurations, t7_configuration_metadata), ids=t7_case_ids) +def test_invalid_values_in_service( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring +): + """ + description: An error occurs and was displayed in `ossec.log`. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has not appeared calling the module with correct parameters. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_function_without_exception: + type: fixture + brief: Restart the wazuh service catching the exception. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module displays the message about an invalid value. + input_description: + - The `configuration_values_in_service` file provides the configuration for this test. 
+    """
+    log_monitor.start(
+        timeout=session_parameters.default_timeout,
+        callback=event_monitor.callback_detect_aws_invalid_value,
+    )
+
+    assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_invalid_value_message']
+
+
+# ----------------------------------------- TEST_INVALID_VALUES_IN_SERVICE ---------------------------------------------
+# Configuration and cases data
+t7_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_service.yaml')
+t7_cases_path = os.path.join(TEST_CASES_PATH, 'cases_invalid_values_in_service.yaml')
+
+# Enabled test configurations
+t7_configuration_parameters, t7_configuration_metadata, t7_case_ids = get_test_cases_data(t7_cases_path)
+t7_configurations = load_configuration_template(
+    t7_configurations_path, t7_configuration_parameters, t7_configuration_metadata
+)
+
+
+@pytest.mark.tier(level=0)
+@pytest.mark.parametrize('configuration, metadata', zip(t7_configurations, t7_configuration_metadata), ids=t7_case_ids)
+def test_invalid_values_in_service(
+    configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration,
+    configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception,
+    file_monitoring
+):
+    """
+    description: An error occurs and was displayed in `ossec.log`.
+    test_phases:
+        - setup:
+            - Load Wazuh light configuration.
+            - Apply ossec.conf configuration changes according to the configuration template and use case.
+            - Apply custom settings in local_internal_options.conf.
+            - Truncate wazuh logs.
+            - Restart wazuh-manager service to apply configuration changes.
+        - test:
+            - Check in the ossec.log that a line has not appeared calling the module with correct parameters.
+        - teardown:
+            - Truncate wazuh logs.
+            - Restore initial configuration, both ossec.conf and local_internal_options.conf.
+    wazuh_min_version: 4.6.0
+    parameters:
+        - configuration:
+            type: dict
+            brief: Get configurations from the module.
+        - metadata:
+            type: dict
+            brief: Get metadata from the module.
+        - load_wazuh_basic_configuration:
+            type: fixture
+            brief: Load basic wazuh configuration.
+        - set_wazuh_configuration:
+            type: fixture
+            brief: Apply changes to the ossec.conf configuration.
+        - configure_local_internal_options_function:
+            type: fixture
+            brief: Apply changes to the local_internal_options.conf configuration.
+        - truncate_monitored_files:
+            type: fixture
+            brief: Truncate wazuh logs.
+        - restart_wazuh_function_without_exception:
+            type: fixture
+            brief: Restart the wazuh service catching the exception.
+        - file_monitoring:
+            type: fixture
+            brief: Handle the monitoring of a specified file.
+    assertions:
+        - Check in the log that the module displays the message about an invalid value.
+    input_description:
+        - The `configuration_values_in_service` file provides the configuration for this test.
+ """ + log_monitor.start( + timeout=TIMEOUTS[20], + callback=event_monitor.callback_detect_bucket_or_service_call, + accumulations=4 + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_service_calls_amount'] diff --git a/tests/integration/test_aws/test_path.py b/tests/integration/test_aws/test_path.py new file mode 100644 index 00000000000..d6d740b0034 --- /dev/null +++ b/tests/integration/test_aws/test_path.py @@ -0,0 +1,166 @@ +import os +import pytest + +# qa-integration-framework imports +from wazuh_testing import session_parameters +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.modules.aws.db_utils import ( + get_s3_db_row, + s3_db_exists, + table_exists_or_has_values, +) +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) +# Local module imports +from .utils import ERROR_MESSAGES, TIMEOUTS + +pytestmark = [pytest.mark.server] + + +# Generic vars +MODULE = 'path_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) + +# ---------------------------------------------------- TEST_PATH ------------------------------------------------------- +configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_path.yaml') +cases_path = os.path.join(TEST_CASES_PATH, 'cases_path.yaml') + +configuration_parameters, configuration_metadata, case_ids = get_test_cases_data(cases_path) +configurations = load_configuration_template( + configurations_path, configuration_parameters, configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(configurations, configuration_metadata), ids=case_ids) +def test_path( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring +): + """ + description: Only logs within a path are processed. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - If a path that does not exist was specified, make sure that a message is displayed in the ossec.log + warning the user. + - Check the command was called with the correct parameters. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. 
+ - clean_s3_cloudtrail_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check the expected number of events were forwarded to analysisd. + - Check the database was created and updated accordingly, using the correct path for each entry. + input_description: + - The `configuration_path` file provides the module configuration for this test. + - The `cases_path` file provides the test cases. + """ + bucket_name = metadata['bucket_name'] + bucket_type = metadata['bucket_type'] + only_logs_after = metadata['only_logs_after'] + path = metadata['path'] + expected_results = metadata['expected_results'] + table_name = metadata.get('table_name', bucket_type) + pattern = fr".*WARNING: Bucket: - No files were found in '{bucket_name}/{path}/'. No logs will be processed.\n+" + + parameters = [ + 'wodles/aws/aws-s3', + '--bucket', bucket_name, + '--aws_profile', 'qa', + '--trail_prefix', path, + '--only_logs_after', only_logs_after, + '--type', bucket_type, + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + if expected_results: + log_monitor.start( + timeout=TIMEOUTS[20], + callback=event_monitor.callback_detect_event_processed, + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + + else: + log_monitor.start( + timeout=TIMEOUTS[10], + callback=event_monitor.make_aws_callback(pattern), + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_path_message'] + + assert s3_db_exists() + + if expected_results: + data = get_s3_db_row(table_name=table_name) + assert f"{bucket_name}/{path}/" == data.bucket_path + assert data.log_key.startswith(f"{path}/") + else: + assert not table_exists_or_has_values(table_name=table_name) + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] diff --git a/tests/integration/test_aws/test_path_suffix.py b/tests/integration/test_aws/test_path_suffix.py new file mode 100644 index 00000000000..6c7450091ec --- /dev/null +++ b/tests/integration/test_aws/test_path_suffix.py @@ -0,0 +1,167 @@ +import os +import pytest + +# qa-integration-framework imports +from wazuh_testing import session_parameters +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.modules.aws.db_utils import ( + get_s3_db_row, + 
    s3_db_exists,
+    table_exists_or_has_values,
+)
+from wazuh_testing.utils.configuration import (
+    get_test_cases_data,
+    load_configuration_template,
+)
+# Local module imports
+from .utils import ERROR_MESSAGES, TIMEOUTS
+
+pytestmark = [pytest.mark.server]
+
+
+# Generic vars
+MODULE = 'path_suffix_test_module'
+TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
+CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE)
+TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE)
+
+# ------------------------------------------------- TEST_PATH_SUFFIX ---------------------------------------------------
+configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_path_suffix.yaml')
+cases_path = os.path.join(TEST_CASES_PATH, 'cases_path_suffix.yaml')
+
+configuration_parameters, configuration_metadata, case_ids = get_test_cases_data(cases_path)
+configurations = load_configuration_template(
+    configurations_path, configuration_parameters, configuration_metadata
+)
+
+
+@pytest.mark.tier(level=0)
+@pytest.mark.parametrize('configuration, metadata', zip(configurations, configuration_metadata), ids=case_ids)
+def test_path_suffix(
+    configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db,
+    configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring
+):
+    """
+    description: Only logs within a path_suffix are processed.
+    test_phases:
+        - setup:
+            - Load Wazuh light configuration.
+            - Apply ossec.conf configuration changes according to the configuration template and use case.
+            - Apply custom settings in local_internal_options.conf.
+            - Truncate wazuh logs.
+            - Restart wazuh-manager service to apply configuration changes.
+        - test:
+            - Check in the ossec.log that a line has appeared calling the module with correct parameters.
+            - If a path_suffix that does not exist was specified, make sure that a message is displayed in the ossec.log
+              warning the user.
+            - Check the command was called with the correct parameters.
+            - Check the database was created and updated accordingly.
+        - teardown:
+            - Truncate wazuh logs.
+            - Restore initial configuration, both ossec.conf and local_internal_options.conf.
+            - Delete the uploaded file.
+    wazuh_min_version: 4.6.0
+    parameters:
+        - configuration:
+            type: dict
+            brief: Get configurations from the module.
+        - metadata:
+            type: dict
+            brief: Get metadata from the module.
+        - load_wazuh_basic_configuration:
+            type: fixture
+            brief: Load basic wazuh configuration.
+        - set_wazuh_configuration:
+            type: fixture
+            brief: Apply changes to the ossec.conf configuration.
+        - clean_s3_cloudtrail_db:
+            type: fixture
+            brief: Delete the DB file before and after the test execution.
+        - configure_local_internal_options_function:
+            type: fixture
+            brief: Apply changes to the local_internal_options.conf configuration.
+        - truncate_monitored_files:
+            type: fixture
+            brief: Truncate wazuh logs.
+        - restart_wazuh_daemon_function:
+            type: fixture
+            brief: Restart the wazuh service.
+        - file_monitoring:
+            type: fixture
+            brief: Handle the monitoring of a specified file.
+    assertions:
+        - Check in the log that the module was called with correct parameters.
+        - Check the expected number of events were forwarded to analysisd.
+        - Check the database was created and updated accordingly, using the correct path for each entry.
+ input_description: + - The `configuration_path_suffix` file provides the module configuration for this test. + - The `cases_path_suffix` file provides the test cases. + """ + bucket_name = metadata['bucket_name'] + bucket_type = metadata['bucket_type'] + only_logs_after = metadata['only_logs_after'] + path_suffix = metadata['path_suffix'] + expected_results = metadata['expected_results'] + pattern = ( + fr".*No logs found in 'AWSLogs/{path_suffix}/'. " + fr"Check the provided prefix and the location of the logs for the bucket type '{bucket_type}'*" + ) + + parameters = [ + 'wodles/aws/aws-s3', + '--bucket', bucket_name, + '--aws_profile', 'qa', + '--trail_suffix', path_suffix, + '--only_logs_after', only_logs_after, + '--type', bucket_type, + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + if expected_results: + log_monitor.start( + timeout=TIMEOUTS[20], + callback=event_monitor.callback_detect_event_processed, + ) + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + + else: + log_monitor.start( + timeout=TIMEOUTS[10], + callback=event_monitor.make_aws_callback(pattern), + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_path_suffix_message'] + + assert s3_db_exists() + + if expected_results: + data = get_s3_db_row(table_name=bucket_type) + assert f"{bucket_name}/{path_suffix}/" == data.bucket_path + assert data.log_key.startswith(f"AWSLogs/{path_suffix}/") + else: + assert not table_exists_or_has_values(table_name=bucket_type) + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] diff --git a/tests/integration/test_aws/test_regions.py b/tests/integration/test_aws/test_regions.py new file mode 100644 index 00000000000..4b45b1feb6e --- /dev/null +++ b/tests/integration/test_aws/test_regions.py @@ -0,0 +1,466 @@ +import os +import pytest + +# qa-integration-framework imports +from wazuh_testing import session_parameters +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.modules.aws import ( # noqa: F401 + AWS_SERVICES_DB_PATH, + RANDOM_ACCOUNT_ID, + event_monitor, + local_internal_options +) +from wazuh_testing.modules.aws.db_utils import ( + get_multiple_s3_db_row, + get_multiple_service_db_row, + s3_db_exists, + table_exists_or_has_values, +) +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) +# Local module imports +from .utils import ERROR_MESSAGES, TIMEOUTS + +pytestmark = [pytest.mark.server] + +# Generic vars +MODULE = 'regions_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, 
TEST_CASES_DIR, MODULE) + +# ---------------------------------------------------- TEST_PATH ------------------------------------------------------- +t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_regions.yaml') +t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_regions.yaml') + +t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) +t1_configurations = load_configuration_template( + t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +def test_regions( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring +): + """ + description: Only the logs for the specified region are processed. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - If a region that does not exist was specified, make sure that a message is displayed in the ossec.log + warning the user. + - Check the expected number of events were forwarded to analysisd, only logs stored in the bucket + for the specified region. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_s3_cloudtrail_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check the expected number of events were forwarded to analysisd. + - Check the database was created and updated accordingly, using the correct path for each entry. + input_description: + - The `configuration_regions` file provides the module configuration for this test. + - The `cases_regions` file provides the test cases. 
+ """ + bucket_name = metadata['bucket_name'] + bucket_type = metadata['bucket_type'] + only_logs_after = metadata['only_logs_after'] + regions = metadata['regions'] + expected_results = metadata['expected_results'] + pattern = fr".*DEBUG: \+\+\+ No logs to process in bucket: {RANDOM_ACCOUNT_ID}/{regions}" + + parameters = [ + 'wodles/aws/aws-s3', + '--bucket', bucket_name, + '--aws_profile', 'qa', + '--only_logs_after', only_logs_after, + '--regions', regions, + '--type', bucket_type, + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + if expected_results: + log_monitor.start( + timeout=TIMEOUTS[20], + callback=event_monitor.callback_detect_event_processed, + accumulations=expected_results + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + + else: + log_monitor.start( + timeout=TIMEOUTS[10], + callback=event_monitor.make_aws_callback(pattern), + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_no_region_found_message'] + + assert s3_db_exists() + + if expected_results: + regions_list = regions.split(",") + for row in get_multiple_s3_db_row(table_name=bucket_type): + if hasattr(row, "aws_region"): + assert row.aws_region in regions_list + else: + assert row.log_key.split("/")[3] in regions_list + else: + assert not table_exists_or_has_values(table_name=bucket_type) + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + + +# -------------------------------------------- TEST_CLOUDWATCH_REGIONS ------------------------------------------------- +t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'cloudwatch_configuration_regions.yaml') +t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_regions.yaml') + +t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) +configurations = load_configuration_template( + t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(configurations, t2_configuration_metadata), ids=t2_case_ids) +def test_cloudwatch_regions( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring +): + """ + description: Only the logs for the specified region are processed. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. 
+ - If a region that does not exist was specified, make sure that a message is displayed in the ossec.log + warning the user. + - Check the expected number of events were forwarded to analysisd, only logs stored in the bucket + for the specified region. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check the expected number of events were forwarded to analysisd. + - Check the database was created and updated accordingly, using the correct path for each entry. + input_description: + - The `configuration_regions` file provides the module configuration for this test. + - The `cases_regions` file provides the test cases. + """ + service_type = metadata['service_type'] + log_group_name = metadata.get('log_group_name') + only_logs_after = metadata['only_logs_after'] + regions: str = metadata['regions'] + expected_results = metadata['expected_results'] + regions_list = regions.split(",") + + parameters = [ + 'wodles/aws/aws-s3', + '--service', service_type, + '--aws_profile', 'qa', + '--only_logs_after', only_logs_after, + '--regions', regions, + '--aws_log_groups', log_group_name, + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + if expected_results: + log_monitor.start( + timeout=TIMEOUTS[20], + callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), + accumulations=len(regions_list) + ) + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + + else: + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.make_aws_callback( + fr".*\+\+\+ ERROR: The region '{regions}' is not a valid one." 
+ ), + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_non-existent_region_message'] + + table_name = 'cloudwatch_logs' + + if expected_results: + assert table_exists_or_has_values(table_name=table_name, db_path=AWS_SERVICES_DB_PATH) + for row in get_multiple_service_db_row(table_name=table_name): + assert (getattr(row, 'region', None) or getattr(row, 'aws_region')) in regions_list + else: + assert not table_exists_or_has_values(table_name=table_name, db_path=AWS_SERVICES_DB_PATH) + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + + +# ------------------------------------------ TEST_INSPECTOR_PATH ------------------------------------------------------- +t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'inspector_configuration_regions.yaml') +t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_regions.yaml') + +t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) +configurations = load_configuration_template( + t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(configurations, t3_configuration_metadata), ids=t3_case_ids) +def test_inspector_regions( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring +): + """ + description: Only the logs for the specified region are processed. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - If a region that does not exist was specified, make sure that a message is displayed in the ossec.log + warning the user. + - Check the expected number of events were forwarded to analysisd, only logs stored in the bucket + for the specified region. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. 
+ assertions: + - Check in the log that the module was called with correct parameters. + - Check the expected number of events were forwarded to analysisd. + - Check the database was created and updated accordingly, using the correct path for each entry. + input_description: + - The `configuration_regions` file provides the module configuration for this test. + - The `cases_regions` file provides the test cases. + """ + service_type = metadata['service_type'] + only_logs_after = metadata['only_logs_after'] + regions: str = metadata['regions'] + expected_results = metadata['expected_results'] + regions_list = regions.split(",") + + parameters = [ + 'wodles/aws/aws-s3', + '--service', service_type, + '--aws_profile', 'qa', + '--only_logs_after', only_logs_after, + '--regions', regions, + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + if expected_results: + log_monitor.start( + timeout=TIMEOUTS[20], + callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), + accumulations=len(regions_list) + ) + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + + else: + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.make_aws_callback( + fr".*\+\+\+ ERROR: The region '{regions}' is not a valid one." 
+ ), + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_non-existent_region_message'] + + table_name = 'aws_services' + + if expected_results: + assert table_exists_or_has_values(table_name=table_name, db_path=AWS_SERVICES_DB_PATH) + for row in get_multiple_service_db_row(table_name=table_name): + assert (getattr(row, 'region', None) or getattr(row, 'aws_region')) in regions_list + else: + assert not table_exists_or_has_values(table_name=table_name, db_path=AWS_SERVICES_DB_PATH) + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] diff --git a/tests/integration/test_aws/test_remove_from_bucket.py b/tests/integration/test_aws/test_remove_from_bucket.py new file mode 100644 index 00000000000..8c6dc85bd89 --- /dev/null +++ b/tests/integration/test_aws/test_remove_from_bucket.py @@ -0,0 +1,246 @@ +import os +import pytest + +from wazuh_testing import session_parameters +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.modules.aws.cloudwatch_utils import log_stream_exists +from wazuh_testing.modules.aws.s3_utils import file_exists +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) + +pytestmark = [pytest.mark.server] + + +# Generic vars +MODULE = 'remove_from_bucket_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) + +# ---------------------------------------------------- TEST_REMOVE_FROM_BUCKET ----------------------------------------- +# Configuration and cases data +t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_remove_from_bucket.yaml') +t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_remove_from_bucket.yaml') + +t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) +t1_configurations = load_configuration_template( + t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +def test_remove_from_bucket( + configuration, metadata, mark_cases_as_skipped, upload_and_delete_file_to_s3, load_wazuh_basic_configuration, + set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, + truncate_monitored_files, restart_wazuh_function, file_monitoring +): + """ + description: The uploaded file was removed after the execution. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. 
+ - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check that the uploaded log was removed by the module after the execution. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - upload_and_delete_file_to_s3: + type: fixture + brief: Upload a file to S3 bucket for the day of the execution. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_s3_cloudtrail_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check in the bucket that the uploaded log was removed. + input_description: + - The `configuration_defaults` file provides the module configuration for this test. + - The `cases_defaults` file provides the test cases. + """ + bucket_name = metadata['bucket_name'] + path = metadata.get('path') + parameters = [ + 'wodles/aws/aws-s3', + '--bucket', bucket_name, + '--remove', + '--aws_profile', 'qa', + '--type', metadata['bucket_type'], + '--debug', '2' + ] + + if path is not None: + parameters.insert(6, path) + parameters.insert(6, '--trail_prefix') + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + assert not file_exists(filename=metadata['uploaded_file'], bucket_name=bucket_name) + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + + +# ---------------------------------------------------- TEST_REMOVE_LOG_STREAM ------------------------------------------ +# Configuration and cases data +t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_remove_log_stream.yaml') +t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_remove_log_streams.yaml') + +t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) +t2_configurations = load_configuration_template( + t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) +def test_remove_log_stream( + 
configuration, metadata, create_log_stream, load_wazuh_basic_configuration, set_wazuh_configuration, + clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, + file_monitoring +): + """ + description: The created log stream was removed after the execution. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check that the created log stream was removed by the module after the execution. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - create_log_stream: + type: fixture + brief: Create a log stream with events for the day of execution. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check in the log group that the created stream was removed. + input_description: + - The `configuration_defaults` file provides the module configuration for this test. + - The `cases_defaults` file provides the test cases. 
+ """ + service_type = metadata['service_type'] + log_group_name = metadata['log_group_name'] + + parameters = [ + 'wodles/aws/aws-s3', + '--service', service_type, + '--aws_profile', 'qa', + '--regions', 'us-east-1', + '--aws_log_groups', log_group_name, + '--remove-log-streams', + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + assert not log_stream_exists(log_stream=metadata['log_stream'], log_group=log_group_name) + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py new file mode 100644 index 00000000000..23601bf2d6c --- /dev/null +++ b/tests/integration/test_aws/utils.py @@ -0,0 +1,33 @@ +# Copyright (C) 2015-2023, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +""" + This file contains constant and other utilities to be used in the AWS integration test module. +""" + +# CONSTANTS + +ERROR_MESSAGES = { + + "failed_start": "The AWS module did not start as expected", + "incorrect_parameters": "The AWS module was not called with the correct parameters", + "error_found": "Found error message on AWS module", + "incorrect_event_number": "The AWS module did not process the expected number of events", + "incorrect_non-existent_region_message": "The AWS module did not show correct message about non-existent region", + "incorrect_no_existent_log_group": "The AWS module did not show correct message non-existent log group", + "incorrect_empty_path_message": "The AWS module did not show correct message about empty path", + "incorrect_empty_path_suffix_message": "The AWS module did not show correct message about empty path_suffix", + "incorrect_error_message": "The AWS module did not show the expected error message", + "incorrect_empty_value_message": "The AWS module did not show the expected message about empty value", + "incorrect_legacy_warning": "The AWS module did not show the expected legacy warning", + "incorrect_warning": "The AWS module did not show the expected warning", + "incorrect_invalid_value_message": "The AWS module did not show the expected message about invalid value", + "incorrect_service_calls_amount": "The AWS module was not called for bucket or service the right amount of times" +} + +TIMEOUTS = { + + 10: 10, + 20: 20 +} From 3fa4c02eb1daa0bd9be9372684cde323ab991f4b Mon Sep 17 00:00:00 2001 From: Eduardo Date: Fri, 18 Aug 2023 12:50:16 -0300 Subject: [PATCH 258/419] Update Readme --- tests/integration/test_aws/README.md | 90 ++++++++++++++-------------- 1 file changed, 44 insertions(+), 46 deletions(-) diff --git a/tests/integration/test_aws/README.md b/tests/integration/test_aws/README.md index f06aac5e543..59d9ae61a66 100644 --- a/tests/integration/test_aws/README.md +++ b/tests/integration/test_aws/README.md @@ -7,13 +7,14 @@ It is a 
_wodle based_ module that has a capability to pull logs from several AWS ## Tests directory structure ```bash -wazuh-qa/tests/integration/test_aws -├── conftest.py +wazuh/tests/integration/test_aws ├── data │   ├── configuration_template │   │   ├── basic_test_module │   │   ├── discard_regex_test_module +│   │   ├── log_groups_test_module │   │   ├── only_logs_after_test_module +│   │   ├── parser_test_module │   │   ├── path_suffix_test_module │   │   ├── path_test_module │   │   ├── regions_test_module @@ -21,37 +22,44 @@ wazuh-qa/tests/integration/test_aws │   └── test_cases │   ├── basic_test_module │   ├── discard_regex_test_module +│   ├── log_groups_test_module │   ├── only_logs_after_test_module +│   ├── parser_test_module │   ├── path_suffix_test_module │   ├── path_test_module │   ├── regions_test_module │   └── remove_from_bucket_test_module -├── README.MD +├── __init__.py +├── README.md +├── conftest.py ├── test_basic.py ├── test_discard_regex.py +├── test_log_groups.py ├── test_only_logs_after.py ├── test_path.py ├── test_path_suffix.py ├── test_regions.py -└── test_remove_from_bucket.py +├── test_remove_from_bucket.py +└── utils.py ``` ## Deps directory structure ```bash -wazuh-qa/deps/wazuh_testing/wazuh_testing/modules/aws +qa-integration-framework/src/wazuh_testing/modules/aws +├── __init__.py ├── cli_utils.py -├── constants.py +├── cloudwatch_utils.py ├── data_generator.py ├── db_utils.py ├── event_monitor.py -├── __init__.py +├── exceptions.py └── s3_utils.py ``` ## Requirements -- The only extra dependency is `boto3` +- Install the [qa-integration-framework](https://github.com/wazuh/qa-integration-framework) - The module will assume there are already buckets, log groups and an inspector assessment with test data in AWS. ## Configuration settings @@ -67,7 +75,7 @@ aws_secret_access_key = ## Setting up a test environment -You will need a proper environment to run the integration tests. You can use any virtual machine you wish. If you have +You will need a proper environment to run the integration tests. You can use Docker or any virtual machine. If you have one already, go to the [integration tests section](#integration-tests) If you use [Vagrant](https://www.vagrantup.com/downloads.html) @@ -92,16 +100,16 @@ _We are using **Ubuntu 22.04** for this example:_ ```shell script # Install pip - apt install python3-pip - - # Clone your `wazuh-qa` repository within your testing environment - cd wazuh-qa - - # Install Python libraries - python3 -m pip install -r requirements.txt - + apt install python3-pip git -y + + # Clone your `wazuh` repository within your testing environment + git clone https://github.com/wazuh/wazuh.git + + # Clone the `qa-integration-framework` + git clone https://github.com/wazuh/qa-integration-framework.git + # Install test dependecies - python3 -m pip install deps/wazuh-testing + python3 -m pip install qa-integration-framework/ ``` @@ -149,32 +157,22 @@ check its documentation for further information. 
#### AWS integration tests example ```bash -# python3 -m pytest -vvx test_aws/ -k cloudtrail -=========================================================== test session starts ====================================================== -platform linux -- Python 3.10.6, pytest-7.1.2, pluggy-1.0.0 -- /usr/bin/python3 -cachedir: .pytest_cache -metadata: {'Python': '3.10.6', 'Platform': 'Linux-5.15.0-58-generic-x86_64-with-glibc2.35', -'Packages': {'pytest': '7.1.2', 'py': '1.10.0', 'pluggy': '1.0.0'}, -'Plugins': {'metadata': '2.0.2', 'html': '3.1.1', 'testinfra': '5.0.0'}} -rootdir: /home/vagrant/qa/tests/integration, configfile: pytest.ini -plugins: metadata-2.0.2, html-3.1.1, testinfra-5.0.0 -collected 15 items - -test_aws/test_basic.py::test_defaults[cloudtrail_defaults] PASSED [ 6%] -test_aws/test_discard_regex.py::test_discard_regex[cloudtrail_discard_regex] PASSED [ 13%] -test_aws/test_only_logs_after.py::test_without_only_logs_after[cloudtrail_without_only_logs_after] PASSED [ 20%] -test_aws/test_only_logs_after.py::test_with_only_logs_after[cloudtrail_with_only_logs_after] PASSED [ 26%] -test_aws/test_only_logs_after.py::test_multiple_calls[cloudtrail_only_logs_after_multiple_calls] PASSED [ 33%] -test_aws/test_path.py::test_path[cloudtrail_path_with_data] PASSED [ 40%] -test_aws/test_path.py::test_path[cloudtrail_path_without_data] PASSED [ 46%] -test_aws/test_path.py::test_path[cloudtrail_inexistent_path] PASSED [ 53%] -test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_path_suffix_with_data] PASSED [ 60%] -test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_path_suffix_without_data] PASSED [ 66%] -test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_inexistent_path_suffix] PASSED [ 73%] -test_aws/test_regions.py::test_regions[cloudtrail_region_with_data] PASSED [ 80%] -test_aws/test_regions.py::test_regions[cloudtrail_regions_with_data] PASSED [ 86%] -test_aws/test_regions.py::test_regions[cloudtrail_inexistent_region] PASSED [ 93%] -test_aws/test_remove_from_bucket.py::test_remove_from_bucket[cloudtrail_remove_from_bucket] PASSED [100%] - -=============================================== 15 passed, 2 warnings in 332.67s (0:05:32) =========================================== +#root@wazuh-master:/wazuh/tests/integration# pytest -x test_aws/ --disable-warnings +==================================== test session starts ==================================== +platform linux -- Python 3.10.12, pytest-7.1.2, pluggy-1.2.0 +rootdir: /wazuh/tests/integration, configfile: pytest.ini +plugins: testinfra-5.0.0, metadata-3.0.0, html-3.1.1 +collected 195 items + +test_aws/test_basic.py ................ [ 8%] +test_aws/test_discard_regex.py .............. [ 15%] +test_aws/test_log_groups.py .. [ 16%] +test_aws/test_only_logs_after.py .............................................x. [ 40%] +test_aws/test_parser.py .......................... [ 53%] +test_aws/test_path.py .......................................... [ 75%] +test_aws/test_path_suffix.py ......... [ 80%] +test_aws/test_regions.py ........................ [ 92%] +test_aws/test_remove_from_bucket.py ...sss......... 
[100%] + +============ 191 passed, 3 skipped, 1 xfailed, 7 warnings in 3723.08s (1:02:03) ============= ``` From 0c9330efd12030225eecd4cf6db88f8a00e5ea4c Mon Sep 17 00:00:00 2001 From: Eduardo Date: Fri, 25 Aug 2023 08:30:35 -0300 Subject: [PATCH 259/419] Improve README and Remove unnecessary fixture calls on conftest --- tests/integration/conftest.py | 8 ++++++-- tests/integration/test_aws/README.md | 19 +++++++++++++------ 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 504b1abb618..9e1735acd46 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -181,6 +181,10 @@ def configure_local_internal_options_function(request): It uses the test variable local_internal_options. This should be a dictionary wich keys and values corresponds to the internal option configuration, For example: local_internal_options = {'monitord.rotate_log': '0', 'syscheck.debug': '0' } + + Args: + request (fixture): Provide information on the executing test function. + """ try: local_internal_options = request.param @@ -204,7 +208,7 @@ def configure_local_internal_options_function(request): wazuh_configuration.set_local_internal_options_dict(backup_local_internal_options) -@pytest.fixture(scope='function') +@pytest.fixture() def restart_wazuh_function(request): """Restart before starting a test, and stop it after finishing. @@ -240,7 +244,7 @@ def restart_wazuh_function(request): control_service('stop', daemon=daemon) -@pytest.fixture(scope='function') +@pytest.fixture() def file_monitoring(request): """Fixture to handle the monitoring of a specified file. diff --git a/tests/integration/test_aws/README.md b/tests/integration/test_aws/README.md index 59d9ae61a66..a644cff8f15 100644 --- a/tests/integration/test_aws/README.md +++ b/tests/integration/test_aws/README.md @@ -59,8 +59,15 @@ qa-integration-framework/src/wazuh_testing/modules/aws ## Requirements -- Install the [qa-integration-framework](https://github.com/wazuh/qa-integration-framework) -- The module will assume there are already buckets, log groups and an inspector assessment with test data in AWS. +- [Proper testing environment](#Setting up a test environment) + +- [Wazuh](https://github.com/wazuh/qa-integration-framework) repository. + +- [Testing framework](https://github.com/wazuh/qa-integration-framework) installed. + +- Configured buckets, log groups and an inspector assessment with test data in AWS. 
+ +For a step-by-step example guide using linux go to the [test setup section](#linux) ## Configuration settings @@ -102,13 +109,13 @@ _We are using **Ubuntu 22.04** for this example:_ # Install pip apt install python3-pip git -y - # Clone your `wazuh` repository within your testing environment + # Clone `wazuh` repository within your testing environment git clone https://github.com/wazuh/wazuh.git - # Clone the `qa-integration-framework` + # Clone the `qa-integration-framework` repository withing your testing environment git clone https://github.com/wazuh/qa-integration-framework.git - # Install test dependecies + # Install tests dependencies python3 -m pip install qa-integration-framework/ ``` @@ -126,7 +133,7 @@ from the closest one, it will look for the next one (if possible) until reaching need to run every test from the following path, where the general _conftest_ is: ```shell script -cd wazuh-qa/tests/integration +cd wazuh/tests/integration/test_aws/ ``` To run any test, we just need to call `pytest` from `python3` using the following line: From 3f514fea62764ef08fdd1327fde0a92277fccde3 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Thu, 7 Sep 2023 09:17:21 -0300 Subject: [PATCH 260/419] Add new event_monitor file and new imports --- tests/integration/test_aws/conftest.py | 8 +- tests/integration/test_aws/event_monitor.py | 329 ++++++++++++++++++++ tests/integration/test_aws/test_basic.py | 4 +- tests/integration/test_aws/utils.py | 33 -- 4 files changed, 336 insertions(+), 38 deletions(-) create mode 100644 tests/integration/test_aws/event_monitor.py diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 2dfb6bde491..e74f146a473 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -1,18 +1,20 @@ import pytest from wazuh_testing.logger import logger -from wazuh_testing.modules.aws import ( +from wazuh_testing.constants.aws import ( FAKE_CLOUDWATCH_LOG_GROUP, PERMANENT_CLOUDWATCH_LOG_GROUP, ) -from wazuh_testing.modules.aws.cloudwatch_utils import ( +from wazuh_testing.modules.aws.utils import ( create_log_events, create_log_group, create_log_stream, delete_log_group, delete_log_stream, + delete_file, + file_exists, + upload_file ) from wazuh_testing.modules.aws.db_utils import delete_s3_db, delete_services_db -from wazuh_testing.modules.aws.s3_utils import delete_file, file_exists, upload_file from wazuh_testing.utils.services import control_service diff --git a/tests/integration/test_aws/event_monitor.py b/tests/integration/test_aws/event_monitor.py new file mode 100644 index 00000000000..6df70c6c9b7 --- /dev/null +++ b/tests/integration/test_aws/event_monitor.py @@ -0,0 +1,329 @@ +import re + +from wazuh_testing.modules.aws import VPC_FLOW_TYPE +from wazuh_testing.modules.aws.cli_utils import analyze_command_output +from wazuh_testing.modules.aws.patterns import patterns +from wazuh_testing.modules.aws.errors import errors +from wazuh_testing.constants.aws import INSPECTOR_TYPE + + +def make_aws_callback(pattern, prefix=''): + """Create a callback function from a text pattern. + + Args: + pattern (str): String to match on the log. + prefix (str): Regular expression used as prefix before the pattern. + + Returns: + lambda: Function that returns if there's a match in the file. 
+ """ + pattern = WHITESPACE_REGEX.join(pattern.split()) + regex = re.compile(CURLY_BRACE_MATCH.format(prefix, pattern)) + + return lambda line: regex.match(line) + + +def callback_detect_aws_module_called(parameters): + """Detect if aws module was called with correct parameters. + + Args: + parameters (list): Values to check. + + Returns: + Callable: Callback to match the line. + """ + pattern = f'{AWS_MODULE_STARTED_PARAMETRIZED} {" ".join(parameters)}\n*' + regex = re.compile(pattern) + return lambda line: regex.match(line) + + +def callback_detect_aws_error_for_missing_type(line): + """Detect if the AWS module displays an error about missing type. + + Args: + line (str): Line to match. + + Returns: + Optional[str]: Line if it matches. + """ + + if re.match( + AWS_UNDEFINED_SERVICE_TYPE, line + ): + return line + + +def callback_detect_aws_legacy_module_warning(line): + """Detect if the AWS module displays a warning about legacy config. + + Args: + line (str): Line to match. + + Returns: + Optional[str]: Line if it matches. + """ + + if re.match( + AWS_DEPRECATED_CONFIG_DEFINED, line + ): + return line + + +def callback_detect_aws_module_warning(line): + """Detect if the AWS module displays a warning. + + Args: + line (str): Line to match. + + Returns: + Optional[str]: Line if it matches. + """ + + if re.match(AWS_NO_SERVICE_WARNING, line): + return line + + +def callback_detect_aws_module_started(line): + """Detect if the AWS module was called. + + Args: + line (str): Line to match. + + Returns: + Optional[str]: Line if it matches. + """ + + if re.match(AWS_MODULE_STARTED, line): + return line + + +def callback_detect_aws_empty_value(line): + """Detect if the AWS module displays a message about an empty value. + + Args: + line (str): Line to match. + + Returns: + Optional[str]: Line if it matches. + """ + + if ( + re.match(INVALID_TYPE_ERROR, line) or + re.match(EMPTY_CONTENT_ERROR, line) or + re.match(EMPTY_CONTENT_WARNING, line) + ): + return line + + +def callback_detect_aws_invalid_value(line): + """Detect if the AWS module displays a message about an invalid value. + + Args: + line (str): Line to match. + + Returns: + Optional[str]: Line if it matches. + """ + + if ( + re.match(INVALID_EMPTY_SERVICE_TYPE_ERROR, line) or + re.match(INVALID_TAG_CONTENT_ERROR, line) or + re.match(PARSING_BUCKET_ERROR_WARNING, line), + re.match(PARSING_SERVICE_ERROR_WARNING, line) + ): + return line + + +def callback_detect_bucket_or_service_call(line): + """Detect if bucket or service module was called. + + Args: + line (str): Line to match. + + Returns: + Optional[str]: Line if it matches. + """ + + if ( + re.match(SERVICE_ANALYSIS, line) or + re.match(BUCKET_ANALYSIS, line) + ): + return line + + +def callback_detect_aws_module_start(line): + """Search for the start message in the given line. + + Args: + line (str): Line to match. + + Returns: + Optional[str]: Line if it matches. + """ + + if re.match(MODULE_START, line): + return line + + +def callback_detect_all_aws_err(line): + """Search for the parse or module error message in the given line. + + Args: + line (str): Line to match. + + Returns: + Optional[str]: line if it matches. + """ + if re.match(PARSER_ERROR, line) or re.match(MODULE_ERROR, line): + return line + + +def callback_detect_aws_read_err(line): + """Search for the parser error message in the given line. + + Args: + line (str): Line to match. + + Returns: + Optional[str]: line if it matches. 
+ """ + if re.match(PARSER_ERROR, line): + return line + + +def callback_detect_aws_wmodule_err(line): + """Search for the module error message in the given line. + + Args: + line (str): Line to match. + + Returns: + Optional[str]: line if it matches. + """ + if re.match(MODULE_ERROR, line): + return line + + +def callback_detect_event_processed(line): + """Search for the event processed message in the given line. + + Args: + line (str): Line to match. + + Returns: + Optional[str]: line if it matches. + """ + if re.match(NEW_LOG_FOUND, line): + return line + + +def callback_detect_event_processed_or_skipped(pattern): + """Search for event processed or skipped message in the given line. + + Args: + pattern (str): Pattern to match in line. + Returns: + Callable: Callback to match the given line. + """ + pattern_regex = re.compile(pattern) + return lambda line: pattern_regex.match(line) or callback_detect_event_processed(line) + + +def callback_detect_service_event_processed(expected_results, service_type): + if service_type == INSPECTOR_TYPE: + regex = re.compile(f"{DEBUG_MESSAGE} {expected_results} {EVENTS_COLLECTED}") + else: + regex = re.compile(f"{DEBUG_ANALYSISD_MESSAGE} {expected_results} {ANALYSISD_EVENT}") + return lambda line: regex.match(line) + + +def callback_event_sent_to_analysisd(line): + """Search for the module header message in the given line. + + Args: + line (str): Line to match. + + Returns: + Optional[str]: line if it matches. + """ + if line.startswith(AWS_EVENT_HEADER): + return line + + +def check_processed_logs_from_output(command_output, expected_results=1): + """Check for processed messages in the give output. + + Args: + command_output (str): Output to analyze. + expected_results (int, optional): Number of results to find. Default to 1. + """ + analyze_command_output( + command_output=command_output, + callback=callback_detect_event_processed, + expected_results=expected_results, + error_message=INCORRECT_EVENT_NUMBER + ) + + +def check_non_processed_logs_from_output(command_output, bucket_type, expected_results=1): + """Check for the non 'processed' messages in the give output. + + Args: + command_output (str): Output to analyze. + bucket_type (str): Bucket type to select the message. + expected_results (int, optional): Number of results to find. Default to 1. + """ + if bucket_type == VPC_FLOW_TYPE: + pattern = NO_LOG_PROCESSED + else: + pattern = NO_BUCKET_LOG_PROCESSED + + analyze_command_output( + command_output, + callback=make_aws_callback(pattern), + expected_results=expected_results, + error_message=UNEXPECTED_NUMBER_OF_EVENTS_FOUND + ) + + +def check_marker_from_output(command_output, file_key, expected_results=1): + """Check for the marker message in the given output. + + Args: + command_output (str): Output to analyze. + file_key (str): Value to check as a marker. + expected_results (int, optional): Number of results to find. Default to 1. 
+ """ + pattern = f"{MARKER} {file_key}" + + analyze_command_output( + command_output, + callback=make_aws_callback(pattern), + expected_results=expected_results, + error_message=INCORRECT_MARKER + ) + + +def check_service_processed_logs_from_output( + command_output, events_sent, service_type, expected_results=1 +): + analyze_command_output( + command_output=command_output, + callback=callback_detect_service_event_processed(events_sent, service_type), + expected_results=expected_results, + error_message=INCORRECT_EVENT_NUMBER + ) + + +def check_service_non_processed_logs_from_output(command_output, service_type, expected_results=1): + if service_type == INSPECTOR_TYPE: + pattern = NO_NEW_EVENTS + else: + pattern = EVENT_SENT + + analyze_command_output( + command_output, + callback=make_aws_callback(pattern), + expected_results=expected_results, + error_message=POSSIBLY_PROCESSED_LOGS + ) diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py index f6707aa0947..609e933738a 100644 --- a/tests/integration/test_aws/test_basic.py +++ b/tests/integration/test_aws/test_basic.py @@ -3,8 +3,8 @@ # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.constants.aws import TEMPLATE_DIR, TEST_CASES_DIR +from . import event_monitor, local_internal_options # noqa: F401 from wazuh_testing.utils.configuration import ( get_test_cases_data, load_configuration_template, diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py index 23601bf2d6c..e69de29bb2d 100644 --- a/tests/integration/test_aws/utils.py +++ b/tests/integration/test_aws/utils.py @@ -1,33 +0,0 @@ -# Copyright (C) 2015-2023, Wazuh Inc. -# Created by Wazuh, Inc. . -# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 - -""" - This file contains constant and other utilities to be used in the AWS integration test module. 
-""" - -# CONSTANTS - -ERROR_MESSAGES = { - - "failed_start": "The AWS module did not start as expected", - "incorrect_parameters": "The AWS module was not called with the correct parameters", - "error_found": "Found error message on AWS module", - "incorrect_event_number": "The AWS module did not process the expected number of events", - "incorrect_non-existent_region_message": "The AWS module did not show correct message about non-existent region", - "incorrect_no_existent_log_group": "The AWS module did not show correct message non-existent log group", - "incorrect_empty_path_message": "The AWS module did not show correct message about empty path", - "incorrect_empty_path_suffix_message": "The AWS module did not show correct message about empty path_suffix", - "incorrect_error_message": "The AWS module did not show the expected error message", - "incorrect_empty_value_message": "The AWS module did not show the expected message about empty value", - "incorrect_legacy_warning": "The AWS module did not show the expected legacy warning", - "incorrect_warning": "The AWS module did not show the expected warning", - "incorrect_invalid_value_message": "The AWS module did not show the expected message about invalid value", - "incorrect_service_calls_amount": "The AWS module was not called for bucket or service the right amount of times" -} - -TIMEOUTS = { - - 10: 10, - 20: 20 -} From 1831d1c17ab16f344bab22baefd979baef5d8773 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Fri, 22 Sep 2023 14:48:55 -0300 Subject: [PATCH 261/419] Remove empty utils file --- tests/integration/test_aws/utils.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 tests/integration/test_aws/utils.py diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py deleted file mode 100644 index e69de29bb2d..00000000000 From 690cc6c1e177921884dd64f08d76c638d32fba47 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Tue, 14 Nov 2023 16:13:30 -0300 Subject: [PATCH 262/419] Refactor module to use a configuration handler called TestConfigurator --- .../test_check_rare_socket_responses.py | 3 +- tests/integration/test_aws/conftest.py | 70 ++++++ tests/integration/test_aws/test_basic.py | 71 +++---- .../test_aws/test_discard_regex.py | 39 ++-- tests/integration/test_aws/test_log_groups.py | 37 ++-- .../test_aws/test_only_logs_after.py | 120 +++++------ tests/integration/test_aws/test_parser.py | 200 ++++++++---------- tests/integration/test_aws/test_path.py | 38 ++-- .../integration/test_aws/test_path_suffix.py | 38 ++-- tests/integration/test_aws/test_regions.py | 65 +++--- .../test_aws/test_remove_from_bucket.py | 56 +++-- tests/integration/test_aws/utils.py | 37 ++++ 12 files changed, 406 insertions(+), 368 deletions(-) create mode 100644 tests/integration/test_aws/utils.py diff --git a/tests/integration/test_analysisd/test_all_syscheckd_configurations/test_check_rare_socket_responses.py b/tests/integration/test_analysisd/test_all_syscheckd_configurations/test_check_rare_socket_responses.py index 366dbe065cf..571ace13b66 100644 --- a/tests/integration/test_analysisd/test_all_syscheckd_configurations/test_check_rare_socket_responses.py +++ b/tests/integration/test_analysisd/test_all_syscheckd_configurations/test_check_rare_socket_responses.py @@ -93,7 +93,8 @@ def test_validate_rare_socket_responses(test_metadata, configure_local_internal_ tier: 2 - parameters: + parameters:eceiver_sockets[0].send(test_metadata['input']) + monitored_sockets[0].start(c - test_metadata: type: dict brief: Test 
case metadata. diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index e74f146a473..2aab65e4e05 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -1,4 +1,16 @@ +""" +Copyright (C) 2015-2023, Wazuh Inc. +Created by Wazuh, Inc. . +This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +This module will contains all necessary components (fixtures, classes, methods) +to configure the test for it's execution. +""" + import pytest +from os.path import join + +# Qa-integration-framework imports from wazuh_testing.logger import logger from wazuh_testing.constants.aws import ( FAKE_CLOUDWATCH_LOG_GROUP, @@ -16,7 +28,14 @@ ) from wazuh_testing.modules.aws.db_utils import delete_s3_db, delete_services_db from wazuh_testing.utils.services import control_service +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) +# Local imports +from .utils import TEST_DATA_PATH @pytest.fixture def mark_cases_as_skipped(metadata): @@ -162,3 +181,54 @@ def clean_aws_services_db(): yield delete_services_db() + + +class TestConfigurator: + """ + TestConfigurator class is responsible for configuring test data and parameters for a specific test module. + + Attributes: + - module (str): The name of the test module. + - configuration_path (str): The path to the configuration directory for the test module. + - test_cases_path (str): The path to the test cases directory for the test module. + - metadata (dict): Test metadata retrieved from the test cases. + - parameters (list): Test parameters retrieved from the test cases. + - cases_ids (list): Identifiers for the test cases. + - test_configuration_template (dict): The loaded configuration template for the test module. + + """ + def __init__(self, module): + self.module = module + self.configuration_path = join(TEST_DATA_PATH, TEMPLATE_DIR, self.module) + self.test_cases_path = join(TEST_DATA_PATH, TEST_CASES_DIR, self.module) + self.metadata = None + self.parameters = None + self.cases_ids = None + self.test_configuration_template = None + + def configure_test(self, configuration_file="", cases_file=""): + """ + Configures the test data and parameters for the given test module. + + Args: + - configuration_file (str): The name of the configuration file. + - cases_file (str): The name of the test cases file. + + Returns: + None + """ + # Set test configuration path + configurations_path = join(self.configuration_path, configuration_file) + + # Set test cases path + cases_path = join(self.test_cases_path, cases_file) + + # Get test cases data + self.parameters, self.metadata, self.cases_ids = get_test_cases_data(cases_path) + + # load configuration template + self.test_configuration_template = load_configuration_template( + configurations_path, + self.parameters, + self.metadata + ) diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py index 609e933738a..0daa5b1aa8b 100644 --- a/tests/integration/test_aws/test_basic.py +++ b/tests/integration/test_aws/test_basic.py @@ -1,41 +1,36 @@ -import os +""" +Copyright (C) 2015-2023, Wazuh Inc. +Created by Wazuh, Inc. . 
+This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +This module will contains all cases for the basic test suite +""" + import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.aws import TEMPLATE_DIR, TEST_CASES_DIR from . import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) # Local module imports from .utils import ERROR_MESSAGES +from conftest import TestConfigurator pytestmark = [pytest.mark.server] - -# Generic vars -MODULE = 'basic_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='basic_test_module') # -------------------------------------------- TEST_BUCKET_DEFAULTS ---------------------------------------------------- -# Configuration and cases -t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_defaults.yaml') -t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_defaults.yaml') - -# Enabled test configurations -t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) -t1_configurations = load_configuration_template( - t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='bucket_configuration_defaults.yaml', + cases_file='cases_bucket_defaults.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_bucket_defaults( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -122,19 +117,15 @@ def test_bucket_defaults( # -------------------------------------------- TEST_CLOUDWATCH_DEFAULTS ------------------------------------------------ -# Configuration and cases data -t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'cloudwatch_configuration_defaults.yaml') -t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_defaults.yaml') - -# Enabled test configurations -t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) -configurations = load_configuration_template( - t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata -) +# Configure T2 test +configurator.configure_test(configuration_file='cloudwatch_configuration_defaults.yaml', + cases_file='cases_cloudwatch_defaults.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(configurations, t2_configuration_metadata), ids=t2_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_service_defaults( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, 
configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -227,19 +218,15 @@ def test_service_defaults( # ------------------------------------------ TEST_INSPECTOR_DEFAULTS --------------------------------------------------- -# Configuration and cases data -t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'inspector_configuration_defaults.yaml') -t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_defaults.yaml') - -# Enabled test configurations -t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) -configurations = load_configuration_template( - t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata -) +# Configure T3 test +configurator.configure_test(configuration_file='inspector_configuration_defaults.yaml', + cases_file='cases_inspector_defaults.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(configurations, t3_configuration_metadata), ids=t3_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_inspector_defaults( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring diff --git a/tests/integration/test_aws/test_discard_regex.py b/tests/integration/test_aws/test_discard_regex.py index 54cd7f27d9b..977c6e5ce67 100644 --- a/tests/integration/test_aws/test_discard_regex.py +++ b/tests/integration/test_aws/test_discard_regex.py @@ -1,40 +1,37 @@ -import os +""" +Copyright (C) 2015-2023, Wazuh Inc. +Created by Wazuh, Inc. . 
+This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +This module will contains all cases for the discard_regex test suite +""" + import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) from wazuh_testing.modules.aws.db_utils import s3_db_exists # Local module imports from .utils import ERROR_MESSAGES, TIMEOUTS +from conftest import TestConfigurator pytestmark = [pytest.mark.server] +# Set test configurator for the module +configurator = TestConfigurator(module='discard_regex_test_module') -# Generic vars -MODULE = 'discard_regex_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) - -# -----------------------------------------opvb----------- TEST_PATH ------------------------------------------------------- -configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_discard_regex.yaml') -cases_path = os.path.join(TEST_CASES_PATH, 'cases_discard_regex.yaml') - -configuration_parameters, configuration_metadata, case_ids = get_test_cases_data(cases_path) -configurations = load_configuration_template( - configurations_path, configuration_parameters, configuration_metadata -) +# ---------------------------------------------------- TEST_PATH ------------------------------------------------------- +# Test configuration +configurator.configure_test(configuration_file='configuration_discard_regex.yaml', + cases_file='cases_discard_regex.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(configurations, configuration_metadata), ids=case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_discard_regex( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, diff --git a/tests/integration/test_aws/test_log_groups.py b/tests/integration/test_aws/test_log_groups.py index b806bfd186c..1e35cdd509a 100644 --- a/tests/integration/test_aws/test_log_groups.py +++ b/tests/integration/test_aws/test_log_groups.py @@ -1,14 +1,16 @@ -import os +""" +Copyright (C) 2015-2023, Wazuh Inc. +Created by Wazuh, Inc. . 
+This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +This module will contains all cases for the log groups test suite +""" + import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) from wazuh_testing.modules.aws.db_utils import ( get_multiple_service_db_row, services_db_exists, @@ -17,28 +19,23 @@ # Local module imports from .utils import ERROR_MESSAGES, TIMEOUTS +from conftest import TestConfigurator pytestmark = [pytest.mark.server] - -# Generic vars -MODULE = 'log_groups_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='log_groups_test_module') # ----------------------------------------------- TEST_AWS_LOG_GROUPS -------------------------------------------------- -t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_log_groups.yaml') -t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_log_groups.yaml') - -t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) -t1_configurations = load_configuration_template( - t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='configuration_log_groups.yaml', + cases_file='cases_log_groups.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_log_groups( configuration, metadata, create_log_stream, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, diff --git a/tests/integration/test_aws/test_only_logs_after.py b/tests/integration/test_aws/test_only_logs_after.py index 42a117f735a..d407ecd9431 100644 --- a/tests/integration/test_aws/test_only_logs_after.py +++ b/tests/integration/test_aws/test_only_logs_after.py @@ -1,10 +1,16 @@ -import os +""" +Copyright (C) 2015-2023, Wazuh Inc. +Created by Wazuh, Inc. . 
+This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +This module will contains all cases for the only logs after test suite +""" + import pytest from datetime import datetime # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR from wazuh_testing.modules import aws as cons from wazuh_testing.modules.aws import ONLY_LOGS_AFTER_PARAM, event_monitor, local_internal_options # noqa: F401 from wazuh_testing.modules.aws.cli_utils import call_aws_module @@ -20,34 +26,27 @@ get_s3_db_row, ) from wazuh_testing.modules.aws.s3_utils import get_last_file_key, upload_file -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) +# Local module imports from .utils import ERROR_MESSAGES, TIMEOUTS +from conftest import TestConfigurator pytestmark = [pytest.mark.server] -# Generic vars -MODULE = 'only_logs_after_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='only_logs_after_test_module') # --------------------------------------------- TEST_BUCKET_WITHOUT_ONLY_LOGS_AFTER ------------------------------------ -t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_without_only_logs_after.yaml') -t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_without_only_logs_after.yaml') - -t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) -t1_configurations = load_configuration_template( - t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='bucket_configuration_without_only_logs_after.yaml', + cases_file='cases_bucket_without_only_logs_after.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_bucket_without_only_logs_after( configuration, metadata, upload_and_delete_file_to_s3, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, @@ -169,17 +168,15 @@ def test_bucket_without_only_logs_after( # -------------------------------------------- TEST_SERVICE_WITHOUT_ONLY_LOGS_AFTER ------------------------------------ -t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'service_configuration_without_only_logs_after.yaml') -t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_service_without_only_logs_after.yaml') - -t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) -t2_configurations = load_configuration_template( - t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata -) +# Configure T2 test +configurator.configure_test(configuration_file='service_configuration_without_only_logs_after.yaml', + cases_file='cases_service_without_only_logs_after.yaml') @pytest.mark.tier(level=0) 
-@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_service_without_only_logs_after( configuration, metadata, create_log_stream_in_existent_group, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, @@ -289,17 +286,15 @@ def test_service_without_only_logs_after( # --------------------------------------------- TEST_BUCKET_WITH_ONLY_LOGS_AFTER --------------------------------------- -t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_with_only_logs_after.yaml') -t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_with_only_logs_after.yaml') - -t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) -t3_configurations = load_configuration_template( - t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata -) +# Configure T3 test +configurator.configure_test(configuration_file='bucket_configuration_with_only_logs_after.yaml', + cases_file='cases_bucket_with_only_logs_after.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t3_configurations, t3_configuration_metadata), ids=t3_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_bucket_with_only_logs_after( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -420,17 +415,15 @@ def test_bucket_with_only_logs_after( # --------------------------------------------TEST_CLOUDWATCH_WITH_ONLY_LOGS_AFTER ------------------------------------- -t4_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'cloudwatch_configuration_with_only_logs_after.yaml') -t4_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_with_only_logs_after.yaml') - -t4_configuration_parameters, t4_configuration_metadata, t4_case_ids = get_test_cases_data(t4_cases_path) -t4_configurations = load_configuration_template( - t4_configurations_path, t4_configuration_parameters, t4_configuration_metadata -) +# Configure T4 test +configurator.configure_test(configuration_file='cloudwatch_configuration_with_only_logs_after.yaml', + cases_file='cases_cloudwatch_with_only_logs_after.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t4_configurations, t4_configuration_metadata), ids=t4_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_cloudwatch_with_only_logs_after( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -549,17 +542,15 @@ def test_cloudwatch_with_only_logs_after( # ------------------------------------------ TEST_INSPECTOR_WITH_ONLY_LOGS_AFTER --------------------------------------- -t5_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'inspector_configuration_with_only_logs_after.yaml') 
-t5_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_with_only_logs_after.yaml') - -t5_configuration_parameters, t5_configuration_metadata, t5_case_ids = get_test_cases_data(t5_cases_path) -t5_configurations = load_configuration_template( - t5_configurations_path, t5_configuration_parameters, t5_configuration_metadata -) +# Configure T5 test +configurator.configure_test(configuration_file='inspector_configuration_with_only_logs_after.yaml', + cases_file='cases_inspector_with_only_logs_after.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t5_configurations, t5_configuration_metadata), ids=t5_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_inspector_with_only_logs_after( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -670,13 +661,14 @@ def test_inspector_with_only_logs_after( # ---------------------------------------------------- TEST_MULTIPLE_CALLS --------------------------------------------- -t5_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_multiple_calls.yaml') - -_, t5_configuration_metadata, t5_case_ids = get_test_cases_data(t5_cases_path) +# Configure T5 test +configurator.configure_test(cases_file='cases_bucket_multiple_calls.yaml') @pytest.mark.tier(level=1) -@pytest.mark.parametrize('metadata', t5_configuration_metadata, ids=t5_case_ids) +@pytest.mark.parametrize('metadata', + configurator.metadata, + ids=configurator.cases_ids) def test_bucket_multiple_calls( metadata, clean_s3_cloudtrail_db, load_wazuh_basic_configuration, restart_wazuh_function, delete_file_from_s3 ): @@ -781,13 +773,14 @@ def test_bucket_multiple_calls( # -------------------------------------------- TEST_INSPECTOR_MULTIPLE_CALLS ------------------------------------------- -t6_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_multiple_calls.yaml') - -_, t6_configuration_metadata, t6_case_ids = get_test_cases_data(t6_cases_path) +# Configure T6 test +configurator.configure_test(cases_file='cases_inspector_multiple_calls.yaml') @pytest.mark.tier(level=1) -@pytest.mark.parametrize('metadata', t6_configuration_metadata, ids=t6_case_ids) +@pytest.mark.parametrize('metadata', + configurator.metadata, + ids=configurator.cases_ids) @pytest.mark.xfail def test_inspector_multiple_calls( metadata, clean_aws_services_db, load_wazuh_basic_configuration, restart_wazuh_function @@ -863,13 +856,14 @@ def test_inspector_multiple_calls( # ----------------------------------------- TEST_CLOUDWATCH_MULTIPLE_CALLS --------------------------------------------- -t7_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_multiple_calls.yaml') - -_, t7_configuration_metadata, t7_case_ids = get_test_cases_data(t7_cases_path) +# Configure T7 test +configurator.configure_test(cases_file='cases_cloudwatch_multiple_calls.yaml') @pytest.mark.tier(level=1) -@pytest.mark.parametrize('metadata', t7_configuration_metadata, ids=t7_case_ids) +@pytest.mark.parametrize('metadata', + configurator.metadata, + ids=configurator.cases_ids) def test_cloudwatch_multiple_calls( metadata, clean_aws_services_db, load_wazuh_basic_configuration, restart_wazuh_function, delete_log_stream ): diff --git a/tests/integration/test_aws/test_parser.py b/tests/integration/test_aws/test_parser.py index 
e46feeda3cb..0020bac8648 100644 --- a/tests/integration/test_aws/test_parser.py +++ b/tests/integration/test_aws/test_parser.py @@ -1,45 +1,39 @@ -import os +""" +Copyright (C) 2015-2023, Wazuh Inc. +Created by Wazuh, Inc. . +This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +This module will contains all cases for the parser test suite +""" import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) # Local module imports from .utils import ERROR_MESSAGES, TIMEOUTS +from conftest import TestConfigurator pytestmark = [pytest.mark.server] - -# Generic vars -MODULE = 'parser_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='parser_test_module') # --------------------------------------------TEST_BUCKET_AND_SERVICE_MISSING ------------------------------------------ -# Configuration and cases data -t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_bucket_and_service_missing.yaml') -t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_and_service_missing.yaml') - -# Enabled test configurations -t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) -t1_configurations = load_configuration_template( - t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='configuration_bucket_and_service_missing.yaml', + cases_file='cases_bucket_and_service_missing.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_bucket_and_service_missing( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: Command for bucket and service weren't invoked. 
@@ -96,23 +90,19 @@ def test_bucket_and_service_missing( # -------------------------------------------- TEST_TYPE_MISSING_IN_BUCKET --------------------------------------------- -# Configuration and cases data -t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_type_missing_in_bucket.yaml') -t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_type_missing_in_bucket.yaml') - -# Enabled test configurations -t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) -t2_configurations = load_configuration_template( - t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata -) +# Configure T2 test +configurator.configure_test(configuration_file='configuration_type_missing_in_bucket.yaml', + cases_file='cases_type_missing_in_bucket.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_type_missing_in_bucket( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: A warning occurs and was displayed in `ossec.log`. @@ -168,23 +158,19 @@ def test_type_missing_in_bucket( # -------------------------------------------- TEST_TYPE_MISSING_IN_SERVICE -------------------------------------------- -# Configuration and cases data -t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_type_missing_in_service.yaml') -t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_type_missing_in_service.yaml') - -# Enabled test configurations -t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) -t3_configurations = load_configuration_template( - t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata -) +# Configure T3 test +configurator.configure_test(configuration_file='configuration_type_missing_in_service.yaml', + cases_file='cases_type_missing_in_service.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t3_configurations, t3_configuration_metadata), ids=t3_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_type_missing_in_service( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. 
@@ -239,24 +225,21 @@ def test_type_missing_in_service( assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_error_message'] -# -------------------------------------------- TEST_EMPTY_VALUES_IN_BUCKET --------------------------------------------- -# Configuration and cases data -t4_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_bucket.yaml') -t4_cases_path = os.path.join(TEST_CASES_PATH, 'cases_empty_values_in_bucket.yaml') -# Enabled test configurations -t4_configuration_parameters, t4_configuration_metadata, t4_case_ids = get_test_cases_data(t4_cases_path) -t4_configurations = load_configuration_template( - t4_configurations_path, t4_configuration_parameters, t4_configuration_metadata -) +# -------------------------------------------- TEST_EMPTY_VALUES_IN_BUCKET --------------------------------------------- +# Configure T4 test +configurator.configure_test(configuration_file='configuration_values_in_bucket.yaml', + cases_file='cases_empty_values_in_bucket.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t4_configurations, t4_configuration_metadata), ids=t4_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_empty_values_in_bucket( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. 
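The three attributes consumed by the `parametrize` decorators map one-to-one onto the tuple the removed code unpacked from `get_test_cases_data()`. As a rough, self-contained illustration of that shape (the per-case split below is an assumption about the framework helper; the sample case mirrors `cases_bucket_custom.yaml`, which appears later in this series):

```python
# Assumed shape of the data behind configurator.test_configuration_template,
# configurator.metadata and configurator.cases_ids: one entry per case in the
# cases YAML, split into three parallel lists.
cases = [
    {
        'name': 'custom_bucket_defaults',
        'description': 'Custom bucket default configuration',
        'configuration_parameters': {'SQS_NAME': 'wazuh-sqs-integration-tests'},
        'metadata': {'sqs_name': 'wazuh-sqs-integration-tests'},
    }
]

configuration_parameters = [case['configuration_parameters'] for case in cases]
configuration_metadata = [case['metadata'] for case in cases]
case_ids = [case['name'] for case in cases]

assert case_ids == ['custom_bucket_defaults']
```

The `configuration_parameters` dict is what fills the upper-case placeholders in the matching configuration template, `metadata` is handed to the test body, and each case `name` becomes the pytest id via `ids=configurator.cases_ids`.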
@@ -310,24 +293,21 @@ def test_empty_values_in_bucket( assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_value_message'] -# -------------------------------------------- TEST_EMPTY_VALUES_IN_SERVICE -------------------------------------------- -# Configuration and cases data -t5_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_service.yaml') -t5_cases_path = os.path.join(TEST_CASES_PATH, 'cases_empty_values_in_service.yaml') -# Enabled test configurations -t5_configuration_parameters, t5_configuration_metadata, t5_case_ids = get_test_cases_data(t5_cases_path) -t5_configurations = load_configuration_template( - t5_configurations_path, t5_configuration_parameters, t5_configuration_metadata -) +# -------------------------------------------- TEST_EMPTY_VALUES_IN_SERVICE -------------------------------------------- +# Configure T5 test +configurator.configure_test(configuration_file='configuration_values_in_service.yaml', + cases_file='cases_empty_values_in_service.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t5_configurations, t5_configuration_metadata), ids=t5_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_empty_values_in_service( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. @@ -384,23 +364,19 @@ def test_empty_values_in_service( # ------------------------------------------ TEST_INVALID_VALUES_IN_BUCKET --------------------------------------------- -# Configuration and cases data -t6_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_bucket.yaml') -t6_cases_path = os.path.join(TEST_CASES_PATH, 'cases_invalid_values_in_bucket.yaml') - -# Enabled test configurations -t6_configuration_parameters, t6_configuration_metadata, t6_case_ids = get_test_cases_data(t6_cases_path) -t6_configurations = load_configuration_template( - t6_configurations_path, t6_configuration_parameters, t6_configuration_metadata -) +# Configure T6 test +configurator.configure_test(configuration_file='configuration_values_in_bucket.yaml', + cases_file='cases_invalid_values_in_bucket.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t6_configurations, t6_configuration_metadata), ids=t6_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_invalid_values_in_bucket( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. 
@@ -456,23 +432,19 @@ def test_invalid_values_in_bucket( # ------------------------------------------ TEST_INVALID_VALUES_IN_BUCKET --------------------------------------------- -# Configuration and cases data -t7_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_service.yaml') -t7_cases_path = os.path.join(TEST_CASES_PATH, 'cases_invalid_values_in_service.yaml') - -# Enabled test configurations -t7_configuration_parameters, t7_configuration_metadata, t7_case_ids = get_test_cases_data(t7_cases_path) -t7_configurations = load_configuration_template( - t7_configurations_path, t7_configuration_parameters, t7_configuration_metadata -) +# Configure T7 test +configurator.configure_test(configuration_file='configuration_values_in_service.yaml', + cases_file='cases_invalid_values_in_service.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t7_configurations, t7_configuration_metadata), ids=t7_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_invalid_values_in_service( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. @@ -528,23 +500,19 @@ def test_invalid_values_in_service( # --------------------------------------- TEST_MULTIPLE_BUCKET_AND_SERVICE_TAGS ---------------------------------------- -# Configuration and cases data -t8_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_multiple_bucket_and_service_tags.yaml') -t8_cases_path = os.path.join(TEST_CASES_PATH, 'cases_multiple_bucket_and_service_tags.yaml') - -# Enabled test configurations -t8_configuration_parameters, t8_configuration_metadata, t8_case_ids = get_test_cases_data(t8_cases_path) -t8_configurations = load_configuration_template( - t8_configurations_path, t8_configuration_parameters, t8_configuration_metadata -) +# Configure T8 test +configurator.configure_test(configuration_file='configuration_multiple_bucket_and_service_tags.yaml', + cases_file='cases_multiple_bucket_and_service_tags.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t8_configurations, t8_configuration_metadata), ids=t8_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_multiple_bucket_and_service_tags( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: The command is invoked two times for buckets and two times for services. 
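One subtlety of calling `configurator.configure_test(...)` several times in the same module, as test_parser.py now does, is that each `@pytest.mark.parametrize(...)` argument is evaluated at import time, immediately after the `configure_test` call above it. Assuming `configure_test` rebinds (rather than mutates) the configurator's attributes, every test block keeps the data that was current when it was decorated, and later calls cannot disturb it. A small self-contained illustration of that behaviour, using a toy holder instead of the real configurator:

```python
# The decorator argument is evaluated when the decorator line runs during module
# import, right after each configure() call, so each test keeps the list that was
# bound at that point even though the holder is reconfigured afterwards.
import pytest


class _Holder:
    def __init__(self):
        self.values = []

    def configure(self, values):
        self.values = values


holder = _Holder()

holder.configure(['case-a', 'case-b'])


@pytest.mark.parametrize('value', holder.values)   # captures ['case-a', 'case-b']
def test_first_block(value):
    assert value.startswith('case-')


holder.configure(['case-c'])


@pytest.mark.parametrize('value', holder.values)   # captures ['case-c'] only
def test_second_block(value):
    assert value == 'case-c'
```

The same holds for the `zip(...)` objects in the real decorators: they are built over the list objects bound at decoration time, so rebinding the attributes for the next block does not affect tests that were already parametrized.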
diff --git a/tests/integration/test_aws/test_path.py b/tests/integration/test_aws/test_path.py index d6d740b0034..ace1e54d7cc 100644 --- a/tests/integration/test_aws/test_path.py +++ b/tests/integration/test_aws/test_path.py @@ -1,43 +1,41 @@ -import os +""" +Copyright (C) 2015-2023, Wazuh Inc. +Created by Wazuh, Inc. . +This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +This module will contains all cases for the path test suite +""" + import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 from wazuh_testing.modules.aws.db_utils import ( get_s3_db_row, s3_db_exists, table_exists_or_has_values, ) -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) + # Local module imports from .utils import ERROR_MESSAGES, TIMEOUTS +from conftest import TestConfigurator pytestmark = [pytest.mark.server] - -# Generic vars -MODULE = 'path_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='path_test_module') # ---------------------------------------------------- TEST_PATH ------------------------------------------------------- -configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_path.yaml') -cases_path = os.path.join(TEST_CASES_PATH, 'cases_path.yaml') - -configuration_parameters, configuration_metadata, case_ids = get_test_cases_data(cases_path) -configurations = load_configuration_template( - configurations_path, configuration_parameters, configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='configuration_path.yaml', + cases_file='cases_path.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(configurations, configuration_metadata), ids=case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_path( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring diff --git a/tests/integration/test_aws/test_path_suffix.py b/tests/integration/test_aws/test_path_suffix.py index 6c7450091ec..d04a0696964 100644 --- a/tests/integration/test_aws/test_path_suffix.py +++ b/tests/integration/test_aws/test_path_suffix.py @@ -1,43 +1,41 @@ -import os +""" +Copyright (C) 2015-2023, Wazuh Inc. +Created by Wazuh, Inc. . 
+This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +This module will contains all cases for the path suffix test suite +""" + import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 from wazuh_testing.modules.aws.db_utils import ( get_s3_db_row, s3_db_exists, table_exists_or_has_values, ) -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) + # Local module imports from .utils import ERROR_MESSAGES, TIMEOUTS +from conftest import TestConfigurator pytestmark = [pytest.mark.server] - -# Generic vars -MODULE = 'path_suffix_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='path_suffix_test_module') # ---------------------------------------------------- TEST_PATH ------------------------------------------------------- -configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_path_suffix.yaml') -cases_path = os.path.join(TEST_CASES_PATH, 'cases_path_suffix.yaml') - -configuration_parameters, configuration_metadata, case_ids = get_test_cases_data(cases_path) -configurations = load_configuration_template( - configurations_path, configuration_parameters, configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='configuration_path_suffix.yaml', + cases_file='cases_path_suffix.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(configurations, configuration_metadata), ids=case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_path_suffix( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring diff --git a/tests/integration/test_aws/test_regions.py b/tests/integration/test_aws/test_regions.py index 4b45b1feb6e..23593491062 100644 --- a/tests/integration/test_aws/test_regions.py +++ b/tests/integration/test_aws/test_regions.py @@ -1,9 +1,15 @@ -import os +""" +Copyright (C) 2015-2023, Wazuh Inc. +Created by Wazuh, Inc. . 
+This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +This module will contains all cases for the region test suite +""" + import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 from wazuh_testing.modules.aws import ( # noqa: F401 AWS_SERVICES_DB_PATH, @@ -17,33 +23,26 @@ s3_db_exists, table_exists_or_has_values, ) -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) + # Local module imports from .utils import ERROR_MESSAGES, TIMEOUTS +from conftest import TestConfigurator pytestmark = [pytest.mark.server] -# Generic vars -MODULE = 'regions_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='regions_test_module') # ---------------------------------------------------- TEST_PATH ------------------------------------------------------- -t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_regions.yaml') -t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_regions.yaml') - -t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) -t1_configurations = load_configuration_template( - t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='bucket_configuration_regions.yaml', + cases_file='cases_bucket_regions.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_regions( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -177,17 +176,15 @@ def test_regions( # -------------------------------------------- TEST_CLOUDWATCH_REGIONS ------------------------------------------------- -t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'cloudwatch_configuration_regions.yaml') -t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_regions.yaml') - -t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) -configurations = load_configuration_template( - t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata -) +# Configure T2 test +configurator.configure_test(configuration_file='cloudwatch_configuration_regions.yaml', + cases_file='cases_cloudwatch_regions.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(configurations, t2_configuration_metadata), ids=t2_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_cloudwatch_regions( configuration, metadata, load_wazuh_basic_configuration, 
set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -319,17 +316,15 @@ def test_cloudwatch_regions( # ------------------------------------------ TEST_INSPECTOR_PATH ------------------------------------------------------- -t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'inspector_configuration_regions.yaml') -t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_regions.yaml') - -t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) -configurations = load_configuration_template( - t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata -) +# Configure T3 test +configurator.configure_test(configuration_file='inspector_configuration_regions.yaml', + cases_file='cases_inspector_regions.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(configurations, t3_configuration_metadata), ids=t3_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_inspector_regions( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring diff --git a/tests/integration/test_aws/test_remove_from_bucket.py b/tests/integration/test_aws/test_remove_from_bucket.py index 8c6dc85bd89..4f4aef34772 100644 --- a/tests/integration/test_aws/test_remove_from_bucket.py +++ b/tests/integration/test_aws/test_remove_from_bucket.py @@ -1,38 +1,37 @@ -import os +""" +Copyright (C) 2015-2023, Wazuh Inc. +Created by Wazuh, Inc. . 
+This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +This module will contains all cases for the remove from bucket test suite +""" import pytest +# qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 from wazuh_testing.modules.aws.cloudwatch_utils import log_stream_exists from wazuh_testing.modules.aws.s3_utils import file_exists -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) -pytestmark = [pytest.mark.server] +# Local module imports +from .utils import ERROR_MESSAGES +from conftest import TestConfigurator +pytestmark = [pytest.mark.server] -# Generic vars -MODULE = 'remove_from_bucket_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='remove_from_bucket_test_module') # ---------------------------------------------------- TEST_REMOVE_FROM_BUCKET ----------------------------------------- -# Configuration and cases data -t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_remove_from_bucket.yaml') -t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_remove_from_bucket.yaml') - -t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) -t1_configurations = load_configuration_template( - t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='configuration_remove_from_bucket.yaml', + cases_file='cases_remove_from_bucket.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_remove_from_bucket( configuration, metadata, mark_cases_as_skipped, upload_and_delete_file_to_s3, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, @@ -135,18 +134,15 @@ def test_remove_from_bucket( # ---------------------------------------------------- TEST_REMOVE_LOG_STREAM ------------------------------------------ -# Configuration and cases data -t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_remove_log_stream.yaml') -t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_remove_log_streams.yaml') - -t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) -t2_configurations = load_configuration_template( - t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata -) +# Configure T2 test +configurator.configure_test(configuration_file='configuration_remove_log_stream.yaml', + cases_file='cases_remove_log_streams.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) +@pytest.mark.parametrize('configuration, metadata', + 
zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_remove_log_stream( configuration, metadata, create_log_stream, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py new file mode 100644 index 00000000000..da0851074c5 --- /dev/null +++ b/tests/integration/test_aws/utils.py @@ -0,0 +1,37 @@ +# Copyright (C) 2015-2023, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +""" + This file contains constant and other utilities to be used in the AWS integration test module. +""" +from os.path import join, dirname, realpath + +# CONSTANTS + +ERROR_MESSAGES = { + + "failed_start": "The AWS module did not start as expected", + "incorrect_parameters": "The AWS module was not called with the correct parameters", + "error_found": "Found error message on AWS module", + "incorrect_event_number": "The AWS module did not process the expected number of events", + "incorrect_non-existent_region_message": "The AWS module did not show correct message about non-existent region", + "incorrect_no_existent_log_group": "The AWS module did not show correct message non-existent log group", + "incorrect_empty_path_message": "The AWS module did not show correct message about empty path", + "incorrect_empty_path_suffix_message": "The AWS module did not show correct message about empty path_suffix", + "incorrect_error_message": "The AWS module did not show the expected error message", + "incorrect_empty_value_message": "The AWS module did not show the expected message about empty value", + "incorrect_legacy_warning": "The AWS module did not show the expected legacy warning", + "incorrect_warning": "The AWS module did not show the expected warning", + "incorrect_invalid_value_message": "The AWS module did not show the expected message about invalid value", + "incorrect_service_calls_amount": "The AWS module was not called for bucket or service the right amount of times" +} + +TIMEOUTS = { + + 10: 10, + 20: 20 +} + +# Paths +TEST_DATA_PATH = join(dirname(realpath(__file__)), 'data') From 41eb8cd505a2b081a7060f7a1753ca884286dad4 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Wed, 15 Nov 2023 09:54:24 -0300 Subject: [PATCH 263/419] Add parameter to fixture --- tests/integration/conftest.py | 13 ++++++++----- .../integration/test_aws/test_remove_from_bucket.py | 1 + 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 9e1735acd46..aa41be7edcc 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -245,18 +245,21 @@ def restart_wazuh_function(request): @pytest.fixture() -def file_monitoring(request): +def file_monitoring(request, file_to_monitor=None): """Fixture to handle the monitoring of a specified file. It uses the variable `file_to_monitor` to determinate the file to monitor. Default `LOG_FILE_PATH` Args: request (fixture): Provide information on the executing test function. 
+ + Parameters + ---------- + request + file_to_monitor """ - if hasattr(request.module, 'file_to_monitor'): - file_to_monitor = getattr(request.module, 'file_to_monitor') - else: - file_to_monitor = WAZUH_LOG_PATH + if file_to_monitor is None: + file_to_monitor = getattr(request.module, 'file_to_monitor', WAZUH_LOG_PATH) logger.debug(f"Initializing file to monitor to {file_to_monitor}") diff --git a/tests/integration/test_aws/test_remove_from_bucket.py b/tests/integration/test_aws/test_remove_from_bucket.py index 4f4aef34772..b20a8d9b191 100644 --- a/tests/integration/test_aws/test_remove_from_bucket.py +++ b/tests/integration/test_aws/test_remove_from_bucket.py @@ -5,6 +5,7 @@ This module will contains all cases for the remove from bucket test suite """ + import pytest # qa-integration-framework imports From e4f7aa36bfbed1fe020a42d1432ffa1772962d6f Mon Sep 17 00:00:00 2001 From: RamosFe Date: Thu, 26 Oct 2023 18:30:10 -0300 Subject: [PATCH 264/419] ix: add aws custom bucket integration test --- .../custom_bucket_configuration.yaml | 15 ++ .../cases_bucket_custom.yaml | 6 + .../cases_bucket_custom_logs.yaml | 8 + .../test_aws/test_custom_bucket.py | 239 ++++++++++++++++++ 4 files changed, 268 insertions(+) create mode 100644 tests/integration/test_aws/data/configuration_template/custom_bucket_test_module/custom_bucket_configuration.yaml create mode 100644 tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml create mode 100644 tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml create mode 100644 tests/integration/test_aws/test_custom_bucket.py diff --git a/tests/integration/test_aws/data/configuration_template/custom_bucket_test_module/custom_bucket_configuration.yaml b/tests/integration/test_aws/data/configuration_template/custom_bucket_test_module/custom_bucket_configuration.yaml new file mode 100644 index 00000000000..6e9e24b643e --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/custom_bucket_test_module/custom_bucket_configuration.yaml @@ -0,0 +1,15 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - subscriber: + attributes: + - type: buckets + elements: + - sqs_name: + value: SQS_NAME + - aws_profile: + value: qa diff --git a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml new file mode 100644 index 00000000000..acf58cc8b43 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml @@ -0,0 +1,6 @@ +- name: custom_bucket_defaults + description: Custom bucket default configuration + configuration_parameters: + SQS_NAME: wazuh-sqs-integration-tests + metadata: + sqs_name: wazuh-sqs-integration-tests diff --git a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml new file mode 100644 index 00000000000..8b2667a9126 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml @@ -0,0 +1,8 @@ +- name: bucket_with_logs + description: Logs inside a custom bucket + configuration_parameters: + SQS_NAME: wazuh-sqs-integration-tests + metadata: + sqs_name: wazuh-sqs-integration-tests + bucket_name: wazuh-sqs-integration-test + 
bucket_type: cloudtrail diff --git a/tests/integration/test_aws/test_custom_bucket.py b/tests/integration/test_aws/test_custom_bucket.py new file mode 100644 index 00000000000..8d6d9a4749c --- /dev/null +++ b/tests/integration/test_aws/test_custom_bucket.py @@ -0,0 +1,239 @@ +import os + +import pytest +from wazuh_testing import TEMPLATE_DIR, TEST_CASES_DIR, global_parameters, T_10 +from wazuh_testing.modules.aws import event_monitor, local_internal_options +from wazuh_testing.tools.configuration import ( + get_test_cases_data, + load_configuration_template, +) + +pytestmark = [pytest.mark.server] + +# Generic vars +# Name of the folder test module +MODULE = 'custom_bucket_test_module' +# Path of the data for the tests +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +# Path for the configuration of this module +CONFIGURATION_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +# Path for the test cases of this module +TEST_CASE_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) + +# -------------------------------------------- TEST_CUSTOM_BUCKETS_DEFAULTS ------------------------------------------- +# Configuration and cases data +t1_configurations_path = os.path.join(CONFIGURATION_PATH, 'custom_bucket_configuration.yaml') +t1_cases_path = os.path.join(TEST_CASE_PATH, 'cases_bucket_custom.yaml') + +# Enabled test configurations +t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) +t1_configurations = load_configuration_template( + t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +def test_custom_bucket_defaults(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, + restart_wazuh_function, file_monitoring): + """ + description: Test the AWS S3 custom bucket module is invoked the expected parameters and no error occurs. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check in the ossec.log that no errors occurs. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + wazuh_min_version: 4.7.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - upload_and_delete_file_to_s3: + type: fixture + brief: Upload a file to S3 bucket for the day of the execution. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_function: + type: fixture + brief: Restart the wazuh service. 
+ - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check in the log that no errors occurs. + input_description: + - The `configuration_defaults` file provides the module configuration for this test. + - The `cases_defaults` file provides the test cases. + """ + parameters = [ + 'wodles/aws/aws-s3', + '--subscriber', 'buckets', + '--queue', metadata['sqs_name'], + '--aws_profile', 'qa', + '--debug', '2' + ] + log_header = 'Launching S3 Subscriber Command: ' + expected_log = log_header + " ".join(parameters) + + # Check AWS module started + log_monitor.start( + timeout=global_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start, + error_message='The AWS module did not start as expected', + ).result() + + # Check command was called correctly + log_monitor.start( + timeout=global_parameters.default_timeout, + callback=event_monitor.make_aws_callback(expected_log, prefix='^.*'), + error_message='The AWS module was not called with the correct parameters', + ).result() + + # Detect any ERROR message + with pytest.raises(TimeoutError): + log_monitor.start( + timeout=global_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err, + ).result() + + +# -------------------------------------------- TEST_CUSTOM_BUCKETS_LOGS ------------------------------------------- +# Configuration and cases data +t2_configurations_path = os.path.join(CONFIGURATION_PATH, 'custom_bucket_configuration.yaml') +t2_cases_path = os.path.join(TEST_CASE_PATH, 'cases_bucket_custom_logs.yaml') + +# Enabled test configurations +t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) +t2_configurations = load_configuration_template( + t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) +def test_custom_bucket_logs(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, + restart_wazuh_function, file_monitoring, upload_and_delete_file_to_s3): + """ + description: Test the AWS S3 custom bucket module is invoked the expected parameters and no error occurs. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - Uploads a file to the S3 Bucket. + - test: + - Check in the log that the module was called with correct parameters. + - Check that the module retrieved a message from the SQS Queue. + - Check that the module processes a message from the SQS Queue. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Deletes the file created in the S3 Bucket. + wazuh_min_version: 4.7.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - upload_and_delete_file_to_s3: + type: fixture + brief: Upload a file to S3 bucket for the day of the execution. 
+ - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + - upload_and_delete_file_to_s3: + type: fixture + brief: Upload a file to S3 bucket for the day of the execution. + assertions: + - Check in the log that the module was called with correct parameters. + - Check that the module retrieved a message from the SQS Queue. + - Check that the module processes a message from the SQS Queue. + input_description: + - The `configuration_defaults` file provides the module configuration for this test. + - The `cases_defaults` file provides the test cases. + """ + sqs_name = metadata['sqs_name'] + + parameters = [ + 'wodles/aws/aws-s3', + '--subscriber', 'buckets', + '--queue', sqs_name, + '--aws_profile', 'qa', + '--debug', '2' + ] + log_header = 'Launching S3 Subscriber Command: ' + expected_log = log_header + " ".join(parameters) + + # Check AWS module started + log_monitor.start( + timeout=global_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start, + error_message='The AWS module did not start as expected', + ).result() + + # Check command was called correctly + log_monitor.start( + timeout=global_parameters.default_timeout, + callback=event_monitor.make_aws_callback(expected_log, prefix='^.*'), + error_message='The AWS module was not called with the correct parameters', + ).result() + + retrieve_pattern = fr'.*Retrieving messages from: {sqs_name}' + message_pattern = fr'.*The message is: .*' + + # Check if retrieves from the queue + log_monitor.start( + timeout=T_10, + callback=event_monitor.make_aws_callback(retrieve_pattern), + error_message='The AWS module did not retrieve from the SQS Queue', + ).result() + + # Check if it processes the created file + log_monitor.start( + timeout=T_10, + callback=event_monitor.make_aws_callback(message_pattern), + error_message='The AWS module did not handle the message', + ).result() From e78b6ed59fb33ad1ee25c9009ac2fc77e2e6a168 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Fri, 3 Nov 2023 11:28:52 -0300 Subject: [PATCH 265/419] fix: improves docstrings. --- .../test_aws/test_custom_bucket.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_aws/test_custom_bucket.py b/tests/integration/test_aws/test_custom_bucket.py index 8d6d9a4749c..c3707a8820a 100644 --- a/tests/integration/test_aws/test_custom_bucket.py +++ b/tests/integration/test_aws/test_custom_bucket.py @@ -38,7 +38,8 @@ def test_custom_bucket_defaults(configuration, metadata, load_wazuh_basic_config configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring): """ - description: Test the AWS S3 custom bucket module is invoked the expected parameters and no error occurs. + description: Test the AWS S3 custom bucket module is invoked with the expected parameters and no error occurs. + test_phases: - setup: - Load Wazuh light configuration. 
@@ -52,7 +53,9 @@ def test_custom_bucket_defaults(configuration, metadata, load_wazuh_basic_config - teardown: - Truncate wazuh logs. - Restore initial configuration, both ossec.conf and local_internal_options.conf. + wazuh_min_version: 4.7.0 + parameters: - configuration: type: dict @@ -81,9 +84,11 @@ def test_custom_bucket_defaults(configuration, metadata, load_wazuh_basic_config - file_monitoring: type: fixture brief: Handle the monitoring of a specified file. + assertions: - Check in the log that the module was called with correct parameters. - Check in the log that no errors occurs. + input_description: - The `configuration_defaults` file provides the module configuration for this test. - The `cases_defaults` file provides the test cases. @@ -138,7 +143,9 @@ def test_custom_bucket_logs(configuration, metadata, load_wazuh_basic_configurat configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, upload_and_delete_file_to_s3): """ - description: Test the AWS S3 custom bucket module is invoked the expected parameters and no error occurs. + description: Test the AWS S3 custom bucket module is invoked with the expected parameters and retrieve + the messages from the SQS Queue. + test_phases: - setup: - Load Wazuh light configuration. @@ -150,12 +157,14 @@ def test_custom_bucket_logs(configuration, metadata, load_wazuh_basic_configurat - test: - Check in the log that the module was called with correct parameters. - Check that the module retrieved a message from the SQS Queue. - - Check that the module processes a message from the SQS Queue. + - Check that the module processed a message from the SQS Queue. - teardown: - Truncate wazuh logs. - Restore initial configuration, both ossec.conf and local_internal_options.conf. - Deletes the file created in the S3 Bucket. + wazuh_min_version: 4.7.0 + parameters: - configuration: type: dict @@ -187,10 +196,12 @@ def test_custom_bucket_logs(configuration, metadata, load_wazuh_basic_configurat - upload_and_delete_file_to_s3: type: fixture brief: Upload a file to S3 bucket for the day of the execution. + assertions: - Check in the log that the module was called with correct parameters. - Check that the module retrieved a message from the SQS Queue. - - Check that the module processes a message from the SQS Queue. + - Check that the module processed a message from the SQS Queue. + input_description: - The `configuration_defaults` file provides the module configuration for this test. - The `cases_defaults` file provides the test cases. 
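For the default case defined in `cases_bucket_custom.yaml` above (SQS_NAME / sqs_name set to wazuh-sqs-integration-tests), the strings that test_custom_bucket_defaults and test_custom_bucket_logs search for resolve to concrete values. The short check below only replays the string assembly from the test bodies with that case's values; it asserts the composed search patterns, not the actual module output:

```python
# Replays the pattern assembly from test_custom_bucket_defaults / test_custom_bucket_logs
# for the case in cases_bucket_custom.yaml (sqs_name taken from that file).
sqs_name = 'wazuh-sqs-integration-tests'

parameters = [
    'wodles/aws/aws-s3',
    '--subscriber', 'buckets',
    '--queue', sqs_name,
    '--aws_profile', 'qa',
    '--debug', '2'
]
expected_log = 'Launching S3 Subscriber Command: ' + " ".join(parameters)

retrieve_pattern = fr'.*Retrieving messages from: {sqs_name}'

assert expected_log == ('Launching S3 Subscriber Command: wodles/aws/aws-s3 '
                        '--subscriber buckets --queue wazuh-sqs-integration-tests '
                        '--aws_profile qa --debug 2')
assert retrieve_pattern == '.*Retrieving messages from: wazuh-sqs-integration-tests'
```

Both strings are then passed through `event_monitor.make_aws_callback(...)`, so they are matched against ossec.log as regular expressions rather than literal substrings.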
From 2a431e9c70361d9796eb20760e3fb1d1a5c78249 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Mon, 17 Jul 2023 12:19:44 -0300 Subject: [PATCH 266/419] efactor: rename discard cases files --- ...ml => configuration_bucket_discard_regex.yaml} | 0 ...regex.yaml => cases_bucket_discard_regex.yaml} | 0 tests/integration/test_aws/test_discard_regex.py | 15 ++++++++++----- 3 files changed, 10 insertions(+), 5 deletions(-) rename tests/integration/test_aws/data/configuration_template/discard_regex_test_module/{configuration_discard_regex.yaml => configuration_bucket_discard_regex.yaml} (100%) rename tests/integration/test_aws/data/test_cases/discard_regex_test_module/{cases_discard_regex.yaml => cases_bucket_discard_regex.yaml} (100%) diff --git a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_discard_regex.yaml b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_bucket_discard_regex.yaml similarity index 100% rename from tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_discard_regex.yaml rename to tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_bucket_discard_regex.yaml diff --git a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_discard_regex.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_bucket_discard_regex.yaml similarity index 100% rename from tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_discard_regex.yaml rename to tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_bucket_discard_regex.yaml diff --git a/tests/integration/test_aws/test_discard_regex.py b/tests/integration/test_aws/test_discard_regex.py index 977c6e5ce67..571d05fcd7b 100644 --- a/tests/integration/test_aws/test_discard_regex.py +++ b/tests/integration/test_aws/test_discard_regex.py @@ -23,9 +23,14 @@ configurator = TestConfigurator(module='discard_regex_test_module') # ---------------------------------------------------- TEST_PATH ------------------------------------------------------- -# Test configuration -configurator.configure_test(configuration_file='configuration_discard_regex.yaml', - cases_file='cases_discard_regex.yaml') +configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_bucket_discard_regex.yaml') +cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_discard_regex.yaml') + +configuration_parameters, configuration_metadata, case_ids = get_test_cases_data(cases_path) +configurations = load_configuration_template( + configurations_path, configuration_parameters, configuration_metadata +) + @pytest.mark.tier(level=0) @@ -88,8 +93,8 @@ def test_discard_regex( - Check the expected number of events were forwarded to analysisd. - Check the database was created and updated accordingly. input_description: - - The `configuration_discard_regex` file provides the module configuration for this test. - - The `cases_discard_regex` file provides the test cases. + - The `configuration_bucket_discard_regex` file provides the module configuration for this test. + - The `cases_bucket_discard_regex` file provides the test cases. 
""" bucket_name = metadata['bucket_name'] bucket_type = metadata['bucket_type'] From f2ac6e740a2429df702f0f95deb21fe289d665a9 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Mon, 17 Jul 2023 12:31:53 -0300 Subject: [PATCH 267/419] feat: add cloudwatch and inspector discard regex tests and cases --- ...uration_cloudwatch_discard_regex_json.yaml | 23 + ..._cloudwatch_discard_regex_simple_text.yaml | 21 + ...configuration_inspector_discard_regex.yaml | 21 + .../cases_cloudwatch_discard_regex_json.yaml | 17 + ..._cloudwatch_discard_regex_simple_text.yaml | 15 + .../cases_inspector_discard_regex.yaml | 15 + .../test_aws/test_discard_regex.py | 401 ++++++++++++++++-- 7 files changed, 489 insertions(+), 24 deletions(-) create mode 100644 tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_json.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_simple_text.yaml create mode 100644 tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_inspector_discard_regex.yaml create mode 100644 tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_json.yaml create mode 100644 tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_simple_text.yaml create mode 100644 tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_inspector_discard_regex.yaml diff --git a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_json.yaml b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_json.yaml new file mode 100644 index 00000000000..d25c21bc4d3 --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_json.yaml @@ -0,0 +1,23 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - service: + attributes: + - type: SERVICE_TYPE + elements: + - aws_profile: + value: qa + - aws_log_groups: + value: LOG_GROUP_NAME + - only_logs_after: + value: ONLY_LOGS_AFTER + - regions: + value: REGIONS + - discard_regex: + attributes: + - field: DISCARD_FIELD + value: DISCARD_REGEX diff --git a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_simple_text.yaml b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_simple_text.yaml new file mode 100644 index 00000000000..cb433b979fd --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_simple_text.yaml @@ -0,0 +1,21 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - service: + attributes: + - type: SERVICE_TYPE + elements: + - aws_profile: + value: qa + - aws_log_groups: + value: LOG_GROUP_NAME + - only_logs_after: + value: ONLY_LOGS_AFTER + - regions: + value: REGIONS + - discard_regex: + value: DISCARD_REGEX diff --git a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_inspector_discard_regex.yaml 
b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_inspector_discard_regex.yaml new file mode 100644 index 00000000000..fd4086fb9f7 --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_inspector_discard_regex.yaml @@ -0,0 +1,21 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - service: + attributes: + - type: SERVICE_TYPE + elements: + - aws_profile: + value: qa + - only_logs_after: + value: ONLY_LOGS_AFTER + - regions: + value: REGIONS + - discard_regex: + attributes: + - field: DISCARD_FIELD + value: DISCARD_REGEX diff --git a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_json.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_json.yaml new file mode 100644 index 00000000000..a5126380c30 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_json.yaml @@ -0,0 +1,17 @@ +- name: cloudwatch_discard_regex_json + description: CloudWatch discard regex configuration for JSON logs + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests + REGIONS: us-east-1 + DISCARD_FIELD: networkInterfaces.networkInterfaceId + DISCARD_REGEX: .*eni-networkInterfaceId* + ONLY_LOGS_AFTER: 2023-JUL-03 + metadata: + service_type: cloudwatchlogs + log_group_name: wazuh-cloudwatchlogs-integration-tests + only_logs_after: 2023-JUL-03 + discard_field: networkInterfaces.networkInterfaceId + discard_regex: .*eni-networkInterfaceId.* + regions: us-east-1 + found_logs: 1 diff --git a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_simple_text.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_simple_text.yaml new file mode 100644 index 00000000000..a8d25d4d669 --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_simple_text.yaml @@ -0,0 +1,15 @@ +- name: cloudwatch_discard_regex_simple_text + description: CloudWatch discard regex configuration for simple text logs + configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests + REGIONS: us-east-1 + DISCARD_REGEX: .*Test.* + ONLY_LOGS_AFTER: 2023-JAN-12 + metadata: + service_type: cloudwatchlogs + log_group_name: wazuh-cloudwatchlogs-integration-tests + only_logs_after: 2023-JAN-12 + discard_regex: .*Test.* + regions: us-east-1 + found_logs: 3 diff --git a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_inspector_discard_regex.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_inspector_discard_regex.yaml new file mode 100644 index 00000000000..8b1591033fd --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_inspector_discard_regex.yaml @@ -0,0 +1,15 @@ +- name: inspector_discard_regex + description: Inspector discard regex configurations + configuration_parameters: + SERVICE_TYPE: inspector + REGIONS: us-east-1 + DISCARD_FIELD: assetAttributes.tags.value + DISCARD_REGEX: .*inspector-integration-test.* + ONLY_LOGS_AFTER: 2023-JAN-12 + metadata: + service_type: inspector + only_logs_after: 2023-JAN-12 + discard_field: assetAttributes.tags.value + 
discard_regex: .*inspector-integration-test.* + regions: us-east-1 + found_logs: 4 diff --git a/tests/integration/test_aws/test_discard_regex.py b/tests/integration/test_aws/test_discard_regex.py index 571d05fcd7b..f34036b2aed 100644 --- a/tests/integration/test_aws/test_discard_regex.py +++ b/tests/integration/test_aws/test_discard_regex.py @@ -11,35 +11,37 @@ # qa-integration-framework imports from wazuh_testing import session_parameters from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.modules.aws.db_utils import s3_db_exists -# Local module imports -from .utils import ERROR_MESSAGES, TIMEOUTS -from conftest import TestConfigurator +from wazuh_testing.modules.aws.db_utils import s3_db_exists, services_db_exists +from wazuh_testing.tools.configuration import ( + get_test_cases_data, + load_configuration_template, +) pytestmark = [pytest.mark.server] -# Set test configurator for the module -configurator = TestConfigurator(module='discard_regex_test_module') +# Generic vars +MODULE = 'discard_regex_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) -# ---------------------------------------------------- TEST_PATH ------------------------------------------------------- -configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_bucket_discard_regex.yaml') -cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_discard_regex.yaml') +# --------------------------------------------- TEST_BUCKET_DISCARD_REGEX --------------------------------------------- +t0_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_bucket_discard_regex.yaml') +t0_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_discard_regex.yaml') -configuration_parameters, configuration_metadata, case_ids = get_test_cases_data(cases_path) -configurations = load_configuration_template( - configurations_path, configuration_parameters, configuration_metadata +t0_configuration_parameters, t0_configuration_metadata, t0_case_ids = get_test_cases_data(t0_cases_path) +t0_configurations = load_configuration_template( + t0_configurations_path, t0_configuration_parameters, t0_configuration_metadata ) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) -def test_discard_regex( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, +@pytest.mark.parametrize('configuration, metadata', zip(t0_configurations, t0_configuration_metadata), ids=t0_case_ids) +def test_bucket_discard_regex( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): """ description: Fetch logs excluding the ones that match with the regex. @@ -105,7 +107,8 @@ def test_discard_regex( skipped_logs = metadata['skipped_logs'] path = metadata['path'] if 'path' in metadata else None - pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field. The event will be skipped.' 
+ pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field.' \ + ' The event will be skipped.' parameters = [ 'wodles/aws/aws-s3', @@ -146,10 +149,360 @@ def test_discard_regex( assert s3_db_exists() - # Detect any ERROR message +# ----------------------------------------- TEST_CLOUDWATCH_DISCARD_REGEX_JSON ---------------------------------------- +t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_cloudwatch_discard_regex_json.yaml') +t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_discard_regex_json.yaml') + +t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) +t1_configurations = load_configuration_template( + t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +def test_cloudwatch_discard_regex_json( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, +): + """ + description: Fetch logs excluding the ones that match with the regex. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check the expected number of events were forwarded to analysisd, only logs stored in the bucket and skips + the ones that match with regex. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check the expected number of events were forwarded to analysisd. + - Check the database was created and updated accordingly. + input_description: + - The `configuration_cloudwatch_discard_regex` file provides the module configuration for this test. + - The `cases_cloudwatch_discard_regex` file provides the test cases. 
+ """ + log_group_name = metadata.get('log_group_name') + service_type = metadata.get('service_type') + only_logs_after = metadata.get('only_logs_after') + regions: str = metadata.get('regions') + discard_field = metadata.get('discard_field', None) + discard_regex = metadata.get('discard_regex') + found_logs = metadata.get('found_logs') + + pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field.' \ + ' The event will be skipped.' + + parameters = [ + 'wodles/aws/aws-s3', + '--service', service_type, + '--aws_profile', 'qa', + '--only_logs_after', only_logs_after, + '--regions', regions, + '--aws_log_groups', log_group_name, + '--discard-field', discard_field, + '--discard-regex', discard_regex, + '--debug', '2' + ] + + # Check AWS module started log_monitor.start( - timeout=session_parameters.default_timeout, - callback=event_monitor.callback_detect_all_aws_err - ) + timeout=global_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start, + error_message='The AWS module did not start as expected', + ).result() + + # Check command was called correctly + log_monitor.start( + timeout=global_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters), + error_message='The AWS module was not called with the correct parameters', + ).result() + + log_monitor.start( + timeout=T_20, + callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), + error_message=( + 'The AWS module did not show the correct message about discard regex or ', + 'did not process the expected amount of logs' + ), + accum_results=found_logs + ).result() + + assert services_db_exists() + + +# ------------------------------------- TEST_CLOUDWATCH_DISCARD_REGEX_SIMPLE_TEXT ------------------------------------- +t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_cloudwatch_discard_regex_simple_text.yaml') +t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_discard_regex_simple_text.yaml') + +t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) +t2_configurations = load_configuration_template( + t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) +def test_cloudwatch_discard_regex_simple_text( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, +): + """ + description: Fetch logs excluding the ones that match with the regex. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check the expected number of events were forwarded to analysisd, only logs stored in the bucket and skips + the ones that match with regex. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. 
+ - Delete the uploaded file + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check the expected number of events were forwarded to analysisd. + - Check the database was created and updated accordingly. + input_description: + - The `configuration_cloudwatch_discard_regex_simple_text` file provides + the module configuration for this test. + - The `cases_cloudwatch_discard_regex_simple_text` file provides the test cases. + """ + log_group_name = metadata.get('log_group_name') + service_type = metadata.get('service_type') + only_logs_after = metadata.get('only_logs_after') + regions: str = metadata.get('regions') + discard_regex = metadata.get('discard_regex') + found_logs = metadata.get('found_logs') + + pattern = fr'.*The "{discard_regex}" regex found a match. The event will be skipped.' + + parameters = [ + 'wodles/aws/aws-s3', + '--service', service_type, + '--aws_profile', 'qa', + '--only_logs_after', only_logs_after, + '--regions', regions, + '--aws_log_groups', log_group_name, + '--discard-regex', discard_regex, + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=global_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start, + error_message='The AWS module did not start as expected', + ).result() + + # Check command was called correctly + log_monitor.start( + timeout=global_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters), + error_message='The AWS module was not called with the correct parameters', + ).result() + + log_monitor.start( + timeout=T_20, + callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), + error_message=( + 'The AWS module did not show the correct message about discard regex or ', + 'did not process the expected amount of logs' + ), + accum_results=found_logs + ).result() + + assert services_db_exists() + + +# ------------------------------------------- TEST_INSPECTOR_DISCARD_REGEX -------------------------------------------- +t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_inspector_discard_regex.yaml') +t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_discard_regex.yaml') + +t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) +t3_configurations = load_configuration_template( + t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata +) + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', zip(t3_configurations, t3_configuration_metadata), ids=t3_case_ids) +def test_inspector_discard_regex( + 
configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, +): + """ + description: Fetch logs excluding the ones that match with the regex. + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check the expected number of events were forwarded to analysisd, only logs stored in the bucket and skips + the ones that match with regex. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file + wazuh_min_version: 4.6.0 + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + assertions: + - Check in the log that the module was called with correct parameters. + - Check the expected number of events were forwarded to analysisd. + - Check the database was created and updated accordingly. + input_description: + - The `configuration_inspector_discard_regex` file provides the module configuration for this test. + - The `cases_inspector_discard_regex` file provides the test cases. + """ + service_type = metadata.get('service_type') + only_logs_after = metadata.get('only_logs_after') + regions: str = metadata.get('regions') + discard_field = metadata.get('discard_field', '') + discard_regex = metadata.get('discard_regex') + found_logs = metadata.get('found_logs') + + pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field.' \ + ' The event will be skipped.' 
+ + parameters = [ + 'wodles/aws/aws-s3', + '--service', service_type, + '--aws_profile', 'qa', + '--only_logs_after', only_logs_after, + '--regions', regions, + '--discard-field', discard_field, + '--discard-regex', discard_regex, + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=global_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start, + error_message='The AWS module did not start as expected', + ).result() + + # Check command was called correctly + log_monitor.start( + timeout=global_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters), + error_message='The AWS module was not called with the correct parameters', + ).result() + + log_monitor.start( + timeout=T_20, + callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), + error_message=( + 'The AWS module did not show the correct message about discard regex or ', + 'did not process the expected amount of logs' + ), + accum_results=found_logs + ).result() - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert services_db_exists() From f99a2f8fbad13838601b23d2d3a44578e7c3c403 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Mon, 14 Aug 2023 13:21:05 -0300 Subject: [PATCH 268/419] docs: modify changelog and test cases descriptions --- .../cases_cloudwatch_discard_regex_json.yaml | 4 ++- ..._cloudwatch_discard_regex_simple_text.yaml | 4 ++- .../cases_inspector_discard_regex.yaml | 4 ++- .../test_aws/test_discard_regex.py | 32 ++++++++++++++++--- 4 files changed, 37 insertions(+), 7 deletions(-) diff --git a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_json.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_json.yaml index a5126380c30..fd3836cc7e2 100644 --- a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_json.yaml +++ b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_json.yaml @@ -1,5 +1,7 @@ - name: cloudwatch_discard_regex_json - description: CloudWatch discard regex configuration for JSON logs + description: > + CloudWatch configuration for an event being discarded when the regex matches + the content in the specified field inside the incoming JSON log configuration_parameters: SERVICE_TYPE: cloudwatchlogs LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests diff --git a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_simple_text.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_simple_text.yaml index a8d25d4d669..d10325cd043 100644 --- a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_simple_text.yaml +++ b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_simple_text.yaml @@ -1,5 +1,7 @@ - name: cloudwatch_discard_regex_simple_text - description: CloudWatch discard regex configuration for simple text logs + description: > + CloudWatch configuration for an event being discarded when the regex matches + the content inside the incoming simple text log configuration_parameters: SERVICE_TYPE: cloudwatchlogs LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests diff --git 
a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_inspector_discard_regex.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_inspector_discard_regex.yaml index 8b1591033fd..0af561d13ff 100644 --- a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_inspector_discard_regex.yaml +++ b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_inspector_discard_regex.yaml @@ -1,5 +1,7 @@ - name: inspector_discard_regex - description: Inspector discard regex configurations + description: > + Inspector configuration for an event being discarded when the regex matches + the content in the specified field inside the incoming JSON log configuration_parameters: SERVICE_TYPE: inspector REGIONS: us-east-1 diff --git a/tests/integration/test_aws/test_discard_regex.py b/tests/integration/test_aws/test_discard_regex.py index f34036b2aed..52cceb48e32 100644 --- a/tests/integration/test_aws/test_discard_regex.py +++ b/tests/integration/test_aws/test_discard_regex.py @@ -44,7 +44,9 @@ def test_bucket_discard_regex( configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): """ - description: Fetch logs excluding the ones that match with the regex. + description: Check that some bucket logs are excluded when the regex and field defined in + match an event. + test_phases: - setup: - Load Wazuh light configuration. @@ -61,7 +63,9 @@ def test_bucket_discard_regex( - Truncate wazuh logs. - Restore initial configuration, both ossec.conf and local_internal_options.conf. - Delete the uploaded file + wazuh_min_version: 4.6.0 + parameters: - configuration: type: dict @@ -90,10 +94,12 @@ def test_bucket_discard_regex( - file_monitoring: type: fixture brief: Handle the monitoring of a specified file. + assertions: - Check in the log that the module was called with correct parameters. - Check the expected number of events were forwarded to analysisd. - Check the database was created and updated accordingly. + input_description: - The `configuration_bucket_discard_regex` file provides the module configuration for this test. - The `cases_bucket_discard_regex` file provides the test cases. @@ -166,7 +172,9 @@ def test_cloudwatch_discard_regex_json( configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): """ - description: Fetch logs excluding the ones that match with the regex. + description: Check that some CloudWatch JSON logs are excluded when the regex and field defined in + match an event. + test_phases: - setup: - Load Wazuh light configuration. @@ -183,7 +191,9 @@ def test_cloudwatch_discard_regex_json( - Truncate wazuh logs. - Restore initial configuration, both ossec.conf and local_internal_options.conf. - Delete the uploaded file + wazuh_min_version: 4.6.0 + parameters: - configuration: type: dict @@ -212,10 +222,12 @@ def test_cloudwatch_discard_regex_json( - file_monitoring: type: fixture brief: Handle the monitoring of a specified file. + assertions: - Check in the log that the module was called with correct parameters. - Check the expected number of events were forwarded to analysisd. - Check the database was created and updated accordingly. + input_description: - The `configuration_cloudwatch_discard_regex` file provides the module configuration for this test. - The `cases_cloudwatch_discard_regex` file provides the test cases. 
@@ -287,7 +299,9 @@ def test_cloudwatch_discard_regex_simple_text( configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): """ - description: Fetch logs excluding the ones that match with the regex. + description: Check that some CloudWatch simple text logs are excluded when the regex defined in + matches an event. + test_phases: - setup: - Load Wazuh light configuration. @@ -304,7 +318,9 @@ def test_cloudwatch_discard_regex_simple_text( - Truncate wazuh logs. - Restore initial configuration, both ossec.conf and local_internal_options.conf. - Delete the uploaded file + wazuh_min_version: 4.6.0 + parameters: - configuration: type: dict @@ -333,10 +349,12 @@ def test_cloudwatch_discard_regex_simple_text( - file_monitoring: type: fixture brief: Handle the monitoring of a specified file. + assertions: - Check in the log that the module was called with correct parameters. - Check the expected number of events were forwarded to analysisd. - Check the database was created and updated accordingly. + input_description: - The `configuration_cloudwatch_discard_regex_simple_text` file provides the module configuration for this test. @@ -406,7 +424,9 @@ def test_inspector_discard_regex( configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): """ - description: Fetch logs excluding the ones that match with the regex. + description: Check that some Inspector logs are excluded when the regex and field defined in + match an event. + test_phases: - setup: - Load Wazuh light configuration. @@ -423,7 +443,9 @@ def test_inspector_discard_regex( - Truncate wazuh logs. - Restore initial configuration, both ossec.conf and local_internal_options.conf. - Delete the uploaded file + wazuh_min_version: 4.6.0 + parameters: - configuration: type: dict @@ -452,10 +474,12 @@ def test_inspector_discard_regex( - file_monitoring: type: fixture brief: Handle the monitoring of a specified file. + assertions: - Check in the log that the module was called with correct parameters. - Check the expected number of events were forwarded to analysisd. - Check the database was created and updated accordingly. + input_description: - The `configuration_inspector_discard_regex` file provides the module configuration for this test. - The `cases_inspector_discard_regex` file provides the test cases. From 21bb633854d8b671f413256705efe4a2765b3bd8 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Wed, 15 Nov 2023 18:07:51 -0300 Subject: [PATCH 269/419] Fix import errors --- tests/integration/conftest.py | 13 ++-- tests/integration/test_aws/conftest.py | 5 +- tests/integration/test_aws/event_monitor.py | 17 +++-- .../test_aws/test_discard_regex.py | 73 ++++++++----------- tests/integration/test_aws/utils.py | 2 + 5 files changed, 52 insertions(+), 58 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index aa41be7edcc..9e1735acd46 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -245,21 +245,18 @@ def restart_wazuh_function(request): @pytest.fixture() -def file_monitoring(request, file_to_monitor=None): +def file_monitoring(request): """Fixture to handle the monitoring of a specified file. It uses the variable `file_to_monitor` to determinate the file to monitor. Default `LOG_FILE_PATH` Args: request (fixture): Provide information on the executing test function. 
- - Parameters - ---------- - request - file_to_monitor """ - if file_to_monitor is None: - file_to_monitor = getattr(request.module, 'file_to_monitor', WAZUH_LOG_PATH) + if hasattr(request.module, 'file_to_monitor'): + file_to_monitor = getattr(request.module, 'file_to_monitor') + else: + file_to_monitor = WAZUH_LOG_PATH logger.debug(f"Initializing file to monitor to {file_to_monitor}") diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 2aab65e4e05..3bce270fbb6 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -26,16 +26,15 @@ file_exists, upload_file ) -from wazuh_testing.modules.aws.db_utils import delete_s3_db, delete_services_db +from wazuh_testing.modules.aws.utils import delete_s3_db, delete_services_db from wazuh_testing.utils.services import control_service -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR from wazuh_testing.utils.configuration import ( get_test_cases_data, load_configuration_template, ) # Local imports -from .utils import TEST_DATA_PATH +from .utils import TEST_DATA_PATH, TEMPLATE_DIR, TEST_CASES_DIR @pytest.fixture def mark_cases_as_skipped(metadata): diff --git a/tests/integration/test_aws/event_monitor.py b/tests/integration/test_aws/event_monitor.py index 6df70c6c9b7..1cb7c940804 100644 --- a/tests/integration/test_aws/event_monitor.py +++ b/tests/integration/test_aws/event_monitor.py @@ -1,10 +1,17 @@ +""" +Copyright (C) 2015-2023, Wazuh Inc. +Created by Wazuh, Inc. . +This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +This module will contains all callback methods to monitor and event +""" + import re -from wazuh_testing.modules.aws import VPC_FLOW_TYPE -from wazuh_testing.modules.aws.cli_utils import analyze_command_output -from wazuh_testing.modules.aws.patterns import patterns -from wazuh_testing.modules.aws.errors import errors -from wazuh_testing.constants.aws import INSPECTOR_TYPE +# +from wazuh_testing.constants.aws import VPC_FLOW_TYPE, INSPECTOR_TYPE +from wazuh_testing.modules.aws.utils import analyze_command_output + def make_aws_callback(pattern, prefix=''): diff --git a/tests/integration/test_aws/test_discard_regex.py b/tests/integration/test_aws/test_discard_regex.py index 52cceb48e32..75438701701 100644 --- a/tests/integration/test_aws/test_discard_regex.py +++ b/tests/integration/test_aws/test_discard_regex.py @@ -13,32 +13,26 @@ from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 from wazuh_testing.modules.aws.db_utils import s3_db_exists, services_db_exists -from wazuh_testing.tools.configuration import ( - get_test_cases_data, - load_configuration_template, -) + +# Local module imports +from .utils import ERROR_MESSAGES, TIMEOUTS +from conftest import TestConfigurator pytestmark = [pytest.mark.server] -# Generic vars -MODULE = 'discard_regex_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='discard_regex_test_module') # --------------------------------------------- TEST_BUCKET_DISCARD_REGEX --------------------------------------------- -t0_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_bucket_discard_regex.yaml') -t0_cases_path = 
os.path.join(TEST_CASES_PATH, 'cases_bucket_discard_regex.yaml') - -t0_configuration_parameters, t0_configuration_metadata, t0_case_ids = get_test_cases_data(t0_cases_path) -t0_configurations = load_configuration_template( - t0_configurations_path, t0_configuration_parameters, t0_configuration_metadata -) - +# Configure T1 test +configurator.configure_test(configuration_file='configuration_bucket_discard_regex.yaml', + cases_file='cases_bucket_discard_regex.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t0_configurations, t0_configuration_metadata), ids=t0_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_bucket_discard_regex( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, @@ -155,18 +149,17 @@ def test_bucket_discard_regex( assert s3_db_exists() -# ----------------------------------------- TEST_CLOUDWATCH_DISCARD_REGEX_JSON ---------------------------------------- -t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_cloudwatch_discard_regex_json.yaml') -t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_discard_regex_json.yaml') -t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) -t1_configurations = load_configuration_template( - t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata -) +# ----------------------------------------- TEST_CLOUDWATCH_DISCARD_REGEX_JSON ---------------------------------------- +# Configure T2 test +configurator.configure_test(configuration_file='configuration_cloudwatch_discard_regex_json.yaml', + cases_file='cases_cloudwatch_discard_regex_json.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_cloudwatch_discard_regex_json( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, @@ -283,17 +276,15 @@ def test_cloudwatch_discard_regex_json( # ------------------------------------- TEST_CLOUDWATCH_DISCARD_REGEX_SIMPLE_TEXT ------------------------------------- -t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_cloudwatch_discard_regex_simple_text.yaml') -t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_discard_regex_simple_text.yaml') - -t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) -t2_configurations = load_configuration_template( - t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata -) +# Configure T3 test +configurator.configure_test(configuration_file='configuration_cloudwatch_discard_regex_simple_text.yaml', + cases_file='cases_cloudwatch_discard_regex_simple_text.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) +@pytest.mark.parametrize('configuration, metadata', + 
zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_cloudwatch_discard_regex_simple_text( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, @@ -408,17 +399,15 @@ def test_cloudwatch_discard_regex_simple_text( # ------------------------------------------- TEST_INSPECTOR_DISCARD_REGEX -------------------------------------------- -t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_inspector_discard_regex.yaml') -t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_discard_regex.yaml') - -t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) -t3_configurations = load_configuration_template( - t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata -) +# Configure T4 test +configurator.configure_test(configuration_file='configuration_inspector_discard_regex.yaml', + cases_file='cases_inspector_discard_regex.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t3_configurations, t3_configuration_metadata), ids=t3_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_inspector_discard_regex( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py index da0851074c5..e11bd98599e 100644 --- a/tests/integration/test_aws/utils.py +++ b/tests/integration/test_aws/utils.py @@ -8,6 +8,8 @@ from os.path import join, dirname, realpath # CONSTANTS +TEMPLATE_DIR = 'configuration_template' +TEST_CASES_DIR = 'test_cases' ERROR_MESSAGES = { From c6dcfc9c518679d9d03620695201af65bfb9bd11 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Thu, 23 Nov 2023 19:48:56 -0300 Subject: [PATCH 270/419] Refactored multiple test suites --- tests/integration/test_aws/conftest.py | 28 +++--- tests/integration/test_aws/event_monitor.py | 93 +++++++++++-------- tests/integration/test_aws/test_basic.py | 26 +++--- .../test_aws/test_custom_bucket.py | 17 ++-- tests/integration/test_aws/test_log_groups.py | 18 ++-- .../test_aws/test_only_logs_after.py | 49 +++++----- tests/integration/test_aws/test_parser.py | 18 ++-- tests/integration/test_aws/test_path.py | 26 +++--- .../integration/test_aws/test_path_suffix.py | 24 +++-- tests/integration/test_aws/test_regions.py | 36 +++---- .../test_aws/test_remove_from_bucket.py | 17 ++-- tests/integration/test_aws/utils.py | 7 +- 12 files changed, 187 insertions(+), 172 deletions(-) diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 3bce270fbb6..92c511babad 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -1,10 +1,9 @@ -""" -Copyright (C) 2015-2023, Wazuh Inc. -Created by Wazuh, Inc. . -This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +# Copyright (C) 2015-2023, Wazuh Inc. +# Created by Wazuh, Inc. . 
+# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 -This module will contains all necessary components (fixtures, classes, methods) -to configure the test for it's execution. +""" +This module contains all necessary components (fixtures, classes, methods)to configure the test for its execution. """ import pytest @@ -32,9 +31,16 @@ get_test_cases_data, load_configuration_template, ) +from wazuh_testing.modules.monitord import configuration as monitord_config # Local imports -from .utils import TEST_DATA_PATH, TEMPLATE_DIR, TEST_CASES_DIR +from .utils import TEST_DATA_PATH, TEMPLATE_DIR, TEST_CASES_DIR, WAZUH_MODULES_DEBUG + + +# Set local internal options +local_internal_options = {WAZUH_MODULES_DEBUG: '2', + monitord_config.MONITORD_ROTATE_LOG: '0'} + @pytest.fixture def mark_cases_as_skipped(metadata): @@ -71,7 +77,6 @@ def upload_and_delete_file_to_s3(metadata): metadata['uploaded_file'] = filename yield - if file_exists(filename=filename, bucket_name=bucket_name): delete_file(filename=filename, bucket_name=bucket_name) logger.debug('Deleted file: %s from bucket %s', filename, bucket_name) @@ -159,9 +164,8 @@ def fixture_delete_log_stream(metadata): delete_log_stream(log_stream=log_stream) logger.debug('Deleted log stream: %s', log_stream) -# DB fixtures - +# DB fixtures @pytest.fixture def clean_s3_cloudtrail_db(): """Delete the DB file before and after the test execution""" @@ -190,10 +194,10 @@ class TestConfigurator: - module (str): The name of the test module. - configuration_path (str): The path to the configuration directory for the test module. - test_cases_path (str): The path to the test cases directory for the test module. - - metadata (dict): Test metadata retrieved from the test cases. + - metadata (list): Test metadata retrieved from the test cases. - parameters (list): Test parameters retrieved from the test cases. - cases_ids (list): Identifiers for the test cases. - - test_configuration_template (dict): The loaded configuration template for the test module. + - test_configuration_template (list): The loaded configuration template for the test module. 
""" def __init__(self, module): diff --git a/tests/integration/test_aws/event_monitor.py b/tests/integration/test_aws/event_monitor.py index 1cb7c940804..dac222fa4aa 100644 --- a/tests/integration/test_aws/event_monitor.py +++ b/tests/integration/test_aws/event_monitor.py @@ -8,10 +8,23 @@ import re -# +# # qa-integration-framework imports +from wazuh_testing.modules.aws.patterns import (WHITESPACE_MATCH, CURLY_BRACE_MATCH, AWS_MODULE_STARTED_PARAMETRIZED, + AWS_UNDEFINED_SERVICE_TYPE, AWS_DEPRECATED_CONFIG_DEFINED, + AWS_NO_SERVICE_WARNING, AWS_MODULE_STARTED, INVALID_EMPTY_TYPE_ERROR, + EMPTY_CONTENT_ERROR, EMPTY_CONTENT_WARNING, + INVALID_EMPTY_SERVICE_TYPE_ERROR, INVALID_TAG_CONTENT_ERROR, + PARSING_BUCKET_ERROR_WARNING, + PARSING_SERVICE_ERROR_WARNING, SERVICE_ANALYSIS, BUCKET_ANALYSIS, + MODULE_START, PARSER_ERROR, MODULE_ERROR, NEW_LOG_FOUND, DEBUG_MESSAGE, + EVENTS_COLLECTED, DEBUG_ANALYSISD_MESSAGE, ANALYSISD_EVENT, + AWS_EVENT_HEADER, NO_LOG_PROCESSED, NO_BUCKET_LOG_PROCESSED, + MARKER, NO_NEW_EVENTS, EVENT_SENT, ) from wazuh_testing.constants.aws import VPC_FLOW_TYPE, INSPECTOR_TYPE from wazuh_testing.modules.aws.utils import analyze_command_output +# Local imports +from .utils import ERROR_MESSAGES def make_aws_callback(pattern, prefix=''): @@ -24,9 +37,7 @@ def make_aws_callback(pattern, prefix=''): Returns: lambda: Function that returns if there's a match in the file. """ - pattern = WHITESPACE_REGEX.join(pattern.split()) - regex = re.compile(CURLY_BRACE_MATCH.format(prefix, pattern)) - + regex = re.compile(r'{}{}'.format(prefix, pattern)) return lambda line: regex.match(line) @@ -39,7 +50,7 @@ def callback_detect_aws_module_called(parameters): Returns: Callable: Callback to match the line. """ - pattern = f'{AWS_MODULE_STARTED_PARAMETRIZED} {" ".join(parameters)}\n*' + pattern = fr'{AWS_MODULE_STARTED_PARAMETRIZED}{" ".join(parameters)}\n*' regex = re.compile(pattern) return lambda line: regex.match(line) @@ -54,9 +65,7 @@ def callback_detect_aws_error_for_missing_type(line): Optional[str]: Line if it matches. """ - if re.match( - AWS_UNDEFINED_SERVICE_TYPE, line - ): + if re.match(fr"{AWS_UNDEFINED_SERVICE_TYPE}", line): return line @@ -70,9 +79,7 @@ def callback_detect_aws_legacy_module_warning(line): Optional[str]: Line if it matches. """ - if re.match( - AWS_DEPRECATED_CONFIG_DEFINED, line - ): + if re.match(fr"{AWS_DEPRECATED_CONFIG_DEFINED}", line): return line @@ -86,7 +93,9 @@ def callback_detect_aws_module_warning(line): Optional[str]: Line if it matches. """ - if re.match(AWS_NO_SERVICE_WARNING, line): + if re.match( + fr"{AWS_NO_SERVICE_WARNING}", line + ): return line @@ -100,7 +109,9 @@ def callback_detect_aws_module_started(line): Optional[str]: Line if it matches. 
""" - if re.match(AWS_MODULE_STARTED, line): + if re.match( + fr"{AWS_MODULE_STARTED}", line + ): return line @@ -115,9 +126,9 @@ def callback_detect_aws_empty_value(line): """ if ( - re.match(INVALID_TYPE_ERROR, line) or - re.match(EMPTY_CONTENT_ERROR, line) or - re.match(EMPTY_CONTENT_WARNING, line) + re.match(fr"{INVALID_EMPTY_TYPE_ERROR}", line) or + re.match(fr"{EMPTY_CONTENT_ERROR}", line) or + re.match(fr"{EMPTY_CONTENT_WARNING}", line) ): return line @@ -133,10 +144,10 @@ def callback_detect_aws_invalid_value(line): """ if ( - re.match(INVALID_EMPTY_SERVICE_TYPE_ERROR, line) or - re.match(INVALID_TAG_CONTENT_ERROR, line) or - re.match(PARSING_BUCKET_ERROR_WARNING, line), - re.match(PARSING_SERVICE_ERROR_WARNING, line) + re.match(fr"{INVALID_EMPTY_SERVICE_TYPE_ERROR}", line) or + re.match(fr"{INVALID_TAG_CONTENT_ERROR}", line) or + re.match(fr"{PARSING_BUCKET_ERROR_WARNING}", line), + re.match(fr"{PARSING_SERVICE_ERROR_WARNING}", line) ): return line @@ -152,8 +163,8 @@ def callback_detect_bucket_or_service_call(line): """ if ( - re.match(SERVICE_ANALYSIS, line) or - re.match(BUCKET_ANALYSIS, line) + re.match(fr"{SERVICE_ANALYSIS}", line) or + re.match(fr"{BUCKET_ANALYSIS}", line) ): return line @@ -168,7 +179,9 @@ def callback_detect_aws_module_start(line): Optional[str]: Line if it matches. """ - if re.match(MODULE_START, line): + if re.match( + fr"{MODULE_START}", line + ): return line @@ -181,7 +194,8 @@ def callback_detect_all_aws_err(line): Returns: Optional[str]: line if it matches. """ - if re.match(PARSER_ERROR, line) or re.match(MODULE_ERROR, line): + if (re.match(fr"{PARSER_ERROR}", line) or + re.match(fr"{MODULE_ERROR}", line)): return line @@ -220,7 +234,9 @@ def callback_detect_event_processed(line): Returns: Optional[str]: line if it matches. """ - if re.match(NEW_LOG_FOUND, line): + if re.match( + fr"{NEW_LOG_FOUND}", line + ): return line @@ -238,9 +254,9 @@ def callback_detect_event_processed_or_skipped(pattern): def callback_detect_service_event_processed(expected_results, service_type): if service_type == INSPECTOR_TYPE: - regex = re.compile(f"{DEBUG_MESSAGE} {expected_results} {EVENTS_COLLECTED}") + regex = re.compile(fr"{DEBUG_MESSAGE} {expected_results} {EVENTS_COLLECTED}") else: - regex = re.compile(f"{DEBUG_ANALYSISD_MESSAGE} {expected_results} {ANALYSISD_EVENT}") + regex = re.compile(fr"{DEBUG_ANALYSISD_MESSAGE} {expected_results} {ANALYSISD_EVENT}") return lambda line: regex.match(line) @@ -253,7 +269,8 @@ def callback_event_sent_to_analysisd(line): Returns: Optional[str]: line if it matches. """ - if line.startswith(AWS_EVENT_HEADER): + if line.startswith( + fr"{AWS_EVENT_HEADER}"): return line @@ -268,7 +285,7 @@ def check_processed_logs_from_output(command_output, expected_results=1): command_output=command_output, callback=callback_detect_event_processed, expected_results=expected_results, - error_message=INCORRECT_EVENT_NUMBER + error_message=ERROR_MESSAGES['incorrect_event_number'] ) @@ -281,15 +298,15 @@ def check_non_processed_logs_from_output(command_output, bucket_type, expected_r expected_results (int, optional): Number of results to find. Default to 1. 
""" if bucket_type == VPC_FLOW_TYPE: - pattern = NO_LOG_PROCESSED + pattern = fr"{NO_LOG_PROCESSED}" else: - pattern = NO_BUCKET_LOG_PROCESSED + pattern = fr"{NO_BUCKET_LOG_PROCESSED}" analyze_command_output( command_output, callback=make_aws_callback(pattern), expected_results=expected_results, - error_message=UNEXPECTED_NUMBER_OF_EVENTS_FOUND + error_message=ERROR_MESSAGES['unexpected_number_of_events_found'] ) @@ -301,13 +318,13 @@ def check_marker_from_output(command_output, file_key, expected_results=1): file_key (str): Value to check as a marker. expected_results (int, optional): Number of results to find. Default to 1. """ - pattern = f"{MARKER} {file_key}" + pattern = fr"{MARKER} {file_key}" analyze_command_output( command_output, callback=make_aws_callback(pattern), expected_results=expected_results, - error_message=INCORRECT_MARKER + error_message=ERROR_MESSAGES['incorrect_marker'] ) @@ -318,19 +335,19 @@ def check_service_processed_logs_from_output( command_output=command_output, callback=callback_detect_service_event_processed(events_sent, service_type), expected_results=expected_results, - error_message=INCORRECT_EVENT_NUMBER + error_message=ERROR_MESSAGES['incorrect_event_number'] ) def check_service_non_processed_logs_from_output(command_output, service_type, expected_results=1): if service_type == INSPECTOR_TYPE: - pattern = NO_NEW_EVENTS + pattern = fr"{NO_NEW_EVENTS}" else: - pattern = EVENT_SENT + pattern = fr"{EVENT_SENT}" analyze_command_output( command_output, callback=make_aws_callback(pattern), expected_results=expected_results, - error_message=POSSIBLY_PROCESSED_LOGS + error_message=ERROR_MESSAGES['unexpected_number_of_events_found'] ) diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py index 0daa5b1aa8b..07f99e067c9 100644 --- a/tests/integration/test_aws/test_basic.py +++ b/tests/integration/test_aws/test_basic.py @@ -1,20 +1,20 @@ -""" -Copyright (C) 2015-2023, Wazuh Inc. -Created by Wazuh, Inc. . -This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 -This module will contains all cases for the basic test suite +""" +This module will contain all cases for the basic test suite """ import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from . import event_monitor, local_internal_options # noqa: F401 # Local module imports +from . import event_monitor from .utils import ERROR_MESSAGES -from conftest import TestConfigurator +from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] @@ -32,8 +32,8 @@ zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_bucket_defaults( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. 
@@ -127,8 +127,8 @@ def test_bucket_defaults( zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_service_defaults( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. @@ -228,8 +228,8 @@ def test_service_defaults( zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_inspector_defaults( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. diff --git a/tests/integration/test_aws/test_custom_bucket.py b/tests/integration/test_aws/test_custom_bucket.py index c3707a8820a..7dddff8f3af 100644 --- a/tests/integration/test_aws/test_custom_bucket.py +++ b/tests/integration/test_aws/test_custom_bucket.py @@ -1,12 +1,17 @@ +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +""" +This module will contain all cases for the custom bucket test suite +""" + import os import pytest -from wazuh_testing import TEMPLATE_DIR, TEST_CASES_DIR, global_parameters, T_10 -from wazuh_testing.modules.aws import event_monitor, local_internal_options -from wazuh_testing.tools.configuration import ( - get_test_cases_data, - load_configuration_template, -) + +# qa-integration-framework imports +from wazuh_testing import session_parameters pytestmark = [pytest.mark.server] diff --git a/tests/integration/test_aws/test_log_groups.py b/tests/integration/test_aws/test_log_groups.py index 1e35cdd509a..f465303d9f0 100644 --- a/tests/integration/test_aws/test_log_groups.py +++ b/tests/integration/test_aws/test_log_groups.py @@ -10,16 +10,15 @@ # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.modules.aws.db_utils import ( - get_multiple_service_db_row, - services_db_exists, - table_exists, -) +from wazuh_testing.utils.db_queries.aws_db import get_multiple_service_db_row, table_exists +from wazuh_testing.modules.aws.utils import path_exist +from wazuh_testing.constants.paths.aws import AWS_SERVICES_DB_PATH +from wazuh_testing.modules.aws.patterns import NON_EXISTENT_SPECIFIED_LOG_GROUPS # Local module imports +from . 
import event_monitor
 from .utils import ERROR_MESSAGES, TIMEOUTS
-from conftest import TestConfigurator
+from .conftest import TestConfigurator, local_internal_options

 pytestmark = [pytest.mark.server]

@@ -138,13 +137,14 @@ def test_log_groups(
     else:
         log_monitor.start(
             timeout=TIMEOUTS[10],
-            callback=event_monitor.make_aws_callback(r'.*The specified log group does not exist.'),
+            callback=event_monitor.make_aws_callback(pattern=fr"{NON_EXISTENT_SPECIFIED_LOG_GROUPS}")
         )

         assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_no_existent_log_group']

-    assert services_db_exists()
+    assert path_exist(path=AWS_SERVICES_DB_PATH)

+    # @todo Ask reason behind query returning string instead of list as stated in docstring
     if expected_results:
         log_group_list = log_group_names.split(",")
         for row in get_multiple_service_db_row(table_name='cloudwatch_logs'):
diff --git a/tests/integration/test_aws/test_only_logs_after.py b/tests/integration/test_aws/test_only_logs_after.py
index d407ecd9431..bdab62e6ef1 100644
--- a/tests/integration/test_aws/test_only_logs_after.py
+++ b/tests/integration/test_aws/test_only_logs_after.py
@@ -1,39 +1,29 @@
-"""
-Copyright (C) 2015-2023, Wazuh Inc.
-Created by Wazuh, Inc. .
-This program is free software; you can redistribute it and/or modify it under the terms of GPLv2
+# Copyright (C) 2015, Wazuh Inc.
+# Created by Wazuh, Inc. .
+# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2

-This module will contains all cases for the only logs after test suite
+"""
+This module will contain all cases for the only logs after test suite
+"""
 import pytest
 from datetime import datetime

 # qa-integration-framework imports
 from wazuh_testing import session_parameters
-from wazuh_testing.modules import aws as cons
-from wazuh_testing.modules.aws import ONLY_LOGS_AFTER_PARAM, event_monitor, local_internal_options  # noqa: F401
-from wazuh_testing.modules.aws.cli_utils import call_aws_module
-from wazuh_testing.modules.aws.cloudwatch_utils import (
-    create_log_events,
-    create_log_stream,
-)
-from wazuh_testing.modules.aws.db_utils import (
-    get_multiple_s3_db_row,
-    get_service_db_row,
-    s3_db_exists,
-    services_db_exists,
-    get_s3_db_row,
-)
-from wazuh_testing.modules.aws.s3_utils import get_last_file_key, upload_file
+from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH, AWS_SERVICES_DB_PATH
+from wazuh_testing.constants.aws import ONLY_LOGS_AFTER_PARAM, PATH_DATE_FORMAT
+from wazuh_testing.utils.db_queries.aws_db import get_multiple_s3_db_row, get_service_db_row, get_s3_db_row
+from wazuh_testing.modules.aws.utils import (call_aws_module, create_log_events, create_log_stream, path_exist,
+                                             get_last_file_key, upload_file)

 # Local module imports
+from .
import event_monitor from .utils import ERROR_MESSAGES, TIMEOUTS -from conftest import TestConfigurator +from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] - # Set test configurator for the module configurator = TestConfigurator(module='only_logs_after_test_module') @@ -151,7 +144,7 @@ def test_bucket_without_only_logs_after( assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] - assert s3_db_exists() + assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) data = get_s3_db_row(table_name=table_name) @@ -268,7 +261,7 @@ def test_service_without_only_logs_after( assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] - assert services_db_exists() + assert path_exist(path=AWS_SERVICES_DB_PATH) data = get_service_db_row(table_name="cloudwatch_logs") @@ -397,7 +390,7 @@ def test_bucket_with_only_logs_after( assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] - assert s3_db_exists() + assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) for row in get_multiple_s3_db_row(table_name=table_name): assert bucket_name in row.bucket_path @@ -525,7 +518,7 @@ def test_cloudwatch_with_only_logs_after( assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] - assert services_db_exists() + assert path_exist(path=AWS_SERVICES_DB_PATH) data = get_service_db_row(table_name=table_name_map[service_type]) @@ -650,7 +643,7 @@ def test_inspector_with_only_logs_after( assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] - assert services_db_exists() + assert path_exist(path=AWS_SERVICES_DB_PATH) data = get_service_db_row(table_name=table_name_map[service_type]) @@ -731,7 +724,7 @@ def test_bucket_multiple_calls( base_parameters.extend(['--trail_prefix', path]) # Call the module without only_logs_after and check that no logs were processed - last_marker_key = datetime.utcnow().strftime(cons.PATH_DATE_FORMAT) + last_marker_key = datetime.utcnow().strftime(PATH_DATE_FORMAT) event_monitor.check_non_processed_logs_from_output( command_output=call_aws_module(*base_parameters), diff --git a/tests/integration/test_aws/test_parser.py b/tests/integration/test_aws/test_parser.py index 0020bac8648..38e3306d726 100644 --- a/tests/integration/test_aws/test_parser.py +++ b/tests/integration/test_aws/test_parser.py @@ -1,19 +1,20 @@ -""" -Copyright (C) 2015-2023, Wazuh Inc. -Created by Wazuh, Inc. . -This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 -This module will contains all cases for the parser test suite """ +This module will contain all cases for the parser test suite +""" + import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 # Local module imports +from . import event_monitor from .utils import ERROR_MESSAGES, TIMEOUTS -from conftest import TestConfigurator +from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] @@ -423,6 +424,9 @@ def test_invalid_values_in_bucket( input_description: - The `configuration_values_in_bucket` file provides the configuration for this test. 
""" + + + log_monitor.start( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_aws_invalid_value, diff --git a/tests/integration/test_aws/test_path.py b/tests/integration/test_aws/test_path.py index ace1e54d7cc..6bc5baf03a1 100644 --- a/tests/integration/test_aws/test_path.py +++ b/tests/integration/test_aws/test_path.py @@ -1,25 +1,22 @@ -""" -Copyright (C) 2015-2023, Wazuh Inc. -Created by Wazuh, Inc. . -This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 -This module will contains all cases for the path test suite """ - +This module will contain all cases for the path test suite +""" import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.modules.aws.db_utils import ( - get_s3_db_row, - s3_db_exists, - table_exists_or_has_values, -) +from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH +from wazuh_testing.utils.db_queries.aws_db import get_s3_db_row, table_exists_or_has_values +from wazuh_testing.modules.aws.utils import path_exist # Local module imports +from . import event_monitor from .utils import ERROR_MESSAGES, TIMEOUTS -from conftest import TestConfigurator +from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] @@ -146,8 +143,9 @@ def test_path( assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_path_message'] - assert s3_db_exists() + assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) + # @todo same as the other db if expected_results: data = get_s3_db_row(table_name=table_name) assert f"{bucket_name}/{path}/" == data.bucket_path diff --git a/tests/integration/test_aws/test_path_suffix.py b/tests/integration/test_aws/test_path_suffix.py index d04a0696964..b2720e59a9b 100644 --- a/tests/integration/test_aws/test_path_suffix.py +++ b/tests/integration/test_aws/test_path_suffix.py @@ -1,25 +1,23 @@ -""" -Copyright (C) 2015-2023, Wazuh Inc. -Created by Wazuh, Inc. . -This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 -This module will contains all cases for the path suffix test suite +""" +This module will contain all cases for the path suffix test suite """ import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.modules.aws.db_utils import ( - get_s3_db_row, - s3_db_exists, - table_exists_or_has_values, -) +from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH +from wazuh_testing.utils.db_queries.aws_db import get_s3_db_row, table_exists_or_has_values +from wazuh_testing.modules.aws.utils import path_exist # Local module imports +from . 
import event_monitor from .utils import ERROR_MESSAGES, TIMEOUTS -from conftest import TestConfigurator +from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] @@ -147,7 +145,7 @@ def test_path_suffix( assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_path_suffix_message'] - assert s3_db_exists() + assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) if expected_results: data = get_s3_db_row(table_name=bucket_type) diff --git a/tests/integration/test_aws/test_regions.py b/tests/integration/test_aws/test_regions.py index 23593491062..b16199e72fe 100644 --- a/tests/integration/test_aws/test_regions.py +++ b/tests/integration/test_aws/test_regions.py @@ -1,32 +1,24 @@ -""" -Copyright (C) 2015-2023, Wazuh Inc. -Created by Wazuh, Inc. . -This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 -This module will contains all cases for the region test suite """ - +This module will contain all cases for the region test suite +""" import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.modules.aws import ( # noqa: F401 - AWS_SERVICES_DB_PATH, - RANDOM_ACCOUNT_ID, - event_monitor, - local_internal_options -) -from wazuh_testing.modules.aws.db_utils import ( - get_multiple_s3_db_row, - get_multiple_service_db_row, - s3_db_exists, - table_exists_or_has_values, -) +from wazuh_testing.constants.aws import RANDOM_ACCOUNT_ID +from wazuh_testing.constants.paths.aws import AWS_SERVICES_DB_PATH, S3_CLOUDTRAIL_DB_PATH +from wazuh_testing.modules.aws.utils import path_exist +from wazuh_testing.utils.db_queries.aws_db import (get_multiple_service_db_row, table_exists_or_has_values, + get_multiple_s3_db_row) # Local module imports +from . import event_monitor from .utils import ERROR_MESSAGES, TIMEOUTS -from conftest import TestConfigurator +from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] @@ -109,7 +101,7 @@ def test_regions( only_logs_after = metadata['only_logs_after'] regions = metadata['regions'] expected_results = metadata['expected_results'] - pattern = fr".*DEBUG: \+\+\+ No logs to process in bucket: {RANDOM_ACCOUNT_ID}/{regions}" + pattern = f".*DEBUG: \+\+\+ No logs to process in bucket: {RANDOM_ACCOUNT_ID}/{regions}" parameters = [ 'wodles/aws/aws-s3', @@ -154,7 +146,7 @@ def test_regions( assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_no_region_found_message'] - assert s3_db_exists() + assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) if expected_results: regions_list = regions.split(",") diff --git a/tests/integration/test_aws/test_remove_from_bucket.py b/tests/integration/test_aws/test_remove_from_bucket.py index b20a8d9b191..c55f6f761cd 100644 --- a/tests/integration/test_aws/test_remove_from_bucket.py +++ b/tests/integration/test_aws/test_remove_from_bucket.py @@ -1,22 +1,21 @@ -""" -Copyright (C) 2015-2023, Wazuh Inc. -Created by Wazuh, Inc. . -This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . 
+# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 -This module will contains all cases for the remove from bucket test suite +""" +This module will contain all cases for the remove from bucket test suite """ import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.modules.aws.cloudwatch_utils import log_stream_exists -from wazuh_testing.modules.aws.s3_utils import file_exists +from wazuh_testing.modules.aws.utils import log_stream_exists, file_exists # Local module imports +from . import event_monitor from .utils import ERROR_MESSAGES -from conftest import TestConfigurator +from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py index e11bd98599e..14054bb7d6a 100644 --- a/tests/integration/test_aws/utils.py +++ b/tests/integration/test_aws/utils.py @@ -10,6 +10,7 @@ # CONSTANTS TEMPLATE_DIR = 'configuration_template' TEST_CASES_DIR = 'test_cases' +WAZUH_MODULES_DEBUG = 'wazuh_modules.debug' ERROR_MESSAGES = { @@ -26,7 +27,11 @@ "incorrect_legacy_warning": "The AWS module did not show the expected legacy warning", "incorrect_warning": "The AWS module did not show the expected warning", "incorrect_invalid_value_message": "The AWS module did not show the expected message about invalid value", - "incorrect_service_calls_amount": "The AWS module was not called for bucket or service the right amount of times" + "incorrect_service_calls_amount": "The AWS module was not called for bucket or service the right amount of times", + "unexpected_number_of_events_found": "Some logs may have been processed, " + "or the results found are more than expected", + "incorrect_marker": "The AWS module did not use the correct marker", + "incorrect_no_region_found_message": "The AWS module did not show correct message about non-existent region" } TIMEOUTS = { From d5feffd331fbd59664934dfbe8f67834256e5482 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Fri, 24 Nov 2023 18:04:37 -0300 Subject: [PATCH 271/419] refactor naming of variable and last modules --- tests/integration/test_aws/conftest.py | 20 +-- tests/integration/test_aws/event_monitor.py | 12 +- tests/integration/test_aws/test_basic.py | 20 +-- .../test_aws/test_custom_bucket.py | 125 ++++++++--------- .../test_aws/test_discard_regex.py | 129 +++++++++--------- tests/integration/test_aws/test_log_groups.py | 12 +- .../test_aws/test_only_logs_after.py | 56 ++++---- tests/integration/test_aws/test_parser.py | 20 +-- tests/integration/test_aws/test_path.py | 16 +-- .../integration/test_aws/test_path_suffix.py | 16 +-- tests/integration/test_aws/test_regions.py | 42 +++--- .../test_aws/test_remove_from_bucket.py | 14 +- tests/integration/test_aws/utils.py | 10 +- 13 files changed, 252 insertions(+), 240 deletions(-) diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 92c511babad..70a4bf3f299 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -220,18 +220,20 @@ def configure_test(self, configuration_file="", cases_file=""): Returns: None """ - # Set test configuration path - configurations_path = join(self.configuration_path, configuration_file) - # Set test cases path cases_path = join(self.test_cases_path, cases_file) # Get test cases data 
self.parameters, self.metadata, self.cases_ids = get_test_cases_data(cases_path) - # load configuration template - self.test_configuration_template = load_configuration_template( - configurations_path, - self.parameters, - self.metadata - ) + # Set test configuration template for tests with config files + if configuration_file != "": + # Set config path + configurations_path = join(self.configuration_path, configuration_file) + + # load configuration template + self.test_configuration_template = load_configuration_template( + configurations_path, + self.parameters, + self.metadata + ) diff --git a/tests/integration/test_aws/event_monitor.py b/tests/integration/test_aws/event_monitor.py index dac222fa4aa..d942e295d5a 100644 --- a/tests/integration/test_aws/event_monitor.py +++ b/tests/integration/test_aws/event_monitor.py @@ -24,7 +24,7 @@ from wazuh_testing.modules.aws.utils import analyze_command_output # Local imports -from .utils import ERROR_MESSAGES +from .utils import ERROR_MESSAGE def make_aws_callback(pattern, prefix=''): @@ -285,7 +285,7 @@ def check_processed_logs_from_output(command_output, expected_results=1): command_output=command_output, callback=callback_detect_event_processed, expected_results=expected_results, - error_message=ERROR_MESSAGES['incorrect_event_number'] + error_message=ERROR_MESSAGE['incorrect_event_number'] ) @@ -306,7 +306,7 @@ def check_non_processed_logs_from_output(command_output, bucket_type, expected_r command_output, callback=make_aws_callback(pattern), expected_results=expected_results, - error_message=ERROR_MESSAGES['unexpected_number_of_events_found'] + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) @@ -324,7 +324,7 @@ def check_marker_from_output(command_output, file_key, expected_results=1): command_output, callback=make_aws_callback(pattern), expected_results=expected_results, - error_message=ERROR_MESSAGES['incorrect_marker'] + error_message=ERROR_MESSAGE['incorrect_marker'] ) @@ -335,7 +335,7 @@ def check_service_processed_logs_from_output( command_output=command_output, callback=callback_detect_service_event_processed(events_sent, service_type), expected_results=expected_results, - error_message=ERROR_MESSAGES['incorrect_event_number'] + error_message=ERROR_MESSAGE['incorrect_event_number'] ) @@ -349,5 +349,5 @@ def check_service_non_processed_logs_from_output(command_output, service_type, e command_output, callback=make_aws_callback(pattern), expected_results=expected_results, - error_message=ERROR_MESSAGES['unexpected_number_of_events_found'] + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py index 07f99e067c9..791f644edb9 100644 --- a/tests/integration/test_aws/test_basic.py +++ b/tests/integration/test_aws/test_basic.py @@ -13,7 +13,7 @@ # Local module imports from . 
import event_monitor -from .utils import ERROR_MESSAGES +from .utils import ERROR_MESSAGE from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] @@ -97,7 +97,7 @@ def test_bucket_defaults( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -105,7 +105,7 @@ def test_bucket_defaults( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] # Detect any ERROR message log_monitor.start( @@ -113,7 +113,7 @@ def test_bucket_defaults( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # -------------------------------------------- TEST_CLOUDWATCH_DEFAULTS ------------------------------------------------ @@ -198,7 +198,7 @@ def test_service_defaults( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -206,7 +206,7 @@ def test_service_defaults( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] # Detect any ERROR message log_monitor.start( @@ -214,7 +214,7 @@ def test_service_defaults( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # ------------------------------------------ TEST_INSPECTOR_DEFAULTS --------------------------------------------------- @@ -297,7 +297,7 @@ def test_inspector_defaults( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -305,7 +305,7 @@ def test_inspector_defaults( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] # Detect any ERROR message log_monitor.start( @@ -313,4 +313,4 @@ def test_inspector_defaults( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] diff --git a/tests/integration/test_aws/test_custom_bucket.py b/tests/integration/test_aws/test_custom_bucket.py index 7dddff8f3af..dc0eb9d31fe 100644 --- a/tests/integration/test_aws/test_custom_bucket.py +++ b/tests/integration/test_aws/test_custom_bucket.py @@ -6,39 +6,31 @@ This module will contain all cases for the custom bucket test suite """ -import os - import pytest # 
qa-integration-framework imports from wazuh_testing import session_parameters +# Local module imports +from . import event_monitor +from .utils import ERROR_MESSAGE, TIMEOUT +from .conftest import TestConfigurator, local_internal_options + pytestmark = [pytest.mark.server] -# Generic vars -# Name of the folder test module -MODULE = 'custom_bucket_test_module' -# Path of the data for the tests -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -# Path for the configuration of this module -CONFIGURATION_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -# Path for the test cases of this module -TEST_CASE_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='custom_bucket_test_module') # -------------------------------------------- TEST_CUSTOM_BUCKETS_DEFAULTS ------------------------------------------- -# Configuration and cases data -t1_configurations_path = os.path.join(CONFIGURATION_PATH, 'custom_bucket_configuration.yaml') -t1_cases_path = os.path.join(TEST_CASE_PATH, 'cases_bucket_custom.yaml') - -# Enabled test configurations -t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) -t1_configurations = load_configuration_template( - t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='custom_bucket_configuration.yaml', + cases_file='cases_bucket_custom.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_custom_bucket_defaults(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring): @@ -110,40 +102,39 @@ def test_custom_bucket_defaults(configuration, metadata, load_wazuh_basic_config # Check AWS module started log_monitor.start( - timeout=global_parameters.default_timeout, - callback=event_monitor.callback_detect_aws_module_start, - error_message='The AWS module did not start as expected', - ).result() + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( - timeout=global_parameters.default_timeout, - callback=event_monitor.make_aws_callback(expected_log, prefix='^.*'), - error_message='The AWS module was not called with the correct parameters', - ).result() + timeout=session_parameters.default_timeout, + callback=event_monitor.make_aws_callback(expected_log, prefix='^.*') + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] # Detect any ERROR message - with pytest.raises(TimeoutError): - log_monitor.start( - timeout=global_parameters.default_timeout, - callback=event_monitor.callback_detect_all_aws_err, - ).result() + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] -# -------------------------------------------- TEST_CUSTOM_BUCKETS_LOGS 
------------------------------------------- -# Configuration and cases data -t2_configurations_path = os.path.join(CONFIGURATION_PATH, 'custom_bucket_configuration.yaml') -t2_cases_path = os.path.join(TEST_CASE_PATH, 'cases_bucket_custom_logs.yaml') -# Enabled test configurations -t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) -t2_configurations = load_configuration_template( - t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata -) +# -------------------------------------------- TEST_CUSTOM_BUCKETS_LOGS ------------------------------------------- +# Configure T2 test +configurator.configure_test(configuration_file='custom_bucket_configuration.yaml', + cases_file='cases_bucket_custom_logs.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_custom_bucket_logs(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, upload_and_delete_file_to_s3): @@ -225,31 +216,43 @@ def test_custom_bucket_logs(configuration, metadata, load_wazuh_basic_configurat # Check AWS module started log_monitor.start( - timeout=global_parameters.default_timeout, - callback=event_monitor.callback_detect_aws_module_start, - error_message='The AWS module did not start as expected', - ).result() + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( - timeout=global_parameters.default_timeout, - callback=event_monitor.make_aws_callback(expected_log, prefix='^.*'), - error_message='The AWS module was not called with the correct parameters', - ).result() + timeout=session_parameters.default_timeout, + callback=event_monitor.make_aws_callback(expected_log, prefix='^.*') + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] retrieve_pattern = fr'.*Retrieving messages from: {sqs_name}' message_pattern = fr'.*The message is: .*' - # Check if retrieves from the queue + # Check if the message was retrieved from the queue log_monitor.start( - timeout=T_10, - callback=event_monitor.make_aws_callback(retrieve_pattern), - error_message='The AWS module did not retrieve from the SQS Queue', - ).result() + timeout=TIMEOUT[10], + callback=event_monitor.make_aws_callback(retrieve_pattern) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_sqs_message_retrieval'] # Check if it processes the created file log_monitor.start( - timeout=T_10, - callback=event_monitor.make_aws_callback(message_pattern), - error_message='The AWS module did not handle the message', - ).result() + timeout=TIMEOUT[10], + callback=event_monitor.make_aws_callback(message_pattern) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_message_handling'] + + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] \ No newline at end of file diff 
--git a/tests/integration/test_aws/test_discard_regex.py b/tests/integration/test_aws/test_discard_regex.py index 75438701701..3f4ca724974 100644 --- a/tests/integration/test_aws/test_discard_regex.py +++ b/tests/integration/test_aws/test_discard_regex.py @@ -1,22 +1,23 @@ -""" -Copyright (C) 2015-2023, Wazuh Inc. -Created by Wazuh, Inc. . -This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 -This module will contains all cases for the discard_regex test suite +""" +This module will contain all cases for the discard_regex test suite """ import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 - -from wazuh_testing.modules.aws.db_utils import s3_db_exists, services_db_exists +from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH, AWS_SERVICES_DB_PATH +from wazuh_testing.modules.aws.utils import path_exist # Local module imports -from .utils import ERROR_MESSAGES, TIMEOUTS -from conftest import TestConfigurator +from . import event_monitor +from .utils import ERROR_MESSAGE, TIMEOUT +from .conftest import TestConfigurator, local_internal_options + pytestmark = [pytest.mark.server] @@ -131,7 +132,7 @@ def test_bucket_discard_regex( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -139,15 +140,17 @@ def test_bucket_discard_regex( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), accumulations=found_logs + skipped_logs ) - assert s3_db_exists() + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] + + assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) # ----------------------------------------- TEST_CLOUDWATCH_DISCARD_REGEX_JSON ---------------------------------------- @@ -250,29 +253,29 @@ def test_cloudwatch_discard_regex_json( # Check AWS module started log_monitor.start( - timeout=global_parameters.default_timeout, - callback=event_monitor.callback_detect_aws_module_start, - error_message='The AWS module did not start as expected', - ).result() + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( - timeout=global_parameters.default_timeout, - callback=event_monitor.callback_detect_aws_module_called(parameters), - error_message='The AWS module was not called with the correct parameters', - ).result() + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] log_monitor.start( - timeout=T_20, + timeout=TIMEOUT[20], 
callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), - error_message=( - 'The AWS module did not show the correct message about discard regex or ', - 'did not process the expected amount of logs' - ), - accum_results=found_logs - ).result() + accumulations=found_logs + ) - assert services_db_exists() + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] + + assert path_exist(path=AWS_SERVICES_DB_PATH) # ------------------------------------- TEST_CLOUDWATCH_DISCARD_REGEX_SIMPLE_TEXT ------------------------------------- @@ -373,29 +376,29 @@ def test_cloudwatch_discard_regex_simple_text( # Check AWS module started log_monitor.start( - timeout=global_parameters.default_timeout, - callback=event_monitor.callback_detect_aws_module_start, - error_message='The AWS module did not start as expected', - ).result() + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( - timeout=global_parameters.default_timeout, - callback=event_monitor.callback_detect_aws_module_called(parameters), - error_message='The AWS module was not called with the correct parameters', - ).result() + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] log_monitor.start( - timeout=T_20, + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), - error_message=( - 'The AWS module did not show the correct message about discard regex or ', - 'did not process the expected amount of logs' - ), - accum_results=found_logs - ).result() + accumulations=found_logs + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] - assert services_db_exists() + assert path_exist(path=AWS_SERVICES_DB_PATH) # ------------------------------------------- TEST_INSPECTOR_DISCARD_REGEX -------------------------------------------- @@ -496,26 +499,26 @@ def test_inspector_discard_regex( # Check AWS module started log_monitor.start( - timeout=global_parameters.default_timeout, - callback=event_monitor.callback_detect_aws_module_start, - error_message='The AWS module did not start as expected', - ).result() + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( - timeout=global_parameters.default_timeout, - callback=event_monitor.callback_detect_aws_module_called(parameters), - error_message='The AWS module was not called with the correct parameters', - ).result() + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] log_monitor.start( - timeout=T_20, + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), - error_message=( - 'The AWS module did not show the correct message about discard regex or ', - 'did not process the expected amount of logs' - ), - accum_results=found_logs - ).result() - - assert services_db_exists() + accumulations=found_logs + ) + + assert 
log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] + + assert path_exist(path=AWS_SERVICES_DB_PATH) diff --git a/tests/integration/test_aws/test_log_groups.py b/tests/integration/test_aws/test_log_groups.py index f465303d9f0..21f684726bb 100644 --- a/tests/integration/test_aws/test_log_groups.py +++ b/tests/integration/test_aws/test_log_groups.py @@ -17,7 +17,7 @@ # Local module imports from . import event_monitor -from .utils import ERROR_MESSAGES, TIMEOUTS +from .utils import ERROR_MESSAGE, TIMEOUT from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] @@ -120,7 +120,7 @@ def test_log_groups( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -130,17 +130,17 @@ def test_log_groups( if expected_results: log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), accumulations=len(log_group_names.split(',')) ) else: log_monitor.start( - timeout=TIMEOUTS[10], + timeout=TIMEOUT[10], callback=event_monitor.make_aws_callback(pattern=fr"{NON_EXISTENT_SPECIFIED_LOG_GROUPS}") ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_no_existent_log_group'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_no_existent_log_group'] assert path_exist(path=AWS_SERVICES_DB_PATH) @@ -158,4 +158,4 @@ def test_log_groups( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] diff --git a/tests/integration/test_aws/test_only_logs_after.py b/tests/integration/test_aws/test_only_logs_after.py index bdab62e6ef1..690b0876e12 100644 --- a/tests/integration/test_aws/test_only_logs_after.py +++ b/tests/integration/test_aws/test_only_logs_after.py @@ -5,9 +5,6 @@ """ This module will contain all cases for the only logs after test suite """ -import pydevd_pycharm - -pydevd_pycharm.settrace('192.168.56.1', port=55555, stdoutToServer=True, stderrToServer=True) import pytest from datetime import datetime @@ -22,7 +19,7 @@ # Local module imports from . 
import event_monitor -from .utils import ERROR_MESSAGES, TIMEOUTS +from .utils import ERROR_MESSAGE, TIMEOUT from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] @@ -30,6 +27,9 @@ # Set test configurator for the module configurator = TestConfigurator(module='only_logs_after_test_module') +import pydevd_pycharm +pydevd_pycharm.settrace('192.168.56.1', port=55555, stdoutToServer=True, stderrToServer=True) + # --------------------------------------------- TEST_BUCKET_WITHOUT_ONLY_LOGS_AFTER ------------------------------------ # Configure T1 test configurator.configure_test(configuration_file='bucket_configuration_without_only_logs_after.yaml', @@ -126,7 +126,7 @@ def test_bucket_without_only_logs_after( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -134,7 +134,7 @@ def test_bucket_without_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] log_monitor.start( timeout=session_parameters.default_timeout, @@ -142,7 +142,7 @@ def test_bucket_without_only_logs_after( accumulations=expected_results ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) @@ -157,7 +157,7 @@ def test_bucket_without_only_logs_after( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # -------------------------------------------- TEST_SERVICE_WITHOUT_ONLY_LOGS_AFTER ------------------------------------ @@ -251,7 +251,7 @@ def test_service_without_only_logs_after( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -259,7 +259,7 @@ def test_service_without_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] assert path_exist(path=AWS_SERVICES_DB_PATH) @@ -275,7 +275,7 @@ def test_service_without_only_logs_after( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # --------------------------------------------- TEST_BUCKET_WITH_ONLY_LOGS_AFTER --------------------------------------- @@ -372,7 +372,7 @@ def test_bucket_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -380,15 +380,15 @@ def 
test_bucket_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_event_processed, accumulations=expected_results ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) @@ -404,7 +404,7 @@ def test_bucket_with_only_logs_after( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # --------------------------------------------TEST_CLOUDWATCH_WITH_ONLY_LOGS_AFTER ------------------------------------- @@ -501,7 +501,7 @@ def test_cloudwatch_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -509,14 +509,14 @@ def test_cloudwatch_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] log_monitor.start( - timeout=TIMEOUTS[10], + timeout=TIMEOUT[10], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] assert path_exist(path=AWS_SERVICES_DB_PATH) @@ -531,7 +531,7 @@ def test_cloudwatch_with_only_logs_after( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # ------------------------------------------ TEST_INSPECTOR_WITH_ONLY_LOGS_AFTER --------------------------------------- @@ -626,7 +626,7 @@ def test_inspector_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -634,14 +634,14 @@ def test_inspector_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] log_monitor.start( - timeout=TIMEOUTS[10], + timeout=TIMEOUT[10], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] assert path_exist(path=AWS_SERVICES_DB_PATH) @@ -654,7 +654,7 @@ def 
test_inspector_with_only_logs_after( # ---------------------------------------------------- TEST_MULTIPLE_CALLS --------------------------------------------- -# Configure T5 test +# Configure T6 test configurator.configure_test(cases_file='cases_bucket_multiple_calls.yaml') @@ -766,7 +766,7 @@ def test_bucket_multiple_calls( # -------------------------------------------- TEST_INSPECTOR_MULTIPLE_CALLS ------------------------------------------- -# Configure T6 test +# Configure T7 test configurator.configure_test(cases_file='cases_inspector_multiple_calls.yaml') @@ -849,7 +849,7 @@ def test_inspector_multiple_calls( # ----------------------------------------- TEST_CLOUDWATCH_MULTIPLE_CALLS --------------------------------------------- -# Configure T7 test +# Configure T8 test configurator.configure_test(cases_file='cases_cloudwatch_multiple_calls.yaml') diff --git a/tests/integration/test_aws/test_parser.py b/tests/integration/test_aws/test_parser.py index 38e3306d726..b57145fafc8 100644 --- a/tests/integration/test_aws/test_parser.py +++ b/tests/integration/test_aws/test_parser.py @@ -13,7 +13,7 @@ # Local module imports from . import event_monitor -from .utils import ERROR_MESSAGES, TIMEOUTS +from .utils import ERROR_MESSAGE, TIMEOUT from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] @@ -87,7 +87,7 @@ def test_bucket_and_service_missing( callback=event_monitor.callback_detect_aws_module_warning, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_warning'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_warning'] # -------------------------------------------- TEST_TYPE_MISSING_IN_BUCKET --------------------------------------------- @@ -155,7 +155,7 @@ def test_type_missing_in_bucket( callback=event_monitor.callback_detect_aws_legacy_module_warning, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_legacy_warning'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_legacy_warning'] # -------------------------------------------- TEST_TYPE_MISSING_IN_SERVICE -------------------------------------------- @@ -224,7 +224,7 @@ def test_type_missing_in_service( callback=event_monitor.callback_detect_aws_error_for_missing_type, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_error_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_error_message'] # -------------------------------------------- TEST_EMPTY_VALUES_IN_BUCKET --------------------------------------------- @@ -292,7 +292,7 @@ def test_empty_values_in_bucket( callback=event_monitor.callback_detect_aws_empty_value, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_value_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_empty_value_message'] # -------------------------------------------- TEST_EMPTY_VALUES_IN_SERVICE -------------------------------------------- @@ -361,7 +361,7 @@ def test_empty_values_in_service( callback=event_monitor.callback_detect_aws_empty_value, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_value_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_empty_value_message'] # ------------------------------------------ TEST_INVALID_VALUES_IN_BUCKET --------------------------------------------- @@ -432,7 +432,7 @@ def test_invalid_values_in_bucket( 
callback=event_monitor.callback_detect_aws_invalid_value, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_invalid_value_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_invalid_value_message'] # ------------------------------------------ TEST_INVALID_VALUES_IN_BUCKET --------------------------------------------- @@ -500,7 +500,7 @@ def test_invalid_values_in_service( callback=event_monitor.callback_detect_aws_invalid_value, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_invalid_value_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_invalid_value_message'] # --------------------------------------- TEST_MULTIPLE_BUCKET_AND_SERVICE_TAGS ---------------------------------------- @@ -564,9 +564,9 @@ def test_multiple_bucket_and_service_tags( - The `configuration_multiple_bucket_and_service_tags` file provides the configuration for this test. """ log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_bucket_or_service_call, accumulations=4 ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_service_calls_amount'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_service_calls_amount'] diff --git a/tests/integration/test_aws/test_path.py b/tests/integration/test_aws/test_path.py index 6bc5baf03a1..d150038ab58 100644 --- a/tests/integration/test_aws/test_path.py +++ b/tests/integration/test_aws/test_path.py @@ -15,7 +15,7 @@ # Local module imports from . import event_monitor -from .utils import ERROR_MESSAGES, TIMEOUTS +from .utils import ERROR_MESSAGE, TIMEOUT from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] @@ -117,7 +117,7 @@ def test_path( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -125,23 +125,23 @@ def test_path( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] if expected_results: log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_event_processed, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] else: log_monitor.start( - timeout=TIMEOUTS[10], + timeout=TIMEOUT[10], callback=event_monitor.make_aws_callback(pattern), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_path_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_empty_path_message'] assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) @@ -159,4 +159,4 @@ def test_path( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] diff --git a/tests/integration/test_aws/test_path_suffix.py b/tests/integration/test_aws/test_path_suffix.py index b2720e59a9b..65ee5a25a74 100644 --- a/tests/integration/test_aws/test_path_suffix.py +++ 
b/tests/integration/test_aws/test_path_suffix.py @@ -16,7 +16,7 @@ # Local module imports from . import event_monitor -from .utils import ERROR_MESSAGES, TIMEOUTS +from .utils import ERROR_MESSAGE, TIMEOUT from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] @@ -120,7 +120,7 @@ def test_path_suffix( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -128,22 +128,22 @@ def test_path_suffix( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] if expected_results: log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_event_processed, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] else: log_monitor.start( - timeout=TIMEOUTS[10], + timeout=TIMEOUT[10], callback=event_monitor.make_aws_callback(pattern), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_path_suffix_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_empty_path_suffix_message'] assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) @@ -160,4 +160,4 @@ def test_path_suffix( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] diff --git a/tests/integration/test_aws/test_regions.py b/tests/integration/test_aws/test_regions.py index b16199e72fe..a82e1204684 100644 --- a/tests/integration/test_aws/test_regions.py +++ b/tests/integration/test_aws/test_regions.py @@ -17,7 +17,7 @@ # Local module imports from . 
import event_monitor -from .utils import ERROR_MESSAGES, TIMEOUTS +from .utils import ERROR_MESSAGE, TIMEOUT from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] @@ -119,7 +119,7 @@ def test_regions( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -127,24 +127,24 @@ def test_regions( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] if expected_results: log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_event_processed, accumulations=expected_results ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] else: log_monitor.start( - timeout=TIMEOUTS[10], + timeout=TIMEOUT[10], callback=event_monitor.make_aws_callback(pattern), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_no_region_found_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_no_region_found_message'] assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) @@ -164,7 +164,7 @@ def test_regions( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # -------------------------------------------- TEST_CLOUDWATCH_REGIONS ------------------------------------------------- @@ -261,7 +261,7 @@ def test_cloudwatch_regions( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -269,15 +269,15 @@ def test_cloudwatch_regions( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] if expected_results: log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), accumulations=len(regions_list) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] else: log_monitor.start( @@ -287,7 +287,7 @@ def test_cloudwatch_regions( ), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_non-existent_region_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_non-existent_region_message'] table_name = 'cloudwatch_logs' @@ -304,7 +304,7 @@ def test_cloudwatch_regions( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # ------------------------------------------ 
TEST_INSPECTOR_PATH ------------------------------------------------------- @@ -399,7 +399,7 @@ def test_inspector_regions( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -407,15 +407,15 @@ def test_inspector_regions( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] if expected_results: log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), accumulations=len(regions_list) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] else: log_monitor.start( @@ -425,7 +425,7 @@ def test_inspector_regions( ), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_non-existent_region_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_non-existent_region_message'] table_name = 'aws_services' @@ -442,7 +442,7 @@ def test_inspector_regions( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # Detect any ERROR message log_monitor.start( @@ -450,4 +450,4 @@ def test_inspector_regions( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] diff --git a/tests/integration/test_aws/test_remove_from_bucket.py b/tests/integration/test_aws/test_remove_from_bucket.py index c55f6f761cd..c4d9d2d65bd 100644 --- a/tests/integration/test_aws/test_remove_from_bucket.py +++ b/tests/integration/test_aws/test_remove_from_bucket.py @@ -14,7 +14,7 @@ # Local module imports from . 
import event_monitor -from .utils import ERROR_MESSAGES +from .utils import ERROR_MESSAGE from .conftest import TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] @@ -112,7 +112,7 @@ def test_remove_from_bucket( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -120,7 +120,7 @@ def test_remove_from_bucket( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] assert not file_exists(filename=metadata['uploaded_file'], bucket_name=bucket_name) @@ -130,7 +130,7 @@ def test_remove_from_bucket( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # ---------------------------------------------------- TEST_REMOVE_LOG_STREAM ------------------------------------------ @@ -221,7 +221,7 @@ def test_remove_log_stream( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -229,7 +229,7 @@ def test_remove_log_stream( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] assert not log_stream_exists(log_stream=metadata['log_stream'], log_group=log_group_name) @@ -239,4 +239,4 @@ def test_remove_log_stream( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py index 14054bb7d6a..7df804c4d3a 100644 --- a/tests/integration/test_aws/utils.py +++ b/tests/integration/test_aws/utils.py @@ -12,7 +12,7 @@ TEST_CASES_DIR = 'test_cases' WAZUH_MODULES_DEBUG = 'wazuh_modules.debug' -ERROR_MESSAGES = { +ERROR_MESSAGE = { "failed_start": "The AWS module did not start as expected", "incorrect_parameters": "The AWS module was not called with the correct parameters", @@ -31,10 +31,14 @@ "unexpected_number_of_events_found": "Some logs may have been processed, " "or the results found are more than expected", "incorrect_marker": "The AWS module did not use the correct marker", - "incorrect_no_region_found_message": "The AWS module did not show correct message about non-existent region" + "incorrect_no_region_found_message": "The AWS module did not show correct message about non-existent region", + "incorrect_discard_regex_message": "The AWS module did not show the correct message about discard regex or, " + "did not process the expected amount of logs", + "failed_sqs_message_retrieval": "The AWS module did not retrieve the expected message from the SQS Queue", + "failed_message_handling": "The AWS module did not handle the expected message" } -TIMEOUTS = { +TIMEOUT = { 10: 10, 20: 20 From 
12aa71ab10e4ed02c4ef87081e1fea58e4af61dc Mon Sep 17 00:00:00 2001
From: Eduardo
Date: Mon, 27 Nov 2023 19:03:21 -0300
Subject: [PATCH 272/419] modularize event monitor methods, fix imports

---
 tests/integration/test_aws/event_monitor.py   |  90 +-----------
 tests/integration/test_aws/test_log_groups.py |   1 -
 .../test_aws/test_only_logs_after.py          | 137 ++++++++++++------
 tests/integration/test_aws/test_path.py       |   1 -
 4 files changed, 92 insertions(+), 137 deletions(-)

diff --git a/tests/integration/test_aws/event_monitor.py b/tests/integration/test_aws/event_monitor.py
index d942e295d5a..47c670fc910 100644
--- a/tests/integration/test_aws/event_monitor.py
+++ b/tests/integration/test_aws/event_monitor.py
@@ -9,7 +9,7 @@
 import re
 
 # # qa-integration-framework imports
-from wazuh_testing.modules.aws.patterns import (WHITESPACE_MATCH, CURLY_BRACE_MATCH, AWS_MODULE_STARTED_PARAMETRIZED,
+from wazuh_testing.modules.aws.patterns import (AWS_MODULE_STARTED_PARAMETRIZED,
                                                 AWS_UNDEFINED_SERVICE_TYPE, AWS_DEPRECATED_CONFIG_DEFINED,
                                                 AWS_NO_SERVICE_WARNING, AWS_MODULE_STARTED, INVALID_EMPTY_TYPE_ERROR,
                                                 EMPTY_CONTENT_ERROR, EMPTY_CONTENT_WARNING,
@@ -18,13 +18,8 @@
                                                 PARSING_SERVICE_ERROR_WARNING, SERVICE_ANALYSIS, BUCKET_ANALYSIS,
                                                 MODULE_START, PARSER_ERROR, MODULE_ERROR, NEW_LOG_FOUND,
                                                 DEBUG_MESSAGE, EVENTS_COLLECTED, DEBUG_ANALYSISD_MESSAGE,
                                                 ANALYSISD_EVENT,
-                                                AWS_EVENT_HEADER, NO_LOG_PROCESSED, NO_BUCKET_LOG_PROCESSED,
-                                                MARKER, NO_NEW_EVENTS, EVENT_SENT, )
-from wazuh_testing.constants.aws import VPC_FLOW_TYPE, INSPECTOR_TYPE
-from wazuh_testing.modules.aws.utils import analyze_command_output
-
-# Local imports
-from .utils import ERROR_MESSAGE
+                                                AWS_EVENT_HEADER, NO_LOG_PROCESSED, NO_BUCKET_LOG_PROCESSED)
+from wazuh_testing.constants.aws import INSPECTOR_TYPE
 
 
 def make_aws_callback(pattern, prefix=''):
@@ -272,82 +267,3 @@ def callback_event_sent_to_analysisd(line):
     if line.startswith(
             fr"{AWS_EVENT_HEADER}"):
         return line
-
-
-def check_processed_logs_from_output(command_output, expected_results=1):
-    """Check for processed messages in the give output.
-
-    Args:
-        command_output (str): Output to analyze.
-        expected_results (int, optional): Number of results to find. Default to 1.
- """ - pattern = fr"{MARKER} {file_key}" - - analyze_command_output( - command_output, - callback=make_aws_callback(pattern), - expected_results=expected_results, - error_message=ERROR_MESSAGE['incorrect_marker'] - ) - - -def check_service_processed_logs_from_output( - command_output, events_sent, service_type, expected_results=1 -): - analyze_command_output( - command_output=command_output, - callback=callback_detect_service_event_processed(events_sent, service_type), - expected_results=expected_results, - error_message=ERROR_MESSAGE['incorrect_event_number'] - ) - - -def check_service_non_processed_logs_from_output(command_output, service_type, expected_results=1): - if service_type == INSPECTOR_TYPE: - pattern = fr"{NO_NEW_EVENTS}" - else: - pattern = fr"{EVENT_SENT}" - - analyze_command_output( - command_output, - callback=make_aws_callback(pattern), - expected_results=expected_results, - error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] - ) diff --git a/tests/integration/test_aws/test_log_groups.py b/tests/integration/test_aws/test_log_groups.py index 21f684726bb..512132d1ebf 100644 --- a/tests/integration/test_aws/test_log_groups.py +++ b/tests/integration/test_aws/test_log_groups.py @@ -144,7 +144,6 @@ def test_log_groups( assert path_exist(path=AWS_SERVICES_DB_PATH) - # @todo Ask reason behind query retuning string instead of list as stated in docstring if expected_results: log_group_list = log_group_names.split(",") for row in get_multiple_service_db_row(table_name='cloudwatch_logs'): diff --git a/tests/integration/test_aws/test_only_logs_after.py b/tests/integration/test_aws/test_only_logs_after.py index 690b0876e12..93f0765804f 100644 --- a/tests/integration/test_aws/test_only_logs_after.py +++ b/tests/integration/test_aws/test_only_logs_after.py @@ -12,10 +12,12 @@ # qa-integration-framework imports from wazuh_testing import session_parameters from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH, AWS_SERVICES_DB_PATH -from wazuh_testing.constants.aws import ONLY_LOGS_AFTER_PARAM, PATH_DATE_FORMAT +from wazuh_testing.constants.aws import ONLY_LOGS_AFTER_PARAM, PATH_DATE_FORMAT, VPC_FLOW_TYPE, INSPECTOR_TYPE from wazuh_testing.utils.db_queries.aws_db import get_multiple_s3_db_row, get_service_db_row, get_s3_db_row from wazuh_testing.modules.aws.utils import (call_aws_module, create_log_events, create_log_stream, path_exist, - get_last_file_key, upload_file) + get_last_file_key, upload_file, analyze_command_output) +from wazuh_testing.modules.aws.patterns import (NO_LOG_PROCESSED, NO_BUCKET_LOG_PROCESSED, MARKER, NO_NEW_EVENTS, + EVENT_SENT) # Local module imports from . import event_monitor @@ -703,7 +705,7 @@ def test_bucket_multiple_calls( brief: Restart the wazuh service. - delete_file_from_s3: type: fixture - brief: Delete the a file after the test execution. + brief: Delete the file after the test execution. input_description: - The `cases_multiple_calls` file provides the test cases. """ @@ -726,42 +728,57 @@ def test_bucket_multiple_calls( # Call the module without only_logs_after and check that no logs were processed last_marker_key = datetime.utcnow().strftime(PATH_DATE_FORMAT) - event_monitor.check_non_processed_logs_from_output( + # Get bucket type + if bucket_type == VPC_FLOW_TYPE: + pattern = fr"{NO_LOG_PROCESSED}" + else: + pattern = fr"{NO_BUCKET_LOG_PROCESSED}" + + # Check for the non 'processed' messages in the given output. 
+    analyze_command_output(
         command_output=call_aws_module(*base_parameters),
-        bucket_type=bucket_type
+        callback=event_monitor.make_aws_callback(pattern),
+        expected_results=1,
+        error_message=ERROR_MESSAGE['unexpected_number_of_events_found']
     )
 
-    # Call the module with only_logs_after set in the past and check that the expected number of logs were
-    # processed
-    event_monitor.check_processed_logs_from_output(
+    # Call the module with only_logs_after set in the past and check that the expected number of logs were processed
+    analyze_command_output(
         command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-20'),
-        expected_results=3
+        callback=event_monitor.callback_detect_event_processed,
+        expected_results=3,
+        error_message=ERROR_MESSAGE['incorrect_event_number']
     )
 
     # Call the module with the same parameters and check that there were no duplicates
     expected_skipped_logs_step_3 = metadata.get('expected_skipped_logs_step_3', 1)
-    event_monitor.check_non_processed_logs_from_output(
+    analyze_command_output(
         command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-20'),
-        bucket_type=bucket_type,
-        expected_results=expected_skipped_logs_step_3
+        callback=event_monitor.make_aws_callback(pattern),
+        expected_results=expected_skipped_logs_step_3,
+        error_message=ERROR_MESSAGE['incorrect_event_number']
     )
 
-    # Call the module with only_logs_after set with an early date than setted previously and check that no logs
+    # Call the module with only_logs_after set to an earlier date than the one set previously and check that no logs
     # were processed, there were no duplicates
-    event_monitor.check_non_processed_logs_from_output(
+    analyze_command_output(
         command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-22'),
-        bucket_type=bucket_type,
-        expected_results=expected_skipped_logs_step_3 - 1 if expected_skipped_logs_step_3 > 1 else 1
+        callback=event_monitor.make_aws_callback(pattern),
+        expected_results=expected_skipped_logs_step_3 - 1 if expected_skipped_logs_step_3 > 1 else 1,
+        error_message=ERROR_MESSAGE['incorrect_event_number']
     )
 
     # Upload a log file for the day of the test execution and call the module without only_logs_after and check that
     # only the uploaded logs were processed and the last marker is specified in the DB.
last_marker_key = get_last_file_key(bucket_type, bucket_name, datetime.utcnow()) metadata['filename'] = upload_file(bucket_type, bucket_name) + pattern = fr"{MARKER}{last_marker_key}" - event_monitor.check_marker_from_output( + analyze_command_output( command_output=call_aws_module(*base_parameters), - file_key=last_marker_key + callback=event_monitor.make_aws_callback(pattern), + expected_results=1, + error_message=ERROR_MESSAGE['incorrect_marker'] ) @@ -819,32 +836,40 @@ def test_inspector_multiple_calls( '--debug', '2' ] + if service_type == INSPECTOR_TYPE: + pattern = fr"{NO_NEW_EVENTS}" + else: + pattern = fr"{EVENT_SENT}" + # Call the module without only_logs_after and check that no logs were processed - event_monitor.check_service_non_processed_logs_from_output( - command_output=call_aws_module(*base_parameters), service_type=service_type, expected_results=1 + analyze_command_output( + command_output=call_aws_module(*base_parameters), + callback=event_monitor.make_aws_callback(pattern), + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) - # Call the module with only_logs_after set in the past and check that the expected number of logs were - # processed - event_monitor.check_service_processed_logs_from_output( + # Call the module with only_logs_after set in the past and check that the expected number of logs were processed. + analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-30'), - service_type=service_type, - events_sent=4 + callback=event_monitor.callback_detect_service_event_processed( + expected_results=4, + service_type=service_type), + error_message=ERROR_MESSAGE['incorrect_event_number'] ) # Call the module with the same parameters in and check there were no duplicates - event_monitor.check_service_non_processed_logs_from_output( + analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-30'), - service_type=service_type, - expected_results=1 + callback=event_monitor.make_aws_callback(pattern), + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) - # Call the module with only_logs_after set with an early date than setted previously and check that no logs + # Call the module with only_logs_after set with an early date than the one set previously and check that no logs # were processed, there were no duplicates - event_monitor.check_service_non_processed_logs_from_output( + analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-31'), - service_type=service_type, - expected_results=1 + callback=event_monitor.make_aws_callback(pattern), + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) @@ -912,32 +937,43 @@ def test_cloudwatch_multiple_calls( '--debug', '2' ] + if service_type == INSPECTOR_TYPE: + pattern = fr"{NO_NEW_EVENTS}" + else: + pattern = fr"{EVENT_SENT}" + # Call the module without only_logs_after and check that no logs were processed - event_monitor.check_service_non_processed_logs_from_output( - command_output=call_aws_module(*base_parameters), service_type=service_type, expected_results=0 + analyze_command_output( + command_output=call_aws_module(*base_parameters), + callback=event_monitor.make_aws_callback(pattern), + expected_results=0, + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) - # Call the module with only_logs_after set in the past and check that the expected number of logs were - # processed - 
event_monitor.check_service_processed_logs_from_output( + # Call the module with only_logs_after set in the past and check that the expected number of logs were processed. + analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-12'), - service_type=service_type, - events_sent=3 + callback=event_monitor.callback_detect_service_event_processed( + expected_results=3, + service_type=service_type), + error_message=ERROR_MESSAGE['incorrect_event_number'] ) # Call the module with the same parameters in and check there were no duplicates - event_monitor.check_service_non_processed_logs_from_output( + analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-12'), - service_type=service_type, - expected_results=0 + callback=event_monitor.make_aws_callback(pattern), + expected_results=0, + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) - # Call the module with only_logs_after set with an early date than setted previously and check that no logs + # Call the module with only_logs_after set with an early date than the one set previously and check that no logs # were processed, there were no duplicates - event_monitor.check_service_non_processed_logs_from_output( + analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-15'), - service_type=service_type, - expected_results=0 + callback=event_monitor.make_aws_callback(pattern), + expected_results=0, + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) # Upload a log file for the day of the test execution and call the module without only_logs_after and check that @@ -945,6 +981,11 @@ def test_cloudwatch_multiple_calls( log_stream = create_log_stream() metadata['log_stream'] = log_stream create_log_events(log_stream) - event_monitor.check_service_processed_logs_from_output( - command_output=call_aws_module(*base_parameters), service_type=service_type, events_sent=1 + + analyze_command_output( + command_output=call_aws_module(*base_parameters), + callback=event_monitor.callback_detect_service_event_processed( + expected_results=1, + service_type=service_type), + error_message=ERROR_MESSAGE['incorrect_event_number'] ) diff --git a/tests/integration/test_aws/test_path.py b/tests/integration/test_aws/test_path.py index d150038ab58..e9a06b804d9 100644 --- a/tests/integration/test_aws/test_path.py +++ b/tests/integration/test_aws/test_path.py @@ -145,7 +145,6 @@ def test_path( assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) - # @todo same as the other db if expected_results: data = get_s3_db_row(table_name=table_name) assert f"{bucket_name}/{path}/" == data.bucket_path From 2050a039c7ac6c80e9de1c0133afe20d5180e3df Mon Sep 17 00:00:00 2001 From: Eduardo Leon Wazuh Date: Wed, 19 Jul 2023 08:05:36 -0300 Subject: [PATCH 273/419] Migrate AWS ITs from wazuh-qa --- tests/integration/conftest.py | 8 +- tests/integration/test_aws/README.md | 103 ++-- tests/integration/test_aws/conftest.py | 91 +--- .../configuration_discard_regex.yaml | 23 + .../cases_discard_regex.yaml | 238 ++++++++++ tests/integration/test_aws/test_basic.py | 105 ++-- .../test_aws/test_discard_regex.py | 447 ++---------------- tests/integration/test_aws/test_log_groups.py | 64 +-- .../test_aws/test_only_logs_after.py | 334 ++++++------- tests/integration/test_aws/test_parser.py | 226 +++++---- tests/integration/test_aws/test_path.py | 65 +-- .../integration/test_aws/test_path_suffix.py | 66 +-- 
tests/integration/test_aws/test_regions.py | 129 ++--- .../test_aws/test_remove_from_bucket.py | 74 +-- tests/integration/test_aws/utils.py | 21 +- 15 files changed, 913 insertions(+), 1081 deletions(-) create mode 100644 tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_discard_regex.yaml create mode 100644 tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_discard_regex.yaml diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 9e1735acd46..504b1abb618 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -181,10 +181,6 @@ def configure_local_internal_options_function(request): It uses the test variable local_internal_options. This should be a dictionary wich keys and values corresponds to the internal option configuration, For example: local_internal_options = {'monitord.rotate_log': '0', 'syscheck.debug': '0' } - - Args: - request (fixture): Provide information on the executing test function. - """ try: local_internal_options = request.param @@ -208,7 +204,7 @@ def configure_local_internal_options_function(request): wazuh_configuration.set_local_internal_options_dict(backup_local_internal_options) -@pytest.fixture() +@pytest.fixture(scope='function') def restart_wazuh_function(request): """Restart before starting a test, and stop it after finishing. @@ -244,7 +240,7 @@ def restart_wazuh_function(request): control_service('stop', daemon=daemon) -@pytest.fixture() +@pytest.fixture(scope='function') def file_monitoring(request): """Fixture to handle the monitoring of a specified file. diff --git a/tests/integration/test_aws/README.md b/tests/integration/test_aws/README.md index a644cff8f15..f06aac5e543 100644 --- a/tests/integration/test_aws/README.md +++ b/tests/integration/test_aws/README.md @@ -7,14 +7,13 @@ It is a _wodle based_ module that has a capability to pull logs from several AWS ## Tests directory structure ```bash -wazuh/tests/integration/test_aws +wazuh-qa/tests/integration/test_aws +├── conftest.py ├── data │   ├── configuration_template │   │   ├── basic_test_module │   │   ├── discard_regex_test_module -│   │   ├── log_groups_test_module │   │   ├── only_logs_after_test_module -│   │   ├── parser_test_module │   │   ├── path_suffix_test_module │   │   ├── path_test_module │   │   ├── regions_test_module @@ -22,52 +21,38 @@ wazuh/tests/integration/test_aws │   └── test_cases │   ├── basic_test_module │   ├── discard_regex_test_module -│   ├── log_groups_test_module │   ├── only_logs_after_test_module -│   ├── parser_test_module │   ├── path_suffix_test_module │   ├── path_test_module │   ├── regions_test_module │   └── remove_from_bucket_test_module -├── __init__.py -├── README.md -├── conftest.py +├── README.MD ├── test_basic.py ├── test_discard_regex.py -├── test_log_groups.py ├── test_only_logs_after.py ├── test_path.py ├── test_path_suffix.py ├── test_regions.py -├── test_remove_from_bucket.py -└── utils.py +└── test_remove_from_bucket.py ``` ## Deps directory structure ```bash -qa-integration-framework/src/wazuh_testing/modules/aws -├── __init__.py +wazuh-qa/deps/wazuh_testing/wazuh_testing/modules/aws ├── cli_utils.py -├── cloudwatch_utils.py +├── constants.py ├── data_generator.py ├── db_utils.py ├── event_monitor.py -├── exceptions.py +├── __init__.py └── s3_utils.py ``` ## Requirements -- [Proper testing environment](#Setting up a test environment) - -- [Wazuh](https://github.com/wazuh/qa-integration-framework) repository. 
- -- [Testing framework](https://github.com/wazuh/qa-integration-framework) installed. - -- Configured buckets, log groups and an inspector assessment with test data in AWS. - -For a step-by-step example guide using linux go to the [test setup section](#linux) +- The only extra dependency is `boto3` +- The module will assume there are already buckets, log groups and an inspector assessment with test data in AWS. ## Configuration settings @@ -82,7 +67,7 @@ aws_secret_access_key = ## Setting up a test environment -You will need a proper environment to run the integration tests. You can use Docker or any virtual machine. If you have +You will need a proper environment to run the integration tests. You can use any virtual machine you wish. If you have one already, go to the [integration tests section](#integration-tests) If you use [Vagrant](https://www.vagrantup.com/downloads.html) @@ -107,16 +92,16 @@ _We are using **Ubuntu 22.04** for this example:_ ```shell script # Install pip - apt install python3-pip git -y - - # Clone `wazuh` repository within your testing environment - git clone https://github.com/wazuh/wazuh.git - - # Clone the `qa-integration-framework` repository withing your testing environment - git clone https://github.com/wazuh/qa-integration-framework.git - - # Install tests dependencies - python3 -m pip install qa-integration-framework/ + apt install python3-pip + + # Clone your `wazuh-qa` repository within your testing environment + cd wazuh-qa + + # Install Python libraries + python3 -m pip install -r requirements.txt + + # Install test dependecies + python3 -m pip install deps/wazuh-testing ``` @@ -133,7 +118,7 @@ from the closest one, it will look for the next one (if possible) until reaching need to run every test from the following path, where the general _conftest_ is: ```shell script -cd wazuh/tests/integration/test_aws/ +cd wazuh-qa/tests/integration ``` To run any test, we just need to call `pytest` from `python3` using the following line: @@ -164,22 +149,32 @@ check its documentation for further information. #### AWS integration tests example ```bash -#root@wazuh-master:/wazuh/tests/integration# pytest -x test_aws/ --disable-warnings -==================================== test session starts ==================================== -platform linux -- Python 3.10.12, pytest-7.1.2, pluggy-1.2.0 -rootdir: /wazuh/tests/integration, configfile: pytest.ini -plugins: testinfra-5.0.0, metadata-3.0.0, html-3.1.1 -collected 195 items - -test_aws/test_basic.py ................ [ 8%] -test_aws/test_discard_regex.py .............. [ 15%] -test_aws/test_log_groups.py .. [ 16%] -test_aws/test_only_logs_after.py .............................................x. [ 40%] -test_aws/test_parser.py .......................... [ 53%] -test_aws/test_path.py .......................................... [ 75%] -test_aws/test_path_suffix.py ......... [ 80%] -test_aws/test_regions.py ........................ [ 92%] -test_aws/test_remove_from_bucket.py ...sss......... 
[100%] - -============ 191 passed, 3 skipped, 1 xfailed, 7 warnings in 3723.08s (1:02:03) ============= +# python3 -m pytest -vvx test_aws/ -k cloudtrail +=========================================================== test session starts ====================================================== +platform linux -- Python 3.10.6, pytest-7.1.2, pluggy-1.0.0 -- /usr/bin/python3 +cachedir: .pytest_cache +metadata: {'Python': '3.10.6', 'Platform': 'Linux-5.15.0-58-generic-x86_64-with-glibc2.35', +'Packages': {'pytest': '7.1.2', 'py': '1.10.0', 'pluggy': '1.0.0'}, +'Plugins': {'metadata': '2.0.2', 'html': '3.1.1', 'testinfra': '5.0.0'}} +rootdir: /home/vagrant/qa/tests/integration, configfile: pytest.ini +plugins: metadata-2.0.2, html-3.1.1, testinfra-5.0.0 +collected 15 items + +test_aws/test_basic.py::test_defaults[cloudtrail_defaults] PASSED [ 6%] +test_aws/test_discard_regex.py::test_discard_regex[cloudtrail_discard_regex] PASSED [ 13%] +test_aws/test_only_logs_after.py::test_without_only_logs_after[cloudtrail_without_only_logs_after] PASSED [ 20%] +test_aws/test_only_logs_after.py::test_with_only_logs_after[cloudtrail_with_only_logs_after] PASSED [ 26%] +test_aws/test_only_logs_after.py::test_multiple_calls[cloudtrail_only_logs_after_multiple_calls] PASSED [ 33%] +test_aws/test_path.py::test_path[cloudtrail_path_with_data] PASSED [ 40%] +test_aws/test_path.py::test_path[cloudtrail_path_without_data] PASSED [ 46%] +test_aws/test_path.py::test_path[cloudtrail_inexistent_path] PASSED [ 53%] +test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_path_suffix_with_data] PASSED [ 60%] +test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_path_suffix_without_data] PASSED [ 66%] +test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_inexistent_path_suffix] PASSED [ 73%] +test_aws/test_regions.py::test_regions[cloudtrail_region_with_data] PASSED [ 80%] +test_aws/test_regions.py::test_regions[cloudtrail_regions_with_data] PASSED [ 86%] +test_aws/test_regions.py::test_regions[cloudtrail_inexistent_region] PASSED [ 93%] +test_aws/test_remove_from_bucket.py::test_remove_from_bucket[cloudtrail_remove_from_bucket] PASSED [100%] + +=============================================== 15 passed, 2 warnings in 332.67s (0:05:32) =========================================== ``` diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 70a4bf3f299..2dfb6bde491 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -1,45 +1,19 @@ -# Copyright (C) 2015-2023, Wazuh Inc. -# Created by Wazuh, Inc. . -# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 - -""" -This module contains all necessary components (fixtures, classes, methods)to configure the test for its execution. 
-""" - import pytest -from os.path import join - -# Qa-integration-framework imports from wazuh_testing.logger import logger -from wazuh_testing.constants.aws import ( +from wazuh_testing.modules.aws import ( FAKE_CLOUDWATCH_LOG_GROUP, PERMANENT_CLOUDWATCH_LOG_GROUP, ) -from wazuh_testing.modules.aws.utils import ( +from wazuh_testing.modules.aws.cloudwatch_utils import ( create_log_events, create_log_group, create_log_stream, delete_log_group, delete_log_stream, - delete_file, - file_exists, - upload_file ) -from wazuh_testing.modules.aws.utils import delete_s3_db, delete_services_db +from wazuh_testing.modules.aws.db_utils import delete_s3_db, delete_services_db +from wazuh_testing.modules.aws.s3_utils import delete_file, file_exists, upload_file from wazuh_testing.utils.services import control_service -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) -from wazuh_testing.modules.monitord import configuration as monitord_config - -# Local imports -from .utils import TEST_DATA_PATH, TEMPLATE_DIR, TEST_CASES_DIR, WAZUH_MODULES_DEBUG - - -# Set local internal options -local_internal_options = {WAZUH_MODULES_DEBUG: '2', - monitord_config.MONITORD_ROTATE_LOG: '0'} @pytest.fixture @@ -77,6 +51,7 @@ def upload_and_delete_file_to_s3(metadata): metadata['uploaded_file'] = filename yield + if file_exists(filename=filename, bucket_name=bucket_name): delete_file(filename=filename, bucket_name=bucket_name) logger.debug('Deleted file: %s from bucket %s', filename, bucket_name) @@ -164,8 +139,9 @@ def fixture_delete_log_stream(metadata): delete_log_stream(log_stream=log_stream) logger.debug('Deleted log stream: %s', log_stream) - # DB fixtures + + @pytest.fixture def clean_s3_cloudtrail_db(): """Delete the DB file before and after the test execution""" @@ -184,56 +160,3 @@ def clean_aws_services_db(): yield delete_services_db() - - -class TestConfigurator: - """ - TestConfigurator class is responsible for configuring test data and parameters for a specific test module. - - Attributes: - - module (str): The name of the test module. - - configuration_path (str): The path to the configuration directory for the test module. - - test_cases_path (str): The path to the test cases directory for the test module. - - metadata (list): Test metadata retrieved from the test cases. - - parameters (list): Test parameters retrieved from the test cases. - - cases_ids (list): Identifiers for the test cases. - - test_configuration_template (list): The loaded configuration template for the test module. - - """ - def __init__(self, module): - self.module = module - self.configuration_path = join(TEST_DATA_PATH, TEMPLATE_DIR, self.module) - self.test_cases_path = join(TEST_DATA_PATH, TEST_CASES_DIR, self.module) - self.metadata = None - self.parameters = None - self.cases_ids = None - self.test_configuration_template = None - - def configure_test(self, configuration_file="", cases_file=""): - """ - Configures the test data and parameters for the given test module. - - Args: - - configuration_file (str): The name of the configuration file. - - cases_file (str): The name of the test cases file. 
- - Returns: - None - """ - # Set test cases path - cases_path = join(self.test_cases_path, cases_file) - - # Get test cases data - self.parameters, self.metadata, self.cases_ids = get_test_cases_data(cases_path) - - # Set test configuration template for tests with config files - if configuration_file != "": - # Set config path - configurations_path = join(self.configuration_path, configuration_file) - - # load configuration template - self.test_configuration_template = load_configuration_template( - configurations_path, - self.parameters, - self.metadata - ) diff --git a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_discard_regex.yaml b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_discard_regex.yaml new file mode 100644 index 00000000000..cd7e6175e8c --- /dev/null +++ b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_discard_regex.yaml @@ -0,0 +1,23 @@ +- sections: + - section: wodle + attributes: + - name: aws-s3 + elements: + - disabled: + value: 'no' + - bucket: + attributes: + - type: BUCKET_TYPE + elements: + - aws_profile: + value: qa + - name: + value: BUCKET_NAME + - only_logs_after: + value: 2022-NOV-20 + - path: + value: PATH + - discard_regex: + attributes: + - field: DISCARD_FIELD + value: DISCARD_REGEX diff --git a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_discard_regex.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_discard_regex.yaml new file mode 100644 index 00000000000..7c37319fb6e --- /dev/null +++ b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_discard_regex.yaml @@ -0,0 +1,238 @@ +- name: cloudtrail_discard_regex + description: CloudTrail discard regex configurations + configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests + DISCARD_FIELD: eventSource + DISCARD_REGEX: .*ec2.amazonaws.com.* + PATH: '' + metadata: + bucket_type: cloudtrail + bucket_name: wazuh-cloudtrail-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: eventSource + discard_regex: .*ec2.amazonaws.com.* + found_logs: 5 + skipped_logs: 1 + +- name: vpc_discard_regex + description: VPC discard regex configurations + configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests + DISCARD_FIELD: srcport + DISCARD_REGEX: "5319" + PATH: '' + metadata: + bucket_type: vpcflow + bucket_name: wazuh-vpcflow-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: srcport + discard_regex: "5319" + found_logs: 5 + skipped_logs: 1 + +- name: config_discard_regex + description: Config discard regex configurations + configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: wazuh-config-integration-tests + DISCARD_FIELD: configuration.complianceType + DISCARD_REGEX: .*COMPLIANT.* + PATH: '' + metadata: + bucket_type: config + bucket_name: wazuh-config-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: configuration.complianceType + discard_regex: .*COMPLIANT.* + found_logs: 5 + skipped_logs: 1 + +- name: alb_discard_regex + description: ALB discard regex configurations + configuration_parameters: + BUCKET_TYPE: alb + BUCKET_NAME: wazuh-alb-integration-tests + DISCARD_FIELD: elb_status_code + DISCARD_REGEX: '401' + PATH: '' + metadata: + bucket_type: alb + bucket_name: wazuh-alb-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: elb_status_code 
+ discard_regex: '401' + found_logs: 5 + skipped_logs: 1 + +- name: clb_discard_regex + description: CLB discard regex configurations + configuration_parameters: + BUCKET_TYPE: clb + BUCKET_NAME: wazuh-clb-integration-tests + DISCARD_FIELD: elb_status_code + DISCARD_REGEX: '401' + PATH: '' + metadata: + bucket_type: clb + bucket_name: wazuh-clb-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: elb_status_code + discard_regex: '401' + found_logs: 5 + skipped_logs: 1 + +- name: nlb_discard_regex + description: NLB discard regex configurations + configuration_parameters: + BUCKET_TYPE: nlb + BUCKET_NAME: wazuh-nlb-integration-tests + DISCARD_FIELD: listener + DISCARD_REGEX: 0CMK2UAG108C7AXK + PATH: '' + metadata: + bucket_type: nlb + bucket_name: wazuh-nlb-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: listener + discard_regex: 0CMK2UAG108C7AXK + found_logs: 5 + skipped_logs: 1 + +- name: kms_discard_regex + description: KMS discard regex configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-kms-integration-tests + DISCARD_FIELD: eventName + DISCARD_REGEX: MatchDataKey + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-kms-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: eventName + discard_regex: MatchDataKey + found_logs: 3 + skipped_logs: 1 + +- name: macie_discard_regex + description: Macie discard regex configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-macie-integration-tests + DISCARD_FIELD: severity + DISCARD_REGEX: LOW + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-macie-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: severity + discard_regex: LOW + found_logs: 3 + skipped_logs: 1 + +- name: trusted_advisor_discard_regex + description: Trusted Advisor discard regex configurations + configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-trusted-advisor-integration-tests + DISCARD_FIELD: status + DISCARD_REGEX: ERROR + PATH: '' + metadata: + bucket_type: custom + bucket_name: wazuh-trusted-advisor-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: status + discard_regex: ERROR + found_logs: 3 + skipped_logs: 1 + +- name: guardduty_discard_regex + description: GuardDuty discard regex configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-guardduty-integration-tests + DISCARD_FIELD: partition + DISCARD_REGEX: aws-test + PATH: '' + metadata: + bucket_type: guardduty + bucket_name: wazuh-guardduty-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: partition + discard_regex: aws-test + found_logs: 3 + skipped_logs: 1 + +- name: native_guardduty_discard_regex + description: Native GuardDuty discard regex configurations + configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-native-guardduty-integration-tests + DISCARD_FIELD: partition + DISCARD_REGEX: aws-test + PATH: '' + metadata: + bucket_type: guardduty + bucket_name: wazuh-native-guardduty-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: partition + discard_regex: aws-test + found_logs: 3 + skipped_logs: 1 + +- name: waf_discard_regex + description: WAF discard regex configurations + configuration_parameters: + BUCKET_TYPE: waf + BUCKET_NAME: wazuh-waf-integration-tests + DISCARD_FIELD: action + DISCARD_REGEX: ALLOW + PATH: '' + metadata: + bucket_type: waf + bucket_name: wazuh-waf-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: action + 
discard_regex: ALLOW + found_logs: 3 + skipped_logs: 1 + +- name: server_access_discard_regex + description: Server Access discard regex configurations + configuration_parameters: + BUCKET_TYPE: server_access + BUCKET_NAME: wazuh-server-access-integration-tests + DISCARD_FIELD: http_status + DISCARD_REGEX: '200' + PATH: '' + metadata: + bucket_type: server_access + bucket_name: wazuh-server-access-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: http_status + discard_regex: '200' + found_logs: 3 + skipped_logs: 1 + +- name: cisco_umbrella_discard_regex + description: CloudTrail discard regex configurations + configuration_parameters: + BUCKET_TYPE: cisco_umbrella + BUCKET_NAME: wazuh-umbrella-integration-tests + DISCARD_FIELD: action + DISCARD_REGEX: Blocked + PATH: dnslogs + metadata: + bucket_type: cisco_umbrella + bucket_name: wazuh-umbrella-integration-tests + only_logs_after: 2022-NOV-20 + discard_field: action + discard_regex: Blocked + found_logs: 3 + skipped_logs: 1 + path: dnslogs diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py index 791f644edb9..f6707aa0947 100644 --- a/tests/integration/test_aws/test_basic.py +++ b/tests/integration/test_aws/test_basic.py @@ -1,39 +1,44 @@ -# Copyright (C) 2015, Wazuh Inc. -# Created by Wazuh, Inc. . -# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 - -""" -This module will contain all cases for the basic test suite -""" - +import os import pytest # qa-integration-framework imports from wazuh_testing import session_parameters +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) # Local module imports -from . 
import event_monitor -from .utils import ERROR_MESSAGE -from .conftest import TestConfigurator, local_internal_options +from .utils import ERROR_MESSAGES pytestmark = [pytest.mark.server] -# Set test configurator for the module -configurator = TestConfigurator(module='basic_test_module') + +# Generic vars +MODULE = 'basic_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) # -------------------------------------------- TEST_BUCKET_DEFAULTS ---------------------------------------------------- -# Configure T1 test -configurator.configure_test(configuration_file='bucket_configuration_defaults.yaml', - cases_file='cases_bucket_defaults.yaml') +# Configuration and cases +t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_defaults.yaml') +t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_defaults.yaml') + +# Enabled test configurations +t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) +t1_configurations = load_configuration_template( + t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) def test_bucket_defaults( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. 
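
The hunk above restores the module-level wiring that reads the YAML test cases and renders the configuration template directly with `get_test_cases_data` and `load_configuration_template`, instead of going through the `TestConfigurator` helper. As a rough, self-contained sketch of that pattern (illustrative only, not part of the patch; the module name and YAML paths below are hypothetical placeholders):

```python
# Sketch of the case-loading pattern used across these test modules.
# Assumes the qa-integration-framework package is installed; the file paths are invented.
import os

from wazuh_testing.utils.configuration import (
    get_test_cases_data,
    load_configuration_template,
)

TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
cases_path = os.path.join(TEST_DATA_PATH, 'test_cases', 'example_test_module', 'cases_example.yaml')
template_path = os.path.join(TEST_DATA_PATH, 'configuration_template', 'example_test_module',
                             'configuration_example.yaml')

# Each YAML case yields one set of template parameters, one metadata dict and one readable test id.
parameters, metadata, case_ids = get_test_cases_data(cases_path)

# The ossec.conf template is rendered once per case with those parameters.
configurations = load_configuration_template(template_path, parameters, metadata)

# A test module then parametrizes its tests with the rendered configurations, e.g.:
#   @pytest.mark.parametrize('configuration, metadata', zip(configurations, metadata), ids=case_ids)
```
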
@@ -97,7 +102,7 @@ def test_bucket_defaults( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -105,7 +110,7 @@ def test_bucket_defaults( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] # Detect any ERROR message log_monitor.start( @@ -113,22 +118,26 @@ def test_bucket_defaults( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] # -------------------------------------------- TEST_CLOUDWATCH_DEFAULTS ------------------------------------------------ -# Configure T2 test -configurator.configure_test(configuration_file='cloudwatch_configuration_defaults.yaml', - cases_file='cases_cloudwatch_defaults.yaml') +# Configuration and cases data +t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'cloudwatch_configuration_defaults.yaml') +t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_defaults.yaml') + +# Enabled test configurations +t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) +configurations = load_configuration_template( + t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(configurations, t2_configuration_metadata), ids=t2_case_ids) def test_service_defaults( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. 
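
The assertion hunks above repeat the same three-step check in every test: wait for the module start line, verify the command line the wodle was called with, and confirm that no ERROR lines were logged, each guarded by a message from `ERROR_MESSAGES`. A condensed sketch of that idiom as a helper (hypothetical, not part of the patch) makes the flow explicit:

```python
# Hypothetical helper that condenses the repeated log-monitor assertions shown above.
# It uses only names that appear in this patch series; the helper itself is illustrative.
from wazuh_testing import session_parameters
from wazuh_testing.modules.aws import event_monitor

from .utils import ERROR_MESSAGES


def assert_aws_module_ran(log_monitor, parameters):
    """Assert that the aws-s3 wodle started, was called correctly and logged no errors."""
    # The AWS module must report that it started.
    log_monitor.start(timeout=session_parameters.default_timeout,
                      callback=event_monitor.callback_detect_aws_module_start)
    assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start']

    # The wodle must have been invoked with the expected command-line parameters.
    log_monitor.start(timeout=session_parameters.default_timeout,
                      callback=event_monitor.callback_detect_aws_module_called(parameters))
    assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters']

    # No ERROR lines should appear anywhere in the run.
    log_monitor.start(timeout=session_parameters.default_timeout,
                      callback=event_monitor.callback_detect_all_aws_err)
    assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found']
```
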
@@ -198,7 +207,7 @@ def test_service_defaults( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -206,7 +215,7 @@ def test_service_defaults( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] # Detect any ERROR message log_monitor.start( @@ -214,22 +223,26 @@ def test_service_defaults( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] # ------------------------------------------ TEST_INSPECTOR_DEFAULTS --------------------------------------------------- -# Configure T3 test -configurator.configure_test(configuration_file='inspector_configuration_defaults.yaml', - cases_file='cases_inspector_defaults.yaml') +# Configuration and cases data +t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'inspector_configuration_defaults.yaml') +t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_defaults.yaml') + +# Enabled test configurations +t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) +configurations = load_configuration_template( + t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(configurations, t3_configuration_metadata), ids=t3_case_ids) def test_inspector_defaults( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. 
@@ -297,7 +310,7 @@ def test_inspector_defaults( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -305,7 +318,7 @@ def test_inspector_defaults( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] # Detect any ERROR message log_monitor.start( @@ -313,4 +326,4 @@ def test_inspector_defaults( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] diff --git a/tests/integration/test_aws/test_discard_regex.py b/tests/integration/test_aws/test_discard_regex.py index 3f4ca724974..54cd7f27d9b 100644 --- a/tests/integration/test_aws/test_discard_regex.py +++ b/tests/integration/test_aws/test_discard_regex.py @@ -1,47 +1,46 @@ -# Copyright (C) 2015, Wazuh Inc. -# Created by Wazuh, Inc. . -# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 - -""" -This module will contain all cases for the discard_regex test suite -""" - +import os import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH, AWS_SERVICES_DB_PATH -from wazuh_testing.modules.aws.utils import path_exist +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) +from wazuh_testing.modules.aws.db_utils import s3_db_exists # Local module imports -from . 
import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT -from .conftest import TestConfigurator, local_internal_options - +from .utils import ERROR_MESSAGES, TIMEOUTS pytestmark = [pytest.mark.server] -# Set test configurator for the module -configurator = TestConfigurator(module='discard_regex_test_module') -# --------------------------------------------- TEST_BUCKET_DISCARD_REGEX --------------------------------------------- -# Configure T1 test -configurator.configure_test(configuration_file='configuration_bucket_discard_regex.yaml', - cases_file='cases_bucket_discard_regex.yaml') +# Generic vars +MODULE = 'discard_regex_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) + +# -----------------------------------------opvb----------- TEST_PATH ------------------------------------------------------- +configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_discard_regex.yaml') +cases_path = os.path.join(TEST_CASES_PATH, 'cases_discard_regex.yaml') + +configuration_parameters, configuration_metadata, case_ids = get_test_cases_data(cases_path) +configurations = load_configuration_template( + configurations_path, configuration_parameters, configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) -def test_bucket_discard_regex( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, +@pytest.mark.parametrize('configuration, metadata', zip(configurations, configuration_metadata), ids=case_ids) +def test_discard_regex( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): """ - description: Check that some bucket logs are excluded when the regex and field defined in - match an event. - + description: Fetch logs excluding the ones that match with the regex. test_phases: - setup: - Load Wazuh light configuration. @@ -58,9 +57,7 @@ def test_bucket_discard_regex( - Truncate wazuh logs. - Restore initial configuration, both ossec.conf and local_internal_options.conf. - Delete the uploaded file - wazuh_min_version: 4.6.0 - parameters: - configuration: type: dict @@ -89,15 +86,13 @@ def test_bucket_discard_regex( - file_monitoring: type: fixture brief: Handle the monitoring of a specified file. - assertions: - Check in the log that the module was called with correct parameters. - Check the expected number of events were forwarded to analysisd. - Check the database was created and updated accordingly. - input_description: - - The `configuration_bucket_discard_regex` file provides the module configuration for this test. - - The `cases_bucket_discard_regex` file provides the test cases. + - The `configuration_discard_regex` file provides the module configuration for this test. + - The `cases_discard_regex` file provides the test cases. 
""" bucket_name = metadata['bucket_name'] bucket_type = metadata['bucket_type'] @@ -108,8 +103,7 @@ def test_bucket_discard_regex( skipped_logs = metadata['skipped_logs'] path = metadata['path'] if 'path' in metadata else None - pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field.' \ - ' The event will be skipped.' + pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field. The event will be skipped.' parameters = [ 'wodles/aws/aws-s3', @@ -132,7 +126,7 @@ def test_bucket_discard_regex( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -140,385 +134,20 @@ def test_bucket_discard_regex( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] log_monitor.start( - timeout=TIMEOUT[20], + timeout=TIMEOUTS[20], callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), accumulations=found_logs + skipped_logs ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] - - assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) - - -# ----------------------------------------- TEST_CLOUDWATCH_DISCARD_REGEX_JSON ---------------------------------------- -# Configure T2 test -configurator.configure_test(configuration_file='configuration_cloudwatch_discard_regex_json.yaml', - cases_file='cases_cloudwatch_discard_regex_json.yaml') - - -@pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) -def test_cloudwatch_discard_regex_json( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, -): - """ - description: Check that some CloudWatch JSON logs are excluded when the regex and field defined in - match an event. - - test_phases: - - setup: - - Load Wazuh light configuration. - - Apply ossec.conf configuration changes according to the configuration template and use case. - - Apply custom settings in local_internal_options.conf. - - Truncate wazuh logs. - - Restart wazuh-manager service to apply configuration changes. - - test: - - Check in the ossec.log that a line has appeared calling the module with correct parameters. - - Check the expected number of events were forwarded to analysisd, only logs stored in the bucket and skips - the ones that match with regex. - - Check the database was created and updated accordingly. - - teardown: - - Truncate wazuh logs. - - Restore initial configuration, both ossec.conf and local_internal_options.conf. - - Delete the uploaded file + assert s3_db_exists() - wazuh_min_version: 4.6.0 - - parameters: - - configuration: - type: dict - brief: Get configurations from the module. - - metadata: - type: dict - brief: Get metadata from the module. - - load_wazuh_basic_configuration: - type: fixture - brief: Load basic wazuh configuration. - - set_wazuh_configuration: - type: fixture - brief: Apply changes to the ossec.conf configuration. 
- - clean_aws_services_db: - type: fixture - brief: Delete the DB file before and after the test execution. - - configure_local_internal_options_function: - type: fixture - brief: Apply changes to the local_internal_options.conf configuration. - - truncate_monitored_files: - type: fixture - brief: Truncate wazuh logs. - - restart_wazuh_daemon_function: - type: fixture - brief: Restart the wazuh service. - - file_monitoring: - type: fixture - brief: Handle the monitoring of a specified file. - - assertions: - - Check in the log that the module was called with correct parameters. - - Check the expected number of events were forwarded to analysisd. - - Check the database was created and updated accordingly. - - input_description: - - The `configuration_cloudwatch_discard_regex` file provides the module configuration for this test. - - The `cases_cloudwatch_discard_regex` file provides the test cases. - """ - log_group_name = metadata.get('log_group_name') - service_type = metadata.get('service_type') - only_logs_after = metadata.get('only_logs_after') - regions: str = metadata.get('regions') - discard_field = metadata.get('discard_field', None) - discard_regex = metadata.get('discard_regex') - found_logs = metadata.get('found_logs') - - pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field.' \ - ' The event will be skipped.' - - parameters = [ - 'wodles/aws/aws-s3', - '--service', service_type, - '--aws_profile', 'qa', - '--only_logs_after', only_logs_after, - '--regions', regions, - '--aws_log_groups', log_group_name, - '--discard-field', discard_field, - '--discard-regex', discard_regex, - '--debug', '2' - ] - - # Check AWS module started + # Detect any ERROR message log_monitor.start( timeout=session_parameters.default_timeout, - callback=event_monitor.callback_detect_aws_module_start + callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] - - # Check command was called correctly - log_monitor.start( - timeout=session_parameters.default_timeout, - callback=event_monitor.callback_detect_aws_module_called(parameters) - ) - - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] - - log_monitor.start( - timeout=TIMEOUT[20], - callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), - accumulations=found_logs - ) - - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] - - assert path_exist(path=AWS_SERVICES_DB_PATH) - - -# ------------------------------------- TEST_CLOUDWATCH_DISCARD_REGEX_SIMPLE_TEXT ------------------------------------- -# Configure T3 test -configurator.configure_test(configuration_file='configuration_cloudwatch_discard_regex_simple_text.yaml', - cases_file='cases_cloudwatch_discard_regex_simple_text.yaml') - - -@pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) -def test_cloudwatch_discard_regex_simple_text( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, -): - """ - description: Check that some CloudWatch simple text logs are excluded when the regex defined in - matches an event. - - test_phases: - - setup: - - Load Wazuh light configuration. 
- - Apply ossec.conf configuration changes according to the configuration template and use case. - - Apply custom settings in local_internal_options.conf. - - Truncate wazuh logs. - - Restart wazuh-manager service to apply configuration changes. - - test: - - Check in the ossec.log that a line has appeared calling the module with correct parameters. - - Check the expected number of events were forwarded to analysisd, only logs stored in the bucket and skips - the ones that match with regex. - - Check the database was created and updated accordingly. - - teardown: - - Truncate wazuh logs. - - Restore initial configuration, both ossec.conf and local_internal_options.conf. - - Delete the uploaded file - - wazuh_min_version: 4.6.0 - - parameters: - - configuration: - type: dict - brief: Get configurations from the module. - - metadata: - type: dict - brief: Get metadata from the module. - - load_wazuh_basic_configuration: - type: fixture - brief: Load basic wazuh configuration. - - set_wazuh_configuration: - type: fixture - brief: Apply changes to the ossec.conf configuration. - - clean_aws_services_db: - type: fixture - brief: Delete the DB file before and after the test execution. - - configure_local_internal_options_function: - type: fixture - brief: Apply changes to the local_internal_options.conf configuration. - - truncate_monitored_files: - type: fixture - brief: Truncate wazuh logs. - - restart_wazuh_daemon_function: - type: fixture - brief: Restart the wazuh service. - - file_monitoring: - type: fixture - brief: Handle the monitoring of a specified file. - - assertions: - - Check in the log that the module was called with correct parameters. - - Check the expected number of events were forwarded to analysisd. - - Check the database was created and updated accordingly. - - input_description: - - The `configuration_cloudwatch_discard_regex_simple_text` file provides - the module configuration for this test. - - The `cases_cloudwatch_discard_regex_simple_text` file provides the test cases. - """ - log_group_name = metadata.get('log_group_name') - service_type = metadata.get('service_type') - only_logs_after = metadata.get('only_logs_after') - regions: str = metadata.get('regions') - discard_regex = metadata.get('discard_regex') - found_logs = metadata.get('found_logs') - - pattern = fr'.*The "{discard_regex}" regex found a match. The event will be skipped.' 
- - parameters = [ - 'wodles/aws/aws-s3', - '--service', service_type, - '--aws_profile', 'qa', - '--only_logs_after', only_logs_after, - '--regions', regions, - '--aws_log_groups', log_group_name, - '--discard-regex', discard_regex, - '--debug', '2' - ] - - # Check AWS module started - log_monitor.start( - timeout=session_parameters.default_timeout, - callback=event_monitor.callback_detect_aws_module_start - ) - - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] - - # Check command was called correctly - log_monitor.start( - timeout=session_parameters.default_timeout, - callback=event_monitor.callback_detect_aws_module_called(parameters) - ) - - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] - - log_monitor.start( - timeout=TIMEOUT[20], - callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), - accumulations=found_logs - ) - - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] - - assert path_exist(path=AWS_SERVICES_DB_PATH) - - -# ------------------------------------------- TEST_INSPECTOR_DISCARD_REGEX -------------------------------------------- -# Configure T4 test -configurator.configure_test(configuration_file='configuration_inspector_discard_regex.yaml', - cases_file='cases_inspector_discard_regex.yaml') - - -@pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) -def test_inspector_discard_regex( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, -): - """ - description: Check that some Inspector logs are excluded when the regex and field defined in - match an event. - - test_phases: - - setup: - - Load Wazuh light configuration. - - Apply ossec.conf configuration changes according to the configuration template and use case. - - Apply custom settings in local_internal_options.conf. - - Truncate wazuh logs. - - Restart wazuh-manager service to apply configuration changes. - - test: - - Check in the ossec.log that a line has appeared calling the module with correct parameters. - - Check the expected number of events were forwarded to analysisd, only logs stored in the bucket and skips - the ones that match with regex. - - Check the database was created and updated accordingly. - - teardown: - - Truncate wazuh logs. - - Restore initial configuration, both ossec.conf and local_internal_options.conf. - - Delete the uploaded file - - wazuh_min_version: 4.6.0 - - parameters: - - configuration: - type: dict - brief: Get configurations from the module. - - metadata: - type: dict - brief: Get metadata from the module. - - load_wazuh_basic_configuration: - type: fixture - brief: Load basic wazuh configuration. - - set_wazuh_configuration: - type: fixture - brief: Apply changes to the ossec.conf configuration. - - clean_aws_services_db: - type: fixture - brief: Delete the DB file before and after the test execution. - - configure_local_internal_options_function: - type: fixture - brief: Apply changes to the local_internal_options.conf configuration. - - truncate_monitored_files: - type: fixture - brief: Truncate wazuh logs. - - restart_wazuh_daemon_function: - type: fixture - brief: Restart the wazuh service. 
- - file_monitoring: - type: fixture - brief: Handle the monitoring of a specified file. - - assertions: - - Check in the log that the module was called with correct parameters. - - Check the expected number of events were forwarded to analysisd. - - Check the database was created and updated accordingly. - - input_description: - - The `configuration_inspector_discard_regex` file provides the module configuration for this test. - - The `cases_inspector_discard_regex` file provides the test cases. - """ - service_type = metadata.get('service_type') - only_logs_after = metadata.get('only_logs_after') - regions: str = metadata.get('regions') - discard_field = metadata.get('discard_field', '') - discard_regex = metadata.get('discard_regex') - found_logs = metadata.get('found_logs') - - pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field.' \ - ' The event will be skipped.' - - parameters = [ - 'wodles/aws/aws-s3', - '--service', service_type, - '--aws_profile', 'qa', - '--only_logs_after', only_logs_after, - '--regions', regions, - '--discard-field', discard_field, - '--discard-regex', discard_regex, - '--debug', '2' - ] - - # Check AWS module started - log_monitor.start( - timeout=session_parameters.default_timeout, - callback=event_monitor.callback_detect_aws_module_start - ) - - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] - - # Check command was called correctly - log_monitor.start( - timeout=session_parameters.default_timeout, - callback=event_monitor.callback_detect_aws_module_called(parameters) - ) - - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] - - log_monitor.start( - timeout=TIMEOUT[20], - callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), - accumulations=found_logs - ) - - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] - - assert path_exist(path=AWS_SERVICES_DB_PATH) + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] diff --git a/tests/integration/test_aws/test_log_groups.py b/tests/integration/test_aws/test_log_groups.py index 512132d1ebf..b806bfd186c 100644 --- a/tests/integration/test_aws/test_log_groups.py +++ b/tests/integration/test_aws/test_log_groups.py @@ -1,40 +1,44 @@ -""" -Copyright (C) 2015-2023, Wazuh Inc. -Created by Wazuh, Inc. . -This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 - -This module will contains all cases for the log groups test suite -""" - +import os import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.utils.db_queries.aws_db import get_multiple_service_db_row, table_exists -from wazuh_testing.modules.aws.utils import path_exist -from wazuh_testing.constants.paths.aws import AWS_SERVICES_DB_PATH -from wazuh_testing.modules.aws.patterns import NON_EXISTENT_SPECIFIED_LOG_GROUPS +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) +from wazuh_testing.modules.aws.db_utils import ( + get_multiple_service_db_row, + services_db_exists, + table_exists, +) # Local module imports -from . 
import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT -from .conftest import TestConfigurator, local_internal_options +from .utils import ERROR_MESSAGES, TIMEOUTS pytestmark = [pytest.mark.server] -# Set test configurator for the module -configurator = TestConfigurator(module='log_groups_test_module') + +# Generic vars +MODULE = 'log_groups_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) # ----------------------------------------------- TEST_AWS_LOG_GROUPS -------------------------------------------------- -# Configure T1 test -configurator.configure_test(configuration_file='configuration_log_groups.yaml', - cases_file='cases_log_groups.yaml') +t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_log_groups.yaml') +t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_log_groups.yaml') + +t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) +t1_configurations = load_configuration_template( + t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) def test_log_groups( configuration, metadata, create_log_stream, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, @@ -120,7 +124,7 @@ def test_log_groups( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -130,19 +134,19 @@ def test_log_groups( if expected_results: log_monitor.start( - timeout=TIMEOUT[20], + timeout=TIMEOUTS[20], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), accumulations=len(log_group_names.split(',')) ) else: log_monitor.start( - timeout=TIMEOUT[10], - callback=event_monitor.make_aws_callback(pattern=fr"{NON_EXISTENT_SPECIFIED_LOG_GROUPS}") + timeout=TIMEOUTS[10], + callback=event_monitor.make_aws_callback(r'.*The specified log group does not exist.'), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_no_existent_log_group'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_no_existent_log_group'] - assert path_exist(path=AWS_SERVICES_DB_PATH) + assert services_db_exists() if expected_results: log_group_list = log_group_names.split(",") @@ -157,4 +161,4 @@ def test_log_groups( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] diff --git a/tests/integration/test_aws/test_only_logs_after.py b/tests/integration/test_aws/test_only_logs_after.py index 93f0765804f..42a117f735a 100644 --- a/tests/integration/test_aws/test_only_logs_after.py +++ b/tests/integration/test_aws/test_only_logs_after.py @@ -1,47 +1,53 @@ -# Copyright (C) 2015, Wazuh 
Inc. -# Created by Wazuh, Inc. . -# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 - -""" -This module will contain all cases for the only logs after test suite -""" - +import os import pytest from datetime import datetime # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH, AWS_SERVICES_DB_PATH -from wazuh_testing.constants.aws import ONLY_LOGS_AFTER_PARAM, PATH_DATE_FORMAT, VPC_FLOW_TYPE, INSPECTOR_TYPE -from wazuh_testing.utils.db_queries.aws_db import get_multiple_s3_db_row, get_service_db_row, get_s3_db_row -from wazuh_testing.modules.aws.utils import (call_aws_module, create_log_events, create_log_stream, path_exist, - get_last_file_key, upload_file, analyze_command_output) -from wazuh_testing.modules.aws.patterns import (NO_LOG_PROCESSED, NO_BUCKET_LOG_PROCESSED, MARKER, NO_NEW_EVENTS, - EVENT_SENT) - -# Local module imports -from . import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT -from .conftest import TestConfigurator, local_internal_options +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules import aws as cons +from wazuh_testing.modules.aws import ONLY_LOGS_AFTER_PARAM, event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.modules.aws.cli_utils import call_aws_module +from wazuh_testing.modules.aws.cloudwatch_utils import ( + create_log_events, + create_log_stream, +) +from wazuh_testing.modules.aws.db_utils import ( + get_multiple_s3_db_row, + get_service_db_row, + s3_db_exists, + services_db_exists, + get_s3_db_row, +) +from wazuh_testing.modules.aws.s3_utils import get_last_file_key, upload_file +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) + +from .utils import ERROR_MESSAGES, TIMEOUTS pytestmark = [pytest.mark.server] -# Set test configurator for the module -configurator = TestConfigurator(module='only_logs_after_test_module') -import pydevd_pycharm -pydevd_pycharm.settrace('192.168.56.1', port=55555, stdoutToServer=True, stderrToServer=True) +# Generic vars +MODULE = 'only_logs_after_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) # --------------------------------------------- TEST_BUCKET_WITHOUT_ONLY_LOGS_AFTER ------------------------------------ -# Configure T1 test -configurator.configure_test(configuration_file='bucket_configuration_without_only_logs_after.yaml', - cases_file='cases_bucket_without_only_logs_after.yaml') +t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_without_only_logs_after.yaml') +t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_without_only_logs_after.yaml') + +t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) +t1_configurations = load_configuration_template( + t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) def 
test_bucket_without_only_logs_after( configuration, metadata, upload_and_delete_file_to_s3, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, @@ -128,7 +134,7 @@ def test_bucket_without_only_logs_after( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -136,7 +142,7 @@ def test_bucket_without_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] log_monitor.start( timeout=session_parameters.default_timeout, @@ -144,9 +150,9 @@ def test_bucket_without_only_logs_after( accumulations=expected_results ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] - assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) + assert s3_db_exists() data = get_s3_db_row(table_name=table_name) @@ -159,19 +165,21 @@ def test_bucket_without_only_logs_after( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] # -------------------------------------------- TEST_SERVICE_WITHOUT_ONLY_LOGS_AFTER ------------------------------------ -# Configure T2 test -configurator.configure_test(configuration_file='service_configuration_without_only_logs_after.yaml', - cases_file='cases_service_without_only_logs_after.yaml') +t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'service_configuration_without_only_logs_after.yaml') +t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_service_without_only_logs_after.yaml') + +t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) +t2_configurations = load_configuration_template( + t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) def test_service_without_only_logs_after( configuration, metadata, create_log_stream_in_existent_group, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, @@ -253,7 +261,7 @@ def test_service_without_only_logs_after( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -261,9 +269,9 @@ def test_service_without_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, 
ERROR_MESSAGES['incorrect_parameters'] - assert path_exist(path=AWS_SERVICES_DB_PATH) + assert services_db_exists() data = get_service_db_row(table_name="cloudwatch_logs") @@ -277,19 +285,21 @@ def test_service_without_only_logs_after( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] # --------------------------------------------- TEST_BUCKET_WITH_ONLY_LOGS_AFTER --------------------------------------- -# Configure T3 test -configurator.configure_test(configuration_file='bucket_configuration_with_only_logs_after.yaml', - cases_file='cases_bucket_with_only_logs_after.yaml') +t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_with_only_logs_after.yaml') +t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_with_only_logs_after.yaml') + +t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) +t3_configurations = load_configuration_template( + t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t3_configurations, t3_configuration_metadata), ids=t3_case_ids) def test_bucket_with_only_logs_after( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -374,7 +384,7 @@ def test_bucket_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -382,17 +392,17 @@ def test_bucket_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] log_monitor.start( - timeout=TIMEOUT[20], + timeout=TIMEOUTS[20], callback=event_monitor.callback_detect_event_processed, accumulations=expected_results ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] - assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) + assert s3_db_exists() for row in get_multiple_s3_db_row(table_name=table_name): assert bucket_name in row.bucket_path @@ -406,19 +416,21 @@ def test_bucket_with_only_logs_after( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] # --------------------------------------------TEST_CLOUDWATCH_WITH_ONLY_LOGS_AFTER ------------------------------------- -# Configure T4 test -configurator.configure_test(configuration_file='cloudwatch_configuration_with_only_logs_after.yaml', - cases_file='cases_cloudwatch_with_only_logs_after.yaml') +t4_configurations_path = os.path.join(CONFIGURATIONS_PATH, 
'cloudwatch_configuration_with_only_logs_after.yaml') +t4_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_with_only_logs_after.yaml') + +t4_configuration_parameters, t4_configuration_metadata, t4_case_ids = get_test_cases_data(t4_cases_path) +t4_configurations = load_configuration_template( + t4_configurations_path, t4_configuration_parameters, t4_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t4_configurations, t4_configuration_metadata), ids=t4_case_ids) def test_cloudwatch_with_only_logs_after( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -503,7 +515,7 @@ def test_cloudwatch_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -511,16 +523,16 @@ def test_cloudwatch_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] log_monitor.start( - timeout=TIMEOUT[10], + timeout=TIMEOUTS[10], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] - assert path_exist(path=AWS_SERVICES_DB_PATH) + assert services_db_exists() data = get_service_db_row(table_name=table_name_map[service_type]) @@ -533,19 +545,21 @@ def test_cloudwatch_with_only_logs_after( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] # ------------------------------------------ TEST_INSPECTOR_WITH_ONLY_LOGS_AFTER --------------------------------------- -# Configure T5 test -configurator.configure_test(configuration_file='inspector_configuration_with_only_logs_after.yaml', - cases_file='cases_inspector_with_only_logs_after.yaml') +t5_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'inspector_configuration_with_only_logs_after.yaml') +t5_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_with_only_logs_after.yaml') + +t5_configuration_parameters, t5_configuration_metadata, t5_case_ids = get_test_cases_data(t5_cases_path) +t5_configurations = load_configuration_template( + t5_configurations_path, t5_configuration_parameters, t5_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t5_configurations, t5_configuration_metadata), ids=t5_case_ids) def test_inspector_with_only_logs_after( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, 
configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -628,7 +642,7 @@ def test_inspector_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -636,16 +650,16 @@ def test_inspector_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] log_monitor.start( - timeout=TIMEOUT[10], + timeout=TIMEOUTS[10], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] - assert path_exist(path=AWS_SERVICES_DB_PATH) + assert services_db_exists() data = get_service_db_row(table_name=table_name_map[service_type]) @@ -656,14 +670,13 @@ def test_inspector_with_only_logs_after( # ---------------------------------------------------- TEST_MULTIPLE_CALLS --------------------------------------------- -# Configure T6 test -configurator.configure_test(cases_file='cases_bucket_multiple_calls.yaml') +t5_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_multiple_calls.yaml') + +_, t5_configuration_metadata, t5_case_ids = get_test_cases_data(t5_cases_path) @pytest.mark.tier(level=1) -@pytest.mark.parametrize('metadata', - configurator.metadata, - ids=configurator.cases_ids) +@pytest.mark.parametrize('metadata', t5_configuration_metadata, ids=t5_case_ids) def test_bucket_multiple_calls( metadata, clean_s3_cloudtrail_db, load_wazuh_basic_configuration, restart_wazuh_function, delete_file_from_s3 ): @@ -705,7 +718,7 @@ def test_bucket_multiple_calls( brief: Restart the wazuh service. - delete_file_from_s3: type: fixture - brief: Delete the file after the test execution. + brief: Delete the file after the test execution. input_description: - The `cases_multiple_calls` file provides the test cases. """ @@ -726,71 +739,55 @@ def test_bucket_multiple_calls( base_parameters.extend(['--trail_prefix', path]) # Call the module without only_logs_after and check that no logs were processed - last_marker_key = datetime.utcnow().strftime(PATH_DATE_FORMAT) - - # Get bucket type - if bucket_type == VPC_FLOW_TYPE: - pattern = fr"{NO_LOG_PROCESSED}" - else: - pattern = fr"{NO_BUCKET_LOG_PROCESSED}" + last_marker_key = datetime.utcnow().strftime(cons.PATH_DATE_FORMAT) - # Check for the non 'processed' messages in the given output.
- analyze_command_output( + event_monitor.check_non_processed_logs_from_output( command_output=call_aws_module(*base_parameters), - callback=event_monitor.make_aws_callback(pattern), - expected_results=1, - error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] + bucket_type=bucket_type ) - # Call the module with only_logs_after set in the past and check that the expected number of logs were processed - analyze_command_output( + # Call the module with only_logs_after set in the past and check that the expected number of logs were + # processed + event_monitor.check_processed_logs_from_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-20'), - callback=event_monitor.callback_detect_event_processed, - expected_results=3, - error_message=ERROR_MESSAGE['incorrect_event_number'] + expected_results=3 ) # Call the module with the same parameters and check there were no duplicates expected_skipped_logs_step_3 = metadata.get('expected_skipped_logs_step_3', 1) - analyze_command_output( + event_monitor.check_non_processed_logs_from_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-20'), - callback=event_monitor.make_aws_callback(pattern), - expected_results=expected_skipped_logs_step_3, - error_message=ERROR_MESSAGE['incorrect_event_number'] + bucket_type=bucket_type, + expected_results=expected_skipped_logs_step_3 ) - # Call the module with only_logs_after set with an early date than the one set previously and check that no logs + # Call the module with only_logs_after set with an earlier date than the one set previously and check that no logs # were processed, there were no duplicates - analyze_command_output( + event_monitor.check_non_processed_logs_from_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-22'), - callback=event_monitor.make_aws_callback(pattern), - expected_results=expected_skipped_logs_step_3 - 1 if expected_skipped_logs_step_3 > 1 else 1, - error_message=ERROR_MESSAGE['incorrect_event_number'] + bucket_type=bucket_type, + expected_results=expected_skipped_logs_step_3 - 1 if expected_skipped_logs_step_3 > 1 else 1 ) # Upload a log file for the day of the test execution and call the module without only_logs_after and check that # only the uploaded logs were processed and the last marker is specified in the DB.
last_marker_key = get_last_file_key(bucket_type, bucket_name, datetime.utcnow()) metadata['filename'] = upload_file(bucket_type, bucket_name) - pattern = fr"{MARKER}{last_marker_key}" - analyze_command_output( + event_monitor.check_marker_from_output( command_output=call_aws_module(*base_parameters), - callback=event_monitor.make_aws_callback(pattern), - expected_results=1, - error_message=ERROR_MESSAGE['incorrect_marker'] + file_key=last_marker_key ) # -------------------------------------------- TEST_INSPECTOR_MULTIPLE_CALLS ------------------------------------------- -# Configure T7 test -configurator.configure_test(cases_file='cases_inspector_multiple_calls.yaml') +t6_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_multiple_calls.yaml') + +_, t6_configuration_metadata, t6_case_ids = get_test_cases_data(t6_cases_path) @pytest.mark.tier(level=1) -@pytest.mark.parametrize('metadata', - configurator.metadata, - ids=configurator.cases_ids) +@pytest.mark.parametrize('metadata', t6_configuration_metadata, ids=t6_case_ids) @pytest.mark.xfail def test_inspector_multiple_calls( metadata, clean_aws_services_db, load_wazuh_basic_configuration, restart_wazuh_function @@ -836,52 +833,43 @@ def test_inspector_multiple_calls( '--debug', '2' ] - if service_type == INSPECTOR_TYPE: - pattern = fr"{NO_NEW_EVENTS}" - else: - pattern = fr"{EVENT_SENT}" - # Call the module without only_logs_after and check that no logs were processed - analyze_command_output( - command_output=call_aws_module(*base_parameters), - callback=event_monitor.make_aws_callback(pattern), - error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] + event_monitor.check_service_non_processed_logs_from_output( + command_output=call_aws_module(*base_parameters), service_type=service_type, expected_results=1 ) - # Call the module with only_logs_after set in the past and check that the expected number of logs were processed. 
- analyze_command_output( + # Call the module with only_logs_after set in the past and check that the expected number of logs were + # processed + event_monitor.check_service_processed_logs_from_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-30'), - callback=event_monitor.callback_detect_service_event_processed( - expected_results=4, - service_type=service_type), - error_message=ERROR_MESSAGE['incorrect_event_number'] + service_type=service_type, + events_sent=4 ) # Call the module with the same parameters and check there were no duplicates - analyze_command_output( + event_monitor.check_service_non_processed_logs_from_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-30'), - callback=event_monitor.make_aws_callback(pattern), - error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] + service_type=service_type, + expected_results=1 ) - # Call the module with only_logs_after set with an early date than the one set previously and check that no logs + # Call the module with only_logs_after set with an earlier date than the one set previously and check that no logs # were processed, there were no duplicates - analyze_command_output( + event_monitor.check_service_non_processed_logs_from_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-31'), - callback=event_monitor.make_aws_callback(pattern), - error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] + service_type=service_type, + expected_results=1 ) # ----------------------------------------- TEST_CLOUDWATCH_MULTIPLE_CALLS --------------------------------------------- -# Configure T8 test -configurator.configure_test(cases_file='cases_cloudwatch_multiple_calls.yaml') +t7_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_multiple_calls.yaml') + +_, t7_configuration_metadata, t7_case_ids = get_test_cases_data(t7_cases_path) @pytest.mark.tier(level=1) -@pytest.mark.parametrize('metadata', - configurator.metadata, - ids=configurator.cases_ids) +@pytest.mark.parametrize('metadata', t7_configuration_metadata, ids=t7_case_ids) def test_cloudwatch_multiple_calls( metadata, clean_aws_services_db, load_wazuh_basic_configuration, restart_wazuh_function, delete_log_stream ): @@ -937,43 +925,32 @@ def test_cloudwatch_multiple_calls( '--debug', '2' ] - if service_type == INSPECTOR_TYPE: - pattern = fr"{NO_NEW_EVENTS}" - else: - pattern = fr"{EVENT_SENT}" - # Call the module without only_logs_after and check that no logs were processed - analyze_command_output( - command_output=call_aws_module(*base_parameters), - callback=event_monitor.make_aws_callback(pattern), - expected_results=0, - error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] + event_monitor.check_service_non_processed_logs_from_output( + command_output=call_aws_module(*base_parameters), service_type=service_type, expected_results=0 ) # Call the module with only_logs_after set in the past and check that the expected number of logs were processed.
- analyze_command_output( + # Call the module with only_logs_after set in the past and check that the expected number of logs were + # processed + event_monitor.check_service_processed_logs_from_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-12'), - callback=event_monitor.callback_detect_service_event_processed( - expected_results=3, - service_type=service_type), - error_message=ERROR_MESSAGE['incorrect_event_number'] + service_type=service_type, + events_sent=3 ) # Call the module with the same parameters and check there were no duplicates - analyze_command_output( + event_monitor.check_service_non_processed_logs_from_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-12'), - callback=event_monitor.make_aws_callback(pattern), - expected_results=0, - error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] + service_type=service_type, + expected_results=0 ) - # Call the module with only_logs_after set with an early date than the one set previously and check that no logs + # Call the module with only_logs_after set with an earlier date than the one set previously and check that no logs # were processed, there were no duplicates - analyze_command_output( + event_monitor.check_service_non_processed_logs_from_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-15'), - callback=event_monitor.make_aws_callback(pattern), - expected_results=0, - error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] + service_type=service_type, + expected_results=0 ) # Upload a log file for the day of the test execution and call the module without only_logs_after and check that # the expected number of logs were processed. log_stream = create_log_stream() metadata['log_stream'] = log_stream create_log_events(log_stream) - - analyze_command_output( - command_output=call_aws_module(*base_parameters), - callback=event_monitor.callback_detect_service_event_processed( - expected_results=1, - service_type=service_type), - error_message=ERROR_MESSAGE['incorrect_event_number'] + event_monitor.check_service_processed_logs_from_output( + command_output=call_aws_module(*base_parameters), service_type=service_type, events_sent=1 ) diff --git a/tests/integration/test_aws/test_parser.py b/tests/integration/test_aws/test_parser.py index b57145fafc8..e46feeda3cb 100644 --- a/tests/integration/test_aws/test_parser.py +++ b/tests/integration/test_aws/test_parser.py @@ -1,40 +1,45 @@ -# Copyright (C) 2015, Wazuh Inc. -# Created by Wazuh, Inc. . -# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 - -""" -This module will contain all cases for the parser test suite -""" - +import os import pytest # qa-integration-framework imports from wazuh_testing import session_parameters +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) # Local module imports -from .
import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT -from .conftest import TestConfigurator, local_internal_options +from .utils import ERROR_MESSAGES, TIMEOUTS pytestmark = [pytest.mark.server] -# Set test configurator for the module -configurator = TestConfigurator(module='parser_test_module') + +# Generic vars +MODULE = 'parser_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) # --------------------------------------------TEST_BUCKET_AND_SERVICE_MISSING ------------------------------------------ -# Configure T1 test -configurator.configure_test(configuration_file='configuration_bucket_and_service_missing.yaml', - cases_file='cases_bucket_and_service_missing.yaml') +# Configuration and cases data +t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_bucket_and_service_missing.yaml') +t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_and_service_missing.yaml') + +# Enabled test configurations +t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) +t1_configurations = load_configuration_template( + t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) def test_bucket_and_service_missing( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: Command for bucket and service weren't invoked. 
@@ -87,23 +92,27 @@ def test_bucket_and_service_missing( callback=event_monitor.callback_detect_aws_module_warning, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_warning'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_warning'] # -------------------------------------------- TEST_TYPE_MISSING_IN_BUCKET --------------------------------------------- -# Configure T2 test -configurator.configure_test(configuration_file='configuration_type_missing_in_bucket.yaml', - cases_file='cases_type_missing_in_bucket.yaml') +# Configuration and cases data +t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_type_missing_in_bucket.yaml') +t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_type_missing_in_bucket.yaml') + +# Enabled test configurations +t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) +t2_configurations = load_configuration_template( + t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) def test_type_missing_in_bucket( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: A warning occurs and was displayed in `ossec.log`. 
@@ -155,23 +164,27 @@ def test_type_missing_in_bucket( callback=event_monitor.callback_detect_aws_legacy_module_warning, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_legacy_warning'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_legacy_warning'] # -------------------------------------------- TEST_TYPE_MISSING_IN_SERVICE -------------------------------------------- -# Configure T3 test -configurator.configure_test(configuration_file='configuration_type_missing_in_service.yaml', - cases_file='cases_type_missing_in_service.yaml') +# Configuration and cases data +t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_type_missing_in_service.yaml') +t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_type_missing_in_service.yaml') + +# Enabled test configurations +t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) +t3_configurations = load_configuration_template( + t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t3_configurations, t3_configuration_metadata), ids=t3_case_ids) def test_type_missing_in_service( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. 
@@ -224,23 +237,26 @@ def test_type_missing_in_service( callback=event_monitor.callback_detect_aws_error_for_missing_type, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_error_message'] - + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_error_message'] # -------------------------------------------- TEST_EMPTY_VALUES_IN_BUCKET --------------------------------------------- -# Configure T4 test -configurator.configure_test(configuration_file='configuration_values_in_bucket.yaml', - cases_file='cases_empty_values_in_bucket.yaml') +# Configuration and cases data +t4_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_bucket.yaml') +t4_cases_path = os.path.join(TEST_CASES_PATH, 'cases_empty_values_in_bucket.yaml') + +# Enabled test configurations +t4_configuration_parameters, t4_configuration_metadata, t4_case_ids = get_test_cases_data(t4_cases_path) +t4_configurations = load_configuration_template( + t4_configurations_path, t4_configuration_parameters, t4_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t4_configurations, t4_configuration_metadata), ids=t4_case_ids) def test_empty_values_in_bucket( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. 
@@ -292,23 +308,26 @@ def test_empty_values_in_bucket( callback=event_monitor.callback_detect_aws_empty_value, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_empty_value_message'] - + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_value_message'] # -------------------------------------------- TEST_EMPTY_VALUES_IN_SERVICE -------------------------------------------- -# Configure T5 test -configurator.configure_test(configuration_file='configuration_values_in_service.yaml', - cases_file='cases_empty_values_in_service.yaml') +# Configuration and cases data +t5_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_service.yaml') +t5_cases_path = os.path.join(TEST_CASES_PATH, 'cases_empty_values_in_service.yaml') + +# Enabled test configurations +t5_configuration_parameters, t5_configuration_metadata, t5_case_ids = get_test_cases_data(t5_cases_path) +t5_configurations = load_configuration_template( + t5_configurations_path, t5_configuration_parameters, t5_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t5_configurations, t5_configuration_metadata), ids=t5_case_ids) def test_empty_values_in_service( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. 
@@ -361,23 +380,27 @@ def test_empty_values_in_service( callback=event_monitor.callback_detect_aws_empty_value, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_empty_value_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_value_message'] # ------------------------------------------ TEST_INVALID_VALUES_IN_BUCKET --------------------------------------------- -# Configure T6 test -configurator.configure_test(configuration_file='configuration_values_in_bucket.yaml', - cases_file='cases_invalid_values_in_bucket.yaml') +# Configuration and cases data +t6_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_bucket.yaml') +t6_cases_path = os.path.join(TEST_CASES_PATH, 'cases_invalid_values_in_bucket.yaml') + +# Enabled test configurations +t6_configuration_parameters, t6_configuration_metadata, t6_case_ids = get_test_cases_data(t6_cases_path) +t6_configurations = load_configuration_template( + t6_configurations_path, t6_configuration_parameters, t6_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t6_configurations, t6_configuration_metadata), ids=t6_case_ids) def test_invalid_values_in_bucket( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. @@ -424,31 +447,32 @@ def test_invalid_values_in_bucket( input_description: - The `configuration_values_in_bucket` file provides the configuration for this test. 
""" - - - log_monitor.start( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_aws_invalid_value, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_invalid_value_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_invalid_value_message'] # ------------------------------------------ TEST_INVALID_VALUES_IN_BUCKET --------------------------------------------- -# Configure T7 test -configurator.configure_test(configuration_file='configuration_values_in_service.yaml', - cases_file='cases_invalid_values_in_service.yaml') +# Configuration and cases data +t7_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_service.yaml') +t7_cases_path = os.path.join(TEST_CASES_PATH, 'cases_invalid_values_in_service.yaml') + +# Enabled test configurations +t7_configuration_parameters, t7_configuration_metadata, t7_case_ids = get_test_cases_data(t7_cases_path) +t7_configurations = load_configuration_template( + t7_configurations_path, t7_configuration_parameters, t7_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t7_configurations, t7_configuration_metadata), ids=t7_case_ids) def test_invalid_values_in_service( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. 
@@ -500,23 +524,27 @@ def test_invalid_values_in_service( callback=event_monitor.callback_detect_aws_invalid_value, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_invalid_value_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_invalid_value_message'] # --------------------------------------- TEST_MULTIPLE_BUCKET_AND_SERVICE_TAGS ---------------------------------------- -# Configure T8 test -configurator.configure_test(configuration_file='configuration_multiple_bucket_and_service_tags.yaml', - cases_file='cases_multiple_bucket_and_service_tags.yaml') +# Configuration and cases data +t8_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_multiple_bucket_and_service_tags.yaml') +t8_cases_path = os.path.join(TEST_CASES_PATH, 'cases_multiple_bucket_and_service_tags.yaml') + +# Enabled test configurations +t8_configuration_parameters, t8_configuration_metadata, t8_case_ids = get_test_cases_data(t8_cases_path) +t8_configurations = load_configuration_template( + t8_configurations_path, t8_configuration_parameters, t8_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t8_configurations, t8_configuration_metadata), ids=t8_case_ids) def test_multiple_bucket_and_service_tags( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: The command is invoked two times for buckets and two times for services. @@ -564,9 +592,9 @@ def test_multiple_bucket_and_service_tags( - The `configuration_multiple_bucket_and_service_tags` file provides the configuration for this test. """ log_monitor.start( - timeout=TIMEOUT[20], + timeout=TIMEOUTS[20], callback=event_monitor.callback_detect_bucket_or_service_call, accumulations=4 ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_service_calls_amount'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_service_calls_amount'] diff --git a/tests/integration/test_aws/test_path.py b/tests/integration/test_aws/test_path.py index e9a06b804d9..d6d740b0034 100644 --- a/tests/integration/test_aws/test_path.py +++ b/tests/integration/test_aws/test_path.py @@ -1,38 +1,43 @@ -# Copyright (C) 2015, Wazuh Inc. -# Created by Wazuh, Inc. . 
-# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 - -""" -This module will contain all cases for the path test suite -""" +import os import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH -from wazuh_testing.utils.db_queries.aws_db import get_s3_db_row, table_exists_or_has_values -from wazuh_testing.modules.aws.utils import path_exist - +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.modules.aws.db_utils import ( + get_s3_db_row, + s3_db_exists, + table_exists_or_has_values, +) +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) # Local module imports -from . import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT -from .conftest import TestConfigurator, local_internal_options +from .utils import ERROR_MESSAGES, TIMEOUTS pytestmark = [pytest.mark.server] -# Set test configurator for the module -configurator = TestConfigurator(module='path_test_module') + +# Generic vars +MODULE = 'path_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) # ---------------------------------------------------- TEST_PATH ------------------------------------------------------- -# Configure T1 test -configurator.configure_test(configuration_file='configuration_path.yaml', - cases_file='cases_path.yaml') +configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_path.yaml') +cases_path = os.path.join(TEST_CASES_PATH, 'cases_path.yaml') + +configuration_parameters, configuration_metadata, case_ids = get_test_cases_data(cases_path) +configurations = load_configuration_template( + configurations_path, configuration_parameters, configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(configurations, configuration_metadata), ids=case_ids) def test_path( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -117,7 +122,7 @@ def test_path( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -125,25 +130,25 @@ def test_path( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] if expected_results: log_monitor.start( - timeout=TIMEOUT[20], + timeout=TIMEOUTS[20], callback=event_monitor.callback_detect_event_processed, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] + assert log_monitor.callback_result is not None, 
ERROR_MESSAGES['incorrect_event_number'] else: log_monitor.start( - timeout=TIMEOUT[10], + timeout=TIMEOUTS[10], callback=event_monitor.make_aws_callback(pattern), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_empty_path_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_path_message'] - assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) + assert s3_db_exists() if expected_results: data = get_s3_db_row(table_name=table_name) @@ -158,4 +163,4 @@ def test_path( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] diff --git a/tests/integration/test_aws/test_path_suffix.py b/tests/integration/test_aws/test_path_suffix.py index 65ee5a25a74..6c7450091ec 100644 --- a/tests/integration/test_aws/test_path_suffix.py +++ b/tests/integration/test_aws/test_path_suffix.py @@ -1,39 +1,43 @@ -# Copyright (C) 2015, Wazuh Inc. -# Created by Wazuh, Inc. . -# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 - -""" -This module will contain all cases for the path suffix test suite -""" - +import os import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH -from wazuh_testing.utils.db_queries.aws_db import get_s3_db_row, table_exists_or_has_values -from wazuh_testing.modules.aws.utils import path_exist - +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.modules.aws.db_utils import ( + get_s3_db_row, + s3_db_exists, + table_exists_or_has_values, +) +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) # Local module imports -from . 
import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT -from .conftest import TestConfigurator, local_internal_options +from .utils import ERROR_MESSAGES, TIMEOUTS pytestmark = [pytest.mark.server] -# Set test configurator for the module -configurator = TestConfigurator(module='path_suffix_test_module') + +# Generic vars +MODULE = 'path_suffix_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) # ---------------------------------------------------- TEST_PATH ------------------------------------------------------- -# Configure T1 test -configurator.configure_test(configuration_file='configuration_path_suffix.yaml', - cases_file='cases_path_suffix.yaml') +configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_path_suffix.yaml') +cases_path = os.path.join(TEST_CASES_PATH, 'cases_path_suffix.yaml') + +configuration_parameters, configuration_metadata, case_ids = get_test_cases_data(cases_path) +configurations = load_configuration_template( + configurations_path, configuration_parameters, configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(configurations, configuration_metadata), ids=case_ids) def test_path_suffix( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -120,7 +124,7 @@ def test_path_suffix( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -128,24 +132,24 @@ def test_path_suffix( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] if expected_results: log_monitor.start( - timeout=TIMEOUT[20], + timeout=TIMEOUTS[20], callback=event_monitor.callback_detect_event_processed, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] else: log_monitor.start( - timeout=TIMEOUT[10], + timeout=TIMEOUTS[10], callback=event_monitor.make_aws_callback(pattern), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_empty_path_suffix_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_path_suffix_message'] - assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) + assert s3_db_exists() if expected_results: data = get_s3_db_row(table_name=bucket_type) @@ -160,4 +164,4 @@ def test_path_suffix( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] diff --git a/tests/integration/test_aws/test_regions.py b/tests/integration/test_aws/test_regions.py index a82e1204684..4b45b1feb6e 
100644 --- a/tests/integration/test_aws/test_regions.py +++ b/tests/integration/test_aws/test_regions.py @@ -1,40 +1,49 @@ -# Copyright (C) 2015, Wazuh Inc. -# Created by Wazuh, Inc. . -# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 - -""" -This module will contain all cases for the region test suite -""" +import os import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.aws import RANDOM_ACCOUNT_ID -from wazuh_testing.constants.paths.aws import AWS_SERVICES_DB_PATH, S3_CLOUDTRAIL_DB_PATH -from wazuh_testing.modules.aws.utils import path_exist -from wazuh_testing.utils.db_queries.aws_db import (get_multiple_service_db_row, table_exists_or_has_values, - get_multiple_s3_db_row) - +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.modules.aws import ( # noqa: F401 + AWS_SERVICES_DB_PATH, + RANDOM_ACCOUNT_ID, + event_monitor, + local_internal_options +) +from wazuh_testing.modules.aws.db_utils import ( + get_multiple_s3_db_row, + get_multiple_service_db_row, + s3_db_exists, + table_exists_or_has_values, +) +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) # Local module imports -from . import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT -from .conftest import TestConfigurator, local_internal_options +from .utils import ERROR_MESSAGES, TIMEOUTS pytestmark = [pytest.mark.server] -# Set test configurator for the module -configurator = TestConfigurator(module='regions_test_module') +# Generic vars +MODULE = 'regions_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) # ---------------------------------------------------- TEST_PATH ------------------------------------------------------- -# Configure T1 test -configurator.configure_test(configuration_file='bucket_configuration_regions.yaml', - cases_file='cases_bucket_regions.yaml') +t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_regions.yaml') +t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_regions.yaml') + +t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) +t1_configurations = load_configuration_template( + t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) def test_regions( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -101,7 +110,7 @@ def test_regions( only_logs_after = metadata['only_logs_after'] regions = metadata['regions'] expected_results = metadata['expected_results'] - pattern = f".*DEBUG: \+\+\+ No logs to process in bucket: {RANDOM_ACCOUNT_ID}/{regions}" + pattern = fr".*DEBUG: \+\+\+ No logs to process in bucket: 
{RANDOM_ACCOUNT_ID}/{regions}" parameters = [ 'wodles/aws/aws-s3', @@ -119,7 +128,7 @@ def test_regions( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -127,26 +136,26 @@ def test_regions( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] if expected_results: log_monitor.start( - timeout=TIMEOUT[20], + timeout=TIMEOUTS[20], callback=event_monitor.callback_detect_event_processed, accumulations=expected_results ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] else: log_monitor.start( - timeout=TIMEOUT[10], + timeout=TIMEOUTS[10], callback=event_monitor.make_aws_callback(pattern), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_no_region_found_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_no_region_found_message'] - assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) + assert s3_db_exists() if expected_results: regions_list = regions.split(",") @@ -164,19 +173,21 @@ def test_regions( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] # -------------------------------------------- TEST_CLOUDWATCH_REGIONS ------------------------------------------------- -# Configure T2 test -configurator.configure_test(configuration_file='cloudwatch_configuration_regions.yaml', - cases_file='cases_cloudwatch_regions.yaml') +t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'cloudwatch_configuration_regions.yaml') +t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_regions.yaml') + +t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) +configurations = load_configuration_template( + t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(configurations, t2_configuration_metadata), ids=t2_case_ids) def test_cloudwatch_regions( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -261,7 +272,7 @@ def test_cloudwatch_regions( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -269,15 +280,15 @@ def test_cloudwatch_regions( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, 
ERROR_MESSAGES['incorrect_parameters'] if expected_results: log_monitor.start( - timeout=TIMEOUT[20], + timeout=TIMEOUTS[20], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), accumulations=len(regions_list) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] else: log_monitor.start( @@ -287,7 +298,7 @@ def test_cloudwatch_regions( ), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_non-existent_region_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_non-existent_region_message'] table_name = 'cloudwatch_logs' @@ -304,19 +315,21 @@ def test_cloudwatch_regions( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] # ------------------------------------------ TEST_INSPECTOR_PATH ------------------------------------------------------- -# Configure T3 test -configurator.configure_test(configuration_file='inspector_configuration_regions.yaml', - cases_file='cases_inspector_regions.yaml') +t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'inspector_configuration_regions.yaml') +t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_regions.yaml') + +t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) +configurations = load_configuration_template( + t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(configurations, t3_configuration_metadata), ids=t3_case_ids) def test_inspector_regions( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -399,7 +412,7 @@ def test_inspector_regions( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -407,15 +420,15 @@ def test_inspector_regions( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] if expected_results: log_monitor.start( - timeout=TIMEOUT[20], + timeout=TIMEOUTS[20], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), accumulations=len(regions_list) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] else: log_monitor.start( @@ -425,7 +438,7 @@ def test_inspector_regions( ), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_non-existent_region_message'] + assert log_monitor.callback_result is not None, 
ERROR_MESSAGES['incorrect_non-existent_region_message'] table_name = 'aws_services' @@ -442,7 +455,7 @@ def test_inspector_regions( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] # Detect any ERROR message log_monitor.start( @@ -450,4 +463,4 @@ def test_inspector_regions( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] diff --git a/tests/integration/test_aws/test_remove_from_bucket.py b/tests/integration/test_aws/test_remove_from_bucket.py index c4d9d2d65bd..8c6dc85bd89 100644 --- a/tests/integration/test_aws/test_remove_from_bucket.py +++ b/tests/integration/test_aws/test_remove_from_bucket.py @@ -1,37 +1,38 @@ -# Copyright (C) 2015, Wazuh Inc. -# Created by Wazuh, Inc. . -# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 - -""" -This module will contain all cases for the remove from bucket test suite -""" - +import os import pytest -# qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.modules.aws.utils import log_stream_exists, file_exists - -# Local module imports -from . import event_monitor -from .utils import ERROR_MESSAGE -from .conftest import TestConfigurator, local_internal_options +from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR +from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.modules.aws.cloudwatch_utils import log_stream_exists +from wazuh_testing.modules.aws.s3_utils import file_exists +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) pytestmark = [pytest.mark.server] -# Set test configurator for the module -configurator = TestConfigurator(module='remove_from_bucket_test_module') + +# Generic vars +MODULE = 'remove_from_bucket_test_module' +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') +CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) +TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) # ---------------------------------------------------- TEST_REMOVE_FROM_BUCKET ----------------------------------------- -# Configure T1 test -configurator.configure_test(configuration_file='configuration_remove_from_bucket.yaml', - cases_file='cases_remove_from_bucket.yaml') +# Configuration and cases data +t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_remove_from_bucket.yaml') +t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_remove_from_bucket.yaml') + +t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) +t1_configurations = load_configuration_template( + t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) def test_remove_from_bucket( configuration, metadata, mark_cases_as_skipped, upload_and_delete_file_to_s3, load_wazuh_basic_configuration, 
set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, @@ -112,7 +113,7 @@ def test_remove_from_bucket( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -120,7 +121,7 @@ def test_remove_from_bucket( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] assert not file_exists(filename=metadata['uploaded_file'], bucket_name=bucket_name) @@ -130,19 +131,22 @@ def test_remove_from_bucket( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] # ---------------------------------------------------- TEST_REMOVE_LOG_STREAM ------------------------------------------ -# Configure T2 test -configurator.configure_test(configuration_file='configuration_remove_log_stream.yaml', - cases_file='cases_remove_log_streams.yaml') +# Configuration and cases data +t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_remove_log_stream.yaml') +t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_remove_log_streams.yaml') + +t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) +t2_configurations = load_configuration_template( + t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata +) @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', - zip(configurator.test_configuration_template, configurator.metadata), - ids=configurator.cases_ids) +@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) def test_remove_log_stream( configuration, metadata, create_log_stream, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, @@ -221,7 +225,7 @@ def test_remove_log_stream( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] # Check command was called correctly log_monitor.start( @@ -229,7 +233,7 @@ def test_remove_log_stream( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] assert not log_stream_exists(log_stream=metadata['log_stream'], log_group=log_group_name) @@ -239,4 +243,4 @@ def test_remove_log_stream( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py index 7df804c4d3a..23601bf2d6c 100644 --- a/tests/integration/test_aws/utils.py +++ b/tests/integration/test_aws/utils.py @@ -5,14 +5,10 @@ """ This file contains constant and 
other utilities to be used in the AWS integration test module. """ -from os.path import join, dirname, realpath # CONSTANTS -TEMPLATE_DIR = 'configuration_template' -TEST_CASES_DIR = 'test_cases' -WAZUH_MODULES_DEBUG = 'wazuh_modules.debug' -ERROR_MESSAGE = { +ERROR_MESSAGES = { "failed_start": "The AWS module did not start as expected", "incorrect_parameters": "The AWS module was not called with the correct parameters", @@ -27,22 +23,11 @@ "incorrect_legacy_warning": "The AWS module did not show the expected legacy warning", "incorrect_warning": "The AWS module did not show the expected warning", "incorrect_invalid_value_message": "The AWS module did not show the expected message about invalid value", - "incorrect_service_calls_amount": "The AWS module was not called for bucket or service the right amount of times", - "unexpected_number_of_events_found": "Some logs may have been processed, " - "or the results found are more than expected", - "incorrect_marker": "The AWS module did not use the correct marker", - "incorrect_no_region_found_message": "The AWS module did not show correct message about non-existent region", - "incorrect_discard_regex_message": "The AWS module did not show the correct message about discard regex or, " - "did not process the expected amount of logs", - "failed_sqs_message_retrieval": "The AWS module did not retrieve the expected message from the SQS Queue", - "failed_message_handling": "The AWS module did not handle the expected message" + "incorrect_service_calls_amount": "The AWS module was not called for bucket or service the right amount of times" } -TIMEOUT = { +TIMEOUTS = { 10: 10, 20: 20 } - -# Paths -TEST_DATA_PATH = join(dirname(realpath(__file__)), 'data') From d082a366700de72db7c2370f70176f7f0b5514e4 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Fri, 18 Aug 2023 12:50:16 -0300 Subject: [PATCH 274/419] Update Readme --- tests/integration/test_aws/README.md | 90 ++++++++++++++-------------- 1 file changed, 44 insertions(+), 46 deletions(-) diff --git a/tests/integration/test_aws/README.md b/tests/integration/test_aws/README.md index f06aac5e543..59d9ae61a66 100644 --- a/tests/integration/test_aws/README.md +++ b/tests/integration/test_aws/README.md @@ -7,13 +7,14 @@ It is a _wodle based_ module that has a capability to pull logs from several AWS ## Tests directory structure ```bash -wazuh-qa/tests/integration/test_aws -├── conftest.py +wazuh/tests/integration/test_aws ├── data │   ├── configuration_template │   │   ├── basic_test_module │   │   ├── discard_regex_test_module +│   │   ├── log_groups_test_module │   │   ├── only_logs_after_test_module +│   │   ├── parser_test_module │   │   ├── path_suffix_test_module │   │   ├── path_test_module │   │   ├── regions_test_module @@ -21,37 +22,44 @@ wazuh-qa/tests/integration/test_aws │   └── test_cases │   ├── basic_test_module │   ├── discard_regex_test_module +│   ├── log_groups_test_module │   ├── only_logs_after_test_module +│   ├── parser_test_module │   ├── path_suffix_test_module │   ├── path_test_module │   ├── regions_test_module │   └── remove_from_bucket_test_module -├── README.MD +├── __init__.py +├── README.md +├── conftest.py ├── test_basic.py ├── test_discard_regex.py +├── test_log_groups.py ├── test_only_logs_after.py ├── test_path.py ├── test_path_suffix.py ├── test_regions.py -└── test_remove_from_bucket.py +├── test_remove_from_bucket.py +└── utils.py ``` ## Deps directory structure ```bash -wazuh-qa/deps/wazuh_testing/wazuh_testing/modules/aws 
+qa-integration-framework/src/wazuh_testing/modules/aws +├── __init__.py ├── cli_utils.py -├── constants.py +├── cloudwatch_utils.py ├── data_generator.py ├── db_utils.py ├── event_monitor.py -├── __init__.py +├── exceptions.py └── s3_utils.py ``` ## Requirements -- The only extra dependency is `boto3` +- Install the [qa-integration-framework](https://github.com/wazuh/qa-integration-framework) - The module will assume there are already buckets, log groups and an inspector assessment with test data in AWS. ## Configuration settings @@ -67,7 +75,7 @@ aws_secret_access_key = ## Setting up a test environment -You will need a proper environment to run the integration tests. You can use any virtual machine you wish. If you have +You will need a proper environment to run the integration tests. You can use Docker or any virtual machine. If you have one already, go to the [integration tests section](#integration-tests) If you use [Vagrant](https://www.vagrantup.com/downloads.html) @@ -92,16 +100,16 @@ _We are using **Ubuntu 22.04** for this example:_ ```shell script # Install pip - apt install python3-pip - - # Clone your `wazuh-qa` repository within your testing environment - cd wazuh-qa - - # Install Python libraries - python3 -m pip install -r requirements.txt - + apt install python3-pip git -y + + # Clone your `wazuh` repository within your testing environment + git clone https://github.com/wazuh/wazuh.git + + # Clone the `qa-integration-framework` + git clone https://github.com/wazuh/qa-integration-framework.git + # Install test dependecies - python3 -m pip install deps/wazuh-testing + python3 -m pip install qa-integration-framework/ ``` @@ -149,32 +157,22 @@ check its documentation for further information. #### AWS integration tests example ```bash -# python3 -m pytest -vvx test_aws/ -k cloudtrail -=========================================================== test session starts ====================================================== -platform linux -- Python 3.10.6, pytest-7.1.2, pluggy-1.0.0 -- /usr/bin/python3 -cachedir: .pytest_cache -metadata: {'Python': '3.10.6', 'Platform': 'Linux-5.15.0-58-generic-x86_64-with-glibc2.35', -'Packages': {'pytest': '7.1.2', 'py': '1.10.0', 'pluggy': '1.0.0'}, -'Plugins': {'metadata': '2.0.2', 'html': '3.1.1', 'testinfra': '5.0.0'}} -rootdir: /home/vagrant/qa/tests/integration, configfile: pytest.ini -plugins: metadata-2.0.2, html-3.1.1, testinfra-5.0.0 -collected 15 items - -test_aws/test_basic.py::test_defaults[cloudtrail_defaults] PASSED [ 6%] -test_aws/test_discard_regex.py::test_discard_regex[cloudtrail_discard_regex] PASSED [ 13%] -test_aws/test_only_logs_after.py::test_without_only_logs_after[cloudtrail_without_only_logs_after] PASSED [ 20%] -test_aws/test_only_logs_after.py::test_with_only_logs_after[cloudtrail_with_only_logs_after] PASSED [ 26%] -test_aws/test_only_logs_after.py::test_multiple_calls[cloudtrail_only_logs_after_multiple_calls] PASSED [ 33%] -test_aws/test_path.py::test_path[cloudtrail_path_with_data] PASSED [ 40%] -test_aws/test_path.py::test_path[cloudtrail_path_without_data] PASSED [ 46%] -test_aws/test_path.py::test_path[cloudtrail_inexistent_path] PASSED [ 53%] -test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_path_suffix_with_data] PASSED [ 60%] -test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_path_suffix_without_data] PASSED [ 66%] -test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_inexistent_path_suffix] PASSED [ 73%] -test_aws/test_regions.py::test_regions[cloudtrail_region_with_data] PASSED [ 80%] 
-test_aws/test_regions.py::test_regions[cloudtrail_regions_with_data] PASSED [ 86%] -test_aws/test_regions.py::test_regions[cloudtrail_inexistent_region] PASSED [ 93%] -test_aws/test_remove_from_bucket.py::test_remove_from_bucket[cloudtrail_remove_from_bucket] PASSED [100%] - -=============================================== 15 passed, 2 warnings in 332.67s (0:05:32) =========================================== +#root@wazuh-master:/wazuh/tests/integration# pytest -x test_aws/ --disable-warnings +==================================== test session starts ==================================== +platform linux -- Python 3.10.12, pytest-7.1.2, pluggy-1.2.0 +rootdir: /wazuh/tests/integration, configfile: pytest.ini +plugins: testinfra-5.0.0, metadata-3.0.0, html-3.1.1 +collected 195 items + +test_aws/test_basic.py ................ [ 8%] +test_aws/test_discard_regex.py .............. [ 15%] +test_aws/test_log_groups.py .. [ 16%] +test_aws/test_only_logs_after.py .............................................x. [ 40%] +test_aws/test_parser.py .......................... [ 53%] +test_aws/test_path.py .......................................... [ 75%] +test_aws/test_path_suffix.py ......... [ 80%] +test_aws/test_regions.py ........................ [ 92%] +test_aws/test_remove_from_bucket.py ...sss......... [100%] + +============ 191 passed, 3 skipped, 1 xfailed, 7 warnings in 3723.08s (1:02:03) ============= ``` From 71115e0fd52b9816330b57577c2c9dd4c89f08e7 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Fri, 25 Aug 2023 08:30:35 -0300 Subject: [PATCH 275/419] Improve README and Remove unnecessary fixture calls on conftest --- tests/integration/conftest.py | 8 ++++++-- tests/integration/test_aws/README.md | 19 +++++++++++++------ 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 504b1abb618..9e1735acd46 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -181,6 +181,10 @@ def configure_local_internal_options_function(request): It uses the test variable local_internal_options. This should be a dictionary wich keys and values corresponds to the internal option configuration, For example: local_internal_options = {'monitord.rotate_log': '0', 'syscheck.debug': '0' } + + Args: + request (fixture): Provide information on the executing test function. + """ try: local_internal_options = request.param @@ -204,7 +208,7 @@ def configure_local_internal_options_function(request): wazuh_configuration.set_local_internal_options_dict(backup_local_internal_options) -@pytest.fixture(scope='function') +@pytest.fixture() def restart_wazuh_function(request): """Restart before starting a test, and stop it after finishing. @@ -240,7 +244,7 @@ def restart_wazuh_function(request): control_service('stop', daemon=daemon) -@pytest.fixture(scope='function') +@pytest.fixture() def file_monitoring(request): """Fixture to handle the monitoring of a specified file. diff --git a/tests/integration/test_aws/README.md b/tests/integration/test_aws/README.md index 59d9ae61a66..a644cff8f15 100644 --- a/tests/integration/test_aws/README.md +++ b/tests/integration/test_aws/README.md @@ -59,8 +59,15 @@ qa-integration-framework/src/wazuh_testing/modules/aws ## Requirements -- Install the [qa-integration-framework](https://github.com/wazuh/qa-integration-framework) -- The module will assume there are already buckets, log groups and an inspector assessment with test data in AWS. 
+- [Proper testing environment](#setting-up-a-test-environment) + +- [Wazuh](https://github.com/wazuh/wazuh) repository. + +- [Testing framework](https://github.com/wazuh/qa-integration-framework) installed. + +- Configured buckets, log groups and an inspector assessment with test data in AWS. + +For a step-by-step example guide using Linux, go to the [test setup section](#linux). ## Configuration settings @@ -102,13 +109,13 @@ _We are using **Ubuntu 22.04** for this example:_ # Install pip apt install python3-pip git -y - # Clone your `wazuh` repository within your testing environment + # Clone the `wazuh` repository within your testing environment git clone https://github.com/wazuh/wazuh.git - # Clone the `qa-integration-framework` + # Clone the `qa-integration-framework` repository within your testing environment git clone https://github.com/wazuh/qa-integration-framework.git - # Install test dependecies + # Install test dependencies python3 -m pip install qa-integration-framework/ ``` @@ -126,7 +133,7 @@ from the closest one, it will look for the next one (if possible) until reaching need to run every test from the following path, where the general _conftest_ is: ```shell script -cd wazuh-qa/tests/integration +cd wazuh/tests/integration/test_aws/ ``` To run any test, we just need to call `pytest` from `python3` using the following line: From 6f99776f862162cf52107ceae6542c3d114bba24 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Thu, 7 Sep 2023 09:17:21 -0300 Subject: [PATCH 276/419] rebase --- tests/integration/test_aws/conftest.py | 8 +- tests/integration/test_aws/event_monitor.py | 160 ++++++++++++++------ tests/integration/test_aws/test_basic.py | 4 +- tests/integration/test_aws/utils.py | 33 ---- 4 files changed, 117 insertions(+), 88 deletions(-) diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 2dfb6bde491..e74f146a473 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -1,18 +1,20 @@ import pytest from wazuh_testing.logger import logger -from wazuh_testing.modules.aws import ( +from wazuh_testing.constants.aws import ( FAKE_CLOUDWATCH_LOG_GROUP, PERMANENT_CLOUDWATCH_LOG_GROUP, ) -from wazuh_testing.modules.aws.cloudwatch_utils import ( +from wazuh_testing.modules.aws.utils import ( create_log_events, create_log_group, create_log_stream, delete_log_group, delete_log_stream, + delete_file, + file_exists, + upload_file ) -from wazuh_testing.modules.aws.db_utils import delete_s3_db, delete_services_db -from wazuh_testing.modules.aws.s3_utils import delete_file, file_exists, upload_file from wazuh_testing.utils.services import control_service diff --git a/tests/integration/test_aws/event_monitor.py b/tests/integration/test_aws/event_monitor.py index 47c670fc910..6df70c6c9b7 100644 --- a/tests/integration/test_aws/event_monitor.py +++ b/tests/integration/test_aws/event_monitor.py @@ -1,24 +1,9 @@ -""" -Copyright (C) 2015-2023, Wazuh Inc. -Created by Wazuh, Inc. .
-This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 - -This module will contains all callback methods to monitor and event -""" - import re -# # qa-integration-framework imports -from wazuh_testing.modules.aws.patterns import (AWS_MODULE_STARTED_PARAMETRIZED, - AWS_UNDEFINED_SERVICE_TYPE, AWS_DEPRECATED_CONFIG_DEFINED, - AWS_NO_SERVICE_WARNING, AWS_MODULE_STARTED, INVALID_EMPTY_TYPE_ERROR, - EMPTY_CONTENT_ERROR, EMPTY_CONTENT_WARNING, - INVALID_EMPTY_SERVICE_TYPE_ERROR, INVALID_TAG_CONTENT_ERROR, - PARSING_BUCKET_ERROR_WARNING, - PARSING_SERVICE_ERROR_WARNING, SERVICE_ANALYSIS, BUCKET_ANALYSIS, - MODULE_START, PARSER_ERROR, MODULE_ERROR, NEW_LOG_FOUND, DEBUG_MESSAGE, - EVENTS_COLLECTED, DEBUG_ANALYSISD_MESSAGE, ANALYSISD_EVENT, - AWS_EVENT_HEADER, NO_LOG_PROCESSED, NO_BUCKET_LOG_PROCESSED) +from wazuh_testing.modules.aws import VPC_FLOW_TYPE +from wazuh_testing.modules.aws.cli_utils import analyze_command_output +from wazuh_testing.modules.aws.patterns import patterns +from wazuh_testing.modules.aws.errors import errors from wazuh_testing.constants.aws import INSPECTOR_TYPE @@ -32,7 +17,9 @@ def make_aws_callback(pattern, prefix=''): Returns: lambda: Function that returns if there's a match in the file. """ - regex = re.compile(r'{}{}'.format(prefix, pattern)) + pattern = WHITESPACE_REGEX.join(pattern.split()) + regex = re.compile(CURLY_BRACE_MATCH.format(prefix, pattern)) + return lambda line: regex.match(line) @@ -45,7 +32,7 @@ def callback_detect_aws_module_called(parameters): Returns: Callable: Callback to match the line. """ - pattern = fr'{AWS_MODULE_STARTED_PARAMETRIZED}{" ".join(parameters)}\n*' + pattern = f'{AWS_MODULE_STARTED_PARAMETRIZED} {" ".join(parameters)}\n*' regex = re.compile(pattern) return lambda line: regex.match(line) @@ -60,7 +47,9 @@ def callback_detect_aws_error_for_missing_type(line): Optional[str]: Line if it matches. """ - if re.match(fr"{AWS_UNDEFINED_SERVICE_TYPE}", line): + if re.match( + AWS_UNDEFINED_SERVICE_TYPE, line + ): return line @@ -74,7 +63,9 @@ def callback_detect_aws_legacy_module_warning(line): Optional[str]: Line if it matches. """ - if re.match(fr"{AWS_DEPRECATED_CONFIG_DEFINED}", line): + if re.match( + AWS_DEPRECATED_CONFIG_DEFINED, line + ): return line @@ -88,9 +79,7 @@ def callback_detect_aws_module_warning(line): Optional[str]: Line if it matches. """ - if re.match( - fr"{AWS_NO_SERVICE_WARNING}", line - ): + if re.match(AWS_NO_SERVICE_WARNING, line): return line @@ -104,9 +93,7 @@ def callback_detect_aws_module_started(line): Optional[str]: Line if it matches. 
""" - if re.match( - fr"{AWS_MODULE_STARTED}", line - ): + if re.match(AWS_MODULE_STARTED, line): return line @@ -121,9 +108,9 @@ def callback_detect_aws_empty_value(line): """ if ( - re.match(fr"{INVALID_EMPTY_TYPE_ERROR}", line) or - re.match(fr"{EMPTY_CONTENT_ERROR}", line) or - re.match(fr"{EMPTY_CONTENT_WARNING}", line) + re.match(INVALID_TYPE_ERROR, line) or + re.match(EMPTY_CONTENT_ERROR, line) or + re.match(EMPTY_CONTENT_WARNING, line) ): return line @@ -139,10 +126,10 @@ def callback_detect_aws_invalid_value(line): """ if ( - re.match(fr"{INVALID_EMPTY_SERVICE_TYPE_ERROR}", line) or - re.match(fr"{INVALID_TAG_CONTENT_ERROR}", line) or - re.match(fr"{PARSING_BUCKET_ERROR_WARNING}", line), - re.match(fr"{PARSING_SERVICE_ERROR_WARNING}", line) + re.match(INVALID_EMPTY_SERVICE_TYPE_ERROR, line) or + re.match(INVALID_TAG_CONTENT_ERROR, line) or + re.match(PARSING_BUCKET_ERROR_WARNING, line), + re.match(PARSING_SERVICE_ERROR_WARNING, line) ): return line @@ -158,8 +145,8 @@ def callback_detect_bucket_or_service_call(line): """ if ( - re.match(fr"{SERVICE_ANALYSIS}", line) or - re.match(fr"{BUCKET_ANALYSIS}", line) + re.match(SERVICE_ANALYSIS, line) or + re.match(BUCKET_ANALYSIS, line) ): return line @@ -174,9 +161,7 @@ def callback_detect_aws_module_start(line): Optional[str]: Line if it matches. """ - if re.match( - fr"{MODULE_START}", line - ): + if re.match(MODULE_START, line): return line @@ -189,8 +174,7 @@ def callback_detect_all_aws_err(line): Returns: Optional[str]: line if it matches. """ - if (re.match(fr"{PARSER_ERROR}", line) or - re.match(fr"{MODULE_ERROR}", line)): + if re.match(PARSER_ERROR, line) or re.match(MODULE_ERROR, line): return line @@ -229,9 +213,7 @@ def callback_detect_event_processed(line): Returns: Optional[str]: line if it matches. """ - if re.match( - fr"{NEW_LOG_FOUND}", line - ): + if re.match(NEW_LOG_FOUND, line): return line @@ -249,9 +231,9 @@ def callback_detect_event_processed_or_skipped(pattern): def callback_detect_service_event_processed(expected_results, service_type): if service_type == INSPECTOR_TYPE: - regex = re.compile(fr"{DEBUG_MESSAGE} {expected_results} {EVENTS_COLLECTED}") + regex = re.compile(f"{DEBUG_MESSAGE} {expected_results} {EVENTS_COLLECTED}") else: - regex = re.compile(fr"{DEBUG_ANALYSISD_MESSAGE} {expected_results} {ANALYSISD_EVENT}") + regex = re.compile(f"{DEBUG_ANALYSISD_MESSAGE} {expected_results} {ANALYSISD_EVENT}") return lambda line: regex.match(line) @@ -264,6 +246,84 @@ def callback_event_sent_to_analysisd(line): Returns: Optional[str]: line if it matches. """ - if line.startswith( - fr"{AWS_EVENT_HEADER}"): + if line.startswith(AWS_EVENT_HEADER): return line + + +def check_processed_logs_from_output(command_output, expected_results=1): + """Check for processed messages in the give output. + + Args: + command_output (str): Output to analyze. + expected_results (int, optional): Number of results to find. Default to 1. + """ + analyze_command_output( + command_output=command_output, + callback=callback_detect_event_processed, + expected_results=expected_results, + error_message=INCORRECT_EVENT_NUMBER + ) + + +def check_non_processed_logs_from_output(command_output, bucket_type, expected_results=1): + """Check for the non 'processed' messages in the give output. + + Args: + command_output (str): Output to analyze. + bucket_type (str): Bucket type to select the message. + expected_results (int, optional): Number of results to find. Default to 1. 
+ """ + if bucket_type == VPC_FLOW_TYPE: + pattern = NO_LOG_PROCESSED + else: + pattern = NO_BUCKET_LOG_PROCESSED + + analyze_command_output( + command_output, + callback=make_aws_callback(pattern), + expected_results=expected_results, + error_message=UNEXPECTED_NUMBER_OF_EVENTS_FOUND + ) + + +def check_marker_from_output(command_output, file_key, expected_results=1): + """Check for the marker message in the given output. + + Args: + command_output (str): Output to analyze. + file_key (str): Value to check as a marker. + expected_results (int, optional): Number of results to find. Default to 1. + """ + pattern = f"{MARKER} {file_key}" + + analyze_command_output( + command_output, + callback=make_aws_callback(pattern), + expected_results=expected_results, + error_message=INCORRECT_MARKER + ) + + +def check_service_processed_logs_from_output( + command_output, events_sent, service_type, expected_results=1 +): + analyze_command_output( + command_output=command_output, + callback=callback_detect_service_event_processed(events_sent, service_type), + expected_results=expected_results, + error_message=INCORRECT_EVENT_NUMBER + ) + + +def check_service_non_processed_logs_from_output(command_output, service_type, expected_results=1): + if service_type == INSPECTOR_TYPE: + pattern = NO_NEW_EVENTS + else: + pattern = EVENT_SENT + + analyze_command_output( + command_output, + callback=make_aws_callback(pattern), + expected_results=expected_results, + error_message=POSSIBLY_PROCESSED_LOGS + ) diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py index f6707aa0947..609e933738a 100644 --- a/tests/integration/test_aws/test_basic.py +++ b/tests/integration/test_aws/test_basic.py @@ -3,8 +3,8 @@ # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 +from wazuh_testing.constants.aws import TEMPLATE_DIR, TEST_CASES_DIR +from . import event_monitor, local_internal_options # noqa: F401 from wazuh_testing.utils.configuration import ( get_test_cases_data, load_configuration_template, diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py index 23601bf2d6c..e69de29bb2d 100644 --- a/tests/integration/test_aws/utils.py +++ b/tests/integration/test_aws/utils.py @@ -1,33 +0,0 @@ -# Copyright (C) 2015-2023, Wazuh Inc. -# Created by Wazuh, Inc. . -# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 - -""" - This file contains constant and other utilities to be used in the AWS integration test module. 
-""" - -# CONSTANTS - -ERROR_MESSAGES = { - - "failed_start": "The AWS module did not start as expected", - "incorrect_parameters": "The AWS module was not called with the correct parameters", - "error_found": "Found error message on AWS module", - "incorrect_event_number": "The AWS module did not process the expected number of events", - "incorrect_non-existent_region_message": "The AWS module did not show correct message about non-existent region", - "incorrect_no_existent_log_group": "The AWS module did not show correct message non-existent log group", - "incorrect_empty_path_message": "The AWS module did not show correct message about empty path", - "incorrect_empty_path_suffix_message": "The AWS module did not show correct message about empty path_suffix", - "incorrect_error_message": "The AWS module did not show the expected error message", - "incorrect_empty_value_message": "The AWS module did not show the expected message about empty value", - "incorrect_legacy_warning": "The AWS module did not show the expected legacy warning", - "incorrect_warning": "The AWS module did not show the expected warning", - "incorrect_invalid_value_message": "The AWS module did not show the expected message about invalid value", - "incorrect_service_calls_amount": "The AWS module was not called for bucket or service the right amount of times" -} - -TIMEOUTS = { - - 10: 10, - 20: 20 -} From e48760599bddf424a1d440fa7b1b29716735b71c Mon Sep 17 00:00:00 2001 From: Eduardo Date: Fri, 22 Sep 2023 14:48:55 -0300 Subject: [PATCH 277/419] Remove empty utils file --- tests/integration/test_aws/utils.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 tests/integration/test_aws/utils.py diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py deleted file mode 100644 index e69de29bb2d..00000000000 From a10c3fcc799c3590fc84be03b0837df29fa77735 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Tue, 28 Nov 2023 10:22:02 -0300 Subject: [PATCH 278/419] Revert unwanted changes --- .../test_check_rare_socket_responses.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/integration/test_analysisd/test_all_syscheckd_configurations/test_check_rare_socket_responses.py b/tests/integration/test_analysisd/test_all_syscheckd_configurations/test_check_rare_socket_responses.py index 571ace13b66..366dbe065cf 100644 --- a/tests/integration/test_analysisd/test_all_syscheckd_configurations/test_check_rare_socket_responses.py +++ b/tests/integration/test_analysisd/test_all_syscheckd_configurations/test_check_rare_socket_responses.py @@ -93,8 +93,7 @@ def test_validate_rare_socket_responses(test_metadata, configure_local_internal_ tier: 2 - parameters:eceiver_sockets[0].send(test_metadata['input']) - monitored_sockets[0].start(c + parameters: - test_metadata: type: dict brief: Test case metadata. From e88182df4166ccd82a5d895c63c54dd27663e14a Mon Sep 17 00:00:00 2001 From: Eduardo Date: Tue, 28 Nov 2023 10:26:30 -0300 Subject: [PATCH 279/419] Revert unwanted changelog changes --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 65b447c84f3..45d74fb189c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4383,4 +4383,4 @@ All notable changes to this project will be documented in this file. 
- Extensions: Improved Windows deployment script ## [v1.0.0] - 2015-11-23 -- Initial Wazuh version v1.0 +- Initial Wazuh version v1.0 \ No newline at end of file From 9b01e4beb08f100f2e92023e9a9e3aa778118a3b Mon Sep 17 00:00:00 2001 From: Eduardo Date: Tue, 28 Nov 2023 10:51:45 -0300 Subject: [PATCH 280/419] Revert unwanted changelog changes --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 45d74fb189c..65b447c84f3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4383,4 +4383,4 @@ All notable changes to this project will be documented in this file. - Extensions: Improved Windows deployment script ## [v1.0.0] - 2015-11-23 -- Initial Wazuh version v1.0 \ No newline at end of file +- Initial Wazuh version v1.0 From 14dc805e007797a9509eb47102dc2d806fa8acfe Mon Sep 17 00:00:00 2001 From: Eduardo Date: Mon, 4 Dec 2023 15:29:08 -0300 Subject: [PATCH 281/419] Move configurator and local conf to utils and fix import --- tests/integration/test_aws/conftest.py | 15 +- tests/integration/test_aws/event_monitor.py | 161 ++----- tests/integration/test_aws/test_basic.py | 111 ++--- .../test_aws/test_custom_bucket.py | 5 +- .../test_aws/test_discard_regex.py | 445 ++++++++++++++++-- tests/integration/test_aws/test_log_groups.py | 63 ++- .../test_aws/test_only_logs_after.py | 326 +++++++------ tests/integration/test_aws/test_parser.py | 225 ++++----- tests/integration/test_aws/test_path.py | 64 ++- .../integration/test_aws/test_path_suffix.py | 65 ++- tests/integration/test_aws/test_regions.py | 128 +++-- .../test_aws/test_remove_from_bucket.py | 73 ++- tests/integration/test_aws/utils.py | 115 +++++ 13 files changed, 1078 insertions(+), 718 deletions(-) create mode 100644 tests/integration/test_aws/utils.py diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index e74f146a473..e65c6cdd4ba 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -1,4 +1,14 @@ +# Copyright (C) 2015-2023, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +""" +This module contain all necessary components (fixtures, classes, methods)to configure the test for its execution. +""" + import pytest + +# qa-integration-framework imports from wazuh_testing.logger import logger from wazuh_testing.constants.aws import ( FAKE_CLOUDWATCH_LOG_GROUP, @@ -14,7 +24,7 @@ file_exists, upload_file ) -from wazuh_testing.modules.aws.db_utils import delete_s3_db, delete_services_db +from wazuh_testing.modules.aws.utils import delete_s3_db, delete_services_db from wazuh_testing.utils.services import control_service @@ -140,10 +150,9 @@ def fixture_delete_log_stream(metadata): log_stream = metadata['log_stream'] delete_log_stream(log_stream=log_stream) logger.debug('Deleted log stream: %s', log_stream) + # DB fixtures - - @pytest.fixture def clean_s3_cloudtrail_db(): """Delete the DB file before and after the test execution""" diff --git a/tests/integration/test_aws/event_monitor.py b/tests/integration/test_aws/event_monitor.py index 6df70c6c9b7..09aaa863e38 100644 --- a/tests/integration/test_aws/event_monitor.py +++ b/tests/integration/test_aws/event_monitor.py @@ -1,9 +1,24 @@ +# Copyright (C) 2015-2023, Wazuh Inc. +# Created by Wazuh, Inc. . 
+# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +""" +This module will contain all callback methods to monitor and event +""" + import re -from wazuh_testing.modules.aws import VPC_FLOW_TYPE -from wazuh_testing.modules.aws.cli_utils import analyze_command_output -from wazuh_testing.modules.aws.patterns import patterns -from wazuh_testing.modules.aws.errors import errors +# # qa-integration-framework imports +from wazuh_testing.modules.aws.patterns import (AWS_MODULE_STARTED_PARAMETRIZED, + AWS_UNDEFINED_SERVICE_TYPE, AWS_DEPRECATED_CONFIG_DEFINED, + AWS_NO_SERVICE_WARNING, AWS_MODULE_STARTED, INVALID_EMPTY_TYPE_ERROR, + EMPTY_CONTENT_ERROR, EMPTY_CONTENT_WARNING, + INVALID_EMPTY_SERVICE_TYPE_ERROR, INVALID_TAG_CONTENT_ERROR, + PARSING_BUCKET_ERROR_WARNING, + PARSING_SERVICE_ERROR_WARNING, SERVICE_ANALYSIS, BUCKET_ANALYSIS, + MODULE_START, PARSER_ERROR, MODULE_ERROR, NEW_LOG_FOUND, DEBUG_MESSAGE, + EVENTS_COLLECTED, DEBUG_ANALYSISD_MESSAGE, ANALYSISD_EVENT, + AWS_EVENT_HEADER, NO_LOG_PROCESSED, NO_BUCKET_LOG_PROCESSED) from wazuh_testing.constants.aws import INSPECTOR_TYPE @@ -17,9 +32,7 @@ def make_aws_callback(pattern, prefix=''): Returns: lambda: Function that returns if there's a match in the file. """ - pattern = WHITESPACE_REGEX.join(pattern.split()) - regex = re.compile(CURLY_BRACE_MATCH.format(prefix, pattern)) - + regex = re.compile(r'{}{}'.format(prefix, pattern)) return lambda line: regex.match(line) @@ -32,7 +45,7 @@ def callback_detect_aws_module_called(parameters): Returns: Callable: Callback to match the line. """ - pattern = f'{AWS_MODULE_STARTED_PARAMETRIZED} {" ".join(parameters)}\n*' + pattern = fr'{AWS_MODULE_STARTED_PARAMETRIZED}{" ".join(parameters)}\n*' regex = re.compile(pattern) return lambda line: regex.match(line) @@ -46,10 +59,7 @@ def callback_detect_aws_error_for_missing_type(line): Returns: Optional[str]: Line if it matches. """ - - if re.match( - AWS_UNDEFINED_SERVICE_TYPE, line - ): + if re.match(fr"{AWS_UNDEFINED_SERVICE_TYPE}", line): return line @@ -62,10 +72,7 @@ def callback_detect_aws_legacy_module_warning(line): Returns: Optional[str]: Line if it matches. """ - - if re.match( - AWS_DEPRECATED_CONFIG_DEFINED, line - ): + if re.match(fr"{AWS_DEPRECATED_CONFIG_DEFINED}", line): return line @@ -78,8 +85,7 @@ def callback_detect_aws_module_warning(line): Returns: Optional[str]: Line if it matches. """ - - if re.match(AWS_NO_SERVICE_WARNING, line): + if re.match(fr"{AWS_NO_SERVICE_WARNING}", line): return line @@ -92,8 +98,7 @@ def callback_detect_aws_module_started(line): Returns: Optional[str]: Line if it matches. 
""" - - if re.match(AWS_MODULE_STARTED, line): + if re.match(fr"{AWS_MODULE_STARTED}", line): return line @@ -108,9 +113,9 @@ def callback_detect_aws_empty_value(line): """ if ( - re.match(INVALID_TYPE_ERROR, line) or - re.match(EMPTY_CONTENT_ERROR, line) or - re.match(EMPTY_CONTENT_WARNING, line) + re.match(fr"{INVALID_EMPTY_TYPE_ERROR}", line) or + re.match(fr"{EMPTY_CONTENT_ERROR}", line) or + re.match(fr"{EMPTY_CONTENT_WARNING}", line) ): return line @@ -126,10 +131,10 @@ def callback_detect_aws_invalid_value(line): """ if ( - re.match(INVALID_EMPTY_SERVICE_TYPE_ERROR, line) or - re.match(INVALID_TAG_CONTENT_ERROR, line) or - re.match(PARSING_BUCKET_ERROR_WARNING, line), - re.match(PARSING_SERVICE_ERROR_WARNING, line) + re.match(fr"{INVALID_EMPTY_SERVICE_TYPE_ERROR}", line) or + re.match(fr"{INVALID_TAG_CONTENT_ERROR}", line) or + re.match(fr"{PARSING_BUCKET_ERROR_WARNING}", line), + re.match(fr"{PARSING_SERVICE_ERROR_WARNING}", line) ): return line @@ -145,8 +150,8 @@ def callback_detect_bucket_or_service_call(line): """ if ( - re.match(SERVICE_ANALYSIS, line) or - re.match(BUCKET_ANALYSIS, line) + re.match(fr"{SERVICE_ANALYSIS}", line) or + re.match(fr"{BUCKET_ANALYSIS}", line) ): return line @@ -160,8 +165,7 @@ def callback_detect_aws_module_start(line): Returns: Optional[str]: Line if it matches. """ - - if re.match(MODULE_START, line): + if re.match(fr"{MODULE_START}", line): return line @@ -174,7 +178,9 @@ def callback_detect_all_aws_err(line): Returns: Optional[str]: line if it matches. """ - if re.match(PARSER_ERROR, line) or re.match(MODULE_ERROR, line): + if (re.match(fr"{PARSER_ERROR}", line) or + re.match(fr"{MODULE_ERROR}", line) + ): return line @@ -187,7 +193,7 @@ def callback_detect_aws_read_err(line): Returns: Optional[str]: line if it matches. """ - if re.match(PARSER_ERROR, line): + if re.match(fr"{PARSER_ERROR}", line): return line @@ -200,7 +206,7 @@ def callback_detect_aws_wmodule_err(line): Returns: Optional[str]: line if it matches. """ - if re.match(MODULE_ERROR, line): + if re.match(fr"{MODULE_ERROR}", line): return line @@ -213,7 +219,7 @@ def callback_detect_event_processed(line): Returns: Optional[str]: line if it matches. """ - if re.match(NEW_LOG_FOUND, line): + if re.match(fr"{NEW_LOG_FOUND}", line): return line @@ -231,9 +237,9 @@ def callback_detect_event_processed_or_skipped(pattern): def callback_detect_service_event_processed(expected_results, service_type): if service_type == INSPECTOR_TYPE: - regex = re.compile(f"{DEBUG_MESSAGE} {expected_results} {EVENTS_COLLECTED}") + regex = re.compile(fr"{DEBUG_MESSAGE} {expected_results} {EVENTS_COLLECTED}") else: - regex = re.compile(f"{DEBUG_ANALYSISD_MESSAGE} {expected_results} {ANALYSISD_EVENT}") + regex = re.compile(fr"{DEBUG_ANALYSISD_MESSAGE} {expected_results} {ANALYSISD_EVENT}") return lambda line: regex.match(line) @@ -246,84 +252,5 @@ def callback_event_sent_to_analysisd(line): Returns: Optional[str]: line if it matches. """ - if line.startswith(AWS_EVENT_HEADER): + if line.startswith(fr"{AWS_EVENT_HEADER}"): return line - - -def check_processed_logs_from_output(command_output, expected_results=1): - """Check for processed messages in the give output. - - Args: - command_output (str): Output to analyze. - expected_results (int, optional): Number of results to find. Default to 1. 
- """ - analyze_command_output( - command_output=command_output, - callback=callback_detect_event_processed, - expected_results=expected_results, - error_message=INCORRECT_EVENT_NUMBER - ) - - -def check_non_processed_logs_from_output(command_output, bucket_type, expected_results=1): - """Check for the non 'processed' messages in the give output. - - Args: - command_output (str): Output to analyze. - bucket_type (str): Bucket type to select the message. - expected_results (int, optional): Number of results to find. Default to 1. - """ - if bucket_type == VPC_FLOW_TYPE: - pattern = NO_LOG_PROCESSED - else: - pattern = NO_BUCKET_LOG_PROCESSED - - analyze_command_output( - command_output, - callback=make_aws_callback(pattern), - expected_results=expected_results, - error_message=UNEXPECTED_NUMBER_OF_EVENTS_FOUND - ) - - -def check_marker_from_output(command_output, file_key, expected_results=1): - """Check for the marker message in the given output. - - Args: - command_output (str): Output to analyze. - file_key (str): Value to check as a marker. - expected_results (int, optional): Number of results to find. Default to 1. - """ - pattern = f"{MARKER} {file_key}" - - analyze_command_output( - command_output, - callback=make_aws_callback(pattern), - expected_results=expected_results, - error_message=INCORRECT_MARKER - ) - - -def check_service_processed_logs_from_output( - command_output, events_sent, service_type, expected_results=1 -): - analyze_command_output( - command_output=command_output, - callback=callback_detect_service_event_processed(events_sent, service_type), - expected_results=expected_results, - error_message=INCORRECT_EVENT_NUMBER - ) - - -def check_service_non_processed_logs_from_output(command_output, service_type, expected_results=1): - if service_type == INSPECTOR_TYPE: - pattern = NO_NEW_EVENTS - else: - pattern = EVENT_SENT - - analyze_command_output( - command_output, - callback=make_aws_callback(pattern), - expected_results=expected_results, - error_message=POSSIBLY_PROCESSED_LOGS - ) diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py index 609e933738a..dba1aa1a2a6 100644 --- a/tests/integration/test_aws/test_basic.py +++ b/tests/integration/test_aws/test_basic.py @@ -1,44 +1,39 @@ -import os +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +""" +This module will contain all cases for the basic test suite +""" + import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.aws import TEMPLATE_DIR, TEST_CASES_DIR -from . import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) + # Local module imports -from .utils import ERROR_MESSAGES +from . 
import event_monitor +from .utils import ERROR_MESSAGE, TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] - -# Generic vars -MODULE = 'basic_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='basic_test_module') # -------------------------------------------- TEST_BUCKET_DEFAULTS ---------------------------------------------------- -# Configuration and cases -t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_defaults.yaml') -t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_defaults.yaml') - -# Enabled test configurations -t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) -t1_configurations = load_configuration_template( - t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='bucket_configuration_defaults.yaml', + cases_file='cases_bucket_defaults.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_bucket_defaults( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. 
@@ -101,16 +96,16 @@ def test_bucket_defaults( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_aws_module_start ) - - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_aws_module_called(parameters) ) - - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] # Detect any ERROR message log_monitor.start( @@ -118,26 +113,22 @@ def test_bucket_defaults( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # -------------------------------------------- TEST_CLOUDWATCH_DEFAULTS ------------------------------------------------ -# Configuration and cases data -t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'cloudwatch_configuration_defaults.yaml') -t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_defaults.yaml') - -# Enabled test configurations -t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) -configurations = load_configuration_template( - t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata -) +# Configure T2 test +configurator.configure_test(configuration_file='cloudwatch_configuration_defaults.yaml', + cases_file='cases_cloudwatch_defaults.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(configurations, t2_configuration_metadata), ids=t2_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_service_defaults( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. 
@@ -207,7 +198,7 @@ def test_service_defaults( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -215,34 +206,30 @@ def test_service_defaults( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] # Detect any ERROR message log_monitor.start( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_all_aws_err ) - - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # ------------------------------------------ TEST_INSPECTOR_DEFAULTS --------------------------------------------------- -# Configuration and cases data -t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'inspector_configuration_defaults.yaml') -t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_defaults.yaml') - -# Enabled test configurations -t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) -configurations = load_configuration_template( - t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata -) +# Configure T3 test +configurator.configure_test(configuration_file='inspector_configuration_defaults.yaml', + cases_file='cases_inspector_defaults.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(configurations, t3_configuration_metadata), ids=t3_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_inspector_defaults( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. 
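These assertions key into ERROR_MESSAGE (which replaces the old ERROR_MESSAGES import), and later hunks do the same with TIMEOUT. The lookups assume dictionaries roughly shaped like the sketch below; the keys are the ones referenced throughout the patch, while the message strings and timeout values are placeholders rather than the real definitions in .utils:

# Keys taken from the assertions in this patch; values are placeholders only.
ERROR_MESSAGE = {
    'failed_start': 'The AWS module did not start as expected',
    'incorrect_parameters': 'The AWS module was not called with the expected parameters',
    'error_found': 'Unexpected ERROR message found in ossec.log',
    'incorrect_event_number': 'Incorrect number of events forwarded to analysisd',
    'incorrect_discard_regex_message': 'The expected discard_regex message was not found',
    'incorrect_no_existent_log_group': 'The expected non-existent log group message was not found',
    'unexpected_number_of_events_found': 'Logs were processed when none were expected',
    'incorrect_marker': 'The expected file marker was not found',
    'incorrect_warning': 'The expected warning was not found',
}

# TIMEOUT maps an identifier to a wait time in seconds; TIMEOUT[10] and
# TIMEOUT[20] are the two entries this patch uses. Values are placeholders.
TIMEOUT = {
    10: 60,
    20: 120,
}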
@@ -310,7 +297,7 @@ def test_inspector_defaults( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -318,7 +305,7 @@ def test_inspector_defaults( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] # Detect any ERROR message log_monitor.start( @@ -326,4 +313,4 @@ def test_inspector_defaults( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] diff --git a/tests/integration/test_aws/test_custom_bucket.py b/tests/integration/test_aws/test_custom_bucket.py index dc0eb9d31fe..9f62c8bc5b2 100644 --- a/tests/integration/test_aws/test_custom_bucket.py +++ b/tests/integration/test_aws/test_custom_bucket.py @@ -13,8 +13,7 @@ # Local module imports from . import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT -from .conftest import TestConfigurator, local_internal_options +from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] @@ -255,4 +254,4 @@ def test_custom_bucket_logs(configuration, metadata, load_wazuh_basic_configurat callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] \ No newline at end of file + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] diff --git a/tests/integration/test_aws/test_discard_regex.py b/tests/integration/test_aws/test_discard_regex.py index 54cd7f27d9b..acad70f2918 100644 --- a/tests/integration/test_aws/test_discard_regex.py +++ b/tests/integration/test_aws/test_discard_regex.py @@ -1,46 +1,45 @@ -import os +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +""" +This module will contain all cases for the discard_regex test suite +""" + import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) -from wazuh_testing.modules.aws.db_utils import s3_db_exists +from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH, AWS_SERVICES_DB_PATH +from wazuh_testing.modules.aws.utils import path_exist # Local module imports -from .utils import ERROR_MESSAGES, TIMEOUTS +from . 
import event_monitor +from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] +# Set test configurator for the module +configurator = TestConfigurator(module='discard_regex_test_module') -# Generic vars -MODULE = 'discard_regex_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) - -# -----------------------------------------opvb----------- TEST_PATH ------------------------------------------------------- -configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_discard_regex.yaml') -cases_path = os.path.join(TEST_CASES_PATH, 'cases_discard_regex.yaml') - -configuration_parameters, configuration_metadata, case_ids = get_test_cases_data(cases_path) -configurations = load_configuration_template( - configurations_path, configuration_parameters, configuration_metadata -) +# --------------------------------------------- TEST_BUCKET_DISCARD_REGEX --------------------------------------------- +# Configure T1 test +configurator.configure_test(configuration_file='configuration_bucket_discard_regex.yaml', + cases_file='cases_bucket_discard_regex.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(configurations, configuration_metadata), ids=case_ids) -def test_discard_regex( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) +def test_bucket_discard_regex( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): """ - description: Fetch logs excluding the ones that match with the regex. + description: Check that some bucket logs are excluded when the regex and field defined in + match an event. + test_phases: - setup: - Load Wazuh light configuration. @@ -57,7 +56,9 @@ def test_discard_regex( - Truncate wazuh logs. - Restore initial configuration, both ossec.conf and local_internal_options.conf. - Delete the uploaded file + wazuh_min_version: 4.6.0 + parameters: - configuration: type: dict @@ -86,13 +87,15 @@ def test_discard_regex( - file_monitoring: type: fixture brief: Handle the monitoring of a specified file. + assertions: - Check in the log that the module was called with correct parameters. - Check the expected number of events were forwarded to analysisd. - Check the database was created and updated accordingly. + input_description: - - The `configuration_discard_regex` file provides the module configuration for this test. - - The `cases_discard_regex` file provides the test cases. + - The `configuration_bucket_discard_regex` file provides the module configuration for this test. + - The `cases_bucket_discard_regex` file provides the test cases. 
""" bucket_name = metadata['bucket_name'] bucket_type = metadata['bucket_type'] @@ -103,7 +106,8 @@ def test_discard_regex( skipped_logs = metadata['skipped_logs'] path = metadata['path'] if 'path' in metadata else None - pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field. The event will be skipped.' + pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field.' \ + ' The event will be skipped.' parameters = [ 'wodles/aws/aws-s3', @@ -126,7 +130,7 @@ def test_discard_regex( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -134,20 +138,385 @@ def test_discard_regex( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), accumulations=found_logs + skipped_logs ) - assert s3_db_exists() + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] + + assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) + + +# ----------------------------------------- TEST_CLOUDWATCH_DISCARD_REGEX_JSON ---------------------------------------- +# Configure T2 test +configurator.configure_test(configuration_file='configuration_cloudwatch_discard_regex_json.yaml', + cases_file='cases_cloudwatch_discard_regex_json.yaml') + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) +def test_cloudwatch_discard_regex_json( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, +): + """ + description: Check that some CloudWatch JSON logs are excluded when the regex and field defined in + match an event. + + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check the expected number of events were forwarded to analysisd, only logs stored in the bucket and skips + the ones that match with regex. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file + + wazuh_min_version: 4.6.0 + + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. 
+ - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + + assertions: + - Check in the log that the module was called with correct parameters. + - Check the expected number of events were forwarded to analysisd. + - Check the database was created and updated accordingly. + + input_description: + - The `configuration_cloudwatch_discard_regex` file provides the module configuration for this test. + - The `cases_cloudwatch_discard_regex` file provides the test cases. + """ + log_group_name = metadata.get('log_group_name') + service_type = metadata.get('service_type') + only_logs_after = metadata.get('only_logs_after') + regions: str = metadata.get('regions') + discard_field = metadata.get('discard_field', None) + discard_regex = metadata.get('discard_regex') + found_logs = metadata.get('found_logs') + + pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field.' \ + ' The event will be skipped.' + + parameters = [ + 'wodles/aws/aws-s3', + '--service', service_type, + '--aws_profile', 'qa', + '--only_logs_after', only_logs_after, + '--regions', regions, + '--aws_log_groups', log_group_name, + '--discard-field', discard_field, + '--discard-regex', discard_regex, + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + + log_monitor.start( + timeout=TIMEOUT[20], + callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), + accumulations=found_logs + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] + + assert path_exist(path=AWS_SERVICES_DB_PATH) + + +# ------------------------------------- TEST_CLOUDWATCH_DISCARD_REGEX_SIMPLE_TEXT ------------------------------------- +# Configure T3 test +configurator.configure_test(configuration_file='configuration_cloudwatch_discard_regex_simple_text.yaml', + cases_file='cases_cloudwatch_discard_regex_simple_text.yaml') + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) +def test_cloudwatch_discard_regex_simple_text( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, +): + """ + description: Check that some CloudWatch simple text logs are excluded when the regex defined in + matches an event. + + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. 
+ - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check the expected number of events were forwarded to analysisd, only logs stored in the bucket and skips + the ones that match with regex. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file + + wazuh_min_version: 4.6.0 + + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. + - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + + assertions: + - Check in the log that the module was called with correct parameters. + - Check the expected number of events were forwarded to analysisd. + - Check the database was created and updated accordingly. + + input_description: + - The `configuration_cloudwatch_discard_regex_simple_text` file provides + the module configuration for this test. + - The `cases_cloudwatch_discard_regex_simple_text` file provides the test cases. + """ + log_group_name = metadata.get('log_group_name') + service_type = metadata.get('service_type') + only_logs_after = metadata.get('only_logs_after') + regions: str = metadata.get('regions') + discard_regex = metadata.get('discard_regex') + found_logs = metadata.get('found_logs') + + pattern = fr'.*The "{discard_regex}" regex found a match. The event will be skipped.' 
+ + parameters = [ + 'wodles/aws/aws-s3', + '--service', service_type, + '--aws_profile', 'qa', + '--only_logs_after', only_logs_after, + '--regions', regions, + '--aws_log_groups', log_group_name, + '--discard-regex', discard_regex, + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + + # Check command was called correctly + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + + log_monitor.start( + timeout=TIMEOUT[20], + callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), + accumulations=found_logs + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] + + assert path_exist(path=AWS_SERVICES_DB_PATH) + + +# ------------------------------------------- TEST_INSPECTOR_DISCARD_REGEX -------------------------------------------- +# Configure T4 test +configurator.configure_test(configuration_file='configuration_inspector_discard_regex.yaml', + cases_file='cases_inspector_discard_regex.yaml') + + +@pytest.mark.tier(level=0) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) +def test_inspector_discard_regex( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, +): + """ + description: Check that some Inspector logs are excluded when the regex and field defined in + match an event. + + test_phases: + - setup: + - Load Wazuh light configuration. + - Apply ossec.conf configuration changes according to the configuration template and use case. + - Apply custom settings in local_internal_options.conf. + - Truncate wazuh logs. + - Restart wazuh-manager service to apply configuration changes. + - test: + - Check in the ossec.log that a line has appeared calling the module with correct parameters. + - Check the expected number of events were forwarded to analysisd, only logs stored in the bucket and skips + the ones that match with regex. + - Check the database was created and updated accordingly. + - teardown: + - Truncate wazuh logs. + - Restore initial configuration, both ossec.conf and local_internal_options.conf. + - Delete the uploaded file + + wazuh_min_version: 4.6.0 + + parameters: + - configuration: + type: dict + brief: Get configurations from the module. + - metadata: + type: dict + brief: Get metadata from the module. + - load_wazuh_basic_configuration: + type: fixture + brief: Load basic wazuh configuration. + - set_wazuh_configuration: + type: fixture + brief: Apply changes to the ossec.conf configuration. + - clean_aws_services_db: + type: fixture + brief: Delete the DB file before and after the test execution. + - configure_local_internal_options_function: + type: fixture + brief: Apply changes to the local_internal_options.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate wazuh logs. + - restart_wazuh_daemon_function: + type: fixture + brief: Restart the wazuh service. 
+ - file_monitoring: + type: fixture + brief: Handle the monitoring of a specified file. + + assertions: + - Check in the log that the module was called with correct parameters. + - Check the expected number of events were forwarded to analysisd. + - Check the database was created and updated accordingly. + + input_description: + - The `configuration_inspector_discard_regex` file provides the module configuration for this test. + - The `cases_inspector_discard_regex` file provides the test cases. + """ + service_type = metadata.get('service_type') + only_logs_after = metadata.get('only_logs_after') + regions: str = metadata.get('regions') + discard_field = metadata.get('discard_field', '') + discard_regex = metadata.get('discard_regex') + found_logs = metadata.get('found_logs') + + pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field.' \ + ' The event will be skipped.' + + parameters = [ + 'wodles/aws/aws-s3', + '--service', service_type, + '--aws_profile', 'qa', + '--only_logs_after', only_logs_after, + '--regions', regions, + '--discard-field', discard_field, + '--discard-regex', discard_regex, + '--debug', '2' + ] + + # Check AWS module started + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_aws_module_start + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] - # Detect any ERROR message + # Check command was called correctly log_monitor.start( timeout=session_parameters.default_timeout, - callback=event_monitor.callback_detect_all_aws_err + callback=event_monitor.callback_detect_aws_module_called(parameters) + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] + + log_monitor.start( + timeout=TIMEOUT[20], + callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), + accumulations=found_logs ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] + + assert path_exist(path=AWS_SERVICES_DB_PATH) diff --git a/tests/integration/test_aws/test_log_groups.py b/tests/integration/test_aws/test_log_groups.py index b806bfd186c..ecfaca210cb 100644 --- a/tests/integration/test_aws/test_log_groups.py +++ b/tests/integration/test_aws/test_log_groups.py @@ -1,44 +1,39 @@ -import os +""" +Copyright (C) 2015-2023, Wazuh Inc. +Created by Wazuh, Inc. . +This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +This module will contains all cases for the log groups test suite +""" + import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) -from wazuh_testing.modules.aws.db_utils import ( - get_multiple_service_db_row, - services_db_exists, - table_exists, -) +from wazuh_testing.utils.db_queries.aws_db import get_multiple_service_db_row, table_exists +from wazuh_testing.modules.aws.utils import path_exist +from wazuh_testing.constants.paths.aws import AWS_SERVICES_DB_PATH +from wazuh_testing.modules.aws.patterns import NON_EXISTENT_SPECIFIED_LOG_GROUPS # Local module imports -from .utils import ERROR_MESSAGES, TIMEOUTS +from . 
import event_monitor +from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] - -# Generic vars -MODULE = 'log_groups_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='log_groups_test_module') # ----------------------------------------------- TEST_AWS_LOG_GROUPS -------------------------------------------------- -t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_log_groups.yaml') -t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_log_groups.yaml') - -t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) -t1_configurations = load_configuration_template( - t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='configuration_log_groups.yaml', + cases_file='cases_log_groups.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_log_groups( configuration, metadata, create_log_stream, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, @@ -124,7 +119,7 @@ def test_log_groups( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -134,19 +129,19 @@ def test_log_groups( if expected_results: log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), accumulations=len(log_group_names.split(',')) ) else: log_monitor.start( - timeout=TIMEOUTS[10], - callback=event_monitor.make_aws_callback(r'.*The specified log group does not exist.'), + timeout=TIMEOUT[10], + callback=event_monitor.make_aws_callback(pattern=fr"{NON_EXISTENT_SPECIFIED_LOG_GROUPS}") ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_no_existent_log_group'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_no_existent_log_group'] - assert services_db_exists() + assert path_exist(path=AWS_SERVICES_DB_PATH) if expected_results: log_group_list = log_group_names.split(",") @@ -161,4 +156,4 @@ def test_log_groups( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] diff --git a/tests/integration/test_aws/test_only_logs_after.py b/tests/integration/test_aws/test_only_logs_after.py index 42a117f735a..d6ae20dd95b 100644 --- a/tests/integration/test_aws/test_only_logs_after.py +++ b/tests/integration/test_aws/test_only_logs_after.py @@ -1,53 +1,43 @@ -import os +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . 
+# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +""" +This module will contain all cases for the only logs after test suite +""" + import pytest from datetime import datetime # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR -from wazuh_testing.modules import aws as cons -from wazuh_testing.modules.aws import ONLY_LOGS_AFTER_PARAM, event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.modules.aws.cli_utils import call_aws_module -from wazuh_testing.modules.aws.cloudwatch_utils import ( - create_log_events, - create_log_stream, -) -from wazuh_testing.modules.aws.db_utils import ( - get_multiple_s3_db_row, - get_service_db_row, - s3_db_exists, - services_db_exists, - get_s3_db_row, -) -from wazuh_testing.modules.aws.s3_utils import get_last_file_key, upload_file -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) - -from .utils import ERROR_MESSAGES, TIMEOUTS +from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH, AWS_SERVICES_DB_PATH +from wazuh_testing.constants.aws import ONLY_LOGS_AFTER_PARAM, PATH_DATE_FORMAT, VPC_FLOW_TYPE, INSPECTOR_TYPE +from wazuh_testing.utils.db_queries.aws_db import get_multiple_s3_db_row, get_service_db_row, get_s3_db_row +from wazuh_testing.modules.aws.utils import (call_aws_module, create_log_events, create_log_stream, path_exist, + get_last_file_key, upload_file, analyze_command_output) +from wazuh_testing.modules.aws.patterns import (NO_LOG_PROCESSED, NO_BUCKET_LOG_PROCESSED, MARKER, NO_NEW_EVENTS, + EVENT_SENT) + +# Local module imports +from . import event_monitor +from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] - -# Generic vars -MODULE = 'only_logs_after_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='only_logs_after_test_module') # --------------------------------------------- TEST_BUCKET_WITHOUT_ONLY_LOGS_AFTER ------------------------------------ -t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_without_only_logs_after.yaml') -t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_without_only_logs_after.yaml') - -t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) -t1_configurations = load_configuration_template( - t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='bucket_configuration_without_only_logs_after.yaml', + cases_file='cases_bucket_without_only_logs_after.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_bucket_without_only_logs_after( configuration, metadata, upload_and_delete_file_to_s3, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, 
configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, @@ -134,7 +124,7 @@ def test_bucket_without_only_logs_after( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -142,7 +132,7 @@ def test_bucket_without_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] log_monitor.start( timeout=session_parameters.default_timeout, @@ -150,9 +140,9 @@ def test_bucket_without_only_logs_after( accumulations=expected_results ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] - assert s3_db_exists() + assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) data = get_s3_db_row(table_name=table_name) @@ -165,21 +155,19 @@ def test_bucket_without_only_logs_after( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # -------------------------------------------- TEST_SERVICE_WITHOUT_ONLY_LOGS_AFTER ------------------------------------ -t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'service_configuration_without_only_logs_after.yaml') -t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_service_without_only_logs_after.yaml') - -t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) -t2_configurations = load_configuration_template( - t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata -) +# Configure T2 test +configurator.configure_test(configuration_file='service_configuration_without_only_logs_after.yaml', + cases_file='cases_service_without_only_logs_after.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_service_without_only_logs_after( configuration, metadata, create_log_stream_in_existent_group, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, @@ -261,7 +249,7 @@ def test_service_without_only_logs_after( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -269,9 +257,9 @@ def test_service_without_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] - assert services_db_exists() + assert path_exist(path=AWS_SERVICES_DB_PATH) data = get_service_db_row(table_name="cloudwatch_logs") @@ 
-285,21 +273,19 @@ def test_service_without_only_logs_after( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # --------------------------------------------- TEST_BUCKET_WITH_ONLY_LOGS_AFTER --------------------------------------- -t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_with_only_logs_after.yaml') -t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_with_only_logs_after.yaml') - -t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) -t3_configurations = load_configuration_template( - t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata -) +# Configure T3 test +configurator.configure_test(configuration_file='bucket_configuration_with_only_logs_after.yaml', + cases_file='cases_bucket_with_only_logs_after.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t3_configurations, t3_configuration_metadata), ids=t3_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_bucket_with_only_logs_after( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -384,7 +370,7 @@ def test_bucket_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -392,17 +378,17 @@ def test_bucket_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_event_processed, accumulations=expected_results ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] - assert s3_db_exists() + assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) for row in get_multiple_s3_db_row(table_name=table_name): assert bucket_name in row.bucket_path @@ -416,21 +402,19 @@ def test_bucket_with_only_logs_after( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # --------------------------------------------TEST_CLOUDWATCH_WITH_ONLY_LOGS_AFTER ------------------------------------- -t4_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'cloudwatch_configuration_with_only_logs_after.yaml') -t4_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_with_only_logs_after.yaml') - -t4_configuration_parameters, t4_configuration_metadata, t4_case_ids = get_test_cases_data(t4_cases_path) -t4_configurations = load_configuration_template( - t4_configurations_path, t4_configuration_parameters, t4_configuration_metadata -) +# Configure T4 
test +configurator.configure_test(configuration_file='cloudwatch_configuration_with_only_logs_after.yaml', + cases_file='cases_cloudwatch_with_only_logs_after.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t4_configurations, t4_configuration_metadata), ids=t4_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_cloudwatch_with_only_logs_after( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -515,7 +499,7 @@ def test_cloudwatch_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -523,16 +507,16 @@ def test_cloudwatch_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] log_monitor.start( - timeout=TIMEOUTS[10], + timeout=TIMEOUT[10], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] - assert services_db_exists() + assert path_exist(path=AWS_SERVICES_DB_PATH) data = get_service_db_row(table_name=table_name_map[service_type]) @@ -545,21 +529,19 @@ def test_cloudwatch_with_only_logs_after( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # ------------------------------------------ TEST_INSPECTOR_WITH_ONLY_LOGS_AFTER --------------------------------------- -t5_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'inspector_configuration_with_only_logs_after.yaml') -t5_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_with_only_logs_after.yaml') - -t5_configuration_parameters, t5_configuration_metadata, t5_case_ids = get_test_cases_data(t5_cases_path) -t5_configurations = load_configuration_template( - t5_configurations_path, t5_configuration_parameters, t5_configuration_metadata -) +# Configure T5 test +configurator.configure_test(configuration_file='inspector_configuration_with_only_logs_after.yaml', + cases_file='cases_inspector_with_only_logs_after.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t5_configurations, t5_configuration_metadata), ids=t5_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_inspector_with_only_logs_after( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -642,7 +624,7 @@ def test_inspector_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_start 
) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -650,16 +632,16 @@ def test_inspector_with_only_logs_after( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] log_monitor.start( - timeout=TIMEOUTS[10], + timeout=TIMEOUT[10], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] - assert services_db_exists() + assert path_exist(path=AWS_SERVICES_DB_PATH) data = get_service_db_row(table_name=table_name_map[service_type]) @@ -670,13 +652,14 @@ def test_inspector_with_only_logs_after( # ---------------------------------------------------- TEST_MULTIPLE_CALLS --------------------------------------------- -t5_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_multiple_calls.yaml') - -_, t5_configuration_metadata, t5_case_ids = get_test_cases_data(t5_cases_path) +# Configure T6 test +configurator.configure_test(cases_file='cases_bucket_multiple_calls.yaml') @pytest.mark.tier(level=1) -@pytest.mark.parametrize('metadata', t5_configuration_metadata, ids=t5_case_ids) +@pytest.mark.parametrize('metadata', + configurator.metadata, + ids=configurator.cases_ids) def test_bucket_multiple_calls( metadata, clean_s3_cloudtrail_db, load_wazuh_basic_configuration, restart_wazuh_function, delete_file_from_s3 ): @@ -718,7 +701,7 @@ def test_bucket_multiple_calls( brief: Restart the wazuh service. - delete_file_from_s3: type: fixture - brief: Delete the a file after the test execution. + brief: Delete the file after the test execution. input_description: - The `cases_multiple_calls` file provides the test cases. """ @@ -739,55 +722,71 @@ def test_bucket_multiple_calls( base_parameters.extend(['--trail_prefix', path]) # Call the module without only_logs_after and check that no logs were processed - last_marker_key = datetime.utcnow().strftime(cons.PATH_DATE_FORMAT) + last_marker_key = datetime.utcnow().strftime(PATH_DATE_FORMAT) + + # Get bucket type + if bucket_type == VPC_FLOW_TYPE: + pattern = fr"{NO_LOG_PROCESSED}" + else: + pattern = fr"{NO_BUCKET_LOG_PROCESSED}" - event_monitor.check_non_processed_logs_from_output( + # Check for the non 'processed' messages in the given output. 
+ analyze_command_output( command_output=call_aws_module(*base_parameters), - bucket_type=bucket_type + callback=event_monitor.make_aws_callback(pattern), + expected_results=1, + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) - # Call the module with only_logs_after set in the past and check that the expected number of logs were - # processed - event_monitor.check_processed_logs_from_output( + # Call the module with only_logs_after set in the past and check that the expected number of logs were processed + analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-20'), - expected_results=3 + callback=event_monitor.callback_detect_event_processed, + expected_results=3, + error_message=ERROR_MESSAGE['incorrect_event_number'] ) # Call the module with the same parameters in and check there were no duplicates expected_skipped_logs_step_3 = metadata.get('expected_skipped_logs_step_3', 1) - event_monitor.check_non_processed_logs_from_output( + analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-20'), - bucket_type=bucket_type, - expected_results=expected_skipped_logs_step_3 + callback=event_monitor.make_aws_callback(pattern), + expected_results=expected_skipped_logs_step_3, + error_message=ERROR_MESSAGE['incorrect_event_number'] ) - # Call the module with only_logs_after set with an early date than setted previously and check that no logs + # Call the module with only_logs_after set with an early date than the one set previously and check that no logs # were processed, there were no duplicates - event_monitor.check_non_processed_logs_from_output( + analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-22'), - bucket_type=bucket_type, - expected_results=expected_skipped_logs_step_3 - 1 if expected_skipped_logs_step_3 > 1 else 1 + callback=event_monitor.make_aws_callback(pattern), + expected_results=expected_skipped_logs_step_3 - 1 if expected_skipped_logs_step_3 > 1 else 1, + error_message=ERROR_MESSAGE['incorrect_event_number'] ) # Upload a log file for the day of the test execution and call the module without only_logs_after and check that # only the uploaded logs were processed and the last marker is specified in the DB. 
last_marker_key = get_last_file_key(bucket_type, bucket_name, datetime.utcnow()) metadata['filename'] = upload_file(bucket_type, bucket_name) + pattern = fr"{MARKER}{last_marker_key}" - event_monitor.check_marker_from_output( + analyze_command_output( command_output=call_aws_module(*base_parameters), - file_key=last_marker_key + callback=event_monitor.make_aws_callback(pattern), + expected_results=1, + error_message=ERROR_MESSAGE['incorrect_marker'] ) # -------------------------------------------- TEST_INSPECTOR_MULTIPLE_CALLS ------------------------------------------- -t6_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_multiple_calls.yaml') - -_, t6_configuration_metadata, t6_case_ids = get_test_cases_data(t6_cases_path) +# Configure T7 test +configurator.configure_test(cases_file='cases_inspector_multiple_calls.yaml') @pytest.mark.tier(level=1) -@pytest.mark.parametrize('metadata', t6_configuration_metadata, ids=t6_case_ids) +@pytest.mark.parametrize('metadata', + configurator.metadata, + ids=configurator.cases_ids) @pytest.mark.xfail def test_inspector_multiple_calls( metadata, clean_aws_services_db, load_wazuh_basic_configuration, restart_wazuh_function @@ -833,43 +832,49 @@ def test_inspector_multiple_calls( '--debug', '2' ] + pattern = fr"{NO_NEW_EVENTS}" + # Call the module without only_logs_after and check that no logs were processed - event_monitor.check_service_non_processed_logs_from_output( - command_output=call_aws_module(*base_parameters), service_type=service_type, expected_results=1 + analyze_command_output( + command_output=call_aws_module(*base_parameters), + callback=event_monitor.make_aws_callback(pattern), + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) - # Call the module with only_logs_after set in the past and check that the expected number of logs were - # processed - event_monitor.check_service_processed_logs_from_output( + # Call the module with only_logs_after set in the past and check that the expected number of logs were processed. 
+ analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-30'), - service_type=service_type, - events_sent=4 + callback=event_monitor.callback_detect_service_event_processed( + expected_results=4, + service_type=service_type), + error_message=ERROR_MESSAGE['incorrect_event_number'] ) # Call the module with the same parameters in and check there were no duplicates - event_monitor.check_service_non_processed_logs_from_output( + analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-30'), - service_type=service_type, - expected_results=1 + callback=event_monitor.make_aws_callback(pattern), + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) - # Call the module with only_logs_after set with an early date than setted previously and check that no logs + # Call the module with only_logs_after set with an early date than the one set previously and check that no logs # were processed, there were no duplicates - event_monitor.check_service_non_processed_logs_from_output( + analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-31'), - service_type=service_type, - expected_results=1 + callback=event_monitor.make_aws_callback(pattern), + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) # ----------------------------------------- TEST_CLOUDWATCH_MULTIPLE_CALLS --------------------------------------------- -t7_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_multiple_calls.yaml') - -_, t7_configuration_metadata, t7_case_ids = get_test_cases_data(t7_cases_path) +# Configure T8 test +configurator.configure_test(cases_file='cases_cloudwatch_multiple_calls.yaml') @pytest.mark.tier(level=1) -@pytest.mark.parametrize('metadata', t7_configuration_metadata, ids=t7_case_ids) +@pytest.mark.parametrize('metadata', + configurator.metadata, + ids=configurator.cases_ids) def test_cloudwatch_multiple_calls( metadata, clean_aws_services_db, load_wazuh_basic_configuration, restart_wazuh_function, delete_log_stream ): @@ -925,32 +930,40 @@ def test_cloudwatch_multiple_calls( '--debug', '2' ] + pattern = fr"{EVENT_SENT}" + # Call the module without only_logs_after and check that no logs were processed - event_monitor.check_service_non_processed_logs_from_output( - command_output=call_aws_module(*base_parameters), service_type=service_type, expected_results=0 + analyze_command_output( + command_output=call_aws_module(*base_parameters), + callback=event_monitor.make_aws_callback(pattern), + expected_results=0, + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) - # Call the module with only_logs_after set in the past and check that the expected number of logs were - # processed - event_monitor.check_service_processed_logs_from_output( + # Call the module with only_logs_after set in the past and check that the expected number of logs were processed. 
+ analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-12'), - service_type=service_type, - events_sent=3 + callback=event_monitor.callback_detect_service_event_processed( + expected_results=3, + service_type=service_type), + error_message=ERROR_MESSAGE['incorrect_event_number'] ) # Call the module with the same parameters in and check there were no duplicates - event_monitor.check_service_non_processed_logs_from_output( + analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-12'), - service_type=service_type, - expected_results=0 + callback=event_monitor.make_aws_callback(pattern), + expected_results=0, + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) - # Call the module with only_logs_after set with an early date than setted previously and check that no logs + # Call the module with only_logs_after set with an early date than the one set previously and check that no logs # were processed, there were no duplicates - event_monitor.check_service_non_processed_logs_from_output( + analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2023-JAN-15'), - service_type=service_type, - expected_results=0 + callback=event_monitor.make_aws_callback(pattern), + expected_results=0, + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] ) # Upload a log file for the day of the test execution and call the module without only_logs_after and check that @@ -958,6 +971,11 @@ def test_cloudwatch_multiple_calls( log_stream = create_log_stream() metadata['log_stream'] = log_stream create_log_events(log_stream) - event_monitor.check_service_processed_logs_from_output( - command_output=call_aws_module(*base_parameters), service_type=service_type, events_sent=1 + + analyze_command_output( + command_output=call_aws_module(*base_parameters), + callback=event_monitor.callback_detect_service_event_processed( + expected_results=1, + service_type=service_type), + error_message=ERROR_MESSAGE['incorrect_event_number'] ) diff --git a/tests/integration/test_aws/test_parser.py b/tests/integration/test_aws/test_parser.py index e46feeda3cb..42cdb8e9bf6 100644 --- a/tests/integration/test_aws/test_parser.py +++ b/tests/integration/test_aws/test_parser.py @@ -1,45 +1,39 @@ -import os +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +""" +This module will contain all cases for the parser test suite +""" + import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) # Local module imports -from .utils import ERROR_MESSAGES, TIMEOUTS +from . 
import event_monitor +from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] - -# Generic vars -MODULE = 'parser_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='parser_test_module') # --------------------------------------------TEST_BUCKET_AND_SERVICE_MISSING ------------------------------------------ -# Configuration and cases data -t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_bucket_and_service_missing.yaml') -t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_and_service_missing.yaml') - -# Enabled test configurations -t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) -t1_configurations = load_configuration_template( - t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='configuration_bucket_and_service_missing.yaml', + cases_file='cases_bucket_and_service_missing.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_bucket_and_service_missing( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: Command for bucket and service weren't invoked. 
@@ -92,27 +86,23 @@ def test_bucket_and_service_missing( callback=event_monitor.callback_detect_aws_module_warning, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_warning'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_warning'] # -------------------------------------------- TEST_TYPE_MISSING_IN_BUCKET --------------------------------------------- -# Configuration and cases data -t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_type_missing_in_bucket.yaml') -t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_type_missing_in_bucket.yaml') - -# Enabled test configurations -t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) -t2_configurations = load_configuration_template( - t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata -) +# Configure T2 test +configurator.configure_test(configuration_file='configuration_type_missing_in_bucket.yaml', + cases_file='cases_type_missing_in_bucket.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_type_missing_in_bucket( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: A warning occurs and was displayed in `ossec.log`. 
@@ -164,27 +154,23 @@ def test_type_missing_in_bucket( callback=event_monitor.callback_detect_aws_legacy_module_warning, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_legacy_warning'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_legacy_warning'] # -------------------------------------------- TEST_TYPE_MISSING_IN_SERVICE -------------------------------------------- -# Configuration and cases data -t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_type_missing_in_service.yaml') -t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_type_missing_in_service.yaml') - -# Enabled test configurations -t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) -t3_configurations = load_configuration_template( - t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata -) +# Configure T3 test +configurator.configure_test(configuration_file='configuration_type_missing_in_service.yaml', + cases_file='cases_type_missing_in_service.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t3_configurations, t3_configuration_metadata), ids=t3_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_type_missing_in_service( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. 
@@ -237,26 +223,23 @@ def test_type_missing_in_service( callback=event_monitor.callback_detect_aws_error_for_missing_type, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_error_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_error_message'] -# -------------------------------------------- TEST_EMPTY_VALUES_IN_BUCKET --------------------------------------------- -# Configuration and cases data -t4_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_bucket.yaml') -t4_cases_path = os.path.join(TEST_CASES_PATH, 'cases_empty_values_in_bucket.yaml') -# Enabled test configurations -t4_configuration_parameters, t4_configuration_metadata, t4_case_ids = get_test_cases_data(t4_cases_path) -t4_configurations = load_configuration_template( - t4_configurations_path, t4_configuration_parameters, t4_configuration_metadata -) +# -------------------------------------------- TEST_EMPTY_VALUES_IN_BUCKET --------------------------------------------- +# Configure T4 test +configurator.configure_test(configuration_file='configuration_values_in_bucket.yaml', + cases_file='cases_empty_values_in_bucket.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t4_configurations, t4_configuration_metadata), ids=t4_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_empty_values_in_bucket( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. 
@@ -308,26 +291,23 @@ def test_empty_values_in_bucket( callback=event_monitor.callback_detect_aws_empty_value, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_value_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_empty_value_message'] -# -------------------------------------------- TEST_EMPTY_VALUES_IN_SERVICE -------------------------------------------- -# Configuration and cases data -t5_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_service.yaml') -t5_cases_path = os.path.join(TEST_CASES_PATH, 'cases_empty_values_in_service.yaml') -# Enabled test configurations -t5_configuration_parameters, t5_configuration_metadata, t5_case_ids = get_test_cases_data(t5_cases_path) -t5_configurations = load_configuration_template( - t5_configurations_path, t5_configuration_parameters, t5_configuration_metadata -) +# -------------------------------------------- TEST_EMPTY_VALUES_IN_SERVICE -------------------------------------------- +# Configure T5 test +configurator.configure_test(configuration_file='configuration_values_in_service.yaml', + cases_file='cases_empty_values_in_service.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t5_configurations, t5_configuration_metadata), ids=t5_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_empty_values_in_service( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. 
@@ -380,27 +360,23 @@ def test_empty_values_in_service( callback=event_monitor.callback_detect_aws_empty_value, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_value_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_empty_value_message'] # ------------------------------------------ TEST_INVALID_VALUES_IN_BUCKET --------------------------------------------- -# Configuration and cases data -t6_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_bucket.yaml') -t6_cases_path = os.path.join(TEST_CASES_PATH, 'cases_invalid_values_in_bucket.yaml') - -# Enabled test configurations -t6_configuration_parameters, t6_configuration_metadata, t6_case_ids = get_test_cases_data(t6_cases_path) -t6_configurations = load_configuration_template( - t6_configurations_path, t6_configuration_parameters, t6_configuration_metadata -) +# Configure T6 test +configurator.configure_test(configuration_file='configuration_values_in_bucket.yaml', + cases_file='cases_invalid_values_in_bucket.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t6_configurations, t6_configuration_metadata), ids=t6_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_invalid_values_in_bucket( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. @@ -447,32 +423,31 @@ def test_invalid_values_in_bucket( input_description: - The `configuration_values_in_bucket` file provides the configuration for this test. 
""" + + + log_monitor.start( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_aws_invalid_value, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_invalid_value_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_invalid_value_message'] # ------------------------------------------ TEST_INVALID_VALUES_IN_BUCKET --------------------------------------------- -# Configuration and cases data -t7_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_values_in_service.yaml') -t7_cases_path = os.path.join(TEST_CASES_PATH, 'cases_invalid_values_in_service.yaml') - -# Enabled test configurations -t7_configuration_parameters, t7_configuration_metadata, t7_case_ids = get_test_cases_data(t7_cases_path) -t7_configurations = load_configuration_template( - t7_configurations_path, t7_configuration_parameters, t7_configuration_metadata -) +# Configure T7 test +configurator.configure_test(configuration_file='configuration_values_in_service.yaml', + cases_file='cases_invalid_values_in_service.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t7_configurations, t7_configuration_metadata), ids=t7_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_invalid_values_in_service( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: An error occurs and was displayed in `ossec.log`. 
@@ -524,27 +499,23 @@ def test_invalid_values_in_service( callback=event_monitor.callback_detect_aws_invalid_value, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_invalid_value_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_invalid_value_message'] # --------------------------------------- TEST_MULTIPLE_BUCKET_AND_SERVICE_TAGS ---------------------------------------- -# Configuration and cases data -t8_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_multiple_bucket_and_service_tags.yaml') -t8_cases_path = os.path.join(TEST_CASES_PATH, 'cases_multiple_bucket_and_service_tags.yaml') - -# Enabled test configurations -t8_configuration_parameters, t8_configuration_metadata, t8_case_ids = get_test_cases_data(t8_cases_path) -t8_configurations = load_configuration_template( - t8_configurations_path, t8_configuration_parameters, t8_configuration_metadata -) +# Configure T8 test +configurator.configure_test(configuration_file='configuration_multiple_bucket_and_service_tags.yaml', + cases_file='cases_multiple_bucket_and_service_tags.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t8_configurations, t8_configuration_metadata), ids=t8_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_multiple_bucket_and_service_tags( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, - file_monitoring + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, + file_monitoring ): """ description: The command is invoked two times for buckets and two times for services. @@ -592,9 +563,9 @@ def test_multiple_bucket_and_service_tags( - The `configuration_multiple_bucket_and_service_tags` file provides the configuration for this test. """ log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_bucket_or_service_call, accumulations=4 ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_service_calls_amount'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_service_calls_amount'] diff --git a/tests/integration/test_aws/test_path.py b/tests/integration/test_aws/test_path.py index d6d740b0034..a7bb3bc2a03 100644 --- a/tests/integration/test_aws/test_path.py +++ b/tests/integration/test_aws/test_path.py @@ -1,43 +1,37 @@ -import os +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . 
+# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +""" +This module will contain all cases for the path test suite +""" import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.modules.aws.db_utils import ( - get_s3_db_row, - s3_db_exists, - table_exists_or_has_values, -) -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) +from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH +from wazuh_testing.utils.db_queries.aws_db import get_s3_db_row, table_exists_or_has_values +from wazuh_testing.modules.aws.utils import path_exist + # Local module imports -from .utils import ERROR_MESSAGES, TIMEOUTS +from . import event_monitor +from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] - -# Generic vars -MODULE = 'path_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='path_test_module') # ---------------------------------------------------- TEST_PATH ------------------------------------------------------- -configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_path.yaml') -cases_path = os.path.join(TEST_CASES_PATH, 'cases_path.yaml') - -configuration_parameters, configuration_metadata, case_ids = get_test_cases_data(cases_path) -configurations = load_configuration_template( - configurations_path, configuration_parameters, configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='configuration_path.yaml', + cases_file='cases_path.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(configurations, configuration_metadata), ids=case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_path( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -122,7 +116,7 @@ def test_path( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -130,25 +124,25 @@ def test_path( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] if expected_results: log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_event_processed, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] else: 
log_monitor.start( - timeout=TIMEOUTS[10], + timeout=TIMEOUT[10], callback=event_monitor.make_aws_callback(pattern), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_path_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_empty_path_message'] - assert s3_db_exists() + assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) if expected_results: data = get_s3_db_row(table_name=table_name) @@ -163,4 +157,4 @@ def test_path( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] diff --git a/tests/integration/test_aws/test_path_suffix.py b/tests/integration/test_aws/test_path_suffix.py index 6c7450091ec..6287d1a2888 100644 --- a/tests/integration/test_aws/test_path_suffix.py +++ b/tests/integration/test_aws/test_path_suffix.py @@ -1,43 +1,38 @@ -import os +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +""" +This module will contain all cases for the path suffix test suite +""" + import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.modules.aws.db_utils import ( - get_s3_db_row, - s3_db_exists, - table_exists_or_has_values, -) -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) +from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH +from wazuh_testing.utils.db_queries.aws_db import get_s3_db_row, table_exists_or_has_values +from wazuh_testing.modules.aws.utils import path_exist + # Local module imports -from .utils import ERROR_MESSAGES, TIMEOUTS +from . 
import event_monitor +from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] - -# Generic vars -MODULE = 'path_suffix_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='path_suffix_test_module') # ---------------------------------------------------- TEST_PATH ------------------------------------------------------- -configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_path_suffix.yaml') -cases_path = os.path.join(TEST_CASES_PATH, 'cases_path_suffix.yaml') - -configuration_parameters, configuration_metadata, case_ids = get_test_cases_data(cases_path) -configurations = load_configuration_template( - configurations_path, configuration_parameters, configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='configuration_path_suffix.yaml', + cases_file='cases_path_suffix.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(configurations, configuration_metadata), ids=case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_path_suffix( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -124,7 +119,7 @@ def test_path_suffix( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -132,24 +127,24 @@ def test_path_suffix( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] if expected_results: log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_event_processed, ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] else: log_monitor.start( - timeout=TIMEOUTS[10], + timeout=TIMEOUT[10], callback=event_monitor.make_aws_callback(pattern), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_empty_path_suffix_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_empty_path_suffix_message'] - assert s3_db_exists() + assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) if expected_results: data = get_s3_db_row(table_name=bucket_type) @@ -164,4 +159,4 @@ def test_path_suffix( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] diff --git a/tests/integration/test_aws/test_regions.py b/tests/integration/test_aws/test_regions.py index 4b45b1feb6e..ce526780a80 100644 --- a/tests/integration/test_aws/test_regions.py +++ 
b/tests/integration/test_aws/test_regions.py @@ -1,49 +1,39 @@ -import os +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +""" +This module will contain all cases for the region test suite +""" import pytest # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.modules.aws import ( # noqa: F401 - AWS_SERVICES_DB_PATH, - RANDOM_ACCOUNT_ID, - event_monitor, - local_internal_options -) -from wazuh_testing.modules.aws.db_utils import ( - get_multiple_s3_db_row, - get_multiple_service_db_row, - s3_db_exists, - table_exists_or_has_values, -) -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) +from wazuh_testing.constants.aws import RANDOM_ACCOUNT_ID +from wazuh_testing.constants.paths.aws import AWS_SERVICES_DB_PATH, S3_CLOUDTRAIL_DB_PATH +from wazuh_testing.modules.aws.utils import path_exist +from wazuh_testing.utils.db_queries.aws_db import (get_multiple_service_db_row, table_exists_or_has_values, + get_multiple_s3_db_row) + # Local module imports -from .utils import ERROR_MESSAGES, TIMEOUTS +from . import event_monitor +from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options pytestmark = [pytest.mark.server] -# Generic vars -MODULE = 'regions_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='regions_test_module') # ---------------------------------------------------- TEST_PATH ------------------------------------------------------- -t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'bucket_configuration_regions.yaml') -t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_bucket_regions.yaml') - -t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) -t1_configurations = load_configuration_template( - t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='bucket_configuration_regions.yaml', + cases_file='cases_bucket_regions.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_regions( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -110,7 +100,7 @@ def test_regions( only_logs_after = metadata['only_logs_after'] regions = metadata['regions'] expected_results = metadata['expected_results'] - pattern = fr".*DEBUG: \+\+\+ No logs to process in bucket: {RANDOM_ACCOUNT_ID}/{regions}" + pattern = f".*DEBUG: \+\+\+ No logs to process in bucket: {RANDOM_ACCOUNT_ID}/{regions}" parameters = [ 'wodles/aws/aws-s3', @@ -128,7 +118,7 @@ def 
test_regions( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -136,26 +126,26 @@ def test_regions( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] if expected_results: log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_event_processed, accumulations=expected_results ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] else: log_monitor.start( - timeout=TIMEOUTS[10], + timeout=TIMEOUT[10], callback=event_monitor.make_aws_callback(pattern), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_no_region_found_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_no_region_found_message'] - assert s3_db_exists() + assert path_exist(path=S3_CLOUDTRAIL_DB_PATH) if expected_results: regions_list = regions.split(",") @@ -173,21 +163,19 @@ def test_regions( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # -------------------------------------------- TEST_CLOUDWATCH_REGIONS ------------------------------------------------- -t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'cloudwatch_configuration_regions.yaml') -t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_cloudwatch_regions.yaml') - -t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) -configurations = load_configuration_template( - t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata -) +# Configure T2 test +configurator.configure_test(configuration_file='cloudwatch_configuration_regions.yaml', + cases_file='cases_cloudwatch_regions.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(configurations, t2_configuration_metadata), ids=t2_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_cloudwatch_regions( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -272,7 +260,7 @@ def test_cloudwatch_regions( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -280,15 +268,15 @@ def test_cloudwatch_regions( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] if expected_results: log_monitor.start( - 
timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), accumulations=len(regions_list) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] else: log_monitor.start( @@ -298,7 +286,7 @@ def test_cloudwatch_regions( ), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_non-existent_region_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_non-existent_region_message'] table_name = 'cloudwatch_logs' @@ -315,21 +303,19 @@ def test_cloudwatch_regions( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # ------------------------------------------ TEST_INSPECTOR_PATH ------------------------------------------------------- -t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'inspector_configuration_regions.yaml') -t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_inspector_regions.yaml') - -t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path) -configurations = load_configuration_template( - t3_configurations_path, t3_configuration_parameters, t3_configuration_metadata -) +# Configure T3 test +configurator.configure_test(configuration_file='inspector_configuration_regions.yaml', + cases_file='cases_inspector_regions.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(configurations, t3_configuration_metadata), ids=t3_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_inspector_regions( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -412,7 +398,7 @@ def test_inspector_regions( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -420,15 +406,15 @@ def test_inspector_regions( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] if expected_results: log_monitor.start( - timeout=TIMEOUTS[20], + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_service_event_processed(expected_results, service_type), accumulations=len(regions_list) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_event_number'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_event_number'] else: log_monitor.start( @@ -438,7 +424,7 @@ def test_inspector_regions( ), ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_non-existent_region_message'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_non-existent_region_message'] table_name = 'aws_services' @@ -455,7 +441,7 @@ def test_inspector_regions( 
callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # Detect any ERROR message log_monitor.start( @@ -463,4 +449,4 @@ def test_inspector_regions( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] diff --git a/tests/integration/test_aws/test_remove_from_bucket.py b/tests/integration/test_aws/test_remove_from_bucket.py index 8c6dc85bd89..68a3147e68b 100644 --- a/tests/integration/test_aws/test_remove_from_bucket.py +++ b/tests/integration/test_aws/test_remove_from_bucket.py @@ -1,38 +1,36 @@ -import os +# Copyright (C) 2015, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +""" +This module will contain all cases for the remove from bucket test suite +""" + import pytest +# qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.constants.paths.configurations import TEMPLATE_DIR, TEST_CASES_DIR -from wazuh_testing.modules.aws import event_monitor, local_internal_options # noqa: F401 -from wazuh_testing.modules.aws.cloudwatch_utils import log_stream_exists -from wazuh_testing.modules.aws.s3_utils import file_exists -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) +from wazuh_testing.modules.aws.utils import log_stream_exists, file_exists -pytestmark = [pytest.mark.server] +# Local module imports +from . import event_monitor +from .utils import ERROR_MESSAGE, TestConfigurator, local_internal_options +pytestmark = [pytest.mark.server] -# Generic vars -MODULE = 'remove_from_bucket_test_module' -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') -CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, TEMPLATE_DIR, MODULE) -TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, TEST_CASES_DIR, MODULE) +# Set test configurator for the module +configurator = TestConfigurator(module='remove_from_bucket_test_module') # ---------------------------------------------------- TEST_REMOVE_FROM_BUCKET ----------------------------------------- -# Configuration and cases data -t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_remove_from_bucket.yaml') -t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_remove_from_bucket.yaml') - -t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path) -t1_configurations = load_configuration_template( - t1_configurations_path, t1_configuration_parameters, t1_configuration_metadata -) +# Configure T1 test +configurator.configure_test(configuration_file='configuration_remove_from_bucket.yaml', + cases_file='cases_remove_from_bucket.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_remove_from_bucket( configuration, metadata, mark_cases_as_skipped, upload_and_delete_file_to_s3, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, @@ -113,7 +111,7 @@ def 
test_remove_from_bucket( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -121,7 +119,7 @@ def test_remove_from_bucket( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] assert not file_exists(filename=metadata['uploaded_file'], bucket_name=bucket_name) @@ -131,22 +129,19 @@ def test_remove_from_bucket( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # ---------------------------------------------------- TEST_REMOVE_LOG_STREAM ------------------------------------------ -# Configuration and cases data -t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_remove_log_stream.yaml') -t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_remove_log_streams.yaml') - -t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path) -t2_configurations = load_configuration_template( - t2_configurations_path, t2_configuration_parameters, t2_configuration_metadata -) +# Configure T2 test +configurator.configure_test(configuration_file='configuration_remove_log_stream.yaml', + cases_file='cases_remove_log_streams.yaml') @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids) +@pytest.mark.parametrize('configuration, metadata', + zip(configurator.test_configuration_template, configurator.metadata), + ids=configurator.cases_ids) def test_remove_log_stream( configuration, metadata, create_log_stream, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, @@ -225,7 +220,7 @@ def test_remove_log_stream( callback=event_monitor.callback_detect_aws_module_start ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['failed_start'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly log_monitor.start( @@ -233,7 +228,7 @@ def test_remove_log_stream( callback=event_monitor.callback_detect_aws_module_called(parameters) ) - assert log_monitor.callback_result is not None, ERROR_MESSAGES['incorrect_parameters'] + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] assert not log_stream_exists(log_stream=metadata['log_stream'], log_group=log_group_name) @@ -243,4 +238,4 @@ def test_remove_log_stream( callback=event_monitor.callback_detect_all_aws_err ) - assert log_monitor.callback_result is None, ERROR_MESSAGES['error_found'] + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py new file mode 100644 index 00000000000..3cf22fa8236 --- /dev/null +++ b/tests/integration/test_aws/utils.py @@ -0,0 +1,115 @@ +# Copyright (C) 2015-2023, Wazuh Inc. +# Created by Wazuh, Inc. . 
+# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2
+
+"""
+ This file contains constants and other utilities to be used in the AWS integration test module.
+"""
+
+# qa-integration-framework imports
+from wazuh_testing.utils.configuration import (
+    get_test_cases_data,
+    load_configuration_template,
+)
+from wazuh_testing.modules.monitord import configuration as monitord_config
+
+from os.path import join, dirname, realpath
+
+# CONSTANTS
+TEMPLATE_DIR = 'configuration_template'
+TEST_CASES_DIR = 'test_cases'
+WAZUH_MODULES_DEBUG = 'wazuh_modules.debug'
+
+ERROR_MESSAGE = {
+
+    "failed_start": "The AWS module did not start as expected",
+    "incorrect_parameters": "The AWS module was not called with the correct parameters",
+    "error_found": "Found error message on AWS module",
+    "incorrect_event_number": "The AWS module did not process the expected number of events",
+    "incorrect_non-existent_region_message": "The AWS module did not show correct message about non-existent region",
+    "incorrect_no_existent_log_group": "The AWS module did not show correct message about non-existent log group",
+    "incorrect_empty_path_message": "The AWS module did not show correct message about empty path",
+    "incorrect_empty_path_suffix_message": "The AWS module did not show correct message about empty path_suffix",
+    "incorrect_error_message": "The AWS module did not show the expected error message",
+    "incorrect_empty_value_message": "The AWS module did not show the expected message about empty value",
+    "incorrect_legacy_warning": "The AWS module did not show the expected legacy warning",
+    "incorrect_warning": "The AWS module did not show the expected warning",
+    "incorrect_invalid_value_message": "The AWS module did not show the expected message about invalid value",
+    "incorrect_service_calls_amount": "The AWS module was not called for bucket or service the right amount of times",
+    "unexpected_number_of_events_found": "Some logs may have been processed, "
+                                         "or the results found are more than expected",
+    "incorrect_marker": "The AWS module did not use the correct marker",
+    "incorrect_no_region_found_message": "The AWS module did not show correct message about non-existent region",
+    "incorrect_discard_regex_message": "The AWS module did not show the correct message about discard regex or, "
+                                       "did not process the expected amount of logs",
+    "failed_sqs_message_retrieval": "The AWS module did not retrieve the expected message from the SQS Queue",
+    "failed_message_handling": "The AWS module did not handle the expected message"
+}
+
+TIMEOUT = {
+
+    10: 10,
+    20: 20
+}
+
+# Paths
+TEST_DATA_PATH = join(dirname(realpath(__file__)), 'data')
+
+
+# Set local internal options
+local_internal_options = {WAZUH_MODULES_DEBUG: '2',
+                          monitord_config.MONITORD_ROTATE_LOG: '0'}
+
+
+# Classes
+class TestConfigurator:
+    """
+    TestConfigurator class is responsible for configuring test data and parameters for a specific test module.
+
+    Attributes:
+    - module (str): The name of the test module.
+    - configuration_path (str): The path to the configuration directory for the test module.
+    - test_cases_path (str): The path to the test cases directory for the test module.
+    - metadata (list): Test metadata retrieved from the test cases.
+    - parameters (list): Test parameters retrieved from the test cases.
+    - cases_ids (list): Identifiers for the test cases.
+    - test_configuration_template (list): The loaded configuration template for the test module.
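+
+    Example (illustrative sketch only; the module and file names below are placeholders, not fixed framework values):
+
+        configurator = TestConfigurator(module='some_test_module')
+        configurator.configure_test(configuration_file='configuration_some.yaml',
+                                    cases_file='cases_some.yaml')
+
+        # The resulting configurator.test_configuration_template, configurator.metadata and
+        # configurator.cases_ids can then be passed to pytest.mark.parametrize in each test module.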
+ + """ + def __init__(self, module): + self.module = module + self.configuration_path = join(TEST_DATA_PATH, TEMPLATE_DIR, self.module) + self.test_cases_path = join(TEST_DATA_PATH, TEST_CASES_DIR, self.module) + self.metadata = None + self.parameters = None + self.cases_ids = None + self.test_configuration_template = None + + def configure_test(self, configuration_file="", cases_file=""): + """ + Configures the test data and parameters for the given test module. + + Args: + - configuration_file (str): The name of the configuration file. + - cases_file (str): The name of the test cases file. + + Returns: + None + """ + # Set test cases path + cases_path = join(self.test_cases_path, cases_file) + + # set test cases data + self.parameters, self.metadata, self.cases_ids = get_test_cases_data(cases_path) + + # Set test configuration template for tests with config files + if configuration_file != "": + # Set config path + configurations_path = join(self.configuration_path, configuration_file) + + # load configuration template + self.test_configuration_template = load_configuration_template( + configurations_path, + self.parameters, + self.metadata + ) From 1e5b2b6ae516267673440ee6b445d2b6cbe87204 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Mon, 4 Dec 2023 15:35:21 -0300 Subject: [PATCH 282/419] Fix formatting --- tests/integration/test_aws/event_monitor.py | 18 +++++++++--------- tests/integration/test_aws/test_basic.py | 19 +++++++++---------- .../test_aws/test_custom_bucket.py | 6 ++++-- 3 files changed, 22 insertions(+), 21 deletions(-) diff --git a/tests/integration/test_aws/event_monitor.py b/tests/integration/test_aws/event_monitor.py index 09aaa863e38..2ea3251fe92 100644 --- a/tests/integration/test_aws/event_monitor.py +++ b/tests/integration/test_aws/event_monitor.py @@ -113,9 +113,9 @@ def callback_detect_aws_empty_value(line): """ if ( - re.match(fr"{INVALID_EMPTY_TYPE_ERROR}", line) or - re.match(fr"{EMPTY_CONTENT_ERROR}", line) or - re.match(fr"{EMPTY_CONTENT_WARNING}", line) + re.match(fr"{INVALID_EMPTY_TYPE_ERROR}", line) or + re.match(fr"{EMPTY_CONTENT_ERROR}", line) or + re.match(fr"{EMPTY_CONTENT_WARNING}", line) ): return line @@ -131,10 +131,10 @@ def callback_detect_aws_invalid_value(line): """ if ( - re.match(fr"{INVALID_EMPTY_SERVICE_TYPE_ERROR}", line) or - re.match(fr"{INVALID_TAG_CONTENT_ERROR}", line) or - re.match(fr"{PARSING_BUCKET_ERROR_WARNING}", line), - re.match(fr"{PARSING_SERVICE_ERROR_WARNING}", line) + re.match(fr"{INVALID_EMPTY_SERVICE_TYPE_ERROR}", line) or + re.match(fr"{INVALID_TAG_CONTENT_ERROR}", line) or + re.match(fr"{PARSING_BUCKET_ERROR_WARNING}", line), + re.match(fr"{PARSING_SERVICE_ERROR_WARNING}", line) ): return line @@ -150,8 +150,8 @@ def callback_detect_bucket_or_service_call(line): """ if ( - re.match(fr"{SERVICE_ANALYSIS}", line) or - re.match(fr"{BUCKET_ANALYSIS}", line) + re.match(fr"{SERVICE_ANALYSIS}", line) or + re.match(fr"{BUCKET_ANALYSIS}", line) ): return line diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py index dba1aa1a2a6..a0d94aa5752 100644 --- a/tests/integration/test_aws/test_basic.py +++ b/tests/integration/test_aws/test_basic.py @@ -11,7 +11,6 @@ # qa-integration-framework imports from wazuh_testing import session_parameters - # Local module imports from . 
import event_monitor from .utils import ERROR_MESSAGE, TestConfigurator, local_internal_options @@ -96,7 +95,7 @@ def test_bucket_defaults( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_aws_module_start ) - + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly @@ -104,7 +103,7 @@ def test_bucket_defaults( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_aws_module_called(parameters) ) - + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] # Detect any ERROR message @@ -126,9 +125,9 @@ def test_bucket_defaults( @pytest.mark.parametrize('configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_service_defaults( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring +def test_service_defaults(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, + restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. @@ -213,7 +212,7 @@ def test_service_defaults( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_all_aws_err ) - + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] @@ -227,9 +226,9 @@ def test_service_defaults( @pytest.mark.parametrize('configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_inspector_defaults( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring +def test_inspector_defaults(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, + restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. diff --git a/tests/integration/test_aws/test_custom_bucket.py b/tests/integration/test_aws/test_custom_bucket.py index 9f62c8bc5b2..7f96412aeb9 100644 --- a/tests/integration/test_aws/test_custom_bucket.py +++ b/tests/integration/test_aws/test_custom_bucket.py @@ -32,7 +32,8 @@ ids=configurator.cases_ids) def test_custom_bucket_defaults(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, - restart_wazuh_function, file_monitoring): + restart_wazuh_function, file_monitoring +): """ description: Test the AWS S3 custom bucket module is invoked with the expected parameters and no error occurs. 
@@ -136,7 +137,8 @@ def test_custom_bucket_defaults(configuration, metadata, load_wazuh_basic_config ids=configurator.cases_ids) def test_custom_bucket_logs(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, - restart_wazuh_function, file_monitoring, upload_and_delete_file_to_s3): + restart_wazuh_function, file_monitoring, upload_and_delete_file_to_s3 +): """ description: Test the AWS S3 custom bucket module is invoked with the expected parameters and retrieve the messages from the SQS Queue. From 02bc3ddf3b97d7240f766781579697e7af997012 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Mon, 4 Dec 2023 15:58:44 -0300 Subject: [PATCH 283/419] Update README --- tests/integration/test_aws/README.md | 57 +++++++++++----------------- 1 file changed, 23 insertions(+), 34 deletions(-) diff --git a/tests/integration/test_aws/README.md b/tests/integration/test_aws/README.md index a644cff8f15..93a461b10d7 100644 --- a/tests/integration/test_aws/README.md +++ b/tests/integration/test_aws/README.md @@ -1,8 +1,9 @@ -# AWS Integration +# AWS Integration tests ## Description -It is a _wodle based_ module that has a capability to pull logs from several AWS services. +It is a _wodle based_ module that test the capabilities of the Wazuh AWS integration, pulling logs from different +buckets and services. ## Tests directory structure @@ -11,6 +12,7 @@ wazuh/tests/integration/test_aws ├── data │   ├── configuration_template │   │   ├── basic_test_module +│   │   ├── custom_bucket_test_module │   │   ├── discard_regex_test_module │   │   ├── log_groups_test_module │   │   ├── only_logs_after_test_module @@ -21,6 +23,7 @@ wazuh/tests/integration/test_aws │   │   └── remove_from_bucket_test_module │   └── test_cases │   ├── basic_test_module +│   ├── custom_bucket_test_module │   ├── discard_regex_test_module │   ├── log_groups_test_module │   ├── only_logs_after_test_module @@ -33,6 +36,7 @@ wazuh/tests/integration/test_aws ├── README.md ├── conftest.py ├── test_basic.py +├── test_custom_bucket.py ├── test_discard_regex.py ├── test_log_groups.py ├── test_only_logs_after.py @@ -43,20 +47,6 @@ wazuh/tests/integration/test_aws └── utils.py ``` -## Deps directory structure - -```bash -qa-integration-framework/src/wazuh_testing/modules/aws -├── __init__.py -├── cli_utils.py -├── cloudwatch_utils.py -├── data_generator.py -├── db_utils.py -├── event_monitor.py -├── exceptions.py -└── s3_utils.py -``` - ## Requirements - [Proper testing environment](#Setting up a test environment) @@ -71,8 +61,9 @@ For a step-by-step example guide using linux go to the [test setup section](#lin ## Configuration settings -- **credentials** - Set the credentials at `$HOME/.aws/credentials` (being `HOME` the home directory of the user who runs the tests, more information [here](https://documentation.wazuh.com/current/amazon/services/prerequisites/credentials.html#profiles)) with the content: +- **Credentials**: + Set the credentials at `$HOME/.aws/credentials` (being `HOME` the home directory of the user who runs the tests, + more information [here](https://documentation.wazuh.com/current/amazon/services/prerequisites/credentials.html#profiles) with the content: ```ini [qa] @@ -103,21 +94,18 @@ _We are using **Ubuntu 22.04** for this example:_ - Install **Wazuh** -- Install python tests dependencies: +- Install Python tests dependencies: - ```shell script - # Install pip - apt install python3-pip git -y - - # Clone `wazuh` repository within your 
testing environment - git clone https://github.com/wazuh/wazuh.git +```shell script +# Install pip +apt install python3-pip git -y + +# Clone the `qa-integration-framework` repository withing your testing environment +git clone https://github.com/wazuh/qa-integration-framework.git - # Clone the `qa-integration-framework` repository withing your testing environment - git clone https://github.com/wazuh/qa-integration-framework.git - - # Install tests dependencies - python3 -m pip install qa-integration-framework/ - ``` +# Install tests dependencies +python3 -m pip install qa-integration-framework/ +``` ## Integration tests @@ -133,15 +121,16 @@ from the closest one, it will look for the next one (if possible) until reaching need to run every test from the following path, where the general _conftest_ is: ```shell script -cd wazuh/tests/integration/test_aws/ + cd wazuh/tests/integration/test_aws/ ``` To run any test, we just need to call `pytest` from `python3` using the following line: ```shell script -python3 -m pytest [options] [file_or_dir] [file_or_dir] [...] + python3 -m pytest [options] [file_or_dir] [file_or_dir] [...] ``` + **Options:** - `v`: verbosity level (-v or -vv. Highly recommended to use -vv when tests are failing) @@ -153,7 +142,7 @@ python3 -m pytest [options] [file_or_dir] [file_or_dir] [...] - `--tier`: only run tests with given tier (ex. --tier 2) - `--html`: generates a HTML report for the test results. (ex. --html=report.html) - `--default-timeout`: overwrites the default timeout (in seconds). This value is used to make a test fail if a - condition is not met before the given time lapse. Some tests make use of this value and other has other fixed timeout + condition is not met before the given timelapse. Some tests make use of this value and other has other fixed timeout that cannot be modified. _Use `-h` to see the rest or check its [documentation](https://docs.pytest.org/en/latest/usage.html)._ From 319aad3c78dd42840c9ce12e199427f24b5413b0 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Thu, 14 Dec 2023 13:03:54 -0300 Subject: [PATCH 284/419] Fix formatting and clean unused code --- tests/integration/test_aws/conftest.py | 2 +- tests/integration/test_aws/test_only_logs_after.py | 1 - tests/integration/test_aws/test_parser.py | 3 --- tests/integration/test_aws/utils.py | 1 - 4 files changed, 1 insertion(+), 6 deletions(-) diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index e65c6cdd4ba..2db236bd325 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -3,7 +3,7 @@ # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 """ -This module contain all necessary components (fixtures, classes, methods)to configure the test for its execution. +This module contains all necessary components (fixtures, classes, methods)to configure the test for its execution. 
""" import pytest diff --git a/tests/integration/test_aws/test_only_logs_after.py b/tests/integration/test_aws/test_only_logs_after.py index d6ae20dd95b..73050fdd72d 100644 --- a/tests/integration/test_aws/test_only_logs_after.py +++ b/tests/integration/test_aws/test_only_logs_after.py @@ -232,7 +232,6 @@ def test_service_without_only_logs_after( """ service_type = metadata['service_type'] log_group_name = metadata['log_group_name'] - expected_results = metadata['expected_results'] parameters = [ 'wodles/aws/aws-s3', diff --git a/tests/integration/test_aws/test_parser.py b/tests/integration/test_aws/test_parser.py index 42cdb8e9bf6..7b8497f3e3f 100644 --- a/tests/integration/test_aws/test_parser.py +++ b/tests/integration/test_aws/test_parser.py @@ -423,9 +423,6 @@ def test_invalid_values_in_bucket( input_description: - The `configuration_values_in_bucket` file provides the configuration for this test. """ - - - log_monitor.start( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_aws_invalid_value, diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py index 3cf22fa8236..d0a0ff7f5df 100644 --- a/tests/integration/test_aws/utils.py +++ b/tests/integration/test_aws/utils.py @@ -47,7 +47,6 @@ } TIMEOUT = { - 10: 10, 20: 20 } From e1a24ef28f235c911837673bf23823d9c2604d65 Mon Sep 17 00:00:00 2001 From: Eduardo Leon Wazuh Date: Wed, 19 Jul 2023 08:05:36 -0300 Subject: [PATCH 285/419] Fix imports Add new fixture --- tests/integration/conftest.py | 8 +- tests/integration/test_aws/README.md | 126 +++++++++--------- tests/integration/test_aws/conftest.py | 73 +++++++++- tests/integration/test_aws/event_monitor.py | 18 +-- tests/integration/test_aws/test_basic.py | 19 +-- .../test_aws/test_discard_regex.py | 3 - .../test_aws/test_only_logs_after.py | 1 + tests/integration/test_aws/test_parser.py | 3 + .../integration/test_aws/test_path_suffix.py | 3 + tests/integration/test_aws/utils.py | 1 + 10 files changed, 161 insertions(+), 94 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 9e1735acd46..504b1abb618 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -181,10 +181,6 @@ def configure_local_internal_options_function(request): It uses the test variable local_internal_options. This should be a dictionary wich keys and values corresponds to the internal option configuration, For example: local_internal_options = {'monitord.rotate_log': '0', 'syscheck.debug': '0' } - - Args: - request (fixture): Provide information on the executing test function. - """ try: local_internal_options = request.param @@ -208,7 +204,7 @@ def configure_local_internal_options_function(request): wazuh_configuration.set_local_internal_options_dict(backup_local_internal_options) -@pytest.fixture() +@pytest.fixture(scope='function') def restart_wazuh_function(request): """Restart before starting a test, and stop it after finishing. @@ -244,7 +240,7 @@ def restart_wazuh_function(request): control_service('stop', daemon=daemon) -@pytest.fixture() +@pytest.fixture(scope='function') def file_monitoring(request): """Fixture to handle the monitoring of a specified file. 
diff --git a/tests/integration/test_aws/README.md b/tests/integration/test_aws/README.md index 93a461b10d7..f06aac5e543 100644 --- a/tests/integration/test_aws/README.md +++ b/tests/integration/test_aws/README.md @@ -1,69 +1,63 @@ -# AWS Integration tests +# AWS Integration ## Description -It is a _wodle based_ module that test the capabilities of the Wazuh AWS integration, pulling logs from different -buckets and services. +It is a _wodle based_ module that has a capability to pull logs from several AWS services. ## Tests directory structure ```bash -wazuh/tests/integration/test_aws +wazuh-qa/tests/integration/test_aws +├── conftest.py ├── data │   ├── configuration_template │   │   ├── basic_test_module -│   │   ├── custom_bucket_test_module │   │   ├── discard_regex_test_module -│   │   ├── log_groups_test_module │   │   ├── only_logs_after_test_module -│   │   ├── parser_test_module │   │   ├── path_suffix_test_module │   │   ├── path_test_module │   │   ├── regions_test_module │   │   └── remove_from_bucket_test_module │   └── test_cases │   ├── basic_test_module -│   ├── custom_bucket_test_module │   ├── discard_regex_test_module -│   ├── log_groups_test_module │   ├── only_logs_after_test_module -│   ├── parser_test_module │   ├── path_suffix_test_module │   ├── path_test_module │   ├── regions_test_module │   └── remove_from_bucket_test_module -├── __init__.py -├── README.md -├── conftest.py +├── README.MD ├── test_basic.py -├── test_custom_bucket.py ├── test_discard_regex.py -├── test_log_groups.py ├── test_only_logs_after.py ├── test_path.py ├── test_path_suffix.py ├── test_regions.py -├── test_remove_from_bucket.py -└── utils.py +└── test_remove_from_bucket.py ``` -## Requirements +## Deps directory structure -- [Proper testing environment](#Setting up a test environment) - -- [Wazuh](https://github.com/wazuh/qa-integration-framework) repository. - -- [Testing framework](https://github.com/wazuh/qa-integration-framework) installed. +```bash +wazuh-qa/deps/wazuh_testing/wazuh_testing/modules/aws +├── cli_utils.py +├── constants.py +├── data_generator.py +├── db_utils.py +├── event_monitor.py +├── __init__.py +└── s3_utils.py +``` -- Configured buckets, log groups and an inspector assessment with test data in AWS. +## Requirements -For a step-by-step example guide using linux go to the [test setup section](#linux) +- The only extra dependency is `boto3` +- The module will assume there are already buckets, log groups and an inspector assessment with test data in AWS. ## Configuration settings -- **Credentials**: - Set the credentials at `$HOME/.aws/credentials` (being `HOME` the home directory of the user who runs the tests, - more information [here](https://documentation.wazuh.com/current/amazon/services/prerequisites/credentials.html#profiles) with the content: +- **credentials** + Set the credentials at `$HOME/.aws/credentials` (being `HOME` the home directory of the user who runs the tests, more information [here](https://documentation.wazuh.com/current/amazon/services/prerequisites/credentials.html#profiles)) with the content: ```ini [qa] @@ -73,7 +67,7 @@ aws_secret_access_key = ## Setting up a test environment -You will need a proper environment to run the integration tests. You can use Docker or any virtual machine. If you have +You will need a proper environment to run the integration tests. You can use any virtual machine you wish. 
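As an aside, a quick way to confirm that the `qa` profile above is actually visible before provisioning anything is a one-off boto3 check. This is only a sanity-check sketch, not part of the test suite; it assumes `boto3` is installed and the credentials file shown above exists:

```python
# Sanity-check sketch (not part of the patch): verify the 'qa' profile from
# ~/.aws/credentials resolves to a valid AWS identity before running the tests.
import boto3

session = boto3.Session(profile_name='qa')
identity = session.client('sts').get_caller_identity()
print(identity['Account'], identity['Arn'])
```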
If you have one already, go to the [integration tests section](#integration-tests) If you use [Vagrant](https://www.vagrantup.com/downloads.html) @@ -94,18 +88,21 @@ _We are using **Ubuntu 22.04** for this example:_ - Install **Wazuh** -- Install Python tests dependencies: +- Install python tests dependencies: -```shell script -# Install pip -apt install python3-pip git -y + ```shell script + # Install pip + apt install python3-pip -# Clone the `qa-integration-framework` repository withing your testing environment -git clone https://github.com/wazuh/qa-integration-framework.git + # Clone your `wazuh-qa` repository within your testing environment + cd wazuh-qa -# Install tests dependencies -python3 -m pip install qa-integration-framework/ -``` + # Install Python libraries + python3 -m pip install -r requirements.txt + + # Install test dependecies + python3 -m pip install deps/wazuh-testing + ``` ## Integration tests @@ -121,16 +118,15 @@ from the closest one, it will look for the next one (if possible) until reaching need to run every test from the following path, where the general _conftest_ is: ```shell script - cd wazuh/tests/integration/test_aws/ +cd wazuh-qa/tests/integration ``` To run any test, we just need to call `pytest` from `python3` using the following line: ```shell script - python3 -m pytest [options] [file_or_dir] [file_or_dir] [...] +python3 -m pytest [options] [file_or_dir] [file_or_dir] [...] ``` - **Options:** - `v`: verbosity level (-v or -vv. Highly recommended to use -vv when tests are failing) @@ -142,7 +138,7 @@ To run any test, we just need to call `pytest` from `python3` using the followin - `--tier`: only run tests with given tier (ex. --tier 2) - `--html`: generates a HTML report for the test results. (ex. --html=report.html) - `--default-timeout`: overwrites the default timeout (in seconds). This value is used to make a test fail if a - condition is not met before the given timelapse. Some tests make use of this value and other has other fixed timeout + condition is not met before the given time lapse. Some tests make use of this value and other has other fixed timeout that cannot be modified. _Use `-h` to see the rest or check its [documentation](https://docs.pytest.org/en/latest/usage.html)._ @@ -153,22 +149,32 @@ check its documentation for further information. #### AWS integration tests example ```bash -#root@wazuh-master:/wazuh/tests/integration# pytest -x test_aws/ --disable-warnings -==================================== test session starts ==================================== -platform linux -- Python 3.10.12, pytest-7.1.2, pluggy-1.2.0 -rootdir: /wazuh/tests/integration, configfile: pytest.ini -plugins: testinfra-5.0.0, metadata-3.0.0, html-3.1.1 -collected 195 items - -test_aws/test_basic.py ................ [ 8%] -test_aws/test_discard_regex.py .............. [ 15%] -test_aws/test_log_groups.py .. [ 16%] -test_aws/test_only_logs_after.py .............................................x. [ 40%] -test_aws/test_parser.py .......................... [ 53%] -test_aws/test_path.py .......................................... [ 75%] -test_aws/test_path_suffix.py ......... [ 80%] -test_aws/test_regions.py ........................ [ 92%] -test_aws/test_remove_from_bucket.py ...sss......... 
[100%] - -============ 191 passed, 3 skipped, 1 xfailed, 7 warnings in 3723.08s (1:02:03) ============= +# python3 -m pytest -vvx test_aws/ -k cloudtrail +=========================================================== test session starts ====================================================== +platform linux -- Python 3.10.6, pytest-7.1.2, pluggy-1.0.0 -- /usr/bin/python3 +cachedir: .pytest_cache +metadata: {'Python': '3.10.6', 'Platform': 'Linux-5.15.0-58-generic-x86_64-with-glibc2.35', +'Packages': {'pytest': '7.1.2', 'py': '1.10.0', 'pluggy': '1.0.0'}, +'Plugins': {'metadata': '2.0.2', 'html': '3.1.1', 'testinfra': '5.0.0'}} +rootdir: /home/vagrant/qa/tests/integration, configfile: pytest.ini +plugins: metadata-2.0.2, html-3.1.1, testinfra-5.0.0 +collected 15 items + +test_aws/test_basic.py::test_defaults[cloudtrail_defaults] PASSED [ 6%] +test_aws/test_discard_regex.py::test_discard_regex[cloudtrail_discard_regex] PASSED [ 13%] +test_aws/test_only_logs_after.py::test_without_only_logs_after[cloudtrail_without_only_logs_after] PASSED [ 20%] +test_aws/test_only_logs_after.py::test_with_only_logs_after[cloudtrail_with_only_logs_after] PASSED [ 26%] +test_aws/test_only_logs_after.py::test_multiple_calls[cloudtrail_only_logs_after_multiple_calls] PASSED [ 33%] +test_aws/test_path.py::test_path[cloudtrail_path_with_data] PASSED [ 40%] +test_aws/test_path.py::test_path[cloudtrail_path_without_data] PASSED [ 46%] +test_aws/test_path.py::test_path[cloudtrail_inexistent_path] PASSED [ 53%] +test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_path_suffix_with_data] PASSED [ 60%] +test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_path_suffix_without_data] PASSED [ 66%] +test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_inexistent_path_suffix] PASSED [ 73%] +test_aws/test_regions.py::test_regions[cloudtrail_region_with_data] PASSED [ 80%] +test_aws/test_regions.py::test_regions[cloudtrail_regions_with_data] PASSED [ 86%] +test_aws/test_regions.py::test_regions[cloudtrail_inexistent_region] PASSED [ 93%] +test_aws/test_remove_from_bucket.py::test_remove_from_bucket[cloudtrail_remove_from_bucket] PASSED [100%] + +=============================================== 15 passed, 2 warnings in 332.67s (0:05:32) =========================================== ``` diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 2db236bd325..1ea99c433ec 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -3,10 +3,11 @@ # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 """ -This module contains all necessary components (fixtures, classes, methods)to configure the test for its execution. +This module contain all necessary components (fixtures, classes, methods)to configure the test for its execution. 
""" - +import botocore import pytest +from uuid import uuid4 # qa-integration-framework imports from wazuh_testing.logger import logger @@ -15,16 +16,19 @@ PERMANENT_CLOUDWATCH_LOG_GROUP, ) from wazuh_testing.modules.aws.utils import ( + #create_bucket, create_log_events, create_log_group, create_log_stream, + #delete_bucket, delete_log_group, delete_log_stream, delete_file, + delete_s3_db, + delete_services_db, file_exists, upload_file ) -from wazuh_testing.modules.aws.utils import delete_s3_db, delete_services_db from wazuh_testing.utils.services import control_service @@ -49,6 +53,47 @@ def restart_wazuh_function_without_exception(daemon=None): # S3 fixtures +@pytest.fixture(scope="session", autouse=True) +def create_session_uuid(): + uuid = str(uuid4())[:8] + return uuid + + +@pytest.fixture(scope="session", autouse=True) +def delete_buckets(): + bucket_list = [] + + yield bucket_list + + for bucket in bucket_list: + #delete_bucket(bucket) + pass + + +@pytest.fixture() +def create_bucket(create_session_uuid, bucket_list, metadata): + """ + + Parameters + ---------- + bucket_list + create_session_uuid + metadata + + Returns + ------- + + """ + bucket_name = metadata['bucket_name'] + bucket_name += f"-{create_session_uuid}" + + create_bucket(bucket_name=bucket_name) + metadata['bucket_name'] = bucket_name + + yield + + bucket_list.append(bucket_name) + @pytest.fixture def upload_and_delete_file_to_s3(metadata): """Upload a file to S3 bucket and delete after the test ends. @@ -94,19 +139,33 @@ def fixture_create_log_stream(metadata): Args: metadata (dict): Metadata to get the parameters. """ + SKIP_LOG_GROUP_CREATION = [PERMANENT_CLOUDWATCH_LOG_GROUP, FAKE_CLOUDWATCH_LOG_GROUP] + print(PERMANENT_CLOUDWATCH_LOG_GROUP, FAKE_CLOUDWATCH_LOG_GROUP) log_group_names = [item.strip() for item in metadata['log_group_name'].split(',')] for log_group_name in log_group_names: if log_group_name in SKIP_LOG_GROUP_CREATION: continue + import random + log_group_name += f"-{random.randint(10**3, 10**4 - 1)}" + logger.debug('Creating log group: %s', log_group_name) - create_log_group(log_group_name) + try: + create_log_group(log_group_name) + except botocore.ResourceAlreadyExistsException as e: + pass log_stream = create_log_stream(log_group_name) logger.debug('Created log stream "%s" within log group "%s"', log_stream, log_group_name) - create_log_events( - log_stream=log_stream, log_group=log_group_name, event_number=metadata.get('expected_results', 1) - ) + try: + create_log_events( + log_stream=log_stream, log_group=log_group_name, event_number=metadata.get('expected_results', 1) + ) + except botocore.errorfactory.ResourceAlreadyExistsException as e: + pass + except Exception as e: + print(e) + pass logger.debug('Created log events') metadata['log_stream'] = log_stream diff --git a/tests/integration/test_aws/event_monitor.py b/tests/integration/test_aws/event_monitor.py index 2ea3251fe92..09aaa863e38 100644 --- a/tests/integration/test_aws/event_monitor.py +++ b/tests/integration/test_aws/event_monitor.py @@ -113,9 +113,9 @@ def callback_detect_aws_empty_value(line): """ if ( - re.match(fr"{INVALID_EMPTY_TYPE_ERROR}", line) or - re.match(fr"{EMPTY_CONTENT_ERROR}", line) or - re.match(fr"{EMPTY_CONTENT_WARNING}", line) + re.match(fr"{INVALID_EMPTY_TYPE_ERROR}", line) or + re.match(fr"{EMPTY_CONTENT_ERROR}", line) or + re.match(fr"{EMPTY_CONTENT_WARNING}", line) ): return line @@ -131,10 +131,10 @@ def callback_detect_aws_invalid_value(line): """ if ( - re.match(fr"{INVALID_EMPTY_SERVICE_TYPE_ERROR}", 
line) or - re.match(fr"{INVALID_TAG_CONTENT_ERROR}", line) or - re.match(fr"{PARSING_BUCKET_ERROR_WARNING}", line), - re.match(fr"{PARSING_SERVICE_ERROR_WARNING}", line) + re.match(fr"{INVALID_EMPTY_SERVICE_TYPE_ERROR}", line) or + re.match(fr"{INVALID_TAG_CONTENT_ERROR}", line) or + re.match(fr"{PARSING_BUCKET_ERROR_WARNING}", line), + re.match(fr"{PARSING_SERVICE_ERROR_WARNING}", line) ): return line @@ -150,8 +150,8 @@ def callback_detect_bucket_or_service_call(line): """ if ( - re.match(fr"{SERVICE_ANALYSIS}", line) or - re.match(fr"{BUCKET_ANALYSIS}", line) + re.match(fr"{SERVICE_ANALYSIS}", line) or + re.match(fr"{BUCKET_ANALYSIS}", line) ): return line diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py index a0d94aa5752..dba1aa1a2a6 100644 --- a/tests/integration/test_aws/test_basic.py +++ b/tests/integration/test_aws/test_basic.py @@ -11,6 +11,7 @@ # qa-integration-framework imports from wazuh_testing import session_parameters + # Local module imports from . import event_monitor from .utils import ERROR_MESSAGE, TestConfigurator, local_internal_options @@ -95,7 +96,7 @@ def test_bucket_defaults( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_aws_module_start ) - + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly @@ -103,7 +104,7 @@ def test_bucket_defaults( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_aws_module_called(parameters) ) - + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] # Detect any ERROR message @@ -125,9 +126,9 @@ def test_bucket_defaults( @pytest.mark.parametrize('configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_service_defaults(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, - restart_wazuh_function, file_monitoring +def test_service_defaults( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. 
@@ -212,7 +213,7 @@ def test_service_defaults(configuration, metadata, load_wazuh_basic_configuratio timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_all_aws_err ) - + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] @@ -226,9 +227,9 @@ def test_service_defaults(configuration, metadata, load_wazuh_basic_configuratio @pytest.mark.parametrize('configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_inspector_defaults(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, - restart_wazuh_function, file_monitoring +def test_inspector_defaults( + configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. diff --git a/tests/integration/test_aws/test_discard_regex.py b/tests/integration/test_aws/test_discard_regex.py index acad70f2918..8c1b3841e05 100644 --- a/tests/integration/test_aws/test_discard_regex.py +++ b/tests/integration/test_aws/test_discard_regex.py @@ -55,7 +55,6 @@ def test_bucket_discard_regex( - teardown: - Truncate wazuh logs. - Restore initial configuration, both ossec.conf and local_internal_options.conf. - - Delete the uploaded file wazuh_min_version: 4.6.0 @@ -184,7 +183,6 @@ def test_cloudwatch_discard_regex_json( - teardown: - Truncate wazuh logs. - Restore initial configuration, both ossec.conf and local_internal_options.conf. - - Delete the uploaded file wazuh_min_version: 4.6.0 @@ -432,7 +430,6 @@ def test_inspector_discard_regex( - teardown: - Truncate wazuh logs. - Restore initial configuration, both ossec.conf and local_internal_options.conf. - - Delete the uploaded file wazuh_min_version: 4.6.0 diff --git a/tests/integration/test_aws/test_only_logs_after.py b/tests/integration/test_aws/test_only_logs_after.py index 73050fdd72d..d6ae20dd95b 100644 --- a/tests/integration/test_aws/test_only_logs_after.py +++ b/tests/integration/test_aws/test_only_logs_after.py @@ -232,6 +232,7 @@ def test_service_without_only_logs_after( """ service_type = metadata['service_type'] log_group_name = metadata['log_group_name'] + expected_results = metadata['expected_results'] parameters = [ 'wodles/aws/aws-s3', diff --git a/tests/integration/test_aws/test_parser.py b/tests/integration/test_aws/test_parser.py index 7b8497f3e3f..42cdb8e9bf6 100644 --- a/tests/integration/test_aws/test_parser.py +++ b/tests/integration/test_aws/test_parser.py @@ -423,6 +423,9 @@ def test_invalid_values_in_bucket( input_description: - The `configuration_values_in_bucket` file provides the configuration for this test. 
""" + + + log_monitor.start( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_aws_invalid_value, diff --git a/tests/integration/test_aws/test_path_suffix.py b/tests/integration/test_aws/test_path_suffix.py index 6287d1a2888..c95d2e4aef9 100644 --- a/tests/integration/test_aws/test_path_suffix.py +++ b/tests/integration/test_aws/test_path_suffix.py @@ -8,6 +8,9 @@ import pytest +import pydevd_pycharm +pydevd_pycharm.settrace('192.168.56.1', port=55555, stdoutToServer=True, stderrToServer=True) + # qa-integration-framework imports from wazuh_testing import session_parameters from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py index d0a0ff7f5df..3cf22fa8236 100644 --- a/tests/integration/test_aws/utils.py +++ b/tests/integration/test_aws/utils.py @@ -47,6 +47,7 @@ } TIMEOUT = { + 10: 10, 20: 20 } From 6f87c87a94fbec95bc9dfb701db4a49d218558bd Mon Sep 17 00:00:00 2001 From: Eduardo Date: Mon, 8 Jan 2024 13:12:41 -0300 Subject: [PATCH 286/419] Modify fixtures and data for test basic suite --- tests/integration/test_aws/configurator.py | 220 +++++++++++++ tests/integration/test_aws/conftest.py | 309 ++++++++++-------- .../bucket_configuration_defaults.yaml | 2 - .../cloudwatch_configuration_defaults.yaml | 2 - .../inspector_configuration_defaults.yaml | 2 - .../cases_bucket_defaults.yaml | 27 ++ .../cases_cloudwatch_defaults.yaml | 2 + .../cases_bucket_custom.yaml | 2 + tests/integration/test_aws/test_basic.py | 10 +- tests/integration/test_aws/utils.py | 60 +--- 10 files changed, 431 insertions(+), 205 deletions(-) create mode 100644 tests/integration/test_aws/configurator.py diff --git a/tests/integration/test_aws/configurator.py b/tests/integration/test_aws/configurator.py new file mode 100644 index 00000000000..388933a3251 --- /dev/null +++ b/tests/integration/test_aws/configurator.py @@ -0,0 +1,220 @@ +# Copyright (C) 2015-2023, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +""" + This file contain the Test Configurator class that will manage all resources and configurations for each test + module. +""" +from os.path import join +from uuid import uuid4 + +# qa-integration-framework imports +from wazuh_testing.utils.configuration import ( + get_test_cases_data, + load_configuration_template, +) +from wazuh_testing.modules.aws.utils import ( + create_bucket, + upload_log_events, + create_log_group, + create_log_stream, + delete_bucket, + delete_log_group, + delete_s3_db, + delete_services_db, + upload_bucket_file, + delete_resources, + generate_file +) + +# Local imports +from .utils import TEST_DATA_PATH, TEMPLATE_DIR, TEST_CASES_DIR + + +# Classes +class TestConfigurator: + """ + TestConfigurator class is responsible for configuring test data and parameters for a specific test module. + + Attributes: + - module (str): The name of the test module. + - configuration_path (str): The path to the configuration directory for the test module. + - test_cases_path (str): The path to the test cases directory for the test module. + - metadata (list): Test metadata retrieved from the test cases. + - parameters (list): Test parameters retrieved from the test cases. + - cases_ids (list): Identifiers for the test cases. + - test_configuration_template (list): The loaded configuration template for the test module. 
+ + """ + + def __init__(self): + self.module = None + self._metadata = None + self._cases_ids = None + self._test_configuration_template = None + self._set_session_id() + + @property + def module(self): + return self.module + + @module.setter + def module(self, test_module: str): + self.module = test_module + + @property + def metadata(self): + return self._metadata + + @metadata.setter + def metadata(self, value): + self._metadata = value + + @property + def cases_ids(self): + return self._cases_ids + + @cases_ids.setter + def cases_ids(self, value): + self._cases_ids = value + + def _set_session_id(self) -> None: + """Create and set the test session id.""" + self._session_id = str(uuid4())[:8] + print(f"This test session id is: {self._session_id}") + + def configure_test(self, configuration_file="", cases_file="") -> None: + """ + Configure and manage the resources for the test. + + Params + ------ + - configuration_file (str): The name of the configuration file. + - cases_file (str): The name of the test cases file. + """ + # Set configuration path + configuration_path = join(TEST_DATA_PATH, TEMPLATE_DIR, self.module) + + # Set test cases yaml path + cases_yaml_path = join(TEST_DATA_PATH, TEST_CASES_DIR, self.module, cases_file) + + # Backup test data file + backup_test_file = modify_file(test_data_path=cases_yaml_path) + + # Modify original file + resources = self._modify_original_file(test_data_path=cases_yaml_path) + + # Create resources for test + self._create_resources(resources=resources) + + # Set test cases data + parameters, self._metadata, self._cases_ids = get_test_cases_data(cases_yaml_path) + + # Set test configuration template for tests with config files + self._set_configuration_template(configuration_file=configuration_file, + parameters=parameters) + + yield + + # Delete resources + self._delete_resources(resources=resources) + + # Restore original file + restore_original_file(test_data_path=cases_yaml_path, + backup_file=backup_test_file) + + def _modify_original_file(self, test_data_path: str) -> set: + """Add session id to original yaml file resource name + + Returns + ------- + - resources (set): Set containing resources to create. + """ + resources = set() + # Read and Modify the cases yaml file + with open(test_data_path, 'w') as file: + lines = file.readlines() # Read all lines from the file + + for line in lines: + if 'BUCKET_NAME' in line or 'bucket_name' in line: + # Extract the bucket name, modify it, and write the modified line + parts = line.split(':') + if len(parts) > 1: + bucket_name = parts[1].strip() + self._session_id + resources.add(bucket_name) # Add only the modified bucket name to the set + modified_line = parts[0] + ': ' + bucket_name + '\n' + else: + modified_line = line + file.write(modified_line) + else: + file.write(line) + + file.truncate() # Truncate the file to the current position to remove any leftover content + + return resources + + def _set_configuration_template(self, configuration_file: str, parameters: str) -> None: + """Set the configuration template of the test + + Params + ------ + - configuration_file (str): The name of the configuration file. + - parameters (str): The test parameters. 
+ + """ + if configuration_file != "": + # Set config path + configuration_path = join(TEST_DATA_PATH, TEMPLATE_DIR, self.module, configuration_file) + + # load configuration template + self.test_configuration_template = load_configuration_template( + configuration_path, + parameters, + self._metadata + ) + + def _create_resources(self, resources: set) -> None: + """Create AWS resources for test execution + + Parameters + ---------- + - resources (set): Set containing resources to create. + + """ + pass + + def _delete_resources(self, resources): + pass + + +def modify_file(test_data_path: str) -> str: + """Backup test data file and modify it + + Parameters + ---------- + - test_data_path (str): Path of the original test file + + """ + with open(test_data_path, 'r') as original_file: + backup_content = original_file.read() + return backup_content + + +def restore_original_file(test_data_path: str, backup_file: str) -> None: + """Restore file to original state. + + Parameters + ---------- + - test_data_path (str): Path of test file. + + - backup_file (str): Backup test file. + + """ + with open(test_data_path, 'w') as original_file: + # Write the original content back to the file + original_file.write(backup_file) + + +# Instantiate configurator +configurator = TestConfigurator() diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 1ea99c433ec..8bd2b916f57 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -3,31 +3,27 @@ # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 """ -This module contain all necessary components (fixtures, classes, methods)to configure the test for its execution. +This module contain all necessary components (fixtures, classes, methods) to configure the test for its execution. """ -import botocore import pytest from uuid import uuid4 +from time import time +from botocore.exceptions import ClientError # qa-integration-framework imports from wazuh_testing.logger import logger -from wazuh_testing.constants.aws import ( - FAKE_CLOUDWATCH_LOG_GROUP, - PERMANENT_CLOUDWATCH_LOG_GROUP, -) from wazuh_testing.modules.aws.utils import ( - #create_bucket, - create_log_events, + create_bucket, + upload_log_events, create_log_group, create_log_stream, - #delete_bucket, + delete_bucket, delete_log_group, - delete_log_stream, - delete_file, delete_s3_db, delete_services_db, - file_exists, - upload_file + upload_bucket_file, + delete_resources, + generate_file ) from wazuh_testing.utils.services import control_service @@ -51,167 +47,208 @@ def restart_wazuh_function_without_exception(daemon=None): control_service('stop', daemon=daemon) -# S3 fixtures +"""S3 fixtures""" + + +@pytest.fixture +def upload_file_to_bucket(metadata): + """Upload a file to S3 bucket and delete after the test ends. -@pytest.fixture(scope="session", autouse=True) -def create_session_uuid(): - uuid = str(uuid4())[:8] - return uuid + Args: + metadata (dict): Metadata to get the parameters. 
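To make the intent of the fixture body that follows concrete, this is roughly what the `generate_file()` / `upload_bucket_file()` helpers imported above are expected to boil down to in boto3 terms. It is a sketch under that assumption only; the bucket and key names are made up for illustration:

```python
# Sketch only: approximate boto3 equivalent of uploading a generated log file
# to a test bucket and removing it afterwards.
import boto3

s3 = boto3.Session(profile_name='qa').client('s3')
bucket = 'wazuh-cloudtrail-integration-tests'          # illustrative
key = 'AWSLogs/2024/01/01/example_cloudtrail.json.gz'  # illustrative

s3.put_object(Bucket=bucket, Key=key, Body=b'<generated log data>')

# Teardown counterpart once the test is done with the object:
s3.delete_object(Bucket=bucket, Key=key)
```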
+ """ + # Get bucket name + bucket_name = metadata['bucket_name'] + # Get bucket type + bucket_type = metadata['bucket_type'] -@pytest.fixture(scope="session", autouse=True) -def delete_buckets(): - bucket_list = [] + # Generate file + data, filename = generate_file(bucket_type=bucket_type, + bucket_name=bucket_name) - yield bucket_list + try: + # Upload file to bucket + upload_bucket_file(bucket_name=bucket_name, + data=data, + filename=filename) - for bucket in bucket_list: - #delete_bucket(bucket) + logger.debug('Uploaded file: %s to bucket "%s"', filename, bucket_name) + + # Set filename for test execution + metadata['uploaded_file'] = filename + + except ClientError as error: + logger.error({ + "message": "Client error uploading file to bucket", + "bucket_name": bucket_name, + "filename": filename, + "error": str(error) + }) pass + except Exception as error: + logger.error({ + "message": "Broad error uploading file to bucket", + "bucket_name": bucket_name, + "filename": filename, + "error": str(error) + }) + pass + + +"""CloudWatch fixtures""" + @pytest.fixture() -def create_bucket(create_session_uuid, bucket_list, metadata): - """ +def create_test_log_group(create_session_id: str, create_and_delete_resources_list: list, metadata: dict): + """Create a bucket. Parameters ---------- - bucket_list - create_session_uuid - metadata + create_session_id (str): Test session id. + create_and_delete_resources_list (list): Resources list. + metadata (dict): Log group information. Returns ------- - + None """ - bucket_name = metadata['bucket_name'] - bucket_name += f"-{create_session_uuid}" - - create_bucket(bucket_name=bucket_name) - metadata['bucket_name'] = bucket_name - - yield - - bucket_list.append(bucket_name) - -@pytest.fixture -def upload_and_delete_file_to_s3(metadata): - """Upload a file to S3 bucket and delete after the test ends. + # Set variables from fixture + test_session_id = create_session_id + resources_list = create_and_delete_resources_list - Args: - metadata (dict): Metadata to get the parameters. - """ - bucket_name = metadata['bucket_name'] - filename = upload_file(bucket_type=metadata['bucket_type'], bucket_name=metadata['bucket_name']) - if filename != '': - logger.debug('Uploaded file: %s to bucket "%s"', filename, bucket_name) - metadata['uploaded_file'] = filename + # Get log group information and add session id + log_group_name = metadata["log_group_name"] + f"-{test_session_id}" - yield + try: + # Create log group + create_log_group(log_group_name=log_group_name) + logger.debug(f"Created log group: {log_group_name}") + + # Create resource dict + resource = { + "type": "log_group", + "name": log_group_name + } + + # Append created log group to resources list + resources_list.append(log_group_name) + + except ClientError as error: + logger.error({ + "message": "Client error creating log group", + "log_group": log_group_name, + "error": str(error) + }) + raise + + except Exception as error: + logger.error({ + "message": "Broad error creating log group", + "log_group": log_group_name, + "error": str(error) + }) + raise - if file_exists(filename=filename, bucket_name=bucket_name): - delete_file(filename=filename, bucket_name=bucket_name) - logger.debug('Deleted file: %s from bucket %s', filename, bucket_name) +@pytest.fixture() +def create_test_log_stream(metadata: dict): + """Create a log stream. -@pytest.fixture -def delete_file_from_s3(metadata): - """Delete a file from S3 bucket after the test ends. + Parameters + ---------- + metadata (dict): Log group information. 
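Similarly, the CloudWatch fixtures in this file build on `create_log_group()`, `create_log_stream()` and `upload_log_events()`. A rough boto3 sketch of what those helpers presumably amount to is shown below; the group and stream names are illustrative, and timestamps must be milliseconds since the epoch as the fixture code does with `int(time() * 1000)`:

```python
# Sketch only: approximate boto3 calls behind the CloudWatch Logs helpers.
from time import time

import boto3

logs = boto3.Session(profile_name='qa').client('logs')
group = 'wazuh-cloudwatchlogs-integration-tests-abc123'  # illustrative
stream = 'stream-abc123'                                 # illustrative

logs.create_log_group(logGroupName=group)
logs.create_log_stream(logGroupName=group, logStreamName=stream)
logs.put_log_events(
    logGroupName=group,
    logStreamName=stream,
    logEvents=[{'timestamp': int(time() * 1000), 'message': 'Test event number 0'}],
)

# Session teardown removes the whole group, streams included:
logs.delete_log_group(logGroupName=group)
```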
- Args: - metadata (dict): Metadata to get the parameters. + Returns + ------- + None """ - yield - - bucket_name = metadata['bucket_name'] - filename = metadata.get('filename') - if filename is not None: - delete_file(filename=filename, bucket_name=bucket_name) - logger.debug('Deleted file: %s from bucket %s', filename, bucket_name) + # Get log group + log_group_name = metadata['log_group_name'] + # Create random stream name + log_stream_name = str(uuid4()) -# CloudWatch fixtures + try: + # Create log stream + create_log_stream(log_group=log_group_name, + log_stream=log_stream_name) + logger.debug(f'Created log stream {log_stream_name} within log group {log_group_name}') + + metadata['log_stream'] = log_stream_name + + except ClientError as error: + logger.error({ + "message": "Client error creating log stream", + "log_group": log_group_name, + "error": str(error) + }) + raise + + except Exception as error: + logger.error({ + "message": "Broad error creating log stream", + "log_group": log_group_name, + "error": str(error) + }) + raise -@pytest.fixture(name='create_log_stream') -def fixture_create_log_stream(metadata): - """Create a log stream with events and delete after the execution. - Args: - metadata (dict): Metadata to get the parameters. - """ - - SKIP_LOG_GROUP_CREATION = [PERMANENT_CLOUDWATCH_LOG_GROUP, FAKE_CLOUDWATCH_LOG_GROUP] - print(PERMANENT_CLOUDWATCH_LOG_GROUP, FAKE_CLOUDWATCH_LOG_GROUP) - log_group_names = [item.strip() for item in metadata['log_group_name'].split(',')] - for log_group_name in log_group_names: - if log_group_name in SKIP_LOG_GROUP_CREATION: - continue - - import random - log_group_name += f"-{random.randint(10**3, 10**4 - 1)}" - - logger.debug('Creating log group: %s', log_group_name) - try: - create_log_group(log_group_name) - except botocore.ResourceAlreadyExistsException as e: - pass - log_stream = create_log_stream(log_group_name) - logger.debug('Created log stream "%s" within log group "%s"', log_stream, log_group_name) - try: - create_log_events( - log_stream=log_stream, log_group=log_group_name, event_number=metadata.get('expected_results', 1) - ) - except botocore.errorfactory.ResourceAlreadyExistsException as e: - pass - except Exception as e: - print(e) - pass - logger.debug('Created log events') - metadata['log_stream'] = log_stream +@pytest.fixture() +def create_test_events(metadata: dict): + """Create a log event in a log stream. - yield + Parameters + ---------- + metadata (dict): Log group information. - for log_group_name in log_group_names: - if log_group_name in SKIP_LOG_GROUP_CREATION: - continue - delete_log_group(log_group_name) - logger.debug('Deleted log group: %s', log_group_name) + Returns + ------- + None + """ + # Get log group name + log_group_name = metadata["log_group_name"] + # Get log stream name + log_stream_name = metadata["log_stream_name"] -@pytest.fixture -def create_log_stream_in_existent_group(metadata): - """Create a log stream with events and delete after the execution. + # Get number of events + event_number = metadata["expected_results"] - Args: - metadata (dict): Metadata to get the parameters. 
- """ - log_group_name = metadata['log_group_name'] - log_stream = create_log_stream(log_group_name) - logger.debug('Created log stream "%s" within log group "%s"', log_stream, log_group_name) - create_log_events(log_stream=log_stream, log_group=log_group_name) - logger.debug('Created log events') - metadata['log_stream'] = log_stream + # Generate event information + events = [ + {'timestamp': int(time() * 1000), 'message': f"Test event number {i}"} for i in range(event_number) + ] - yield + try: + # Insert log events in log group + upload_log_events( + log_stream=log_stream_name, + log_group=log_group_name, + events=events + ) + + except ClientError as error: + logger.error({ + "message": "Client error creating log stream", + "log_group": log_group_name, + "error": str(error) + }) + pass - delete_log_stream(log_stream=log_stream, log_group=log_group_name) - logger.debug('Deleted log stream: %s', log_stream) + except Exception as error: + logger.error({ + "message": "Broad error creating log stream", + "log_group": log_group_name, + "error": str(error) + }) + pass -@pytest.fixture(name='delete_log_stream') -def fixture_delete_log_stream(metadata): - """Create a log stream with events and delete after the execution. +"""DB fixtures""" - Args: - metadata (dict): Metadata to get the parameters. - """ - yield - log_stream = metadata['log_stream'] - delete_log_stream(log_stream=log_stream) - logger.debug('Deleted log stream: %s', log_stream) - -# DB fixtures @pytest.fixture def clean_s3_cloudtrail_db(): """Delete the DB file before and after the test execution""" diff --git a/tests/integration/test_aws/data/configuration_template/basic_test_module/bucket_configuration_defaults.yaml b/tests/integration/test_aws/data/configuration_template/basic_test_module/bucket_configuration_defaults.yaml index 507a734e36b..022de090082 100644 --- a/tests/integration/test_aws/data/configuration_template/basic_test_module/bucket_configuration_defaults.yaml +++ b/tests/integration/test_aws/data/configuration_template/basic_test_module/bucket_configuration_defaults.yaml @@ -9,7 +9,5 @@ attributes: - type: BUCKET_TYPE elements: - - aws_profile: - value: qa - name: value: BUCKET_NAME diff --git a/tests/integration/test_aws/data/configuration_template/basic_test_module/cloudwatch_configuration_defaults.yaml b/tests/integration/test_aws/data/configuration_template/basic_test_module/cloudwatch_configuration_defaults.yaml index 6fc76e6537a..5aa0d879f1c 100644 --- a/tests/integration/test_aws/data/configuration_template/basic_test_module/cloudwatch_configuration_defaults.yaml +++ b/tests/integration/test_aws/data/configuration_template/basic_test_module/cloudwatch_configuration_defaults.yaml @@ -9,8 +9,6 @@ attributes: - type: SERVICE_TYPE elements: - - aws_profile: - value: qa - aws_log_groups: value: LOG_GROUP_NAME - regions: diff --git a/tests/integration/test_aws/data/configuration_template/basic_test_module/inspector_configuration_defaults.yaml b/tests/integration/test_aws/data/configuration_template/basic_test_module/inspector_configuration_defaults.yaml index 2b6c6bd0430..5e7f4d5957b 100644 --- a/tests/integration/test_aws/data/configuration_template/basic_test_module/inspector_configuration_defaults.yaml +++ b/tests/integration/test_aws/data/configuration_template/basic_test_module/inspector_configuration_defaults.yaml @@ -9,7 +9,5 @@ attributes: - type: SERVICE_TYPE elements: - - aws_profile: - value: qa - regions: value: us-east-1 diff --git 
a/tests/integration/test_aws/data/test_cases/basic_test_module/cases_bucket_defaults.yaml b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_bucket_defaults.yaml index cedf6c83b23..0c0de9e12dd 100644 --- a/tests/integration/test_aws/data/test_cases/basic_test_module/cases_bucket_defaults.yaml +++ b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_bucket_defaults.yaml @@ -1,18 +1,22 @@ - name: cloudtrail_defaults description: CloudTrail default configurations configuration_parameters: + RESOURCE_TYPE: bucket BUCKET_TYPE: cloudtrail BUCKET_NAME: wazuh-cloudtrail-integration-tests metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests - name: vpc_defaults description: VPC default configurations configuration_parameters: + RESOURCE_TYPE: bucket BUCKET_TYPE: vpcflow BUCKET_NAME: wazuh-vpcflow-integration-tests metadata: + resource_type: bucket bucket_type: vpcflow bucket_name: wazuh-vpcflow-integration-tests @@ -22,104 +26,127 @@ BUCKET_TYPE: config BUCKET_NAME: wazuh-config-integration-tests metadata: + resource_type: bucket bucket_type: config bucket_name: wazuh-config-integration-tests - name: alb_defaults description: ALB default configurations configuration_parameters: + RESOURCE_TYPE: bucket BUCKET_TYPE: alb BUCKET_NAME: wazuh-alb-integration-tests metadata: + resource_type: bucket bucket_type: alb bucket_name: wazuh-alb-integration-tests - name: clb_defaults description: CLB default configurations configuration_parameters: + RESOURCE_TYPE: bucket BUCKET_TYPE: clb BUCKET_NAME: wazuh-clb-integration-tests metadata: + resource_type: bucket bucket_type: clb bucket_name: wazuh-clb-integration-tests - name: nlb_defaults description: NLB default configurations configuration_parameters: + RESOURCE_TYPE: bucket BUCKET_TYPE: nlb BUCKET_NAME: wazuh-nlb-integration-tests metadata: + resource_type: bucket bucket_type: nlb bucket_name: wazuh-nlb-integration-tests - name: kms_defaults description: KMS default configurations configuration_parameters: + RESOURCE_TYPE: bucket BUCKET_TYPE: custom BUCKET_NAME: wazuh-kms-integration-tests metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-kms-integration-tests - name: macie_defaults description: CloudTrail default configurations configuration_parameters: + RESOURCE_TYPE: bucket BUCKET_TYPE: custom BUCKET_NAME: wazuh-macie-integration-tests metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-macie-integration-tests - name: trusted_advisor_defaults description: Trusted Advisor default configurations configuration_parameters: + RESOURCE_TYPE: bucket BUCKET_TYPE: custom BUCKET_NAME: wazuh-trusted-advisor-integration-tests metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-trusted-advisor-integration-tests - name: guardduty_defaults description: GuardDuty default configurations configuration_parameters: + RESOURCE_TYPE: bucket BUCKET_TYPE: guardduty BUCKET_NAME: wazuh-guardduty-integration-tests metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-guardduty-integration-tests - name: native_guardduty_defaults description: Native GuardDuty default configurations configuration_parameters: + RESOURCE_TYPE: bucket BUCKET_TYPE: guardduty BUCKET_NAME: wazuh-native-guardduty-integration-tests metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-native-guardduty-integration-tests - name: waf_defaults description: WAF default configurations configuration_parameters: + 
RESOURCE_TYPE: bucket BUCKET_TYPE: waf BUCKET_NAME: wazuh-waf-integration-tests metadata: + resource_type: bucket bucket_type: waf bucket_name: wazuh-waf-integration-tests - name: server_access_defaults description: Server Access default configurations configuration_parameters: + RESOURCE_TYPE: bucket BUCKET_TYPE: server_access BUCKET_NAME: wazuh-server-access-integration-tests metadata: + resource_type: bucket bucket_type: server_access bucket_name: wazuh-server-access-integration-tests - name: cisco_umbrella_defaults description: Umbrella default configurations configuration_parameters: + RESOURCE_TYPE: bucket BUCKET_TYPE: cisco_umbrella BUCKET_NAME: wazuh-umbrella-integration-tests metadata: + resource_type: bucket bucket_type: cisco_umbrella bucket_name: wazuh-umbrella-integration-tests diff --git a/tests/integration/test_aws/data/test_cases/basic_test_module/cases_cloudwatch_defaults.yaml b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_cloudwatch_defaults.yaml index ca0c2e7adac..e4d2d8c7d22 100644 --- a/tests/integration/test_aws/data/test_cases/basic_test_module/cases_cloudwatch_defaults.yaml +++ b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_cloudwatch_defaults.yaml @@ -1,8 +1,10 @@ - name: cloudwatchlogs_defaults description: CloudWatch default configurations configuration_parameters: + RESOURCE_TYPE: log_group SERVICE_TYPE: cloudwatchlogs LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests metadata: + resource_type: log_group service_type: cloudwatchlogs log_group_name: wazuh-cloudwatchlogs-integration-tests diff --git a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml index acf58cc8b43..1b8bfa8f9c6 100644 --- a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml +++ b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml @@ -4,3 +4,5 @@ SQS_NAME: wazuh-sqs-integration-tests metadata: sqs_name: wazuh-sqs-integration-tests + bucket_name: wazuh-sqs-integration-test + bucket_type: cloudtrail diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py index dba1aa1a2a6..345581d2fa2 100644 --- a/tests/integration/test_aws/test_basic.py +++ b/tests/integration/test_aws/test_basic.py @@ -14,12 +14,13 @@ # Local module imports from . import event_monitor -from .utils import ERROR_MESSAGE, TestConfigurator, local_internal_options +from .utils import ERROR_MESSAGE +from .configurator import configurator pytestmark = [pytest.mark.server] -# Set test configurator for the module -configurator = TestConfigurator(module='basic_test_module') +# Set module name +configurator.module(test_module='basic_test_module') # -------------------------------------------- TEST_BUCKET_DEFAULTS ---------------------------------------------------- # Configure T1 test @@ -33,7 +34,8 @@ ids=configurator.cases_ids) def test_bucket_defaults( configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, + ): """ description: The module is invoked with the expected parameters and no error occurs. 
diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py index 3cf22fa8236..454a8954bcc 100644 --- a/tests/integration/test_aws/utils.py +++ b/tests/integration/test_aws/utils.py @@ -7,10 +7,6 @@ """ # qa-integration-framework imports -from wazuh_testing.utils.configuration import ( - get_test_cases_data, - load_configuration_template, -) from wazuh_testing.modules.monitord import configuration as monitord_config from os.path import join, dirname, realpath @@ -20,6 +16,7 @@ TEST_CASES_DIR = 'test_cases' WAZUH_MODULES_DEBUG = 'wazuh_modules.debug' +# DICTS ERROR_MESSAGE = { "failed_start": "The AWS module did not start as expected", @@ -55,61 +52,6 @@ # Paths TEST_DATA_PATH = join(dirname(realpath(__file__)), 'data') - # Set local internal options local_internal_options = {WAZUH_MODULES_DEBUG: '2', monitord_config.MONITORD_ROTATE_LOG: '0'} - - -# Classes -class TestConfigurator: - """ - TestConfigurator class is responsible for configuring test data and parameters for a specific test module. - - Attributes: - - module (str): The name of the test module. - - configuration_path (str): The path to the configuration directory for the test module. - - test_cases_path (str): The path to the test cases directory for the test module. - - metadata (list): Test metadata retrieved from the test cases. - - parameters (list): Test parameters retrieved from the test cases. - - cases_ids (list): Identifiers for the test cases. - - test_configuration_template (list): The loaded configuration template for the test module. - - """ - def __init__(self, module): - self.module = module - self.configuration_path = join(TEST_DATA_PATH, TEMPLATE_DIR, self.module) - self.test_cases_path = join(TEST_DATA_PATH, TEST_CASES_DIR, self.module) - self.metadata = None - self.parameters = None - self.cases_ids = None - self.test_configuration_template = None - - def configure_test(self, configuration_file="", cases_file=""): - """ - Configures the test data and parameters for the given test module. - - Args: - - configuration_file (str): The name of the configuration file. - - cases_file (str): The name of the test cases file. 
- - Returns: - None - """ - # Set test cases path - cases_path = join(self.test_cases_path, cases_file) - - # set test cases data - self.parameters, self.metadata, self.cases_ids = get_test_cases_data(cases_path) - - # Set test configuration template for tests with config files - if configuration_file != "": - # Set config path - configurations_path = join(self.configuration_path, configuration_file) - - # load configuration template - self.test_configuration_template = load_configuration_template( - configurations_path, - self.parameters, - self.metadata - ) From 6c4f30083986ca42aeb0674a06dcc9d6b7815ca1 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Thu, 15 Feb 2024 19:14:30 -0300 Subject: [PATCH 287/419] Improved configure test flow and added creation logic --- tests/integration/conftest.py | 1 - tests/integration/test_aws/configurator.py | 162 +++++------------- tests/integration/test_aws/conftest.py | 157 ++++++++++++++--- .../cases_bucket_defaults.yaml | 13 -- .../cases_cloudwatch_defaults.yaml | 1 - .../cases_inspector_defaults.yaml | 4 + .../cases_bucket_custom.yaml | 3 +- .../cases_bucket_custom_logs.yaml | 3 +- tests/integration/test_aws/test.py | 28 +++ tests/integration/test_aws/test_basic.py | 37 ++-- 10 files changed, 223 insertions(+), 186 deletions(-) create mode 100644 tests/integration/test_aws/test.py diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 504b1abb618..9299b8b113e 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -31,7 +31,6 @@ import wazuh_testing.utils.configuration as wazuh_configuration from wazuh_testing.utils.services import control_service - # - - - - - - - - - - - - - - - - - - - - - - - - -Pytest configuration - - - - - - - - - - - - - - - - - - - - - - - diff --git a/tests/integration/test_aws/configurator.py b/tests/integration/test_aws/configurator.py index 388933a3251..56c564b85a9 100644 --- a/tests/integration/test_aws/configurator.py +++ b/tests/integration/test_aws/configurator.py @@ -14,19 +14,8 @@ get_test_cases_data, load_configuration_template, ) -from wazuh_testing.modules.aws.utils import ( - create_bucket, - upload_log_events, - create_log_group, - create_log_stream, - delete_bucket, - delete_log_group, - delete_s3_db, - delete_services_db, - upload_bucket_file, - delete_resources, - generate_file -) +from wazuh_testing.logger import logger + # Local imports from .utils import TEST_DATA_PATH, TEMPLATE_DIR, TEST_CASES_DIR @@ -39,50 +28,43 @@ class TestConfigurator: Attributes: - module (str): The name of the test module. - - configuration_path (str): The path to the configuration directory for the test module. - - test_cases_path (str): The path to the test cases directory for the test module. - metadata (list): Test metadata retrieved from the test cases. - - parameters (list): Test parameters retrieved from the test cases. - cases_ids (list): Identifiers for the test cases. - test_configuration_template (list): The loaded configuration template for the test module. 
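One detail worth calling out in this revision: every AWS resource name gets the short session id plus a `-todelete` marker, so anything left behind by a failed run is easy to spot and purge by hand. A tiny sketch of the convention applied by `_modify_raw_data()` further down (the base bucket name is illustrative):

```python
# Sketch of the per-session naming convention for test resources.
from uuid import uuid4

session_id = str(uuid4())[:8]
suffix = f"-{session_id}-todelete"
bucket_name = 'wazuh-cloudtrail-integration-tests' + suffix
# e.g. 'wazuh-cloudtrail-integration-tests-1a2b3c4d-todelete'
```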
""" def __init__(self): - self.module = None - self._metadata = None - self._cases_ids = None - self._test_configuration_template = None + self._module = "" + self._metadata: list = [] + self._cases_ids: list = [] + self._test_configuration_template: list = [] self._set_session_id() @property def module(self): - return self.module + return self._module @module.setter def module(self, test_module: str): - self.module = test_module + self._module = test_module @property def metadata(self): return self._metadata - @metadata.setter - def metadata(self, value): - self._metadata = value + @property + def test_configuration_template(self): + return self._test_configuration_template @property def cases_ids(self): return self._cases_ids - @cases_ids.setter - def cases_ids(self, value): - self._cases_ids = value - def _set_session_id(self) -> None: """Create and set the test session id.""" self._session_id = str(uuid4())[:8] - print(f"This test session id is: {self._session_id}") + logger.info(f"This test session id is: {self._session_id}") def configure_test(self, configuration_file="", cases_file="") -> None: """ @@ -93,68 +75,20 @@ def configure_test(self, configuration_file="", cases_file="") -> None: - configuration_file (str): The name of the configuration file. - cases_file (str): The name of the test cases file. """ - # Set configuration path - configuration_path = join(TEST_DATA_PATH, TEMPLATE_DIR, self.module) - # Set test cases yaml path cases_yaml_path = join(TEST_DATA_PATH, TEST_CASES_DIR, self.module, cases_file) - # Backup test data file - backup_test_file = modify_file(test_data_path=cases_yaml_path) - - # Modify original file - resources = self._modify_original_file(test_data_path=cases_yaml_path) - - # Create resources for test - self._create_resources(resources=resources) - # Set test cases data parameters, self._metadata, self._cases_ids = get_test_cases_data(cases_yaml_path) - # Set test configuration template for tests with config files - self._set_configuration_template(configuration_file=configuration_file, - parameters=parameters) - - yield - - # Delete resources - self._delete_resources(resources=resources) - - # Restore original file - restore_original_file(test_data_path=cases_yaml_path, - backup_file=backup_test_file) + # Modify original data to include session information + self._modify_raw_data(parameters=parameters) - def _modify_original_file(self, test_data_path: str) -> set: - """Add session id to original yaml file resource name + # Set test configuration template for tests with config files + self._load_configuration_template(configuration_file=configuration_file, + parameters=parameters) - Returns - ------- - - resources (set): Set containing resources to create. 
- """ - resources = set() - # Read and Modify the cases yaml file - with open(test_data_path, 'w') as file: - lines = file.readlines() # Read all lines from the file - - for line in lines: - if 'BUCKET_NAME' in line or 'bucket_name' in line: - # Extract the bucket name, modify it, and write the modified line - parts = line.split(':') - if len(parts) > 1: - bucket_name = parts[1].strip() + self._session_id - resources.add(bucket_name) # Add only the modified bucket name to the set - modified_line = parts[0] + ': ' + bucket_name + '\n' - else: - modified_line = line - file.write(modified_line) - else: - file.write(line) - - file.truncate() # Truncate the file to the current position to remove any leftover content - - return resources - - def _set_configuration_template(self, configuration_file: str, parameters: str) -> None: + def _load_configuration_template(self, configuration_file: str, parameters: str) -> None: """Set the configuration template of the test Params @@ -168,52 +102,34 @@ def _set_configuration_template(self, configuration_file: str, parameters: str) configuration_path = join(TEST_DATA_PATH, TEMPLATE_DIR, self.module, configuration_file) # load configuration template - self.test_configuration_template = load_configuration_template( + self._test_configuration_template = load_configuration_template( configuration_path, parameters, self._metadata ) - def _create_resources(self, resources: set) -> None: - """Create AWS resources for test execution - - Parameters - ---------- - - resources (set): Set containing resources to create. + def _modify_raw_data(self, parameters: list) -> None: + """Modify raw data to add test session information - """ - pass - - def _delete_resources(self, resources): - pass - - -def modify_file(test_data_path: str) -> str: - """Backup test data file and modify it - - Parameters - ---------- - - test_data_path (str): Path of the original test file - - """ - with open(test_data_path, 'r') as original_file: - backup_content = original_file.read() - return backup_content - - -def restore_original_file(test_data_path: str, backup_file: str) -> None: - """Restore file to original state. - - Parameters - ---------- - - test_data_path (str): Path of test file. - - - backup_file (str): Backup test file. + Params + ------ + - parameters (list): The parameters of the test. + - metadata (list): The metadata of the test. + """ + # Add Suffix (_todelete) to alert a safe deletion of resource in case of errors. + suffix = f"-{self._session_id}-todelete" + for param, data in zip(parameters, self._metadata): + try: + if param["RESOURCE_TYPE"] == "bucket": + param["BUCKET_NAME"] += suffix + data["bucket_name"] += suffix + + elif param["RESOURCE_TYPE"] == "log_stream": + param["LOG_STREAM_NAME"] += suffix + data["LOG_STREAM_NAME"] += suffix + except KeyError: + raise - """ - with open(test_data_path, 'w') as original_file: - # Write the original content back to the file - original_file.write(backup_file) # Instantiate configurator diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 8bd2b916f57..27f22971c18 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -5,10 +5,11 @@ """ This module contain all necessary components (fixtures, classes, methods) to configure the test for its execution. 
""" + import pytest -from uuid import uuid4 from time import time from botocore.exceptions import ClientError +from uuid import uuid4 # qa-integration-framework imports from wazuh_testing.logger import logger @@ -22,7 +23,6 @@ delete_s3_db, delete_services_db, upload_bucket_file, - delete_resources, generate_file ) from wazuh_testing.utils.services import control_service @@ -47,15 +47,130 @@ def restart_wazuh_function_without_exception(daemon=None): control_service('stop', daemon=daemon) +"""Session fixtures""" + + +@pytest.fixture(scope="session", autouse=True) +def buckets_manager(): + """Initializes a set to manage the creation and deletion of the buckets used throughout the test session. + + Yields + ------ + buckets : set + Set of buckets + """ + # Create buckets set + buckets: set = set() + + yield buckets + + # Delete all resources created during execution + for bucket in buckets: + try: + delete_bucket(bucket_name=bucket) + except ClientError as error: + logger.warning({ + "message": "Client error deleting bucket, delete manually", + "resource_name": bucket, + "error": str(error) + }) + + except Exception as error: + logger.warning({ + "message": "Broad error deleting bucket, delete manually", + "resource_name": bucket, + "error": str(error) + }) + + +@pytest.fixture(scope="session", autouse=True) +def log_groups_manager(): + """Initializes a set to manage the creation and deletion of the log groups used throughout the test session. + + Yields + ------ + log_groups : set + Set of log groups. + """ + # Create log groups set + log_groups: set = set() + + yield log_groups + + # Delete all resources created during execution + for log_group in log_groups: + try: + delete_log_group(log_group_name=log_group) + except ClientError as error: + logger.warning({ + "message": "Client error deleting log_group, delete manually", + "resource_name": log_group, + "error": str(error) + }) + raise + + except Exception as error: + logger.warning({ + "message": "Broad error deleting log_group, delete manually", + "resource_name": log_group, + "error": str(error) + }) + + """S3 fixtures""" +@pytest.fixture() +def create_test_bucket(buckets_manager, + metadata: dict): + """Create a bucket. + + Parameters + ---------- + buckets_manager : fixture + Set of buckets. + metadata : dict + Bucket information. + + """ + bucket_name = metadata["bucket_name"] + bucket_type = metadata["bucket_type"] + + try: + # Create bucket + create_bucket(bucket_name=bucket_name) + logger.debug(f"Created new bucket: type {bucket_name}") + + # Append created bucket to resource set + buckets_manager.add(bucket_name) + + except ClientError as error: + logger.error({ + "message": "Client error creating bucket", + "bucket_name": bucket_name, + "bucket_type": bucket_type, + "error": str(error) + }) + raise + + except Exception as error: + logger.error({ + "message": "Broad error creating bucket", + "bucket_name": bucket_name, + "bucket_type": bucket_type, + "error": str(error) + }) + raise + + @pytest.fixture -def upload_file_to_bucket(metadata): +def upload_file_to_bucket(metadata: dict): """Upload a file to S3 bucket and delete after the test ends. - Args: - metadata (dict): Metadata to get the parameters. + Parameters + ---------- + metadata : dict + Metadata to get the parameters. 
""" # Get bucket name bucket_name = metadata['bucket_name'] @@ -101,39 +216,27 @@ def upload_file_to_bucket(metadata): @pytest.fixture() -def create_test_log_group(create_session_id: str, create_and_delete_resources_list: list, metadata: dict): +def create_test_log_group(log_groups_manager, + metadata: dict) -> None: """Create a bucket. Parameters ---------- - create_session_id (str): Test session id. - create_and_delete_resources_list (list): Resources list. - metadata (dict): Log group information. - - Returns - ------- - None + log_groups_manager : fixture + Log groups set. + metadata : dict + Log group information. """ - # Set variables from fixture - test_session_id = create_session_id - resources_list = create_and_delete_resources_list - - # Get log group information and add session id - log_group_name = metadata["log_group_name"] + f"-{test_session_id}" + # Get log group name + log_group_name = metadata["log_group_name"] try: # Create log group create_log_group(log_group_name=log_group_name) logger.debug(f"Created log group: {log_group_name}") - # Create resource dict - resource = { - "type": "log_group", - "name": log_group_name - } - - # Append created log group to resources list - resources_list.append(log_group_name) + # Append created bucket to resource list + log_groups_manager.add(log_group_name) except ClientError as error: logger.error({ diff --git a/tests/integration/test_aws/data/test_cases/basic_test_module/cases_bucket_defaults.yaml b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_bucket_defaults.yaml index 0c0de9e12dd..3bde4e3f7bc 100644 --- a/tests/integration/test_aws/data/test_cases/basic_test_module/cases_bucket_defaults.yaml +++ b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_bucket_defaults.yaml @@ -1,7 +1,6 @@ - name: cloudtrail_defaults description: CloudTrail default configurations configuration_parameters: - RESOURCE_TYPE: bucket BUCKET_TYPE: cloudtrail BUCKET_NAME: wazuh-cloudtrail-integration-tests metadata: @@ -12,7 +11,6 @@ - name: vpc_defaults description: VPC default configurations configuration_parameters: - RESOURCE_TYPE: bucket BUCKET_TYPE: vpcflow BUCKET_NAME: wazuh-vpcflow-integration-tests metadata: @@ -33,7 +31,6 @@ - name: alb_defaults description: ALB default configurations configuration_parameters: - RESOURCE_TYPE: bucket BUCKET_TYPE: alb BUCKET_NAME: wazuh-alb-integration-tests metadata: @@ -44,7 +41,6 @@ - name: clb_defaults description: CLB default configurations configuration_parameters: - RESOURCE_TYPE: bucket BUCKET_TYPE: clb BUCKET_NAME: wazuh-clb-integration-tests metadata: @@ -55,7 +51,6 @@ - name: nlb_defaults description: NLB default configurations configuration_parameters: - RESOURCE_TYPE: bucket BUCKET_TYPE: nlb BUCKET_NAME: wazuh-nlb-integration-tests metadata: @@ -66,7 +61,6 @@ - name: kms_defaults description: KMS default configurations configuration_parameters: - RESOURCE_TYPE: bucket BUCKET_TYPE: custom BUCKET_NAME: wazuh-kms-integration-tests metadata: @@ -77,7 +71,6 @@ - name: macie_defaults description: CloudTrail default configurations configuration_parameters: - RESOURCE_TYPE: bucket BUCKET_TYPE: custom BUCKET_NAME: wazuh-macie-integration-tests metadata: @@ -88,7 +81,6 @@ - name: trusted_advisor_defaults description: Trusted Advisor default configurations configuration_parameters: - RESOURCE_TYPE: bucket BUCKET_TYPE: custom BUCKET_NAME: wazuh-trusted-advisor-integration-tests metadata: @@ -99,7 +91,6 @@ - name: guardduty_defaults description: GuardDuty default configurations 
configuration_parameters: - RESOURCE_TYPE: bucket BUCKET_TYPE: guardduty BUCKET_NAME: wazuh-guardduty-integration-tests metadata: @@ -110,7 +101,6 @@ - name: native_guardduty_defaults description: Native GuardDuty default configurations configuration_parameters: - RESOURCE_TYPE: bucket BUCKET_TYPE: guardduty BUCKET_NAME: wazuh-native-guardduty-integration-tests metadata: @@ -121,7 +111,6 @@ - name: waf_defaults description: WAF default configurations configuration_parameters: - RESOURCE_TYPE: bucket BUCKET_TYPE: waf BUCKET_NAME: wazuh-waf-integration-tests metadata: @@ -132,7 +121,6 @@ - name: server_access_defaults description: Server Access default configurations configuration_parameters: - RESOURCE_TYPE: bucket BUCKET_TYPE: server_access BUCKET_NAME: wazuh-server-access-integration-tests metadata: @@ -143,7 +131,6 @@ - name: cisco_umbrella_defaults description: Umbrella default configurations configuration_parameters: - RESOURCE_TYPE: bucket BUCKET_TYPE: cisco_umbrella BUCKET_NAME: wazuh-umbrella-integration-tests metadata: diff --git a/tests/integration/test_aws/data/test_cases/basic_test_module/cases_cloudwatch_defaults.yaml b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_cloudwatch_defaults.yaml index e4d2d8c7d22..0694fc03413 100644 --- a/tests/integration/test_aws/data/test_cases/basic_test_module/cases_cloudwatch_defaults.yaml +++ b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_cloudwatch_defaults.yaml @@ -1,7 +1,6 @@ - name: cloudwatchlogs_defaults description: CloudWatch default configurations configuration_parameters: - RESOURCE_TYPE: log_group SERVICE_TYPE: cloudwatchlogs LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests metadata: diff --git a/tests/integration/test_aws/data/test_cases/basic_test_module/cases_inspector_defaults.yaml b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_inspector_defaults.yaml index 094da2a5390..7116fe13c2d 100644 --- a/tests/integration/test_aws/data/test_cases/basic_test_module/cases_inspector_defaults.yaml +++ b/tests/integration/test_aws/data/test_cases/basic_test_module/cases_inspector_defaults.yaml @@ -2,5 +2,9 @@ description: Inspector default configurations configuration_parameters: SERVICE_TYPE: inspector + LOG_GROUP_NAME: wazuh-inspector-integration-tests metadata: + resource_type: log_group service_type: inspector + log_group_name: wazuh-inspector-integration-tests + diff --git a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml index 1b8bfa8f9c6..f546d1149c5 100644 --- a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml +++ b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml @@ -3,6 +3,7 @@ configuration_parameters: SQS_NAME: wazuh-sqs-integration-tests metadata: + resource_type: bucket sqs_name: wazuh-sqs-integration-tests - bucket_name: wazuh-sqs-integration-test + bucket_name: wazuh-sqs-bucket-integration-test bucket_type: cloudtrail diff --git a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml index 8b2667a9126..c0feb0ce79d 100644 --- a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml +++ 
b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml @@ -3,6 +3,7 @@ configuration_parameters: SQS_NAME: wazuh-sqs-integration-tests metadata: + resource_type: bucket sqs_name: wazuh-sqs-integration-tests - bucket_name: wazuh-sqs-integration-test + bucket_name: wazuh-sqs-bucket-integration-test bucket_type: cloudtrail diff --git a/tests/integration/test_aws/test.py b/tests/integration/test_aws/test.py new file mode 100644 index 00000000000..145a3321801 --- /dev/null +++ b/tests/integration/test_aws/test.py @@ -0,0 +1,28 @@ +cases_yaml_path = '/home/eduardoleon/git/wazuh/tests/integration/test_aws/data/test_cases/basic_test_module/cases_bucket_defaults.yaml' + +from copy import deepcopy + +import yaml +with open(cases_yaml_path) as f: + test_cases_data = yaml.safe_load(f) +configuration_parameters = [] +configuration_metadata = [] +test_cases_ids = [] + +for test_case in test_cases_data: + if test_case.get('metadata') is None: + test_case['metadata'] = deepcopy(test_case['configuration_parameters']) + configuration_parameters.append(test_case['configuration_parameters']) + metadata_parameters = { + 'name': test_case['name'], 'description': test_case['description']} + metadata_parameters.update(test_case['metadata']) + configuration_metadata.append(metadata_parameters) + test_cases_ids.append(test_case['name']) + +for param, data in zip(configuration_parameters, configuration_metadata): + print(param) + print(data) + +print(configuration_parameters) +# print(configuration_metadata) +# print(test_cases_ids) \ No newline at end of file diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py index 345581d2fa2..fba99b8a41d 100644 --- a/tests/integration/test_aws/test_basic.py +++ b/tests/integration/test_aws/test_basic.py @@ -5,22 +5,21 @@ """ This module will contain all cases for the basic test suite """ - import pytest # qa-integration-framework imports from wazuh_testing import session_parameters - +from wazuh_testing.modules.aws.utils import aws_profile # Local module imports from . import event_monitor -from .utils import ERROR_MESSAGE +from .utils import ERROR_MESSAGE, local_internal_options from .configurator import configurator pytestmark = [pytest.mark.server] # Set module name -configurator.module(test_module='basic_test_module') +configurator.module = "basic_test_module" # -------------------------------------------- TEST_BUCKET_DEFAULTS ---------------------------------------------------- # Configure T1 test @@ -33,9 +32,9 @@ zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_bucket_defaults( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, - + configuration, metadata, create_test_bucket, load_wazuh_basic_configuration, set_wazuh_configuration, + clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, + restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. 
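For reference while reading the parametrization changes in this hunk, below is a minimal, self-contained sketch of the `zip`-based parametrization pattern these tests rely on. The `FakeConfigurator` class and its values are illustrative stand-ins for the real `TestConfigurator` described earlier in this series, not part of the patch.

```python
import pytest


class FakeConfigurator:
    """Illustrative stand-in for TestConfigurator with pre-loaded test data."""
    test_configuration_template = [{"sections": []}, {"sections": []}]
    metadata = [
        {"bucket_name": "bucket-a", "bucket_type": "cloudtrail"},
        {"bucket_name": "bucket-b", "bucket_type": "vpcflow"},
    ]
    cases_ids = ["case_a", "case_b"]


configurator = FakeConfigurator()


@pytest.mark.parametrize("configuration, metadata",
                         zip(configurator.test_configuration_template, configurator.metadata),
                         ids=configurator.cases_ids)
def test_example(configuration, metadata):
    # Each generated case receives one (configuration, metadata) pair, matched by position,
    # and is reported under the corresponding entry of cases_ids.
    assert "bucket_name" in metadata
```

Zipping the template with the metadata keeps both lists aligned per case, which is why the suffix added to `BUCKET_NAME`/`bucket_name` in the configurator must be applied to parameters and metadata together.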
@@ -88,7 +87,7 @@ def test_bucket_defaults( parameters = [ 'wodles/aws/aws-s3', '--bucket', metadata['bucket_name'], - '--aws_profile', 'qa', + '--aws_profile', aws_profile, '--type', metadata['bucket_type'], '--debug', '2' ] @@ -98,7 +97,7 @@ def test_bucket_defaults( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_aws_module_start ) - + assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] # Check command was called correctly @@ -106,7 +105,7 @@ def test_bucket_defaults( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_aws_module_called(parameters) ) - + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] # Detect any ERROR message @@ -128,9 +127,9 @@ def test_bucket_defaults( @pytest.mark.parametrize('configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_service_defaults( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring +def test_service_defaults(configuration, metadata, create_test_bucket, load_wazuh_basic_configuration, + set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, + truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. @@ -188,7 +187,7 @@ def test_service_defaults( parameters = [ 'wodles/aws/aws-s3', '--service', metadata['service_type'], - '--aws_profile', 'qa', + '--aws_profile', aws_profile, '--regions', 'us-east-1', '--aws_log_groups', log_groups, '--debug', '2' @@ -215,7 +214,7 @@ def test_service_defaults( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_all_aws_err ) - + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] @@ -229,9 +228,9 @@ def test_service_defaults( @pytest.mark.parametrize('configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_inspector_defaults( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring +def test_inspector_defaults(configuration, metadata, create_test_bucket, load_wazuh_basic_configuration, + set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, + truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. 
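As a side note to the command assertions in these hunks, the following sketch shows the general callback-matching idea used to verify the wodle invocation after `--aws_profile` is dropped. The metadata values and the log line format are assumptions for illustration only; the real callbacks live in the `event_monitor` module of the testing framework.

```python
import re
from typing import Callable, Optional


def callback_detect_module_called(parameters: list) -> Callable[[str], Optional[re.Match]]:
    """Return a callback that matches a log line containing the exact command (illustrative)."""
    pattern = re.compile(rf".*{re.escape(' '.join(parameters))}.*")
    return lambda line: pattern.match(line)


# Example metadata; real values come from the YAML case files.
metadata = {"bucket_name": "wazuh-cloudtrail-integration-tests", "bucket_type": "cloudtrail"}
parameters = [
    "wodles/aws/aws-s3",
    "--bucket", metadata["bucket_name"],
    "--type", metadata["bucket_type"],
    "--debug", "2",
]

callback = callback_detect_module_called(parameters)
# Assumed log line format, for illustration only.
assert callback("DEBUG: Launching S3 Command: " + " ".join(parameters)) is not None
assert callback("DEBUG: some unrelated line") is None
```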
@@ -288,7 +287,7 @@ def test_inspector_defaults( parameters = [ 'wodles/aws/aws-s3', '--service', metadata['service_type'], - '--aws_profile', 'qa', + '--aws_profile', aws_profile, '--regions', 'us-east-1', '--debug', '2' ] From 71380866f9b8f49697eaa0d2e17d1fe2b8ea7df5 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Tue, 20 Feb 2024 19:53:10 -0300 Subject: [PATCH 288/419] Added sqs queue fixtures and validations --- tests/integration/test_aws/configurator.py | 23 +++-- tests/integration/test_aws/conftest.py | 96 ++++++++++++++++++- .../custom_bucket_configuration.yaml | 2 - tests/integration/test_aws/test.py | 28 ------ tests/integration/test_aws/test_basic.py | 22 ++--- .../test_aws/test_custom_bucket.py | 18 ++-- 6 files changed, 127 insertions(+), 62 deletions(-) delete mode 100644 tests/integration/test_aws/test.py diff --git a/tests/integration/test_aws/configurator.py b/tests/integration/test_aws/configurator.py index 56c564b85a9..46aa7c92506 100644 --- a/tests/integration/test_aws/configurator.py +++ b/tests/integration/test_aws/configurator.py @@ -82,7 +82,7 @@ def configure_test(self, configuration_file="", cases_file="") -> None: parameters, self._metadata, self._cases_ids = get_test_cases_data(cases_yaml_path) # Modify original data to include session information - self._modify_raw_data(parameters=parameters) + self._modify_metadata(parameters=parameters) # Set test configuration template for tests with config files self._load_configuration_template(configuration_file=configuration_file, @@ -108,7 +108,7 @@ def _load_configuration_template(self, configuration_file: str, parameters: str) self._metadata ) - def _modify_raw_data(self, parameters: list) -> None: + def _modify_metadata(self, parameters: list) -> None: """Modify raw data to add test session information Params @@ -118,19 +118,26 @@ def _modify_raw_data(self, parameters: list) -> None: """ # Add Suffix (_todelete) to alert a safe deletion of resource in case of errors. suffix = f"-{self._session_id}-todelete" + + # Add suffix to metadata for param, data in zip(parameters, self._metadata): try: - if param["RESOURCE_TYPE"] == "bucket": - param["BUCKET_NAME"] += suffix + if "sqs_name" in data: + data["sqs_name"] += suffix + param["SQS_NAME"] += suffix + + if data["resource_type"] == "bucket": data["bucket_name"] += suffix + if "BUCKET_NAME" in param: + param["BUCKET_NAME"] += suffix + + elif data["resource_type"] == "log_group": + param["LOG_GROUP_NAME"] += suffix + data["log_group_name"] += suffix - elif param["RESOURCE_TYPE"] == "log_stream": - param["LOG_STREAM_NAME"] += suffix - data["LOG_STREAM_NAME"] += suffix except KeyError: raise - # Instantiate configurator configurator = TestConfigurator() diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 27f22971c18..fc6429d41b5 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -23,7 +23,12 @@ delete_s3_db, delete_services_db, upload_bucket_file, - generate_file + generate_file, + create_sqs_queue, + get_sqs_queue_arn, + set_sqs_policy, + set_bucket_event_notification_configuration, + delete_sqs_queue ) from wazuh_testing.utils.services import control_service @@ -117,6 +122,39 @@ def log_groups_manager(): }) +@pytest.fixture(scope="session", autouse=True) +def sqs_manager(): + """Initializes a set to manage the creation and deletion of the sqs queues used throughout the test session. 
+ + Yields + ------ + buckets : set + Set of SQS queues + """ + # Create buckets set + sqs_queues: set = set() + + yield sqs_queues + + # Delete all resources created during execution + for sqs in sqs_queues: + try: + delete_sqs_queue(bucket_name=sqs) + except ClientError as error: + logger.warning({ + "message": "Client error deleting sqs queue, delete manually", + "resource_name": sqs, + "error": str(error) + }) + + except Exception as error: + logger.warning({ + "message": "Broad error deleting sqs queue, delete manually", + "resource_name": sqs, + "error": str(error) + }) + + """S3 fixtures""" @@ -200,7 +238,7 @@ def upload_file_to_bucket(metadata: dict): "filename": filename, "error": str(error) }) - pass + raise error except Exception as error: logger.error({ @@ -209,7 +247,7 @@ def upload_file_to_bucket(metadata: dict): "filename": filename, "error": str(error) }) - pass + raise error """CloudWatch fixtures""" @@ -349,6 +387,58 @@ def create_test_events(metadata: dict): pass +"""SQS fixtures""" + + +@pytest.fixture +def set_test_sqs_queue(metadata: dict, sqs_manager): + """Create a test sqs group + + Parameters + ---------- + metadata : dict + The metadata for the sqs queue. + sqs_manager: fixture + The SQS set for the test. + + Returns + ------- + """ + # Get bucket name + bucket_name = metadata["bucket_name"] + # Get SQS name + sqs_name = metadata["sqs_name"] + + try: + # Create SQS and get URL + sqs_queue_url = create_sqs_queue(sqs_name=sqs_name) + # Add it to sqs set + sqs_manager.add(sqs_queue_url) + + # Get SQS Queue ARN + sqs_queue_arn = get_sqs_queue_arn(sqs_url=sqs_queue_url) + + # Set policy + set_sqs_policy(bucket_name=bucket_name, + sqs_queue_url=sqs_queue_url, + sqs_queue_arn=sqs_queue_arn) + + # Set bucket notification configuration + set_bucket_event_notification_configuration(bucket_name=bucket_name, + sqs_queue_arn=sqs_queue_arn) + + except ClientError as error: + # Check if the sqs exist + if error.response['Error']['Code'] == 'ResourceNotFound': + logger.error(f"SQS Queue {sqs_name} already exists") + pass + else: + raise error + + except Exception as error: + raise error + + """DB fixtures""" diff --git a/tests/integration/test_aws/data/configuration_template/custom_bucket_test_module/custom_bucket_configuration.yaml b/tests/integration/test_aws/data/configuration_template/custom_bucket_test_module/custom_bucket_configuration.yaml index 6e9e24b643e..511abd04462 100644 --- a/tests/integration/test_aws/data/configuration_template/custom_bucket_test_module/custom_bucket_configuration.yaml +++ b/tests/integration/test_aws/data/configuration_template/custom_bucket_test_module/custom_bucket_configuration.yaml @@ -11,5 +11,3 @@ elements: - sqs_name: value: SQS_NAME - - aws_profile: - value: qa diff --git a/tests/integration/test_aws/test.py b/tests/integration/test_aws/test.py deleted file mode 100644 index 145a3321801..00000000000 --- a/tests/integration/test_aws/test.py +++ /dev/null @@ -1,28 +0,0 @@ -cases_yaml_path = '/home/eduardoleon/git/wazuh/tests/integration/test_aws/data/test_cases/basic_test_module/cases_bucket_defaults.yaml' - -from copy import deepcopy - -import yaml -with open(cases_yaml_path) as f: - test_cases_data = yaml.safe_load(f) -configuration_parameters = [] -configuration_metadata = [] -test_cases_ids = [] - -for test_case in test_cases_data: - if test_case.get('metadata') is None: - test_case['metadata'] = deepcopy(test_case['configuration_parameters']) - configuration_parameters.append(test_case['configuration_parameters']) - 
metadata_parameters = { - 'name': test_case['name'], 'description': test_case['description']} - metadata_parameters.update(test_case['metadata']) - configuration_metadata.append(metadata_parameters) - test_cases_ids.append(test_case['name']) - -for param, data in zip(configuration_parameters, configuration_metadata): - print(param) - print(data) - -print(configuration_parameters) -# print(configuration_metadata) -# print(test_cases_ids) \ No newline at end of file diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py index fba99b8a41d..e8b0ed15f33 100644 --- a/tests/integration/test_aws/test_basic.py +++ b/tests/integration/test_aws/test_basic.py @@ -9,7 +9,6 @@ # qa-integration-framework imports from wazuh_testing import session_parameters -from wazuh_testing.modules.aws.utils import aws_profile # Local module imports from . import event_monitor @@ -87,7 +86,6 @@ def test_bucket_defaults( parameters = [ 'wodles/aws/aws-s3', '--bucket', metadata['bucket_name'], - '--aws_profile', aws_profile, '--type', metadata['bucket_type'], '--debug', '2' ] @@ -108,13 +106,13 @@ def test_bucket_defaults( assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] - # Detect any ERROR message - log_monitor.start( - timeout=session_parameters.default_timeout, - callback=event_monitor.callback_detect_all_aws_err - ) - - assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + # # Detect any ERROR message + # log_monitor.start( + # timeout=session_parameters.default_timeout, + # callback=event_monitor.callback_detect_all_aws_err + # ) + # + # assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # -------------------------------------------- TEST_CLOUDWATCH_DEFAULTS ------------------------------------------------ @@ -127,7 +125,7 @@ def test_bucket_defaults( @pytest.mark.parametrize('configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_service_defaults(configuration, metadata, create_test_bucket, load_wazuh_basic_configuration, +def test_service_defaults(configuration, metadata, create_test_log_group, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): @@ -187,7 +185,6 @@ def test_service_defaults(configuration, metadata, create_test_bucket, load_wazu parameters = [ 'wodles/aws/aws-s3', '--service', metadata['service_type'], - '--aws_profile', aws_profile, '--regions', 'us-east-1', '--aws_log_groups', log_groups, '--debug', '2' @@ -228,7 +225,7 @@ def test_service_defaults(configuration, metadata, create_test_bucket, load_wazu @pytest.mark.parametrize('configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_inspector_defaults(configuration, metadata, create_test_bucket, load_wazuh_basic_configuration, +def test_inspector_defaults(configuration, metadata, create_test_log_group, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): @@ -287,7 +284,6 @@ def test_inspector_defaults(configuration, metadata, create_test_bucket, load_wa parameters = [ 'wodles/aws/aws-s3', '--service', metadata['service_type'], - '--aws_profile', aws_profile, '--regions', 'us-east-1', '--debug', '2' 
] diff --git a/tests/integration/test_aws/test_custom_bucket.py b/tests/integration/test_aws/test_custom_bucket.py index 7f96412aeb9..d0f0f25497a 100644 --- a/tests/integration/test_aws/test_custom_bucket.py +++ b/tests/integration/test_aws/test_custom_bucket.py @@ -13,12 +13,13 @@ # Local module imports from . import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options +from .utils import ERROR_MESSAGE, TIMEOUT, local_internal_options +from .configurator import configurator pytestmark = [pytest.mark.server] # Set test configurator for the module -configurator = TestConfigurator(module='custom_bucket_test_module') +configurator.module = "custom_bucket_test_module" # -------------------------------------------- TEST_CUSTOM_BUCKETS_DEFAULTS ------------------------------------------- # Configure T1 test @@ -30,7 +31,8 @@ @pytest.mark.parametrize('configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_custom_bucket_defaults(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, +def test_custom_bucket_defaults(configuration, metadata, create_test_bucket, set_test_sqs_queue, + load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): @@ -94,9 +96,9 @@ def test_custom_bucket_defaults(configuration, metadata, load_wazuh_basic_config 'wodles/aws/aws-s3', '--subscriber', 'buckets', '--queue', metadata['sqs_name'], - '--aws_profile', 'qa', '--debug', '2' ] + log_header = 'Launching S3 Subscriber Command: ' expected_log = log_header + " ".join(parameters) @@ -135,9 +137,10 @@ def test_custom_bucket_defaults(configuration, metadata, load_wazuh_basic_config @pytest.mark.parametrize('configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_custom_bucket_logs(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, - restart_wazuh_function, file_monitoring, upload_and_delete_file_to_s3 +def test_custom_bucket_logs(configuration, metadata, create_test_bucket, set_test_sqs_queue, upload_file_to_bucket, + load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, + file_monitoring ): """ description: Test the AWS S3 custom bucket module is invoked with the expected parameters and retrieve @@ -209,7 +212,6 @@ def test_custom_bucket_logs(configuration, metadata, load_wazuh_basic_configurat 'wodles/aws/aws-s3', '--subscriber', 'buckets', '--queue', sqs_name, - '--aws_profile', 'qa', '--debug', '2' ] log_header = 'Launching S3 Subscriber Command: ' From dfd4a6209ae5731738ec23440432f8305ccf1d75 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Wed, 21 Feb 2024 18:32:33 -0300 Subject: [PATCH 289/419] Added new boto method and fixture --- tests/integration/test_aws/README.md | 160 +++++++++--------- tests/integration/test_aws/conftest.py | 57 +++++-- .../cases_bucket_custom.yaml | 6 +- .../cases_bucket_custom_logs.yaml | 6 +- .../test_aws/test_custom_bucket.py | 10 +- tests/integration/test_aws/test_parser.py | 3 - .../integration/test_aws/test_path_suffix.py | 3 - 7 files changed, 131 insertions(+), 114 deletions(-) diff --git a/tests/integration/test_aws/README.md 
b/tests/integration/test_aws/README.md index f06aac5e543..662f3bf6127 100644 --- a/tests/integration/test_aws/README.md +++ b/tests/integration/test_aws/README.md @@ -1,63 +1,69 @@ -# AWS Integration +# AWS Integration tests ## Description -It is a _wodle based_ module that has a capability to pull logs from several AWS services. +It is a _wodle based_ module that test the capabilities of the Wazuh AWS integration, pulling logs from different +buckets and services. ## Tests directory structure ```bash -wazuh-qa/tests/integration/test_aws -├── conftest.py +wazuh/tests/integration/test_aws ├── data -│   ├── configuration_template -│   │   ├── basic_test_module -│   │   ├── discard_regex_test_module -│   │   ├── only_logs_after_test_module -│   │   ├── path_suffix_test_module -│   │   ├── path_test_module -│   │   ├── regions_test_module -│   │   └── remove_from_bucket_test_module -│   └── test_cases -│   ├── basic_test_module -│   ├── discard_regex_test_module -│   ├── only_logs_after_test_module -│   ├── path_suffix_test_module -│   ├── path_test_module -│   ├── regions_test_module -│   └── remove_from_bucket_test_module -├── README.MD +│ ├── configuration_template +│ │ ├── basic_test_module +│ │ ├── custom_bucket_test_module +│ │ ├── discard_regex_test_module +│ │ ├── log_groups_test_module +│ │ ├── only_logs_after_test_module +│ │ ├── parser_test_module +│ │ ├── path_suffix_test_module +│ │ ├── path_test_module +│ │ ├── regions_test_module +│ │ └── remove_from_bucket_test_module +│ └── test_cases +│ ├── basic_test_module +│ ├── custom_bucket_test_module +│ ├── discard_regex_test_module +│ ├── log_groups_test_module +│ ├── only_logs_after_test_module +│ ├── parser_test_module +│ ├── path_suffix_test_module +│ ├── path_test_module +│ ├── regions_test_module +│ └── remove_from_bucket_test_module +├── __init__.py +├── README.md +├── conftest.py ├── test_basic.py +├── test_custom_bucket.py ├── test_discard_regex.py +├── test_log_groups.py ├── test_only_logs_after.py ├── test_path.py ├── test_path_suffix.py ├── test_regions.py -└── test_remove_from_bucket.py +├── test_remove_from_bucket.py +└── utils.py ``` -## Deps directory structure +## Requirements -```bash -wazuh-qa/deps/wazuh_testing/wazuh_testing/modules/aws -├── cli_utils.py -├── constants.py -├── data_generator.py -├── db_utils.py -├── event_monitor.py -├── __init__.py -└── s3_utils.py -``` +- [Proper testing environment](#Setting up a test environment) -## Requirements +- [Wazuh](https://github.com/wazuh/qa-integration-framework) repository. -- The only extra dependency is `boto3` -- The module will assume there are already buckets, log groups and an inspector assessment with test data in AWS. +- [Testing framework](https://github.com/wazuh/qa-integration-framework) installed. + +- Configured buckets, log groups and an inspector assessment with test data in AWS. 
+ +For a step-by-step example guide using linux go to the [test setup section](#linux) ## Configuration settings -- **credentials** - Set the credentials at `$HOME/.aws/credentials` (being `HOME` the home directory of the user who runs the tests, more information [here](https://documentation.wazuh.com/current/amazon/services/prerequisites/credentials.html#profiles)) with the content: +- **Credentials**: + Set the credentials at `$HOME/.aws/credentials` (being `HOME` the home directory of the user who runs the tests, + more information [here](https://documentation.wazuh.com/current/amazon/services/prerequisites/credentials.html#profiles) with the content: ```ini [qa] @@ -67,7 +73,7 @@ aws_secret_access_key = ## Setting up a test environment -You will need a proper environment to run the integration tests. You can use any virtual machine you wish. If you have +You will need a proper environment to run the integration tests. You can use Docker or any virtual machine. If you have one already, go to the [integration tests section](#integration-tests) If you use [Vagrant](https://www.vagrantup.com/downloads.html) @@ -88,21 +94,18 @@ _We are using **Ubuntu 22.04** for this example:_ - Install **Wazuh** -- Install python tests dependencies: +- Install Python tests dependencies: - ```shell script - # Install pip - apt install python3-pip - - # Clone your `wazuh-qa` repository within your testing environment - cd wazuh-qa +```shell script +# Install pip +apt install python3-pip git -y - # Install Python libraries - python3 -m pip install -r requirements.txt +# Clone the `qa-integration-framework` repository withing your testing environment +git clone https://github.com/wazuh/qa-integration-framework.git - # Install test dependecies - python3 -m pip install deps/wazuh-testing - ``` +# Install tests dependencies +python3 -m pip install qa-integration-framework/ +``` ## Integration tests @@ -118,15 +121,16 @@ from the closest one, it will look for the next one (if possible) until reaching need to run every test from the following path, where the general _conftest_ is: ```shell script -cd wazuh-qa/tests/integration + cd wazuh/tests/integration/test_aws/ ``` To run any test, we just need to call `pytest` from `python3` using the following line: ```shell script -python3 -m pytest [options] [file_or_dir] [file_or_dir] [...] + python3 -m pytest [options] [file_or_dir] [file_or_dir] [...] ``` + **Options:** - `v`: verbosity level (-v or -vv. Highly recommended to use -vv when tests are failing) @@ -138,7 +142,7 @@ python3 -m pytest [options] [file_or_dir] [file_or_dir] [...] - `--tier`: only run tests with given tier (ex. --tier 2) - `--html`: generates a HTML report for the test results. (ex. --html=report.html) - `--default-timeout`: overwrites the default timeout (in seconds). This value is used to make a test fail if a - condition is not met before the given time lapse. Some tests make use of this value and other has other fixed timeout + condition is not met before the given timelapse. Some tests make use of this value and other has other fixed timeout that cannot be modified. _Use `-h` to see the rest or check its [documentation](https://docs.pytest.org/en/latest/usage.html)._ @@ -149,32 +153,22 @@ check its documentation for further information. 
#### AWS integration tests example ```bash -# python3 -m pytest -vvx test_aws/ -k cloudtrail -=========================================================== test session starts ====================================================== -platform linux -- Python 3.10.6, pytest-7.1.2, pluggy-1.0.0 -- /usr/bin/python3 -cachedir: .pytest_cache -metadata: {'Python': '3.10.6', 'Platform': 'Linux-5.15.0-58-generic-x86_64-with-glibc2.35', -'Packages': {'pytest': '7.1.2', 'py': '1.10.0', 'pluggy': '1.0.0'}, -'Plugins': {'metadata': '2.0.2', 'html': '3.1.1', 'testinfra': '5.0.0'}} -rootdir: /home/vagrant/qa/tests/integration, configfile: pytest.ini -plugins: metadata-2.0.2, html-3.1.1, testinfra-5.0.0 -collected 15 items - -test_aws/test_basic.py::test_defaults[cloudtrail_defaults] PASSED [ 6%] -test_aws/test_discard_regex.py::test_discard_regex[cloudtrail_discard_regex] PASSED [ 13%] -test_aws/test_only_logs_after.py::test_without_only_logs_after[cloudtrail_without_only_logs_after] PASSED [ 20%] -test_aws/test_only_logs_after.py::test_with_only_logs_after[cloudtrail_with_only_logs_after] PASSED [ 26%] -test_aws/test_only_logs_after.py::test_multiple_calls[cloudtrail_only_logs_after_multiple_calls] PASSED [ 33%] -test_aws/test_path.py::test_path[cloudtrail_path_with_data] PASSED [ 40%] -test_aws/test_path.py::test_path[cloudtrail_path_without_data] PASSED [ 46%] -test_aws/test_path.py::test_path[cloudtrail_inexistent_path] PASSED [ 53%] -test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_path_suffix_with_data] PASSED [ 60%] -test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_path_suffix_without_data] PASSED [ 66%] -test_aws/test_path_suffix.py::test_path_suffix[cloudtrail_inexistent_path_suffix] PASSED [ 73%] -test_aws/test_regions.py::test_regions[cloudtrail_region_with_data] PASSED [ 80%] -test_aws/test_regions.py::test_regions[cloudtrail_regions_with_data] PASSED [ 86%] -test_aws/test_regions.py::test_regions[cloudtrail_inexistent_region] PASSED [ 93%] -test_aws/test_remove_from_bucket.py::test_remove_from_bucket[cloudtrail_remove_from_bucket] PASSED [100%] - -=============================================== 15 passed, 2 warnings in 332.67s (0:05:32) =========================================== -``` +#root@wazuh-master:/wazuh/tests/integration# pytest -x test_aws/ --disable-warnings +==================================== test session starts ==================================== +platform linux -- Python 3.10.12, pytest-7.1.2, pluggy-1.2.0 +rootdir: /wazuh/tests/integration, configfile: pytest.ini +plugins: testinfra-5.0.0, metadata-3.0.0, html-3.1.1 +collected 195 items + +test_aws/test_basic.py ................ [ 8%] +test_aws/test_discard_regex.py .............. [ 15%] +test_aws/test_log_groups.py .. [ 16%] +test_aws/test_only_logs_after.py .............................................x. [ 40%] +test_aws/test_parser.py .......................... [ 53%] +test_aws/test_path.py .......................................... [ 75%] +test_aws/test_path_suffix.py ......... [ 80%] +test_aws/test_regions.py ........................ [ 92%] +test_aws/test_remove_from_bucket.py ...sss......... 
[100%] + +============ 191 passed, 3 skipped, 1 xfailed, 7 warnings in 3723.08s (1:02:03) ============= +``` \ No newline at end of file diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index fc6429d41b5..8e622bf54ec 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -28,7 +28,8 @@ get_sqs_queue_arn, set_sqs_policy, set_bucket_event_notification_configuration, - delete_sqs_queue + delete_sqs_queue, + delete_bucket_files ) from wazuh_testing.utils.services import control_service @@ -69,19 +70,20 @@ def buckets_manager(): yield buckets - # Delete all resources created during execution + # Delete all buckets created during execution for bucket in buckets: try: + # Delete the bucket delete_bucket(bucket_name=bucket) except ClientError as error: - logger.warning({ + logger.error({ "message": "Client error deleting bucket, delete manually", "resource_name": bucket, "error": str(error) }) except Exception as error: - logger.warning({ + logger.error({ "message": "Broad error deleting bucket, delete manually", "resource_name": bucket, "error": str(error) @@ -107,7 +109,7 @@ def log_groups_manager(): try: delete_log_group(log_group_name=log_group) except ClientError as error: - logger.warning({ + logger.error({ "message": "Client error deleting log_group, delete manually", "resource_name": log_group, "error": str(error) @@ -115,7 +117,7 @@ def log_groups_manager(): raise except Exception as error: - logger.warning({ + logger.error({ "message": "Broad error deleting log_group, delete manually", "resource_name": log_group, "error": str(error) @@ -139,16 +141,16 @@ def sqs_manager(): # Delete all resources created during execution for sqs in sqs_queues: try: - delete_sqs_queue(bucket_name=sqs) + delete_sqs_queue(sqs_queue_url=sqs) except ClientError as error: - logger.warning({ + logger.error({ "message": "Client error deleting sqs queue, delete manually", "resource_name": sqs, "error": str(error) }) except Exception as error: - logger.warning({ + logger.error({ "message": "Broad error deleting sqs queue, delete manually", "resource_name": sqs, "error": str(error) @@ -202,7 +204,7 @@ def create_test_bucket(buckets_manager, @pytest.fixture -def upload_file_to_bucket(metadata: dict): +def manage_bucket_files(metadata: dict): """Upload a file to S3 bucket and delete after the test ends. 
Parameters @@ -217,25 +219,25 @@ def upload_file_to_bucket(metadata: dict): bucket_type = metadata['bucket_type'] # Generate file - data, filename = generate_file(bucket_type=bucket_type, + data, key = generate_file(bucket_type=bucket_type, bucket_name=bucket_name) try: # Upload file to bucket upload_bucket_file(bucket_name=bucket_name, data=data, - filename=filename) + key=key) - logger.debug('Uploaded file: %s to bucket "%s"', filename, bucket_name) + logger.debug('Uploaded file: %s to bucket "%s"', key, bucket_name) # Set filename for test execution - metadata['uploaded_file'] = filename + metadata['uploaded_file'] = key except ClientError as error: logger.error({ "message": "Client error uploading file to bucket", "bucket_name": bucket_name, - "filename": filename, + "filename": key, "error": str(error) }) raise error @@ -244,7 +246,30 @@ def upload_file_to_bucket(metadata: dict): logger.error({ "message": "Broad error uploading file to bucket", "bucket_name": bucket_name, - "filename": filename, + "filename": key, + "error": str(error) + }) + raise error + + yield + + try: + # Delete all bucket files + delete_bucket_files(bucket_name=bucket_name) + except ClientError as error: + logger.error({ + "message": "Client error deleting files in bucket", + "bucket_name": bucket_name, + "filename": key, + "error": str(error) + }) + raise error + + except Exception as error: + logger.error({ + "message": "Broad error deleting files in bucket", + "bucket_name": bucket_name, + "filename": key, "error": str(error) }) raise error diff --git a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml index f546d1149c5..13b8ad49091 100644 --- a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml +++ b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml @@ -1,9 +1,9 @@ - name: custom_bucket_defaults description: Custom bucket default configuration configuration_parameters: - SQS_NAME: wazuh-sqs-integration-tests + SQS_NAME: wazuh-sqs-integration-tests-t1 metadata: resource_type: bucket - sqs_name: wazuh-sqs-integration-tests - bucket_name: wazuh-sqs-bucket-integration-test + sqs_name: wazuh-sqs-integration-tests-t1 + bucket_name: wazuh-sqs-bucket-integration-test-t1 bucket_type: cloudtrail diff --git a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml index c0feb0ce79d..f13cc77248f 100644 --- a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml +++ b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml @@ -1,9 +1,9 @@ - name: bucket_with_logs description: Logs inside a custom bucket configuration_parameters: - SQS_NAME: wazuh-sqs-integration-tests + SQS_NAME: wazuh-sqs-integration-tests-t2 metadata: resource_type: bucket - sqs_name: wazuh-sqs-integration-tests - bucket_name: wazuh-sqs-bucket-integration-test + sqs_name: wazuh-sqs-integration-tests-t2 + bucket_name: wazuh-sqs-bucket-integration-test-t2 bucket_type: cloudtrail diff --git a/tests/integration/test_aws/test_custom_bucket.py b/tests/integration/test_aws/test_custom_bucket.py index d0f0f25497a..55868eb0098 100644 --- a/tests/integration/test_aws/test_custom_bucket.py +++ 
b/tests/integration/test_aws/test_custom_bucket.py @@ -5,8 +5,8 @@ """ This module will contain all cases for the custom bucket test suite """ - import pytest +import time # qa-integration-framework imports from wazuh_testing import session_parameters @@ -137,7 +137,7 @@ def test_custom_bucket_defaults(configuration, metadata, create_test_bucket, set @pytest.mark.parametrize('configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_custom_bucket_logs(configuration, metadata, create_test_bucket, set_test_sqs_queue, upload_file_to_bucket, +def test_custom_bucket_logs(configuration, metadata, create_test_bucket, set_test_sqs_queue, manage_bucket_files, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -225,6 +225,9 @@ def test_custom_bucket_logs(configuration, metadata, create_test_bucket, set_tes assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_start'] + # Give time to the queue to retrieve the messages. + time.sleep(30) + # Check command was called correctly log_monitor.start( timeout=session_parameters.default_timeout, @@ -234,7 +237,6 @@ def test_custom_bucket_logs(configuration, metadata, create_test_bucket, set_tes assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] retrieve_pattern = fr'.*Retrieving messages from: {sqs_name}' - message_pattern = fr'.*The message is: .*' # Check if the message was retrieved from the queue log_monitor.start( @@ -244,6 +246,8 @@ def test_custom_bucket_logs(configuration, metadata, create_test_bucket, set_tes assert log_monitor.callback_result is not None, ERROR_MESSAGE['failed_sqs_message_retrieval'] + message_pattern = fr'.*The message is: .*' + # Check if it processes the created file log_monitor.start( timeout=TIMEOUT[10], diff --git a/tests/integration/test_aws/test_parser.py b/tests/integration/test_aws/test_parser.py index 42cdb8e9bf6..7b8497f3e3f 100644 --- a/tests/integration/test_aws/test_parser.py +++ b/tests/integration/test_aws/test_parser.py @@ -423,9 +423,6 @@ def test_invalid_values_in_bucket( input_description: - The `configuration_values_in_bucket` file provides the configuration for this test. 
""" - - - log_monitor.start( timeout=session_parameters.default_timeout, callback=event_monitor.callback_detect_aws_invalid_value, diff --git a/tests/integration/test_aws/test_path_suffix.py b/tests/integration/test_aws/test_path_suffix.py index c95d2e4aef9..6287d1a2888 100644 --- a/tests/integration/test_aws/test_path_suffix.py +++ b/tests/integration/test_aws/test_path_suffix.py @@ -8,9 +8,6 @@ import pytest -import pydevd_pycharm -pydevd_pycharm.settrace('192.168.56.1', port=55555, stdoutToServer=True, stderrToServer=True) - # qa-integration-framework imports from wazuh_testing import session_parameters from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH From be4cda9e9c443c25be039d711b3ecbf8df9622a4 Mon Sep 17 00:00:00 2001 From: Eduardo Date: Mon, 25 Mar 2024 18:14:12 -0300 Subject: [PATCH 290/419] Fix typos --- tests/integration/conftest.py | 2 +- tests/integration/test_aws/configurator.py | 33 +++++++++++++--------- tests/integration/test_aws/conftest.py | 22 ++++++--------- 3 files changed, 28 insertions(+), 29 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 9299b8b113e..437769488c4 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -149,7 +149,7 @@ def backup_wazuh_configuration() -> None: @pytest.fixture() -def set_wazuh_configuration(test_configuration: dict) -> None: +def set_wazuh_configuration(configuration: dict) -> None: """Set wazuh configuration Args: diff --git a/tests/integration/test_aws/configurator.py b/tests/integration/test_aws/configurator.py index 46aa7c92506..c34d90975a1 100644 --- a/tests/integration/test_aws/configurator.py +++ b/tests/integration/test_aws/configurator.py @@ -3,7 +3,7 @@ # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 """ - This file contain the Test Configurator class that will manage all resources and configurations for each test + This file contains the Test Configurator class that will manage all resources and configurations for each test module. """ from os.path import join @@ -70,10 +70,12 @@ def configure_test(self, configuration_file="", cases_file="") -> None: """ Configure and manage the resources for the test. - Params - ------ - - configuration_file (str): The name of the configuration file. - - cases_file (str): The name of the test cases file. + Parameters + ---------- + configuration_file : str + The name of the configuration file. + cases_file : str + The name of the test cases file. """ # Set test cases yaml path cases_yaml_path = join(TEST_DATA_PATH, TEST_CASES_DIR, self.module, cases_file) @@ -91,11 +93,12 @@ def configure_test(self, configuration_file="", cases_file="") -> None: def _load_configuration_template(self, configuration_file: str, parameters: str) -> None: """Set the configuration template of the test - Params - ------ - - configuration_file (str): The name of the configuration file. - - parameters (str): The test parameters. - + Parameters + ---------- + configuration_file : str + The name of the configuration file. + parameters : str + The test parameters. """ if configuration_file != "": # Set config path @@ -111,10 +114,12 @@ def _load_configuration_template(self, configuration_file: str, parameters: str) def _modify_metadata(self, parameters: list) -> None: """Modify raw data to add test session information - Params - ------ - - parameters (list): The parameters of the test. - - metadata (list): The metadata of the test. 
+ Parameters + ---------- + parameters : list + The parameters of the test. + metadata : list + The metadata of the test. """ # Add Suffix (_todelete) to alert a safe deletion of resource in case of errors. suffix = f"-{self._session_id}-todelete" diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 8e622bf54ec..5e5e2043eb0 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -3,7 +3,7 @@ # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 """ -This module contain all necessary components (fixtures, classes, methods) to configure the test for its execution. +This module contains all necessary components (fixtures, classes, methods) to configure the test for its execution. """ import pytest @@ -319,16 +319,14 @@ def create_test_log_group(log_groups_manager, @pytest.fixture() -def create_test_log_stream(metadata: dict): +def create_test_log_stream(metadata: dict) -> None: """Create a log stream. Parameters ---------- - metadata (dict): Log group information. + metadata : dict + Log group information. - Returns - ------- - None """ # Get log group log_group_name = metadata['log_group_name'] @@ -362,16 +360,14 @@ def create_test_log_stream(metadata: dict): @pytest.fixture() -def create_test_events(metadata: dict): +def create_test_events(metadata: dict) -> None: """Create a log event in a log stream. Parameters ---------- - metadata (dict): Log group information. + metadata : dict + Log group information. - Returns - ------- - None """ # Get log group name log_group_name = metadata["log_group_name"] @@ -416,7 +412,7 @@ def create_test_events(metadata: dict): @pytest.fixture -def set_test_sqs_queue(metadata: dict, sqs_manager): +def set_test_sqs_queue(metadata: dict, sqs_manager) -> None: """Create a test sqs group Parameters @@ -426,8 +422,6 @@ def set_test_sqs_queue(metadata: dict, sqs_manager): sqs_manager: fixture The SQS set for the test. 
- Returns - ------- """ # Get bucket name bucket_name = metadata["bucket_name"] From eb81ee3b6af65cb49a718b585f12084f7a14503e Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Wed, 22 May 2024 11:18:46 +0200 Subject: [PATCH 291/419] Backport conftest change after rebase --- tests/integration/conftest.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 437769488c4..724f0ce2f8f 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -149,20 +149,20 @@ def backup_wazuh_configuration() -> None: @pytest.fixture() -def set_wazuh_configuration(configuration: dict) -> None: +def set_wazuh_configuration(test_configuration: dict) -> None: """Set wazuh configuration Args: - configuration (dict): Configuration template data to write in the ossec.conf + test_configuration (dict): Configuration template data to write in the ossec.conf """ # Save current configuration - backup_config = wazuh_configuration.get_wazuh_conf() + backup_config = configuration.get_wazuh_conf() # Configuration for testing - test_config = wazuh_configuration.set_section_wazuh_conf(configuration.get('sections')) + test_config = configuration.set_section_wazuh_conf(test_configuration.get('sections')) # Set new configuration - wazuh_configuration.write_wazuh_conf(test_config) + configuration.write_wazuh_conf(test_config) # Set current configuration session_parameters.current_configuration = test_config @@ -170,7 +170,7 @@ def set_wazuh_configuration(configuration: dict) -> None: yield # Restore previous configuration - wazuh_configuration.write_wazuh_conf(backup_config) + configuration.write_wazuh_conf(backup_config) @pytest.fixture() From 7b49809d7f1507b5cee4c6b35ab3a9d1afa9af5f Mon Sep 17 00:00:00 2001 From: Eduardo Date: Fri, 18 Aug 2023 12:50:16 -0300 Subject: [PATCH 292/419] Update Readme --- tests/integration/test_aws/README.md | 36 ++++++++++++++-------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/tests/integration/test_aws/README.md b/tests/integration/test_aws/README.md index 662f3bf6127..75d076577ff 100644 --- a/tests/integration/test_aws/README.md +++ b/tests/integration/test_aws/README.md @@ -49,15 +49,12 @@ wazuh/tests/integration/test_aws ## Requirements -- [Proper testing environment](#Setting up a test environment) +- [Proper testing environment](setting-up-a-test-environment) - [Wazuh](https://github.com/wazuh/qa-integration-framework) repository. +- Install the [qa-integration-framework](https://github.com/wazuh/qa-integration-framework) +- The module will assume there are already buckets, log groups and an inspector assessment with test data in AWS. -- [Testing framework](https://github.com/wazuh/qa-integration-framework) installed. - -- Configured buckets, log groups and an inspector assessment with test data in AWS. 
- -For a step-by-step example guide using linux go to the [test setup section](#linux) ## Configuration settings @@ -96,17 +93,20 @@ _We are using **Ubuntu 22.04** for this example:_ - Install Python tests dependencies: -```shell script -# Install pip -apt install python3-pip git -y - -# Clone the `qa-integration-framework` repository withing your testing environment -git clone https://github.com/wazuh/qa-integration-framework.git - -# Install tests dependencies -python3 -m pip install qa-integration-framework/ -``` - + ```shell script + # Install pip + apt install python3-pip git -y + + # Clone your `wazuh` repository within your testing environment + git clone https://github.com/wazuh/wazuh.git + + # Clone the `qa-integration-framework` + git clone https://github.com/wazuh/qa-integration-framework.git + + # Install test dependecies + python3 -m pip install qa-integration-framework/ + ``` + ## Integration tests @@ -171,4 +171,4 @@ test_aws/test_regions.py ........................ test_aws/test_remove_from_bucket.py ...sss......... [100%] ============ 191 passed, 3 skipped, 1 xfailed, 7 warnings in 3723.08s (1:02:03) ============= -``` \ No newline at end of file +``` From b8d5577921e2d9d138a0edd6fbd2de73283d281c Mon Sep 17 00:00:00 2001 From: Eduardo Date: Fri, 25 Aug 2023 08:30:35 -0300 Subject: [PATCH 293/419] Improve README and Remove unnecessary fixture calls on conftest --- tests/integration/conftest.py | 8 ++++++-- tests/integration/test_aws/README.md | 16 ++++++++++------ 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 724f0ce2f8f..08d5002a906 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -180,6 +180,10 @@ def configure_local_internal_options_function(request): It uses the test variable local_internal_options. This should be a dictionary wich keys and values corresponds to the internal option configuration, For example: local_internal_options = {'monitord.rotate_log': '0', 'syscheck.debug': '0' } + + Args: + request (fixture): Provide information on the executing test function. + """ try: local_internal_options = request.param @@ -203,7 +207,7 @@ def configure_local_internal_options_function(request): wazuh_configuration.set_local_internal_options_dict(backup_local_internal_options) -@pytest.fixture(scope='function') +@pytest.fixture() def restart_wazuh_function(request): """Restart before starting a test, and stop it after finishing. @@ -239,7 +243,7 @@ def restart_wazuh_function(request): control_service('stop', daemon=daemon) -@pytest.fixture(scope='function') +@pytest.fixture() def file_monitoring(request): """Fixture to handle the monitoring of a specified file. diff --git a/tests/integration/test_aws/README.md b/tests/integration/test_aws/README.md index 75d076577ff..d09a519e6df 100644 --- a/tests/integration/test_aws/README.md +++ b/tests/integration/test_aws/README.md @@ -49,11 +49,15 @@ wazuh/tests/integration/test_aws ## Requirements -- [Proper testing environment](setting-up-a-test-environment) +- [Proper testing environment](#Setting up a test environment) - [Wazuh](https://github.com/wazuh/qa-integration-framework) repository. -- Install the [qa-integration-framework](https://github.com/wazuh/qa-integration-framework) -- The module will assume there are already buckets, log groups and an inspector assessment with test data in AWS. + +- [Testing framework](https://github.com/wazuh/qa-integration-framework) installed. 
+ +- Configured buckets, log groups and an inspector assessment with test data in AWS. + +For a step-by-step example guide using linux go to the [test setup section](#linux) ## Configuration settings @@ -97,13 +101,13 @@ _We are using **Ubuntu 22.04** for this example:_ # Install pip apt install python3-pip git -y - # Clone your `wazuh` repository within your testing environment + # Clone `wazuh` repository within your testing environment git clone https://github.com/wazuh/wazuh.git - # Clone the `qa-integration-framework` + # Clone the `qa-integration-framework` repository withing your testing environment git clone https://github.com/wazuh/qa-integration-framework.git - # Install test dependecies + # Install tests dependencies python3 -m pip install qa-integration-framework/ ``` From 35a5a093c76e4c50733d8afb1a4a8b18057d70fa Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Thu, 11 Apr 2024 16:24:49 -0300 Subject: [PATCH 294/419] Add isolated resource creation for discard regex --- tests/integration/test_aws/configurator.py | 8 +- tests/integration/test_aws/conftest.py | 88 +++++-- .../configuration_bucket_discard_regex.yaml | 2 - ...uration_cloudwatch_discard_regex_json.yaml | 2 - ..._cloudwatch_discard_regex_simple_text.yaml | 2 - .../configuration_discard_regex.yaml | 23 -- ...configuration_inspector_discard_regex.yaml | 2 - .../cases_bucket_discard_regex.yaml | 109 ++++---- .../cases_cloudwatch_discard_regex_json.yaml | 13 +- ..._cloudwatch_discard_regex_simple_text.yaml | 9 +- .../cases_discard_regex.yaml | 238 ------------------ .../cases_inspector_discard_regex.yaml | 7 +- tests/integration/test_aws/event_monitor.py | 4 +- .../test_aws/test_discard_regex.py | 61 +++-- 14 files changed, 189 insertions(+), 379 deletions(-) delete mode 100644 tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_discard_regex.yaml delete mode 100644 tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_discard_regex.yaml diff --git a/tests/integration/test_aws/configurator.py b/tests/integration/test_aws/configurator.py index c34d90975a1..691a12af476 100644 --- a/tests/integration/test_aws/configurator.py +++ b/tests/integration/test_aws/configurator.py @@ -133,12 +133,16 @@ def _modify_metadata(self, parameters: list) -> None: if data["resource_type"] == "bucket": data["bucket_name"] += suffix + if 'vpc_name' in data: + data['vpc_name'] += suffix if "BUCKET_NAME" in param: param["BUCKET_NAME"] += suffix elif data["resource_type"] == "log_group": - param["LOG_GROUP_NAME"] += suffix - data["log_group_name"] += suffix + if "LOG_GROUP_NAME" in param: + param["LOG_GROUP_NAME"] += suffix + data["log_group_name"] += suffix + data["log_stream_name"] += suffix except KeyError: raise diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 5e5e2043eb0..9c22a44a71a 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -9,7 +9,6 @@ import pytest from time import time from botocore.exceptions import ClientError -from uuid import uuid4 # qa-integration-framework imports from wazuh_testing.logger import logger @@ -18,8 +17,11 @@ upload_log_events, create_log_group, create_log_stream, + create_flow_log, + delete_vpc, delete_bucket, delete_log_group, + delete_log_stream, delete_s3_db, delete_services_db, upload_bucket_file, @@ -114,7 +116,7 @@ def log_groups_manager(): "resource_name": log_group, "error": str(error) }) - raise + raise error except Exception as error: 
logger.error({ @@ -122,6 +124,7 @@ def log_groups_manager(): "resource_name": log_group, "error": str(error) }) + raise error @pytest.fixture(scope="session", autouse=True) @@ -218,9 +221,16 @@ def manage_bucket_files(metadata: dict): # Get bucket type bucket_type = metadata['bucket_type'] + # Check if the VPC type is the one to be tested + vpc_bucket = bucket_type == 'vpcflow' + # Generate file - data, key = generate_file(bucket_type=bucket_type, - bucket_name=bucket_name) + if vpc_bucket: + # Create VPC resources + flow_log_id, vpc_id = create_flow_log(vpc_name=metadata['vpc_name'], bucket_name=bucket_name) + data, key = generate_file(bucket_type=bucket_type, bucket_name=bucket_name, flow_log_id=flow_log_id) + else: + data, key = generate_file(bucket_type=bucket_type, bucket_name=bucket_name) try: # Upload file to bucket @@ -256,6 +266,11 @@ def manage_bucket_files(metadata: dict): try: # Delete all bucket files delete_bucket_files(bucket_name=bucket_name) + + if vpc_bucket: + # Delete VPC resources (VPC and Flow Log) + delete_vpc(vpc_id=vpc_id) + except ClientError as error: logger.error({ "message": "Client error deleting files in bucket", @@ -281,7 +296,7 @@ def manage_bucket_files(metadata: dict): @pytest.fixture() def create_test_log_group(log_groups_manager, metadata: dict) -> None: - """Create a bucket. + """Create a log group. Parameters ---------- @@ -298,7 +313,7 @@ def create_test_log_group(log_groups_manager, create_log_group(log_group_name=log_group_name) logger.debug(f"Created log group: {log_group_name}") - # Append created bucket to resource list + # Append created log group to resource list log_groups_manager.add(log_group_name) except ClientError as error: @@ -331,8 +346,8 @@ def create_test_log_stream(metadata: dict) -> None: # Get log group log_group_name = metadata['log_group_name'] - # Create random stream name - log_stream_name = str(uuid4()) + # Get log stream + log_stream_name = metadata['log_stream_name'] try: # Create log stream @@ -359,15 +374,14 @@ def create_test_log_stream(metadata: dict) -> None: raise -@pytest.fixture() -def create_test_events(metadata: dict) -> None: - """Create a log event in a log stream. +@pytest.fixture +def manage_log_group_events(metadata: dict): + """Upload events to a log stream inside a log group and delete the log stream after the test ends. Parameters ---------- metadata : dict - Log group information. - + Metadata to get the parameters. 
""" # Get log group name log_group_name = metadata["log_group_name"] @@ -379,9 +393,15 @@ def create_test_events(metadata: dict) -> None: event_number = metadata["expected_results"] # Generate event information - events = [ - {'timestamp': int(time() * 1000), 'message': f"Test event number {i}"} for i in range(event_number) - ] + if 'discard_field' in metadata: + events = [ + {'timestamp': int(time() * 1000), 'message': f'{{"message":"Test event number {i}"}}'} + for i in range(event_number) + ] + else: + events = [ + {'timestamp': int(time() * 1000), 'message': f'Test event number {i}'} for i in range(event_number) + ] try: # Insert log events in log group @@ -393,19 +413,45 @@ def create_test_events(metadata: dict) -> None: except ClientError as error: logger.error({ - "message": "Client error creating log stream", + "message": "Client error uploading events to log stream", "log_group": log_group_name, + "log_stream_name": log_stream_name, "error": str(error) }) - pass + raise error except Exception as error: logger.error({ - "message": "Broad error creating log stream", + "message": "Broad error uploading events to log stream", "log_group": log_group_name, + "log_stream_name": log_stream_name, "error": str(error) }) - pass + raise error + + yield + + try: + # Delete log_stream + delete_log_stream(log_stream=log_stream_name, log_group=log_group_name) + + except ClientError as error: + logger.error({ + "message": "Client error deleting log stream", + "log_stream_name": log_stream_name, + "log_group": log_group_name, + "error": str(error) + }) + raise error + + except Exception as error: + logger.error({ + "message": "Broad error deleting log stream", + "log_stream_name": log_stream_name, + "log_group": log_group_name, + "error": str(error) + }) + raise error """SQS fixtures""" @@ -450,7 +496,7 @@ def set_test_sqs_queue(metadata: dict, sqs_manager) -> None: # Check if the sqs exist if error.response['Error']['Code'] == 'ResourceNotFound': logger.error(f"SQS Queue {sqs_name} already exists") - pass + raise error else: raise error diff --git a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_bucket_discard_regex.yaml b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_bucket_discard_regex.yaml index cd7e6175e8c..1e13d34a824 100644 --- a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_bucket_discard_regex.yaml +++ b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_bucket_discard_regex.yaml @@ -9,8 +9,6 @@ attributes: - type: BUCKET_TYPE elements: - - aws_profile: - value: qa - name: value: BUCKET_NAME - only_logs_after: diff --git a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_json.yaml b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_json.yaml index d25c21bc4d3..ed15d3b99b5 100644 --- a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_json.yaml +++ b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_json.yaml @@ -9,8 +9,6 @@ attributes: - type: SERVICE_TYPE elements: - - aws_profile: - value: qa - aws_log_groups: value: LOG_GROUP_NAME - only_logs_after: diff --git 
a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_simple_text.yaml b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_simple_text.yaml index cb433b979fd..06d1ea077b3 100644 --- a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_simple_text.yaml +++ b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_cloudwatch_discard_regex_simple_text.yaml @@ -9,8 +9,6 @@ attributes: - type: SERVICE_TYPE elements: - - aws_profile: - value: qa - aws_log_groups: value: LOG_GROUP_NAME - only_logs_after: diff --git a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_discard_regex.yaml b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_discard_regex.yaml deleted file mode 100644 index cd7e6175e8c..00000000000 --- a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_discard_regex.yaml +++ /dev/null @@ -1,23 +0,0 @@ -- sections: - - section: wodle - attributes: - - name: aws-s3 - elements: - - disabled: - value: 'no' - - bucket: - attributes: - - type: BUCKET_TYPE - elements: - - aws_profile: - value: qa - - name: - value: BUCKET_NAME - - only_logs_after: - value: 2022-NOV-20 - - path: - value: PATH - - discard_regex: - attributes: - - field: DISCARD_FIELD - value: DISCARD_REGEX diff --git a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_inspector_discard_regex.yaml b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_inspector_discard_regex.yaml index fd4086fb9f7..1ed1c570701 100644 --- a/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_inspector_discard_regex.yaml +++ b/tests/integration/test_aws/data/configuration_template/discard_regex_test_module/configuration_inspector_discard_regex.yaml @@ -9,8 +9,6 @@ attributes: - type: SERVICE_TYPE elements: - - aws_profile: - value: qa - only_logs_after: value: ONLY_LOGS_AFTER - regions: diff --git a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_bucket_discard_regex.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_bucket_discard_regex.yaml index 7c37319fb6e..8800160682f 100644 --- a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_bucket_discard_regex.yaml +++ b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_bucket_discard_regex.yaml @@ -4,15 +4,16 @@ BUCKET_TYPE: cloudtrail BUCKET_NAME: wazuh-cloudtrail-integration-tests DISCARD_FIELD: eventSource - DISCARD_REGEX: .*ec2.amazonaws.com.* + DISCARD_REGEX: .*sts.amazonaws.com.* PATH: '' metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests only_logs_after: 2022-NOV-20 discard_field: eventSource - discard_regex: .*ec2.amazonaws.com.* - found_logs: 5 + discard_regex: .*sts.amazonaws.com.* + found_logs: 1 skipped_logs: 1 - name: vpc_discard_regex @@ -20,17 +21,19 @@ configuration_parameters: BUCKET_TYPE: vpcflow BUCKET_NAME: wazuh-vpcflow-integration-tests - DISCARD_FIELD: srcport - DISCARD_REGEX: "5319" + DISCARD_FIELD: action + DISCARD_REGEX: "REJECT" PATH: '' metadata: + resource_type: bucket bucket_type: vpcflow bucket_name: 
wazuh-vpcflow-integration-tests only_logs_after: 2022-NOV-20 - discard_field: srcport - discard_regex: "5319" - found_logs: 5 - skipped_logs: 1 + discard_field: action + discard_regex: "REJECT" + vpc_name: wazuh-vpc-integration-tests + found_logs: 1 + skipped_logs: 5 - name: config_discard_regex description: Config discard regex configurations @@ -41,6 +44,7 @@ DISCARD_REGEX: .*COMPLIANT.* PATH: '' metadata: + resource_type: bucket bucket_type: config bucket_name: wazuh-config-integration-tests only_logs_after: 2022-NOV-20 @@ -55,16 +59,17 @@ BUCKET_TYPE: alb BUCKET_NAME: wazuh-alb-integration-tests DISCARD_FIELD: elb_status_code - DISCARD_REGEX: '401' + DISCARD_REGEX: '403' PATH: '' metadata: + resource_type: bucket bucket_type: alb bucket_name: wazuh-alb-integration-tests only_logs_after: 2022-NOV-20 discard_field: elb_status_code - discard_regex: '401' - found_logs: 5 - skipped_logs: 1 + discard_regex: '403' + found_logs: 1 + skipped_logs: 5 - name: clb_discard_regex description: CLB discard regex configurations @@ -72,33 +77,35 @@ BUCKET_TYPE: clb BUCKET_NAME: wazuh-clb-integration-tests DISCARD_FIELD: elb_status_code - DISCARD_REGEX: '401' + DISCARD_REGEX: '403' PATH: '' metadata: + resource_type: bucket bucket_type: clb bucket_name: wazuh-clb-integration-tests only_logs_after: 2022-NOV-20 discard_field: elb_status_code - discard_regex: '401' - found_logs: 5 - skipped_logs: 1 + discard_regex: '403' + found_logs: 1 + skipped_logs: 5 - name: nlb_discard_regex description: NLB discard regex configurations configuration_parameters: BUCKET_TYPE: nlb BUCKET_NAME: wazuh-nlb-integration-tests - DISCARD_FIELD: listener - DISCARD_REGEX: 0CMK2UAG108C7AXK + DISCARD_FIELD: type + DISCARD_REGEX: tls PATH: '' metadata: + resource_type: bucket bucket_type: nlb bucket_name: wazuh-nlb-integration-tests only_logs_after: 2022-NOV-20 - discard_field: listener - discard_regex: 0CMK2UAG108C7AXK - found_logs: 5 - skipped_logs: 1 + discard_field: type + discard_regex: tls + found_logs: 1 + skipped_logs: 5 - name: kms_discard_regex description: KMS discard regex configurations @@ -106,15 +113,16 @@ BUCKET_TYPE: custom BUCKET_NAME: wazuh-kms-integration-tests DISCARD_FIELD: eventName - DISCARD_REGEX: MatchDataKey + DISCARD_REGEX: GenerateDataKey PATH: '' metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-kms-integration-tests only_logs_after: 2022-NOV-20 discard_field: eventName - discard_regex: MatchDataKey - found_logs: 3 + discard_regex: GenerateDataKey + found_logs: 1 skipped_logs: 1 - name: macie_discard_regex @@ -123,14 +131,15 @@ BUCKET_TYPE: custom BUCKET_NAME: wazuh-macie-integration-tests DISCARD_FIELD: severity - DISCARD_REGEX: LOW + DISCARD_REGEX: CRITICAL PATH: '' metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-macie-integration-tests only_logs_after: 2022-NOV-20 discard_field: severity - discard_regex: LOW + discard_regex: CRITICAL found_logs: 3 skipped_logs: 1 @@ -140,15 +149,16 @@ BUCKET_TYPE: custom BUCKET_NAME: wazuh-trusted-advisor-integration-tests DISCARD_FIELD: status - DISCARD_REGEX: ERROR + DISCARD_REGEX: OK PATH: '' metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-trusted-advisor-integration-tests only_logs_after: 2022-NOV-20 discard_field: status - discard_regex: ERROR - found_logs: 3 + discard_regex: OK + found_logs: 1 skipped_logs: 1 - name: guardduty_discard_regex @@ -157,15 +167,16 @@ BUCKET_TYPE: guardduty BUCKET_NAME: wazuh-guardduty-integration-tests DISCARD_FIELD: partition - DISCARD_REGEX: aws-test + 
DISCARD_REGEX: aws PATH: '' metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-guardduty-integration-tests only_logs_after: 2022-NOV-20 discard_field: partition - discard_regex: aws-test - found_logs: 3 + discard_regex: aws + found_logs: 1 skipped_logs: 1 - name: native_guardduty_discard_regex @@ -174,15 +185,16 @@ BUCKET_TYPE: guardduty BUCKET_NAME: wazuh-native-guardduty-integration-tests DISCARD_FIELD: partition - DISCARD_REGEX: aws-test + DISCARD_REGEX: aws PATH: '' metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-native-guardduty-integration-tests only_logs_after: 2022-NOV-20 discard_field: partition - discard_regex: aws-test - found_logs: 3 + discard_regex: aws + found_logs: 1 skipped_logs: 1 - name: waf_discard_regex @@ -191,15 +203,16 @@ BUCKET_TYPE: waf BUCKET_NAME: wazuh-waf-integration-tests DISCARD_FIELD: action - DISCARD_REGEX: ALLOW + DISCARD_REGEX: BLOCK PATH: '' metadata: + resource_type: bucket bucket_type: waf bucket_name: wazuh-waf-integration-tests only_logs_after: 2022-NOV-20 discard_field: action - discard_regex: ALLOW - found_logs: 3 + discard_regex: BLOCK + found_logs: 1 skipped_logs: 1 - name: server_access_discard_regex @@ -208,15 +221,16 @@ BUCKET_TYPE: server_access BUCKET_NAME: wazuh-server-access-integration-tests DISCARD_FIELD: http_status - DISCARD_REGEX: '200' + DISCARD_REGEX: '404' PATH: '' metadata: + resource_type: bucket bucket_type: server_access bucket_name: wazuh-server-access-integration-tests only_logs_after: 2022-NOV-20 discard_field: http_status - discard_regex: '200' - found_logs: 3 + discard_regex: '404' + found_logs: 1 skipped_logs: 1 - name: cisco_umbrella_discard_regex @@ -225,14 +239,15 @@ BUCKET_TYPE: cisco_umbrella BUCKET_NAME: wazuh-umbrella-integration-tests DISCARD_FIELD: action - DISCARD_REGEX: Blocked + DISCARD_REGEX: Allowed PATH: dnslogs metadata: + resource_type: bucket bucket_type: cisco_umbrella bucket_name: wazuh-umbrella-integration-tests only_logs_after: 2022-NOV-20 discard_field: action - discard_regex: Blocked - found_logs: 3 - skipped_logs: 1 + discard_regex: Allowed + found_logs: 1 + skipped_logs: 5 path: dnslogs diff --git a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_json.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_json.yaml index fd3836cc7e2..8f273bbbbbb 100644 --- a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_json.yaml +++ b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_json.yaml @@ -6,14 +6,17 @@ SERVICE_TYPE: cloudwatchlogs LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests REGIONS: us-east-1 - DISCARD_FIELD: networkInterfaces.networkInterfaceId - DISCARD_REGEX: .*eni-networkInterfaceId* + DISCARD_FIELD: message + DISCARD_REGEX: .*event.*number.*0 ONLY_LOGS_AFTER: 2023-JUL-03 metadata: + resource_type: log_group service_type: cloudwatchlogs log_group_name: wazuh-cloudwatchlogs-integration-tests + log_stream_name: wazuh-cloudwatchlogs-integration-tests-stream only_logs_after: 2023-JUL-03 - discard_field: networkInterfaces.networkInterfaceId - discard_regex: .*eni-networkInterfaceId.* + discard_field: message + discard_regex: .*event.*number.*0 regions: us-east-1 - found_logs: 1 + skipped_logs: 1 + expected_results: 3 diff --git 
a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_simple_text.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_simple_text.yaml index d10325cd043..2fbae8be4d5 100644 --- a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_simple_text.yaml +++ b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_cloudwatch_discard_regex_simple_text.yaml @@ -4,14 +4,17 @@ the content inside the incoming simple text log configuration_parameters: SERVICE_TYPE: cloudwatchlogs - LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests + LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests-simple-text REGIONS: us-east-1 DISCARD_REGEX: .*Test.* ONLY_LOGS_AFTER: 2023-JAN-12 metadata: + resource_type: log_group service_type: cloudwatchlogs - log_group_name: wazuh-cloudwatchlogs-integration-tests + log_group_name: wazuh-cloudwatchlogs-integration-tests-simple-text + log_stream_name: wazuh-cloudwatchlogs-integration-tests-stream-simple-text only_logs_after: 2023-JAN-12 discard_regex: .*Test.* regions: us-east-1 - found_logs: 3 + skipped_logs: 1 + expected_results: 3 diff --git a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_discard_regex.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_discard_regex.yaml deleted file mode 100644 index 7c37319fb6e..00000000000 --- a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_discard_regex.yaml +++ /dev/null @@ -1,238 +0,0 @@ -- name: cloudtrail_discard_regex - description: CloudTrail discard regex configurations - configuration_parameters: - BUCKET_TYPE: cloudtrail - BUCKET_NAME: wazuh-cloudtrail-integration-tests - DISCARD_FIELD: eventSource - DISCARD_REGEX: .*ec2.amazonaws.com.* - PATH: '' - metadata: - bucket_type: cloudtrail - bucket_name: wazuh-cloudtrail-integration-tests - only_logs_after: 2022-NOV-20 - discard_field: eventSource - discard_regex: .*ec2.amazonaws.com.* - found_logs: 5 - skipped_logs: 1 - -- name: vpc_discard_regex - description: VPC discard regex configurations - configuration_parameters: - BUCKET_TYPE: vpcflow - BUCKET_NAME: wazuh-vpcflow-integration-tests - DISCARD_FIELD: srcport - DISCARD_REGEX: "5319" - PATH: '' - metadata: - bucket_type: vpcflow - bucket_name: wazuh-vpcflow-integration-tests - only_logs_after: 2022-NOV-20 - discard_field: srcport - discard_regex: "5319" - found_logs: 5 - skipped_logs: 1 - -- name: config_discard_regex - description: Config discard regex configurations - configuration_parameters: - BUCKET_TYPE: config - BUCKET_NAME: wazuh-config-integration-tests - DISCARD_FIELD: configuration.complianceType - DISCARD_REGEX: .*COMPLIANT.* - PATH: '' - metadata: - bucket_type: config - bucket_name: wazuh-config-integration-tests - only_logs_after: 2022-NOV-20 - discard_field: configuration.complianceType - discard_regex: .*COMPLIANT.* - found_logs: 5 - skipped_logs: 1 - -- name: alb_discard_regex - description: ALB discard regex configurations - configuration_parameters: - BUCKET_TYPE: alb - BUCKET_NAME: wazuh-alb-integration-tests - DISCARD_FIELD: elb_status_code - DISCARD_REGEX: '401' - PATH: '' - metadata: - bucket_type: alb - bucket_name: wazuh-alb-integration-tests - only_logs_after: 2022-NOV-20 - discard_field: elb_status_code - discard_regex: '401' - found_logs: 5 - skipped_logs: 1 - -- name: clb_discard_regex - description: CLB discard regex configurations - 
configuration_parameters: - BUCKET_TYPE: clb - BUCKET_NAME: wazuh-clb-integration-tests - DISCARD_FIELD: elb_status_code - DISCARD_REGEX: '401' - PATH: '' - metadata: - bucket_type: clb - bucket_name: wazuh-clb-integration-tests - only_logs_after: 2022-NOV-20 - discard_field: elb_status_code - discard_regex: '401' - found_logs: 5 - skipped_logs: 1 - -- name: nlb_discard_regex - description: NLB discard regex configurations - configuration_parameters: - BUCKET_TYPE: nlb - BUCKET_NAME: wazuh-nlb-integration-tests - DISCARD_FIELD: listener - DISCARD_REGEX: 0CMK2UAG108C7AXK - PATH: '' - metadata: - bucket_type: nlb - bucket_name: wazuh-nlb-integration-tests - only_logs_after: 2022-NOV-20 - discard_field: listener - discard_regex: 0CMK2UAG108C7AXK - found_logs: 5 - skipped_logs: 1 - -- name: kms_discard_regex - description: KMS discard regex configurations - configuration_parameters: - BUCKET_TYPE: custom - BUCKET_NAME: wazuh-kms-integration-tests - DISCARD_FIELD: eventName - DISCARD_REGEX: MatchDataKey - PATH: '' - metadata: - bucket_type: custom - bucket_name: wazuh-kms-integration-tests - only_logs_after: 2022-NOV-20 - discard_field: eventName - discard_regex: MatchDataKey - found_logs: 3 - skipped_logs: 1 - -- name: macie_discard_regex - description: Macie discard regex configurations - configuration_parameters: - BUCKET_TYPE: custom - BUCKET_NAME: wazuh-macie-integration-tests - DISCARD_FIELD: severity - DISCARD_REGEX: LOW - PATH: '' - metadata: - bucket_type: custom - bucket_name: wazuh-macie-integration-tests - only_logs_after: 2022-NOV-20 - discard_field: severity - discard_regex: LOW - found_logs: 3 - skipped_logs: 1 - -- name: trusted_advisor_discard_regex - description: Trusted Advisor discard regex configurations - configuration_parameters: - BUCKET_TYPE: custom - BUCKET_NAME: wazuh-trusted-advisor-integration-tests - DISCARD_FIELD: status - DISCARD_REGEX: ERROR - PATH: '' - metadata: - bucket_type: custom - bucket_name: wazuh-trusted-advisor-integration-tests - only_logs_after: 2022-NOV-20 - discard_field: status - discard_regex: ERROR - found_logs: 3 - skipped_logs: 1 - -- name: guardduty_discard_regex - description: GuardDuty discard regex configurations - configuration_parameters: - BUCKET_TYPE: guardduty - BUCKET_NAME: wazuh-guardduty-integration-tests - DISCARD_FIELD: partition - DISCARD_REGEX: aws-test - PATH: '' - metadata: - bucket_type: guardduty - bucket_name: wazuh-guardduty-integration-tests - only_logs_after: 2022-NOV-20 - discard_field: partition - discard_regex: aws-test - found_logs: 3 - skipped_logs: 1 - -- name: native_guardduty_discard_regex - description: Native GuardDuty discard regex configurations - configuration_parameters: - BUCKET_TYPE: guardduty - BUCKET_NAME: wazuh-native-guardduty-integration-tests - DISCARD_FIELD: partition - DISCARD_REGEX: aws-test - PATH: '' - metadata: - bucket_type: guardduty - bucket_name: wazuh-native-guardduty-integration-tests - only_logs_after: 2022-NOV-20 - discard_field: partition - discard_regex: aws-test - found_logs: 3 - skipped_logs: 1 - -- name: waf_discard_regex - description: WAF discard regex configurations - configuration_parameters: - BUCKET_TYPE: waf - BUCKET_NAME: wazuh-waf-integration-tests - DISCARD_FIELD: action - DISCARD_REGEX: ALLOW - PATH: '' - metadata: - bucket_type: waf - bucket_name: wazuh-waf-integration-tests - only_logs_after: 2022-NOV-20 - discard_field: action - discard_regex: ALLOW - found_logs: 3 - skipped_logs: 1 - -- name: server_access_discard_regex - description: Server Access discard regex 
configurations - configuration_parameters: - BUCKET_TYPE: server_access - BUCKET_NAME: wazuh-server-access-integration-tests - DISCARD_FIELD: http_status - DISCARD_REGEX: '200' - PATH: '' - metadata: - bucket_type: server_access - bucket_name: wazuh-server-access-integration-tests - only_logs_after: 2022-NOV-20 - discard_field: http_status - discard_regex: '200' - found_logs: 3 - skipped_logs: 1 - -- name: cisco_umbrella_discard_regex - description: CloudTrail discard regex configurations - configuration_parameters: - BUCKET_TYPE: cisco_umbrella - BUCKET_NAME: wazuh-umbrella-integration-tests - DISCARD_FIELD: action - DISCARD_REGEX: Blocked - PATH: dnslogs - metadata: - bucket_type: cisco_umbrella - bucket_name: wazuh-umbrella-integration-tests - only_logs_after: 2022-NOV-20 - discard_field: action - discard_regex: Blocked - found_logs: 3 - skipped_logs: 1 - path: dnslogs diff --git a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_inspector_discard_regex.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_inspector_discard_regex.yaml index 0af561d13ff..502e7f58fba 100644 --- a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_inspector_discard_regex.yaml +++ b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_inspector_discard_regex.yaml @@ -5,13 +5,14 @@ configuration_parameters: SERVICE_TYPE: inspector REGIONS: us-east-1 - DISCARD_FIELD: assetAttributes.tags.value + DISCARD_FIELD: assetAttributes.tags.key DISCARD_REGEX: .*inspector-integration-test.* ONLY_LOGS_AFTER: 2023-JAN-12 metadata: + resource_type: finding service_type: inspector only_logs_after: 2023-JAN-12 - discard_field: assetAttributes.tags.value + discard_field: assetAttributes.tags.key discard_regex: .*inspector-integration-test.* regions: us-east-1 - found_logs: 4 + skipped_logs: 11 diff --git a/tests/integration/test_aws/event_monitor.py b/tests/integration/test_aws/event_monitor.py index 09aaa863e38..f0ccf1d1d86 100644 --- a/tests/integration/test_aws/event_monitor.py +++ b/tests/integration/test_aws/event_monitor.py @@ -223,7 +223,7 @@ def callback_detect_event_processed(line): return line -def callback_detect_event_processed_or_skipped(pattern): +def callback_detect_event_skipped(pattern): """Search for event processed or skipped message in the given line. Args: @@ -232,7 +232,7 @@ def callback_detect_event_processed_or_skipped(pattern): Callable: Callback to match the given line. """ pattern_regex = re.compile(pattern) - return lambda line: pattern_regex.match(line) or callback_detect_event_processed(line) + return lambda line: pattern_regex.match(line) def callback_detect_service_event_processed(expected_results, service_type): diff --git a/tests/integration/test_aws/test_discard_regex.py b/tests/integration/test_aws/test_discard_regex.py index 8c1b3841e05..1ac524afbb6 100644 --- a/tests/integration/test_aws/test_discard_regex.py +++ b/tests/integration/test_aws/test_discard_regex.py @@ -3,7 +3,7 @@ # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 """ -This module will contain all cases for the discard_regex test suite +This module contains all the cases for the discard_regex test suite. """ import pytest @@ -15,12 +15,13 @@ # Local module imports from . 
import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options +from .configurator import configurator +from .utils import ERROR_MESSAGE, TIMEOUT, local_internal_options pytestmark = [pytest.mark.server] -# Set test configurator for the module -configurator = TestConfigurator(module='discard_regex_test_module') +# Set module name +configurator.module = "discard_regex_test_module" # --------------------------------------------- TEST_BUCKET_DISCARD_REGEX --------------------------------------------- # Configure T1 test @@ -33,8 +34,9 @@ zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_bucket_discard_regex( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, + configuration, metadata, create_test_bucket, manage_bucket_files, load_wazuh_basic_configuration, + set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, + truncate_monitored_files, restart_wazuh_function, file_monitoring, ): """ description: Check that some bucket logs are excluded when the regex and field defined in @@ -111,7 +113,6 @@ def test_bucket_discard_regex( parameters = [ 'wodles/aws/aws-s3', '--bucket', bucket_name, - '--aws_profile', 'qa', '--only_logs_after', only_logs_after, '--discard-field', discard_field, '--discard-regex', discard_regex, @@ -120,8 +121,8 @@ def test_bucket_discard_regex( ] if path is not None: - parameters.insert(5, path) - parameters.insert(5, '--trail_prefix') + parameters.insert(3, path) + parameters.insert(3, '--trail_prefix') # Check AWS module started log_monitor.start( @@ -141,8 +142,14 @@ def test_bucket_discard_regex( log_monitor.start( timeout=TIMEOUT[20], - callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), - accumulations=found_logs + skipped_logs + callback=event_monitor.callback_detect_event_processed, + accumulations=found_logs + ) + + log_monitor.start( + timeout=TIMEOUT[20], + callback=event_monitor.callback_detect_event_skipped(pattern), + accumulations=skipped_logs ) assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] @@ -161,7 +168,8 @@ def test_bucket_discard_regex( zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_cloudwatch_discard_regex_json( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configuration, metadata, create_test_log_group, create_test_log_stream, manage_log_group_events, + load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): """ @@ -230,7 +238,7 @@ def test_cloudwatch_discard_regex_json( regions: str = metadata.get('regions') discard_field = metadata.get('discard_field', None) discard_regex = metadata.get('discard_regex') - found_logs = metadata.get('found_logs') + skipped_logs = metadata.get('skipped_logs') pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field.' \ ' The event will be skipped.' 
@@ -238,7 +246,6 @@ def test_cloudwatch_discard_regex_json( parameters = [ 'wodles/aws/aws-s3', '--service', service_type, - '--aws_profile', 'qa', '--only_logs_after', only_logs_after, '--regions', regions, '--aws_log_groups', log_group_name, @@ -265,8 +272,8 @@ def test_cloudwatch_discard_regex_json( log_monitor.start( timeout=TIMEOUT[20], - callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), - accumulations=found_logs + callback=event_monitor.callback_detect_event_skipped(pattern), + accumulations=skipped_logs ) assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] @@ -285,7 +292,8 @@ def test_cloudwatch_discard_regex_json( zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_cloudwatch_discard_regex_simple_text( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configuration, metadata, create_test_log_group, create_test_log_stream, manage_log_group_events, + load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): """ @@ -355,14 +363,13 @@ def test_cloudwatch_discard_regex_simple_text( only_logs_after = metadata.get('only_logs_after') regions: str = metadata.get('regions') discard_regex = metadata.get('discard_regex') - found_logs = metadata.get('found_logs') + skipped_logs = metadata.get('skipped_logs') pattern = fr'.*The "{discard_regex}" regex found a match. The event will be skipped.' parameters = [ 'wodles/aws/aws-s3', '--service', service_type, - '--aws_profile', 'qa', '--only_logs_after', only_logs_after, '--regions', regions, '--aws_log_groups', log_group_name, @@ -388,8 +395,8 @@ def test_cloudwatch_discard_regex_simple_text( log_monitor.start( timeout=TIMEOUT[20], - callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), - accumulations=found_logs + callback=event_monitor.callback_detect_event_skipped(pattern), + accumulations=skipped_logs ) assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] @@ -408,8 +415,9 @@ def test_cloudwatch_discard_regex_simple_text( zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_inspector_discard_regex( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, + configuration, metadata, load_wazuh_basic_configuration, + set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, + truncate_monitored_files, restart_wazuh_function, file_monitoring, ): """ description: Check that some Inspector logs are excluded when the regex and field defined in @@ -476,7 +484,7 @@ def test_inspector_discard_regex( regions: str = metadata.get('regions') discard_field = metadata.get('discard_field', '') discard_regex = metadata.get('discard_regex') - found_logs = metadata.get('found_logs') + skipped_logs = metadata.get('skipped_logs') pattern = fr'.*The "{discard_regex}" regex found a match in the "{discard_field}" field.' \ ' The event will be skipped.' 
@@ -484,7 +492,6 @@ def test_inspector_discard_regex( parameters = [ 'wodles/aws/aws-s3', '--service', service_type, - '--aws_profile', 'qa', '--only_logs_after', only_logs_after, '--regions', regions, '--discard-field', discard_field, @@ -510,8 +517,8 @@ def test_inspector_discard_regex( log_monitor.start( timeout=TIMEOUT[20], - callback=event_monitor.callback_detect_event_processed_or_skipped(pattern), - accumulations=found_logs + callback=event_monitor.callback_detect_event_skipped(pattern), + accumulations=skipped_logs ) assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_discard_regex_message'] From db5ac2fdf54cc91b2d78c60aa74b76ab9d950df9 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Fri, 12 Apr 2024 14:48:07 -0300 Subject: [PATCH 295/419] Apply changes for log groups test --- tests/integration/test_aws/configurator.py | 49 +++++---- tests/integration/test_aws/conftest.py | 100 ++++++++++-------- .../configuration_log_groups.yaml | 2 - .../cases_log_groups.yaml | 3 + tests/integration/test_aws/test_log_groups.py | 13 +-- 5 files changed, 96 insertions(+), 71 deletions(-) diff --git a/tests/integration/test_aws/configurator.py b/tests/integration/test_aws/configurator.py index 691a12af476..b5cfec38af0 100644 --- a/tests/integration/test_aws/configurator.py +++ b/tests/integration/test_aws/configurator.py @@ -126,26 +126,35 @@ def _modify_metadata(self, parameters: list) -> None: # Add suffix to metadata for param, data in zip(parameters, self._metadata): - try: - if "sqs_name" in data: - data["sqs_name"] += suffix - param["SQS_NAME"] += suffix - - if data["resource_type"] == "bucket": - data["bucket_name"] += suffix - if 'vpc_name' in data: - data['vpc_name'] += suffix - if "BUCKET_NAME" in param: - param["BUCKET_NAME"] += suffix - - elif data["resource_type"] == "log_group": - if "LOG_GROUP_NAME" in param: - param["LOG_GROUP_NAME"] += suffix - data["log_group_name"] += suffix - data["log_stream_name"] += suffix - - except KeyError: - raise + # Determine whether resource creation is required or not + resource_creation_required = 'resource_type' in data + + if resource_creation_required: + try: + if "sqs_name" in data: + data["sqs_name"] += suffix + param["SQS_NAME"] += suffix + + if data["resource_type"] == "bucket": + data["bucket_name"] += suffix + if 'vpc_name' in data: + data['vpc_name'] += suffix + if "BUCKET_NAME" in param: + param["BUCKET_NAME"] += suffix + + elif data["resource_type"] == "log_group": + if "LOG_GROUP_NAME" in param: + suffixed_log_groups = [] + for log_group in data["log_group_name"].split(','): + log_group += suffix + suffixed_log_groups.append(log_group) + data["log_group_name"] = ','.join(suffixed_log_groups) + param["LOG_GROUP_NAME"] = data["log_group_name"] + if "log_stream_name" in data: # It is not present for basic or parser tests + data["log_stream_name"] += suffix + + except KeyError: + raise # Instantiate configurator diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 9c22a44a71a..62f5425161f 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -305,21 +305,26 @@ def create_test_log_group(log_groups_manager, metadata : dict Log group information. 
""" - # Get log group name - log_group_name = metadata["log_group_name"] + # Get log group names + log_group_names = metadata["log_group_name"].split(',') + + # If the resource_type is defined, then the resource must be created + resource_creation = 'resource_type' in metadata try: - # Create log group - create_log_group(log_group_name=log_group_name) - logger.debug(f"Created log group: {log_group_name}") + if resource_creation: + # Create log group + for log_group in log_group_names: + create_log_group(log_group_name=log_group) + logger.debug(f"Created log group: {log_group}") - # Append created log group to resource list - log_groups_manager.add(log_group_name) + # Append created log group to resource list + log_groups_manager.add(log_group) except ClientError as error: logger.error({ "message": "Client error creating log group", - "log_group": log_group_name, + "log_group": log_group, "error": str(error) }) raise @@ -327,7 +332,7 @@ def create_test_log_group(log_groups_manager, except Exception as error: logger.error({ "message": "Broad error creating log group", - "log_group": log_group_name, + "log_group": log_group, "error": str(error) }) raise @@ -343,24 +348,27 @@ def create_test_log_stream(metadata: dict) -> None: Log group information. """ - # Get log group - log_group_name = metadata['log_group_name'] + # Get log group names + log_group_names = metadata["log_group_name"].split(',') # Get log stream log_stream_name = metadata['log_stream_name'] - try: - # Create log stream - create_log_stream(log_group=log_group_name, - log_stream=log_stream_name) - logger.debug(f'Created log stream {log_stream_name} within log group {log_group_name}') + # If the resource_type is defined, then the resource must be created + resource_creation = 'resource_type' in metadata - metadata['log_stream'] = log_stream_name + try: + if resource_creation: + # Create log stream for each log group defined + for log_group in log_group_names: + create_log_stream(log_group=log_group, + log_stream=log_stream_name) + logger.debug(f'Created log stream {log_stream_name} within log group {log_group}') except ClientError as error: logger.error({ "message": "Client error creating log stream", - "log_group": log_group_name, + "log_group": log_group, "error": str(error) }) raise @@ -368,7 +376,7 @@ def create_test_log_stream(metadata: dict) -> None: except Exception as error: logger.error({ "message": "Broad error creating log stream", - "log_group": log_group_name, + "log_group": log_group, "error": str(error) }) raise @@ -383,8 +391,8 @@ def manage_log_group_events(metadata: dict): metadata : dict Metadata to get the parameters. 
""" - # Get log group name - log_group_name = metadata["log_group_name"] + # Get log group names + log_group_names = metadata["log_group_name"].split(',') # Get log stream name log_stream_name = metadata["log_stream_name"] @@ -392,29 +400,33 @@ def manage_log_group_events(metadata: dict): # Get number of events event_number = metadata["expected_results"] - # Generate event information - if 'discard_field' in metadata: - events = [ - {'timestamp': int(time() * 1000), 'message': f'{{"message":"Test event number {i}"}}'} - for i in range(event_number) - ] - else: - events = [ - {'timestamp': int(time() * 1000), 'message': f'Test event number {i}'} for i in range(event_number) - ] + # If the resource_type is defined, then the resource must be created + resource_creation = 'resource_type' in metadata try: - # Insert log events in log group - upload_log_events( - log_stream=log_stream_name, - log_group=log_group_name, - events=events - ) + if resource_creation: + # Generate event information + if 'discard_field' in metadata: + events = [ + {'timestamp': int(time() * 1000), 'message': f'{{"message":"Test event number {i}"}}'} + for i in range(event_number) + ] + else: + events = [ + {'timestamp': int(time() * 1000), 'message': f'Test event number {i}'} for i in range(event_number) + ] + for log_group in log_group_names: + # Insert log events in log group + upload_log_events( + log_stream=log_stream_name, + log_group=log_group, + events=events + ) except ClientError as error: logger.error({ "message": "Client error uploading events to log stream", - "log_group": log_group_name, + "log_group": log_group, "log_stream_name": log_stream_name, "error": str(error) }) @@ -423,7 +435,7 @@ def manage_log_group_events(metadata: dict): except Exception as error: logger.error({ "message": "Broad error uploading events to log stream", - "log_group": log_group_name, + "log_group": log_group, "log_stream_name": log_stream_name, "error": str(error) }) @@ -432,14 +444,16 @@ def manage_log_group_events(metadata: dict): yield try: - # Delete log_stream - delete_log_stream(log_stream=log_stream_name, log_group=log_group_name) + if resource_creation: + for log_group in log_group_names: + # Delete log_stream + delete_log_stream(log_stream=log_stream_name, log_group=log_group) except ClientError as error: logger.error({ "message": "Client error deleting log stream", "log_stream_name": log_stream_name, - "log_group": log_group_name, + "log_group": log_group, "error": str(error) }) raise error @@ -448,7 +462,7 @@ def manage_log_group_events(metadata: dict): logger.error({ "message": "Broad error deleting log stream", "log_stream_name": log_stream_name, - "log_group": log_group_name, + "log_group": log_group, "error": str(error) }) raise error diff --git a/tests/integration/test_aws/data/configuration_template/log_groups_test_module/configuration_log_groups.yaml b/tests/integration/test_aws/data/configuration_template/log_groups_test_module/configuration_log_groups.yaml index 17585a7f2d1..32073dcb2ea 100644 --- a/tests/integration/test_aws/data/configuration_template/log_groups_test_module/configuration_log_groups.yaml +++ b/tests/integration/test_aws/data/configuration_template/log_groups_test_module/configuration_log_groups.yaml @@ -9,8 +9,6 @@ attributes: - type: SERVICE_TYPE elements: - - aws_profile: - value: qa - aws_log_groups: value: LOG_GROUP_NAME - only_logs_after: diff --git a/tests/integration/test_aws/data/test_cases/log_groups_test_module/cases_log_groups.yaml 
b/tests/integration/test_aws/data/test_cases/log_groups_test_module/cases_log_groups.yaml index aefd5f59f79..1b03070f483 100644 --- a/tests/integration/test_aws/data/test_cases/log_groups_test_module/cases_log_groups.yaml +++ b/tests/integration/test_aws/data/test_cases/log_groups_test_module/cases_log_groups.yaml @@ -4,8 +4,10 @@ SERVICE_TYPE: cloudwatchlogs LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests,temporary-log-group metadata: + resource_type: log_group service_type: cloudwatchlogs log_group_name: wazuh-cloudwatchlogs-integration-tests,temporary-log-group + log_stream_name: wazuh-cloudwatchlogs-integration-tests-stream only_logs_after: 2023-JAN-12 expected_results: 3 @@ -17,5 +19,6 @@ metadata: service_type: cloudwatchlogs log_group_name: fake-log-group + log_stream_name: wazuh-cloudwatchlogs-integration-tests-stream only_logs_after: 2023-JAN-12 expected_results: 0 diff --git a/tests/integration/test_aws/test_log_groups.py b/tests/integration/test_aws/test_log_groups.py index ecfaca210cb..b9ef80e11dc 100644 --- a/tests/integration/test_aws/test_log_groups.py +++ b/tests/integration/test_aws/test_log_groups.py @@ -17,12 +17,14 @@ # Local module imports from . import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options +from .configurator import configurator +from .utils import ERROR_MESSAGE, TIMEOUT, local_internal_options + pytestmark = [pytest.mark.server] # Set test configurator for the module -configurator = TestConfigurator(module='log_groups_test_module') +configurator.module = 'log_groups_test_module' # ----------------------------------------------- TEST_AWS_LOG_GROUPS -------------------------------------------------- # Configure T1 test @@ -35,9 +37,9 @@ zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_log_groups( - configuration, metadata, create_log_stream, load_wazuh_basic_configuration, set_wazuh_configuration, - clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, - file_monitoring + configuration, metadata, create_test_log_group, create_test_log_stream, manage_log_group_events, + load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): """ description: Only the events for the specified log_group are processed. 
@@ -106,7 +108,6 @@ def test_log_groups( parameters = [ 'wodles/aws/aws-s3', '--service', service_type, - '--aws_profile', 'qa', '--only_logs_after', '2023-JAN-12', '--regions', 'us-east-1', '--aws_log_groups', log_group_names, From f6f8c9679db4635f3a6a0e29c63ba1f588bf9dad Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Fri, 26 Apr 2024 14:21:58 +0200 Subject: [PATCH 296/419] Fix fixture error after rebase --- tests/integration/test_aws/test_basic.py | 12 +++---- .../test_aws/test_custom_bucket.py | 8 ++--- .../test_aws/test_discard_regex.py | 16 +++++----- tests/integration/test_aws/test_log_groups.py | 4 +-- .../test_aws/test_only_logs_after.py | 20 ++++++------ tests/integration/test_aws/test_parser.py | 32 +++++++++---------- tests/integration/test_aws/test_path.py | 4 +-- .../integration/test_aws/test_path_suffix.py | 4 +-- tests/integration/test_aws/test_regions.py | 12 +++---- .../test_aws/test_remove_from_bucket.py | 8 ++--- 10 files changed, 60 insertions(+), 60 deletions(-) diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py index e8b0ed15f33..52e1f2fa4aa 100644 --- a/tests/integration/test_aws/test_basic.py +++ b/tests/integration/test_aws/test_basic.py @@ -27,11 +27,11 @@ @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_bucket_defaults( - configuration, metadata, create_test_bucket, load_wazuh_basic_configuration, set_wazuh_configuration, + test_configuration, metadata, create_test_bucket, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): @@ -122,10 +122,10 @@ def test_bucket_defaults( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_service_defaults(configuration, metadata, create_test_log_group, load_wazuh_basic_configuration, +def test_service_defaults(test_configuration, metadata, create_test_log_group, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): @@ -222,10 +222,10 @@ def test_service_defaults(configuration, metadata, create_test_log_group, load_w @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_inspector_defaults(configuration, metadata, create_test_log_group, load_wazuh_basic_configuration, +def test_inspector_defaults(test_configuration, metadata, create_test_log_group, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): diff --git a/tests/integration/test_aws/test_custom_bucket.py b/tests/integration/test_aws/test_custom_bucket.py index 55868eb0098..07c7de3c69f 100644 --- a/tests/integration/test_aws/test_custom_bucket.py +++ b/tests/integration/test_aws/test_custom_bucket.py @@ -28,10 +28,10 @@ 
@pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_custom_bucket_defaults(configuration, metadata, create_test_bucket, set_test_sqs_queue, +def test_custom_bucket_defaults(test_configuration, metadata, create_test_bucket, set_test_sqs_queue, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring @@ -134,10 +134,10 @@ def test_custom_bucket_defaults(configuration, metadata, create_test_bucket, set @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_custom_bucket_logs(configuration, metadata, create_test_bucket, set_test_sqs_queue, manage_bucket_files, +def test_custom_bucket_logs(test_configuration, metadata, create_test_bucket, set_test_sqs_queue, manage_bucket_files, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring diff --git a/tests/integration/test_aws/test_discard_regex.py b/tests/integration/test_aws/test_discard_regex.py index 1ac524afbb6..0e95decdaf5 100644 --- a/tests/integration/test_aws/test_discard_regex.py +++ b/tests/integration/test_aws/test_discard_regex.py @@ -30,11 +30,11 @@ @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_bucket_discard_regex( - configuration, metadata, create_test_bucket, manage_bucket_files, load_wazuh_basic_configuration, + test_configuration, metadata, create_test_bucket, manage_bucket_files, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): @@ -164,11 +164,11 @@ def test_bucket_discard_regex( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_cloudwatch_discard_regex_json( - configuration, metadata, create_test_log_group, create_test_log_stream, manage_log_group_events, + test_configuration, metadata, create_test_log_group, create_test_log_stream, manage_log_group_events, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): @@ -288,11 +288,11 @@ def test_cloudwatch_discard_regex_json( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_cloudwatch_discard_regex_simple_text( - configuration, metadata, create_test_log_group, create_test_log_stream, manage_log_group_events, + test_configuration, metadata, create_test_log_group, create_test_log_stream, manage_log_group_events, 
load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): @@ -411,11 +411,11 @@ def test_cloudwatch_discard_regex_simple_text( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_inspector_discard_regex( - configuration, metadata, load_wazuh_basic_configuration, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): diff --git a/tests/integration/test_aws/test_log_groups.py b/tests/integration/test_aws/test_log_groups.py index b9ef80e11dc..2443aedefb8 100644 --- a/tests/integration/test_aws/test_log_groups.py +++ b/tests/integration/test_aws/test_log_groups.py @@ -33,11 +33,11 @@ @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_log_groups( - configuration, metadata, create_test_log_group, create_test_log_stream, manage_log_group_events, + test_configuration, metadata, create_test_log_group, create_test_log_stream, manage_log_group_events, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): diff --git a/tests/integration/test_aws/test_only_logs_after.py b/tests/integration/test_aws/test_only_logs_after.py index d6ae20dd95b..98bf3f01321 100644 --- a/tests/integration/test_aws/test_only_logs_after.py +++ b/tests/integration/test_aws/test_only_logs_after.py @@ -35,11 +35,11 @@ @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_bucket_without_only_logs_after( - configuration, metadata, upload_and_delete_file_to_s3, load_wazuh_basic_configuration, set_wazuh_configuration, + test_configuration, metadata, upload_and_delete_file_to_s3, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): @@ -165,11 +165,11 @@ def test_bucket_without_only_logs_after( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_service_without_only_logs_after( - configuration, metadata, create_log_stream_in_existent_group, load_wazuh_basic_configuration, + test_configuration, metadata, create_log_stream_in_existent_group, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): @@ -283,11 +283,11 @@ @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata',
+@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_bucket_with_only_logs_after( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ @@ -412,11 +412,11 @@ def test_bucket_with_only_logs_after( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_cloudwatch_with_only_logs_after( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ @@ -539,11 +539,11 @@ def test_cloudwatch_with_only_logs_after( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_inspector_with_only_logs_after( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ diff --git a/tests/integration/test_aws/test_parser.py b/tests/integration/test_aws/test_parser.py index 7b8497f3e3f..e97e8ea1ad3 100644 --- a/tests/integration/test_aws/test_parser.py +++ b/tests/integration/test_aws/test_parser.py @@ -27,11 +27,11 @@ @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_bucket_and_service_missing( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, file_monitoring ): @@ -96,11 +96,11 @@ def test_bucket_and_service_missing( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_type_missing_in_bucket( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, file_monitoring ): @@ -164,11 +164,11 @@ @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata',
zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_type_missing_in_service( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, file_monitoring ): @@ -233,11 +233,11 @@ def test_type_missing_in_service( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_empty_values_in_bucket( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, file_monitoring ): @@ -301,11 +301,11 @@ def test_empty_values_in_bucket( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_empty_values_in_service( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, file_monitoring ): @@ -370,11 +370,11 @@ def test_empty_values_in_service( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_invalid_values_in_bucket( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, file_monitoring ): @@ -438,11 +438,11 @@ def test_invalid_values_in_bucket( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_invalid_values_in_service( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function_without_exception, file_monitoring ): @@ -506,11 +506,11 @@ def test_invalid_values_in_service( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_multiple_bucket_and_service_tags( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, configure_local_internal_options_function, truncate_monitored_files, 
restart_wazuh_function_without_exception, file_monitoring ): diff --git a/tests/integration/test_aws/test_path.py b/tests/integration/test_aws/test_path.py index a7bb3bc2a03..2ab85f869aa 100644 --- a/tests/integration/test_aws/test_path.py +++ b/tests/integration/test_aws/test_path.py @@ -29,11 +29,11 @@ @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_path( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ diff --git a/tests/integration/test_aws/test_path_suffix.py b/tests/integration/test_aws/test_path_suffix.py index 6287d1a2888..ea07f74cf96 100644 --- a/tests/integration/test_aws/test_path_suffix.py +++ b/tests/integration/test_aws/test_path_suffix.py @@ -30,11 +30,11 @@ @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_path_suffix( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ diff --git a/tests/integration/test_aws/test_regions.py b/tests/integration/test_aws/test_regions.py index ce526780a80..159636d153b 100644 --- a/tests/integration/test_aws/test_regions.py +++ b/tests/integration/test_aws/test_regions.py @@ -31,11 +31,11 @@ @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_regions( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ @@ -173,11 +173,11 @@ def test_regions( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_cloudwatch_regions( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ @@ -313,11 +313,11 @@ def test_cloudwatch_regions( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def 
test_inspector_regions( - configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ diff --git a/tests/integration/test_aws/test_remove_from_bucket.py b/tests/integration/test_aws/test_remove_from_bucket.py index 68a3147e68b..66f46af6f8c 100644 --- a/tests/integration/test_aws/test_remove_from_bucket.py +++ b/tests/integration/test_aws/test_remove_from_bucket.py @@ -28,11 +28,11 @@ @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_remove_from_bucket( - configuration, metadata, mark_cases_as_skipped, upload_and_delete_file_to_s3, load_wazuh_basic_configuration, + test_configuration, metadata, mark_cases_as_skipped, upload_and_delete_file_to_s3, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): @@ -139,11 +139,11 @@ def test_remove_from_bucket( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('configuration, metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_remove_log_stream( - configuration, metadata, create_log_stream, load_wazuh_basic_configuration, set_wazuh_configuration, + test_configuration, metadata, create_log_stream, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): From 48e43c3a5932d04eb5249edf611fc2244b7cbe02 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Mon, 13 May 2024 11:40:11 +0200 Subject: [PATCH 297/419] Add isolated resources for remove tests --- .../configuration_remove_from_bucket.yaml | 2 - .../configuration_remove_log_stream.yaml | 2 - .../cases_remove_from_bucket.yaml | 15 +++++ .../cases_remove_log_streams.yaml | 2 + .../test_aws/test_remove_from_bucket.py | 63 +++++++++++++------ tests/integration/test_aws/utils.py | 3 +- 6 files changed, 64 insertions(+), 23 deletions(-) diff --git a/tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_from_bucket.yaml b/tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_from_bucket.yaml index 5c8d3f24a74..2ae2eb33fa0 100644 --- a/tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_from_bucket.yaml +++ b/tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_from_bucket.yaml @@ -9,8 +9,6 @@ attributes: - type: BUCKET_TYPE elements: - - aws_profile: - value: qa - name: value: BUCKET_NAME - remove_from_bucket: diff --git a/tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_log_stream.yaml b/tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_log_stream.yaml index 2fbdbf07379..88a76a2f14e 100644 --- 
a/tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_log_stream.yaml +++ b/tests/integration/test_aws/data/configuration_template/remove_from_bucket_test_module/configuration_remove_log_stream.yaml @@ -9,8 +9,6 @@ attributes: - type: SERVICE_TYPE elements: - - aws_profile: - value: qa - aws_log_groups: value: LOG_GROUP_NAME - remove_log_streams: diff --git a/tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_from_bucket.yaml b/tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_from_bucket.yaml index eb26143283f..bce58ee5b2f 100644 --- a/tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_from_bucket.yaml +++ b/tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_from_bucket.yaml @@ -5,6 +5,7 @@ BUCKET_NAME: wazuh-cloudtrail-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests @@ -15,6 +16,8 @@ BUCKET_NAME: wazuh-vpcflow-integration-tests PATH: '' metadata: + resource_type: bucket + vpc_name: wazuh-vpc-integration-tests bucket_type: vpcflow bucket_name: wazuh-vpcflow-integration-tests @@ -25,6 +28,7 @@ BUCKET_NAME: wazuh-config-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: config bucket_name: wazuh-config-integration-tests @@ -35,6 +39,7 @@ BUCKET_NAME: wazuh-alb-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: alb bucket_name: wazuh-alb-integration-tests @@ -45,6 +50,7 @@ BUCKET_NAME: wazuh-clb-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: clb bucket_name: wazuh-clb-integration-tests @@ -55,6 +61,7 @@ BUCKET_NAME: wazuh-nlb-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: nlb bucket_name: wazuh-nlb-integration-tests @@ -65,6 +72,7 @@ BUCKET_NAME: wazuh-kms-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-kms-integration-tests @@ -75,6 +83,7 @@ BUCKET_NAME: wazuh-macie-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-macie-integration-tests @@ -85,6 +94,7 @@ BUCKET_NAME: wazuh-trusted-advisor-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-trusted-advisor-integration-tests @@ -95,6 +105,7 @@ BUCKET_NAME: wazuh-guardduty-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-guardduty-integration-tests @@ -105,6 +116,7 @@ BUCKET_NAME: wazuh-native-guardduty-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-native-guardduty-integration-tests @@ -115,6 +127,7 @@ BUCKET_NAME: wazuh-waf-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: waf bucket_name: wazuh-waf-integration-tests @@ -125,6 +138,7 @@ BUCKET_NAME: wazuh-server-access-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: server_access bucket_name: wazuh-server-access-integration-tests @@ -135,6 +149,7 @@ BUCKET_NAME: wazuh-umbrella-integration-tests PATH: dnslogs metadata: + resource_type: bucket bucket_type: cisco_umbrella bucket_name: wazuh-umbrella-integration-tests path: dnslogs diff --git a/tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_log_streams.yaml 
b/tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_log_streams.yaml index dfc941327c0..239cf0baa00 100644 --- a/tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_log_streams.yaml +++ b/tests/integration/test_aws/data/test_cases/remove_from_bucket_test_module/cases_remove_log_streams.yaml @@ -4,5 +4,7 @@ SERVICE_TYPE: cloudwatchlogs LOG_GROUP_NAME: temporary-log-group metadata: + resource_type: log_group service_type: cloudwatchlogs log_group_name: temporary-log-group + log_stream_name: wazuh-cloudwatchlogs-integration-tests-stream diff --git a/tests/integration/test_aws/test_remove_from_bucket.py b/tests/integration/test_aws/test_remove_from_bucket.py index 66f46af6f8c..28b31789384 100644 --- a/tests/integration/test_aws/test_remove_from_bucket.py +++ b/tests/integration/test_aws/test_remove_from_bucket.py @@ -11,15 +11,17 @@ # qa-integration-framework imports from wazuh_testing import session_parameters from wazuh_testing.modules.aws.utils import log_stream_exists, file_exists +from wazuh_testing.modules.aws.patterns import REMOVE_S3_FILE # Local module imports from . import event_monitor -from .utils import ERROR_MESSAGE, TestConfigurator, local_internal_options +from .configurator import configurator +from .utils import ERROR_MESSAGE, TIMEOUT, local_internal_options pytestmark = [pytest.mark.server] # Set test configurator for the module -configurator = TestConfigurator(module='remove_from_bucket_test_module') +configurator.module = 'remove_from_bucket_test_module' # ---------------------------------------------------- TEST_REMOVE_FROM_BUCKET ----------------------------------------- # Configure T1 test @@ -32,9 +34,9 @@ zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_remove_from_bucket( - test_configuration, metadata, mark_cases_as_skipped, upload_and_delete_file_to_s3, load_wazuh_basic_configuration, - set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, - truncate_monitored_files, restart_wazuh_function, file_monitoring + test_configuration, metadata, mark_cases_as_skipped, create_test_bucket, manage_bucket_files, s3_client, + load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The uploaded file was removed after the execution. @@ -53,15 +55,24 @@ def test_remove_from_bucket( - Restore initial configuration, both ossec.conf and local_internal_options.conf. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. - - upload_and_delete_file_to_s3: + - mark_cases_as_skipped: + type: fixture + brief: Mark certain tests as skipped. + - create_test_bucket: + type: fixture + brief: Create temporal bucket. + - manage_bucket_files: type: fixture - brief: Upload a file to S3 bucket for the day of the execution. + brief: Create and delete the resources for the test. + - s3_client: + type: fixture + brief: S3 client to access AWS. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. 
@@ -96,14 +107,13 @@ def test_remove_from_bucket( 'wodles/aws/aws-s3', '--bucket', bucket_name, '--remove', - '--aws_profile', 'qa', '--type', metadata['bucket_type'], '--debug', '2' ] if path is not None: - parameters.insert(6, path) - parameters.insert(6, '--trail_prefix') + parameters.insert(4, path) + parameters.insert(4, '--trail_prefix') # Check AWS module started log_monitor.start( @@ -121,7 +131,14 @@ def test_remove_from_bucket( assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] - assert not file_exists(filename=metadata['uploaded_file'], bucket_name=bucket_name) + log_monitor.start( + timeout=TIMEOUT[20], + callback=event_monitor.make_aws_callback(pattern=fr"{REMOVE_S3_FILE}") + ) + + assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_no_existent_log_group'] + + assert not file_exists(filename=metadata['uploaded_file'], bucket_name=bucket_name, client=s3_client) # Detect any ERROR message log_monitor.start( @@ -143,9 +160,9 @@ def test_remove_from_bucket( zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_remove_log_stream( - test_configuration, metadata, create_log_stream, load_wazuh_basic_configuration, set_wazuh_configuration, - clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, - file_monitoring + test_configuration, metadata, create_test_log_group, create_test_log_stream, manage_log_group_events, + logs_clients, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The created log stream was removed after the execution. @@ -170,9 +187,18 @@ def test_remove_log_stream( - metadata: type: dict brief: Get metadata from the module. - - create_log_stream: + - create_test_log_group: + type: fixture + brief: Create a log group. + - create_test_log_stream: type: fixture brief: Create a log stream with events for the day of execution. + - manage_log_group_events: + type: fixture + brief: Manage events for the created log stream and log group. + - logs_clients: + type: fixture + brief: CloudWatch Logs client to check the log stream existence. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. 
@@ -207,7 +233,6 @@ def test_remove_log_stream( parameters = [ 'wodles/aws/aws-s3', '--service', service_type, - '--aws_profile', 'qa', '--regions', 'us-east-1', '--aws_log_groups', log_group_name, '--remove-log-streams', @@ -230,7 +255,9 @@ def test_remove_log_stream( assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] - assert not log_stream_exists(log_stream=metadata['log_stream'], log_group=log_group_name) + for log_client in logs_clients: + assert not log_stream_exists(log_stream=metadata['log_stream_name'], log_group=log_group_name, + client=log_client) # Detect any ERROR message log_monitor.start( diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py index 454a8954bcc..6fef9bd19bc 100644 --- a/tests/integration/test_aws/utils.py +++ b/tests/integration/test_aws/utils.py @@ -40,7 +40,8 @@ "incorrect_discard_regex_message": "The AWS module did not show the correct message about discard regex or, " "did not process the expected amount of logs", "failed_sqs_message_retrieval": "The AWS module did not retrieve the expected message from the SQS Queue", - "failed_message_handling": "The AWS module did not handle the expected message" + "failed_message_handling": "The AWS module did not handle the expected message", + "file_not_removed": "The AWS did not show the expected removed file from S3 message" } TIMEOUT = { From 46d847f62f642403c2daceb5554560b48a5cf52e Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Tue, 14 May 2024 16:29:32 +0200 Subject: [PATCH 298/419] Update configuration and test cases --- ...et_configuration_with_only_logs_after.yaml | 2 - ...configuration_without_only_logs_after.yaml | 2 - ...ch_configuration_with_only_logs_after.yaml | 2 - ...or_configuration_with_only_logs_after.yaml | 2 - ...configuration_without_only_logs_after.yaml | 2 - ...tion_multiple_bucket_and_service_tags.yaml | 8 --- .../configuration_type_missing_in_bucket.yaml | 2 - ...configuration_type_missing_in_service.yaml | 2 - .../configuration_values_in_bucket.yaml | 2 - .../configuration_values_in_service.yaml | 2 - .../configuration_path_suffix.yaml | 2 - .../path_test_module/configuration_path.yaml | 2 - .../bucket_configuration_regions.yaml | 2 - .../cloudwatch_configuration_regions.yaml | 2 - .../inspector_configuration_regions.yaml | 2 - .../cases_bucket_custom.yaml | 1 + .../cases_bucket_custom_logs.yaml | 1 + .../cases_bucket_discard_regex.yaml | 28 ++++---- .../cases_bucket_multiple_calls.yaml | 72 +++++++++++++++++++ .../cases_bucket_with_only_logs_after.yaml | 15 ++++ .../cases_bucket_without_only_logs_after.yaml | 15 ++++ .../cases_cloudwatch_multiple_calls.yaml | 6 ++ ...cases_cloudwatch_with_only_logs_after.yaml | 3 +- .../cases_inspector_with_only_logs_after.yaml | 2 +- ...cases_service_without_only_logs_after.yaml | 2 + .../cases_invalid_values_in_bucket.yaml | 16 ++--- .../cases_path_suffix.yaml | 12 ++++ .../path_test_module/cases_path.yaml | 45 ++++++++++++ .../cases_bucket_regions.yaml | 25 ++++++- .../cases_cloudwatch_regions.yaml | 17 ++--- .../cases_inspector_regions.yaml | 4 +- 31 files changed, 223 insertions(+), 77 deletions(-) diff --git a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_with_only_logs_after.yaml b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_with_only_logs_after.yaml index 1ccc03ee926..20e3637bc95 100644 --- 
a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_with_only_logs_after.yaml +++ b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_with_only_logs_after.yaml @@ -9,8 +9,6 @@ attributes: - type: BUCKET_TYPE elements: - - aws_profile: - value: qa - name: value: BUCKET_NAME - only_logs_after: diff --git a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_without_only_logs_after.yaml b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_without_only_logs_after.yaml index 0821bb0b8f2..3a1b01510d5 100644 --- a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_without_only_logs_after.yaml +++ b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/bucket_configuration_without_only_logs_after.yaml @@ -9,8 +9,6 @@ attributes: - type: BUCKET_TYPE elements: - - aws_profile: - value: qa - name: value: BUCKET_NAME - path: diff --git a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/cloudwatch_configuration_with_only_logs_after.yaml b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/cloudwatch_configuration_with_only_logs_after.yaml index 5eacd69893b..ddb2aade57e 100644 --- a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/cloudwatch_configuration_with_only_logs_after.yaml +++ b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/cloudwatch_configuration_with_only_logs_after.yaml @@ -9,8 +9,6 @@ attributes: - type: SERVICE_TYPE elements: - - aws_profile: - value: qa - aws_log_groups: value: LOG_GROUP_NAME - only_logs_after: diff --git a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/inspector_configuration_with_only_logs_after.yaml b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/inspector_configuration_with_only_logs_after.yaml index d88be0bb12f..576e5a5e2cd 100644 --- a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/inspector_configuration_with_only_logs_after.yaml +++ b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/inspector_configuration_with_only_logs_after.yaml @@ -9,8 +9,6 @@ attributes: - type: SERVICE_TYPE elements: - - aws_profile: - value: qa - only_logs_after: value: ONLY_LOGS_AFTER - regions: diff --git a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/service_configuration_without_only_logs_after.yaml b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/service_configuration_without_only_logs_after.yaml index c16c07ec92f..5e76159855c 100644 --- a/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/service_configuration_without_only_logs_after.yaml +++ b/tests/integration/test_aws/data/configuration_template/only_logs_after_test_module/service_configuration_without_only_logs_after.yaml @@ -9,8 +9,6 @@ attributes: - type: SERVICE_TYPE elements: - - aws_profile: - value: qa - aws_log_groups: value: LOG_GROUP_NAME - regions: diff --git a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_multiple_bucket_and_service_tags.yaml 
b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_multiple_bucket_and_service_tags.yaml index 645fed742cd..508f8ca1d59 100644 --- a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_multiple_bucket_and_service_tags.yaml +++ b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_multiple_bucket_and_service_tags.yaml @@ -9,8 +9,6 @@ attributes: - type: cloudtrail elements: - - aws_profile: - value: qa - name: value: wazuh-cloudtrail-integration-tests - regions: @@ -19,8 +17,6 @@ attributes: - type: cloudtrail elements: - - aws_profile: - value: qa - name: value: wazuh-cloudtrail-integration-tests - regions: @@ -29,8 +25,6 @@ attributes: - type: cloudwatchlogs elements: - - aws_profile: - value: qa - aws_log_groups: value: wazuh-cloudwatchlogs-integration-tests - regions: @@ -39,8 +33,6 @@ attributes: - type: cloudwatchlogs elements: - - aws_profile: - value: qa - aws_log_groups: value: wazuh-cloudwatchlogs-integration-tests - regions: diff --git a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_bucket.yaml b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_bucket.yaml index 36cb9d2c6d6..35905584ceb 100644 --- a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_bucket.yaml +++ b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_bucket.yaml @@ -7,7 +7,5 @@ value: 'no' - bucket: elements: - - aws_profile: - value: qa - name: value: wazuh-cloudtrail-integration-tests diff --git a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_service.yaml b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_service.yaml index 2a5e06849bd..7be1e67f613 100644 --- a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_service.yaml +++ b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_type_missing_in_service.yaml @@ -7,7 +7,5 @@ value: 'no' - service: elements: - - aws_profile: - value: qa - name: aws_log_groups: wazuh-cloudwatch-integration-tests diff --git a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_bucket.yaml b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_bucket.yaml index 5f433bbfa3f..2d6041e70aa 100644 --- a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_bucket.yaml +++ b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_bucket.yaml @@ -9,8 +9,6 @@ attributes: - type: BUCKET_TYPE elements: - - aws_profile: - value: qa - name: value: BUCKET_NAME - only_logs_after: diff --git a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_service.yaml b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_service.yaml index 1d8c91c7d36..3dfe2fcf3fb 100644 --- a/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_service.yaml +++ b/tests/integration/test_aws/data/configuration_template/parser_test_module/configuration_values_in_service.yaml @@ -9,8 +9,6 @@ 
attributes: - type: SERVICE_TYPE elements: - - aws_profile: - value: qa - aws_log_groups: value: LOG_GROUPS - only_logs_after: diff --git a/tests/integration/test_aws/data/configuration_template/path_suffix_test_module/configuration_path_suffix.yaml b/tests/integration/test_aws/data/configuration_template/path_suffix_test_module/configuration_path_suffix.yaml index 7b76a2dc222..6a8bd157f4e 100644 --- a/tests/integration/test_aws/data/configuration_template/path_suffix_test_module/configuration_path_suffix.yaml +++ b/tests/integration/test_aws/data/configuration_template/path_suffix_test_module/configuration_path_suffix.yaml @@ -9,8 +9,6 @@ attributes: - type: BUCKET_TYPE elements: - - aws_profile: - value: qa - name: value: BUCKET_NAME - only_logs_after: diff --git a/tests/integration/test_aws/data/configuration_template/path_test_module/configuration_path.yaml b/tests/integration/test_aws/data/configuration_template/path_test_module/configuration_path.yaml index 62b5aaff26f..03912c8f4cb 100644 --- a/tests/integration/test_aws/data/configuration_template/path_test_module/configuration_path.yaml +++ b/tests/integration/test_aws/data/configuration_template/path_test_module/configuration_path.yaml @@ -9,8 +9,6 @@ attributes: - type: BUCKET_TYPE elements: - - aws_profile: - value: qa - name: value: BUCKET_NAME - only_logs_after: diff --git a/tests/integration/test_aws/data/configuration_template/regions_test_module/bucket_configuration_regions.yaml b/tests/integration/test_aws/data/configuration_template/regions_test_module/bucket_configuration_regions.yaml index 45b8fcb4046..9bb441e5889 100644 --- a/tests/integration/test_aws/data/configuration_template/regions_test_module/bucket_configuration_regions.yaml +++ b/tests/integration/test_aws/data/configuration_template/regions_test_module/bucket_configuration_regions.yaml @@ -9,8 +9,6 @@ attributes: - type: BUCKET_TYPE elements: - - aws_profile: - value: qa - name: value: BUCKET_NAME - only_logs_after: diff --git a/tests/integration/test_aws/data/configuration_template/regions_test_module/cloudwatch_configuration_regions.yaml b/tests/integration/test_aws/data/configuration_template/regions_test_module/cloudwatch_configuration_regions.yaml index f6c2b339acc..cedab0988b4 100644 --- a/tests/integration/test_aws/data/configuration_template/regions_test_module/cloudwatch_configuration_regions.yaml +++ b/tests/integration/test_aws/data/configuration_template/regions_test_module/cloudwatch_configuration_regions.yaml @@ -9,8 +9,6 @@ attributes: - type: SERVICE_TYPE elements: - - aws_profile: - value: qa - aws_log_groups: value: LOG_GROUP_NAME - only_logs_after: diff --git a/tests/integration/test_aws/data/configuration_template/regions_test_module/inspector_configuration_regions.yaml b/tests/integration/test_aws/data/configuration_template/regions_test_module/inspector_configuration_regions.yaml index ca344fac2ec..2ed5a44ce7b 100644 --- a/tests/integration/test_aws/data/configuration_template/regions_test_module/inspector_configuration_regions.yaml +++ b/tests/integration/test_aws/data/configuration_template/regions_test_module/inspector_configuration_regions.yaml @@ -9,8 +9,6 @@ attributes: - type: SERVICE_TYPE elements: - - aws_profile: - value: qa - only_logs_after: value: 2023-JAN-12 - regions: diff --git a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml index 13b8ad49091..cef6697ce33 100644 --- 
a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml +++ b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom.yaml @@ -7,3 +7,4 @@ sqs_name: wazuh-sqs-integration-tests-t1 bucket_name: wazuh-sqs-bucket-integration-test-t1 bucket_type: cloudtrail + expected_results: 1 diff --git a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml index f13cc77248f..0be1fbc1403 100644 --- a/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml +++ b/tests/integration/test_aws/data/test_cases/custom_bucket_test_module/cases_bucket_custom_logs.yaml @@ -7,3 +7,4 @@ sqs_name: wazuh-sqs-integration-tests-t2 bucket_name: wazuh-sqs-bucket-integration-test-t2 bucket_type: cloudtrail + expected_results: 1 diff --git a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_bucket_discard_regex.yaml b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_bucket_discard_regex.yaml index 8800160682f..4fffec4b83b 100644 --- a/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_bucket_discard_regex.yaml +++ b/tests/integration/test_aws/data/test_cases/discard_regex_test_module/cases_bucket_discard_regex.yaml @@ -13,7 +13,7 @@ only_logs_after: 2022-NOV-20 discard_field: eventSource discard_regex: .*sts.amazonaws.com.* - found_logs: 1 + expected_results: 1 skipped_logs: 1 - name: vpc_discard_regex @@ -32,7 +32,7 @@ discard_field: action discard_regex: "REJECT" vpc_name: wazuh-vpc-integration-tests - found_logs: 1 + expected_results: 1 skipped_logs: 5 - name: config_discard_regex @@ -50,7 +50,7 @@ only_logs_after: 2022-NOV-20 discard_field: configuration.complianceType discard_regex: .*COMPLIANT.* - found_logs: 5 + expected_results: 5 skipped_logs: 1 - name: alb_discard_regex @@ -68,7 +68,7 @@ only_logs_after: 2022-NOV-20 discard_field: elb_status_code discard_regex: '403' - found_logs: 1 + expected_results: 1 skipped_logs: 5 - name: clb_discard_regex @@ -86,7 +86,7 @@ only_logs_after: 2022-NOV-20 discard_field: elb_status_code discard_regex: '403' - found_logs: 1 + expected_results: 1 skipped_logs: 5 - name: nlb_discard_regex @@ -104,7 +104,7 @@ only_logs_after: 2022-NOV-20 discard_field: type discard_regex: tls - found_logs: 1 + expected_results: 1 skipped_logs: 5 - name: kms_discard_regex @@ -122,7 +122,7 @@ only_logs_after: 2022-NOV-20 discard_field: eventName discard_regex: GenerateDataKey - found_logs: 1 + expected_results: 1 skipped_logs: 1 - name: macie_discard_regex @@ -140,7 +140,7 @@ only_logs_after: 2022-NOV-20 discard_field: severity discard_regex: CRITICAL - found_logs: 3 + expected_results: 3 skipped_logs: 1 - name: trusted_advisor_discard_regex @@ -158,7 +158,7 @@ only_logs_after: 2022-NOV-20 discard_field: status discard_regex: OK - found_logs: 1 + expected_results: 1 skipped_logs: 1 - name: guardduty_discard_regex @@ -176,7 +176,7 @@ only_logs_after: 2022-NOV-20 discard_field: partition discard_regex: aws - found_logs: 1 + expected_results: 1 skipped_logs: 1 - name: native_guardduty_discard_regex @@ -194,7 +194,7 @@ only_logs_after: 2022-NOV-20 discard_field: partition discard_regex: aws - found_logs: 1 + expected_results: 1 skipped_logs: 1 - name: waf_discard_regex @@ -212,7 +212,7 @@ only_logs_after: 2022-NOV-20 discard_field: action discard_regex: BLOCK - found_logs: 1 + 
expected_results: 1 skipped_logs: 1 - name: server_access_discard_regex @@ -230,7 +230,7 @@ only_logs_after: 2022-NOV-20 discard_field: http_status discard_regex: '404' - found_logs: 1 + expected_results: 1 skipped_logs: 1 - name: cisco_umbrella_discard_regex @@ -248,6 +248,6 @@ only_logs_after: 2022-NOV-20 discard_field: action discard_regex: Allowed - found_logs: 1 + expected_results: 1 skipped_logs: 5 path: dnslogs diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_multiple_calls.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_multiple_calls.yaml index 12b19b6226c..53c444b61fd 100644 --- a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_multiple_calls.yaml +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_multiple_calls.yaml @@ -1,98 +1,170 @@ - name: cloudtrail_only_logs_after_multiple_calls description: CloudTrail only_logs_after multiple calls configurations configuration_parameters: + BUCKET_TYPE: cloudtrail + BUCKET_NAME: wazuh-cloudtrail-integration-tests metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 1 - name: vpc_only_logs_after_multiple_calls description: VPC only_logs_after multiple calls configurations configuration_parameters: + BUCKET_TYPE: vpcflow + BUCKET_NAME: wazuh-vpcflow-integration-tests metadata: + resource_type: bucket + vpc_name: wazuh-vpc-integration-tests bucket_type: vpcflow bucket_name: wazuh-vpcflow-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 1 - name: config_only_logs_after_multiple_calls description: Config only_logs_after multiple calls configurations configuration_parameters: + BUCKET_TYPE: config + BUCKET_NAME: wazuh-config-integration-tests metadata: + resource_type: bucket bucket_type: config bucket_name: wazuh-config-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 1 - name: alb_only_logs_after_multiple_calls description: ALB only_logs_after multiple calls configurations configuration_parameters: + BUCKET_TYPE: alb + BUCKET_NAME: wazuh-alb-integration-tests metadata: + resource_type: bucket bucket_type: alb bucket_name: wazuh-alb-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 1 - name: clb_only_logs_after_multiple_calls description: CLB only_logs_after multiple calls configurations configuration_parameters: + BUCKET_TYPE: clb + BUCKET_NAME: wazuh-clb-integration-tests metadata: + resource_type: bucket bucket_type: clb bucket_name: wazuh-clb-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 1 - name: nlb_only_logs_after_multiple_calls description: NLB only_logs_after multiple calls configurations configuration_parameters: + BUCKET_TYPE: nlb + BUCKET_NAME: wazuh-nlb-integration-tests metadata: + resource_type: bucket bucket_type: nlb bucket_name: wazuh-nlb-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 1 - name: kms_only_logs_after_multiple_calls description: KMS only_logs_after multiple calls configurations configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-kms-integration-tests metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-kms-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 1 - name: macie_only_logs_after_multiple_calls description: Macie only_logs_after multiple calls configurations configuration_parameters: + 
BUCKET_TYPE: nlb + BUCKET_NAME: wazuh-nlb-integration-tests metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-macie-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 1 - name: trusted_advisor_only_logs_after_multiple_calls description: Trusted Advisor only_logs_after multiple calls configurations configuration_parameters: + BUCKET_TYPE: custom + BUCKET_NAME: wazuh-trusted-advisor-integration-tests metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-trusted-advisor-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 1 - name: guardduty_only_logs_after_multiple_calls description: GuardDuty only_logs_after multiple calls configurations configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-guardduty-integration-tests metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-guardduty-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 1 - name: native_guardduty_only_logs_after_multiple_calls description: Native GuardDuty only_logs_after multiple calls configurations configuration_parameters: + BUCKET_TYPE: guardduty + BUCKET_NAME: wazuh-native-guardduty-integration-tests metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-native-guardduty-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 1 - name: waf_only_logs_after_multiple_calls description: WAF only_logs_after multiple calls configurations configuration_parameters: + BUCKET_TYPE: waf + BUCKET_NAME: wazuh-waf-integration-tests metadata: + resource_type: bucket bucket_type: waf bucket_name: wazuh-waf-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 1 - name: server_access_only_logs_after_multiple_calls description: Server Access only_logs_after multiple calls configurations configuration_parameters: + BUCKET_TYPE: server_access + BUCKET_NAME: wazuh-server-access-integration-tests metadata: + resource_type: bucket bucket_type: server_access bucket_name: wazuh-server-access-integration-tests + only_logs_after: 2022-NOV-20 + expected_results: 1 - name: cisco_umbrella_only_logs_after_multiple_calls description: Umbrella only_logs_after multiple calls configurations configuration_parameters: + BUCKET_TYPE: cisco_umbrella + BUCKET_NAME: wazuh-umbrella-integration-tests + PATH: dnslogs metadata: + resource_type: bucket bucket_type: cisco_umbrella bucket_name: wazuh-umbrella-integration-tests + only_logs_after: 2022-NOV-20 path: dnslogs + expected_results: 1 diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_with_only_logs_after.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_with_only_logs_after.yaml index a466dcbf445..245b1708bbd 100644 --- a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_with_only_logs_after.yaml +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_with_only_logs_after.yaml @@ -6,6 +6,7 @@ ONLY_LOGS_AFTER: 2022-NOV-20 PATH: '' metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests only_logs_after: 2022-NOV-20 @@ -19,6 +20,8 @@ ONLY_LOGS_AFTER: 2022-NOV-20 PATH: '' metadata: + resource_type: bucket + vpc_name: wazuh-vpc-integration-tests bucket_type: vpcflow bucket_name: wazuh-vpcflow-integration-tests only_logs_after: 2022-NOV-20 @@ -32,6 +35,7 @@ ONLY_LOGS_AFTER: 2022-NOV-20 PATH: '' metadata: + resource_type: bucket 
bucket_type: config bucket_name: wazuh-config-integration-tests only_logs_after: 2022-NOV-20 @@ -45,6 +49,7 @@ ONLY_LOGS_AFTER: 2022-NOV-20 PATH: '' metadata: + resource_type: bucket bucket_type: alb bucket_name: wazuh-alb-integration-tests only_logs_after: 2022-NOV-20 @@ -58,6 +63,7 @@ ONLY_LOGS_AFTER: 2022-NOV-20 PATH: '' metadata: + resource_type: bucket bucket_type: clb bucket_name: wazuh-clb-integration-tests only_logs_after: 2022-NOV-20 @@ -71,6 +77,7 @@ ONLY_LOGS_AFTER: 2022-NOV-20 PATH: '' metadata: + resource_type: bucket bucket_type: nlb bucket_name: wazuh-nlb-integration-tests only_logs_after: 2022-NOV-20 @@ -84,6 +91,7 @@ ONLY_LOGS_AFTER: 2022-NOV-20 PATH: '' metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-kms-integration-tests only_logs_after: 2022-NOV-20 @@ -97,6 +105,7 @@ ONLY_LOGS_AFTER: 2022-NOV-20 PATH: '' metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-macie-integration-tests only_logs_after: 2022-NOV-20 @@ -110,6 +119,7 @@ ONLY_LOGS_AFTER: 2022-NOV-20 PATH: '' metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-trusted-advisor-integration-tests only_logs_after: 2022-NOV-20 @@ -123,6 +133,7 @@ ONLY_LOGS_AFTER: 2022-NOV-20 PATH: '' metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-guardduty-integration-tests only_logs_after: 2022-NOV-20 @@ -136,6 +147,7 @@ ONLY_LOGS_AFTER: 2022-NOV-20 PATH: '' metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-native-guardduty-integration-tests only_logs_after: 2022-NOV-20 @@ -149,6 +161,7 @@ ONLY_LOGS_AFTER: 2022-NOV-20 PATH: '' metadata: + resource_type: bucket bucket_type: waf bucket_name: wazuh-waf-integration-tests only_logs_after: 2022-NOV-20 @@ -162,6 +175,7 @@ ONLY_LOGS_AFTER: 2022-NOV-20 PATH: '' metadata: + resource_type: bucket bucket_type: server_access bucket_name: wazuh-server-access-integration-tests only_logs_after: 2022-NOV-20 @@ -176,6 +190,7 @@ ONLY_LOGS_AFTER: 2022-NOV-20 PATH: dnslogs metadata: + resource_type: bucket bucket_type: cisco_umbrella bucket_name: wazuh-umbrella-integration-tests only_logs_after: 2022-NOV-20 diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_without_only_logs_after.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_without_only_logs_after.yaml index 8b622f44f84..4de40817d55 100644 --- a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_without_only_logs_after.yaml +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_bucket_without_only_logs_after.yaml @@ -5,6 +5,7 @@ BUCKET_NAME: wazuh-cloudtrail-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests expected_results: 1 @@ -16,6 +17,8 @@ BUCKET_NAME: wazuh-vpcflow-integration-tests PATH: '' metadata: + resource_type: bucket + vpc_name: wazuh-vpc-integration-tests bucket_type: vpcflow bucket_name: wazuh-vpcflow-integration-tests expected_results: 1 @@ -27,6 +30,7 @@ BUCKET_NAME: wazuh-config-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: config bucket_name: wazuh-config-integration-tests expected_results: 1 @@ -38,6 +42,7 @@ BUCKET_NAME: wazuh-alb-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: alb bucket_name: wazuh-alb-integration-tests expected_results: 1 @@ -49,6 +54,7 @@ BUCKET_NAME: wazuh-clb-integration-tests PATH: '' metadata: + 
resource_type: bucket bucket_type: clb bucket_name: wazuh-clb-integration-tests expected_results: 1 @@ -60,6 +66,7 @@ BUCKET_NAME: wazuh-nlb-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: nlb bucket_name: wazuh-nlb-integration-tests expected_results: 1 @@ -71,6 +78,7 @@ BUCKET_NAME: wazuh-kms-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-kms-integration-tests expected_results: 1 @@ -82,6 +90,7 @@ BUCKET_NAME: wazuh-macie-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-macie-integration-tests expected_results: 1 @@ -93,6 +102,7 @@ BUCKET_NAME: wazuh-trusted-advisor-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-trusted-advisor-integration-tests expected_results: 1 @@ -104,6 +114,7 @@ BUCKET_NAME: wazuh-guardduty-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-guardduty-integration-tests expected_results: 1 @@ -115,6 +126,7 @@ BUCKET_NAME: wazuh-native-guardduty-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-native-guardduty-integration-tests expected_results: 1 @@ -126,6 +138,7 @@ BUCKET_NAME: wazuh-waf-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: waf bucket_name: wazuh-waf-integration-tests expected_results: 1 @@ -137,6 +150,7 @@ BUCKET_NAME: wazuh-server-access-integration-tests PATH: '' metadata: + resource_type: bucket bucket_type: server_access bucket_name: wazuh-server-access-integration-tests expected_results: 1 @@ -149,6 +163,7 @@ BUCKET_NAME: wazuh-umbrella-integration-tests PATH: dnslogs metadata: + resource_type: bucket bucket_type: cisco_umbrella bucket_name: wazuh-umbrella-integration-tests expected_results: 1 diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_multiple_calls.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_multiple_calls.yaml index 808e86d5114..ec14a52babd 100644 --- a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_multiple_calls.yaml +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_multiple_calls.yaml @@ -1,6 +1,12 @@ - name: cloudwatchlogs_only_logs_after_multiple_calls description: CloudWatch only_logs_after multiple calls configurations configuration_parameters: + SERVICE_TYPE: cloudwatchlogs + LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests metadata: service_type: cloudwatchlogs + resource_type: log_group log_group_name: wazuh-cloudwatchlogs-integration-tests + log_stream_name: wazuh-cloudwatchlogs-integration-tests-stream + expected_results: 3 + only_logs_after: 2023-JAN-12 \ No newline at end of file diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_with_only_logs_after.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_with_only_logs_after.yaml index 6fc8afb1571..7a85d155625 100644 --- a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_with_only_logs_after.yaml +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_with_only_logs_after.yaml @@ -6,7 +6,8 @@ ONLY_LOGS_AFTER: 2022-NOV-20 metadata: service_type: cloudwatchlogs + resource_type: log_group log_group_name: 
wazuh-cloudwatchlogs-integration-tests + log_stream_name: wazuh-cloudwatchlogs-integration-tests-stream only_logs_after: 2022-NOV-20 expected_results: 3 - log_stream: permanent-logs diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_inspector_with_only_logs_after.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_inspector_with_only_logs_after.yaml index 860c37e7976..9416f76ae59 100644 --- a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_inspector_with_only_logs_after.yaml +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_inspector_with_only_logs_after.yaml @@ -6,4 +6,4 @@ metadata: service_type: inspector only_logs_after: 2023-JAN-30 - expected_results: 4 + expected_results: 11 diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_service_without_only_logs_after.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_service_without_only_logs_after.yaml index 40414b43d60..7b9e837af11 100644 --- a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_service_without_only_logs_after.yaml +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_service_without_only_logs_after.yaml @@ -5,5 +5,7 @@ LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests metadata: service_type: cloudwatchlogs + resource_type: log_group + log_stream_name: wazuh-cloudwatchlogs-integration-tests-stream log_group_name: wazuh-cloudwatchlogs-integration-tests expected_results: 1 diff --git a/tests/integration/test_aws/data/test_cases/parser_test_module/cases_invalid_values_in_bucket.yaml b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_invalid_values_in_bucket.yaml index 9edda13e2a2..968031942f9 100644 --- a/tests/integration/test_aws/data/test_cases/parser_test_module/cases_invalid_values_in_bucket.yaml +++ b/tests/integration/test_aws/data/test_cases/parser_test_module/cases_invalid_values_in_bucket.yaml @@ -1,8 +1,8 @@ -- name: parser_invalid_type_in_bucket - description: Parser invalid type in bucket +- name: parser_invalid_name_in_bucket + description: Parser invalid name in bucket configuration_parameters: - BUCKET_TYPE: invalid_value - BUCKET_NAME: wazuh-cloudtrail-integration-tests + BUCKET_TYPE: cloudtrail + BUCKET_NAME: 1 ONLY_LOGS_AFTER: 2023-JAN-31 REGIONS: us-east-1 PATH: test_prefix @@ -10,11 +10,11 @@ REMOVE_FROM_BUCKET: 'no' metadata: [] -- name: parser_invalid_name_in_bucket - description: Parser invalid name in bucket +- name: parser_invalid_type_in_bucket + description: Parser invalid type in bucket configuration_parameters: - BUCKET_TYPE: cloudtrail - BUCKET_NAME: 1 + BUCKET_TYPE: invalid_value + BUCKET_NAME: wazuh-cloudtrail-integration-tests ONLY_LOGS_AFTER: 2023-JAN-31 REGIONS: us-east-1 PATH: test_prefix diff --git a/tests/integration/test_aws/data/test_cases/path_suffix_test_module/cases_path_suffix.yaml b/tests/integration/test_aws/data/test_cases/path_suffix_test_module/cases_path_suffix.yaml index b95f1695a62..ee5869d46ad 100644 --- a/tests/integration/test_aws/data/test_cases/path_suffix_test_module/cases_path_suffix.yaml +++ b/tests/integration/test_aws/data/test_cases/path_suffix_test_module/cases_path_suffix.yaml @@ -5,6 +5,7 @@ BUCKET_NAME: wazuh-cloudtrail-integration-tests PATH_SUFFIX: test_suffix metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests only_logs_after: 
2022-NOV-20 @@ -18,6 +19,7 @@ BUCKET_NAME: wazuh-cloudtrail-integration-tests PATH_SUFFIX: empty_suffix metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests only_logs_after: 2022-NOV-20 @@ -31,6 +33,7 @@ BUCKET_NAME: wazuh-cloudtrail-integration-tests PATH_SUFFIX: inexistent_suffix metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests only_logs_after: 2022-NOV-20 @@ -44,8 +47,10 @@ BUCKET_NAME: wazuh-vpcflow-integration-tests PATH_SUFFIX: test_suffix metadata: + resource_type: bucket bucket_type: vpcflow bucket_name: wazuh-vpcflow-integration-tests + vpc_name: wazuh-vpc-integration-tests only_logs_after: 2022-NOV-20 path_suffix: test_suffix expected_results: 1 @@ -57,6 +62,7 @@ BUCKET_NAME: wazuh-config-integration-tests PATH_SUFFIX: test_suffix metadata: + resource_type: bucket bucket_type: config bucket_name: wazuh-config-integration-tests only_logs_after: 2022-NOV-20 @@ -70,8 +76,10 @@ BUCKET_NAME: wazuh-vpcflow-integration-tests PATH_SUFFIX: empty_suffix metadata: + resource_type: bucket bucket_type: vpcflow bucket_name: wazuh-vpcflow-integration-tests + vpc_name: wazuh-vpc-integration-tests only_logs_after: 2022-NOV-20 path_suffix: empty_suffix expected_results: 0 @@ -83,6 +91,7 @@ BUCKET_NAME: wazuh-config-integration-tests PATH_SUFFIX: empty_suffix metadata: + resource_type: bucket bucket_type: config bucket_name: wazuh-config-integration-tests only_logs_after: 2022-NOV-20 @@ -96,8 +105,10 @@ BUCKET_NAME: wazuh-vpcflow-integration-tests PATH_SUFFIX: inexistent_suffix metadata: + resource_type: bucket bucket_type: vpcflow bucket_name: wazuh-vpcflow-integration-tests + vpc_name: wazuh-vpc-integration-tests only_logs_after: 2022-NOV-20 path_suffix: inexistent_suffix expected_results: 0 @@ -109,6 +120,7 @@ BUCKET_NAME: wazuh-config-integration-tests PATH_SUFFIX: inexistent_suffix metadata: + resource_type: bucket bucket_type: config bucket_name: wazuh-config-integration-tests only_logs_after: 2022-NOV-20 diff --git a/tests/integration/test_aws/data/test_cases/path_test_module/cases_path.yaml b/tests/integration/test_aws/data/test_cases/path_test_module/cases_path.yaml index 86b04ea2aa6..e8cf5f3079e 100644 --- a/tests/integration/test_aws/data/test_cases/path_test_module/cases_path.yaml +++ b/tests/integration/test_aws/data/test_cases/path_test_module/cases_path.yaml @@ -5,6 +5,7 @@ BUCKET_NAME: wazuh-cloudtrail-integration-tests PATH: test_prefix metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests only_logs_after: 2022-NOV-20 @@ -18,6 +19,7 @@ BUCKET_NAME: wazuh-cloudtrail-integration-tests PATH: empty_prefix metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests only_logs_after: 2022-NOV-20 @@ -31,6 +33,7 @@ BUCKET_NAME: wazuh-cloudtrail-integration-tests PATH: inexistent_prefix metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests only_logs_after: 2022-NOV-20 @@ -44,8 +47,10 @@ BUCKET_NAME: wazuh-vpcflow-integration-tests PATH: test_prefix metadata: + resource_type: bucket bucket_type: vpcflow bucket_name: wazuh-vpcflow-integration-tests + vpc_name: wazuh-vpc-integration-tests only_logs_after: 2022-NOV-20 path: test_prefix expected_results: 1 @@ -57,8 +62,10 @@ BUCKET_NAME: wazuh-vpcflow-integration-tests PATH: empty_prefix metadata: + resource_type: bucket bucket_type: vpcflow bucket_name: wazuh-vpcflow-integration-tests + 
vpc_name: wazuh-vpc-integration-tests only_logs_after: 2022-NOV-20 path: empty_prefix expected_results: 0 @@ -70,6 +77,7 @@ BUCKET_NAME: wazuh-umbrella-integration-tests PATH: test_prefix/dnslogs metadata: + resource_type: bucket bucket_type: cisco_umbrella bucket_name: wazuh-umbrella-integration-tests only_logs_after: 2022-NOV-20 @@ -83,6 +91,7 @@ BUCKET_NAME: wazuh-umbrella-integration-tests PATH: empty_prefix metadata: + resource_type: bucket bucket_type: cisco_umbrella bucket_name: wazuh-umbrella-integration-tests only_logs_after: 2022-NOV-20 @@ -96,8 +105,10 @@ BUCKET_NAME: wazuh-vpcflow-integration-tests PATH: inexistent_prefix metadata: + resource_type: bucket bucket_type: vpcflow bucket_name: wazuh-vpcflow-integration-tests + vpc_name: wazuh-vpc-integration-tests only_logs_after: 2022-NOV-20 path: inexistent_prefix expected_results: 0 @@ -109,6 +120,7 @@ BUCKET_NAME: wazuh-config-integration-tests PATH: test_prefix metadata: + resource_type: bucket bucket_type: config bucket_name: wazuh-config-integration-tests only_logs_after: 2022-NOV-20 @@ -122,6 +134,7 @@ BUCKET_NAME: wazuh-config-integration-tests PATH: empty_prefix metadata: + resource_type: bucket bucket_type: config bucket_name: wazuh-config-integration-tests only_logs_after: 2022-NOV-20 @@ -135,6 +148,7 @@ BUCKET_NAME: wazuh-config-integration-tests PATH: inexistent_prefix metadata: + resource_type: bucket bucket_type: config bucket_name: wazuh-config-integration-tests only_logs_after: 2022-NOV-20 @@ -148,6 +162,7 @@ BUCKET_NAME: wazuh-alb-integration-tests PATH: test_prefix metadata: + resource_type: bucket bucket_type: alb bucket_name: wazuh-alb-integration-tests only_logs_after: 2022-NOV-20 @@ -161,6 +176,7 @@ BUCKET_NAME: wazuh-alb-integration-tests PATH: empty_prefix metadata: + resource_type: bucket bucket_type: alb bucket_name: wazuh-alb-integration-tests only_logs_after: 2022-NOV-20 @@ -174,6 +190,7 @@ BUCKET_NAME: wazuh-alb-integration-tests PATH: inexistent_prefix metadata: + resource_type: bucket bucket_type: alb bucket_name: wazuh-alb-integration-tests only_logs_after: 2022-NOV-20 @@ -187,6 +204,7 @@ BUCKET_NAME: wazuh-clb-integration-tests PATH: test_prefix metadata: + resource_type: bucket bucket_type: clb bucket_name: wazuh-clb-integration-tests only_logs_after: 2022-NOV-20 @@ -200,6 +218,7 @@ BUCKET_NAME: wazuh-clb-integration-tests PATH: empty_prefix metadata: + resource_type: bucket bucket_type: clb bucket_name: wazuh-clb-integration-tests only_logs_after: 2022-NOV-20 @@ -213,6 +232,7 @@ BUCKET_NAME: wazuh-clb-integration-tests PATH: inexistent_prefix metadata: + resource_type: bucket bucket_type: clb bucket_name: wazuh-clb-integration-tests only_logs_after: 2022-NOV-20 @@ -226,6 +246,7 @@ BUCKET_NAME: wazuh-nlb-integration-tests PATH: test_prefix metadata: + resource_type: bucket bucket_type: nlb bucket_name: wazuh-nlb-integration-tests only_logs_after: 2022-NOV-20 @@ -239,6 +260,7 @@ BUCKET_NAME: wazuh-nlb-integration-tests PATH: empty_prefix metadata: + resource_type: bucket bucket_type: nlb bucket_name: wazuh-nlb-integration-tests only_logs_after: 2022-NOV-20 @@ -252,6 +274,7 @@ BUCKET_NAME: wazuh-nlb-integration-tests PATH: inexistent_prefix metadata: + resource_type: bucket bucket_type: nlb bucket_name: wazuh-nlb-integration-tests only_logs_after: 2022-NOV-20 @@ -265,6 +288,7 @@ BUCKET_NAME: wazuh-kms-integration-tests PATH: test_prefix metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-kms-integration-tests only_logs_after: 2022-NOV-20 @@ -278,6 +302,7 @@ BUCKET_NAME: 
wazuh-kms-integration-tests PATH: empty_prefix metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-kms-integration-tests only_logs_after: 2022-NOV-20 @@ -291,6 +316,7 @@ BUCKET_NAME: wazuh-kms-integration-tests PATH: inexistent_prefix metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-kms-integration-tests only_logs_after: 2022-NOV-20 @@ -304,6 +330,7 @@ BUCKET_NAME: wazuh-macie-integration-tests PATH: test_prefix metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-macie-integration-tests only_logs_after: 2022-NOV-20 @@ -317,6 +344,7 @@ BUCKET_NAME: wazuh-macie-integration-tests PATH: empty_prefix metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-macie-integration-tests only_logs_after: 2022-NOV-20 @@ -330,6 +358,7 @@ BUCKET_NAME: wazuh-macie-integration-tests PATH: inexistent_prefix metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-macie-integration-tests only_logs_after: 2022-NOV-20 @@ -343,6 +372,7 @@ BUCKET_NAME: wazuh-trusted-advisor-integration-tests PATH: test_prefix metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-trusted-advisor-integration-tests only_logs_after: 2022-NOV-20 @@ -356,6 +386,7 @@ BUCKET_NAME: wazuh-trusted-advisor-integration-tests PATH: empty_prefix metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-trusted-advisor-integration-tests only_logs_after: 2022-NOV-20 @@ -369,6 +400,7 @@ BUCKET_NAME: wazuh-trusted-advisor-integration-tests PATH: inexistent_prefix metadata: + resource_type: bucket bucket_type: custom bucket_name: wazuh-trusted-advisor-integration-tests only_logs_after: 2022-NOV-20 @@ -382,6 +414,7 @@ BUCKET_NAME: wazuh-guardduty-integration-tests PATH: test_prefix metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-guardduty-integration-tests only_logs_after: 2022-NOV-20 @@ -395,6 +428,7 @@ BUCKET_NAME: wazuh-guardduty-integration-tests PATH: empty_prefix metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-guardduty-integration-tests only_logs_after: 2022-NOV-20 @@ -408,6 +442,7 @@ BUCKET_NAME: wazuh-guardduty-integration-tests PATH: inexistent_prefix metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-guardduty-integration-tests only_logs_after: 2022-NOV-20 @@ -421,6 +456,7 @@ BUCKET_NAME: wazuh-native-guardduty-integration-tests PATH: test_prefix metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-native-guardduty-integration-tests only_logs_after: 2022-NOV-20 @@ -434,6 +470,7 @@ BUCKET_NAME: wazuh-native-guardduty-integration-tests PATH: empty_prefix metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-native-guardduty-integration-tests only_logs_after: 2022-NOV-20 @@ -447,6 +484,7 @@ BUCKET_NAME: wazuh-native-guardduty-integration-tests PATH: inexistent_prefix metadata: + resource_type: bucket bucket_type: guardduty bucket_name: wazuh-native-guardduty-integration-tests only_logs_after: 2022-NOV-20 @@ -460,6 +498,7 @@ BUCKET_NAME: wazuh-waf-integration-tests PATH: test_prefix metadata: + resource_type: bucket bucket_type: waf bucket_name: wazuh-waf-integration-tests only_logs_after: 2022-NOV-20 @@ -473,6 +512,7 @@ BUCKET_NAME: wazuh-waf-integration-tests PATH: empty_prefix metadata: + resource_type: bucket bucket_type: waf bucket_name: wazuh-waf-integration-tests only_logs_after: 2022-NOV-20 @@ -486,6 +526,7 @@ BUCKET_NAME: wazuh-waf-integration-tests PATH: inexistent_prefix 
metadata: + resource_type: bucket bucket_type: waf bucket_name: wazuh-waf-integration-tests only_logs_after: 2022-NOV-20 @@ -499,6 +540,7 @@ BUCKET_NAME: wazuh-server-access-integration-tests PATH: test_prefix metadata: + resource_type: bucket bucket_type: server_access bucket_name: wazuh-server-access-integration-tests only_logs_after: 2022-NOV-20 @@ -513,6 +555,7 @@ BUCKET_NAME: wazuh-server-access-integration-tests PATH: empty_prefix metadata: + resource_type: bucket bucket_type: server_access bucket_name: wazuh-server-access-integration-tests only_logs_after: 2022-NOV-20 @@ -527,6 +570,7 @@ BUCKET_NAME: wazuh-server-access-integration-tests PATH: inexistent_prefix metadata: + resource_type: bucket bucket_type: server_access bucket_name: wazuh-server-access-integration-tests only_logs_after: 2022-NOV-20 @@ -541,6 +585,7 @@ BUCKET_NAME: wazuh-umbrella-integration-tests PATH: inexistent_prefix metadata: + resource_type: bucket bucket_type: cisco_umbrella bucket_name: wazuh-umbrella-integration-tests only_logs_after: 2022-NOV-20 diff --git a/tests/integration/test_aws/data/test_cases/regions_test_module/cases_bucket_regions.yaml b/tests/integration/test_aws/data/test_cases/regions_test_module/cases_bucket_regions.yaml index 07231f255a7..f53e98e9715 100644 --- a/tests/integration/test_aws/data/test_cases/regions_test_module/cases_bucket_regions.yaml +++ b/tests/integration/test_aws/data/test_cases/regions_test_module/cases_bucket_regions.yaml @@ -5,6 +5,7 @@ BUCKET_NAME: wazuh-cloudtrail-integration-tests REGIONS: us-east-1 metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests only_logs_after: 2022-NOV-20 @@ -18,6 +19,7 @@ BUCKET_NAME: wazuh-cloudtrail-integration-tests REGIONS: us-east-1,us-east-2 metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests only_logs_after: 2022-NOV-20 @@ -31,6 +33,7 @@ BUCKET_NAME: wazuh-cloudtrail-integration-tests REGIONS: us-fake-1 metadata: + resource_type: bucket bucket_type: cloudtrail bucket_name: wazuh-cloudtrail-integration-tests only_logs_after: 2022-NOV-20 @@ -44,11 +47,13 @@ BUCKET_NAME: wazuh-vpcflow-integration-tests REGIONS: us-east-1 metadata: + resource_type: bucket bucket_type: vpcflow bucket_name: wazuh-vpcflow-integration-tests + vpc_name: wazuh-vpc-integration-tests only_logs_after: 2022-NOV-20 regions: us-east-1 - expected_results: 3 + expected_results: 1 - name: config_region_with_data description: Config regions configurations @@ -57,6 +62,7 @@ BUCKET_NAME: wazuh-config-integration-tests REGIONS: us-east-1 metadata: + resource_type: bucket bucket_type: config bucket_name: wazuh-config-integration-tests only_logs_after: 2022-NOV-20 @@ -70,6 +76,7 @@ BUCKET_NAME: wazuh-alb-integration-tests REGIONS: us-east-1 metadata: + resource_type: bucket bucket_type: alb bucket_name: wazuh-alb-integration-tests only_logs_after: 2022-NOV-20 @@ -83,11 +90,13 @@ BUCKET_NAME: wazuh-vpcflow-integration-tests REGIONS: us-east-1,us-east-2 metadata: + resource_type: bucket bucket_type: vpcflow bucket_name: wazuh-vpcflow-integration-tests + vpc_name: wazuh-vpc-integration-tests only_logs_after: 2022-NOV-20 regions: us-east-1,us-east-2 - expected_results: 5 + expected_results: 1 - name: config_regions_with_data description: Config regions configurations @@ -96,6 +105,7 @@ BUCKET_NAME: wazuh-config-integration-tests REGIONS: us-east-1,us-east-2 metadata: + resource_type: bucket bucket_type: config bucket_name: wazuh-config-integration-tests only_logs_after: 
2022-NOV-20 @@ -109,6 +119,7 @@ BUCKET_NAME: wazuh-alb-integration-tests REGIONS: us-east-1,us-east-2 metadata: + resource_type: bucket bucket_type: alb bucket_name: wazuh-alb-integration-tests only_logs_after: 2022-NOV-20 @@ -122,8 +133,10 @@ BUCKET_NAME: wazuh-vpcflow-integration-tests REGIONS: us-fake-1 metadata: + resource_type: bucket bucket_type: vpcflow bucket_name: wazuh-vpcflow-integration-tests + vpc_name: wazuh-vpc-integration-tests only_logs_after: 2022-NOV-20 regions: us-fake-1 expected_results: 0 @@ -135,6 +148,7 @@ BUCKET_NAME: wazuh-config-integration-tests REGIONS: us-fake-1 metadata: + resource_type: bucket bucket_type: config bucket_name: wazuh-config-integration-tests only_logs_after: 2022-NOV-20 @@ -148,6 +162,7 @@ BUCKET_NAME: wazuh-alb-integration-tests REGIONS: us-fake-1 metadata: + resource_type: bucket bucket_type: alb bucket_name: wazuh-alb-integration-tests only_logs_after: 2022-NOV-20 @@ -161,6 +176,7 @@ BUCKET_NAME: wazuh-clb-integration-tests REGIONS: us-east-1 metadata: + resource_type: bucket bucket_type: clb bucket_name: wazuh-clb-integration-tests only_logs_after: 2022-NOV-20 @@ -174,6 +190,7 @@ BUCKET_NAME: wazuh-clb-integration-tests REGIONS: us-east-1,us-east-2 metadata: + resource_type: bucket bucket_type: clb bucket_name: wazuh-clb-integration-tests only_logs_after: 2022-NOV-20 @@ -187,6 +204,7 @@ BUCKET_NAME: wazuh-clb-integration-tests REGIONS: us-fake-1 metadata: + resource_type: bucket bucket_type: clb bucket_name: wazuh-clb-integration-tests only_logs_after: 2022-NOV-20 @@ -200,6 +218,7 @@ BUCKET_NAME: wazuh-nlb-integration-tests REGIONS: us-east-1 metadata: + resource_type: bucket bucket_type: nlb bucket_name: wazuh-nlb-integration-tests only_logs_after: 2022-NOV-20 @@ -213,6 +232,7 @@ BUCKET_NAME: wazuh-nlb-integration-tests REGIONS: us-east-1,us-east-2 metadata: + resource_type: bucket bucket_type: nlb bucket_name: wazuh-nlb-integration-tests only_logs_after: 2022-NOV-20 @@ -226,6 +246,7 @@ BUCKET_NAME: wazuh-nlb-integration-tests REGIONS: us-fake-1 metadata: + resource_type: bucket bucket_type: nlb bucket_name: wazuh-nlb-integration-tests only_logs_after: 2022-NOV-20 diff --git a/tests/integration/test_aws/data/test_cases/regions_test_module/cases_cloudwatch_regions.yaml b/tests/integration/test_aws/data/test_cases/regions_test_module/cases_cloudwatch_regions.yaml index d5f4c3b8e45..467fa21b925 100644 --- a/tests/integration/test_aws/data/test_cases/regions_test_module/cases_cloudwatch_regions.yaml +++ b/tests/integration/test_aws/data/test_cases/regions_test_module/cases_cloudwatch_regions.yaml @@ -5,8 +5,10 @@ LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests REGIONS: us-east-1 metadata: + resource_type: log_group service_type: cloudwatchlogs log_group_name: wazuh-cloudwatchlogs-integration-tests + log_stream_name: wazuh-cloudwatchlogs-integration-tests-stream only_logs_after: 2023-JAN-12 regions: us-east-1 expected_results: 3 @@ -18,21 +20,10 @@ LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests REGIONS: us-east-1,us-east-2 metadata: + resource_type: log_group service_type: cloudwatchlogs log_group_name: wazuh-cloudwatchlogs-integration-tests + log_stream_name: wazuh-cloudwatchlogs-integration-tests-stream only_logs_after: 2023-JAN-12 regions: us-east-1,us-east-2 expected_results: 3 - -- name: cloudwatchlogs_inexistent_region - description: CloudWatch regions configurations - configuration_parameters: - SERVICE_TYPE: cloudwatchlogs - LOG_GROUP_NAME: wazuh-cloudwatchlogs-integration-tests - REGIONS: us-fake-1 - metadata: - 
service_type: cloudwatchlogs - log_group_name: wazuh-cloudwatchlogs-integration-tests - only_logs_after: 2023-JAN-12 - regions: us-fake-1 - expected_results: 0 diff --git a/tests/integration/test_aws/data/test_cases/regions_test_module/cases_inspector_regions.yaml b/tests/integration/test_aws/data/test_cases/regions_test_module/cases_inspector_regions.yaml index fc1fbbd39b6..bdd799957a7 100644 --- a/tests/integration/test_aws/data/test_cases/regions_test_module/cases_inspector_regions.yaml +++ b/tests/integration/test_aws/data/test_cases/regions_test_module/cases_inspector_regions.yaml @@ -7,7 +7,7 @@ service_type: inspector only_logs_after: 2023-JAN-12 regions: us-east-1 - expected_results: 4 + expected_results: 11 - name: inspector_regions_with_data description: Inspector regions configurations @@ -18,7 +18,7 @@ service_type: inspector only_logs_after: 2023-JAN-12 regions: us-east-1,us-east-2 - expected_results: 4 + expected_results: 11 - name: inspector_inexistent_region description: Inspector regions configurations From ce2f59ccd9d76439776af00254b40c4cd3cf5605 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Tue, 14 May 2024 18:46:20 +0200 Subject: [PATCH 299/419] Adapt tests to use isolated resources --- tests/integration/test_aws/conftest.py | 383 ++++++++++-------- tests/integration/test_aws/test_basic.py | 46 ++- .../test_aws/test_custom_bucket.py | 50 +-- .../test_aws/test_discard_regex.py | 61 +-- tests/integration/test_aws/test_log_groups.py | 13 +- .../test_aws/test_only_logs_after.py | 281 ++++++++----- tests/integration/test_aws/test_parser.py | 23 +- tests/integration/test_aws/test_path.py | 19 +- .../integration/test_aws/test_path_suffix.py | 24 +- tests/integration/test_aws/test_regions.py | 50 ++- .../test_aws/test_remove_from_bucket.py | 2 +- tests/integration/test_aws/utils.py | 7 +- 12 files changed, 587 insertions(+), 372 deletions(-) diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 62f5425161f..29373c43683 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -6,8 +6,9 @@ This module contains all necessary components (fixtures, classes, methods) to configure the test for its execution. """ +import os import pytest -from time import time +import boto3 from botocore.exceptions import ClientError # qa-integration-framework imports @@ -34,7 +35,7 @@ delete_bucket_files ) from wazuh_testing.utils.services import control_service - +from wazuh_testing.constants.aws import US_EAST_1_REGION @pytest.fixture def mark_cases_as_skipped(metadata): @@ -55,28 +56,84 @@ def restart_wazuh_function_without_exception(daemon=None): control_service('stop', daemon=daemon) +"""Boto3 client fixtures""" +# Use the environment variable or default to 'dev' +aws_profile = os.environ.get("AWS_PROFILE", "dev") + + +@pytest.fixture() +def boto_session(): + """Create a boto3 Session using the system defined AWS profile.""" + return boto3.Session(profile_name=f'{aws_profile}') + + +@pytest.fixture() +def s3_client(boto_session: boto3.Session): + """Create an S3 client to manage bucket resources. + + Args: + boto_session (boto3.Session): Session used to create the client. + """ + return boto_session.resource(service_name="s3", region_name=US_EAST_1_REGION) + + +@pytest.fixture() +def ec2_client(boto_session: boto3.Session): + """Create an EC2 client to manage VPC resources. + + Args: + boto_session (boto3.Session): Session used to create the client. 
+ """ + return boto_session.client(service_name="ec2", region_name=US_EAST_1_REGION) + + +@pytest.fixture() +def logs_clients(boto_session: boto3.Session, metadata: dict): + """Create CloudWatch Logs clients per region to manage CloudWatch resources. + + Args: + boto_session (boto3.Session): Session used to create the client. + metadata (dict): Metadata from the module to obtain the defined regions. + """ + # A client for each region is required to generate logs accordingly + return [boto_session.client(service_name="logs", region_name=region) + for region in metadata.get('regions', US_EAST_1_REGION).split(',')] + + +@pytest.fixture() +def sqs_client(boto_session: boto3.Session): + """Create an SQS client to manage queues. + + Args: + boto_session (boto3.Session): Session used to create the client. + """ + return boto_session.client(service_name="sqs", region_name=US_EAST_1_REGION) + + """Session fixtures""" -@pytest.fixture(scope="session", autouse=True) -def buckets_manager(): +@pytest.fixture() +def buckets_manager(s3_client): """Initializes a set to manage the creation and deletion of the buckets used throughout the test session. - Yields - ------ - buckets : set - Set of buckets + Args: + s3_client (boto3.resources.base.ServiceResource): S3 client used to manage the bucket resources. + + Yields: + buckets (set): Set of buckets. + s3_client (boto3.resources.base.ServiceResource): S3 client used to manage the bucket resources. """ # Create buckets set buckets: set = set() - yield buckets + yield buckets, s3_client # Delete all buckets created during execution for bucket in buckets: try: # Delete the bucket - delete_bucket(bucket_name=bucket) + delete_bucket(bucket_name=bucket, client=s3_client) except ClientError as error: logger.error({ "message": "Client error deleting bucket, delete manually", @@ -92,24 +149,27 @@ def buckets_manager(): }) -@pytest.fixture(scope="session", autouse=True) -def log_groups_manager(): +@pytest.fixture() +def log_groups_manager(logs_clients): """Initializes a set to manage the creation and deletion of the log groups used throughout the test session. - Yields - ------ - log_groups : set - Set of log groups. + Args: + logs_clients (Service client instance): CloudWatch Logs client to manage the CloudWatch resources. + + Yields: + log_groups (set): Set of log groups. + logs_clients (Service client instance): CloudWatch Logs client to manage the CloudWatch resources. """ # Create log groups set log_groups: set = set() - yield log_groups + yield log_groups, logs_clients # Delete all resources created during execution for log_group in log_groups: try: - delete_log_group(log_group_name=log_group) + for logs_client in logs_clients: + delete_log_group(log_group_name=log_group, client=logs_client) except ClientError as error: logger.error({ "message": "Client error deleting log_group, delete manually", @@ -127,24 +187,26 @@ def log_groups_manager(): raise error -@pytest.fixture(scope="session", autouse=True) -def sqs_manager(): +@pytest.fixture() +def sqs_manager(sqs_client): """Initializes a set to manage the creation and deletion of the sqs queues used throughout the test session. - Yields - ------ - buckets : set - Set of SQS queues + Args: + sqs_client (Service client instance): SQS client to manage the SQS resources. + + Yields: + sqs_queues (set): Set of SQS queues. + sqs_client (Service client instance): SQS client to manage the SQS resources. 
""" # Create buckets set sqs_queues: set = set() - yield sqs_queues + yield sqs_queues, sqs_client # Delete all resources created during execution for sqs in sqs_queues: try: - delete_sqs_queue(sqs_queue_url=sqs) + delete_sqs_queue(sqs_queue_url=sqs, client=sqs_client) except ClientError as error: logger.error({ "message": "Client error deleting sqs queue, delete manually", @@ -168,24 +230,21 @@ def create_test_bucket(buckets_manager, metadata: dict): """Create a bucket. - Parameters - ---------- - buckets_manager : fixture - Set of buckets. - metadata : dict - Bucket information. - + Args: + buckets_manager (fixture): Set of buckets. + metadata (dict): Bucket information. """ bucket_name = metadata["bucket_name"] bucket_type = metadata["bucket_type"] + buckets, s3_client = buckets_manager try: # Create bucket - create_bucket(bucket_name=bucket_name) + create_bucket(bucket_name=bucket_name, client=s3_client) logger.debug(f"Created new bucket: type {bucket_name}") # Append created bucket to resource set - buckets_manager.add(bucket_name) + buckets.add(bucket_name) except ClientError as error: logger.error({ @@ -207,13 +266,13 @@ def create_test_bucket(buckets_manager, @pytest.fixture -def manage_bucket_files(metadata: dict): +def manage_bucket_files(metadata: dict, s3_client, ec2_client): """Upload a file to S3 bucket and delete after the test ends. - Parameters - ---------- - metadata : dict - Metadata to get the parameters. + Args: + metadata (dict): Metadata to get the parameters. + s3_client (boto3.resources.base.ServiceResource): S3 client used to manage the bucket resources. + ec2_client (Service client instance): EC2 client to manage VPC resources. """ # Get bucket name bucket_name = metadata['bucket_name'] @@ -221,70 +280,99 @@ def manage_bucket_files(metadata: dict): # Get bucket type bucket_type = metadata['bucket_type'] + # Get only_logs_after, regions, prefix and suffix if set to generate file accordingly + file_creation_date = metadata.get('only_logs_after') + regions = metadata.get('regions', US_EAST_1_REGION).split(',') + prefix = metadata.get('path', '') + suffix = metadata.get('path_suffix', '') + # Check if the VPC type is the one to be tested vpc_bucket = bucket_type == 'vpcflow' - # Generate file - if vpc_bucket: - # Create VPC resources - flow_log_id, vpc_id = create_flow_log(vpc_name=metadata['vpc_name'], bucket_name=bucket_name) - data, key = generate_file(bucket_type=bucket_type, bucket_name=bucket_name, flow_log_id=flow_log_id) - else: - data, key = generate_file(bucket_type=bucket_type, bucket_name=bucket_name) - - try: - # Upload file to bucket - upload_bucket_file(bucket_name=bucket_name, - data=data, - key=key) - - logger.debug('Uploaded file: %s to bucket "%s"', key, bucket_name) + # Check if logs need to be created + log_number = metadata.get("expected_results", 1) > 0 - # Set filename for test execution - metadata['uploaded_file'] = key + # Generate files + if log_number: + files_to_upload = [] + metadata['uploaded_file'] = '' + try: + if vpc_bucket: + # Create VPC resources + flow_log_id, vpc_id = create_flow_log(vpc_name=metadata['vpc_name'], + bucket_name=bucket_name, + client=ec2_client) + metadata['flow_log_id'] = flow_log_id + for region in regions: + data, key = generate_file(bucket_type=bucket_type, + bucket_name=bucket_name, + date=file_creation_date, + region=region, + prefix=prefix, + suffix=suffix, + flow_log_id=flow_log_id) + files_to_upload.append((data, key)) + else: + for region in regions: + data, key = generate_file(bucket_type=bucket_type, 
+ bucket_name=bucket_name, + region=region, + prefix=prefix, + suffix=suffix, + date=file_creation_date) + files_to_upload.append((data, key)) + + for data, key in files_to_upload: + # Upload file to bucket + upload_bucket_file(bucket_name=bucket_name, + data=data, + key=key, + client=s3_client) + + logger.debug('Uploaded file: %s to bucket "%s"', key, bucket_name) + + # Set filename for test execution + metadata['uploaded_file'] += key - except ClientError as error: - logger.error({ - "message": "Client error uploading file to bucket", - "bucket_name": bucket_name, - "filename": key, - "error": str(error) - }) - raise error + except ClientError as error: + logger.error({ + "message": "Client error uploading file to bucket", + "bucket_name": bucket_name, + "error": str(error) + }) + raise error - except Exception as error: - logger.error({ - "message": "Broad error uploading file to bucket", - "bucket_name": bucket_name, - "filename": key, - "error": str(error) - }) - raise error + except Exception as error: + logger.error({ + "message": "Broad error uploading file to bucket", + "bucket_name": bucket_name, + "error": str(error) + }) + raise error yield try: - # Delete all bucket files - delete_bucket_files(bucket_name=bucket_name) + if log_number: + # Delete all bucket files + delete_bucket_files(bucket_name=bucket_name, client=s3_client) - if vpc_bucket: - # Delete VPC resources (VPC and Flow Log) - delete_vpc(vpc_id=vpc_id) + if vpc_bucket: + # Delete VPC resources (VPC and Flow Log) + delete_vpc(vpc_id=vpc_id, flow_log_id=flow_log_id, client=ec2_client) except ClientError as error: logger.error({ - "message": "Client error deleting files in bucket", + "message": "Client error deleting resources from bucket", "bucket_name": bucket_name, - "filename": key, "error": str(error) }) raise error except Exception as error: logger.error({ - "message": "Broad error deleting files in bucket", + "message": "Broad error deleting resources from bucket", "bucket_name": bucket_name, - "filename": key, "error": str(error) }) raise error @@ -298,12 +386,9 @@ def create_test_log_group(log_groups_manager, metadata: dict) -> None: """Create a log group. - Parameters - ---------- - log_groups_manager : fixture - Log groups set. - metadata : dict - Log group information. + Args: + log_groups_manager (fixture): Log groups set and CloudWatch clients. + metadata (dict): Log group information. """ # Get log group names log_group_names = metadata["log_group_name"].split(',') @@ -311,15 +396,18 @@ def create_test_log_group(log_groups_manager, # If the resource_type is defined, then the resource must be created resource_creation = 'resource_type' in metadata + log_groups, logs_clients = log_groups_manager + try: if resource_creation: # Create log group for log_group in log_group_names: - create_log_group(log_group_name=log_group) - logger.debug(f"Created log group: {log_group}") + for logs_client in logs_clients: + create_log_group(log_group_name=log_group, client=logs_client) + logger.debug(f"Created log group: {log_group}") # Append created log group to resource list - log_groups_manager.add(log_group) + log_groups.add(log_group) except ClientError as error: logger.error({ @@ -339,14 +427,12 @@ def create_test_log_group(log_groups_manager, @pytest.fixture() -def create_test_log_stream(metadata: dict) -> None: +def create_test_log_stream(metadata: dict, log_groups_manager) -> None: """Create a log stream. - Parameters - ---------- - metadata : dict - Log group information. 
- + Args: + metadata (dict): Log group information. + log_groups_manager (fixture): Log groups set and CloudWatch clients. """ # Get log group names log_group_names = metadata["log_group_name"].split(',') @@ -357,13 +443,17 @@ def create_test_log_stream(metadata: dict) -> None: # If the resource_type is defined, then the resource must be created resource_creation = 'resource_type' in metadata + _, logs_clients = log_groups_manager + try: if resource_creation: # Create log stream for each log group defined for log_group in log_group_names: - create_log_stream(log_group=log_group, - log_stream=log_stream_name) - logger.debug(f'Created log stream {log_stream_name} within log group {log_group}') + for logs_client in logs_clients: + create_log_stream(log_group=log_group, + log_stream=log_stream_name, + client=logs_client) + logger.debug(f'Created log stream {log_stream_name} within log group {log_group}') except ClientError as error: logger.error({ @@ -383,13 +473,12 @@ def create_test_log_stream(metadata: dict) -> None: @pytest.fixture -def manage_log_group_events(metadata: dict): +def manage_log_group_events(metadata: dict, logs_clients): """Upload events to a log stream inside a log group and delete the log stream after the test ends. - Parameters - ---------- - metadata : dict - Metadata to get the parameters. + Args: + metadata (dict): Metadata to get the parameters. + logs_clients (Service client instance): CloudWatch Logs client to manage the CloudWatch resources. """ # Get log group names log_group_names = metadata["log_group_name"].split(',') @@ -398,30 +487,25 @@ def manage_log_group_events(metadata: dict): log_stream_name = metadata["log_stream_name"] # Get number of events - event_number = metadata["expected_results"] + event_number = metadata.get("expected_results", 1) # If the resource_type is defined, then the resource must be created resource_creation = 'resource_type' in metadata try: if resource_creation: - # Generate event information - if 'discard_field' in metadata: - events = [ - {'timestamp': int(time() * 1000), 'message': f'{{"message":"Test event number {i}"}}'} - for i in range(event_number) - ] - else: - events = [ - {'timestamp': int(time() * 1000), 'message': f'Test event number {i}'} for i in range(event_number) - ] + log_creation_date = metadata.get('only_logs_after') for log_group in log_group_names: - # Insert log events in log group - upload_log_events( - log_stream=log_stream_name, - log_group=log_group, - events=events - ) + for logs_client in logs_clients: + # Create log events in log group + upload_log_events( + log_stream=log_stream_name, + log_group=log_group, + date=log_creation_date, + type_json='discard_field' in metadata, + events_number=event_number, + client=logs_client + ) except ClientError as error: logger.error({ @@ -443,68 +527,45 @@ def manage_log_group_events(metadata: dict): yield - try: - if resource_creation: - for log_group in log_group_names: - # Delete log_stream - delete_log_stream(log_stream=log_stream_name, log_group=log_group) - - except ClientError as error: - logger.error({ - "message": "Client error deleting log stream", - "log_stream_name": log_stream_name, - "log_group": log_group, - "error": str(error) - }) - raise error - - except Exception as error: - logger.error({ - "message": "Broad error deleting log stream", - "log_stream_name": log_stream_name, - "log_group": log_group, - "error": str(error) - }) - raise error - """SQS fixtures""" @pytest.fixture -def set_test_sqs_queue(metadata: dict, sqs_manager) -> None: - """Create 
a test sqs group - - Parameters - ---------- - metadata : dict - The metadata for the sqs queue. - sqs_manager: fixture - The SQS set for the test. +def set_test_sqs_queue(metadata: dict, sqs_manager, s3_client) -> None: + """Create a test SQS queue. + Args: + metadata (dict): The metadata for the SQS queue. + sqs_manager (fixture): The SQS set for the test. + s3_client (boto3.resources.base.ServiceResource): S3 client used to manage bucket resources. """ # Get bucket name bucket_name = metadata["bucket_name"] # Get SQS name sqs_name = metadata["sqs_name"] + sqs_queues, sqs_client = sqs_manager + try: # Create SQS and get URL - sqs_queue_url = create_sqs_queue(sqs_name=sqs_name) + sqs_queue_url = create_sqs_queue(sqs_name=sqs_name, client=sqs_client) # Add it to sqs set - sqs_manager.add(sqs_queue_url) + sqs_queues.add(sqs_queue_url) # Get SQS Queue ARN - sqs_queue_arn = get_sqs_queue_arn(sqs_url=sqs_queue_url) + sqs_queue_arn = get_sqs_queue_arn(sqs_url=sqs_queue_url, client=sqs_client) # Set policy set_sqs_policy(bucket_name=bucket_name, sqs_queue_url=sqs_queue_url, - sqs_queue_arn=sqs_queue_arn) + sqs_queue_arn=sqs_queue_arn, + client=sqs_client) # Set bucket notification configuration set_bucket_event_notification_configuration(bucket_name=bucket_name, - sqs_queue_arn=sqs_queue_arn) + sqs_queue_arn=sqs_queue_arn, + client=s3_client) except ClientError as error: # Check if the sqs exist @@ -523,7 +584,7 @@ def set_test_sqs_queue(metadata: dict, sqs_manager) -> None: @pytest.fixture def clean_s3_cloudtrail_db(): - """Delete the DB file before and after the test execution""" + """Delete the DB file before and after the test execution.""" delete_s3_db() yield diff --git a/tests/integration/test_aws/test_basic.py b/tests/integration/test_aws/test_basic.py index 52e1f2fa4aa..fb3fea9e80c 100644 --- a/tests/integration/test_aws/test_basic.py +++ b/tests/integration/test_aws/test_basic.py @@ -52,18 +52,24 @@ def test_bucket_defaults( - Restore initial configuration, both ossec.conf and local_internal_options.conf. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. + - create_test_bucket: + type: fixture + brief: Create temporal bucket. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. - set_wazuh_configuration: type: fixture brief: Apply changes to the ossec.conf configuration. + - clean_s3_cloudtrail_db: + type: fixture + brief: Delete the DB file before and after the test execution. - configure_local_internal_options_function: type: fixture brief: Apply changes to the local_internal_options.conf configuration. 
@@ -106,13 +112,13 @@ def test_bucket_defaults( assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] - # # Detect any ERROR message - # log_monitor.start( - # timeout=session_parameters.default_timeout, - # callback=event_monitor.callback_detect_all_aws_err - # ) - # - # assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] + # Detect any ERROR message + log_monitor.start( + timeout=session_parameters.default_timeout, + callback=event_monitor.callback_detect_all_aws_err + ) + + assert log_monitor.callback_result is None, ERROR_MESSAGE['error_found'] # -------------------------------------------- TEST_CLOUDWATCH_DEFAULTS ------------------------------------------------ @@ -125,9 +131,10 @@ def test_bucket_defaults( @pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_service_defaults(test_configuration, metadata, create_test_log_group, load_wazuh_basic_configuration, - set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, - truncate_monitored_files, restart_wazuh_function, file_monitoring +def test_service_defaults( + test_configuration, metadata, create_test_log_group, load_wazuh_basic_configuration, + set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, + truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. @@ -146,12 +153,15 @@ def test_service_defaults(test_configuration, metadata, create_test_log_group, l - Restore initial configuration, both ossec.conf and local_internal_options.conf. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. + - create_test_log_group: + type: fixture + brief: Create a log group. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. @@ -225,9 +235,10 @@ def test_service_defaults(test_configuration, metadata, create_test_log_group, l @pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_inspector_defaults(test_configuration, metadata, create_test_log_group, load_wazuh_basic_configuration, - set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, - truncate_monitored_files, restart_wazuh_function, file_monitoring +def test_inspector_defaults( + test_configuration, metadata, create_test_log_group, load_wazuh_basic_configuration, + set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, + truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: The module is invoked with the expected parameters and no error occurs. @@ -246,12 +257,15 @@ def test_inspector_defaults(test_configuration, metadata, create_test_log_group, - Restore initial configuration, both ossec.conf and local_internal_options.conf. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. + - create_test_log_group: + type: fixture + brief: Create a log group. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. 
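Note on the fixture changes above: the conftest.py part of this patch replaces the session-scoped resource managers with per-test fixtures that receive an explicit boto3 client and yield both the resource set and that client, so each test case cleans up exactly the buckets, log groups, and queues it created. A condensed sketch of that pattern, using plain boto3 calls instead of the qa-integration-framework helpers and a hypothetical bucket name, could look like this (illustration only, not part of the patch):

import boto3
import pytest


@pytest.fixture()
def boto_session():
    # Same profile convention as the module: AWS_PROFILE or 'dev'.
    return boto3.Session(profile_name="dev")


@pytest.fixture()
def s3_client(boto_session):
    # Resource-style S3 client pinned to a single region.
    return boto_session.resource(service_name="s3", region_name="us-east-1")


@pytest.fixture()
def buckets_manager(s3_client):
    # Track every bucket the test creates and remove them on teardown.
    buckets = set()
    yield buckets, s3_client
    for name in buckets:
        bucket = s3_client.Bucket(name)
        bucket.objects.all().delete()  # a bucket must be empty before deletion
        bucket.delete()


def test_bucket_lifecycle(buckets_manager):
    buckets, s3 = buckets_manager
    name = "wazuh-example-integration-tests"  # hypothetical bucket name
    s3.create_bucket(Bucket=name)
    buckets.add(name)
    assert any(b.name == name for b in s3.buckets.all())

The real fixtures wrap the same idea in ClientError handling and logging, as the conftest.py hunks above show.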
diff --git a/tests/integration/test_aws/test_custom_bucket.py b/tests/integration/test_aws/test_custom_bucket.py index 07c7de3c69f..ab2d003fdfd 100644 --- a/tests/integration/test_aws/test_custom_bucket.py +++ b/tests/integration/test_aws/test_custom_bucket.py @@ -31,14 +31,14 @@ @pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_custom_bucket_defaults(test_configuration, metadata, create_test_bucket, set_test_sqs_queue, - load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, - restart_wazuh_function, file_monitoring +def test_custom_bucket_defaults( + test_configuration, metadata, create_test_bucket, set_test_sqs_queue, + load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, + restart_wazuh_function, file_monitoring ): """ description: Test the AWS S3 custom bucket module is invoked with the expected parameters and no error occurs. - test_phases: - setup: - Load Wazuh light configuration. @@ -54,17 +54,19 @@ def test_custom_bucket_defaults(test_configuration, metadata, create_test_bucket - Restore initial configuration, both ossec.conf and local_internal_options.conf. wazuh_min_version: 4.7.0 - parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. - - upload_and_delete_file_to_s3: + - create_test_bucket: + type: fixture + brief: Create temporal bucket. + - set_test_sqs_queue: type: fixture - brief: Upload a file to S3 bucket for the day of the execution. + brief: Create temporal SQS queue. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. @@ -83,11 +85,9 @@ def test_custom_bucket_defaults(test_configuration, metadata, create_test_bucket - file_monitoring: type: fixture brief: Handle the monitoring of a specified file. - assertions: - Check in the log that the module was called with correct parameters. - Check in the log that no errors occurs. - input_description: - The `configuration_defaults` file provides the module configuration for this test. - The `cases_defaults` file provides the test cases. @@ -137,15 +137,15 @@ def test_custom_bucket_defaults(test_configuration, metadata, create_test_bucket @pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) -def test_custom_bucket_logs(test_configuration, metadata, create_test_bucket, set_test_sqs_queue, manage_bucket_files, - load_wazuh_basic_configuration, set_wazuh_configuration, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, - file_monitoring +def test_custom_bucket_logs( + test_configuration, metadata, create_test_bucket, set_test_sqs_queue, manage_bucket_files, + load_wazuh_basic_configuration, set_wazuh_configuration, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, + file_monitoring ): """ description: Test the AWS S3 custom bucket module is invoked with the expected parameters and retrieve the messages from the SQS Queue. - test_phases: - setup: - Load Wazuh light configuration. @@ -164,17 +164,22 @@ def test_custom_bucket_logs(test_configuration, metadata, create_test_bucket, se - Deletes the file created in the S3 Bucket. 
wazuh_min_version: 4.7.0 - parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. - - upload_and_delete_file_to_s3: + - create_test_bucket: + type: fixture + brief: Create temporal bucket. + - set_test_sqs_queue: + type: fixture + brief: Create temporal SQS queue. + - manage_bucket_files: type: fixture - brief: Upload a file to S3 bucket for the day of the execution. + brief: S3 buckets manager. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. @@ -193,15 +198,10 @@ def test_custom_bucket_logs(test_configuration, metadata, create_test_bucket, se - file_monitoring: type: fixture brief: Handle the monitoring of a specified file. - - upload_and_delete_file_to_s3: - type: fixture - brief: Upload a file to S3 bucket for the day of the execution. - assertions: - Check in the log that the module was called with correct parameters. - Check that the module retrieved a message from the SQS Queue. - Check that the module processed a message from the SQS Queue. - input_description: - The `configuration_defaults` file provides the module configuration for this test. - The `cases_defaults` file provides the test cases. diff --git a/tests/integration/test_aws/test_discard_regex.py b/tests/integration/test_aws/test_discard_regex.py index 0e95decdaf5..8ddea4f6c7f 100644 --- a/tests/integration/test_aws/test_discard_regex.py +++ b/tests/integration/test_aws/test_discard_regex.py @@ -34,14 +34,13 @@ zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_bucket_discard_regex( - test_configuration, metadata, create_test_bucket, manage_bucket_files, load_wazuh_basic_configuration, - set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, - truncate_monitored_files, restart_wazuh_function, file_monitoring, + test_configuration, metadata, create_test_bucket, manage_bucket_files, + load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring, ): """ description: Check that some bucket logs are excluded when the regex and field defined in match an event. - test_phases: - setup: - Load Wazuh light configuration. @@ -57,16 +56,20 @@ def test_bucket_discard_regex( - teardown: - Truncate wazuh logs. - Restore initial configuration, both ossec.conf and local_internal_options.conf. - wazuh_min_version: 4.6.0 - parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. + - create_test_bucket: + type: fixture + brief: Create temporal bucket. + - manage_bucket_files: + type: fixture + brief: S3 buckets manager. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. @@ -88,12 +91,10 @@ def test_bucket_discard_regex( - file_monitoring: type: fixture brief: Handle the monitoring of a specified file. - assertions: - Check in the log that the module was called with correct parameters. - Check the expected number of events were forwarded to analysisd. - Check the database was created and updated accordingly. - input_description: - The `configuration_bucket_discard_regex` file provides the module configuration for this test. - The `cases_bucket_discard_regex` file provides the test cases. 
@@ -103,7 +104,7 @@ def test_bucket_discard_regex( only_logs_after = metadata['only_logs_after'] discard_field = metadata['discard_field'] discard_regex = metadata['discard_regex'] - found_logs = metadata['found_logs'] + expected_results = metadata['expected_results'] skipped_logs = metadata['skipped_logs'] path = metadata['path'] if 'path' in metadata else None @@ -143,7 +144,7 @@ def test_bucket_discard_regex( log_monitor.start( timeout=TIMEOUT[20], callback=event_monitor.callback_detect_event_processed, - accumulations=found_logs + accumulations=expected_results ) log_monitor.start( @@ -175,7 +176,6 @@ def test_cloudwatch_discard_regex_json( """ description: Check that some CloudWatch JSON logs are excluded when the regex and field defined in match an event. - test_phases: - setup: - Load Wazuh light configuration. @@ -191,16 +191,23 @@ def test_cloudwatch_discard_regex_json( - teardown: - Truncate wazuh logs. - Restore initial configuration, both ossec.conf and local_internal_options.conf. - wazuh_min_version: 4.6.0 - parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. + - create_test_log_group: + type: fixture + brief: Create a log group. + - create_test_log_stream: + type: fixture + brief: Create a log stream with events for the day of execution. + - manage_log_group_events: + type: fixture + brief: Manage events for the created log stream and log group. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. @@ -222,12 +229,10 @@ def test_cloudwatch_discard_regex_json( - file_monitoring: type: fixture brief: Handle the monitoring of a specified file. - assertions: - Check in the log that the module was called with correct parameters. - Check the expected number of events were forwarded to analysisd. - Check the database was created and updated accordingly. - input_description: - The `configuration_cloudwatch_discard_regex` file provides the module configuration for this test. - The `cases_cloudwatch_discard_regex` file provides the test cases. @@ -299,7 +304,6 @@ def test_cloudwatch_discard_regex_simple_text( """ description: Check that some CloudWatch simple text logs are excluded when the regex defined in matches an event. - test_phases: - setup: - Load Wazuh light configuration. @@ -318,14 +322,22 @@ def test_cloudwatch_discard_regex_simple_text( - Delete the uploaded file wazuh_min_version: 4.6.0 - parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. + - create_test_log_group: + type: fixture + brief: Create a log group. + - create_test_log_stream: + type: fixture + brief: Create a log stream with events for the day of execution. + - manage_log_group_events: + type: fixture + brief: Manage events for the created log stream and log group. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. @@ -347,12 +359,10 @@ def test_cloudwatch_discard_regex_simple_text( - file_monitoring: type: fixture brief: Handle the monitoring of a specified file. - assertions: - Check in the log that the module was called with correct parameters. - Check the expected number of events were forwarded to analysisd. - Check the database was created and updated accordingly. - input_description: - The `configuration_cloudwatch_discard_regex_simple_text` file provides the module configuration for this test. 
@@ -422,7 +432,6 @@ def test_inspector_discard_regex( """ description: Check that some Inspector logs are excluded when the regex and field defined in match an event. - test_phases: - setup: - Load Wazuh light configuration. @@ -438,11 +447,9 @@ def test_inspector_discard_regex( - teardown: - Truncate wazuh logs. - Restore initial configuration, both ossec.conf and local_internal_options.conf. - wazuh_min_version: 4.6.0 - parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: @@ -469,12 +476,10 @@ def test_inspector_discard_regex( - file_monitoring: type: fixture brief: Handle the monitoring of a specified file. - assertions: - Check in the log that the module was called with correct parameters. - Check the expected number of events were forwarded to analysisd. - Check the database was created and updated accordingly. - input_description: - The `configuration_inspector_discard_regex` file provides the module configuration for this test. - The `cases_inspector_discard_regex` file provides the test cases. diff --git a/tests/integration/test_aws/test_log_groups.py b/tests/integration/test_aws/test_log_groups.py index 2443aedefb8..2441e659c39 100644 --- a/tests/integration/test_aws/test_log_groups.py +++ b/tests/integration/test_aws/test_log_groups.py @@ -33,7 +33,7 @@ @pytest.mark.tier(level=0) -@pytest.mark.parametrize('test_configuration metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_log_groups( @@ -63,12 +63,21 @@ def test_log_groups( - Delete the uploaded file. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. + - create_test_log_group: + type: fixture + brief: Create a log group. + - create_test_log_stream: + type: fixture + brief: Create a log stream with events for the day of execution. + - manage_log_group_events: + type: fixture + brief: Manage events for the created log stream and log group. - create_log_stream: type: fixture brief: Create a log stream with events for the day of execution. 
diff --git a/tests/integration/test_aws/test_only_logs_after.py b/tests/integration/test_aws/test_only_logs_after.py index 98bf3f01321..b904c053a03 100644 --- a/tests/integration/test_aws/test_only_logs_after.py +++ b/tests/integration/test_aws/test_only_logs_after.py @@ -8,25 +8,26 @@ import pytest from datetime import datetime - # qa-integration-framework imports from wazuh_testing import session_parameters from wazuh_testing.constants.paths.aws import S3_CLOUDTRAIL_DB_PATH, AWS_SERVICES_DB_PATH -from wazuh_testing.constants.aws import ONLY_LOGS_AFTER_PARAM, PATH_DATE_FORMAT, VPC_FLOW_TYPE, INSPECTOR_TYPE +from wazuh_testing.constants.aws import ONLY_LOGS_AFTER_PARAM, VPC_FLOW_TYPE, US_EAST_1_REGION from wazuh_testing.utils.db_queries.aws_db import get_multiple_s3_db_row, get_service_db_row, get_s3_db_row -from wazuh_testing.modules.aws.utils import (call_aws_module, create_log_events, create_log_stream, path_exist, - get_last_file_key, upload_file, analyze_command_output) +from wazuh_testing.modules.aws.utils import (call_aws_module, upload_log_events, create_log_stream, path_exist, + get_last_file_key, analyze_command_output, + generate_file, upload_bucket_file) from wazuh_testing.modules.aws.patterns import (NO_LOG_PROCESSED, NO_BUCKET_LOG_PROCESSED, MARKER, NO_NEW_EVENTS, EVENT_SENT) # Local module imports from . import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options +from .configurator import configurator +from .utils import ERROR_MESSAGE, TIMEOUT, local_internal_options pytestmark = [pytest.mark.server] # Set test configurator for the module -configurator = TestConfigurator(module='only_logs_after_test_module') +configurator.module = 'only_logs_after_test_module' # --------------------------------------------- TEST_BUCKET_WITHOUT_ONLY_LOGS_AFTER ------------------------------------ # Configure T1 test @@ -35,13 +36,14 @@ @pytest.mark.tier(level=0) -@pytest.mark.parametrize('test_configuration metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_bucket_without_only_logs_after( - test_configuration, metadata, upload_and_delete_file_to_s3, load_wazuh_basic_configuration, set_wazuh_configuration, - clean_s3_cloudtrail_db, configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, - file_monitoring + test_configuration, metadata, create_test_bucket, manage_bucket_files, + load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, + file_monitoring ): """ description: Only the log uploaded during execution is processed. @@ -63,15 +65,18 @@ def test_bucket_without_only_logs_after( - Delete the uploaded file. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. - - upload_and_delete_file_to_s3: + - create_test_bucket: + type: fixture + brief: Create temporal bucket. + - manage_bucket_files: type: fixture - brief: Upload a file for the day of the execution and delete after the test. + brief: S3 buckets manager. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. 
@@ -109,14 +114,13 @@ def test_bucket_without_only_logs_after( parameters = [ 'wodles/aws/aws-s3', '--bucket', bucket_name, - '--aws_profile', 'qa', '--type', bucket_type, '--debug', '2' ] if path is not None: - parameters.insert(5, path) - parameters.insert(5, '--trail_prefix') + parameters.insert(3, path) + parameters.insert(3, '--trail_prefix') # Check AWS module started log_monitor.start( @@ -135,7 +139,7 @@ def test_bucket_without_only_logs_after( assert log_monitor.callback_result is not None, ERROR_MESSAGE['incorrect_parameters'] log_monitor.start( - timeout=session_parameters.default_timeout, + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_event_processed, accumulations=expected_results ) @@ -165,13 +169,13 @@ def test_bucket_without_only_logs_after( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('test_configuration metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_service_without_only_logs_after( - test_configuration, metadata, create_log_stream_in_existent_group, load_wazuh_basic_configuration, - set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, truncate_monitored_files, - restart_wazuh_function, file_monitoring + test_configuration, metadata, create_test_log_group, create_test_log_stream, manage_log_group_events, + load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: Only the event created during execution is processed. @@ -193,15 +197,21 @@ def test_service_without_only_logs_after( - Delete the uploaded file. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. - - create_log_stream_in_existent_group: + - create_test_log_group: + type: fixture + brief: Create a log group. + - create_test_log_stream: type: fixture brief: Create a log stream with events for the day of execution. + - manage_log_group_events: + type: fixture + brief: Manage events for the created log stream and log group. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. 
@@ -237,8 +247,7 @@ def test_service_without_only_logs_after( parameters = [ 'wodles/aws/aws-s3', '--service', service_type, - '--aws_profile', 'qa', - '--regions', 'us-east-1', + '--regions', US_EAST_1_REGION, '--aws_log_groups', log_group_name, '--debug', '2' ] @@ -265,7 +274,7 @@ def test_service_without_only_logs_after( assert log_group_name == data.aws_log_group - assert metadata['log_stream'] == data.aws_log_stream + assert metadata['log_stream_name'] == data.aws_log_stream # Detect any ERROR message log_monitor.start( @@ -283,12 +292,13 @@ def test_service_without_only_logs_after( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('test_configuration metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_bucket_with_only_logs_after( - test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + test_configuration, metadata, create_test_bucket, manage_bucket_files, + load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: All logs with a timestamp greater than the only_logs_after value are processed. @@ -310,12 +320,18 @@ def test_bucket_with_only_logs_after( - Delete the uploaded file. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. + - create_test_bucket: + type: fixture + brief: Create temporal bucket. + - manage_bucket_files: + type: fixture + brief: S3 buckets manager. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. 
@@ -354,15 +370,14 @@ def test_bucket_with_only_logs_after( parameters = [ 'wodles/aws/aws-s3', '--bucket', bucket_name, - '--aws_profile', 'qa', '--only_logs_after', only_logs_after, '--type', bucket_type, '--debug', '2' ] if path is not None: - parameters.insert(5, path) - parameters.insert(5, '--trail_prefix') + parameters.insert(3, path) + parameters.insert(3, '--trail_prefix') # Check AWS module started log_monitor.start( @@ -393,7 +408,7 @@ def test_bucket_with_only_logs_after( for row in get_multiple_s3_db_row(table_name=table_name): assert bucket_name in row.bucket_path assert ( - datetime.strptime(only_logs_after, '%Y-%b-%d') < datetime.strptime(str(row.created_date), '%Y%m%d') + datetime.strptime(only_logs_after, '%Y-%b-%d') == datetime.strptime(str(row.created_date), '%Y%m%d') ) # Detect any ERROR message @@ -412,12 +427,13 @@ def test_bucket_with_only_logs_after( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('test_configuration metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_cloudwatch_with_only_logs_after( - test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + test_configuration, metadata, create_test_log_group, create_test_log_stream, manage_log_group_events, + load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: All events with a timestamp greater than the only_logs_after value are processed. @@ -439,12 +455,21 @@ def test_cloudwatch_with_only_logs_after( - Delete the uploaded file. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. + - create_test_log_group: + type: fixture + brief: Create a log group. + - create_test_log_stream: + type: fixture + brief: Create a log stream with events for the day of execution. + - manage_log_group_events: + type: fixture + brief: Manage events for the created log stream and log group. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. 
@@ -486,9 +511,8 @@ def test_cloudwatch_with_only_logs_after( parameters = [ 'wodles/aws/aws-s3', '--service', service_type, - '--aws_profile', 'qa', '--only_logs_after', only_logs_after, - '--regions', 'us-east-1', + '--regions', US_EAST_1_REGION, '--aws_log_groups', log_group_name, '--debug', '2' ] @@ -521,7 +545,7 @@ def test_cloudwatch_with_only_logs_after( data = get_service_db_row(table_name=table_name_map[service_type]) assert log_group_name == data.aws_log_group - assert metadata['log_stream'] == data.aws_log_stream + assert metadata['log_stream_name'] == data.aws_log_stream # Detect any ERROR message log_monitor.start( @@ -539,12 +563,13 @@ def test_cloudwatch_with_only_logs_after( @pytest.mark.tier(level=0) -@pytest.mark.parametrize('test_configuration metadata', +@pytest.mark.parametrize('test_configuration, metadata', zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_inspector_with_only_logs_after( - test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + test_configuration, metadata, + load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: All events with a timestamp greater than the only_logs_after value are processed. @@ -566,7 +591,7 @@ def test_inspector_with_only_logs_after( - Delete the uploaded file. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: @@ -612,9 +637,8 @@ def test_inspector_with_only_logs_after( parameters = [ 'wodles/aws/aws-s3', '--service', service_type, - '--aws_profile', 'qa', '--only_logs_after', only_logs_after, - '--regions', 'us-east-1', + '--regions', US_EAST_1_REGION, '--debug', '2' ] @@ -657,11 +681,12 @@ def test_inspector_with_only_logs_after( @pytest.mark.tier(level=1) -@pytest.mark.parametrize('metadata', - configurator.metadata, +@pytest.mark.parametrize('metadata', + configurator.metadata, ids=configurator.cases_ids) def test_bucket_multiple_calls( - metadata, clean_s3_cloudtrail_db, load_wazuh_basic_configuration, restart_wazuh_function, delete_file_from_s3 + metadata, clean_s3_cloudtrail_db, s3_client, create_test_bucket, manage_bucket_files, + load_wazuh_basic_configuration, restart_wazuh_function ): """ description: Call the AWS module multiple times with different only_logs_after values. @@ -690,6 +715,15 @@ def test_bucket_multiple_calls( - metadata: type: dict brief: Get metadata from the module. + - s3_client: + type: fixture + brief: S3 client to access the bucket. + - create_test_bucket: + type: fixture + brief: Create temporal bucket. + - manage_bucket_files: + type: fixture + brief: S3 buckets manager. - clean_s3_cloudtrail_db: type: fixture brief: Delete the DB file before and after the test execution. 
@@ -708,13 +742,14 @@ def test_bucket_multiple_calls( bucket_type = metadata['bucket_type'] bucket_name = metadata['bucket_name'] + expected_results = metadata['expected_results'] path = metadata.get('path') + region = US_EAST_1_REGION base_parameters = [ '--bucket', bucket_name, '--type', bucket_type, - '--regions', 'us-east-1', - '--aws_profile', 'qa', + '--regions', region, '--debug', '2' ] @@ -722,52 +757,100 @@ def test_bucket_multiple_calls( base_parameters.extend(['--trail_prefix', path]) # Call the module without only_logs_after and check that no logs were processed - last_marker_key = datetime.utcnow().strftime(PATH_DATE_FORMAT) - # Get bucket type if bucket_type == VPC_FLOW_TYPE: pattern = fr"{NO_LOG_PROCESSED}" + # Check for the non 'processed' messages in the given output. + # For VPC the number of messages depend on the number of flow log IDs obtained by the module which may vary. + analyze_command_output( + command_output=call_aws_module(*base_parameters), + callback=event_monitor.make_aws_callback(pattern), + error_message=ERROR_MESSAGE['event_not_found'], + match_exact_number=False + ) else: pattern = fr"{NO_BUCKET_LOG_PROCESSED}" - - # Check for the non 'processed' messages in the given output. - analyze_command_output( - command_output=call_aws_module(*base_parameters), - callback=event_monitor.make_aws_callback(pattern), - expected_results=1, - error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] - ) + # Check for the non 'processed' messages in the given output. + analyze_command_output( + command_output=call_aws_module(*base_parameters), + callback=event_monitor.make_aws_callback(pattern), + expected_results=1, + error_message=ERROR_MESSAGE['unexpected_number_of_events_found'] + ) # Call the module with only_logs_after set in the past and check that the expected number of logs were processed analyze_command_output( command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-20'), callback=event_monitor.callback_detect_event_processed, - expected_results=3, + expected_results=expected_results, error_message=ERROR_MESSAGE['incorrect_event_number'] ) - # Call the module with the same parameters in and check there were no duplicates - expected_skipped_logs_step_3 = metadata.get('expected_skipped_logs_step_3', 1) - analyze_command_output( - command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-20'), - callback=event_monitor.make_aws_callback(pattern), - expected_results=expected_skipped_logs_step_3, - error_message=ERROR_MESSAGE['incorrect_event_number'] - ) + if bucket_type == VPC_FLOW_TYPE: + # Call the module with the same parameters in and check there were no duplicates + # For VPC the number of messages depend on the number of flow log IDs obtained by the module which may vary. 
+ expected_skipped_logs_step_3 = metadata.get('expected_skipped_logs_step_3', 1) + analyze_command_output( + command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-20'), + callback=event_monitor.make_aws_callback(pattern), + expected_results=expected_skipped_logs_step_3, + error_message=ERROR_MESSAGE['incorrect_event_number'], + match_exact_number=False + ) - # Call the module with only_logs_after set with an early date than the one set previously and check that no logs - # were processed, there were no duplicates - analyze_command_output( - command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-22'), - callback=event_monitor.make_aws_callback(pattern), - expected_results=expected_skipped_logs_step_3 - 1 if expected_skipped_logs_step_3 > 1 else 1, - error_message=ERROR_MESSAGE['incorrect_event_number'] - ) + # Call the module with only_logs_after set with an early date than the one set previously and check that no logs + # were processed, there were no duplicates + analyze_command_output( + command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-22'), + callback=event_monitor.make_aws_callback(pattern), + expected_results=expected_skipped_logs_step_3 - 1 if expected_skipped_logs_step_3 > 1 else 1, + error_message=ERROR_MESSAGE['incorrect_event_number'], + match_exact_number=False + ) + else: + # Call the module with the same parameters in and check there were no duplicates + expected_skipped_logs_step_3 = metadata.get('expected_skipped_logs_step_3', 1) + analyze_command_output( + command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-20'), + callback=event_monitor.make_aws_callback(pattern), + expected_results=expected_skipped_logs_step_3, + error_message=ERROR_MESSAGE['incorrect_event_number'] + ) + + # Call the module with only_logs_after set with an early date than the one set previously and check that no logs + # were processed, there were no duplicates + analyze_command_output( + command_output=call_aws_module(*base_parameters, ONLY_LOGS_AFTER_PARAM, '2022-NOV-22'), + callback=event_monitor.make_aws_callback(pattern), + expected_results=expected_skipped_logs_step_3 - 1 if expected_skipped_logs_step_3 > 1 else 1, + error_message=ERROR_MESSAGE['incorrect_event_number'] + ) # Upload a log file for the day of the test execution and call the module without only_logs_after and check that # only the uploaded logs were processed and the last marker is specified in the DB. 
- last_marker_key = get_last_file_key(bucket_type, bucket_name, datetime.utcnow()) - metadata['filename'] = upload_file(bucket_type, bucket_name) + last_marker_key = get_last_file_key(bucket_type, bucket_name, datetime.utcnow(), region, s3_client) + if bucket_type == VPC_FLOW_TYPE: + data, key = generate_file(bucket_type=bucket_type, + bucket_name=bucket_name, + region=region, + prefix='', + suffix='', + date='', + flow_log_id=metadata['flow_log_id']) + else: + data, key = generate_file(bucket_type=bucket_type, + bucket_name=bucket_name, + region=region, + prefix='', + suffix='', + date='') + metadata['filename'] = key + + upload_bucket_file(bucket_name=bucket_name, + data=data, + key=key, + client=s3_client) + pattern = fr"{MARKER}{last_marker_key}" analyze_command_output( @@ -784,8 +867,8 @@ def test_bucket_multiple_calls( @pytest.mark.tier(level=1) -@pytest.mark.parametrize('metadata', - configurator.metadata, +@pytest.mark.parametrize('metadata', + configurator.metadata, ids=configurator.cases_ids) @pytest.mark.xfail def test_inspector_multiple_calls( @@ -827,8 +910,7 @@ def test_inspector_multiple_calls( base_parameters = [ '--service', service_type, - '--regions', 'us-east-1', - '--aws_profile', 'qa', + '--regions', US_EAST_1_REGION, '--debug', '2' ] @@ -872,11 +954,13 @@ def test_inspector_multiple_calls( @pytest.mark.tier(level=1) -@pytest.mark.parametrize('metadata', - configurator.metadata, +@pytest.mark.parametrize('metadata', + configurator.metadata, ids=configurator.cases_ids) +@pytest.mark.xfail def test_cloudwatch_multiple_calls( - metadata, clean_aws_services_db, load_wazuh_basic_configuration, restart_wazuh_function, delete_log_stream + metadata, clean_aws_services_db, create_test_log_group, create_test_log_stream, manage_log_group_events, + logs_clients, load_wazuh_basic_configuration, restart_wazuh_function ): """ description: Call the AWS module multiple times with different only_logs_after values. @@ -903,6 +987,15 @@ def test_cloudwatch_multiple_calls( - metadata: type: dict brief: Get metadata from the module. + - create_test_log_group: + type: fixture + brief: Create a log group. + - create_test_log_stream: + type: fixture + brief: Create a log stream with events for the day of execution. + - manage_log_group_events: + type: fixture + brief: Manage events for the created log stream and log group. - clean_aws_services_db: type: fixture brief: Delete the DB file before and after the test execution. @@ -921,12 +1014,15 @@ def test_cloudwatch_multiple_calls( service_type = metadata['service_type'] log_group_name = metadata['log_group_name'] + log_stream_name = metadata['log_stream_name'] + + # Obtain generated client for test case + log_client = logs_clients[0] base_parameters = [ '--service', service_type, '--aws_log_groups', log_group_name, - '--regions', 'us-east-1', - '--aws_profile', 'qa', + '--regions', US_EAST_1_REGION, '--debug', '2' ] @@ -968,9 +1064,8 @@ def test_cloudwatch_multiple_calls( # Upload a log file for the day of the test execution and call the module without only_logs_after and check that # only the uploaded logs were processed. 
- log_stream = create_log_stream() - metadata['log_stream'] = log_stream - create_log_events(log_stream) + upload_log_events(log_stream=log_stream_name, log_group=log_group_name, date='', + type_json=False, events_number=1, client=log_client) analyze_command_output( command_output=call_aws_module(*base_parameters), diff --git a/tests/integration/test_aws/test_parser.py b/tests/integration/test_aws/test_parser.py index e97e8ea1ad3..0ad1d9c50dc 100644 --- a/tests/integration/test_aws/test_parser.py +++ b/tests/integration/test_aws/test_parser.py @@ -13,12 +13,13 @@ # Local module imports from . import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options +from .configurator import configurator +from .utils import ERROR_MESSAGE, TIMEOUT, local_internal_options pytestmark = [pytest.mark.server] # Set test configurator for the module -configurator = TestConfigurator(module='parser_test_module') +configurator.module = 'parser_test_module' # --------------------------------------------TEST_BUCKET_AND_SERVICE_MISSING ------------------------------------------ # Configure T1 test @@ -51,7 +52,7 @@ def test_bucket_and_service_missing( - Restore initial configuration, both ossec.conf and local_internal_options.conf. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: @@ -120,7 +121,7 @@ def test_type_missing_in_bucket( - Restore initial configuration, both ossec.conf and local_internal_options.conf. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: @@ -188,7 +189,7 @@ def test_type_missing_in_service( - Restore initial configuration, both ossec.conf and local_internal_options.conf. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: @@ -257,7 +258,7 @@ def test_empty_values_in_bucket( - Restore initial configuration, both ossec.conf and local_internal_options.conf. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: @@ -325,7 +326,7 @@ def test_empty_values_in_service( - Restore initial configuration, both ossec.conf and local_internal_options.conf. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: @@ -394,7 +395,7 @@ def test_invalid_values_in_bucket( - Restore initial configuration, both ossec.conf and local_internal_options.conf. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: @@ -424,7 +425,7 @@ def test_invalid_values_in_bucket( - The `configuration_values_in_bucket` file provides the configuration for this test. """ log_monitor.start( - timeout=session_parameters.default_timeout, + timeout=TIMEOUT[20], callback=event_monitor.callback_detect_aws_invalid_value, ) @@ -462,7 +463,7 @@ def test_invalid_values_in_service( - Restore initial configuration, both ossec.conf and local_internal_options.conf. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: @@ -530,7 +531,7 @@ def test_multiple_bucket_and_service_tags( - Restore initial configuration, both ossec.conf and local_internal_options.conf. 
wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: diff --git a/tests/integration/test_aws/test_path.py b/tests/integration/test_aws/test_path.py index 2ab85f869aa..ff3ab4053b6 100644 --- a/tests/integration/test_aws/test_path.py +++ b/tests/integration/test_aws/test_path.py @@ -15,12 +15,13 @@ # Local module imports from . import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options +from .configurator import configurator +from .utils import ERROR_MESSAGE, TIMEOUT, local_internal_options pytestmark = [pytest.mark.server] # Set test configurator for the module -configurator = TestConfigurator(module='path_test_module') +configurator.module = 'path_test_module' # ---------------------------------------------------- TEST_PATH ------------------------------------------------------- # Configure T1 test @@ -33,8 +34,9 @@ zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_path( - test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + test_configuration, metadata, load_wazuh_basic_configuration, create_test_bucket, manage_bucket_files, + set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, + truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: Only logs within a path are processed. @@ -57,12 +59,18 @@ def test_path( - Delete the uploaded file. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. + - create_test_bucket: + type: fixture + brief: Create temporal bucket. + - manage_bucket_files: + type: fixture + brief: S3 buckets manager. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. @@ -103,7 +111,6 @@ def test_path( parameters = [ 'wodles/aws/aws-s3', '--bucket', bucket_name, - '--aws_profile', 'qa', '--trail_prefix', path, '--only_logs_after', only_logs_after, '--type', bucket_type, diff --git a/tests/integration/test_aws/test_path_suffix.py b/tests/integration/test_aws/test_path_suffix.py index ea07f74cf96..da71d7e1819 100644 --- a/tests/integration/test_aws/test_path_suffix.py +++ b/tests/integration/test_aws/test_path_suffix.py @@ -16,12 +16,13 @@ # Local module imports from . 
import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options +from .configurator import configurator +from .utils import ERROR_MESSAGE, TIMEOUT, local_internal_options pytestmark = [pytest.mark.server] # Set test configurator for the module -configurator = TestConfigurator(module='path_suffix_test_module') +configurator.module = 'path_suffix_test_module' # ---------------------------------------------------- TEST_PATH ------------------------------------------------------- # Configure T1 test @@ -34,8 +35,9 @@ zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_path_suffix( - test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + test_configuration, metadata, load_wazuh_basic_configuration, create_test_bucket, manage_bucket_files, + set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, + truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: Only logs within a path_suffix are processed. @@ -58,12 +60,18 @@ def test_path_suffix( - Delete the uploaded file. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. + - create_test_bucket: + type: fixture + brief: Create temporal bucket. + - manage_bucket_files: + type: fixture + brief: S3 buckets manager. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. @@ -98,15 +106,11 @@ def test_path_suffix( only_logs_after = metadata['only_logs_after'] path_suffix = metadata['path_suffix'] expected_results = metadata['expected_results'] - pattern = ( - fr".*No logs found in 'AWSLogs/{path_suffix}/'. " - fr"Check the provided prefix and the location of the logs for the bucket type '{bucket_type}'*" - ) + pattern = fr".*WARNING: Bucket: - No files were found in '{bucket_name}/{path_suffix}/'. No logs will be processed.\n+" parameters = [ 'wodles/aws/aws-s3', '--bucket', bucket_name, - '--aws_profile', 'qa', '--trail_suffix', path_suffix, '--only_logs_after', only_logs_after, '--type', bucket_type, diff --git a/tests/integration/test_aws/test_regions.py b/tests/integration/test_aws/test_regions.py index 159636d153b..0cd809b7526 100644 --- a/tests/integration/test_aws/test_regions.py +++ b/tests/integration/test_aws/test_regions.py @@ -17,12 +17,13 @@ # Local module imports from . 
import event_monitor -from .utils import ERROR_MESSAGE, TIMEOUT, TestConfigurator, local_internal_options +from .configurator import configurator +from .utils import ERROR_MESSAGE, TIMEOUT, local_internal_options pytestmark = [pytest.mark.server] # Set test configurator for the module -configurator = TestConfigurator(module='regions_test_module') +configurator.module = 'regions_test_module' # ---------------------------------------------------- TEST_PATH ------------------------------------------------------- # Configure T1 test @@ -35,8 +36,9 @@ zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_regions( - test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_s3_cloudtrail_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + test_configuration, metadata, load_wazuh_basic_configuration, create_test_bucket, manage_bucket_files, + set_wazuh_configuration, clean_s3_cloudtrail_db, configure_local_internal_options_function, + truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: Only the logs for the specified region are processed. @@ -60,12 +62,18 @@ def test_regions( - Delete the uploaded file. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. + - create_test_bucket: + type: fixture + brief: Create temporal bucket. + - manage_bucket_files: + type: fixture + brief: S3 buckets manager. - load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. @@ -105,7 +113,6 @@ def test_regions( parameters = [ 'wodles/aws/aws-s3', '--bucket', bucket_name, - '--aws_profile', 'qa', '--only_logs_after', only_logs_after, '--regions', regions, '--type', bucket_type, @@ -130,7 +137,7 @@ def test_regions( if expected_results: log_monitor.start( - timeout=TIMEOUT[20], + timeout=TIMEOUT[50], callback=event_monitor.callback_detect_event_processed, accumulations=expected_results ) @@ -177,8 +184,9 @@ def test_regions( zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_cloudwatch_regions( - test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + test_configuration, metadata, load_wazuh_basic_configuration, create_test_log_group, create_test_log_stream, + manage_log_group_events, set_wazuh_configuration, clean_aws_services_db, + configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: Only the logs for the specified region are processed. @@ -202,12 +210,21 @@ def test_cloudwatch_regions( - Delete the uploaded file. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: type: dict brief: Get metadata from the module. + - create_test_log_group: + type: fixture + brief: Create a log group. + - create_test_log_stream: + type: fixture + brief: Create a log stream with events for the day of execution. + - manage_log_group_events: + type: fixture + brief: Manage events for the created log stream and log group. 
- load_wazuh_basic_configuration: type: fixture brief: Load basic wazuh configuration. @@ -247,7 +264,6 @@ def test_cloudwatch_regions( parameters = [ 'wodles/aws/aws-s3', '--service', service_type, - '--aws_profile', 'qa', '--only_logs_after', only_logs_after, '--regions', regions, '--aws_log_groups', log_group_name, @@ -282,7 +298,7 @@ def test_cloudwatch_regions( log_monitor.start( timeout=session_parameters.default_timeout, callback=event_monitor.make_aws_callback( - fr".*\+\+\+ ERROR: The region '{regions}' is not a valid one." + fr".*\+\+\+ ERROR: Invalid region '{regions}'" ), ) @@ -317,8 +333,9 @@ def test_cloudwatch_regions( zip(configurator.test_configuration_template, configurator.metadata), ids=configurator.cases_ids) def test_inspector_regions( - test_configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration, clean_aws_services_db, - configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_function, file_monitoring + test_configuration, metadata, load_wazuh_basic_configuration, + set_wazuh_configuration, clean_aws_services_db, configure_local_internal_options_function, + truncate_monitored_files, restart_wazuh_function, file_monitoring ): """ description: Only the logs for the specified region are processed. @@ -342,7 +359,7 @@ def test_inspector_regions( - Delete the uploaded file. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. - metadata: @@ -386,7 +403,6 @@ def test_inspector_regions( parameters = [ 'wodles/aws/aws-s3', '--service', service_type, - '--aws_profile', 'qa', '--only_logs_after', only_logs_after, '--regions', regions, '--debug', '2' @@ -420,7 +436,7 @@ def test_inspector_regions( log_monitor.start( timeout=session_parameters.default_timeout, callback=event_monitor.make_aws_callback( - fr".*\+\+\+ ERROR: The region '{regions}' is not a valid one." + fr".*\+\+\+ ERROR: Unsupported region '{regions}'" ), ) diff --git a/tests/integration/test_aws/test_remove_from_bucket.py b/tests/integration/test_aws/test_remove_from_bucket.py index 28b31789384..bf8c8e84e62 100644 --- a/tests/integration/test_aws/test_remove_from_bucket.py +++ b/tests/integration/test_aws/test_remove_from_bucket.py @@ -181,7 +181,7 @@ def test_remove_log_stream( - Restore initial configuration, both ossec.conf and local_internal_options.conf. wazuh_min_version: 4.6.0 parameters: - - configuration: + - test_configuration: type: dict brief: Get configurations from the module. 
- metadata: diff --git a/tests/integration/test_aws/utils.py b/tests/integration/test_aws/utils.py index 6fef9bd19bc..5f133b31860 100644 --- a/tests/integration/test_aws/utils.py +++ b/tests/integration/test_aws/utils.py @@ -35,6 +35,7 @@ "incorrect_service_calls_amount": "The AWS module was not called for bucket or service the right amount of times", "unexpected_number_of_events_found": "Some logs may have been processed, " "or the results found are more than expected", + "event_not_found": "The expected log pattern was not found", "incorrect_marker": "The AWS module did not use the correct marker", "incorrect_no_region_found_message": "The AWS module did not show correct message about non-existent region", "incorrect_discard_regex_message": "The AWS module did not show the correct message about discard regex or, " @@ -45,9 +46,11 @@ } TIMEOUT = { - 10: 10, - 20: 20 + 20: 20, + 30: 30, + 40: 40, + 50: 50 } # Paths From a0c49ce205b480bc081c2dbe0d9af1d96beac489 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Wed, 15 May 2024 09:27:03 +0200 Subject: [PATCH 300/419] Update README --- tests/integration/test_aws/README.md | 13 +++++++++---- tests/integration/test_aws/conftest.py | 2 +- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/tests/integration/test_aws/README.md b/tests/integration/test_aws/README.md index d09a519e6df..3952cae996d 100644 --- a/tests/integration/test_aws/README.md +++ b/tests/integration/test_aws/README.md @@ -2,7 +2,7 @@ ## Description -It is a _wodle based_ module that test the capabilities of the Wazuh AWS integration, pulling logs from different +It is a _wodle based_ module that tests the capabilities of the Wazuh AWS integration, pulling logs from different buckets and services. ## Tests directory structure @@ -49,13 +49,13 @@ wazuh/tests/integration/test_aws ## Requirements -- [Proper testing environment](#Setting up a test environment) +- [Proper testing environment](#setting-up-a-test-environment) - [Wazuh](https://github.com/wazuh/qa-integration-framework) repository. - [Testing framework](https://github.com/wazuh/qa-integration-framework) installed. -- Configured buckets, log groups and an inspector assessment with test data in AWS. +- An Inspector assessment with test data in AWS. The rest of the necessary resources are created in test execution time. For a step-by-step example guide using linux go to the [test setup section](#linux) @@ -67,11 +67,16 @@ For a step-by-step example guide using linux go to the [test setup section](#lin more information [here](https://documentation.wazuh.com/current/amazon/services/prerequisites/credentials.html#profiles) with the content: ```ini -[qa] +[default] aws_access_key_id = aws_secret_access_key = ``` +The tests do not expect a particular profile given these can be executed for any AWS environment in which the +profile has the corresponding permissions to create, delete and list S3, VPC (like Flow Logs) and +CloudWatch Logs resources. If a particular profile is required, it can be defined by declaring the `AWS_PROFILE` +environment variable. + ## Setting up a test environment You will need a proper environment to run the integration tests. You can use Docker or any virtual machine. 
If you have diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index 29373c43683..d46ab9a1861 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -58,7 +58,7 @@ def restart_wazuh_function_without_exception(daemon=None): """Boto3 client fixtures""" # Use the environment variable or default to 'dev' -aws_profile = os.environ.get("AWS_PROFILE", "dev") +aws_profile = os.environ.get("AWS_PROFILE", "default") @pytest.fixture() From cef61bc1a9e87a0ad73c7be0fa21461952ba5d56 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Tue, 21 May 2024 10:39:10 +0200 Subject: [PATCH 301/419] Apply suggested changes --- tests/integration/test_aws/configurator.py | 89 ++++++++++--------- tests/integration/test_aws/conftest.py | 22 +++-- .../cases_cloudwatch_multiple_calls.yaml | 3 +- 3 files changed, 64 insertions(+), 50 deletions(-) diff --git a/tests/integration/test_aws/configurator.py b/tests/integration/test_aws/configurator.py index b5cfec38af0..65baaa5be4c 100644 --- a/tests/integration/test_aws/configurator.py +++ b/tests/integration/test_aws/configurator.py @@ -20,6 +20,18 @@ # Local imports from .utils import TEST_DATA_PATH, TEMPLATE_DIR, TEST_CASES_DIR +# Constants +METADATA_SQS = 'sqs_name' +METADATA_RESOURCE_TYPE = 'resource_type' +METADATA_BUCKET = 'bucket_name' +METADATA_VPC = 'vpc_name' +METADATA_LOG_GROUP = 'log_group_name' +METADATA_LOG_STREAM = 'log_stream_name' + +CONFIG_SQS = 'SQS_NAME' +CONFIG_BUCKET = 'BUCKET_NAME' +CONFIG_LOG_GROUP = 'LOG_GROUP_NAME' + # Classes class TestConfigurator: @@ -67,15 +79,11 @@ def _set_session_id(self) -> None: logger.info(f"This test session id is: {self._session_id}") def configure_test(self, configuration_file="", cases_file="") -> None: - """ - Configure and manage the resources for the test. - - Parameters - ---------- - configuration_file : str - The name of the configuration file. - cases_file : str - The name of the test cases file. + """Configure and manage the resources for the test. + + Args: + configuration_file (str): The name of the configuration file. + cases_file (str): The name of the test cases file. """ # Set test cases yaml path cases_yaml_path = join(TEST_DATA_PATH, TEST_CASES_DIR, self.module, cases_file) @@ -91,14 +99,11 @@ def configure_test(self, configuration_file="", cases_file="") -> None: parameters=parameters) def _load_configuration_template(self, configuration_file: str, parameters: str) -> None: - """Set the configuration template of the test - - Parameters - ---------- - configuration_file : str - The name of the configuration file. - parameters : str - The test parameters. + """Set the configuration template of the test. + + Args: + configuration_file (str): The name of the configuration file. + parameters (str): The test parameters. """ if configuration_file != "": # Set config path @@ -112,14 +117,10 @@ def _load_configuration_template(self, configuration_file: str, parameters: str) ) def _modify_metadata(self, parameters: list) -> None: - """Modify raw data to add test session information - - Parameters - ---------- - parameters : list - The parameters of the test. - metadata : list - The metadata of the test. + """Modify raw data to add test session information. + + Args: + parameters (list): The parameters of the test. """ # Add Suffix (_todelete) to alert a safe deletion of resource in case of errors. 
suffix = f"-{self._session_id}-todelete" @@ -127,31 +128,31 @@ def _modify_metadata(self, parameters: list) -> None: # Add suffix to metadata for param, data in zip(parameters, self._metadata): # Determine whether resource creation is required or not - resource_creation_required = 'resource_type' in data + resource_creation_required = METADATA_RESOURCE_TYPE in data if resource_creation_required: try: - if "sqs_name" in data: - data["sqs_name"] += suffix - param["SQS_NAME"] += suffix - - if data["resource_type"] == "bucket": - data["bucket_name"] += suffix - if 'vpc_name' in data: - data['vpc_name'] += suffix - if "BUCKET_NAME" in param: - param["BUCKET_NAME"] += suffix - - elif data["resource_type"] == "log_group": - if "LOG_GROUP_NAME" in param: + if METADATA_SQS in data: + data[METADATA_SQS] += suffix + param[CONFIG_SQS] += suffix + + if data[METADATA_RESOURCE_TYPE] == "bucket": + data[METADATA_BUCKET] += suffix + if METADATA_VPC in data: + data[METADATA_VPC] += suffix + if CONFIG_BUCKET in param: + param[CONFIG_BUCKET] += suffix + + elif data[METADATA_RESOURCE_TYPE] == "log_group": + if CONFIG_LOG_GROUP in param: suffixed_log_groups = [] - for log_group in data["log_group_name"].split(','): + for log_group in data[METADATA_LOG_GROUP].split(','): log_group += suffix suffixed_log_groups.append(log_group) - data["log_group_name"] = ','.join(suffixed_log_groups) - param["LOG_GROUP_NAME"] = data["log_group_name"] - if "log_stream_name" in data: # It is not present for basic or parser tests - data["log_stream_name"] += suffix + data[METADATA_LOG_GROUP] = ','.join(suffixed_log_groups) + param[CONFIG_LOG_GROUP] = data[METADATA_LOG_GROUP] + if METADATA_LOG_STREAM in data: # It is not present for basic or parser tests + data[METADATA_LOG_STREAM] += suffix except KeyError: raise diff --git a/tests/integration/test_aws/conftest.py b/tests/integration/test_aws/conftest.py index d46ab9a1861..7e9bb9ea2d8 100644 --- a/tests/integration/test_aws/conftest.py +++ b/tests/integration/test_aws/conftest.py @@ -73,6 +73,9 @@ def s3_client(boto_session: boto3.Session): Args: boto_session (boto3.Session): Session used to create the client. + + Returns: + boto3.resources.base.ServiceResource: S3 client to manage bucket resources. """ return boto_session.resource(service_name="s3", region_name=US_EAST_1_REGION) @@ -83,6 +86,9 @@ def ec2_client(boto_session: boto3.Session): Args: boto_session (boto3.Session): Session used to create the client. + + Returns: + Service client instance: EC2 client to manage VPC resources. """ return boto_session.client(service_name="ec2", region_name=US_EAST_1_REGION) @@ -94,6 +100,9 @@ def logs_clients(boto_session: boto3.Session, metadata: dict): Args: boto_session (boto3.Session): Session used to create the client. metadata (dict): Metadata from the module to obtain the defined regions. + + Returns: + list(Service client instance): CloudWatch client list to manage the service's resources in multiple regions. """ # A client for each region is required to generate logs accordingly return [boto_session.client(service_name="logs", region_name=region) @@ -106,6 +115,9 @@ def sqs_client(boto_session: boto3.Session): Args: boto_session (boto3.Session): Session used to create the client. + + Returns: + Service client instance: SQS client to manage the queue resources. 
""" return boto_session.client(service_name="sqs", region_name=US_EAST_1_REGION) @@ -154,11 +166,11 @@ def log_groups_manager(logs_clients): """Initializes a set to manage the creation and deletion of the log groups used throughout the test session. Args: - logs_clients (Service client instance): CloudWatch Logs client to manage the CloudWatch resources. + logs_clients (list(Service client instance)): CloudWatch Logs client list to manage the CloudWatch resources. Yields: log_groups (set): Set of log groups. - logs_clients (Service client instance): CloudWatch Logs client to manage the CloudWatch resources. + logs_clients (list(Service client instance)): CloudWatch Logs client list to manage the CloudWatch resources. """ # Create log groups set log_groups: set = set() @@ -387,7 +399,7 @@ def create_test_log_group(log_groups_manager, """Create a log group. Args: - log_groups_manager (fixture): Log groups set and CloudWatch clients. + log_groups_manager (tuple): Log groups set and CloudWatch clients. metadata (dict): Log group information. """ # Get log group names @@ -432,7 +444,7 @@ def create_test_log_stream(metadata: dict, log_groups_manager) -> None: Args: metadata (dict): Log group information. - log_groups_manager (fixture): Log groups set and CloudWatch clients. + log_groups_manager (tuple): Log groups set and CloudWatch clients. """ # Get log group names log_group_names = metadata["log_group_name"].split(',') @@ -478,7 +490,7 @@ def manage_log_group_events(metadata: dict, logs_clients): Args: metadata (dict): Metadata to get the parameters. - logs_clients (Service client instance): CloudWatch Logs client to manage the CloudWatch resources. + logs_clients (list(Service client instance)): CloudWatch Logs client list to manage the CloudWatch resources. 
""" # Get log group names log_group_names = metadata["log_group_name"].split(',') diff --git a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_multiple_calls.yaml b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_multiple_calls.yaml index ec14a52babd..02ef034784e 100644 --- a/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_multiple_calls.yaml +++ b/tests/integration/test_aws/data/test_cases/only_logs_after_test_module/cases_cloudwatch_multiple_calls.yaml @@ -9,4 +9,5 @@ log_group_name: wazuh-cloudwatchlogs-integration-tests log_stream_name: wazuh-cloudwatchlogs-integration-tests-stream expected_results: 3 - only_logs_after: 2023-JAN-12 \ No newline at end of file + only_logs_after: 2023-JAN-12 + \ No newline at end of file From 84f9aab2f5cf29fefb768bc66c8d89c5817b5bba Mon Sep 17 00:00:00 2001 From: Eduardo Date: Fri, 18 Aug 2023 12:50:16 -0300 Subject: [PATCH 302/419] Update Readme --- tests/integration/test_aws/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_aws/README.md b/tests/integration/test_aws/README.md index 3952cae996d..a8645d51d51 100644 --- a/tests/integration/test_aws/README.md +++ b/tests/integration/test_aws/README.md @@ -105,7 +105,7 @@ _We are using **Ubuntu 22.04** for this example:_ ```shell script # Install pip apt install python3-pip git -y - + # Clone `wazuh` repository within your testing environment git clone https://github.com/wazuh/wazuh.git From 7225d6180c87bea7776a0409ed44c4151596fe57 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Mon, 20 May 2024 18:00:38 +0200 Subject: [PATCH 303/419] Add AWS integration tests workflow --- .../integration-tests-aws-tier-0-1.yml | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 .github/workflows/integration-tests-aws-tier-0-1.yml diff --git a/.github/workflows/integration-tests-aws-tier-0-1.yml b/.github/workflows/integration-tests-aws-tier-0-1.yml new file mode 100644 index 00000000000..064e5617a3e --- /dev/null +++ b/.github/workflows/integration-tests-aws-tier-0-1.yml @@ -0,0 +1,87 @@ +name: Integration tests for AWS - Tier 0 and 1 + +on: + workflow_dispatch: + inputs: + base_branch: + description: 'Base branch' + required: true + default: 'main' + pull_request: + types: [opened, ready_for_review] + paths: + - wodles/aws/** + +jobs: + build: + env: + BRANCH_NAME: ${{ github.head_ref || github.ref_name }} + BRANCH_BASE: ${{ github.base_ref || inputs.base_branch }} + AWS_ACCESS_KEY_ID: ${{ secrets.ITS_AWS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.ITS_AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: 'us-east-1' + runs-on: ubuntu-latest + steps: + - name: Checkout Repo + uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version-file: ".github/workflows/.python-version-it" + architecture: x64 + # Install wazuh server for linux. 
+ - name: Install wazuh server for linux + run: | + echo 'USER_LANGUAGE="en"' > ./etc/preloaded-vars.conf + echo "" >> ./etc/preloaded-vars.conf + echo 'USER_NO_STOP="y"' >> ./etc/preloaded-vars.conf + echo "" >> ./etc/preloaded-vars.conf + echo 'USER_INSTALL_TYPE="server"' >> ./etc/preloaded-vars.conf + echo "" >> ./etc/preloaded-vars.conf + echo "USER_DIR=/var/ossec" >> ./etc/preloaded-vars.conf + echo "" >> ./etc/preloaded-vars.conf + echo 'USER_ENABLE_EMAIL="n"' >> ./etc/preloaded-vars.conf + echo "" >> ./etc/preloaded-vars.conf + echo 'USER_ENABLE_SYSCHECK="n"' >> ./etc/preloaded-vars.conf + echo "" >> ./etc/preloaded-vars.conf + echo 'USER_ENABLE_ROOTCHECK="n"' >> ./etc/preloaded-vars.conf + echo "" >> ./etc/preloaded-vars.conf + echo 'USER_ENABLE_SYSCOLLECTOR="n"' >> ./etc/preloaded-vars.conf + echo "" >> ./etc/preloaded-vars.conf + echo 'USER_ENABLE_SCA="n"' >> ./etc/preloaded-vars.conf + echo "" >> ./etc/preloaded-vars.conf + echo 'USER_WHITE_LIST="n"' >> ./etc/preloaded-vars.conf + echo "" >> ./etc/preloaded-vars.conf + echo 'USER_ENABLE_SYSLOG="n"' >> ./etc/preloaded-vars.conf + echo "" >> ./etc/preloaded-vars.conf + echo 'USER_ENABLE_AUTHD="n"' >> ./etc/preloaded-vars.conf + echo "" >> ./etc/preloaded-vars.conf + echo 'USER_ENABLE_UPDATE_CHECK="n"' >> ./etc/preloaded-vars.conf + echo "" >> ./etc/preloaded-vars.conf + echo 'USER_AUTO_START="y"' >> ./etc/preloaded-vars.conf + echo "" >> ./etc/preloaded-vars.conf + sudo sh install.sh + rm ./etc/preloaded-vars.conf + # Build wazuh server for linux. + - name: Build wazuh server for linux + run: | + make deps -C src TARGET=server -j2 + make -C src TARGET=server -j2 + # Download and install integration tests framework. + - name: Download and install integration tests framework + run: | + if [ "X`git ls-remote https://github.com/wazuh/qa-integration-framework.git ${BRANCH_NAME}`" != "X" ]; then + QA_BRANCH=${BRANCH_NAME} + elif [ "X`git ls-remote https://github.com/wazuh/qa-integration-framework.git ${BRANCH_BASE}`" != "X" ]; then + QA_BRANCH=${BRANCH_BASE} + else + QA_BRANCH="main" + fi + git clone -b ${QA_BRANCH} --single-branch https://github.com/wazuh/qa-integration-framework.git + sudo pip install qa-integration-framework/ + sudo rm -rf qa-integration-framework/ + # Run AWS integration tests. 
+      - name: Run AWS integration tests
+        run: |
+          cd tests/integration
+          sudo python -m pytest --tier 0 --tier 1 test_aws/

From a0c49ce205b480bc081c2dbe0d9af1d96beac489 Mon Sep 17 00:00:00 2001
From: Facundo Dalmau
Date: Fri, 24 May 2024 16:42:28 +0200
Subject: [PATCH 304/419] Add test execution depending on modified files

---
 .../integration-tests-aws-tier-0-1.yml        | 105 +++++++++++++++++-
 1 file changed, 99 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/integration-tests-aws-tier-0-1.yml b/.github/workflows/integration-tests-aws-tier-0-1.yml
index 064e5617a3e..cb816effd2a 100644
--- a/.github/workflows/integration-tests-aws-tier-0-1.yml
+++ b/.github/workflows/integration-tests-aws-tier-0-1.yml
@@ -7,10 +7,6 @@ on:
         description: 'Base branch'
         required: true
         default: 'main'
-  pull_request:
-    types: [opened, ready_for_review]
-    paths:
-      - wodles/aws/**
 
 jobs:
   build:
@@ -80,8 +76,105 @@ jobs:
           git clone -b ${QA_BRANCH} --single-branch https://github.com/wazuh/qa-integration-framework.git
           sudo pip install qa-integration-framework/
           sudo rm -rf qa-integration-framework/
+      - name: Set AWS credentials file
+        run: |
+          cat >> /root/.aws/credentials << EOF
+          [default]
+          aws_access_key_id=$AWS_ACCESS_KEY_ID
+          aws_secret_access_key=$AWS_SECRET_ACCESS_KEY
+          region=us-east-1
+          EOF
       # Run AWS integration tests.
-      - name: Run AWS integration tests
+      - name: Run Parser related tests
+        if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/aws_s3.py') ||
+            contains(steps.get_modified_files.outputs.files, 'wodles/aws/aws_tools.py')
+        run: |
+          cd tests/integration
+          sudo python3 -m pytest --tier 0 --tier 1 -k kms test_aws/test_parser.py
+      - name: Run every test due to base WazuhIntegration class change
+        if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/wazuh_integration.py')
+        run: |
+          cd tests/integration
+          sudo python3 -m pytest --tier 0 --tier 1 test_aws/
+      # Bucket tests
+      - name: Run Buckets tests
+        if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/aws_bucket.py')
+        run: |
+          cd tests/integration
+          sudo python3 -m pytest --tier 0 --tier 1 -k kms test_aws/
+          sudo python3 -m pytest --tier 0 --tier 1 -k macie test_aws/
+          sudo python3 -m pytest --tier 0 --tier 1 -k trusted_advisor test_aws/
+      - name: Run Config tests
+        if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/config.py') ||
+            contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/aws_bucket.py')
+        run: |
+          cd tests/integration
+          sudo python3 -m pytest --tier 0 --tier 1 -k config test_aws/
+      - name: Run GuardDuty tests
+        if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/guardduty.py') ||
+            contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/aws_bucket.py')
+        run: |
+          cd tests/integration
+          sudo python3 -m pytest --tier 0 --tier 1 -k guardduty test_aws/
+      - name: Run CloudTrail tests
+        if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/cloudtrail.py') ||
+            contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/aws_bucket.py')
+        run: |
+          cd tests/integration
+          sudo python3 -m pytest --tier 0 --tier 1 -k cloudtrail test_aws/
+      - name: Run Load Balancers tests
+        if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/load_balancers.py') ||
contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/load_balancers.py') || + contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/aws_bucket.py') + run: | + cd tests/integration + sudo python3 -m pytest --tier 0 --tier 1 -k alb test_aws/ + sudo python3 -m pytest --tier 0 --tier 1 -k clb test_aws/ + sudo python3 -m pytest --tier 0 --tier 1 -k nlb test_aws/ + - name: Run Server Access tests + if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/server_access.py') || + contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/aws_bucket.py') + run: | + cd tests/integration + sudo python3 -m pytest --tier 0 --tier 1 -k server_access test_aws/ + - name: Run Umbrella tests + if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/umbrella.py') || + contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/aws_bucket.py') + run: | + cd tests/integration + sudo python3 -m pytest --tier 0 --tier 1 -k cisco test_aws/ + - name: Run VPC Flow tests + if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/vpcflow.py') || + contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/aws_bucket.py') run: | cd tests/integration - sudo python -m pytest --tier 0 --tier 1 test_aws/ + sudo python3 -m pytest --tier 0 --tier 1 -k vpc test_aws/ + - name: Run WAF tests + if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/waf.py') || + contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/aws_bucket.py') + run: | + cd tests/integration + sudo python3 -m pytest --tier 0 --tier 1 -k waf test_aws/ + # Services tests + - name: Run CloudWatch tests + if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/services/cloudwatchlogs.py') || + contains(steps.get_modified_files.outputs.files, 'wodles/aws/services/aws_service.py') + run: | + cd tests/integration + sudo python3 -m pytest --tier 0 --tier 1 -k cloudwatch test_aws/ + - name: Run Inspector tests + if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/services/inspector.py') || + contains(steps.get_modified_files.outputs.files, 'wodles/aws/services/aws_service.py') + run: | + cd tests/integration + sudo python3 -m pytest --tier 0 --tier 1 -k inspector test_aws/ + # Custom Logs Buckets tests + - name: Run Inspector tests + if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/subscribers/**') + cd tests/integration + sudo python3 -m pytest --tier 0 --tier 1 test_aws/test_custom_bucket.py \ No newline at end of file From 88db70f442ef0d5a0f51e74e843f78c896be2dc4 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Fri, 24 May 2024 16:42:53 +0200 Subject: [PATCH 305/419] Update with required minimum AWS permissions --- tests/integration/test_aws/README.md | 54 +++++++++++++++++++++++++--- 1 file changed, 50 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_aws/README.md b/tests/integration/test_aws/README.md index a8645d51d51..9f54831a42f 100644 --- a/tests/integration/test_aws/README.md +++ b/tests/integration/test_aws/README.md @@ -72,10 +72,56 @@ aws_access_key_id = aws_secret_access_key = ``` -The tests do not expect a particular profile given these can be executed for any AWS environment in which the -profile has the corresponding permissions to create, delete and list S3, VPC (like Flow Logs) and -CloudWatch Logs resources. If a particular profile is required, it can be defined by declaring the `AWS_PROFILE` -environment variable. 
+The provided credentials must have the following set of minimum permissions defined in AWS: +``` + "s3:PutObject", + "s3:PutObjectAcl", + "s3:GetObject", + "s3:GetObjectAcl", + "s3:ListBucket", + "s3:CreateBucket", + "s3:DeleteObject", + "s3:DeleteBucket", + "s3:PutBucketNotification", + "s3:GetBucketAcl" + + "ec2:CreateVpc", + "ec2:CreateSubnet", + "ec2:DescribeAvailabilityZones", + "ec2:CreateRouteTable", + "ec2:CreateRoute", + "ec2:AssociateRouteTable", + "ec2:ModifyVpcAttribute", + "ec2:DeleteFlowLogs", + "ec2:DeleteVpc", + "ec2:DeleteRouteTable", + "ec2:DeleteRoute", + "ec2:CreateFlowLogs", + "ec2:DescribeFlowLogs", + "ec2:CreateTags" + + "logs:CreateLogStream", + "logs:DeleteLogGroup", + "logs:DescribeLogStreams", + "logs:CreateLogGroup", + "logs:GetLogEvents", + "logs:DeleteLogStream", + "logs:PutLogEvents", + "logs:CreateLogDelivery", + "logs:DeleteLogDelivery", + "logs:PutResourcePolicy" + + "sqs:ReceiveMessage", + "sqs:CreateQueue", + "sqs:DeleteMessage", + "sqs:DeleteQueue", + "sqs:SetQueueAttributes", + "sqs:GetQueueAttributes", + "sqs:GetQueueUrl" + + "inspector:ListFindings", + "inspector:DescribeFindings" +``` ## Setting up a test environment From bf473200ce2b597ea87a66976b44d568469be502 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Fri, 24 May 2024 17:17:00 +0200 Subject: [PATCH 306/419] Add manual execution condition --- .github/workflows/integration-tests-aws-tier-0-1.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/integration-tests-aws-tier-0-1.yml b/.github/workflows/integration-tests-aws-tier-0-1.yml index cb816effd2a..eb80e31f03f 100644 --- a/.github/workflows/integration-tests-aws-tier-0-1.yml +++ b/.github/workflows/integration-tests-aws-tier-0-1.yml @@ -7,6 +7,9 @@ on: description: 'Base branch' required: true default: 'main' + pull_request: + paths: + - "wodles/aws/**" jobs: build: @@ -92,7 +95,8 @@ jobs: cd tests/integration sudo python3 -m pytest --tier 0 --tier 1 -k kms test_aws/test_parser.py - name: Run every test due to base WazuhIntegration class change - if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/wazuh_integration.py') + if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/wazuh_integration.py') || + ${{ github.event_name == 'workflow_dispatch' }} run: | cd tests/integration sudo python3 -m pytest --tier 0 --tier 1 -k kms test_aws/ From 5ea6faa0e622065200accd3e59d38c13da04bcd1 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Mon, 27 May 2024 12:41:39 +0200 Subject: [PATCH 307/419] Modify secrets names --- .github/workflows/integration-tests-aws-tier-0-1.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/integration-tests-aws-tier-0-1.yml b/.github/workflows/integration-tests-aws-tier-0-1.yml index eb80e31f03f..299ac91f038 100644 --- a/.github/workflows/integration-tests-aws-tier-0-1.yml +++ b/.github/workflows/integration-tests-aws-tier-0-1.yml @@ -16,8 +16,8 @@ jobs: env: BRANCH_NAME: ${{ github.head_ref || github.ref_name }} BRANCH_BASE: ${{ github.base_ref || inputs.base_branch }} - AWS_ACCESS_KEY_ID: ${{ secrets.ITS_AWS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.ITS_AWS_SECRET_ACCESS_KEY }} + AWS_ACCESS_KEY_ID: ${{ secrets.IT_AWS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.IT_AWS_SECRET_ACCESS_KEY }} AWS_DEFAULT_REGION: 'us-east-1' runs-on: ubuntu-latest steps: From ffa6d341767533c0530eb076995ef6abc60799eb Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Tue, 28 May 2024 11:00:18 +0200 Subject: [PATCH 308/419] Add 
Custom Logs Buckets test run --- .../integration-tests-aws-tier-0-1.yml | 25 +++++++++---------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/.github/workflows/integration-tests-aws-tier-0-1.yml b/.github/workflows/integration-tests-aws-tier-0-1.yml index 299ac91f038..658d05eaaac 100644 --- a/.github/workflows/integration-tests-aws-tier-0-1.yml +++ b/.github/workflows/integration-tests-aws-tier-0-1.yml @@ -28,6 +28,16 @@ jobs: with: python-version-file: ".github/workflows/.python-version-it" architecture: x64 + - name: Set AWS credentials file + run: | + sudo aws configure set aws_access_key_id ${{ secrets.IT_AWS_KEY_ID }} --profile default + sudo aws configure set aws_secret_access_key ${{ secrets.IT_AWS_SECRET_ACCESS_KEY }} --profile default + sudo aws configure set default.region us-east-1 --profile default + # Build wazuh server for linux. + - name: Build wazuh server for linux + run: | + make deps -C src TARGET=server -j2 + make -C src TARGET=server -j2 # Install wazuh server for linux. - name: Install wazuh server for linux run: | @@ -61,11 +71,6 @@ jobs: echo "" >> ./etc/preloaded-vars.conf sudo sh install.sh rm ./etc/preloaded-vars.conf - # Build wazuh server for linux. - - name: Build wazuh server for linux - run: | - make deps -C src TARGET=server -j2 - make -C src TARGET=server -j2 # Download and install integration tests framework. - name: Download and install integration tests framework run: | @@ -77,16 +82,9 @@ jobs: QA_BRANCH="main" fi git clone -b ${QA_BRANCH} --single-branch https://github.com/wazuh/qa-integration-framework.git + sudo pip install -r qa-integration-framework/requirements.txt sudo pip install qa-integration-framework/ sudo rm -rf qa-integration-framework/ - - name: Set AWS credentials file - run: | - cat >> /root/.aws/credentials << EOF - [default] - aws_access_key_id=$AWS_ACCESS_KEY_ID - aws_secret_access_key=$AWS_SECRET_ACCESS_KEY - region=us-east-1 - EOF # Run AWS integration tests. 
- name: Run Parser related tests if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/aws_s3.py') || @@ -180,5 +178,6 @@ jobs: # Custom Logs Buckets tests - name: Run Inspector tests if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/subscribers/**') + run: | cd tests/integration sudo python3 -m pytest --tier 0 --tier 1 test_aws/test_custom_bucket.py \ No newline at end of file From 9bb0bfa637e7abea800a0ea6c5c43b39e59db133 Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Tue, 28 May 2024 13:57:32 +0200 Subject: [PATCH 309/419] Fix qa-it-framework use --- .../integration-tests-aws-tier-0-1.yml | 44 +++++++++---------- 1 file changed, 20 insertions(+), 24 deletions(-) diff --git a/.github/workflows/integration-tests-aws-tier-0-1.yml b/.github/workflows/integration-tests-aws-tier-0-1.yml index 658d05eaaac..54c59de1937 100644 --- a/.github/workflows/integration-tests-aws-tier-0-1.yml +++ b/.github/workflows/integration-tests-aws-tier-0-1.yml @@ -7,6 +7,10 @@ on: description: 'Base branch' required: true default: 'main' + base_qa_it_fw_branch: + description: 'Base qa-integration-framework branch' + required: true + default: 'main' pull_request: paths: - "wodles/aws/**" @@ -16,6 +20,7 @@ jobs: env: BRANCH_NAME: ${{ github.head_ref || github.ref_name }} BRANCH_BASE: ${{ github.base_ref || inputs.base_branch }} + QA_IT_FW_BRANCH: ${{ inputs.base_qa_it_fw_branch }} AWS_ACCESS_KEY_ID: ${{ secrets.IT_AWS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.IT_AWS_SECRET_ACCESS_KEY }} AWS_DEFAULT_REGION: 'us-east-1' @@ -33,6 +38,17 @@ jobs: sudo aws configure set aws_access_key_id ${{ secrets.IT_AWS_KEY_ID }} --profile default sudo aws configure set aws_secret_access_key ${{ secrets.IT_AWS_SECRET_ACCESS_KEY }} --profile default sudo aws configure set default.region us-east-1 --profile default + # Download and install integration tests framework. + - name: Download and install integration tests framework + run: | + if [ "X`git ls-remote https://github.com/wazuh/qa-integration-framework.git ${QA_IT_FW_BRANCH}`" != "X" ]; then + QA_BRANCH=${QA_IT_FW_BRANCH} + else + QA_BRANCH="main" + fi + git clone -b ${QA_BRANCH} --single-branch https://github.com/wazuh/qa-integration-framework.git + sudo pip install qa-integration-framework/ + sudo rm -rf qa-integration-framework/ # Build wazuh server for linux. - name: Build wazuh server for linux run: | @@ -71,41 +87,21 @@ jobs: echo "" >> ./etc/preloaded-vars.conf sudo sh install.sh rm ./etc/preloaded-vars.conf - # Download and install integration tests framework. - - name: Download and install integration tests framework - run: | - if [ "X`git ls-remote https://github.com/wazuh/qa-integration-framework.git ${BRANCH_NAME}`" != "X" ]; then - QA_BRANCH=${BRANCH_NAME} - elif [ "X`git ls-remote https://github.com/wazuh/qa-integration-framework.git ${BRANCH_BASE}`" != "X" ]; then - QA_BRANCH=${BRANCH_BASE} - else - QA_BRANCH="main" - fi - git clone -b ${QA_BRANCH} --single-branch https://github.com/wazuh/qa-integration-framework.git - sudo pip install -r qa-integration-framework/requirements.txt - sudo pip install qa-integration-framework/ - sudo rm -rf qa-integration-framework/ # Run AWS integration tests. 
- name: Run Parser related tests if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/aws_s3.py') || contains(steps.get_modified_files.outputs.files, 'wodles/aws/aws_tools.py') run: | cd tests/integration - sudo python3 -m pytest --tier 0 --tier 1 -k kms test_aws/test_parser.py - - name: Run every test due to base WazuhIntegration class change + sudo python3 -m pytest --tier 0 --tier 1 test_aws/test_parser.py + - name: Run every test due to base WazuhIntegration class change or manual dispatch if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/wazuh_integration.py') || ${{ github.event_name == 'workflow_dispatch' }} run: | cd tests/integration - sudo python3 -m pytest --tier 0 --tier 1 -k kms test_aws/ - - name: Run Buckets tests - if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/aws_s3.py') || - contains(steps.get_modified_files.outputs.files, 'wodles/aws/aws_tools.py') - run: | - cd tests/integration - sudo python3 -m pytest --tier 0 --tier 1 -k kms test_aws/test_parser.py + sudo python3 -m pytest --tier 0 --tier 1 test_aws/ # Bucket tests - - name: Run Buckets tests + - name: Run Custom Buckets tests if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/aws_bucket.py') run: | cd tests/integration From 978fc5880ab439ce4849dede1bb6142d83c1cf3f Mon Sep 17 00:00:00 2001 From: Facundo Dalmau Date: Tue, 28 May 2024 17:23:58 +0200 Subject: [PATCH 310/419] Apply suggested enhancements --- .../integration-tests-aws-tier-0-1.yml | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/.github/workflows/integration-tests-aws-tier-0-1.yml b/.github/workflows/integration-tests-aws-tier-0-1.yml index 54c59de1937..071e0d9c0f9 100644 --- a/.github/workflows/integration-tests-aws-tier-0-1.yml +++ b/.github/workflows/integration-tests-aws-tier-0-1.yml @@ -13,6 +13,7 @@ on: default: 'main' pull_request: paths: + - ".github/workflows/integration-tests-aws-tier-0-1.yml" - "wodles/aws/**" jobs: @@ -20,7 +21,7 @@ jobs: env: BRANCH_NAME: ${{ github.head_ref || github.ref_name }} BRANCH_BASE: ${{ github.base_ref || inputs.base_branch }} - QA_IT_FW_BRANCH: ${{ inputs.base_qa_it_fw_branch }} + QA_IT_FW_BRANCH: ${{ github.base_ref || inputs.base_qa_it_fw_branch }} AWS_ACCESS_KEY_ID: ${{ secrets.IT_AWS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.IT_AWS_SECRET_ACCESS_KEY }} AWS_DEFAULT_REGION: 'us-east-1' @@ -33,15 +34,12 @@ jobs: with: python-version-file: ".github/workflows/.python-version-it" architecture: x64 - - name: Set AWS credentials file - run: | - sudo aws configure set aws_access_key_id ${{ secrets.IT_AWS_KEY_ID }} --profile default - sudo aws configure set aws_secret_access_key ${{ secrets.IT_AWS_SECRET_ACCESS_KEY }} --profile default - sudo aws configure set default.region us-east-1 --profile default # Download and install integration tests framework. 
- name: Download and install integration tests framework run: | - if [ "X`git ls-remote https://github.com/wazuh/qa-integration-framework.git ${QA_IT_FW_BRANCH}`" != "X" ]; then + if [ "X`git ls-remote https://github.com/wazuh/qa-integration-framework.git ${BRANCH_BASE}`" != "X" ]; then + QA_BRANCH=${BRANCH_BASE} + elif [ "X`git ls-remote https://github.com/wazuh/qa-integration-framework.git ${QA_IT_FW_BRANCH}`" != "X" ]; then QA_BRANCH=${QA_IT_FW_BRANCH} else QA_BRANCH="main" @@ -49,6 +47,11 @@ jobs: git clone -b ${QA_BRANCH} --single-branch https://github.com/wazuh/qa-integration-framework.git sudo pip install qa-integration-framework/ sudo rm -rf qa-integration-framework/ + - name: Set AWS credentials file + run: | + sudo aws configure set aws_access_key_id ${{ secrets.IT_AWS_KEY_ID }} --profile default + sudo aws configure set aws_secret_access_key ${{ secrets.IT_AWS_SECRET_ACCESS_KEY }} --profile default + sudo aws configure set default.region ${AWS_DEFAULT_REGION} --profile default # Build wazuh server for linux. - name: Build wazuh server for linux run: | @@ -125,7 +128,7 @@ jobs: contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/aws_bucket.py') run: | cd tests/integration - sudo python3 -m pytest --tier 0 --tier 1 -k config test_aws/ + sudo python3 -m pytest --tier 0 --tier 1 -k cloudtrail test_aws/ - name: Run Load Balancers tests if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/load_balancers.py') || contains(steps.get_modified_files.outputs.files, 'wodles/aws/buckets_s3/aws_bucket.py') @@ -176,4 +179,4 @@ jobs: if: contains(steps.get_modified_files.outputs.files, 'wodles/aws/subscribers/**') run: | cd tests/integration - sudo python3 -m pytest --tier 0 --tier 1 test_aws/test_custom_bucket.py \ No newline at end of file + sudo python3 -m pytest --tier 0 --tier 1 test_aws/test_custom_bucket.py From 756652c6e4d6a350581f9397b1e4897fb783f7ea Mon Sep 17 00:00:00 2001 From: GGP1 Date: Tue, 28 May 2024 15:04:31 -0300 Subject: [PATCH 311/419] Add restart_delay variable --- api/test/integration/common.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/api/test/integration/common.yaml b/api/test/integration/common.yaml index 029c0410177..03207ebe49e 100644 --- a/api/test/integration/common.yaml +++ b/api/test/integration/common.yaml @@ -18,6 +18,7 @@ variables: # delays reconnect_delay: 30 + restart_delay: 90 restart_delay_cluster: 150 upgrade_delay: 60 global_db_delay: 30 From ddbd74eb41a598e44dec236bfa2e8658d0883335 Mon Sep 17 00:00:00 2001 From: GGP1 Date: Tue, 28 May 2024 15:04:44 -0300 Subject: [PATCH 312/419] Reorder logs and CTI test cases --- .../test_cluster_endpoints.tavern.yaml | 818 +++++------ .../test_manager_endpoints.tavern.yaml | 1278 ++++++++--------- 2 files changed, 1028 insertions(+), 1068 deletions(-) diff --git a/api/test/integration/test_cluster_endpoints.tavern.yaml b/api/test/integration/test_cluster_endpoints.tavern.yaml index 8f0003eaec4..9108c9b3349 100644 --- a/api/test/integration/test_cluster_endpoints.tavern.yaml +++ b/api/test/integration/test_cluster_endpoints.tavern.yaml @@ -1485,7 +1485,7 @@ stages: total_failed_items: 0 --- -test_name: GET /cluster/{node_id}/daemons/stats +test_name: GET /cluster/{node_id}/logs marks: - cluster @@ -1498,11 +1498,10 @@ marks: stages: - # GET /cluster/{node_id}/daemons/stats - - name: Get all daemons' statistics from {node_id} - request: + - name: Read logs {node_id} + request: &get_cluster_logs verify: False - url: 
"{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/logs" method: GET headers: Authorization: "Bearer {test_login_token}" @@ -1511,358 +1510,273 @@ stages: json: error: 0 data: - affected_items: - - name: wazuh-remoted - - name: wazuh-analysisd - - name: wazuh-db - total_affected_items: 3 + affected_items: !anything failed_items: [] + total_affected_items: !anyint total_failed_items: 0 - # GET /cluster/{node_id}/daemons/stats?daemons_list=wazuh-remoted - - name: Get statistics from a single daemon from {node_id} + - name: Read logs with filters -> limit=3 {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs params: - daemons_list: wazuh-remoted + limit: 3 response: status_code: 200 json: error: 0 data: affected_items: - - name: wazuh-remoted - total_affected_items: 1 + - &cluster_log + description: !anystr + level: !anystr + tag: !anystr + timestamp: !anystr + - <<: *cluster_log + - <<: *cluster_log failed_items: [] + total_affected_items: !anyint total_failed_items: 0 - # GET /cluster/{node_id}/daemons/stats?daemons_list=wazuh-remoted,wazuh-db,wazuh-analysisd - - name: Get statistics from a list of daemons from {node_id} + - name: Read logs with filters -> limit=4 {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs params: - daemons_list: wazuh-remoted,wazuh-db,wazuh-analysisd + limit: 4 response: status_code: 200 json: error: 0 data: affected_items: - - name: wazuh-remoted - - name: wazuh-db - - name: wazuh-analysisd - total_affected_items: 3 + - <<: *cluster_log + - <<: *cluster_log + - <<: *cluster_log + - <<: *cluster_log failed_items: [] + total_affected_items: !anyint total_failed_items: 0 - # GET /cluster/{node_id}/daemons/stats?daemons_list=wrong-daemon-name - - name: Try to get statistics from a wrong daemon from {node_id} + - name: Read logs with filters -> limit=2, sort=tag {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs params: - daemons_list: wrong-daemon-name - response: - status_code: 400 - ---- -test_name: GET /cluster/{node_id}/stats - -marks: - - cluster - - parametrize: - key: node_id - vals: - - master-node - - worker1 - - worker2 - -stages: - - - name: Cluster stats {node_id} today - request: &get_cluster_stats - verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + limit: 2 + sort: tag response: - status_code: 400 - json: - error: 1308 + verify_response_with: + - function: tavern_utils:test_sort_response + extra_kwargs: + key: "tag" + status_code: 200 - - name: Cluster stats {node_id} 2019-08-27 + - name: Read logs with filters -> limit=1, sort=-level {node_id} request: verify: False - <<: *get_cluster_stats + <<: *get_cluster_logs params: - date: "2019-08-27" + limit: 1 + sort: -level response: + verify_response_with: + - function: tavern_utils:test_sort_response + extra_kwargs: + key: "level" + reverse: true status_code: 200 - json: - error: 0 - data: !anything - - name: Cluster stats {node_id} day without stats + - name: Read logs with filters -> 
offset=2, limit=3 {node_id} request: verify: False - <<: *get_cluster_stats + <<: *get_cluster_logs params: - date: "1970-01-01" + limit: 3 + offset: 2 response: - status_code: 400 + status_code: 200 json: - error: 1308 - ---- -test_name: GET /cluster/wrong_node/stats - -marks: - - cluster - -stages: + error: 0 + data: + affected_items: + - <<: *cluster_log + - <<: *cluster_log + - <<: *cluster_log + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - - name: Unexisting_node stats + - name: Read logs with filters -> offset=5, limit=2 {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/unexisting-node/stats" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs + params: + limit: 2 + offset: 5 response: status_code: 200 json: - error: 1 + error: 0 data: - affected_items: [] - total_affected_items: 0 - failed_items: - - error: - code: 1730 - id: - - "unexisting-node" - ---- -test_name: GET /cluster/{node_id}/stats/analysisd - -marks: - - cluster - - parametrize: - key: node_id - vals: - - master-node - - worker1 - - worker2 - -stages: + affected_items: + - <<: *cluster_log + - <<: *cluster_log + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - - name: Analysisd stats {node_id} + - name: Read logs with filters -> tag=wazuh-analysisd, limit=1 {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/analysisd" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs + params: + tag: wazuh-analysisd + limit: 1 response: status_code: 200 json: error: 0 data: affected_items: - - total_events_decoded: !anyfloat - syscheck_events_decoded: !anyfloat - syscollector_events_decoded: !anyfloat - rootcheck_events_decoded: !anyfloat - sca_events_decoded: !anyfloat - winevt_events_decoded: !anyfloat - other_events_decoded: !anyfloat - events_processed: !anyfloat - events_received: !anyfloat - events_dropped: !anyfloat - alerts_written: !anyfloat - firewall_written: !anyfloat - fts_written: !anyfloat - syscheck_queue_usage: !anyfloat - syscheck_queue_size: !anyfloat - syscollector_queue_usage: !anyfloat - syscollector_queue_size: !anyfloat - rootcheck_queue_usage: !anyfloat - rootcheck_queue_size: !anyfloat - sca_queue_usage: !anyfloat - sca_queue_size: !anyfloat - hostinfo_queue_usage: !anyfloat - hostinfo_queue_size: !anyfloat - winevt_queue_usage: !anyfloat - winevt_queue_size: !anyfloat - event_queue_usage: !anyfloat - event_queue_size: !anyfloat - rule_matching_queue_usage: !anyfloat - rule_matching_queue_size: !anyfloat - alerts_queue_usage: !anyfloat - alerts_queue_size: !anyfloat - firewall_queue_usage: !anyfloat - firewall_queue_size: !anyfloat - statistical_queue_usage: !anyfloat - statistical_queue_size: !anyfloat - archives_queue_usage: !anyfloat - archives_queue_size: !anyfloat - total_affected_items: 1 + - <<: *cluster_log failed_items: [] + total_affected_items: !anyint total_failed_items: 0 ---- -test_name: GET /cluster/{node_id}/stats/hourly - -marks: - - cluster - - parametrize: - key: node_id - vals: - - master-node - - worker1 - - worker2 - -stages: - - - name: Hourly node stats {node_id} + - name: Read logs with filters -> tag=wazuh-syscheckd, limit=2 {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/hourly" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs + params: + tag: wazuh-syscheckd + limit: 2 
response: status_code: 200 json: error: 0 data: affected_items: - - averages: !anything - interactions: !anyint - total_affected_items: 1 + - <<: *cluster_log + tag: wazuh-syscheckd + - <<: *cluster_log + tag: wazuh-syscheckd failed_items: [] + total_affected_items: !anyint total_failed_items: 0 ---- -test_name: GET /cluster/{node_id}/stats/remoted - -marks: - - cluster - - parametrize: - key: node_id - vals: - - master-node - - worker1 - - worker2 - -stages: + - name: Read logs with filters -> tag=wazuh-unknown-daemon {node_id} + request: + verify: False + <<: *get_cluster_logs + params: + tag: wazuh-unknown-daemon + response: + status_code: 200 + json: + error: 0 + data: + affected_items: [] + failed_items: [] + total_affected_items: 0 + total_failed_items: 0 - - name: Remoted stats {node_id} + - name: Read logs with filters -> level=info, limit=1 {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/remoted" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs + params: + level: info + limit: 1 response: status_code: 200 json: error: 0 data: affected_items: - - queue_size: !anyfloat - total_queue_size: !anyfloat - tcp_sessions: !anyfloat - evt_count: !anyfloat - ctrl_msg_count: !anyfloat - discarded_count: !anyfloat - sent_bytes: !anyfloat - recv_bytes: !anyfloat - total_affected_items: 1 + - <<: *cluster_log + level: info failed_items: [] + total_affected_items: !anyint total_failed_items: 0 ---- -test_name: GET /cluster/{node_id}/stats/weekly - -marks: - - cluster - - parametrize: - key: node_id - vals: - - master-node - - worker1 - - worker2 - -stages: + - name: Read logs with filters by query (tag=wazuh-syscheckd, level=info) {node_id} + request: + verify: False + <<: *get_cluster_logs + params: + q: tag=wazuh-syscheckd;level=info + response: + status_code: 200 + verify_response_with: + - function: tavern_utils:test_expected_value + extra_kwargs: + key: "tag" + expected_values: "wazuh-syscheckd" + - function: tavern_utils:test_expected_value + extra_kwargs: + key: "level" + expected_values: "info" - - name: Weekly node stats {node_id} + - name: Read logs using valid select request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/weekly" - method: GET - headers: - Authorization: "Bearer {test_login_token}" + <<: *get_cluster_logs + params: + select: 'timestamp,tag' response: + verify_response_with: + # Check response item keys are the selected keys + function: tavern_utils:test_select_key_affected_items + extra_kwargs: + select_key: 'timestamp,tag' status_code: 200 json: error: 0 data: - affected_items: - - Sun: - hours: !anything - interactions: !anyint - - Mon: - hours: !anything - interactions: !anyint - - Tue: - hours: !anything - interactions: !anyint - - Wed: - hours: !anything - interactions: !anyint - - Thu: - hours: !anything - interactions: !anyint - - Fri: - hours: !anything - interactions: !anyint - - Sat: - hours: !anything - interactions: !anyint - total_affected_items: 7 + total_affected_items: !anyint failed_items: [] total_failed_items: 0 + - name: Try to read logs using invalid select + request: + verify: False + <<: *get_cluster_logs + params: + select: 'noexists' + response: + status_code: 400 + json: &invalid_select + error: 1724 + + - name: Get distinct cluster node logs + request: + verify: False + <<: *get_cluster_logs + params: + distinct: true + response: + status_code: 200 + verify_response_with: + function: tavern_utils:test_distinct_key + 
--- -test_name: GET /cluster/{node_id}/status +test_name: GET /cluster/{node_id}/logs/summary marks: - cluster - parametrize: key: node_id vals: - - master-node - worker1 - worker2 stages: - - name: Read status {node_id} + - name: Read logs summary {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/status" + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/logs/summary" method: GET headers: Authorization: "Bearer {test_login_token}" @@ -1875,9 +1789,10 @@ stages: failed_items: [] total_affected_items: !anyint total_failed_items: 0 + message: !anystr --- -test_name: GET /cluster/{node_id}/logs +test_name: GET /cluster/{node_id}/daemons/stats marks: - cluster @@ -1890,10 +1805,11 @@ marks: stages: - - name: Read logs {node_id} - request: &get_cluster_logs + # GET /cluster/{node_id}/daemons/stats + - name: Get all daemons' statistics from {node_id} + request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/logs" + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" method: GET headers: Authorization: "Bearer {test_login_token}" @@ -1902,273 +1818,358 @@ stages: json: error: 0 data: - affected_items: !anything + affected_items: + - name: wazuh-remoted + - name: wazuh-analysisd + - name: wazuh-db + total_affected_items: 3 failed_items: [] - total_affected_items: !anyint total_failed_items: 0 - - name: Read logs with filters -> limit=3 {node_id} + # GET /cluster/{node_id}/daemons/stats?daemons_list=wazuh-remoted + - name: Get statistics from a single daemon from {node_id} request: verify: False - <<: *get_cluster_logs + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" + method: GET + headers: + Authorization: "Bearer {test_login_token}" params: - limit: 3 + daemons_list: wazuh-remoted response: status_code: 200 json: error: 0 data: affected_items: - - &cluster_log - description: !anystr - level: !anystr - tag: !anystr - timestamp: !anystr - - <<: *cluster_log - - <<: *cluster_log + - name: wazuh-remoted + total_affected_items: 1 failed_items: [] - total_affected_items: !anyint total_failed_items: 0 - - name: Read logs with filters -> limit=4 {node_id} + # GET /cluster/{node_id}/daemons/stats?daemons_list=wazuh-remoted,wazuh-db,wazuh-analysisd + - name: Get statistics from a list of daemons from {node_id} request: verify: False - <<: *get_cluster_logs + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" + method: GET + headers: + Authorization: "Bearer {test_login_token}" params: - limit: 4 + daemons_list: wazuh-remoted,wazuh-db,wazuh-analysisd response: status_code: 200 json: error: 0 data: affected_items: - - <<: *cluster_log - - <<: *cluster_log - - <<: *cluster_log - - <<: *cluster_log + - name: wazuh-remoted + - name: wazuh-db + - name: wazuh-analysisd + total_affected_items: 3 failed_items: [] - total_affected_items: !anyint total_failed_items: 0 - - name: Read logs with filters -> limit=2, sort=tag {node_id} + # GET /cluster/{node_id}/daemons/stats?daemons_list=wrong-daemon-name + - name: Try to get statistics from a wrong daemon from {node_id} request: verify: False - <<: *get_cluster_logs + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/daemons/stats" + method: GET + headers: + Authorization: "Bearer {test_login_token}" params: - limit: 2 - sort: tag + daemons_list: wrong-daemon-name response: - verify_response_with: - - function: tavern_utils:test_sort_response - extra_kwargs: - key: "tag" - status_code: 200 + status_code: 400 - - name: Read 
logs with filters -> limit=1, sort=-level {node_id} +--- +test_name: GET /cluster/{node_id}/stats + +marks: + - cluster + - parametrize: + key: node_id + vals: + - master-node + - worker1 + - worker2 + +stages: + + - name: Cluster stats {node_id} today + request: &get_cluster_stats + verify: False + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats" + method: GET + headers: + Authorization: "Bearer {test_login_token}" + response: + status_code: 400 + json: + error: 1308 + + - name: Cluster stats {node_id} 2019-08-27 request: verify: False - <<: *get_cluster_logs + <<: *get_cluster_stats params: - limit: 1 - sort: -level - response: - verify_response_with: - - function: tavern_utils:test_sort_response - extra_kwargs: - key: "level" - reverse: true - status_code: 200 - - - name: Read logs with filters -> offset=2, limit=3 {node_id} - request: - verify: False - <<: *get_cluster_logs - params: - limit: 3 - offset: 2 + date: "2019-08-27" response: status_code: 200 json: error: 0 - data: - affected_items: - - <<: *cluster_log - - <<: *cluster_log - - <<: *cluster_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + data: !anything - - name: Read logs with filters -> offset=5, limit=2 {node_id} + - name: Cluster stats {node_id} day without stats request: verify: False - <<: *get_cluster_logs + <<: *get_cluster_stats params: - limit: 2 - offset: 5 + date: "1970-01-01" response: - status_code: 200 + status_code: 400 json: - error: 0 - data: - affected_items: - - <<: *cluster_log - - <<: *cluster_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + error: 1308 - - name: Read logs with filters -> tag=wazuh-analysisd, limit=1 {node_id} +--- +test_name: GET /cluster/wrong_node/stats + +marks: + - cluster + +stages: + + - name: Unexisting_node stats request: verify: False - <<: *get_cluster_logs - params: - tag: wazuh-analysisd - limit: 1 + url: "{protocol:s}://{host:s}:{port:d}/cluster/unexisting-node/stats" + method: GET + headers: + Authorization: "Bearer {test_login_token}" response: status_code: 200 json: - error: 0 + error: 1 data: - affected_items: - - <<: *cluster_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + affected_items: [] + total_affected_items: 0 + failed_items: + - error: + code: 1730 + id: + - "unexisting-node" - - name: Read logs with filters -> tag=wazuh-syscheckd, limit=2 {node_id} +--- +test_name: GET /cluster/{node_id}/stats/analysisd + +marks: + - cluster + - parametrize: + key: node_id + vals: + - master-node + - worker1 + - worker2 + +stages: + + - name: Analysisd stats {node_id} request: verify: False - <<: *get_cluster_logs - params: - tag: wazuh-syscheckd - limit: 2 + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/analysisd" + method: GET + headers: + Authorization: "Bearer {test_login_token}" response: status_code: 200 json: error: 0 data: affected_items: - - <<: *cluster_log - tag: wazuh-syscheckd - - <<: *cluster_log - tag: wazuh-syscheckd + - total_events_decoded: !anyfloat + syscheck_events_decoded: !anyfloat + syscollector_events_decoded: !anyfloat + rootcheck_events_decoded: !anyfloat + sca_events_decoded: !anyfloat + winevt_events_decoded: !anyfloat + other_events_decoded: !anyfloat + events_processed: !anyfloat + events_received: !anyfloat + events_dropped: !anyfloat + alerts_written: !anyfloat + firewall_written: !anyfloat + fts_written: !anyfloat + syscheck_queue_usage: !anyfloat + syscheck_queue_size: !anyfloat + syscollector_queue_usage: 
!anyfloat + syscollector_queue_size: !anyfloat + rootcheck_queue_usage: !anyfloat + rootcheck_queue_size: !anyfloat + sca_queue_usage: !anyfloat + sca_queue_size: !anyfloat + hostinfo_queue_usage: !anyfloat + hostinfo_queue_size: !anyfloat + winevt_queue_usage: !anyfloat + winevt_queue_size: !anyfloat + event_queue_usage: !anyfloat + event_queue_size: !anyfloat + rule_matching_queue_usage: !anyfloat + rule_matching_queue_size: !anyfloat + alerts_queue_usage: !anyfloat + alerts_queue_size: !anyfloat + firewall_queue_usage: !anyfloat + firewall_queue_size: !anyfloat + statistical_queue_usage: !anyfloat + statistical_queue_size: !anyfloat + archives_queue_usage: !anyfloat + archives_queue_size: !anyfloat + total_affected_items: 1 failed_items: [] - total_affected_items: !anyint total_failed_items: 0 - - name: Read logs with filters -> tag=wazuh-unknown-daemon {node_id} +--- +test_name: GET /cluster/{node_id}/stats/hourly + +marks: + - cluster + - parametrize: + key: node_id + vals: + - master-node + - worker1 + - worker2 + +stages: + + - name: Hourly node stats {node_id} request: verify: False - <<: *get_cluster_logs - params: - tag: wazuh-unknown-daemon + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/hourly" + method: GET + headers: + Authorization: "Bearer {test_login_token}" response: status_code: 200 json: error: 0 data: - affected_items: [] + affected_items: + - averages: !anything + interactions: !anyint + total_affected_items: 1 failed_items: [] - total_affected_items: 0 total_failed_items: 0 - - name: Read logs with filters -> level=info, limit=1 {node_id} +--- +test_name: GET /cluster/{node_id}/stats/remoted + +marks: + - cluster + - parametrize: + key: node_id + vals: + - master-node + - worker1 + - worker2 + +stages: + + - name: Remoted stats {node_id} request: verify: False - <<: *get_cluster_logs - params: - level: info - limit: 1 + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/remoted" + method: GET + headers: + Authorization: "Bearer {test_login_token}" response: status_code: 200 json: error: 0 data: affected_items: - - <<: *cluster_log - level: info + - queue_size: !anyfloat + total_queue_size: !anyfloat + tcp_sessions: !anyfloat + evt_count: !anyfloat + ctrl_msg_count: !anyfloat + discarded_count: !anyfloat + sent_bytes: !anyfloat + recv_bytes: !anyfloat + total_affected_items: 1 failed_items: [] - total_affected_items: !anyint total_failed_items: 0 - - name: Read logs with filters by query (tag=sca, level=info) {node_id} - request: - verify: False - <<: *get_cluster_logs - params: - q: tag=sca;level=info - response: - status_code: 200 - verify_response_with: - - function: tavern_utils:test_expected_value - extra_kwargs: - key: "tag" - expected_values: "sca" - - function: tavern_utils:test_expected_value - extra_kwargs: - key: "level" - expected_values: "info" +--- +test_name: GET /cluster/{node_id}/stats/weekly - - name: Read logs using valid select +marks: + - cluster + - parametrize: + key: node_id + vals: + - master-node + - worker1 + - worker2 + +stages: + + - name: Weekly node stats {node_id} request: verify: False - <<: *get_cluster_logs - params: - select: 'timestamp,tag' + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/stats/weekly" + method: GET + headers: + Authorization: "Bearer {test_login_token}" response: - verify_response_with: - # Check response item keys are the selected keys - function: tavern_utils:test_select_key_affected_items - extra_kwargs: - select_key: 'timestamp,tag' status_code: 200 json: error: 0 data: - 
total_affected_items: !anyint + affected_items: + - Sun: + hours: !anything + interactions: !anyint + - Mon: + hours: !anything + interactions: !anyint + - Tue: + hours: !anything + interactions: !anyint + - Wed: + hours: !anything + interactions: !anyint + - Thu: + hours: !anything + interactions: !anyint + - Fri: + hours: !anything + interactions: !anyint + - Sat: + hours: !anything + interactions: !anyint + total_affected_items: 7 failed_items: [] total_failed_items: 0 - - name: Try to read logs using invalid select - request: - verify: False - <<: *get_cluster_logs - params: - select: 'noexists' - response: - status_code: 400 - json: &invalid_select - error: 1724 - - - name: Get distinct cluster node logs - request: - verify: False - <<: *get_cluster_logs - params: - distinct: true - response: - status_code: 200 - verify_response_with: - function: tavern_utils:test_distinct_key - --- -test_name: GET /cluster/{node_id}/logs/summary +test_name: GET /cluster/{node_id}/status marks: - cluster - parametrize: key: node_id vals: + - master-node - worker1 - worker2 stages: - - name: Read logs summary {node_id} + - name: Read status {node_id} request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/logs/summary" + url: "{protocol:s}://{host:s}:{port:d}/cluster/{node_id}/status" method: GET headers: Authorization: "Bearer {test_login_token}" @@ -2181,7 +2182,6 @@ stages: failed_items: [] total_affected_items: !anyint total_failed_items: 0 - message: !anystr --- test_name: PUT /cluster/restart diff --git a/api/test/integration/test_manager_endpoints.tavern.yaml b/api/test/integration/test_manager_endpoints.tavern.yaml index 5937ff9f4cc..27dca4a8556 100644 --- a/api/test/integration/test_manager_endpoints.tavern.yaml +++ b/api/test/integration/test_manager_endpoints.tavern.yaml @@ -582,256 +582,218 @@ stages: total_failed_items: 0 --- -test_name: GET /manager/api/config +test_name: GET /manager/logs stages: - # GET /manager/api/config - - name: Get API configuration + # GET /manager/logs + - name: Request request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/api/config" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET headers: Authorization: "Bearer {test_login_token}" response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - node_name: !anystr - node_api_config: - host: !anystr - port: !anyint - - https: - enabled: !anybool - key: !anystr - cert: !anystr - use_ca: !anybool - ca: !anystr - ssl_protocol: !anystr - ssl_ciphers: !anystr - logs: - level: !anystr - max_size: - enabled: !anybool - size: !anystr - cors: - enabled: !anybool - source_route: !anystr - expose_headers: !anystr - allow_headers: !anystr - allow_credentials: !anybool - access: - max_login_attempts: !anyint - block_time: !anyint - max_request_per_minute: !anyint - drop_privileges: !anybool - experimental_features: !anybool - upload_configuration: - remote_commands: - localfile: - allow: !anybool - exceptions: !anything - wodle_command: - allow: !anybool - exceptions: !anything - limits: - eps: - allow: !anybool - agents: - allow_higher_versions: - allow: !anybool - indexer: - allow: !anybool - total_affected_items: 1 - total_failed_items: 0 - failed_items: [] - ---- -test_name: GET /manager/configuration/validation (OK) - -stages: + status_code: 200 + json: + error: 0 + data: + affected_items: !anything + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - # GET /manager/configuration/validation - - name: Request validation + # 
GET /manager/logs + - name: Filters -> limit=4 request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/validation" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET headers: Authorization: "Bearer {test_login_token}" + params: + limit: 4 response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - name: !anystr - status: 'OK' - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 - ---- -test_name: GET /manager/configuration/{component}/{configuration} - -stages: + status_code: 200 + json: + error: 0 + data: + affected_items: + - &manager_log + description: !anystr + level: !anystr + tag: !anystr + timestamp: !anystr + - <<: *manager_log + - <<: *manager_log + - <<: *manager_log + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - - name: Show the config of analysis/global in the manager + # GET /manager/logs + - name: Filters -> limit=2, sort=-level request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/global" - headers: - Authorization: "Bearer {test_login_token}" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" method: GET - response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - global: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 - - - name: Show the config of analysis/active_response in the manager - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/active_response" headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + limit: 2 + sort: -level response: + verify_response_with: + - function: tavern_utils:test_sort_response + extra_kwargs: + key: "level" + reverse: true status_code: 200 - json: - error: 0 - data: - affected_items: - - active-response: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 - message: !anystr - - name: Show the config of analysis/alerts in the manager + # GET /manager/logs + - name: Filters -> offset=3, limit=3 request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/alerts" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + limit: 3 + offset: 3 response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - alerts: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 + status_code: 200 + json: + error: 0 + data: + affected_items: + - <<: *manager_log + - <<: *manager_log + - <<: *manager_log + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - - name: Show the config of analysis/command in the manager + # GET /manager/logs + - name: Filters -> offset=3, level=info, limit=4 request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/command" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + limit: 4 + offset: 3 + level: info response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - command: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 + status_code: 200 + json: + error: 0 + data: + affected_items: + - <<: *manager_log + - <<: *manager_log + - <<: *manager_log + - <<: *manager_log + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - - name: Show the config of 
analysis/internal in the manager + # GET /manager/logs + - name: Filters -> tag=wazuh-analysisd, limit=1 request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/internal" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + tag: wazuh-analysisd + limit: 1 response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - internal: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 + status_code: 200 + json: + error: 0 + data: + affected_items: + - <<: *manager_log + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - - name: Show the config of auth in the manager + # GET /manager/logs + - name: Filters -> tag=wazuh-syscheckd, limit=1 request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/auth/auth" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + tag: wazuh-syscheckd + limit: 1 response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - auth: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 + status_code: 200 + json: + error: 0 + data: + affected_items: + - <<: *manager_log + failed_items: [] + total_affected_items: !anyint + total_failed_items: 0 - - name: Show the config of com/internal in the manager + - name: Filters by query (tag=wazuh-syscheckd, level=info) request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/com/internal" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + q: tag=wazuh-syscheckd;level=info response: status_code: 200 - json: - error: 0 - data: - affected_items: - - internal: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 + verify_response_with: + - function: tavern_utils:test_expected_value + extra_kwargs: + key: "tag" + expected_values: "wazuh-syscheckd" + - function: tavern_utils:test_expected_value + extra_kwargs: + key: "level" + expected_values: "info" - - name: Show the config of logcollector/localfile in the manager + - name: Filters by query (timestamp<2021-07-01) request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/logcollector/localfile" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + q: timestamp<2021-07-01 response: status_code: 200 json: error: 0 data: - affected_items: - - localfile: !anything + affected_items: [] failed_items: [] - total_affected_items: 1 + total_affected_items: 0 total_failed_items: 0 - - name: Show the config of logcollector/socket in the manager + - name: Filter by non-existent tag request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/logcollector/socket" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + tag: wazuh-unknown-daemon response: status_code: 200 json: @@ -842,118 +804,183 @@ stages: total_affected_items: 0 total_failed_items: 0 - - name: Show the config of logcollector/internal in the manager + - name: Read logs using valid select request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/logcollector/internal" + url: 
"{protocol:s}://{host:s}:{port:d}/manager/logs" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + select: 'timestamp,tag' response: + verify_response_with: + # Check response item keys are the selected keys + function: tavern_utils:test_select_key_affected_items + extra_kwargs: + select_key: 'timestamp,tag' status_code: 200 json: error: 0 data: - affected_items: - - internal: !anything + total_affected_items: !anyint failed_items: [] - total_affected_items: 1 total_failed_items: 0 - - name: Show the config of monitor/internal in the manager + - name: Try to read logs using invalid select request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/monitor/internal" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + select: 'noexists' response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - monitord: !anything - failed_items: [] - total_affected_items: 1 - total_failed_items: 0 + status_code: 400 + json: &invalid_select + error: 1724 - - name: Show the config of monitor/reports in the manager + - name: Get distinct manager logs request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/monitor/reports" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET + params: + distinct: true response: status_code: 200 - json: - error: 0 - data: - affected_items: - - reports: !anything - failed_items: [ ] - total_affected_items: 1 - total_failed_items: 0 + verify_response_with: + function: tavern_utils:test_distinct_key - - name: Show the config of request/remote in the manager +--- +test_name: GET /manager/logs/summary + +stages: + + # GET /manager/logs/summary + - name: Request request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/request/remote" + url: "{protocol:s}://{host:s}:{port:d}/manager/logs/summary" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET response: status_code: 200 json: error: 0 data: - affected_items: - - remote: !anything + affected_items: !anything failed_items: [] - total_affected_items: 1 + total_affected_items: !anyint total_failed_items: 0 - - name: Show the config of request/internal in the manager +--- +test_name: GET /manager/api/config + +stages: + + # GET /manager/api/config + - name: Get API configuration request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/request/internal" + url: "{protocol:s}://{host:s}:{port:d}/manager/api/config" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET response: status_code: 200 json: error: 0 data: affected_items: - - internal: !anything - failed_items: [] + - node_name: !anystr + node_api_config: + host: !anystr + port: !anyint + + https: + enabled: !anybool + key: !anystr + cert: !anystr + use_ca: !anybool + ca: !anystr + ssl_protocol: !anystr + ssl_ciphers: !anystr + logs: + level: !anystr + max_size: + enabled: !anybool + size: !anystr + cors: + enabled: !anybool + source_route: !anystr + expose_headers: !anystr + allow_headers: !anystr + allow_credentials: !anybool + access: + max_login_attempts: !anyint + block_time: !anyint + max_request_per_minute: !anyint + drop_privileges: !anybool + experimental_features: !anybool + upload_configuration: + remote_commands: + localfile: + allow: !anybool + exceptions: !anything + 
wodle_command: + allow: !anybool + exceptions: !anything + limits: + eps: + allow: !anybool + agents: + allow_higher_versions: + allow: !anybool + indexer: + allow: !anybool total_affected_items: 1 total_failed_items: 0 + failed_items: [] - - name: Show the config of syscheck/syscheck in the manager +--- +test_name: GET /manager/configuration/validation (OK) + +stages: + + # GET /manager/configuration/validation + - name: Request validation request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/syscheck/syscheck" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/validation" + method: GET headers: Authorization: "Bearer {test_login_token}" - method: GET response: status_code: 200 json: error: 0 data: affected_items: - - syscheck: !anything + - name: !anystr + status: 'OK' failed_items: [] total_affected_items: 1 total_failed_items: 0 - - name: Show the config of syscheck/rootcheck in the manager +--- +test_name: GET /manager/configuration/{component}/{configuration} + +stages: + + - name: Show the config of analysis/global in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/syscheck/rootcheck" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/global" headers: Authorization: "Bearer {test_login_token}" method: GET @@ -963,15 +990,15 @@ stages: error: 0 data: affected_items: - - rootcheck: !anything + - global: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 - - name: Show the config of syscheck/internal in the manager + - name: Show the config of analysis/active_response in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/syscheck/internal" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/active_response" headers: Authorization: "Bearer {test_login_token}" method: GET @@ -981,60 +1008,52 @@ stages: error: 0 data: affected_items: - - internal: !anything + - active-response: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 + message: !anystr - - name: Show the config of wazuh-db/internal in the manager + - name: Show the config of analysis/alerts in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/wazuh-db/internal" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/alerts" headers: Authorization: "Bearer {test_login_token}" + method: GET response: status_code: 200 json: - error: !anyint + error: 0 data: affected_items: - - wazuh_db: - commit_time_max: !anyint - commit_time_min: !anyint - open_db_limit: !anyint - worker_pool_size: !anyint + - alerts: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 - - name: Show the config of wazuh-db/wdb in the manager + - name: Show the config of analysis/command in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/wazuh-db/wdb" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/command" headers: Authorization: "Bearer {test_login_token}" + method: GET response: status_code: 200 json: - error: !anyint + error: 0 data: affected_items: - - wdb: - backup: - - database: !anystr - enabled: !anybool - interval: !anyint - max_files: !anyint + - command: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 - - name: Try to show the config of wmodules/wmodules in the manager + - name: Show the config of analysis/internal in the 
manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/wmodules/wmodules" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/analysis/internal" headers: Authorization: "Bearer {test_login_token}" method: GET @@ -1044,375 +1063,313 @@ stages: error: 0 data: affected_items: - - wmodules: !anything + - internal: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 - - name: Try to show the invalid config of component in the manager + - name: Show the config of auth in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/agent/wmodules" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/auth/auth" headers: Authorization: "Bearer {test_login_token}" method: GET response: - status_code: 400 + status_code: 200 json: - error: 1128 - ---- -test_name: PUT /manager/configuration - -stages: + error: 0 + data: + affected_items: + - auth: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - # PUT /manager/configuration - - name: Upload a valid configuration + - name: Show the config of com/internal in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" - method: PUT - data: "{valid_ossec_conf:s}" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/com/internal" headers: Authorization: "Bearer {test_login_token}" - content-type: application/octet-stream + method: GET response: status_code: 200 json: + error: 0 data: affected_items: - - 'manager' + - internal: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 - error: 0 - # GET /manager/configuration/ - - name: Ensure the new config has been applied by checking a field + - name: Show the config of logcollector/localfile in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/logcollector/localfile" headers: Authorization: "Bearer {test_login_token}" method: GET - params: - section: "alerts" - field: "log_alert_level" response: status_code: 200 json: error: 0 data: affected_items: - - alerts: - log_alert_level: '300' + - localfile: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 - - # PUT /manager/configuration - - name: Try to upload an invalid configuration + - name: Show the config of logcollector/socket in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" - method: PUT - data: "{invalid_ossec_conf:s}" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/logcollector/socket" headers: Authorization: "Bearer {test_login_token}" - content-type: application/octet-stream + method: GET response: status_code: 200 json: - error: 1 + error: 0 data: affected_items: [] - failed_items: - - error: - code: 1113 - id: - - 'manager' + failed_items: [] total_affected_items: 0 - total_failed_items: 1 + total_failed_items: 0 - # PUT /manager/configuration - - name: Try to upload an empty configuration + - name: Show the config of logcollector/internal in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" - method: PUT - data: "{invalid_ossec_conf:s}" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/logcollector/internal" headers: Authorization: "Bearer {test_login_token}" - content-type: application/octet-stream + method: GET response: status_code: 200 json: - error: 1 + error: 0 data: - 
affected_items: [] - failed_items: - - error: - code: 1113 - id: - - 'manager' - total_affected_items: 0 - total_failed_items: 1 + affected_items: + - internal: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - - name: Try to upload an invalid xml + - name: Show the config of monitor/internal in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" - method: PUT - data: "{invalid_ossec_xml:s}" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/monitor/internal" headers: Authorization: "Bearer {test_login_token}" - content-type: application/octet-stream + method: GET response: status_code: 200 json: - error: 1 + error: 0 data: - affected_items: [] - failed_items: - - error: - code: 1908 - id: - - 'manager' - total_affected_items: 0 - total_failed_items: 1 + affected_items: + - monitord: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - # PUT /manager/configuration - - name: Try to upload an invalid configuration with an invalid content-type + - name: Show the config of monitor/reports in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" - method: PUT - data: "{invalid_ossec_conf:s}" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/monitor/reports" headers: Authorization: "Bearer {test_login_token}" - content-type: application/json + method: GET response: - status_code: 415 + status_code: 200 json: - title: Unsupported Media Type + error: 0 + data: + affected_items: + - reports: !anything + failed_items: [ ] + total_affected_items: 1 + total_failed_items: 0 - # GET /manager/configuration/ - - name: Ensure the config didn't change + - name: Show the config of request/remote in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/request/remote" headers: Authorization: "Bearer {test_login_token}" method: GET - params: - section: "alerts" - field: "log_alert_level" response: status_code: 200 json: error: 0 data: affected_items: - - alerts: - log_alert_level: '300' + - remote: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 ---- -test_name: GET /manager/version/check - -stages: - - name: Get wazuh version - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/" - method: GET - headers: - Authorization: "Bearer {test_login_token}" - response: - status_code: 200 - save: - json: - wazuh_version: data.api_version - - - name: Enable update check + - name: Show the config of request/internal in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" - method: PUT - data: "{valid_ossec_conf_with_update_check_enabled:s}" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/request/internal" headers: Authorization: "Bearer {test_login_token}" - content-type: application/octet-stream + method: GET response: status_code: 200 json: + error: 0 data: affected_items: - - 'manager' + - internal: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 - error: 0 - - name: Restart manager to apply the configuration + - name: Show the config of syscheck/syscheck in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/restart" - method: PUT + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/syscheck/syscheck" headers: Authorization: "Bearer {test_login_token}" + method: GET 
response: status_code: 200 json: error: 0 data: affected_items: - - !anystr + - syscheck: !anything failed_items: [] total_affected_items: 1 total_failed_items: 0 - delay_after: 50 - - name: Get available updates + - name: Show the config of syscheck/rootcheck in the manager request: - verify: false - url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" - method: GET + verify: False + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/syscheck/rootcheck" headers: Authorization: "Bearer {test_login_token}" - response: - status_code: - - 200 - - 500 - verify_response_with: - function: tavern_utils:validate_update_check_response - extra_kwargs: - current_version: "v{wazuh_version:s}" - update_check: true - - - name: Get available updates with force option - request: - verify: false - url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" method: GET - headers: - Authorization: "Bearer {test_login_token}" - params: - force_query: "true" response: - status_code: - - 200 - - 500 - verify_response_with: - function: tavern_utils:validate_update_check_response - extra_kwargs: - current_version: "v{wazuh_version:s}" - update_check: true - - ---- -test_name: GET /manager/version/check with update_check disabled - -stages: + status_code: 200 + json: + error: 0 + data: + affected_items: + - rootcheck: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - - name: Get wazuh version + - name: Show the config of syscheck/internal in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/syscheck/internal" headers: Authorization: "Bearer {test_login_token}" + method: GET response: status_code: 200 - save: - json: - wazuh_version: data.api_version + json: + error: 0 + data: + affected_items: + - internal: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - - name: Disable the update check + - name: Show the config of wazuh-db/internal in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" - method: PUT - data: "{valid_ossec_conf:s}" + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/wazuh-db/internal" + method: GET headers: Authorization: "Bearer {test_login_token}" - content-type: application/octet-stream response: status_code: 200 json: + error: !anyint data: affected_items: - - 'manager' + - wazuh_db: + commit_time_max: !anyint + commit_time_min: !anyint + open_db_limit: !anyint + worker_pool_size: !anyint failed_items: [] total_affected_items: 1 total_failed_items: 0 - error: 0 - - name: Restart manager to apply the configuration + - name: Show the config of wazuh-db/wdb in the manager request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/restart" - method: PUT + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/wazuh-db/wdb" + method: GET headers: Authorization: "Bearer {test_login_token}" response: status_code: 200 json: - error: 0 + error: !anyint data: affected_items: - - !anystr + - wdb: + backup: + - database: !anystr + enabled: !anybool + interval: !anyint + max_files: !anyint failed_items: [] total_affected_items: 1 total_failed_items: 0 - delay_after: 50 - - name: Get available updates + - name: Try to show the config of wmodules/wmodules in the manager request: - verify: false - url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" - method: GET + verify: False + url: 
"{protocol:s}://{host:s}:{port:d}/manager/configuration/wmodules/wmodules" headers: Authorization: "Bearer {test_login_token}" + method: GET response: status_code: 200 - verify_response_with: - function: tavern_utils:validate_update_check_response - extra_kwargs: - current_version: "v{wazuh_version:s}" - update_check: false + json: + error: 0 + data: + affected_items: + - wmodules: !anything + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - - name: Get available updates with force option + - name: Try to show the invalid config of component in the manager request: - verify: false - url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" - method: GET + verify: False + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration/agent/wmodules" headers: Authorization: "Bearer {test_login_token}" - params: - force_query: "true" + method: GET response: - status_code: 200 - verify_response_with: - function: tavern_utils:validate_update_check_response - extra_kwargs: - current_version: "v{wazuh_version:s}" - update_check: false + status_code: 400 + json: + error: 1128 --- -test_name: GET /manager/version/check with update check service error +test_name: PUT /manager/configuration stages: - - name: Set an invalid CTI url + # PUT /manager/configuration + - name: Upload a valid configuration request: verify: False url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" method: PUT - data: "{ossec_conf_with_invalid_cti_url:s}" + data: "{valid_ossec_conf:s}" headers: Authorization: "Bearer {test_login_token}" content-type: application/octet-stream @@ -1427,336 +1384,311 @@ stages: total_failed_items: 0 error: 0 - - name: Restart manager to apply the configuration + # GET /manager/configuration/ + - name: Ensure the new config has been applied by checking a field request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/restart" - method: PUT + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" headers: Authorization: "Bearer {test_login_token}" + method: GET + params: + section: "alerts" + field: "log_alert_level" response: status_code: 200 json: error: 0 data: affected_items: - - !anystr + - alerts: + log_alert_level: '300' failed_items: [] total_affected_items: 1 total_failed_items: 0 - delay_after: 50 - - - name: Try to get available updates - request: - verify: false - url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" - method: GET - headers: - Authorization: "Bearer {test_login_token}" - response: - status_code: 500 - json: - error: 2100 - - - name: Try to get available updates with force option - request: - verify: false - url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" - method: GET - headers: - Authorization: "Bearer {test_login_token}" - params: - force_query: "true" - response: - status_code: 500 - json: - error: 2100 - ---- -test_name: GET /manager/logs - -stages: - # GET /manager/logs - - name: Request - request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET - headers: - Authorization: "Bearer {test_login_token}" - response: - status_code: 200 - json: - error: 0 - data: - affected_items: !anything - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 - # GET /manager/logs - - name: Filters -> limit=4 + # PUT /manager/configuration + - name: Try to upload an invalid configuration request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + method: PUT 
+ data: "{invalid_ossec_conf:s}" headers: Authorization: "Bearer {test_login_token}" - params: - limit: 4 + content-type: application/octet-stream response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - &manager_log - description: !anystr - level: !anystr - tag: !anystr - timestamp: !anystr - - <<: *manager_log - - <<: *manager_log - - <<: *manager_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + status_code: 200 + json: + error: 1 + data: + affected_items: [] + failed_items: + - error: + code: 1113 + id: + - 'manager' + total_affected_items: 0 + total_failed_items: 1 - # GET /manager/logs - - name: Filters -> limit=2, sort=-level + # PUT /manager/configuration + - name: Try to upload an empty configuration request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + method: PUT + data: "{invalid_ossec_conf:s}" headers: Authorization: "Bearer {test_login_token}" - params: - limit: 2 - sort: -level + content-type: application/octet-stream response: - verify_response_with: - - function: tavern_utils:test_sort_response - extra_kwargs: - key: "level" - reverse: true status_code: 200 + json: + error: 1 + data: + affected_items: [] + failed_items: + - error: + code: 1113 + id: + - 'manager' + total_affected_items: 0 + total_failed_items: 1 - # GET /manager/logs - - name: Filters -> offset=3, limit=3 + - name: Try to upload an invalid xml request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + method: PUT + data: "{invalid_ossec_xml:s}" headers: Authorization: "Bearer {test_login_token}" - params: - limit: 3 - offset: 3 + content-type: application/octet-stream response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - <<: *manager_log - - <<: *manager_log - - <<: *manager_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + status_code: 200 + json: + error: 1 + data: + affected_items: [] + failed_items: + - error: + code: 1908 + id: + - 'manager' + total_affected_items: 0 + total_failed_items: 1 - # GET /manager/logs - - name: Filters -> offset=3, level=debug, limit=4 + # PUT /manager/configuration + - name: Try to upload an invalid configuration with an invalid content-type request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + method: PUT + data: "{invalid_ossec_conf:s}" headers: Authorization: "Bearer {test_login_token}" - params: - limit: 4 - offset: 3 - level: debug + content-type: application/json response: - status_code: 200 - json: - error: 0 - data: - affected_items: - - <<: *manager_log - - <<: *manager_log - - <<: *manager_log - - <<: *manager_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + status_code: 415 + json: + title: Unsupported Media Type - # GET /manager/logs - - name: Filters -> tag=wazuh-modulesd, limit=1 + # GET /manager/configuration/ + - name: Ensure the config didn't change request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" headers: Authorization: "Bearer {test_login_token}" + method: GET params: - tag: wazuh-modulesd - limit: 1 + section: "alerts" + field: "log_alert_level" response: - status_code: 200 - json: - error: 0 - data: - 
affected_items: - - <<: *manager_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + status_code: 200 + json: + error: 0 + data: + affected_items: + - alerts: + log_alert_level: '300' + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 - # GET /manager/logs - - name: Filters -> tag=wazuh-analysisd, limit=1 +--- +test_name: GET /manager/version/check with update_check disabled + +stages: + + - name: Get wazuh version request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + url: "{protocol:s}://{host:s}:{port:d}/" method: GET headers: Authorization: "Bearer {test_login_token}" - params: - tag: wazuh-analysisd - limit: 1 response: - status_code: 200 + status_code: 200 + save: json: - error: 0 - data: - affected_items: - - <<: *manager_log - failed_items: [] - total_affected_items: !anyint - total_failed_items: 0 + wazuh_version: data.api_version - - name: Filters by query (tag=wazuh-modulesd, level=debug) + - name: Get available updates request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + verify: false + url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" method: GET headers: Authorization: "Bearer {test_login_token}" - params: - q: tag=wazuh-modulesd;level=debug response: status_code: 200 verify_response_with: - - function: tavern_utils:test_expected_value - extra_kwargs: - key: "tag" - expected_values: "wazuh-modulesd" - - function: tavern_utils:test_expected_value - extra_kwargs: - key: "level" - expected_values: "debug" + function: tavern_utils:validate_update_check_response + extra_kwargs: + current_version: "v{wazuh_version:s}" + update_check: false - - name: Filters by query (timestamp<2021-07-01) + - name: Get available updates with force option request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + verify: false + url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" method: GET headers: Authorization: "Bearer {test_login_token}" params: - q: timestamp<2021-07-01 + force_query: "true" response: status_code: 200 - json: - error: 0 - data: - affected_items: [] - failed_items: [] - total_affected_items: 0 - total_failed_items: 0 + verify_response_with: + function: tavern_utils:validate_update_check_response + extra_kwargs: + current_version: "v{wazuh_version:s}" + update_check: false - - name: Filter by non-existent tag +--- +test_name: GET /manager/version/check with update_check enabled + +stages: + - name: Get wazuh version request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + url: "{protocol:s}://{host:s}:{port:d}/" method: GET headers: Authorization: "Bearer {test_login_token}" - params: - tag: wazuh-unknown-daemon + response: + status_code: 200 + save: + json: + wazuh_version: data.api_version + + - name: Enable update check + request: + verify: False + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + method: PUT + data: "{valid_ossec_conf_with_update_check_enabled:s}" + headers: + Authorization: "Bearer {test_login_token}" + content-type: application/octet-stream response: status_code: 200 json: - error: 0 data: - affected_items: [] + affected_items: + - 'manager' failed_items: [] - total_affected_items: 0 + total_affected_items: 1 total_failed_items: 0 + error: 0 - - name: Read logs using valid select + - name: Restart manager to apply the configuration request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" - method: GET + url: 
"{protocol:s}://{host:s}:{port:d}/manager/restart" + method: PUT headers: Authorization: "Bearer {test_login_token}" - params: - select: 'timestamp,tag' response: - verify_response_with: - # Check response item keys are the selected keys - function: tavern_utils:test_select_key_affected_items - extra_kwargs: - select_key: 'timestamp,tag' status_code: 200 json: error: 0 data: - total_affected_items: !anyint + affected_items: + - !anystr failed_items: [] + total_affected_items: 1 total_failed_items: 0 + delay_after: !float "{restart_delay}" - - name: Try to read logs using invalid select + - name: Get available updates request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + verify: false + url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" method: GET headers: Authorization: "Bearer {test_login_token}" - params: - select: 'noexists' response: - status_code: 400 - json: &invalid_select - error: 1724 + status_code: + - 200 + - 500 + verify_response_with: + function: tavern_utils:validate_update_check_response + extra_kwargs: + current_version: "v{wazuh_version:s}" + update_check: true - - name: Get distinct manager logs + - name: Get available updates with force option request: - verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs" + verify: false + url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" method: GET headers: Authorization: "Bearer {test_login_token}" params: - distinct: true + force_query: "true" response: - status_code: 200 + status_code: + - 200 + - 500 verify_response_with: - function: tavern_utils:test_distinct_key + function: tavern_utils:validate_update_check_response + extra_kwargs: + current_version: "v{wazuh_version:s}" + update_check: true --- -test_name: GET /manager/logs/summary +test_name: GET /manager/version/check with update_check service error stages: - # GET /manager/logs/summary - - name: Request + - name: Set an invalid CTI url request: verify: False - url: "{protocol:s}://{host:s}:{port:d}/manager/logs/summary" - method: GET + url: "{protocol:s}://{host:s}:{port:d}/manager/configuration" + method: PUT + data: "{ossec_conf_with_invalid_cti_url:s}" + headers: + Authorization: "Bearer {test_login_token}" + content-type: application/octet-stream + response: + status_code: 200 + json: + data: + affected_items: + - 'manager' + failed_items: [] + total_affected_items: 1 + total_failed_items: 0 + error: 0 + + - name: Restart manager to apply the configuration + request: + verify: False + url: "{protocol:s}://{host:s}:{port:d}/manager/restart" + method: PUT headers: Authorization: "Bearer {test_login_token}" response: @@ -1764,10 +1696,38 @@ stages: json: error: 0 data: - affected_items: !anything + affected_items: + - !anystr failed_items: [] - total_affected_items: !anyint + total_affected_items: 1 total_failed_items: 0 + delay_after: !float "{restart_delay}" + + - name: Try to get available updates + request: + verify: false + url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" + method: GET + headers: + Authorization: "Bearer {test_login_token}" + response: + status_code: 500 + json: + error: 2100 + + - name: Try to get available updates with force option + request: + verify: false + url: "{protocol:s}://{host:s}:{port:d}/manager/version/check" + method: GET + headers: + Authorization: "Bearer {test_login_token}" + params: + force_query: "true" + response: + status_code: 500 + json: + error: 2100 --- test_name: PUT /manager/restart From 2bd76fdd64d0daf3263dabb17173d4652f98f375 Mon Sep 17 
00:00:00 2001 From: Luis Enrique Chico Capistrano Date: Tue, 28 May 2024 20:39:31 -0300 Subject: [PATCH 313/419] Fix: Enhance the error handling in the script generate_compiled_windows_agent.sh --- packages/windows/generate_compiled_windows_agent.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/windows/generate_compiled_windows_agent.sh b/packages/windows/generate_compiled_windows_agent.sh index 95f72bdfd5f..b8ca3534ec7 100755 --- a/packages/windows/generate_compiled_windows_agent.sh +++ b/packages/windows/generate_compiled_windows_agent.sh @@ -121,6 +121,10 @@ main() { esac done + if [ -z "${ZIP_NAME}" ]; then + help |grep -B5 --color "^.*--output.*$" & exit 1 + fi + if [ ! -d "${OUTDIR}" ]; then echo "Creating building directory at ${OUTDIR}" mkdir -p ${OUTDIR} From e39070ee6263f9f55f4968ef97419b3aef887a43 Mon Sep 17 00:00:00 2001 From: Luis Enrique Chico Capistrano Date: Tue, 28 May 2024 20:42:16 -0300 Subject: [PATCH 314/419] Fix: Clarify if Workflow Uploads Both Package and Checksum --- .github/workflows/packages-build-manager.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/workflows/packages-build-manager.yml b/.github/workflows/packages-build-manager.yml index c24fe1575a2..627dee1fe59 100644 --- a/.github/workflows/packages-build-manager.yml +++ b/.github/workflows/packages-build-manager.yml @@ -150,6 +150,9 @@ jobs: - name: Upload package to S3 working-directory: packages run: | - for file in /tmp/*manager*; do - aws s3 cp $file s3://packages-dev.internal.wazuh.com/development/wazuh/4.x/main/packages/ - done + aws s3 cp /tmp/*manager*.${{ inputs.system }} s3://packages-dev.internal.wazuh.com/development/wazuh/4.x/main/packages/ + + - name: Upload checksum to S3 + if: ${{ inputs.checksum == true }} + run: | + aws s3 cp /tmp/*manager*.${{ inputs.system }}.sha512 s3://packages-dev.internal.wazuh.com/development/wazuh/4.x/main/packages/ \ No newline at end of file From 76a854be9794d35bc5da54a141d029f17b9f7c8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcel=20Kemp=20Mu=C3=B1oz?= Date: Wed, 29 May 2024 14:28:45 +0200 Subject: [PATCH 315/419] style: added line break at end of file --- .github/workflows/packages-build-manager.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/packages-build-manager.yml b/.github/workflows/packages-build-manager.yml index 627dee1fe59..413b0c585d7 100644 --- a/.github/workflows/packages-build-manager.yml +++ b/.github/workflows/packages-build-manager.yml @@ -155,4 +155,4 @@ jobs: - name: Upload checksum to S3 if: ${{ inputs.checksum == true }} run: | - aws s3 cp /tmp/*manager*.${{ inputs.system }}.sha512 s3://packages-dev.internal.wazuh.com/development/wazuh/4.x/main/packages/ \ No newline at end of file + aws s3 cp /tmp/*manager*.${{ inputs.system }}.sha512 s3://packages-dev.internal.wazuh.com/development/wazuh/4.x/main/packages/ From 9f8a63de4bcc9406c5aa641a9e392ccd741bdb3a Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Wed, 29 May 2024 16:27:18 +0200 Subject: [PATCH 316/419] build: bump revision to 40718 --- api/api/spec/spec.yaml | 2 +- framework/wazuh/core/cluster/__init__.py | 2 +- src/Doxyfile | 2 +- src/REVISION | 2 +- src/init/wazuh-client.sh | 2 +- src/init/wazuh-local.sh | 2 +- src/init/wazuh-server.sh | 2 +- src/win32/wazuh-installer.nsi | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index fdf4093a297..d4ac5f0e4b4 100644 --- a/api/api/spec/spec.yaml +++ 
b/api/api/spec/spec.yaml @@ -41,7 +41,7 @@ info: version: '4.7.5' - x-revision: '40718' + x-revision: '40719' title: 'Wazuh API REST' license: name: 'GPL 2.0' diff --git a/framework/wazuh/core/cluster/__init__.py b/framework/wazuh/core/cluster/__init__.py index 3754106f44c..c49a33b76f0 100644 --- a/framework/wazuh/core/cluster/__init__.py +++ b/framework/wazuh/core/cluster/__init__.py @@ -5,7 +5,7 @@ # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 __version__ = '4.7.5' -__revision__ = '40718' +__revision__ = '40719' __author__ = "Wazuh Inc" __wazuh_name__ = "Wazuh" __licence__ = "\ diff --git a/src/Doxyfile b/src/Doxyfile index ff0d51ba7ec..599e3fd3f7f 100644 --- a/src/Doxyfile +++ b/src/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = "WAZUH" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = "v4.7.5-40718" +PROJECT_NUMBER = "v4.7.5-40719" # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/src/REVISION b/src/REVISION index 4119a90ff7a..bcc76d3f7a1 100644 --- a/src/REVISION +++ b/src/REVISION @@ -1 +1 @@ -40718 +40719 diff --git a/src/init/wazuh-client.sh b/src/init/wazuh-client.sh index bc50b5daebf..7742369b336 100755 --- a/src/init/wazuh-client.sh +++ b/src/init/wazuh-client.sh @@ -12,7 +12,7 @@ DIR=`dirname $PWD`; # Installation info VERSION="v4.7.5" -REVISION="40718" +REVISION="40719" TYPE="agent" ### Do not modify below here ### diff --git a/src/init/wazuh-local.sh b/src/init/wazuh-local.sh index 24bef291051..be49ea3496c 100644 --- a/src/init/wazuh-local.sh +++ b/src/init/wazuh-local.sh @@ -14,7 +14,7 @@ PLIST=${DIR}/bin/.process_list; # Installation info VERSION="v4.7.5" -REVISION="40718" +REVISION="40719" TYPE="local" ### Do not modify below here ### diff --git a/src/init/wazuh-server.sh b/src/init/wazuh-server.sh index 12fb7d699e3..a1800dcd007 100755 --- a/src/init/wazuh-server.sh +++ b/src/init/wazuh-server.sh @@ -14,7 +14,7 @@ PLIST=${DIR}/bin/.process_list; # Installation info VERSION="v4.7.5" -REVISION="40718" +REVISION="40719" TYPE="server" ### Do not modify below here ### diff --git a/src/win32/wazuh-installer.nsi b/src/win32/wazuh-installer.nsi index 99741ed3741..06e15139504 100644 --- a/src/win32/wazuh-installer.nsi +++ b/src/win32/wazuh-installer.nsi @@ -21,7 +21,7 @@ !define MUI_ICON install.ico !define MUI_UNICON uninstall.ico !define VERSION "4.7.5" -!define REVISION "40718" +!define REVISION "40719" !define NAME "Wazuh" !define SERVICE "WazuhSvc" From ee33145814fcebb1a9b06aa014f4dcf2e71eb853 Mon Sep 17 00:00:00 2001 From: javier Date: Wed, 24 Jan 2024 13:38:50 +0100 Subject: [PATCH 317/419] Update IT API with HAProxy-lb --- api/test/integration/conftest.py | 26 +++++---- .../base/{nginx-lb => haproxy-lb}/Dockerfile | 4 +- .../{nginx-lb => haproxy-lb}/entrypoint.sh | 4 +- .../env/base/haproxy-lb/haproxy.conf | 39 +++++++++++++ .../integration/env/base/nginx-lb/nginx.conf | 55 ------------------- .../agent/configuration_files/ossec_base.conf | 2 +- api/test/integration/env/docker-compose.yml | 48 ++++++++-------- .../integration_test_api_endpoints.json | 4 +- 8 files changed, 86 insertions(+), 96 deletions(-) rename api/test/integration/env/base/{nginx-lb => haproxy-lb}/Dockerfile (71%) rename api/test/integration/env/base/{nginx-lb => haproxy-lb}/entrypoint.sh (56%) create mode 100644 api/test/integration/env/base/haproxy-lb/haproxy.conf 
delete mode 100644 api/test/integration/env/base/nginx-lb/nginx.conf diff --git a/api/test/integration/conftest.py b/api/test/integration/conftest.py index b2fa8ee9269..2167b1c76fd 100644 --- a/api/test/integration/conftest.py +++ b/api/test/integration/conftest.py @@ -130,7 +130,7 @@ def build_and_up(env_mode: str, interval: int = 10, build: bool = True): if up_process.returncode == 0: break - + time.sleep(interval) retries += 1 @@ -154,7 +154,7 @@ def check_health(node_type: str = 'manager', agents: list = None, Parameters ---------- node_type : str - Can be agent, manager or nginx-lb. + Can be agent, manager or haproxy-lb. agents : list List of active agents for the current test (only needed if the agents need a custom healthcheck). @@ -181,9 +181,9 @@ def check_health(node_type: str = 'manager', agents: list = None, shell=True) if not health.startswith(b'"healthy"'): return False - elif node_type == 'nginx-lb': + elif node_type == 'haproxy-lb': health = subprocess.check_output( - f"docker inspect env-nginx-lb-1 -f '{{{{json .State.Health.Status}}}}'", shell=True) + "docker inspect env-haproxy-lb-1 -f '{{json .State.Health.Status}}'", shell=True) if not health.startswith(b'"healthy"'): return False else: @@ -232,7 +232,7 @@ def enable_white_mode(): 'security.yaml'), '+r') as rbac_conf: content = rbac_conf.read() rbac_conf.seek(0) - rbac_conf.write(re.sub(r'rbac_mode: (white|black)', f'rbac_mode: white', content)) + rbac_conf.write(re.sub(r'rbac_mode: (white|black)', 'rbac_mode: white', content)) def clean_tmp_folder(): @@ -303,7 +303,7 @@ def rbac_custom_config_generator(module: str, rbac_mode: str): def save_logs(test_name: str): """Save API, cluster and Wazuh logs from every cluster node and Wazuh logs from every agent if tests fail. - Save nginx-lb log. + Save haproxy-lb log. 
Examples: "test_{test_name}-{node/agent}-{log}" -> "test_decoder-worker1-api.log" @@ -338,10 +338,10 @@ def save_logs(test_name: str): except subprocess.CalledProcessError: continue - # Save nginx-lb log - with open(os.path.join(test_logs_path, f'test_{test_name}-nginx-lb.log'), mode='w') as f_log: + # Save haproxy-lb log + with open(os.path.join(test_logs_path, f'test_{test_name}-haproxy-lb.log'), mode='w') as f_log: current_process = subprocess.Popen( - ["docker", "logs", "env-nginx-lb-1"], + ["docker", "logs", "env-haproxy-lb-1"], stdout=f_log, stderr=subprocess.STDOUT, universal_newlines=True) current_process.wait() @@ -403,6 +403,11 @@ def clean_up_env(env_mode: str): agents_health = check_health(node_type='agent', agents=list(range(1, 9))) nginx_health = check_health(node_type='nginx-lb') + while values['retries'] < values['max_retries']: + managers_health = check_health(interval=values['interval'], + only_check_master_health=env_mode == standalone_env_mode) + agents_health = check_health(interval=values['interval'], node_type='agent', agents=list(range(1, 9))) + haproxy_health = check_health(interval=values['interval'], node_type='haproxy-lb') # Check if entrypoint was successful try: error_message = subprocess.check_output(["docker", "exec", "-t", "env-wazuh-master-1", "sh", "-c", @@ -411,7 +416,8 @@ def clean_up_env(env_mode: str): except subprocess.CalledProcessError: pass - if managers_health and agents_health and nginx_health: + if managers_health and agents_health and haproxy_health: + time.sleep(values['interval']) return retries += 1 diff --git a/api/test/integration/env/base/nginx-lb/Dockerfile b/api/test/integration/env/base/haproxy-lb/Dockerfile similarity index 71% rename from api/test/integration/env/base/nginx-lb/Dockerfile rename to api/test/integration/env/base/haproxy-lb/Dockerfile index 2dd21df0cc6..2e9d453a399 100644 --- a/api/test/integration/env/base/nginx-lb/Dockerfile +++ b/api/test/integration/env/base/haproxy-lb/Dockerfile @@ -1,6 +1,6 @@ -FROM nginx +FROM haproxy -COPY nginx.conf /etc/nginx/nginx.conf +COPY haproxy.conf /etc/haproxy/haproxy.conf ADD entrypoint.sh /scripts/entrypoint.sh diff --git a/api/test/integration/env/base/nginx-lb/entrypoint.sh b/api/test/integration/env/base/haproxy-lb/entrypoint.sh similarity index 56% rename from api/test/integration/env/base/nginx-lb/entrypoint.sh rename to api/test/integration/env/base/haproxy-lb/entrypoint.sh index 3481866ffe9..14e7514bf40 100755 --- a/api/test/integration/env/base/nginx-lb/entrypoint.sh +++ b/api/test/integration/env/base/haproxy-lb/entrypoint.sh @@ -2,7 +2,7 @@ if [ $1 == "standalone" ]; then # Remove workers upstream configurations (in upstream mycluster and upstream register) - sed -i -E '/wazuh-worker1|wazuh-worker2/d' /etc/nginx/nginx.conf; + sed -i -E '/wazuh-worker1|wazuh-worker2/d' /etc/haproxy/haproxy.conf; fi - exec nginx -g 'daemon off;' +exec haproxy -f /etc/haproxy/haproxy.conf diff --git a/api/test/integration/env/base/haproxy-lb/haproxy.conf b/api/test/integration/env/base/haproxy-lb/haproxy.conf new file mode 100644 index 00000000000..90ac6b44216 --- /dev/null +++ b/api/test/integration/env/base/haproxy-lb/haproxy.conf @@ -0,0 +1,39 @@ +global + log /dev/log local0 + log /dev/log local1 notice + chroot /var/lib/haproxy + stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners + stats timeout 30s + user haproxy + group haproxy + daemon + +defaults + log global + mode tcp + option tcplog + option dontlognull + timeout connect 5000 + timeout client 50000 + 
timeout server 50000 + +frontend mycluster + bind *:1514 + default_backend mycluster_backend + +frontend register + bind *:1515 + default_backend register_backend + +backend mycluster_backend + balance roundrobin + server wazuh-master wazuh-master:1514 + server wazuh-worker1 wazuh-worker1:1514 + server wazuh-worker2 wazuh-worker2:1514 + +backend register_backend + balance roundrobin + server wazuh-master wazuh-master:1515 + server wazuh-worker1 wazuh-worker1:1515 + server wazuh-worker2 wazuh-worker2:1515 + diff --git a/api/test/integration/env/base/nginx-lb/nginx.conf b/api/test/integration/env/base/nginx-lb/nginx.conf deleted file mode 100644 index 87026d70d05..00000000000 --- a/api/test/integration/env/base/nginx-lb/nginx.conf +++ /dev/null @@ -1,55 +0,0 @@ - -user nginx; -worker_processes 1; - -error_log /var/log/nginx/error.log warn; -pid /var/run/nginx.pid; - - -events { - worker_connections 1024; -} - - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - #tcp_nopush on; - - keepalive_timeout 65; - - #gzip on; - - include /etc/nginx/conf.d/*.conf; -} - - -stream { - upstream mycluster { - hash $remote_addr consistent; - server wazuh-master:1514; - server wazuh-worker1:1514; - server wazuh-worker2:1514; - } - upstream register { - server wazuh-master:1515; - server wazuh-worker1:1515; - server wazuh-worker2:1515; - } - server { - listen 1514; - proxy_pass mycluster; - } - server { - listen 1515; - proxy_pass register; - } -} diff --git a/api/test/integration/env/configurations/base/agent/configuration_files/ossec_base.conf b/api/test/integration/env/configurations/base/agent/configuration_files/ossec_base.conf index 0264f6a7b29..b3881e5f252 100644 --- a/api/test/integration/env/configurations/base/agent/configuration_files/ossec_base.conf +++ b/api/test/integration/env/configurations/base/agent/configuration_files/ossec_base.conf @@ -1,4 +1,4 @@ -
    <address>nginx-lb</address>
+    <address>haproxy-lb</address>
     <protocol>tcp</protocol>
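For reference, this hunk only swaps the address value inside the agent connection block of ossec.conf so that test agents report through the HAProxy load balancer instead of NGINX. A minimal sketch of how that block is expected to look once the change is applied; the enclosing client/server tags and the port value are assumptions based on the usual Wazuh agent configuration and are not part of the hunk above:

  <client>
    <server>
      <!-- Assumed surrounding block: agents connect through the load balancer service name -->
      <address>haproxy-lb</address>
      <port>1514</port>
      <protocol>tcp</protocol>
    </server>
  </client>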
diff --git a/api/test/integration/env/docker-compose.yml b/api/test/integration/env/docker-compose.yml index d400ea982c3..4cc9bef2e02 100644 --- a/api/test/integration/env/docker-compose.yml +++ b/api/test/integration/env/docker-compose.yml @@ -66,10 +66,10 @@ services: - ./tools/:/tools entrypoint: - /scripts/entrypoint.sh - - nginx-lb + - haproxy-lb - wazuh-agent1 depends_on: - - nginx-lb + - haproxy-lb wazuh-agent2: profiles: @@ -82,11 +82,11 @@ services: - ./tools/:/tools entrypoint: - /scripts/entrypoint.sh - - nginx-lb + - haproxy-lb - wazuh-agent2 depends_on: - wazuh-agent1 - - nginx-lb + - haproxy-lb wazuh-agent3: profiles: @@ -99,11 +99,11 @@ services: - ./tools/:/tools entrypoint: - /scripts/entrypoint.sh - - nginx-lb + - haproxy-lb - wazuh-agent3 depends_on: - - wazuh-agent1 - - nginx-lb + - wazuh-agent2 + - haproxy-lb wazuh-agent4: profiles: @@ -116,11 +116,11 @@ services: - ./tools/:/tools entrypoint: - /scripts/entrypoint.sh - - nginx-lb + - haproxy-lb - wazuh-agent4 depends_on: - - wazuh-agent1 - - nginx-lb + - wazuh-agent3 + - haproxy-lb wazuh-agent5: profiles: @@ -136,11 +136,12 @@ services: - ./tools/:/tools entrypoint: - /scripts/entrypoint.sh - - nginx-lb + - haproxy-lb - wazuh-agent5 - agent_old depends_on: - - nginx-lb + - wazuh-agent4 + - haproxy-lb wazuh-agent6: profiles: @@ -153,12 +154,12 @@ services: - ./tools/:/tools entrypoint: - /scripts/entrypoint.sh - - nginx-lb + - haproxy-lb - wazuh-agent6 - agent_old depends_on: - wazuh-agent5 - - nginx-lb + - haproxy-lb wazuh-agent7: profiles: @@ -171,12 +172,12 @@ services: - ./tools/:/tools entrypoint: - /scripts/entrypoint.sh - - nginx-lb + - haproxy-lb - wazuh-agent7 - agent_old depends_on: - - wazuh-agent5 - - nginx-lb + - wazuh-agent6 + - haproxy-lb wazuh-agent8: profiles: @@ -189,21 +190,20 @@ services: - ./tools/:/tools entrypoint: - /scripts/entrypoint.sh - - nginx-lb + - haproxy-lb - wazuh-agent8 - agent_old depends_on: - - wazuh-agent5 - - nginx-lb + - wazuh-agent7 + - haproxy-lb - nginx-lb: + haproxy-lb: profiles: - standalone - cluster build: - context: ./base/nginx-lb - image: integration_test_nginx-lb - restart: always + context: ./base/haproxy-lb + image: integration_test_haproxy-lb entrypoint: - /scripts/entrypoint.sh - ${ENV_MODE} diff --git a/api/test/integration/mapping/integration_test_api_endpoints.json b/api/test/integration/mapping/integration_test_api_endpoints.json index 164802af87e..14350e827c9 100644 --- a/api/test/integration/mapping/integration_test_api_endpoints.json +++ b/api/test/integration/mapping/integration_test_api_endpoints.json @@ -1470,7 +1470,7 @@ "test_rbac_white_security_endpoints.tavern.yaml", "test_rbac_white_syscheck_endpoints.tavern.yaml", "test_rbac_white_syscollector_endpoints.tavern.yaml", - "test_rbac_white_task_endpoints.tavern.yaml", + "test_rbac_white_task_endpoints.tavern.yaml" ] }, { @@ -2139,7 +2139,7 @@ ] }, { - "path": "api/test/integration/env/base/nginx-lb", + "path": "api/test/integration/env/base/haproxy-lb", "files": [ { "name": "entrypoint.sh", From 2da4fb5d827582f898718ab19ee4fb16c41ba216 Mon Sep 17 00:00:00 2001 From: javier Date: Fri, 26 Jan 2024 17:28:22 +0100 Subject: [PATCH 318/419] Change entrypoint.sh --- api/test/integration/env/base/haproxy-lb/entrypoint.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/api/test/integration/env/base/haproxy-lb/entrypoint.sh b/api/test/integration/env/base/haproxy-lb/entrypoint.sh index 14e7514bf40..993551506bb 100755 --- a/api/test/integration/env/base/haproxy-lb/entrypoint.sh +++ 
b/api/test/integration/env/base/haproxy-lb/entrypoint.sh @@ -5,4 +5,6 @@ if [ $1 == "standalone" ]; then sed -i -E '/wazuh-worker1|wazuh-worker2/d' /etc/haproxy/haproxy.conf; fi +mkdir -p /run/haproxy/ + exec haproxy -f /etc/haproxy/haproxy.conf From 698809aeb1771ff6e63c706c3ec7b329db6c0a41 Mon Sep 17 00:00:00 2001 From: javier Date: Mon, 29 Jan 2024 16:29:50 +0100 Subject: [PATCH 319/419] Change haproxy conf --- api/test/integration/README.md | 2 +- api/test/integration/env/base/haproxy-lb/Dockerfile | 2 +- api/test/integration/env/base/haproxy-lb/entrypoint.sh | 5 ++--- api/test/integration/env/base/haproxy-lb/haproxy.conf | 10 ++++++---- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/api/test/integration/README.md b/api/test/integration/README.md index 9a61e9c0ef0..bd912b5fc4c 100644 --- a/api/test/integration/README.md +++ b/api/test/integration/README.md @@ -29,7 +29,7 @@ The Wazuh environment used to perform the API integration tests is built using ` This environment is composed of **12 docker containers**. These containers have the following components installed: 3 Wazuh managers, that compose a Wazuh cluster (1 master, 2 workers); 4 Wazuh agents with the same version as the managers -forming the cluster; 4 Wazuh agents with version 3.13.2 (old); and 1 NGINX load balancer. +forming the cluster; 4 Wazuh agents with version 3.13.2 (old); and 1 HAProxy load balancer. The Wazuh version used for the managers and non-old agents is the one specified by the branch used to perform the API integration tests. diff --git a/api/test/integration/env/base/haproxy-lb/Dockerfile b/api/test/integration/env/base/haproxy-lb/Dockerfile index 2e9d453a399..50b73a76a1b 100644 --- a/api/test/integration/env/base/haproxy-lb/Dockerfile +++ b/api/test/integration/env/base/haproxy-lb/Dockerfile @@ -1,4 +1,4 @@ -FROM haproxy +FROM haproxytech/haproxy-ubuntu:2.7.8 COPY haproxy.conf /etc/haproxy/haproxy.conf diff --git a/api/test/integration/env/base/haproxy-lb/entrypoint.sh b/api/test/integration/env/base/haproxy-lb/entrypoint.sh index 993551506bb..a983ee5924c 100755 --- a/api/test/integration/env/base/haproxy-lb/entrypoint.sh +++ b/api/test/integration/env/base/haproxy-lb/entrypoint.sh @@ -5,6 +5,5 @@ if [ $1 == "standalone" ]; then sed -i -E '/wazuh-worker1|wazuh-worker2/d' /etc/haproxy/haproxy.conf; fi -mkdir -p /run/haproxy/ - -exec haproxy -f /etc/haproxy/haproxy.conf +haproxy -f /etc/haproxy/haproxy.conf +tail -f /dev/null diff --git a/api/test/integration/env/base/haproxy-lb/haproxy.conf b/api/test/integration/env/base/haproxy-lb/haproxy.conf index 90ac6b44216..6fd2acef74c 100644 --- a/api/test/integration/env/base/haproxy-lb/haproxy.conf +++ b/api/test/integration/env/base/haproxy-lb/haproxy.conf @@ -1,11 +1,7 @@ global log /dev/log local0 log /dev/log local1 notice - chroot /var/lib/haproxy - stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners stats timeout 30s - user haproxy - group haproxy daemon defaults @@ -16,6 +12,12 @@ defaults timeout connect 5000 timeout client 50000 timeout server 50000 + default-server init-addr last,libc,none + +frontend health + mode http + bind 127.0.0.1:80 + http-request return status 200 if { src 127.0.0.0/8 } frontend mycluster bind *:1514 From 68fe4e76ef65784f99291f3df47e55ea6769bb96 Mon Sep 17 00:00:00 2001 From: javier Date: Tue, 30 Jan 2024 13:20:10 +0100 Subject: [PATCH 320/419] Change haproxy version --- api/test/integration/env/base/haproxy-lb/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/api/test/integration/env/base/haproxy-lb/Dockerfile b/api/test/integration/env/base/haproxy-lb/Dockerfile index 50b73a76a1b..e5a2bb5a742 100644 --- a/api/test/integration/env/base/haproxy-lb/Dockerfile +++ b/api/test/integration/env/base/haproxy-lb/Dockerfile @@ -1,4 +1,4 @@ -FROM haproxytech/haproxy-ubuntu:2.7.8 +FROM haproxytech/haproxy-ubuntu:2.7 COPY haproxy.conf /etc/haproxy/haproxy.conf From d4060330df74fcb1c4e841af128d2af240eae2e1 Mon Sep 17 00:00:00 2001 From: javier Date: Wed, 31 Jan 2024 10:51:40 +0100 Subject: [PATCH 321/419] Change balance leastconn --- api/test/integration/env/base/haproxy-lb/haproxy.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/test/integration/env/base/haproxy-lb/haproxy.conf b/api/test/integration/env/base/haproxy-lb/haproxy.conf index 6fd2acef74c..64c379bb22c 100644 --- a/api/test/integration/env/base/haproxy-lb/haproxy.conf +++ b/api/test/integration/env/base/haproxy-lb/haproxy.conf @@ -28,7 +28,7 @@ frontend register default_backend register_backend backend mycluster_backend - balance roundrobin + balance leastconn server wazuh-master wazuh-master:1514 server wazuh-worker1 wazuh-worker1:1514 server wazuh-worker2 wazuh-worker2:1514 From e57388c4ebe25a939601f76ecea504cd0219bd0b Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Tue, 6 Feb 2024 17:16:35 -0300 Subject: [PATCH 322/419] First approach to include HAProxy helper --- .../wazuh/core/cluster/hap_helper/__init__.py | 0 .../core/cluster/hap_helper/configuration.py | 43 ++ .../core/cluster/hap_helper/custom_logging.py | 113 +++++ .../hap_helper/data/configuration.yaml | 53 ++ .../hap_helper/data/configuration_schema.json | 68 +++ .../core/cluster/hap_helper/exception.py | 42 ++ .../core/cluster/hap_helper/hap_helper.py | 457 ++++++++++++++++++ .../wazuh/core/cluster/hap_helper/process.py | 38 ++ .../wazuh/core/cluster/hap_helper/proxy.py | 409 ++++++++++++++++ .../wazuh/core/cluster/hap_helper/wazuh.py | 185 +++++++ 10 files changed, 1408 insertions(+) create mode 100644 framework/wazuh/core/cluster/hap_helper/__init__.py create mode 100644 framework/wazuh/core/cluster/hap_helper/configuration.py create mode 100644 framework/wazuh/core/cluster/hap_helper/custom_logging.py create mode 100644 framework/wazuh/core/cluster/hap_helper/data/configuration.yaml create mode 100644 framework/wazuh/core/cluster/hap_helper/data/configuration_schema.json create mode 100644 framework/wazuh/core/cluster/hap_helper/exception.py create mode 100644 framework/wazuh/core/cluster/hap_helper/hap_helper.py create mode 100644 framework/wazuh/core/cluster/hap_helper/process.py create mode 100644 framework/wazuh/core/cluster/hap_helper/proxy.py create mode 100644 framework/wazuh/core/cluster/hap_helper/wazuh.py diff --git a/framework/wazuh/core/cluster/hap_helper/__init__.py b/framework/wazuh/core/cluster/hap_helper/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/framework/wazuh/core/cluster/hap_helper/configuration.py b/framework/wazuh/core/cluster/hap_helper/configuration.py new file mode 100644 index 00000000000..83f78094e9d --- /dev/null +++ b/framework/wazuh/core/cluster/hap_helper/configuration.py @@ -0,0 +1,43 @@ +import json +from os import path + +import jsonschema +import yaml +from wazuh_coordinator.exception import CoordinatorError + + +def validate_custom_configuration(custom_configuration: dict): + with open( + path.join(path.abspath((path.dirname(__file__))), 'data', 'configuration_schema.json'), 'r' + ) as schema_file: + json_schema = 
json.loads(schema_file.read()) + + try: + jsonschema.validate(instance=custom_configuration, schema=json_schema) + except jsonschema.ValidationError as validation_err: + raise CoordinatorError(101, extra_msg=f"({'> '.join(validation_err.path)}) {validation_err.message}") + + +def merge_configurations(default: dict, config: dict) -> dict: + for key, value in config.items(): + if isinstance(value, dict): + default[key] = merge_configurations(default.get(key, {}), value) + else: + default[key] = value + return default + + +def parse_configuration(custom_configuration_path: str = '') -> dict: + with open( + path.join(path.abspath((path.dirname(__file__))), 'data', 'configuration.yaml'), 'r' + ) as default_conf_file: + default_configuration = yaml.safe_load(default_conf_file) + + if not custom_configuration_path: + return default_configuration + + with open(custom_configuration_path, 'r') as custom_conf_file: + custom_configuration = yaml.safe_load(custom_conf_file) + + validate_custom_configuration(custom_configuration) + return merge_configurations(default_configuration, custom_configuration) diff --git a/framework/wazuh/core/cluster/hap_helper/custom_logging.py b/framework/wazuh/core/cluster/hap_helper/custom_logging.py new file mode 100644 index 00000000000..f4f2403e6fa --- /dev/null +++ b/framework/wazuh/core/cluster/hap_helper/custom_logging.py @@ -0,0 +1,113 @@ +import calendar +import gzip +import logging +import re +import shutil +from copy import copy +from glob import glob +from logging.handlers import TimedRotatingFileHandler +from os import chmod, unlink, path, makedirs + + +class CustomFileRotatingHandler(TimedRotatingFileHandler): + def doRollover(self): + logging.handlers.TimedRotatingFileHandler.doRollover(self) + + rotated_file = glob(f'{self.baseFilename}.*')[0] + + new_rotated_file = self.compute_archives_directory(rotated_file) + with open(rotated_file, 'rb') as f_in, gzip.open(new_rotated_file, 'wb') as f_out: + shutil.copyfileobj(f_in, f_out) + chmod(new_rotated_file, 0o640) + unlink(rotated_file) + + def compute_archives_directory(self, rotated_filepath): + rotated_file = path.basename(rotated_filepath) + year, month, day = re.match(r'[\w.-]+\.(\d+)-(\d+)-(\d+)', rotated_file).groups() + month = calendar.month_abbr[int(month)] + log_path = path.join(path.splitext(self.baseFilename)[0], year, month) + if not path.exists(log_path): + makedirs(log_path) + + return f'{log_path}/{path.basename(self.baseFilename)}-{day}.gz' + + +class LoggingFilter(logging.Filter): + def __init__(self, module_name: str): + super().__init__() + self.module_name = module_name + + def filter(self, record) -> bool: + record.levelname = f'{record.levelname}:' + record.module_name = f'[{self.module_name}]' + return True + + +class ColoredFormatter(logging.Formatter): + GREY = '\x1b[38;20m' + YELLOW = '\x1b[33;20m' + RED = '\x1b[31;20m' + BOLD_RED = '\x1b[31;1m' + ORANGE = '\x1b[33m;20m' + DARK_BLUE = '\x1b[34m' + GREY_BLUE = '\x1b[36m' + RESET = '\x1b[0m' + + TRACE_LEVEL = 5 + + def __init__(self, fmt, style='%', datefmt='', *args, **kwargs): + super().__init__(fmt, *args, **kwargs) + self.style = style + self.datefmt = datefmt + + self.FORMATS = { + logging.DEBUG: self.DARK_BLUE + fmt + self.RESET, + logging.INFO: self.GREY + fmt + self.RESET, + logging.WARNING: self.YELLOW + fmt + self.RESET, + logging.ERROR: self.RED + fmt + self.RESET, + logging.CRITICAL: self.BOLD_RED + fmt + self.RESET, + self.TRACE_LEVEL: self.GREY_BLUE + fmt + self.RESET, + } + + def format(self, record): + record_copy = 
copy(record) + log_fmt = self.FORMATS.get(record_copy.levelno) + formatter = logging.Formatter(log_fmt, style=self.style, datefmt=self.datefmt) + return formatter.format(record_copy) + + +class CustomLogger: + TRACE_LEVEL = 5 + + def __init__(self, name: str, file_path: str = '', tag: str = 'Main', level: int = logging.INFO): + logging.addLevelName(self.TRACE_LEVEL, 'TRACE') + logger = logging.getLogger(name) + logger.trace = self.trace + logger.addFilter(LoggingFilter(tag)) + logger.propagate = False + + colored_formatter = ColoredFormatter( + '%(asctime)s %(levelname)-9s %(module_name)-11s %(message)s', style='%', datefmt='%Y/%m/%d %H:%M:%S' + ) + colored_handler = logging.StreamHandler() + colored_handler.setFormatter(colored_formatter) + + if file_path: + logger_formatter = logging.Formatter( + '%(asctime)s %(levelname)-9s %(module_name)-11s %(message)s', style='%', datefmt='%Y/%m/%d %H:%M:%S' + ) + fh = CustomFileRotatingHandler(filename=file_path, when='midnight') + fh.setFormatter(logger_formatter) + logger.addHandler(fh) + + logger.addHandler(colored_handler) + logger.setLevel(level) + + self.logger = logger + + def get_logger(self) -> logging.Logger: + return self.logger + + def trace(self, message, *args, **kwargs): + if self.logger.isEnabledFor(self.TRACE_LEVEL): + self.logger._log(self.TRACE_LEVEL, message, args, **kwargs) diff --git a/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml b/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml new file mode 100644 index 00000000000..524a58f15bf --- /dev/null +++ b/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml @@ -0,0 +1,53 @@ +--- +wazuh: + # Wazuh API configuration + api: + # Wazuh API address + address: localhost + # Wazuh API port + port: 55000 + # Wazuh API username. It must have read permissions for cluster and agents and reconnect permissions for agents + user: wazuh + # Wazuh API password + password: wazuh + # Wazuh cluster configuration + connection: + # Wazuh agents connection service port (TCP) + port: 1514 + # Wazuh cluster nodes to exclude on auto mode + excluded_nodes: [ ] + +proxy: + # Wazuh Proxy API configuration + api: + # Wazuh Proxy API address + address: localhost + # Wazuh Proxy API port + port: 7777 + # Wazuh Proxy API username + user: wazuh + # Wazuh Proxy API password + password: wazuh + + # Defined Proxy backend (frontend will append '_front' to it) + backend: wazuh_cluster + + # Defines the list of DNS servers to translate DNS names to IP adresses. + # This configuration is recommended but not mandatory. + # If it is configured, a resolvers section must be properly defined in + # the haproxy.cfg file. 
+ # resolver: wazuh_resolver + +coordinator: + # Seconds to sleep between each coordinator iteration + sleep_time: 60 + # Seconds to sleep after the end of the agent reconnection phase + agent_reconnection_stability_time: 60 + # Agent chunk size (each chunk defines the max number of agents to be reconnected at once) + agent_reconnection_chunk_size: 100 + # Seconds to sleep after an agent chunk reconnection (if there is more than one) + agent_reconnection_time: 5 + # Agent imbalance tolerance + agent_tolerance: 0.1 + # Time in minutes before removing a disconnected Wazuh node from the backend + remove_disconnected_node_after: 60 diff --git a/framework/wazuh/core/cluster/hap_helper/data/configuration_schema.json b/framework/wazuh/core/cluster/hap_helper/data/configuration_schema.json new file mode 100644 index 00000000000..ac018281788 --- /dev/null +++ b/framework/wazuh/core/cluster/hap_helper/data/configuration_schema.json @@ -0,0 +1,68 @@ + +{ + "type": "object", + "additionalProperties": false, + "properties": { + "wazuh": { + "type": "object", + "additionalProperties": false, + "properties": { + "api": { + "type": "object", + "additionalProperties": false, + "properties": { + "address": {"type": "string"}, + "port": {"type": "integer"}, + "user": {"type": "string"}, + "password": {"type": "string"} + } + }, + + "connection": { + "type": "object", + "additionalProperties": false, + "properties": { + "port": {"type": "integer"} + } + }, + + "excluded_nodes": {"type": "array", "items": {"type": "string"}} + } + }, + + + "proxy": { + "type": "object", + "additionalProperties": false, + "properties": { + "api": { + "type": "object", + "additionalProperties": false, + "properties": { + "address": {"type": "string"}, + "port": {"type": "integer"}, + "user": {"type": "string"}, + "password": {"type": "string"} + } + }, + + "backend": {"type": "string"}, + "resolver": {"type": "string"} + } + }, + + + "coordinator": { + "type": "object", + "additionalProperties": false, + "properties": { + "sleep_time": {"type": "integer", "minimum": 10}, + "agent_reconnection_stability_time": {"type": "integer", "minimum": 10}, + "agent_reconnection_chunk_size": {"type": "integer", "minimum": 100}, + "agent_reconnection_time": {"type": "integer", "minimum": 0}, + "agent_tolerance": {"type": "number", "minimum": 0, "exclusiveMinimum": true, "maximum": 1}, + "remove_disconnected_node_after": {"type": "integer", "minimum": 0} + } + } + } +} diff --git a/framework/wazuh/core/cluster/hap_helper/exception.py b/framework/wazuh/core/cluster/hap_helper/exception.py new file mode 100644 index 00000000000..13dfbfb5140 --- /dev/null +++ b/framework/wazuh/core/cluster/hap_helper/exception.py @@ -0,0 +1,42 @@ +class CustomException(Exception): + PREFIX = 'U' + ERRORS = {} + + def __init__(self, code: int, extra_msg: str = ''): + self._code = code + self._message = self.ERRORS[self._code] + if extra_msg: + self._message += f' - {extra_msg}' + + def __str__(self): + return f'({self.PREFIX}{self._code}) {self._message}' + + +class CoordinatorError(CustomException): + PREFIX = 'C' + ERRORS = {100: 'Server status check timed out after adding new servers', 101: 'User configuration is not valid'} + + +class WazuhError(CustomException): + PREFIX = 'W' + ERRORS = { + 99: 'Cannot initialize Wazuh API', + 100: 'Unexpected error trying to connect to the Wazuh API', + 101: 'Unexpected response from the Wazuh API', + 102: 'Invalid credentials for the Wazuh API', + 103: 'The given Wazuh API user does not have permissions to make the 
request', + 104: 'Too many API requests retries', + } + + +class ProxyError(CustomException): + PREFIX = 'P' + ERRORS = { + 99: 'Cannot initialize Proxy API', + 100: 'Unexpected error trying to connect to Proxy API', + 101: 'Unexpected response from the Proxy API', + 102: 'Invalid credentials for the Proxy API', + 103: 'Invalid HAProxy Dataplane API specification configured', + 104: 'Cannot detect a valid HAProxy process linked to the Dataplane API', + 105: 'Unexpected response from HAProxy Dataplane API', + } diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py new file mode 100644 index 00000000000..027b0a7dfae --- /dev/null +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -0,0 +1,457 @@ +import argparse +import logging +import time +from math import ceil, floor + +from wazuh_coordinator.configuration import parse_configuration +from wazuh_coordinator.custom_logging import CustomLogger +from wazuh_coordinator.exception import CoordinatorError, ProxyError, WazuhError +from wazuh_coordinator.process import run_in_background +from wazuh_coordinator.proxy import Proxy, ProxyAPI, ProxyServerState +from wazuh_coordinator.wazuh import WazuhAgent, WazuhAPI + + +class Coordinator: + UPDATED_BACKEND_STATUS_TIMEOUT: int = 60 + AGENT_STATUS_SYNC_TIME: int = 25 # Default agent notify time + cluster sync + 5s + SERVER_ADMIN_STATE_DELAY: int = 5 + + def __init__(self, proxy: Proxy, wazuh_api: WazuhAPI, logger: logging.Logger, options: dict): + self.logger = logger + self.proxy = proxy + self.wazuh_api = wazuh_api + + self.sleep_time: int = options['sleep_time'] + self.agent_reconnection_stability_time: int = options['agent_reconnection_stability_time'] + self.agent_reconnection_chunk_size: int = options['agent_reconnection_chunk_size'] + self.agent_reconnection_time: int = options['agent_reconnection_time'] + self.agent_tolerance: float = options['agent_tolerance'] + self.remove_disconnected_node_after: int = options['remove_disconnected_node_after'] + + def initialize_components(self): + try: + self.wazuh_api.initialize() + self.proxy.initialize() + self.logger.info('Main components were initialized') + except (WazuhError, ProxyError) as init_exc: + self.logger.critical('Cannot initialize main components') + self.logger.critical(init_exc) + exit(1) + + def initialize_wazuh_cluster_configuration(self): + if not self.proxy.exists_backend(self.proxy.wazuh_backend): + self.logger.info(f"Could not find Wazuh backend '{self.proxy.wazuh_backend}'") + self.proxy.add_new_backend(name=self.proxy.wazuh_backend) + self.logger.info('Added Wazuh backend') + + if not self.proxy.exists_frontend(f'{self.proxy.wazuh_backend}_front'): + self.logger.info(f"Could not find Wazuh frontend '{self.proxy.wazuh_backend}_front'") + self.proxy.add_new_frontend( + name=f'{self.proxy.wazuh_backend}_front', + port=self.proxy.wazuh_connection_port, + backend=self.proxy.wazuh_backend, + ) + self.logger.info('Added Wazuh frontend') + + def check_node_to_delete(self, node_name: str) -> bool: + node_downtime = self.proxy.get_wazuh_server_stats(server_name=node_name)['lastchg'] + self.logger.trace(f"Server '{node_name}' has been disconnected for {node_downtime}s") + + if node_downtime < self.remove_disconnected_node_after * 60: + self.logger.info(f"Server '{node_name}' has not been disconnected enough time to remove it") + return False + self.logger.info( + f"Server '{node_name}' has been disconnected for over {self.remove_disconnected_node_after} " 
'minutes' + ) + return True + + def check_proxy_processes(self, auto_mode: bool = False, warn: bool = True) -> bool: + if not self.proxy.is_proxy_process_single(): + warn and self.logger.warning('Detected more than one Proxy processes') + if not auto_mode and input(' Do you wish to fix them? (y/N): ').lower() != 'y': + return False + self.manage_proxy_processes() + return True + + def backend_servers_state_healthcheck(self): + for server in self.proxy.get_current_backend_servers().keys(): + if self.proxy.is_server_drain(server_name=server): + self.logger.warning(f"Server '{server}' was found {ProxyServerState.DRAIN.value.upper()}. Fixing it") + self.proxy.allow_server_new_connections(server_name=server) + + def obtain_nodes_to_configure(self, wazuh_cluster_nodes: dict, proxy_backend_servers: dict) -> tuple[list, list]: + add_nodes, remove_nodes = [], [] + + for node_name, node_address in wazuh_cluster_nodes.items(): + if node_name not in proxy_backend_servers: + add_nodes.append(node_name) + elif node_address != proxy_backend_servers[node_name]: + remove_nodes.append(node_name) + add_nodes.append(node_name) + for node_name in proxy_backend_servers.keys() - wazuh_cluster_nodes.keys(): + if node_name in self.wazuh_api.excluded_nodes: + self.logger.info(f"Server '{node_name}' has been excluded but is currently active. Removing it") + elif self.check_node_to_delete(node_name): + pass + else: + continue + remove_nodes.append(node_name) + + return add_nodes, remove_nodes + + def update_agent_connections(self, agent_list: list[str]): + self.logger.debug('Reconnecting agents') + self.logger.debug( + f'Agent reconnection chunk size is set to {self.agent_reconnection_chunk_size}. ' + f'Total iterations: {ceil(len(agent_list) / self.agent_reconnection_chunk_size)}' + ) + for index in range(0, len(agent_list), self.agent_reconnection_chunk_size): + self.wazuh_api.reconnect_agents(agent_list[index : index + self.agent_reconnection_chunk_size]) + self.logger.debug(f'Delay between agent reconnections. Sleeping {self.agent_reconnection_time}s...') + time.sleep(self.agent_reconnection_time) + + def force_agent_reconnection_to_server(self, chosen_server: str, agents_list: list[dict]): + current_servers = self.proxy.get_current_backend_servers().keys() + affected_servers = current_servers - {chosen_server} + for server_name in affected_servers: + self.proxy.restrain_server_new_connections(server_name=server_name) + time.sleep(self.SERVER_ADMIN_STATE_DELAY) + eligible_agents = WazuhAgent.get_agents_able_to_reconnect(agents_list=agents_list) + if len(eligible_agents) != len(agents_list): + self.logger.warning( + f"Some agents from '{chosen_server}' are not compatible with the reconnection endpoint." 
+ ' Those connections will be balanced afterwards' + ) + self.update_agent_connections(agent_list=eligible_agents) + for server_name in affected_servers: + self.proxy.allow_server_new_connections(server_name=server_name) + time.sleep(self.SERVER_ADMIN_STATE_DELAY) + + def manage_proxy_processes(self): + current_proxy_pid = self.proxy.api.get_runtime_info()['pid'] + response = self.proxy.api.kill_proxy_processes(pid_to_exclude=current_proxy_pid) + + if response['error'] > 0: + self.logger.error("Could not manage all proxy processes: " f"{response['data']}") + elif len(response['data']) > 0: + self.logger.info('Managed proxy processes') + + def migrate_old_connections(self, new_servers: list[str], deleted_servers: list[str]): + wazuh_backend_stats = {} + backend_stats_iteration = 1 + while any([server not in wazuh_backend_stats for server in new_servers]): + if backend_stats_iteration > self.UPDATED_BACKEND_STATUS_TIMEOUT: + self.logger.error(f'Some of the new servers did not go UP: {set(new_servers) - wazuh_backend_stats}') + raise CoordinatorError(100) + + self.logger.debug('Waiting for new servers to go UP') + time.sleep(1) + backend_stats_iteration += 1 + wazuh_backend_stats = self.proxy.get_wazuh_backend_stats().keys() + + self.logger.debug('All new servers are UP') + previous_agent_distribution = self.wazuh_api.get_agents_node_distribution() + previous_connection_distribution = self.proxy.get_wazuh_backend_server_connections() | { + server: len(previous_agent_distribution[server]) + for server in previous_agent_distribution + if server not in new_servers + } + + unbalanced_connections = self.check_for_balance( + current_connections_distribution=previous_connection_distribution + ) + agents_to_balance = [] + + for wazuh_worker, agents in previous_agent_distribution.items(): + if wazuh_worker in deleted_servers: + agents_to_balance += [agent['id'] for agent in agents] + continue + try: + agents_to_balance += [agent['id'] for agent in agents[: unbalanced_connections[wazuh_worker]]] + agents = agents[unbalanced_connections[wazuh_worker] :] + except KeyError: + pass + + self.logger.info(f"Migrating {len(agents)} connections from server '{wazuh_worker}'") + self.force_agent_reconnection_to_server(chosen_server=wazuh_worker, agents_list=agents) + + if agents_to_balance: + self.logger.info('Balancing exceeding connections after changes on the Wazuh backend') + self.update_agent_connections(agent_list=agents_to_balance) + + self.check_proxy_processes(auto_mode=True, warn=False) + + self.logger.info('Waiting for agent connections stability') + self.logger.debug(f'Sleeping {self.agent_reconnection_stability_time}s...') + time.sleep(self.agent_reconnection_stability_time) + + def check_for_balance(self, current_connections_distribution: dict) -> dict: + if not current_connections_distribution: + self.logger.debug('There are not connections at the moment') + return {} + self.logger.debug( + 'Checking for agent balance. 
Current connections distribution: ' f'{current_connections_distribution}' + ) + + total_agents = sum(current_connections_distribution.values()) + try: + mean = floor(total_agents / len(current_connections_distribution.keys())) + except ZeroDivisionError: + return {} + + if ( + max(current_connections_distribution.values()) <= mean * (1 + self.agent_tolerance) + and min(current_connections_distribution.values()) >= mean * (1 - self.agent_tolerance) + ) or ( + max(current_connections_distribution.values()) - min(current_connections_distribution.values()) <= 1 + and total_agents % len(current_connections_distribution.keys()) != 0 + ): + self.logger.debug('Current balance is under tolerance') + return {} + + unbalanced_connections = {} + for server, connections in current_connections_distribution.items(): + exceeding_connections = connections - mean + if exceeding_connections > 0: + unbalanced_connections[server] = exceeding_connections + + return unbalanced_connections + + def calculate_agents_to_balance(self, affected_servers: dict) -> dict: + agents_to_balance = {} + for server_name, n_agents in affected_servers.items(): + agent_candidates = self.wazuh_api.get_agents_belonging_to_node(node_name=server_name, limit=n_agents) + eligible_agents = WazuhAgent.get_agents_able_to_reconnect(agents_list=agent_candidates) + if len(eligible_agents) != len(agent_candidates): + self.logger.warning( + f'Some agents from node {server_name} are not compatible with the reconnection ' + 'endpoint. Balance might not be precise' + ) + agents_to_balance[server_name] = eligible_agents + + return agents_to_balance + + def balance_agents(self, affected_servers: dict): + self.logger.info('Attempting to balance agent connections') + agents_to_balance = self.calculate_agents_to_balance(affected_servers) + for node_name, agent_ids in agents_to_balance.items(): + self.logger.info(f"Balancing {len(agent_ids)} agents from '{node_name}'") + self.update_agent_connections(agent_list=agent_ids) + + def manage_wazuh_cluster_nodes(self): + while True: + try: + self.backend_servers_state_healthcheck() + self.check_proxy_processes(auto_mode=True) and time.sleep(self.AGENT_STATUS_SYNC_TIME) + current_wazuh_cluster = self.wazuh_api.get_cluster_nodes() + current_proxy_backend = self.proxy.get_current_backend_servers() + + nodes_to_add, nodes_to_remove = self.obtain_nodes_to_configure( + current_wazuh_cluster, current_proxy_backend + ) + if nodes_to_add or nodes_to_remove: + self.logger.info( + 'Detected changes in Wazuh cluster nodes. Current cluster: ' f'{current_wazuh_cluster}' + ) + self.logger.info('Attempting to update proxy backend') + + for node_to_remove in nodes_to_remove: + self.proxy.remove_wazuh_manager(manager_name=node_to_remove) + + for node_to_add in nodes_to_add: + self.proxy.add_wazuh_manager( + manager_name=node_to_add, + manager_address=current_wazuh_cluster[node_to_add], + resolver=self.proxy.resolver, + ) + self.migrate_old_connections(new_servers=nodes_to_add, deleted_servers=nodes_to_remove) + continue + + self.logger.info('Load balancer backend is up to date') + unbalanced_connections = self.check_for_balance( + current_connections_distribution=self.proxy.get_wazuh_backend_server_connections() + ) + if not unbalanced_connections: + if self.logger.level <= logging.DEBUG: + self.logger.debug( + 'Current backend stats: ' f'{self.proxy.get_wazuh_backend_server_connections()}' + ) + self.logger.info('Load balancer backend is balanced') + else: + self.logger.info('Agent imbalance detected. 
Waiting for agent status sync...') + time.sleep(self.AGENT_STATUS_SYNC_TIME) + self.balance_agents(affected_servers=unbalanced_connections) + + self.logger.debug(f'Sleeping {self.sleep_time}s...') + time.sleep(self.sleep_time) + except (CoordinatorError, ProxyError, WazuhError) as handled_exc: + self.logger.error(str(handled_exc)) + self.logger.warning( + f'Tasks may not perform as expected. Sleeping {self.sleep_time}s ' 'before continuing...' + ) + time.sleep(self.sleep_time) + + +def setup_loggers(log_file_path: str, log_level: int) -> tuple[logging.Logger, logging.Logger, logging.Logger]: + main_logger = CustomLogger('wazuh-coordinator', file_path=log_file_path, level=log_level).get_logger() + proxy_logger = CustomLogger('proxy-logger', file_path=log_file_path, level=log_level, tag='Proxy').get_logger() + wazuh_api_logger = CustomLogger( + 'wazuh-api-logger', file_path=log_file_path, level=log_level, tag='Wazuh API' + ).get_logger() + + return main_logger, proxy_logger, wazuh_api_logger + + +def parse_arguments() -> argparse.Namespace: + parser = argparse.ArgumentParser(description='Wazuh coordinator') + group = parser.add_mutually_exclusive_group() + group.add_argument( + '--auto', + dest='auto', + action='store_true', + help='Run coordinator capabilities on auto mode (full functionality)', + ) + group.add_argument( + '-a', + '--add-server', + dest='add_server', + action='store', + type=str, + nargs=2, + metavar=('SERVER_NAME', 'SERVER_ADDRESS'), + help='Add a new server to configured backend', + ) + group.add_argument( + '-r', + '--remove-server', + dest='remove_server', + action='store', + type=str, + metavar='SERVER_NAME', + help='Remove server from configured backend', + ) + group.add_argument( + '-cb', + '--check-balance', + dest='check_for_balance', + action='store_true', + help='Check if the environment needs to be balanced', + ) + parser.add_argument( + '-c', + '--configuration-file', + dest='configuration_file', + action='store', + default='', + help='Path to the test result file', + ) + parser.add_argument('-l', '--log-file', dest='log_file', action='store', help='Path to the logging file') + parser.add_argument( + '-b', '--background', dest='background', action='store_true', help='Run coordinator on background' + ) + parser.add_argument( + '-d', + '--debug', + dest='log_debug', + action='store', + type=str, + choices=['debug', 'trace'], + help='Enable debug logging', + ) + + return parser.parse_args() + + +def main(): + arguments = parse_arguments() + + user_config_path = arguments.configuration_file + log_file = arguments.log_file + log_level = ( + CustomLogger.TRACE_LEVEL + if arguments.log_debug == 'trace' + else logging.DEBUG + if arguments.log_debug == 'debug' + else logging.INFO + ) + + if arguments.background: + run_in_background() + + main_logger, proxy_logger, wazuh_api_logger = setup_loggers(log_file_path=log_file, log_level=log_level) + + try: + configuration = parse_configuration(custom_configuration_path=user_config_path) + resolver = configuration['proxy'].get('resolver', None) + + proxy_api = ProxyAPI( + username=configuration['proxy']['api']['user'], + password=configuration['proxy']['api']['password'], + address=configuration['proxy']['api']['address'], + port=configuration['proxy']['api']['port'], + ) + proxy = Proxy( + wazuh_backend=configuration['proxy']['backend'], + wazuh_connection_port=configuration['wazuh']['connection']['port'], + proxy_api=proxy_api, + logger=proxy_logger, + resolver=resolver, + ) + + wazuh_api = WazuhAPI( + 
address=configuration['wazuh']['api']['address'], + port=configuration['wazuh']['api']['port'], + username=configuration['wazuh']['api']['user'], + password=configuration['wazuh']['api']['password'], + excluded_nodes=configuration['wazuh']['excluded_nodes'], + logger=wazuh_api_logger, + ) + + coordinator = Coordinator( + proxy=proxy, wazuh_api=wazuh_api, logger=main_logger, options=configuration['coordinator'] + ) + + coordinator.initialize_components() + coordinator.initialize_wazuh_cluster_configuration() + if arguments.auto: + main_logger.info('Starting coordinator on auto mode') + coordinator.manage_wazuh_cluster_nodes() + elif arguments.add_server: + server_name, server_address = arguments.add_server + coordinator.backend_servers_state_healthcheck() + proxy.add_wazuh_manager(manager_name=server_name, manager_address=server_address, resolver=resolver) + main_logger.info(f"Server '{server_name}' was successfully added") + main_logger.info(f'Attempting to migrate connections') + coordinator.migrate_old_connections(new_servers=[server_name], deleted_servers=[]) + elif arguments.remove_server: + server_name = arguments.remove_server + coordinator.backend_servers_state_healthcheck() + proxy.remove_wazuh_manager(manager_name=server_name) + main_logger.info(f"Server '{server_name}' was successfully removed") + main_logger.info(f'Attempting to migrate connections') + coordinator.migrate_old_connections(new_servers=[], deleted_servers=[server_name]) + elif arguments.check_for_balance: + if coordinator.check_proxy_processes(): + main_logger.info(f'Sleeping {coordinator.AGENT_STATUS_SYNC_TIME}s before continuing...') + time.sleep(coordinator.AGENT_STATUS_SYNC_TIME) + unbalanced_connections = coordinator.check_for_balance( + current_connections_distribution=proxy.get_wazuh_backend_server_connections() + ) + if not unbalanced_connections: + main_logger.info('Load balancer backend is balanced') + exit(0) + main_logger.info(f'Agent imbalance detected. Surplus agents per node: {unbalanced_connections}') + if input(' Do you wish to balance agents? 
(y/N): ').lower() == 'y': + coordinator.balance_agents(affected_servers=unbalanced_connections) + except (CoordinatorError, ProxyError) as main_exc: + main_logger.error(str(main_exc)) + except KeyboardInterrupt: + pass + except Exception as unexpected_exc: + main_logger.critical(f'Unexpected exception: {unexpected_exc}', exc_info=True) + finally: + main_logger.info('Process ended') + + +if __name__ == '__main__': + main() diff --git a/framework/wazuh/core/cluster/hap_helper/process.py b/framework/wazuh/core/cluster/hap_helper/process.py new file mode 100644 index 00000000000..a2932af915b --- /dev/null +++ b/framework/wazuh/core/cluster/hap_helper/process.py @@ -0,0 +1,38 @@ +import os +import sys + + +def run_in_background(): + try: + pid = os.fork() + if pid > 0: + # Exit first parent + sys.exit(0) + except OSError as e: + sys.stderr.write('fork #1 failed: %d (%s)\n' % (e.errno, e.strerror)) + sys.exit(1) + + os.setsid() + + # Do second fork + try: + pid = os.fork() + if pid > 0: + # Exit from second parent + sys.exit(0) + except OSError as e: + sys.stderr.write('fork #2 failed: %d (%s)\n' % (e.errno, e.strerror)) + sys.exit(1) + + # Redirect standard file descriptors + sys.stdout.flush() + sys.stderr.flush() + si = open('/dev/null', 'r') + so = open('/dev/null', 'a+') + se = open('/dev/null', 'ab+', 0) + os.dup2(si.fileno(), sys.stdin.fileno()) + os.dup2(so.fileno(), sys.stdout.fileno()) + os.dup2(se.fileno(), sys.stderr.fileno()) + + # Decouple from parent environment + os.chdir('/') diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py new file mode 100644 index 00000000000..844aa130e94 --- /dev/null +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -0,0 +1,409 @@ +import logging +import ipaddress +from enum import Enum +from typing import TypeAlias, Optional + +import requests +from wazuh_coordinator.exception import ProxyError + +JSON_TYPE: TypeAlias = dict | list[dict] +PROXY_API_RESPONSE: TypeAlias = JSON_TYPE + + +class ProxyAPIMethod(Enum): + GET = 'get' + POST = 'post' + PUT = 'put' + DELETE = 'delete' + + +class ProxyServerState(Enum): + READY = 'ready' + MAINTENANCE = 'maint' + DRAIN = 'drain' + + +class CommunicationProtocol(Enum): + TCP = 'tcp' + HTTP = 'http' + + +class ProxyBalanceAlgorithm(Enum): + ROUND_ROBIN = 'roundrobin' + LEAST_CONNECTIONS = 'leastconn' + + +class ProxyAPI: + HAPEE_ENDPOINT = '/hapee' + + def __init__(self, username: str, password: str, address: str = 'localhost', port: int = 7777): + self.username = username + self.password = password + self.address = address + self.port = port + + self.version = 0 + + def initialize(self): + try: + response = requests.post( + f'https://{self.address}:{self.port}/', auth=(self.username, self.password), verify=False + ) + if response.status_code == 401: + raise ProxyError(102) + elif response.status_code == 404: + raise ProxyError(103) + except requests.ConnectionError: + raise ProxyError(99, extra_msg='Check connectivity and the configuration file') + except requests.RequestException as req_exc: + raise ProxyError(99, extra_msg=str(req_exc)) + + def _make_hapee_request( + self, + endpoint: str, + method: ProxyAPIMethod = ProxyAPIMethod.GET, + query_parameters: dict | None = None, + json_body: dict | None = None, + ) -> PROXY_API_RESPONSE: + uri = f'https://{self.address}:{self.port}{self.HAPEE_ENDPOINT}' + query_parameters = query_parameters or {} + query_parameters.update({'version': self.version}) + + hapee_json_body = { + 'method': method.value, + 
'uri': endpoint, + 'query_parameters': query_parameters, + 'json_body': json_body or {}, + } + + try: + response = requests.post(uri, auth=(self.username, self.password), json=hapee_json_body, verify=False) + except requests.RequestException as request_exc: + raise ProxyError(100, extra_msg=str(request_exc)) + + if response.status_code == 200: + full_decoded_response = response.json() + decoded_response = full_decoded_response['data']['response'] + if full_decoded_response['error'] != 0: + raise ProxyError(105, extra_msg=f'Full response: {response.status_code} | {response.json()}') + if isinstance(decoded_response, dict) and '_version' in decoded_response: + self.version = decoded_response['_version'] + elif method != ProxyAPIMethod.GET and 'configuration' in endpoint: + self.update_configuration_version() + + return decoded_response + elif response.status_code == 401: + raise ProxyError(102) + else: + raise ProxyError(101, extra_msg=f'Full response: {response.status_code} | {response.json()}') + + def _make_proxy_request( + self, + endpoint: str, + method: ProxyAPIMethod = ProxyAPIMethod.GET, + query_parameters: dict | None = None, + json_body: dict | None = None, + ) -> PROXY_API_RESPONSE: + uri = f'https://{self.address}:{self.port}{endpoint}' + + try: + response = getattr(requests, str(method.value))( + uri, auth=(self.username, self.password), params=query_parameters, json=json_body, verify=False + ) + except requests.RequestException as request_exc: + raise ProxyError(100, extra_msg=str(request_exc)) + + if response.status_code == 200: + return response.json() + elif response.status_code == 401: + raise ProxyError(102) + else: + raise ProxyError(101, extra_msg=f'Full response: {response.status_code} | {response.json()}') + + def update_configuration_version(self): + configuration_version = self._make_hapee_request('/services/haproxy/configuration/version') + self.version = configuration_version + + def get_runtime_info(self) -> PROXY_API_RESPONSE: + return self._make_hapee_request('/services/haproxy/runtime/info')[0]['info'] + + def get_backends(self) -> PROXY_API_RESPONSE: + return self._make_hapee_request(endpoint='/services/haproxy/configuration/backends') + + def add_backend( + self, + name: str, + mode: CommunicationProtocol = CommunicationProtocol.TCP, + algorithm: ProxyBalanceAlgorithm = ProxyBalanceAlgorithm.LEAST_CONNECTIONS, + ) -> PROXY_API_RESPONSE: + query_params = {'force_reload': True} + json_body = {'name': name, 'mode': mode.value, 'balance': {'algorithm': algorithm.value}} + + return self._make_hapee_request( + '/services/haproxy/configuration/backends', + method=ProxyAPIMethod.POST, + query_parameters=query_params, + json_body=json_body, + ) + + def get_backend_servers(self, backend: str) -> PROXY_API_RESPONSE: + return self._make_hapee_request( + '/services/haproxy/configuration/servers', query_parameters={'backend': backend} + ) + + def add_server_to_backend( + self, backend: str, server_name: str, server_address: str, port: int, resolver: Optional[str] + ) -> PROXY_API_RESPONSE: + query_params = {'backend': backend, 'force_reload': True} + json_body = {'check': 'enabled', 'name': server_name, 'address': server_address, 'port': port} + # check that server_address is in ip address format + is_ip_address = None + try: + is_ip_address = ipaddress.ip_address(server_address) and True + except ValueError: + # the server_addr is not in ip address format + is_ip_address = False + json_body.update( + {'resolvers': resolver, 'init-addr': 'last,libc,none'} if resolver and 
not is_ip_address else {} + ) + + return self._make_hapee_request( + '/services/haproxy/configuration/servers', + method=ProxyAPIMethod.POST, + query_parameters=query_params, + json_body=json_body, + ) + + def remove_server_from_backend(self, backend: str, server_name: str) -> PROXY_API_RESPONSE: + query_params = {'backend': backend, 'force_reload': True} + + return self._make_hapee_request( + f'/services/haproxy/configuration/servers/{server_name}', + method=ProxyAPIMethod.DELETE, + query_parameters=query_params, + ) + + def get_frontends(self) -> PROXY_API_RESPONSE: + return self._make_hapee_request(endpoint='/services/haproxy/configuration/frontends') + + def add_frontend( + self, name: str, port: int, backend: str, mode: CommunicationProtocol = CommunicationProtocol.TCP + ) -> PROXY_API_RESPONSE: + frontend_query_params = {'force_reload': True} + frontend_json_body = {'name': name, 'mode': mode.value, 'default_backend': backend} + + frontend_response = self._make_hapee_request( + '/services/haproxy/configuration/frontends', + method=ProxyAPIMethod.POST, + query_parameters=frontend_query_params, + json_body=frontend_json_body, + ) + frontend_name = frontend_response['name'] + + bind_query_params = {'force_reload': True, 'frontend': frontend_name} + bind_json_body = {'port': port, 'name': f'{frontend_name}_bind'} + + self._make_hapee_request( + '/services/haproxy/configuration/binds', + method=ProxyAPIMethod.POST, + query_parameters=bind_query_params, + json_body=bind_json_body, + ) + + return frontend_response + + def get_backend_server_runtime_settings(self, backend_name: str, server_name: str) -> PROXY_API_RESPONSE: + query_params = {'backend': backend_name, 'name': server_name} + + return self._make_hapee_request( + f'/services/haproxy/runtime/servers/{server_name}', query_parameters=query_params + ) + + def change_backend_server_state( + self, backend_name: str, server_name: str, state: ProxyServerState + ) -> PROXY_API_RESPONSE: + query_params = {'backend': backend_name} + json_body = {'admin_state': state.value} + + return self._make_hapee_request( + f'/services/haproxy/runtime/servers/{server_name}', + method=ProxyAPIMethod.PUT, + query_parameters=query_params, + json_body=json_body, + ) + + def get_backend_stats(self, backend_name: str) -> PROXY_API_RESPONSE: + query_params = {'type': 'backend', 'name': backend_name} + + return self._make_hapee_request('/services/haproxy/stats/native', query_parameters=query_params) + + def get_backend_server_stats(self, backend_name: str, server_name: str) -> PROXY_API_RESPONSE: + query_params = {'type': 'server', 'parent': backend_name, 'name': server_name.lower()} + + return self._make_hapee_request('/services/haproxy/stats/native', query_parameters=query_params) + + def get_proxy_processes(self) -> PROXY_API_RESPONSE: + return self._make_proxy_request('/haproxy/processes') + + def kill_proxy_processes(self, pid_to_exclude: int = 0) -> PROXY_API_RESPONSE: + query_params = {'exclude_pid': pid_to_exclude} + + return self._make_proxy_request( + '/haproxy/processes', method=ProxyAPIMethod.DELETE, query_parameters=query_params + ) + + +def check_proxy_api(func): + def wrapper(self, *args, **kwargs): + if self.api is None: + raise ProxyError(103) + + return func(self, *args, **kwargs) + + return wrapper + + +class Proxy: + def __init__( + self, + wazuh_backend: str, + proxy_api: ProxyAPI, + logger: logging.Logger, + wazuh_connection_port: int = 1514, + resolver: str = None, + ): + self.logger = logger + self.wazuh_backend = wazuh_backend + 
self.wazuh_connection_port = wazuh_connection_port + self.api = proxy_api + self.resolver = resolver + + def initialize(self): + self.api.initialize() + try: + self.api.get_runtime_info()['version'] + except (KeyError, IndexError): + raise ProxyError(104) + + @check_proxy_api + def get_current_pid(self) -> int: + return self.api.get_runtime_info()['pid'] + + @check_proxy_api + def get_current_backends(self) -> dict: + api_response = self.api.get_backends() + self.logger.trace('Obtained proxy backends') + return {backend['name']: backend for backend in api_response['data']} + + def exists_backend(self, backend_name: str) -> bool: + return backend_name in self.get_current_backends() + + @check_proxy_api + def get_current_frontends(self) -> dict: + api_response = self.api.get_frontends() + self.logger.trace('Obtained proxy frontends') + return {frontend['name']: frontend for frontend in api_response['data'] if 'default_backend' in frontend} + + def exists_frontend(self, frontend_name: str) -> bool: + return frontend_name in self.get_current_frontends() + + @check_proxy_api + def add_new_backend( + self, + name: str, + mode: CommunicationProtocol = CommunicationProtocol.TCP, + algorithm: ProxyBalanceAlgorithm = ProxyBalanceAlgorithm.LEAST_CONNECTIONS, + ): + self.api.add_backend(name=name, mode=mode, algorithm=algorithm) + self.logger.trace(f"Added new proxy backend: '{name}'") + + @check_proxy_api + def add_new_frontend( + self, name: str, port: int, backend: str, mode: CommunicationProtocol = CommunicationProtocol.TCP + ): + self.api.add_frontend(name=name, port=port, backend=backend, mode=mode) + self.logger.trace(f"Added new proxy frontend: '{name}'") + + @check_proxy_api + def get_current_backend_servers(self) -> dict: + api_response = self.api.get_backend_servers(self.wazuh_backend) + self.logger.trace('Obtained proxy servers') + return {server['name']: server['address'] for server in api_response['data']} + + @check_proxy_api + def add_wazuh_manager(self, manager_name: str, manager_address: str, resolver: Optional[str]) -> dict: + api_response = self.api.add_server_to_backend( + backend=self.wazuh_backend, + server_name=manager_name, + server_address=manager_address, + port=self.wazuh_connection_port, + resolver=resolver, + ) + self.logger.trace( + f"Added new server '{manager_name}' {manager_address}:{self.wazuh_connection_port} to backend" + f" '{self.wazuh_backend}'" + ) + return api_response + + @check_proxy_api + def remove_wazuh_manager(self, manager_name: str) -> dict: + api_response = self.api.remove_server_from_backend(backend=self.wazuh_backend, server_name=manager_name) + + self.logger.trace(f"Removed server {manager_name} from backend '{self.wazuh_backend}'") + return api_response + + @check_proxy_api + def restrain_server_new_connections(self, server_name: str) -> dict: + api_response = self.api.change_backend_server_state( + backend_name=self.wazuh_backend, server_name=server_name, state=ProxyServerState.DRAIN + ) + self.logger.trace(f"Changed Wazuh server '{server_name}' to {ProxyServerState.DRAIN.value.upper()} state") + return api_response + + @check_proxy_api + def allow_server_new_connections(self, server_name: str) -> dict: + api_response = self.api.change_backend_server_state( + backend_name=self.wazuh_backend, server_name=server_name, state=ProxyServerState.READY + ) + self.logger.trace(f"Changed Wazuh server '{server_name}' to {ProxyServerState.READY.value.upper()} state") + return api_response + + @check_proxy_api + def get_wazuh_server_stats(self, server_name: 
str) -> dict: + server_stats = self.api.get_backend_server_stats(backend_name=self.wazuh_backend, server_name=server_name)[0][ + 'stats' + ][0]['stats'] + + self.logger.trace(f"Obtained server '{server_name}' stats") + return server_stats + + @check_proxy_api + def is_server_drain(self, server_name: str) -> bool: + server_stats = self.api.get_backend_server_runtime_settings( + backend_name=self.wazuh_backend, server_name=server_name + ) + return server_stats['admin_state'] == ProxyServerState.DRAIN.value + + @check_proxy_api + def get_wazuh_backend_stats(self, only_actives: bool = True) -> dict: + backend_servers = [server['name'] for server in self.api.get_backend_servers(self.wazuh_backend)['data']] + stats = {} + + for server_name in backend_servers: + server_stats = self.get_wazuh_server_stats(server_name=server_name) + if only_actives and server_stats['status'] != 'UP': + continue + stats[server_name] = server_stats + + return stats + + @check_proxy_api + def get_wazuh_backend_server_connections(self) -> dict: + current_connections_key = 'scur' + server_stats = self.get_wazuh_backend_stats() + return {server_name: server_stats[server_name][current_connections_key] for server_name in server_stats} + + @check_proxy_api + def is_proxy_process_single(self) -> bool: + haproxy_processes = self.api.get_proxy_processes() + return len(haproxy_processes['data']['processes']) == 1 diff --git a/framework/wazuh/core/cluster/hap_helper/wazuh.py b/framework/wazuh/core/cluster/hap_helper/wazuh.py new file mode 100644 index 00000000000..ff0d40fa63e --- /dev/null +++ b/framework/wazuh/core/cluster/hap_helper/wazuh.py @@ -0,0 +1,185 @@ +import logging +import re +import time +from collections import defaultdict +from enum import Enum + +import requests +import urllib3 +from wazuh_coordinator.exception import WazuhError + +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) # type: ignore + + +class WazuhAPIMethod(Enum): + GET = 'get' + POST = 'post' + PUT = 'put' + DELETE = 'delete' + + +class WazuhAgent: + RECONNECTION_VERSION_MAJOR = 4 + RECONNECTION_VERSION_MINOR = 3 + AGENT_VERSION_REGEX = re.compile(r'.*v(\d+)\.(\d+)\.\d+') + + @classmethod + def can_reconnect(cls, agent_version: str) -> bool: + major, minor = cls.AGENT_VERSION_REGEX.match(agent_version).groups() + return int(major) >= cls.RECONNECTION_VERSION_MAJOR and int(minor) >= cls.RECONNECTION_VERSION_MINOR + + @classmethod + def get_agents_able_to_reconnect(cls, agents_list: list[dict]) -> list[str]: + return [agent['id'] for agent in agents_list if cls.can_reconnect(agent['version'])] + + +class WazuhAPI: + AGENTS_MAX_LIMIT = 100000 + API_RETRIES = 5 + TIMEOUT_ERROR_CODE = 3021 + + def __init__( + self, + address: str, + logger: logging.Logger, + port: int = 55000, + username: str = 'wazuh', + password: str = 'wazuh', + excluded_nodes: list | None = None, + ): + self.logger = logger + self.address = address + self.port = port + self.username = username + self.password = password + self.excluded_nodes = excluded_nodes or [] + + self.token = '' + + def initialize(self): + try: + requests.get(f'https://{self.address}:{self.port}/', verify=False) + except requests.ConnectionError: + raise WazuhError(99, extra_msg='Check connectivity and the configuration file') + except requests.RequestException as req_exc: + raise WazuhError(99, extra_msg=req_exc) + + def _obtain_token(self, token_endpoint_method: WazuhAPIMethod = WazuhAPIMethod.GET): + endpoint = f'https://{self.address}:{self.port}/security/user/authenticate' + response = 
getattr(requests, str(token_endpoint_method.value))( + endpoint, auth=(self.username, self.password), verify=False + ) + if response.status_code == 200: + self.token = response.json()['data']['token'] + self.logger.debug(f'Requested API token ({self.username})') + elif response.status_code == 405: + self._obtain_token(token_endpoint_method=WazuhAPIMethod.POST) + elif response.status_code == 401: + raise WazuhError(102) + else: + raise WazuhError(100, extra_msg=f'Full response: {response.status_code} | {response.json()}') + + def _security_headers(self): + if not self.token: + self._obtain_token() + + return {'Authorization': f'Bearer {self.token}'} + + def _make_request( + self, + endpoint: str, + method: WazuhAPIMethod = WazuhAPIMethod.GET, + query_parameters: dict = None, + json_body: dict = None, + ) -> dict: + response = self._retry_request_if_failed(endpoint, method, query_parameters, json_body) + if response.status_code == 200: + return response.json() + elif response.status_code == 401: + self._obtain_token() + return self._make_request(endpoint, method=method, query_parameters=query_parameters, json_body=json_body) + elif response.status_code == 403: + raise WazuhError(103, extra_msg=f"Endpoint '{endpoint}'") + else: + raise WazuhError(101, extra_msg=f'Full response: {response.status_code} | {response.json()}') + + def _retry_request_if_failed( + self, + endpoint: str, + method: WazuhAPIMethod = WazuhAPIMethod.GET, + query_parameters: dict = None, + json_body: dict = None, + ) -> requests.Response: + last_handled_exception = '' + uri = f'https://{self.address}:{self.port}{endpoint}' + + for _ in range(self.API_RETRIES): + try: + response = getattr(requests, str(method.value))( + uri, headers=self._security_headers(), json=json_body, params=query_parameters, verify=False + ) + self.logger.trace( + f"{method.value.upper()} '{endpoint}' - Parameters: {query_parameters or {} }" + f' - JSON body: {json_body or {} } [{response.status_code}]' + ) + if response.status_code == 500: + if response.json().get('error', '') == self.TIMEOUT_ERROR_CODE: + last_handled_exception = TimeoutError(response.json()['detail']) + self.logger.debug('Timeout executing API request') + else: + last_handled_exception = WazuhError(101, extra_msg=str(response.json())) + self.logger.debug('Unexpected error executing API request') + time.sleep(1) + else: + return response + except requests.ConnectionError as request_err: + last_handled_exception = request_err + self.logger.debug(f'Could not connect to Wazuh API') + time.sleep(1) + else: + raise WazuhError(104, str(last_handled_exception)) + + def get_cluster_nodes(self) -> dict: + api_response = self._make_request('/cluster/nodes') + return { + item['name']: item['ip'] + for item in api_response['data']['affected_items'] + if item['name'] not in self.excluded_nodes + } + + def reconnect_agents(self, agent_list: list = None) -> dict: + query_params = None + if agent_list is not None: + query_params = {'agents_list': ','.join(agent_list)} + + return self._make_request('/agents/reconnect', method=WazuhAPIMethod.PUT, query_parameters=query_params) + + def get_agents_node_distribution(self) -> dict: + agent_distribution = defaultdict(list) + + query_params = { + 'select': 'node_name,version', + 'sort': '-version,id', + 'status': 'active', + 'q': 'id!=000', + 'limit': self.AGENTS_MAX_LIMIT, + } + api_response = self._make_request('/agents', query_parameters=query_params) + + for agent in api_response['data']['affected_items']: + 
agent_distribution[agent['node_name']].append({'id': agent['id'], 'version': agent['version']}) + + return agent_distribution + + def get_agents_belonging_to_node(self, node_name: str, limit: int = None) -> list[dict]: + query_params = { + 'select': 'version', + 'sort': '-version,id', + 'status': 'active', + 'q': 'id!=000', + 'node_name': node_name, + 'limit': limit or self.AGENTS_MAX_LIMIT, + } + api_response = self._make_request('/agents', query_parameters=query_params) + + return api_response['data']['affected_items'] From ed33118c8c939d2ed8a33f4ee55b7a664f8d40f6 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Wed, 7 Feb 2024 15:16:39 -0300 Subject: [PATCH 323/419] Migrate coordinator CLI to a stand-alone class --- .../hap_helper/data/configuration.yaml | 2 +- .../core/cluster/hap_helper/hap_helper.py | 221 +++++------------- 2 files changed, 58 insertions(+), 165 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml b/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml index 524a58f15bf..641eb2e9d77 100644 --- a/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml +++ b/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml @@ -38,7 +38,7 @@ proxy: # the haproxy.cfg file. # resolver: wazuh_resolver -coordinator: +hap_helper: # Seconds to sleep between each coordinator iteration sleep_time: 60 # Seconds to sleep after the end of the agent reconnection phase diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index 027b0a7dfae..7d8d37e4d77 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -1,17 +1,17 @@ -import argparse import logging import time from math import ceil, floor +from os import path -from wazuh_coordinator.configuration import parse_configuration -from wazuh_coordinator.custom_logging import CustomLogger -from wazuh_coordinator.exception import CoordinatorError, ProxyError, WazuhError -from wazuh_coordinator.process import run_in_background -from wazuh_coordinator.proxy import Proxy, ProxyAPI, ProxyServerState -from wazuh_coordinator.wazuh import WazuhAgent, WazuhAPI +from wazuh.core import common +from wazuh.core.cluster.hap_helper.configuration import parse_configuration +from wazuh.core.cluster.hap_helper.custom_logging import CustomLogger +from wazuh.core.cluster.hap_helper.exception import CoordinatorError, ProxyError, WazuhError +from wazuh.core.cluster.hap_helper.proxy import Proxy, ProxyAPI, ProxyServerState +from wazuh.core.cluster.hap_helper.wazuh import WazuhAgent, WazuhAPI -class Coordinator: +class HAPHelper: UPDATED_BACKEND_STATUS_TIMEOUT: int = 60 AGENT_STATUS_SYNC_TIME: int = 25 # Default agent notify time + cluster sync + 5s SERVER_ADMIN_STATE_DELAY: int = 5 @@ -291,8 +291,56 @@ def manage_wazuh_cluster_nodes(self): ) time.sleep(self.sleep_time) + @classmethod + async def run(cls): + try: + configuration = parse_configuration() + main_logger, proxy_logger, wazuh_api_logger = setup_loggers( + log_level=configuration['hap_helper'].get('log_level', logging.INFO) + ) + + proxy_api = ProxyAPI( + username=configuration['proxy']['api']['user'], + password=configuration['proxy']['api']['password'], + address=configuration['proxy']['api']['address'], + port=configuration['proxy']['api']['port'], + ) + proxy = Proxy( + wazuh_backend=configuration['proxy']['backend'], + wazuh_connection_port=configuration['wazuh']['connection']['port'], + 
proxy_api=proxy_api, + logger=proxy_logger, + resolver=configuration['proxy'].get('resolver', None), + ) + + wazuh_api = WazuhAPI( + address=configuration['wazuh']['api']['address'], + port=configuration['wazuh']['api']['port'], + username=configuration['wazuh']['api']['user'], + password=configuration['wazuh']['api']['password'], + excluded_nodes=configuration['wazuh']['excluded_nodes'], + logger=wazuh_api_logger, + ) + + helper = cls(proxy=proxy, wazuh_api=wazuh_api, logger=main_logger, options=configuration['hap_helper']) + + helper.initialize_components() + helper.initialize_wazuh_cluster_configuration() + + main_logger.info('Starting HAProxy Helper on auto mode') + await helper.manage_wazuh_cluster_nodes() + except (CoordinatorError, ProxyError) as main_exc: + main_logger.error(str(main_exc)) + except KeyboardInterrupt: + pass + except Exception as unexpected_exc: + main_logger.critical(f'Unexpected exception: {unexpected_exc}', exc_info=True) + finally: + main_logger.info('Process ended') + -def setup_loggers(log_file_path: str, log_level: int) -> tuple[logging.Logger, logging.Logger, logging.Logger]: +def setup_loggers(str, log_level: int) -> tuple[logging.Logger, logging.Logger, logging.Logger]: + log_file_path = path.join(common.WAZUH_LOGS, 'hap_helper.log') main_logger = CustomLogger('wazuh-coordinator', file_path=log_file_path, level=log_level).get_logger() proxy_logger = CustomLogger('proxy-logger', file_path=log_file_path, level=log_level, tag='Proxy').get_logger() wazuh_api_logger = CustomLogger( @@ -300,158 +348,3 @@ def setup_loggers(log_file_path: str, log_level: int) -> tuple[logging.Logger, l ).get_logger() return main_logger, proxy_logger, wazuh_api_logger - - -def parse_arguments() -> argparse.Namespace: - parser = argparse.ArgumentParser(description='Wazuh coordinator') - group = parser.add_mutually_exclusive_group() - group.add_argument( - '--auto', - dest='auto', - action='store_true', - help='Run coordinator capabilities on auto mode (full functionality)', - ) - group.add_argument( - '-a', - '--add-server', - dest='add_server', - action='store', - type=str, - nargs=2, - metavar=('SERVER_NAME', 'SERVER_ADDRESS'), - help='Add a new server to configured backend', - ) - group.add_argument( - '-r', - '--remove-server', - dest='remove_server', - action='store', - type=str, - metavar='SERVER_NAME', - help='Remove server from configured backend', - ) - group.add_argument( - '-cb', - '--check-balance', - dest='check_for_balance', - action='store_true', - help='Check if the environment needs to be balanced', - ) - parser.add_argument( - '-c', - '--configuration-file', - dest='configuration_file', - action='store', - default='', - help='Path to the test result file', - ) - parser.add_argument('-l', '--log-file', dest='log_file', action='store', help='Path to the logging file') - parser.add_argument( - '-b', '--background', dest='background', action='store_true', help='Run coordinator on background' - ) - parser.add_argument( - '-d', - '--debug', - dest='log_debug', - action='store', - type=str, - choices=['debug', 'trace'], - help='Enable debug logging', - ) - - return parser.parse_args() - - -def main(): - arguments = parse_arguments() - - user_config_path = arguments.configuration_file - log_file = arguments.log_file - log_level = ( - CustomLogger.TRACE_LEVEL - if arguments.log_debug == 'trace' - else logging.DEBUG - if arguments.log_debug == 'debug' - else logging.INFO - ) - - if arguments.background: - run_in_background() - - main_logger, proxy_logger, wazuh_api_logger = 
setup_loggers(log_file_path=log_file, log_level=log_level) - - try: - configuration = parse_configuration(custom_configuration_path=user_config_path) - resolver = configuration['proxy'].get('resolver', None) - - proxy_api = ProxyAPI( - username=configuration['proxy']['api']['user'], - password=configuration['proxy']['api']['password'], - address=configuration['proxy']['api']['address'], - port=configuration['proxy']['api']['port'], - ) - proxy = Proxy( - wazuh_backend=configuration['proxy']['backend'], - wazuh_connection_port=configuration['wazuh']['connection']['port'], - proxy_api=proxy_api, - logger=proxy_logger, - resolver=resolver, - ) - - wazuh_api = WazuhAPI( - address=configuration['wazuh']['api']['address'], - port=configuration['wazuh']['api']['port'], - username=configuration['wazuh']['api']['user'], - password=configuration['wazuh']['api']['password'], - excluded_nodes=configuration['wazuh']['excluded_nodes'], - logger=wazuh_api_logger, - ) - - coordinator = Coordinator( - proxy=proxy, wazuh_api=wazuh_api, logger=main_logger, options=configuration['coordinator'] - ) - - coordinator.initialize_components() - coordinator.initialize_wazuh_cluster_configuration() - if arguments.auto: - main_logger.info('Starting coordinator on auto mode') - coordinator.manage_wazuh_cluster_nodes() - elif arguments.add_server: - server_name, server_address = arguments.add_server - coordinator.backend_servers_state_healthcheck() - proxy.add_wazuh_manager(manager_name=server_name, manager_address=server_address, resolver=resolver) - main_logger.info(f"Server '{server_name}' was successfully added") - main_logger.info(f'Attempting to migrate connections') - coordinator.migrate_old_connections(new_servers=[server_name], deleted_servers=[]) - elif arguments.remove_server: - server_name = arguments.remove_server - coordinator.backend_servers_state_healthcheck() - proxy.remove_wazuh_manager(manager_name=server_name) - main_logger.info(f"Server '{server_name}' was successfully removed") - main_logger.info(f'Attempting to migrate connections') - coordinator.migrate_old_connections(new_servers=[], deleted_servers=[server_name]) - elif arguments.check_for_balance: - if coordinator.check_proxy_processes(): - main_logger.info(f'Sleeping {coordinator.AGENT_STATUS_SYNC_TIME}s before continuing...') - time.sleep(coordinator.AGENT_STATUS_SYNC_TIME) - unbalanced_connections = coordinator.check_for_balance( - current_connections_distribution=proxy.get_wazuh_backend_server_connections() - ) - if not unbalanced_connections: - main_logger.info('Load balancer backend is balanced') - exit(0) - main_logger.info(f'Agent imbalance detected. Surplus agents per node: {unbalanced_connections}') - if input(' Do you wish to balance agents? 
(y/N): ').lower() == 'y': - coordinator.balance_agents(affected_servers=unbalanced_connections) - except (CoordinatorError, ProxyError) as main_exc: - main_logger.error(str(main_exc)) - except KeyboardInterrupt: - pass - except Exception as unexpected_exc: - main_logger.critical(f'Unexpected exception: {unexpected_exc}', exc_info=True) - finally: - main_logger.info('Process ended') - - -if __name__ == '__main__': - main() From 6c14a921cc926861d9495be138b3dca87d98edc3 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Wed, 7 Feb 2024 15:23:33 -0300 Subject: [PATCH 324/419] Remove unnecessary module --- .../wazuh/core/cluster/hap_helper/process.py | 38 ------------------- 1 file changed, 38 deletions(-) delete mode 100644 framework/wazuh/core/cluster/hap_helper/process.py diff --git a/framework/wazuh/core/cluster/hap_helper/process.py b/framework/wazuh/core/cluster/hap_helper/process.py deleted file mode 100644 index a2932af915b..00000000000 --- a/framework/wazuh/core/cluster/hap_helper/process.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import sys - - -def run_in_background(): - try: - pid = os.fork() - if pid > 0: - # Exit first parent - sys.exit(0) - except OSError as e: - sys.stderr.write('fork #1 failed: %d (%s)\n' % (e.errno, e.strerror)) - sys.exit(1) - - os.setsid() - - # Do second fork - try: - pid = os.fork() - if pid > 0: - # Exit from second parent - sys.exit(0) - except OSError as e: - sys.stderr.write('fork #2 failed: %d (%s)\n' % (e.errno, e.strerror)) - sys.exit(1) - - # Redirect standard file descriptors - sys.stdout.flush() - sys.stderr.flush() - si = open('/dev/null', 'r') - so = open('/dev/null', 'a+') - se = open('/dev/null', 'ab+', 0) - os.dup2(si.fileno(), sys.stdin.fileno()) - os.dup2(so.fileno(), sys.stdout.fileno()) - os.dup2(se.fileno(), sys.stderr.fileno()) - - # Decouple from parent environment - os.chdir('/') From d1f6cd35df04ad4d2eaf6939878ba392f2fb9adf Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Wed, 14 Feb 2024 17:12:16 -0300 Subject: [PATCH 325/419] Make async HAProxy helper code --- .../core/cluster/hap_helper/configuration.py | 4 +- .../core/cluster/hap_helper/custom_logging.py | 2 +- .../hap_helper/data/configuration.yaml | 8 +- .../core/cluster/hap_helper/exception.py | 2 +- .../core/cluster/hap_helper/hap_helper.py | 158 +++++++------- .../wazuh/core/cluster/hap_helper/proxy.py | 198 +++++++++--------- .../wazuh/core/cluster/hap_helper/wazuh.py | 171 +++++---------- framework/wazuh/core/cluster/local_server.py | 10 +- 8 files changed, 251 insertions(+), 302 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/configuration.py b/framework/wazuh/core/cluster/hap_helper/configuration.py index 83f78094e9d..e411a1bb780 100644 --- a/framework/wazuh/core/cluster/hap_helper/configuration.py +++ b/framework/wazuh/core/cluster/hap_helper/configuration.py @@ -3,7 +3,7 @@ import jsonschema import yaml -from wazuh_coordinator.exception import CoordinatorError +from wazuh.core.cluster.hap_helper.exception import HAPHelperError def validate_custom_configuration(custom_configuration: dict): @@ -15,7 +15,7 @@ def validate_custom_configuration(custom_configuration: dict): try: jsonschema.validate(instance=custom_configuration, schema=json_schema) except jsonschema.ValidationError as validation_err: - raise CoordinatorError(101, extra_msg=f"({'> '.join(validation_err.path)}) {validation_err.message}") + raise HAPHelperError(101, extra_msg=f"({'> '.join(validation_err.path)}) {validation_err.message}") def merge_configurations(default: dict, config: 
dict) -> dict: diff --git a/framework/wazuh/core/cluster/hap_helper/custom_logging.py b/framework/wazuh/core/cluster/hap_helper/custom_logging.py index f4f2403e6fa..074172e0411 100644 --- a/framework/wazuh/core/cluster/hap_helper/custom_logging.py +++ b/framework/wazuh/core/cluster/hap_helper/custom_logging.py @@ -6,7 +6,7 @@ from copy import copy from glob import glob from logging.handlers import TimedRotatingFileHandler -from os import chmod, unlink, path, makedirs +from os import chmod, makedirs, path, unlink class CustomFileRotatingHandler(TimedRotatingFileHandler): diff --git a/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml b/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml index 641eb2e9d77..3c097438efd 100644 --- a/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml +++ b/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml @@ -15,20 +15,19 @@ wazuh: # Wazuh agents connection service port (TCP) port: 1514 # Wazuh cluster nodes to exclude on auto mode - excluded_nodes: [ ] + excluded_nodes: [] proxy: # Wazuh Proxy API configuration api: # Wazuh Proxy API address - address: localhost + address: wazuh-proxy # Wazuh Proxy API port port: 7777 # Wazuh Proxy API username user: wazuh # Wazuh Proxy API password password: wazuh - # Defined Proxy backend (frontend will append '_front' to it) backend: wazuh_cluster @@ -44,10 +43,11 @@ hap_helper: # Seconds to sleep after the end of the agent reconnection phase agent_reconnection_stability_time: 60 # Agent chunk size (each chunk defines the max number of agents to be reconnected at once) - agent_reconnection_chunk_size: 100 + agent_reconnection_chunk_size: 120 # Seconds to sleep after an agent chunk reconnection (if there is more than one) agent_reconnection_time: 5 # Agent imbalance tolerance agent_tolerance: 0.1 # Time in minutes before removing a disconnected Wazuh node from the backend remove_disconnected_node_after: 60 + log_level: debug diff --git a/framework/wazuh/core/cluster/hap_helper/exception.py b/framework/wazuh/core/cluster/hap_helper/exception.py index 13dfbfb5140..71fb7f48eef 100644 --- a/framework/wazuh/core/cluster/hap_helper/exception.py +++ b/framework/wazuh/core/cluster/hap_helper/exception.py @@ -12,7 +12,7 @@ def __str__(self): return f'({self.PREFIX}{self._code}) {self._message}' -class CoordinatorError(CustomException): +class HAPHelperError(CustomException): PREFIX = 'C' ERRORS = {100: 'Server status check timed out after adding new servers', 101: 'User configuration is not valid'} diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index 7d8d37e4d77..aab2317502f 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -1,12 +1,13 @@ import logging import time +from asyncio import sleep from math import ceil, floor from os import path from wazuh.core import common from wazuh.core.cluster.hap_helper.configuration import parse_configuration from wazuh.core.cluster.hap_helper.custom_logging import CustomLogger -from wazuh.core.cluster.hap_helper.exception import CoordinatorError, ProxyError, WazuhError +from wazuh.core.cluster.hap_helper.exception import HAPHelperError, ProxyError, WazuhError from wazuh.core.cluster.hap_helper.proxy import Proxy, ProxyAPI, ProxyServerState from wazuh.core.cluster.hap_helper.wazuh import WazuhAgent, WazuhAPI @@ -28,33 +29,33 @@ def __init__(self, proxy: Proxy, wazuh_api: WazuhAPI, logger: 
logging.Logger, op self.agent_tolerance: float = options['agent_tolerance'] self.remove_disconnected_node_after: int = options['remove_disconnected_node_after'] - def initialize_components(self): + async def initialize_components(self): try: - self.wazuh_api.initialize() - self.proxy.initialize() + # self.wazuh_api.initialize() + await self.proxy.initialize() self.logger.info('Main components were initialized') except (WazuhError, ProxyError) as init_exc: self.logger.critical('Cannot initialize main components') self.logger.critical(init_exc) exit(1) - def initialize_wazuh_cluster_configuration(self): - if not self.proxy.exists_backend(self.proxy.wazuh_backend): + async def initialize_wazuh_cluster_configuration(self): + if not await self.proxy.exists_backend(self.proxy.wazuh_backend): self.logger.info(f"Could not find Wazuh backend '{self.proxy.wazuh_backend}'") - self.proxy.add_new_backend(name=self.proxy.wazuh_backend) + await self.proxy.add_new_backend(name=self.proxy.wazuh_backend) self.logger.info('Added Wazuh backend') - if not self.proxy.exists_frontend(f'{self.proxy.wazuh_backend}_front'): + if not await self.proxy.exists_frontend(f'{self.proxy.wazuh_backend}_front'): self.logger.info(f"Could not find Wazuh frontend '{self.proxy.wazuh_backend}_front'") - self.proxy.add_new_frontend( + await self.proxy.add_new_frontend( name=f'{self.proxy.wazuh_backend}_front', port=self.proxy.wazuh_connection_port, backend=self.proxy.wazuh_backend, ) self.logger.info('Added Wazuh frontend') - def check_node_to_delete(self, node_name: str) -> bool: - node_downtime = self.proxy.get_wazuh_server_stats(server_name=node_name)['lastchg'] + async def check_node_to_delete(self, node_name: str) -> bool: + node_downtime = (await self.proxy.get_wazuh_server_stats(server_name=node_name))['lastchg'] self.logger.trace(f"Server '{node_name}' has been disconnected for {node_downtime}s") if node_downtime < self.remove_disconnected_node_after * 60: @@ -65,21 +66,24 @@ def check_node_to_delete(self, node_name: str) -> bool: ) return True - def check_proxy_processes(self, auto_mode: bool = False, warn: bool = True) -> bool: - if not self.proxy.is_proxy_process_single(): + # TODO: This must be deprecated + async def check_proxy_processes(self, auto_mode: bool = False, warn: bool = True) -> bool: + if not await self.proxy.is_proxy_process_single(): warn and self.logger.warning('Detected more than one Proxy processes') if not auto_mode and input(' Do you wish to fix them? (y/N): ').lower() != 'y': return False - self.manage_proxy_processes() + await self.manage_proxy_processes() return True - def backend_servers_state_healthcheck(self): - for server in self.proxy.get_current_backend_servers().keys(): - if self.proxy.is_server_drain(server_name=server): + async def backend_servers_state_healthcheck(self): + for server in (await self.proxy.get_current_backend_servers()).keys(): + if await self.proxy.is_server_drain(server_name=server): self.logger.warning(f"Server '{server}' was found {ProxyServerState.DRAIN.value.upper()}. 
Fixing it") - self.proxy.allow_server_new_connections(server_name=server) + await self.proxy.allow_server_new_connections(server_name=server) - def obtain_nodes_to_configure(self, wazuh_cluster_nodes: dict, proxy_backend_servers: dict) -> tuple[list, list]: + async def obtain_nodes_to_configure( + self, wazuh_cluster_nodes: dict, proxy_backend_servers: dict + ) -> tuple[list, list]: add_nodes, remove_nodes = [], [] for node_name, node_address in wazuh_cluster_nodes.items(): @@ -91,7 +95,7 @@ def obtain_nodes_to_configure(self, wazuh_cluster_nodes: dict, proxy_backend_ser for node_name in proxy_backend_servers.keys() - wazuh_cluster_nodes.keys(): if node_name in self.wazuh_api.excluded_nodes: self.logger.info(f"Server '{node_name}' has been excluded but is currently active. Removing it") - elif self.check_node_to_delete(node_name): + elif await self.check_node_to_delete(node_name): pass else: continue @@ -99,59 +103,60 @@ def obtain_nodes_to_configure(self, wazuh_cluster_nodes: dict, proxy_backend_ser return add_nodes, remove_nodes - def update_agent_connections(self, agent_list: list[str]): + async def update_agent_connections(self, agent_list: list[str]): self.logger.debug('Reconnecting agents') self.logger.debug( f'Agent reconnection chunk size is set to {self.agent_reconnection_chunk_size}. ' f'Total iterations: {ceil(len(agent_list) / self.agent_reconnection_chunk_size)}' ) for index in range(0, len(agent_list), self.agent_reconnection_chunk_size): - self.wazuh_api.reconnect_agents(agent_list[index : index + self.agent_reconnection_chunk_size]) + await self.wazuh_api.reconnect_agents(agent_list[index : index + self.agent_reconnection_chunk_size]) self.logger.debug(f'Delay between agent reconnections. Sleeping {self.agent_reconnection_time}s...') - time.sleep(self.agent_reconnection_time) + await sleep(self.agent_reconnection_time) - def force_agent_reconnection_to_server(self, chosen_server: str, agents_list: list[dict]): - current_servers = self.proxy.get_current_backend_servers().keys() + async def force_agent_reconnection_to_server(self, chosen_server: str, agents_list: list[dict]): + current_servers = (await self.proxy.get_current_backend_servers()).keys() affected_servers = current_servers - {chosen_server} for server_name in affected_servers: - self.proxy.restrain_server_new_connections(server_name=server_name) - time.sleep(self.SERVER_ADMIN_STATE_DELAY) + await self.proxy.restrain_server_new_connections(server_name=server_name) + await sleep(self.SERVER_ADMIN_STATE_DELAY) eligible_agents = WazuhAgent.get_agents_able_to_reconnect(agents_list=agents_list) if len(eligible_agents) != len(agents_list): self.logger.warning( f"Some agents from '{chosen_server}' are not compatible with the reconnection endpoint." 
' Those connections will be balanced afterwards' ) - self.update_agent_connections(agent_list=eligible_agents) + await self.update_agent_connections(agent_list=eligible_agents) for server_name in affected_servers: - self.proxy.allow_server_new_connections(server_name=server_name) - time.sleep(self.SERVER_ADMIN_STATE_DELAY) + await self.proxy.allow_server_new_connections(server_name=server_name) + await sleep(self.SERVER_ADMIN_STATE_DELAY) - def manage_proxy_processes(self): - current_proxy_pid = self.proxy.api.get_runtime_info()['pid'] - response = self.proxy.api.kill_proxy_processes(pid_to_exclude=current_proxy_pid) + # TODO: This must be deprecated + async def manage_proxy_processes(self): + current_proxy_pid = (await self.proxy.api.get_runtime_info())['pid'] + response = await self.proxy.api.kill_proxy_processes(pid_to_exclude=current_proxy_pid) if response['error'] > 0: self.logger.error("Could not manage all proxy processes: " f"{response['data']}") elif len(response['data']) > 0: self.logger.info('Managed proxy processes') - def migrate_old_connections(self, new_servers: list[str], deleted_servers: list[str]): + async def migrate_old_connections(self, new_servers: list[str], deleted_servers: list[str]): wazuh_backend_stats = {} backend_stats_iteration = 1 while any([server not in wazuh_backend_stats for server in new_servers]): if backend_stats_iteration > self.UPDATED_BACKEND_STATUS_TIMEOUT: self.logger.error(f'Some of the new servers did not go UP: {set(new_servers) - wazuh_backend_stats}') - raise CoordinatorError(100) + raise HAPHelperError(100) self.logger.debug('Waiting for new servers to go UP') time.sleep(1) backend_stats_iteration += 1 - wazuh_backend_stats = self.proxy.get_wazuh_backend_stats().keys() + wazuh_backend_stats = (await self.proxy.get_wazuh_backend_stats()).keys() self.logger.debug('All new servers are UP') - previous_agent_distribution = self.wazuh_api.get_agents_node_distribution() - previous_connection_distribution = self.proxy.get_wazuh_backend_server_connections() | { + previous_agent_distribution = await self.wazuh_api.get_agents_node_distribution() + previous_connection_distribution = await self.proxy.get_wazuh_backend_server_connections() | { server: len(previous_agent_distribution[server]) for server in previous_agent_distribution if server not in new_servers @@ -173,17 +178,17 @@ def migrate_old_connections(self, new_servers: list[str], deleted_servers: list[ pass self.logger.info(f"Migrating {len(agents)} connections from server '{wazuh_worker}'") - self.force_agent_reconnection_to_server(chosen_server=wazuh_worker, agents_list=agents) + await self.force_agent_reconnection_to_server(chosen_server=wazuh_worker, agents_list=agents) if agents_to_balance: self.logger.info('Balancing exceeding connections after changes on the Wazuh backend') - self.update_agent_connections(agent_list=agents_to_balance) + await self.update_agent_connections(agent_list=agents_to_balance) - self.check_proxy_processes(auto_mode=True, warn=False) + await self.check_proxy_processes(auto_mode=True, warn=False) self.logger.info('Waiting for agent connections stability') self.logger.debug(f'Sleeping {self.agent_reconnection_stability_time}s...') - time.sleep(self.agent_reconnection_stability_time) + await sleep(self.agent_reconnection_stability_time) def check_for_balance(self, current_connections_distribution: dict) -> dict: if not current_connections_distribution: @@ -217,10 +222,10 @@ def check_for_balance(self, current_connections_distribution: dict) -> dict: return 
unbalanced_connections - def calculate_agents_to_balance(self, affected_servers: dict) -> dict: + async def calculate_agents_to_balance(self, affected_servers: dict) -> dict: agents_to_balance = {} for server_name, n_agents in affected_servers.items(): - agent_candidates = self.wazuh_api.get_agents_belonging_to_node(node_name=server_name, limit=n_agents) + agent_candidates = await self.wazuh_api.get_agents_belonging_to_node(node_name=server_name, limit=n_agents) eligible_agents = WazuhAgent.get_agents_able_to_reconnect(agents_list=agent_candidates) if len(eligible_agents) != len(agent_candidates): self.logger.warning( @@ -231,22 +236,22 @@ def calculate_agents_to_balance(self, affected_servers: dict) -> dict: return agents_to_balance - def balance_agents(self, affected_servers: dict): + async def balance_agents(self, affected_servers: dict): self.logger.info('Attempting to balance agent connections') - agents_to_balance = self.calculate_agents_to_balance(affected_servers) + agents_to_balance = await self.calculate_agents_to_balance(affected_servers) for node_name, agent_ids in agents_to_balance.items(): self.logger.info(f"Balancing {len(agent_ids)} agents from '{node_name}'") - self.update_agent_connections(agent_list=agent_ids) + await self.update_agent_connections(agent_list=agent_ids) - def manage_wazuh_cluster_nodes(self): + async def manage_wazuh_cluster_nodes(self): while True: try: - self.backend_servers_state_healthcheck() - self.check_proxy_processes(auto_mode=True) and time.sleep(self.AGENT_STATUS_SYNC_TIME) - current_wazuh_cluster = self.wazuh_api.get_cluster_nodes() - current_proxy_backend = self.proxy.get_current_backend_servers() + await self.backend_servers_state_healthcheck() + await self.check_proxy_processes(auto_mode=True) and await sleep(self.AGENT_STATUS_SYNC_TIME) + current_wazuh_cluster = await self.wazuh_api.get_cluster_nodes() + current_proxy_backend = await self.proxy.get_current_backend_servers() - nodes_to_add, nodes_to_remove = self.obtain_nodes_to_configure( + nodes_to_add, nodes_to_remove = await self.obtain_nodes_to_configure( current_wazuh_cluster, current_proxy_backend ) if nodes_to_add or nodes_to_remove: @@ -256,47 +261,46 @@ def manage_wazuh_cluster_nodes(self): self.logger.info('Attempting to update proxy backend') for node_to_remove in nodes_to_remove: - self.proxy.remove_wazuh_manager(manager_name=node_to_remove) + await self.proxy.remove_wazuh_manager(manager_name=node_to_remove) for node_to_add in nodes_to_add: - self.proxy.add_wazuh_manager( + await self.proxy.add_wazuh_manager( manager_name=node_to_add, manager_address=current_wazuh_cluster[node_to_add], resolver=self.proxy.resolver, ) - self.migrate_old_connections(new_servers=nodes_to_add, deleted_servers=nodes_to_remove) + await self.migrate_old_connections(new_servers=nodes_to_add, deleted_servers=nodes_to_remove) continue self.logger.info('Load balancer backend is up to date') unbalanced_connections = self.check_for_balance( - current_connections_distribution=self.proxy.get_wazuh_backend_server_connections() + current_connections_distribution=await self.proxy.get_wazuh_backend_server_connections() ) if not unbalanced_connections: - if self.logger.level <= logging.DEBUG: - self.logger.debug( - 'Current backend stats: ' f'{self.proxy.get_wazuh_backend_server_connections()}' - ) + self.logger.debug( + 'Current backend stats: ' f'{await self.proxy.get_wazuh_backend_server_connections()}' + ) self.logger.info('Load balancer backend is balanced') else: self.logger.info('Agent imbalance detected. 
Waiting for agent status sync...') - time.sleep(self.AGENT_STATUS_SYNC_TIME) - self.balance_agents(affected_servers=unbalanced_connections) + await sleep(self.AGENT_STATUS_SYNC_TIME) + await self.balance_agents(affected_servers=unbalanced_connections) self.logger.debug(f'Sleeping {self.sleep_time}s...') - time.sleep(self.sleep_time) - except (CoordinatorError, ProxyError, WazuhError) as handled_exc: + await sleep(self.sleep_time) + except (HAPHelperError, ProxyError, WazuhError) as handled_exc: self.logger.error(str(handled_exc)) self.logger.warning( f'Tasks may not perform as expected. Sleeping {self.sleep_time}s ' 'before continuing...' ) - time.sleep(self.sleep_time) + await sleep(self.sleep_time) @classmethod async def run(cls): try: configuration = parse_configuration() main_logger, proxy_logger, wazuh_api_logger = setup_loggers( - log_level=configuration['hap_helper'].get('log_level', logging.INFO) + log_level=configuration['hap_helper'].get('log_level', 'INFO') ) proxy_api = ProxyAPI( @@ -324,24 +328,26 @@ async def run(cls): helper = cls(proxy=proxy, wazuh_api=wazuh_api, logger=main_logger, options=configuration['hap_helper']) - helper.initialize_components() - helper.initialize_wazuh_cluster_configuration() + await helper.initialize_components() + await helper.initialize_wazuh_cluster_configuration() - main_logger.info('Starting HAProxy Helper on auto mode') + helper.logger.info('Starting HAProxy Helper on auto mode') await helper.manage_wazuh_cluster_nodes() - except (CoordinatorError, ProxyError) as main_exc: - main_logger.error(str(main_exc)) + except (HAPHelperError, ProxyError) as main_exc: + helper.logger.error(str(main_exc)) except KeyboardInterrupt: pass except Exception as unexpected_exc: - main_logger.critical(f'Unexpected exception: {unexpected_exc}', exc_info=True) + helper.logger.critical(f'Unexpected exception: {unexpected_exc}', exc_info=True) finally: - main_logger.info('Process ended') + helper.logger.info('Process ended') + +def setup_loggers(log_level: str) -> tuple[logging.Logger, logging.Logger, logging.Logger]: + log_level = logging.getLevelName(log_level.upper()) -def setup_loggers(str, log_level: int) -> tuple[logging.Logger, logging.Logger, logging.Logger]: log_file_path = path.join(common.WAZUH_LOGS, 'hap_helper.log') - main_logger = CustomLogger('wazuh-coordinator', file_path=log_file_path, level=log_level).get_logger() + main_logger = CustomLogger('wazuh-haphelper', file_path=log_file_path, level=log_level).get_logger() proxy_logger = CustomLogger('proxy-logger', file_path=log_file_path, level=log_level, tag='Proxy').get_logger() wazuh_api_logger = CustomLogger( 'wazuh-api-logger', file_path=log_file_path, level=log_level, tag='Wazuh API' diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index 844aa130e94..a734bbbf946 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -1,10 +1,10 @@ -import logging import ipaddress +import logging from enum import Enum -from typing import TypeAlias, Optional +from typing import Optional, TypeAlias -import requests -from wazuh_coordinator.exception import ProxyError +import httpx +from wazuh.core.cluster.hap_helper.exception import ProxyError JSON_TYPE: TypeAlias = dict | list[dict] PROXY_API_RESPONSE: TypeAlias = JSON_TYPE @@ -44,21 +44,22 @@ def __init__(self, username: str, password: str, address: str = 'localhost', por self.version = 0 - def initialize(self): + async def 
initialize(self): try: - response = requests.post( - f'https://{self.address}:{self.port}/', auth=(self.username, self.password), verify=False - ) - if response.status_code == 401: - raise ProxyError(102) - elif response.status_code == 404: - raise ProxyError(103) - except requests.ConnectionError: + async with httpx.AsyncClient(verify=False) as client: + response = await client.post( + f'https://{self.address}:{self.port}/', auth=(self.username, self.password) + ) + if response.status_code == 401: + raise ProxyError(102) + elif response.status_code == 404: + raise ProxyError(103) + except httpx.ConnectError: raise ProxyError(99, extra_msg='Check connectivity and the configuration file') - except requests.RequestException as req_exc: + except httpx.RequestError as req_exc: raise ProxyError(99, extra_msg=str(req_exc)) - def _make_hapee_request( + async def _make_hapee_request( self, endpoint: str, method: ProxyAPIMethod = ProxyAPIMethod.GET, @@ -77,8 +78,9 @@ def _make_hapee_request( } try: - response = requests.post(uri, auth=(self.username, self.password), json=hapee_json_body, verify=False) - except requests.RequestException as request_exc: + async with httpx.AsyncClient(verify=False, follow_redirects=True) as client: + response = await client.post(uri, auth=(self.username, self.password), json=hapee_json_body) + except httpx.RequestError as request_exc: raise ProxyError(100, extra_msg=str(request_exc)) if response.status_code == 200: @@ -89,7 +91,7 @@ def _make_hapee_request( if isinstance(decoded_response, dict) and '_version' in decoded_response: self.version = decoded_response['_version'] elif method != ProxyAPIMethod.GET and 'configuration' in endpoint: - self.update_configuration_version() + await self.update_configuration_version() return decoded_response elif response.status_code == 401: @@ -97,7 +99,7 @@ def _make_hapee_request( else: raise ProxyError(101, extra_msg=f'Full response: {response.status_code} | {response.json()}') - def _make_proxy_request( + async def _make_proxy_request( self, endpoint: str, method: ProxyAPIMethod = ProxyAPIMethod.GET, @@ -107,10 +109,15 @@ def _make_proxy_request( uri = f'https://{self.address}:{self.port}{endpoint}' try: - response = getattr(requests, str(method.value))( - uri, auth=(self.username, self.password), params=query_parameters, json=json_body, verify=False - ) - except requests.RequestException as request_exc: + async with httpx.AsyncClient(verify=False, follow_redirects=True) as client: + response = await client.request( + method.value, + uri, + auth=(self.username, self.password), + params=query_parameters, + json=json_body, + ) + except httpx.RequestError as request_exc: raise ProxyError(100, extra_msg=str(request_exc)) if response.status_code == 200: @@ -120,17 +127,17 @@ def _make_proxy_request( else: raise ProxyError(101, extra_msg=f'Full response: {response.status_code} | {response.json()}') - def update_configuration_version(self): - configuration_version = self._make_hapee_request('/services/haproxy/configuration/version') + async def update_configuration_version(self): + configuration_version = await self._make_hapee_request('/services/haproxy/configuration/version') self.version = configuration_version - def get_runtime_info(self) -> PROXY_API_RESPONSE: - return self._make_hapee_request('/services/haproxy/runtime/info')[0]['info'] + async def get_runtime_info(self) -> PROXY_API_RESPONSE: + return (await self._make_hapee_request('/services/haproxy/runtime/info'))[0]['info'] - def get_backends(self) -> PROXY_API_RESPONSE: - 
return self._make_hapee_request(endpoint='/services/haproxy/configuration/backends') + async def get_backends(self) -> PROXY_API_RESPONSE: + return await self._make_hapee_request(endpoint='/services/haproxy/configuration/backends') - def add_backend( + async def add_backend( self, name: str, mode: CommunicationProtocol = CommunicationProtocol.TCP, @@ -139,19 +146,19 @@ def add_backend( query_params = {'force_reload': True} json_body = {'name': name, 'mode': mode.value, 'balance': {'algorithm': algorithm.value}} - return self._make_hapee_request( + return await self._make_hapee_request( '/services/haproxy/configuration/backends', method=ProxyAPIMethod.POST, query_parameters=query_params, json_body=json_body, ) - def get_backend_servers(self, backend: str) -> PROXY_API_RESPONSE: - return self._make_hapee_request( + async def get_backend_servers(self, backend: str) -> PROXY_API_RESPONSE: + return await self._make_hapee_request( '/services/haproxy/configuration/servers', query_parameters={'backend': backend} ) - def add_server_to_backend( + async def add_server_to_backend( self, backend: str, server_name: str, server_address: str, port: int, resolver: Optional[str] ) -> PROXY_API_RESPONSE: query_params = {'backend': backend, 'force_reload': True} @@ -167,32 +174,32 @@ def add_server_to_backend( {'resolvers': resolver, 'init-addr': 'last,libc,none'} if resolver and not is_ip_address else {} ) - return self._make_hapee_request( + return await self._make_hapee_request( '/services/haproxy/configuration/servers', method=ProxyAPIMethod.POST, query_parameters=query_params, json_body=json_body, ) - def remove_server_from_backend(self, backend: str, server_name: str) -> PROXY_API_RESPONSE: + async def remove_server_from_backend(self, backend: str, server_name: str) -> PROXY_API_RESPONSE: query_params = {'backend': backend, 'force_reload': True} - return self._make_hapee_request( + return await self._make_hapee_request( f'/services/haproxy/configuration/servers/{server_name}', method=ProxyAPIMethod.DELETE, query_parameters=query_params, ) - def get_frontends(self) -> PROXY_API_RESPONSE: - return self._make_hapee_request(endpoint='/services/haproxy/configuration/frontends') + async def get_frontends(self) -> PROXY_API_RESPONSE: + return await self._make_hapee_request(endpoint='/services/haproxy/configuration/frontends') - def add_frontend( + async def add_frontend( self, name: str, port: int, backend: str, mode: CommunicationProtocol = CommunicationProtocol.TCP ) -> PROXY_API_RESPONSE: frontend_query_params = {'force_reload': True} frontend_json_body = {'name': name, 'mode': mode.value, 'default_backend': backend} - frontend_response = self._make_hapee_request( + frontend_response = await self._make_hapee_request( '/services/haproxy/configuration/frontends', method=ProxyAPIMethod.POST, query_parameters=frontend_query_params, @@ -203,7 +210,7 @@ def add_frontend( bind_query_params = {'force_reload': True, 'frontend': frontend_name} bind_json_body = {'port': port, 'name': f'{frontend_name}_bind'} - self._make_hapee_request( + await self._make_hapee_request( '/services/haproxy/configuration/binds', method=ProxyAPIMethod.POST, query_parameters=bind_query_params, @@ -212,43 +219,44 @@ def add_frontend( return frontend_response - def get_backend_server_runtime_settings(self, backend_name: str, server_name: str) -> PROXY_API_RESPONSE: + async def get_backend_server_runtime_settings(self, backend_name: str, server_name: str) -> PROXY_API_RESPONSE: query_params = {'backend': backend_name, 'name': server_name} - 
return self._make_hapee_request( + return await self._make_hapee_request( f'/services/haproxy/runtime/servers/{server_name}', query_parameters=query_params ) - def change_backend_server_state( + async def change_backend_server_state( self, backend_name: str, server_name: str, state: ProxyServerState ) -> PROXY_API_RESPONSE: query_params = {'backend': backend_name} json_body = {'admin_state': state.value} - return self._make_hapee_request( + return await self._make_hapee_request( f'/services/haproxy/runtime/servers/{server_name}', method=ProxyAPIMethod.PUT, query_parameters=query_params, json_body=json_body, ) - def get_backend_stats(self, backend_name: str) -> PROXY_API_RESPONSE: + async def get_backend_stats(self, backend_name: str) -> PROXY_API_RESPONSE: query_params = {'type': 'backend', 'name': backend_name} - return self._make_hapee_request('/services/haproxy/stats/native', query_parameters=query_params) + return await self._make_hapee_request('/services/haproxy/stats/native', query_parameters=query_params) - def get_backend_server_stats(self, backend_name: str, server_name: str) -> PROXY_API_RESPONSE: + async def get_backend_server_stats(self, backend_name: str, server_name: str) -> PROXY_API_RESPONSE: query_params = {'type': 'server', 'parent': backend_name, 'name': server_name.lower()} - return self._make_hapee_request('/services/haproxy/stats/native', query_parameters=query_params) + return await self._make_hapee_request('/services/haproxy/stats/native', query_parameters=query_params) - def get_proxy_processes(self) -> PROXY_API_RESPONSE: - return self._make_proxy_request('/haproxy/processes') + async def get_proxy_processes(self) -> PROXY_API_RESPONSE: + return await self._make_proxy_request('/haproxy/processes') - def kill_proxy_processes(self, pid_to_exclude: int = 0) -> PROXY_API_RESPONSE: + # TODO: This must be deprecated + async def kill_proxy_processes(self, pid_to_exclude: int = 0) -> PROXY_API_RESPONSE: query_params = {'exclude_pid': pid_to_exclude} - return self._make_proxy_request( + return await self._make_proxy_request( '/haproxy/processes', method=ProxyAPIMethod.DELETE, query_parameters=query_params ) @@ -278,61 +286,61 @@ def __init__( self.api = proxy_api self.resolver = resolver - def initialize(self): - self.api.initialize() + async def initialize(self): + await self.api.initialize() try: - self.api.get_runtime_info()['version'] + (await self.api.get_runtime_info())['version'] except (KeyError, IndexError): raise ProxyError(104) @check_proxy_api - def get_current_pid(self) -> int: - return self.api.get_runtime_info()['pid'] + async def get_current_pid(self) -> int: + return (await self.api.get_runtime_info())['pid'] @check_proxy_api - def get_current_backends(self) -> dict: - api_response = self.api.get_backends() + async def get_current_backends(self) -> dict: + api_response = await self.api.get_backends() self.logger.trace('Obtained proxy backends') return {backend['name']: backend for backend in api_response['data']} - def exists_backend(self, backend_name: str) -> bool: - return backend_name in self.get_current_backends() + async def exists_backend(self, backend_name: str) -> bool: + return backend_name in await self.get_current_backends() @check_proxy_api - def get_current_frontends(self) -> dict: - api_response = self.api.get_frontends() + async def get_current_frontends(self) -> dict: + api_response = await self.api.get_frontends() self.logger.trace('Obtained proxy frontends') return {frontend['name']: frontend for frontend in api_response['data'] if 
'default_backend' in frontend} - def exists_frontend(self, frontend_name: str) -> bool: - return frontend_name in self.get_current_frontends() + async def exists_frontend(self, frontend_name: str) -> bool: + return frontend_name in await self.get_current_frontends() @check_proxy_api - def add_new_backend( + async def add_new_backend( self, name: str, mode: CommunicationProtocol = CommunicationProtocol.TCP, algorithm: ProxyBalanceAlgorithm = ProxyBalanceAlgorithm.LEAST_CONNECTIONS, ): - self.api.add_backend(name=name, mode=mode, algorithm=algorithm) + await self.api.add_backend(name=name, mode=mode, algorithm=algorithm) self.logger.trace(f"Added new proxy backend: '{name}'") @check_proxy_api - def add_new_frontend( + async def add_new_frontend( self, name: str, port: int, backend: str, mode: CommunicationProtocol = CommunicationProtocol.TCP ): - self.api.add_frontend(name=name, port=port, backend=backend, mode=mode) + await self.api.add_frontend(name=name, port=port, backend=backend, mode=mode) self.logger.trace(f"Added new proxy frontend: '{name}'") @check_proxy_api - def get_current_backend_servers(self) -> dict: - api_response = self.api.get_backend_servers(self.wazuh_backend) + async def get_current_backend_servers(self) -> dict: + api_response = await self.api.get_backend_servers(self.wazuh_backend) self.logger.trace('Obtained proxy servers') return {server['name']: server['address'] for server in api_response['data']} @check_proxy_api - def add_wazuh_manager(self, manager_name: str, manager_address: str, resolver: Optional[str]) -> dict: - api_response = self.api.add_server_to_backend( + async def add_wazuh_manager(self, manager_name: str, manager_address: str, resolver: Optional[str]) -> dict: + api_response = await self.api.add_server_to_backend( backend=self.wazuh_backend, server_name=manager_name, server_address=manager_address, @@ -346,51 +354,53 @@ def add_wazuh_manager(self, manager_name: str, manager_address: str, resolver: O return api_response @check_proxy_api - def remove_wazuh_manager(self, manager_name: str) -> dict: - api_response = self.api.remove_server_from_backend(backend=self.wazuh_backend, server_name=manager_name) + async def remove_wazuh_manager(self, manager_name: str) -> dict: + api_response = await self.api.remove_server_from_backend(backend=self.wazuh_backend, server_name=manager_name) self.logger.trace(f"Removed server {manager_name} from backend '{self.wazuh_backend}'") return api_response @check_proxy_api - def restrain_server_new_connections(self, server_name: str) -> dict: - api_response = self.api.change_backend_server_state( + async def restrain_server_new_connections(self, server_name: str) -> dict: + api_response = await self.api.change_backend_server_state( backend_name=self.wazuh_backend, server_name=server_name, state=ProxyServerState.DRAIN ) self.logger.trace(f"Changed Wazuh server '{server_name}' to {ProxyServerState.DRAIN.value.upper()} state") return api_response @check_proxy_api - def allow_server_new_connections(self, server_name: str) -> dict: - api_response = self.api.change_backend_server_state( + async def allow_server_new_connections(self, server_name: str) -> dict: + api_response = await self.api.change_backend_server_state( backend_name=self.wazuh_backend, server_name=server_name, state=ProxyServerState.READY ) self.logger.trace(f"Changed Wazuh server '{server_name}' to {ProxyServerState.READY.value.upper()} state") return api_response @check_proxy_api - def get_wazuh_server_stats(self, server_name: str) -> dict: - server_stats = 
self.api.get_backend_server_stats(backend_name=self.wazuh_backend, server_name=server_name)[0][ - 'stats' - ][0]['stats'] + async def get_wazuh_server_stats(self, server_name: str) -> dict: + server_stats = ( + await self.api.get_backend_server_stats(backend_name=self.wazuh_backend, server_name=server_name) + )[0]['stats'][0]['stats'] self.logger.trace(f"Obtained server '{server_name}' stats") return server_stats @check_proxy_api - def is_server_drain(self, server_name: str) -> bool: - server_stats = self.api.get_backend_server_runtime_settings( + async def is_server_drain(self, server_name: str) -> bool: + server_stats = await self.api.get_backend_server_runtime_settings( backend_name=self.wazuh_backend, server_name=server_name ) return server_stats['admin_state'] == ProxyServerState.DRAIN.value @check_proxy_api - def get_wazuh_backend_stats(self, only_actives: bool = True) -> dict: - backend_servers = [server['name'] for server in self.api.get_backend_servers(self.wazuh_backend)['data']] + async def get_wazuh_backend_stats(self, only_actives: bool = True) -> dict: + backend_servers = [ + server['name'] for server in (await self.api.get_backend_servers(self.wazuh_backend))['data'] + ] stats = {} for server_name in backend_servers: - server_stats = self.get_wazuh_server_stats(server_name=server_name) + server_stats = await self.get_wazuh_server_stats(server_name=server_name) if only_actives and server_stats['status'] != 'UP': continue stats[server_name] = server_stats @@ -398,12 +408,12 @@ def get_wazuh_backend_stats(self, only_actives: bool = True) -> dict: return stats @check_proxy_api - def get_wazuh_backend_server_connections(self) -> dict: + async def get_wazuh_backend_server_connections(self) -> dict: current_connections_key = 'scur' - server_stats = self.get_wazuh_backend_stats() + server_stats = await self.get_wazuh_backend_stats() return {server_name: server_stats[server_name][current_connections_key] for server_name in server_stats} @check_proxy_api - def is_proxy_process_single(self) -> bool: - haproxy_processes = self.api.get_proxy_processes() + async def is_proxy_process_single(self) -> bool: + haproxy_processes = await self.api.get_proxy_processes() return len(haproxy_processes['data']['processes']) == 1 diff --git a/framework/wazuh/core/cluster/hap_helper/wazuh.py b/framework/wazuh/core/cluster/hap_helper/wazuh.py index ff0d40fa63e..6b63f7f3a40 100644 --- a/framework/wazuh/core/cluster/hap_helper/wazuh.py +++ b/framework/wazuh/core/cluster/hap_helper/wazuh.py @@ -1,14 +1,12 @@ import logging import re -import time from collections import defaultdict from enum import Enum -import requests -import urllib3 -from wazuh_coordinator.exception import WazuhError - -urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) # type: ignore +from wazuh.agent import get_agents, reconnect_agents +from wazuh.cluster import get_nodes_info +from wazuh.core.cluster.control import get_system_nodes +from wazuh.core.cluster.dapi.dapi import DistributedAPI class WazuhAPIMethod(Enum): @@ -56,130 +54,65 @@ def __init__( self.token = '' - def initialize(self): - try: - requests.get(f'https://{self.address}:{self.port}/', verify=False) - except requests.ConnectionError: - raise WazuhError(99, extra_msg='Check connectivity and the configuration file') - except requests.RequestException as req_exc: - raise WazuhError(99, extra_msg=req_exc) - - def _obtain_token(self, token_endpoint_method: WazuhAPIMethod = WazuhAPIMethod.GET): - endpoint = 
f'https://{self.address}:{self.port}/security/user/authenticate' - response = getattr(requests, str(token_endpoint_method.value))( - endpoint, auth=(self.username, self.password), verify=False - ) - if response.status_code == 200: - self.token = response.json()['data']['token'] - self.logger.debug(f'Requested API token ({self.username})') - elif response.status_code == 405: - self._obtain_token(token_endpoint_method=WazuhAPIMethod.POST) - elif response.status_code == 401: - raise WazuhError(102) - else: - raise WazuhError(100, extra_msg=f'Full response: {response.status_code} | {response.json()}') - - def _security_headers(self): - if not self.token: - self._obtain_token() - - return {'Authorization': f'Bearer {self.token}'} - - def _make_request( - self, - endpoint: str, - method: WazuhAPIMethod = WazuhAPIMethod.GET, - query_parameters: dict = None, - json_body: dict = None, - ) -> dict: - response = self._retry_request_if_failed(endpoint, method, query_parameters, json_body) - if response.status_code == 200: - return response.json() - elif response.status_code == 401: - self._obtain_token() - return self._make_request(endpoint, method=method, query_parameters=query_parameters, json_body=json_body) - elif response.status_code == 403: - raise WazuhError(103, extra_msg=f"Endpoint '{endpoint}'") - else: - raise WazuhError(101, extra_msg=f'Full response: {response.status_code} | {response.json()}') - - def _retry_request_if_failed( - self, - endpoint: str, - method: WazuhAPIMethod = WazuhAPIMethod.GET, - query_parameters: dict = None, - json_body: dict = None, - ) -> requests.Response: - last_handled_exception = '' - uri = f'https://{self.address}:{self.port}{endpoint}' - - for _ in range(self.API_RETRIES): - try: - response = getattr(requests, str(method.value))( - uri, headers=self._security_headers(), json=json_body, params=query_parameters, verify=False - ) - self.logger.trace( - f"{method.value.upper()} '{endpoint}' - Parameters: {query_parameters or {} }" - f' - JSON body: {json_body or {} } [{response.status_code}]' - ) - if response.status_code == 500: - if response.json().get('error', '') == self.TIMEOUT_ERROR_CODE: - last_handled_exception = TimeoutError(response.json()['detail']) - self.logger.debug('Timeout executing API request') - else: - last_handled_exception = WazuhError(101, extra_msg=str(response.json())) - self.logger.debug('Unexpected error executing API request') - time.sleep(1) - else: - return response - except requests.ConnectionError as request_err: - last_handled_exception = request_err - self.logger.debug(f'Could not connect to Wazuh API') - time.sleep(1) - else: - raise WazuhError(104, str(last_handled_exception)) - - def get_cluster_nodes(self) -> dict: - api_response = self._make_request('/cluster/nodes') - return { - item['name']: item['ip'] - for item in api_response['data']['affected_items'] - if item['name'] not in self.excluded_nodes - } - - def reconnect_agents(self, agent_list: list = None) -> dict: - query_params = None - if agent_list is not None: - query_params = {'agents_list': ','.join(agent_list)} - - return self._make_request('/agents/reconnect', method=WazuhAPIMethod.PUT, query_parameters=query_params) - - def get_agents_node_distribution(self) -> dict: + async def get_cluster_nodes(self) -> dict: + data = await DistributedAPI( + f=get_nodes_info, + request_type='local_master', + is_async=True, + logger=self.logger, + local_client_arg='lc', + nodes=await get_system_nodes(), + ).distribute_function() + + return {item['name']: item['ip'] for item in 
data.affected_items if item['name'] not in self.excluded_nodes} + + async def reconnect_agents(self, agent_list: list = None) -> dict: + data = await DistributedAPI( + f=reconnect_agents, + f_kwargs={'agent_list': agent_list}, + request_type='distributed_master', + logger=self.logger, + ).distribute_function() + + return data.affected_items + + async def get_agents_node_distribution(self) -> dict: agent_distribution = defaultdict(list) - query_params = { - 'select': 'node_name,version', - 'sort': '-version,id', - 'status': 'active', + f_kwargs = { + 'select': ['node_name', 'version'], + 'sort': {'fields': ['version', 'id'], 'order': 'desc'}, + 'filters': {'status': 'active'}, 'q': 'id!=000', 'limit': self.AGENTS_MAX_LIMIT, } - api_response = self._make_request('/agents', query_parameters=query_params) - for agent in api_response['data']['affected_items']: + data = await DistributedAPI( + f=get_agents, + f_kwargs=f_kwargs, + request_type='local_master', + logger=self.logger, + ).distribute_function() + + for agent in data.affected_items: agent_distribution[agent['node_name']].append({'id': agent['id'], 'version': agent['version']}) return agent_distribution - def get_agents_belonging_to_node(self, node_name: str, limit: int = None) -> list[dict]: - query_params = { - 'select': 'version', - 'sort': '-version,id', - 'status': 'active', + async def get_agents_belonging_to_node(self, node_name: str, limit: int = None) -> list[dict]: + f_kwargs = { + 'select': ['version'], + 'sort': {'fields': ['version', 'id'], 'order': 'desc'}, + 'filters': {'status': 'active', 'node_name': node_name}, 'q': 'id!=000', - 'node_name': node_name, 'limit': limit or self.AGENTS_MAX_LIMIT, } - api_response = self._make_request('/agents', query_parameters=query_params) - return api_response['data']['affected_items'] + data = await DistributedAPI( + f=get_agents, + f_kwargs=f_kwargs, + request_type='local_master', + logger=self.logger, + ).distribute_function() + + return data.affected_items diff --git a/framework/wazuh/core/cluster/local_server.py b/framework/wazuh/core/cluster/local_server.py index b8587c6b5de..70feb97708d 100644 --- a/framework/wazuh/core/cluster/local_server.py +++ b/framework/wazuh/core/cluster/local_server.py @@ -7,17 +7,16 @@ import json import os import random -from datetime import datetime from typing import Tuple, Union import uvloop - from wazuh.core import common -from wazuh.core.cluster import common as c_common, server, client, cluster +from wazuh.core.cluster import client, cluster, server +from wazuh.core.cluster import common as c_common from wazuh.core.cluster.dapi import dapi +from wazuh.core.cluster.hap_helper import hap_helper from wazuh.core.cluster.utils import context_tag from wazuh.core.exception import WazuhClusterError -from wazuh.core.utils import get_date_from_timestamp class LocalServerHandler(server.AbstractServerHandler): @@ -352,7 +351,8 @@ def __init__(self, node: server.AbstractServer, **kwargs): self.handler_class = LocalServerHandlerMaster self.dapi = dapi.APIRequestQueue(server=self) self.sendsync = dapi.SendSyncRequestQueue(server=self) - self.tasks.extend([self.dapi.run, self.sendsync.run]) + + self.tasks.extend([self.dapi.run, self.sendsync.run, hap_helper.HAPHelper.run]) class LocalServerHandlerWorker(LocalServerHandler): From 98d74114b78c666584de44a590535d14f23c38f2 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Thu, 15 Feb 2024 15:31:10 -0300 Subject: [PATCH 326/419] Add private method to handle DAPI calls --- .../wazuh/core/cluster/hap_helper/proxy.py | 
4 +-- .../wazuh/core/cluster/hap_helper/wazuh.py | 28 +++++++++++-------- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index a734bbbf946..bd2046afc2b 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -262,11 +262,11 @@ async def kill_proxy_processes(self, pid_to_exclude: int = 0) -> PROXY_API_RESPO def check_proxy_api(func): - def wrapper(self, *args, **kwargs): + async def wrapper(self, *args, **kwargs): if self.api is None: raise ProxyError(103) - return func(self, *args, **kwargs) + return await func(self, *args, **kwargs) return wrapper diff --git a/framework/wazuh/core/cluster/hap_helper/wazuh.py b/framework/wazuh/core/cluster/hap_helper/wazuh.py index 6b63f7f3a40..6995cd68447 100644 --- a/framework/wazuh/core/cluster/hap_helper/wazuh.py +++ b/framework/wazuh/core/cluster/hap_helper/wazuh.py @@ -2,6 +2,7 @@ import re from collections import defaultdict from enum import Enum +from typing import Callable, Optional from wazuh.agent import get_agents, reconnect_agents from wazuh.cluster import get_nodes_info @@ -54,25 +55,30 @@ def __init__( self.token = '' + async def _make_dapi_call(self, f: Callable, f_kwargs: Optional[dict] = None, **kwargs) -> dict: + ret_val = await DistributedAPI(f=f, f_kwargs=f_kwargs, logger=self.logger, **kwargs).distribute_function() + if isinstance(ret_val, Exception): + self.logger.error(f'Unexpected error calling {f.__name__}') + raise ret_val + return ret_val + async def get_cluster_nodes(self) -> dict: - data = await DistributedAPI( + data = await self._make_dapi_call( f=get_nodes_info, request_type='local_master', is_async=True, - logger=self.logger, local_client_arg='lc', nodes=await get_system_nodes(), - ).distribute_function() + ) return {item['name']: item['ip'] for item in data.affected_items if item['name'] not in self.excluded_nodes} async def reconnect_agents(self, agent_list: list = None) -> dict: - data = await DistributedAPI( + data = await self._make_dapi_call( f=reconnect_agents, f_kwargs={'agent_list': agent_list}, request_type='distributed_master', - logger=self.logger, - ).distribute_function() + ) return data.affected_items @@ -87,12 +93,11 @@ async def get_agents_node_distribution(self) -> dict: 'limit': self.AGENTS_MAX_LIMIT, } - data = await DistributedAPI( + data = await self._make_dapi_call( f=get_agents, f_kwargs=f_kwargs, request_type='local_master', - logger=self.logger, - ).distribute_function() + ) for agent in data.affected_items: agent_distribution[agent['node_name']].append({'id': agent['id'], 'version': agent['version']}) @@ -108,11 +113,10 @@ async def get_agents_belonging_to_node(self, node_name: str, limit: int = None) 'limit': limit or self.AGENTS_MAX_LIMIT, } - data = await DistributedAPI( + data = await self._make_dapi_call( f=get_agents, f_kwargs=f_kwargs, request_type='local_master', - logger=self.logger, - ).distribute_function() + ) return data.affected_items From 75192fd190834a3d8ea40c5e2bf02e0c07c2d8b9 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Thu, 15 Feb 2024 16:12:54 -0300 Subject: [PATCH 327/419] Refactor logging --- .../core/cluster/hap_helper/custom_logging.py | 113 ------------------ .../core/cluster/hap_helper/hap_helper.py | 37 ++---- .../wazuh/core/cluster/hap_helper/proxy.py | 31 +++-- .../wazuh/core/cluster/hap_helper/wazuh.py | 11 +- 4 files changed, 40 insertions(+), 152 deletions(-) delete mode 100644 
framework/wazuh/core/cluster/hap_helper/custom_logging.py diff --git a/framework/wazuh/core/cluster/hap_helper/custom_logging.py b/framework/wazuh/core/cluster/hap_helper/custom_logging.py deleted file mode 100644 index 074172e0411..00000000000 --- a/framework/wazuh/core/cluster/hap_helper/custom_logging.py +++ /dev/null @@ -1,113 +0,0 @@ -import calendar -import gzip -import logging -import re -import shutil -from copy import copy -from glob import glob -from logging.handlers import TimedRotatingFileHandler -from os import chmod, makedirs, path, unlink - - -class CustomFileRotatingHandler(TimedRotatingFileHandler): - def doRollover(self): - logging.handlers.TimedRotatingFileHandler.doRollover(self) - - rotated_file = glob(f'{self.baseFilename}.*')[0] - - new_rotated_file = self.compute_archives_directory(rotated_file) - with open(rotated_file, 'rb') as f_in, gzip.open(new_rotated_file, 'wb') as f_out: - shutil.copyfileobj(f_in, f_out) - chmod(new_rotated_file, 0o640) - unlink(rotated_file) - - def compute_archives_directory(self, rotated_filepath): - rotated_file = path.basename(rotated_filepath) - year, month, day = re.match(r'[\w.-]+\.(\d+)-(\d+)-(\d+)', rotated_file).groups() - month = calendar.month_abbr[int(month)] - log_path = path.join(path.splitext(self.baseFilename)[0], year, month) - if not path.exists(log_path): - makedirs(log_path) - - return f'{log_path}/{path.basename(self.baseFilename)}-{day}.gz' - - -class LoggingFilter(logging.Filter): - def __init__(self, module_name: str): - super().__init__() - self.module_name = module_name - - def filter(self, record) -> bool: - record.levelname = f'{record.levelname}:' - record.module_name = f'[{self.module_name}]' - return True - - -class ColoredFormatter(logging.Formatter): - GREY = '\x1b[38;20m' - YELLOW = '\x1b[33;20m' - RED = '\x1b[31;20m' - BOLD_RED = '\x1b[31;1m' - ORANGE = '\x1b[33m;20m' - DARK_BLUE = '\x1b[34m' - GREY_BLUE = '\x1b[36m' - RESET = '\x1b[0m' - - TRACE_LEVEL = 5 - - def __init__(self, fmt, style='%', datefmt='', *args, **kwargs): - super().__init__(fmt, *args, **kwargs) - self.style = style - self.datefmt = datefmt - - self.FORMATS = { - logging.DEBUG: self.DARK_BLUE + fmt + self.RESET, - logging.INFO: self.GREY + fmt + self.RESET, - logging.WARNING: self.YELLOW + fmt + self.RESET, - logging.ERROR: self.RED + fmt + self.RESET, - logging.CRITICAL: self.BOLD_RED + fmt + self.RESET, - self.TRACE_LEVEL: self.GREY_BLUE + fmt + self.RESET, - } - - def format(self, record): - record_copy = copy(record) - log_fmt = self.FORMATS.get(record_copy.levelno) - formatter = logging.Formatter(log_fmt, style=self.style, datefmt=self.datefmt) - return formatter.format(record_copy) - - -class CustomLogger: - TRACE_LEVEL = 5 - - def __init__(self, name: str, file_path: str = '', tag: str = 'Main', level: int = logging.INFO): - logging.addLevelName(self.TRACE_LEVEL, 'TRACE') - logger = logging.getLogger(name) - logger.trace = self.trace - logger.addFilter(LoggingFilter(tag)) - logger.propagate = False - - colored_formatter = ColoredFormatter( - '%(asctime)s %(levelname)-9s %(module_name)-11s %(message)s', style='%', datefmt='%Y/%m/%d %H:%M:%S' - ) - colored_handler = logging.StreamHandler() - colored_handler.setFormatter(colored_formatter) - - if file_path: - logger_formatter = logging.Formatter( - '%(asctime)s %(levelname)-9s %(module_name)-11s %(message)s', style='%', datefmt='%Y/%m/%d %H:%M:%S' - ) - fh = CustomFileRotatingHandler(filename=file_path, when='midnight') - fh.setFormatter(logger_formatter) - logger.addHandler(fh) - - 
logger.addHandler(colored_handler) - logger.setLevel(level) - - self.logger = logger - - def get_logger(self) -> logging.Logger: - return self.logger - - def trace(self, message, *args, **kwargs): - if self.logger.isEnabledFor(self.TRACE_LEVEL): - self.logger._log(self.TRACE_LEVEL, message, args, **kwargs) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index aab2317502f..122e8ad229e 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -2,14 +2,12 @@ import time from asyncio import sleep from math import ceil, floor -from os import path -from wazuh.core import common from wazuh.core.cluster.hap_helper.configuration import parse_configuration -from wazuh.core.cluster.hap_helper.custom_logging import CustomLogger from wazuh.core.cluster.hap_helper.exception import HAPHelperError, ProxyError, WazuhError from wazuh.core.cluster.hap_helper.proxy import Proxy, ProxyAPI, ProxyServerState from wazuh.core.cluster.hap_helper.wazuh import WazuhAgent, WazuhAPI +from wazuh.core.cluster.utils import ClusterFilter class HAPHelper: @@ -17,8 +15,8 @@ class HAPHelper: AGENT_STATUS_SYNC_TIME: int = 25 # Default agent notify time + cluster sync + 5s SERVER_ADMIN_STATE_DELAY: int = 5 - def __init__(self, proxy: Proxy, wazuh_api: WazuhAPI, logger: logging.Logger, options: dict): - self.logger = logger + def __init__(self, proxy: Proxy, wazuh_api: WazuhAPI, options: dict): + self.logger = self._get_logger() self.proxy = proxy self.wazuh_api = wazuh_api @@ -29,6 +27,13 @@ def __init__(self, proxy: Proxy, wazuh_api: WazuhAPI, logger: logging.Logger, op self.agent_tolerance: float = options['agent_tolerance'] self.remove_disconnected_node_after: int = options['remove_disconnected_node_after'] + @staticmethod + def _get_logger() -> logging.Logger: + logger = logging.getLogger('wazuh').getChild('HAPHelper') + logger.addFilter(ClusterFilter(tag='Cluster', subtag='HAPHelper Main')) + + return logger + async def initialize_components(self): try: # self.wazuh_api.initialize() @@ -56,7 +61,7 @@ async def initialize_wazuh_cluster_configuration(self): async def check_node_to_delete(self, node_name: str) -> bool: node_downtime = (await self.proxy.get_wazuh_server_stats(server_name=node_name))['lastchg'] - self.logger.trace(f"Server '{node_name}' has been disconnected for {node_downtime}s") + self.logger.debug2(f"Server '{node_name}' has been disconnected for {node_downtime}s") if node_downtime < self.remove_disconnected_node_after * 60: self.logger.info(f"Server '{node_name}' has not been disconnected enough time to remove it") @@ -299,9 +304,6 @@ async def manage_wazuh_cluster_nodes(self): async def run(cls): try: configuration = parse_configuration() - main_logger, proxy_logger, wazuh_api_logger = setup_loggers( - log_level=configuration['hap_helper'].get('log_level', 'INFO') - ) proxy_api = ProxyAPI( username=configuration['proxy']['api']['user'], @@ -313,7 +315,6 @@ async def run(cls): wazuh_backend=configuration['proxy']['backend'], wazuh_connection_port=configuration['wazuh']['connection']['port'], proxy_api=proxy_api, - logger=proxy_logger, resolver=configuration['proxy'].get('resolver', None), ) @@ -323,10 +324,9 @@ async def run(cls): username=configuration['wazuh']['api']['user'], password=configuration['wazuh']['api']['password'], excluded_nodes=configuration['wazuh']['excluded_nodes'], - logger=wazuh_api_logger, ) - helper = cls(proxy=proxy, wazuh_api=wazuh_api, 
logger=main_logger, options=configuration['hap_helper']) + helper = cls(proxy=proxy, wazuh_api=wazuh_api, options=configuration['hap_helper']) await helper.initialize_components() await helper.initialize_wazuh_cluster_configuration() @@ -341,16 +341,3 @@ async def run(cls): helper.logger.critical(f'Unexpected exception: {unexpected_exc}', exc_info=True) finally: helper.logger.info('Process ended') - - -def setup_loggers(log_level: str) -> tuple[logging.Logger, logging.Logger, logging.Logger]: - log_level = logging.getLevelName(log_level.upper()) - - log_file_path = path.join(common.WAZUH_LOGS, 'hap_helper.log') - main_logger = CustomLogger('wazuh-haphelper', file_path=log_file_path, level=log_level).get_logger() - proxy_logger = CustomLogger('proxy-logger', file_path=log_file_path, level=log_level, tag='Proxy').get_logger() - wazuh_api_logger = CustomLogger( - 'wazuh-api-logger', file_path=log_file_path, level=log_level, tag='Wazuh API' - ).get_logger() - - return main_logger, proxy_logger, wazuh_api_logger diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index bd2046afc2b..960399a3fc7 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -5,6 +5,7 @@ import httpx from wazuh.core.cluster.hap_helper.exception import ProxyError +from wazuh.core.cluster.utils import ClusterFilter JSON_TYPE: TypeAlias = dict | list[dict] PROXY_API_RESPONSE: TypeAlias = JSON_TYPE @@ -276,16 +277,22 @@ def __init__( self, wazuh_backend: str, proxy_api: ProxyAPI, - logger: logging.Logger, wazuh_connection_port: int = 1514, resolver: str = None, ): - self.logger = logger + self.logger = self._get_logger() self.wazuh_backend = wazuh_backend self.wazuh_connection_port = wazuh_connection_port self.api = proxy_api self.resolver = resolver + @staticmethod + def _get_logger() -> logging.Logger: + logger = logging.getLogger('wazuh').getChild('HAPHelper Proxy') + logger.addFilter(ClusterFilter(tag='Cluster', subtag='HAPHelper Proxy')) + + return logger + async def initialize(self): await self.api.initialize() try: @@ -300,7 +307,7 @@ async def get_current_pid(self) -> int: @check_proxy_api async def get_current_backends(self) -> dict: api_response = await self.api.get_backends() - self.logger.trace('Obtained proxy backends') + self.logger.debug2('Obtained proxy backends') return {backend['name']: backend for backend in api_response['data']} async def exists_backend(self, backend_name: str) -> bool: @@ -309,7 +316,7 @@ async def exists_backend(self, backend_name: str) -> bool: @check_proxy_api async def get_current_frontends(self) -> dict: api_response = await self.api.get_frontends() - self.logger.trace('Obtained proxy frontends') + self.logger.debug2('Obtained proxy frontends') return {frontend['name']: frontend for frontend in api_response['data'] if 'default_backend' in frontend} async def exists_frontend(self, frontend_name: str) -> bool: @@ -323,19 +330,19 @@ async def add_new_backend( algorithm: ProxyBalanceAlgorithm = ProxyBalanceAlgorithm.LEAST_CONNECTIONS, ): await self.api.add_backend(name=name, mode=mode, algorithm=algorithm) - self.logger.trace(f"Added new proxy backend: '{name}'") + self.logger.debug2(f"Added new proxy backend: '{name}'") @check_proxy_api async def add_new_frontend( self, name: str, port: int, backend: str, mode: CommunicationProtocol = CommunicationProtocol.TCP ): await self.api.add_frontend(name=name, port=port, backend=backend, mode=mode) - 
self.logger.trace(f"Added new proxy frontend: '{name}'") + self.logger.debug2(f"Added new proxy frontend: '{name}'") @check_proxy_api async def get_current_backend_servers(self) -> dict: api_response = await self.api.get_backend_servers(self.wazuh_backend) - self.logger.trace('Obtained proxy servers') + self.logger.debug2('Obtained proxy servers') return {server['name']: server['address'] for server in api_response['data']} @check_proxy_api @@ -347,7 +354,7 @@ async def add_wazuh_manager(self, manager_name: str, manager_address: str, resol port=self.wazuh_connection_port, resolver=resolver, ) - self.logger.trace( + self.logger.debug2( f"Added new server '{manager_name}' {manager_address}:{self.wazuh_connection_port} to backend" f" '{self.wazuh_backend}'" ) @@ -357,7 +364,7 @@ async def add_wazuh_manager(self, manager_name: str, manager_address: str, resol async def remove_wazuh_manager(self, manager_name: str) -> dict: api_response = await self.api.remove_server_from_backend(backend=self.wazuh_backend, server_name=manager_name) - self.logger.trace(f"Removed server {manager_name} from backend '{self.wazuh_backend}'") + self.logger.debug2(f"Removed server {manager_name} from backend '{self.wazuh_backend}'") return api_response @check_proxy_api @@ -365,7 +372,7 @@ async def restrain_server_new_connections(self, server_name: str) -> dict: api_response = await self.api.change_backend_server_state( backend_name=self.wazuh_backend, server_name=server_name, state=ProxyServerState.DRAIN ) - self.logger.trace(f"Changed Wazuh server '{server_name}' to {ProxyServerState.DRAIN.value.upper()} state") + self.logger.debug2(f"Changed Wazuh server '{server_name}' to {ProxyServerState.DRAIN.value.upper()} state") return api_response @check_proxy_api @@ -373,7 +380,7 @@ async def allow_server_new_connections(self, server_name: str) -> dict: api_response = await self.api.change_backend_server_state( backend_name=self.wazuh_backend, server_name=server_name, state=ProxyServerState.READY ) - self.logger.trace(f"Changed Wazuh server '{server_name}' to {ProxyServerState.READY.value.upper()} state") + self.logger.debug2(f"Changed Wazuh server '{server_name}' to {ProxyServerState.READY.value.upper()} state") return api_response @check_proxy_api @@ -382,7 +389,7 @@ async def get_wazuh_server_stats(self, server_name: str) -> dict: await self.api.get_backend_server_stats(backend_name=self.wazuh_backend, server_name=server_name) )[0]['stats'][0]['stats'] - self.logger.trace(f"Obtained server '{server_name}' stats") + self.logger.debug2(f"Obtained server '{server_name}' stats") return server_stats @check_proxy_api diff --git a/framework/wazuh/core/cluster/hap_helper/wazuh.py b/framework/wazuh/core/cluster/hap_helper/wazuh.py index 6995cd68447..d3ceadbdaf3 100644 --- a/framework/wazuh/core/cluster/hap_helper/wazuh.py +++ b/framework/wazuh/core/cluster/hap_helper/wazuh.py @@ -8,6 +8,7 @@ from wazuh.cluster import get_nodes_info from wazuh.core.cluster.control import get_system_nodes from wazuh.core.cluster.dapi.dapi import DistributedAPI +from wazuh.core.cluster.utils import ClusterFilter class WazuhAPIMethod(Enum): @@ -40,13 +41,12 @@ class WazuhAPI: def __init__( self, address: str, - logger: logging.Logger, port: int = 55000, username: str = 'wazuh', password: str = 'wazuh', excluded_nodes: list | None = None, ): - self.logger = logger + self.logger = self._get_logger() self.address = address self.port = port self.username = username @@ -55,6 +55,13 @@ def __init__( self.token = '' + @staticmethod + def _get_logger() -> 
logging.Logger: + logger = logging.getLogger('wazuh').getChild('HAPHelper DAPI') + logger.addFilter(ClusterFilter(tag='Cluster', subtag='HAPHelper DAPI')) + + return logger + async def _make_dapi_call(self, f: Callable, f_kwargs: Optional[dict] = None, **kwargs) -> dict: ret_val = await DistributedAPI(f=f, f_kwargs=f_kwargs, logger=self.logger, **kwargs).distribute_function() if isinstance(ret_val, Exception): From 5219fad7aee7ce60a51d7aaed49d7450dc6d7b4f Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Thu, 15 Feb 2024 17:53:17 -0300 Subject: [PATCH 328/419] Rename WazuhAPI to WazuhDAPI --- .../core/cluster/hap_helper/hap_helper.py | 25 ++++++++----------- .../wazuh/core/cluster/hap_helper/wazuh.py | 10 +------- 2 files changed, 11 insertions(+), 24 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index 122e8ad229e..e2835bf78d6 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -6,7 +6,7 @@ from wazuh.core.cluster.hap_helper.configuration import parse_configuration from wazuh.core.cluster.hap_helper.exception import HAPHelperError, ProxyError, WazuhError from wazuh.core.cluster.hap_helper.proxy import Proxy, ProxyAPI, ProxyServerState -from wazuh.core.cluster.hap_helper.wazuh import WazuhAgent, WazuhAPI +from wazuh.core.cluster.hap_helper.wazuh import WazuhAgent, WazuhDAPI from wazuh.core.cluster.utils import ClusterFilter @@ -15,10 +15,10 @@ class HAPHelper: AGENT_STATUS_SYNC_TIME: int = 25 # Default agent notify time + cluster sync + 5s SERVER_ADMIN_STATE_DELAY: int = 5 - def __init__(self, proxy: Proxy, wazuh_api: WazuhAPI, options: dict): + def __init__(self, proxy: Proxy, wazuh_dapi: WazuhDAPI, options: dict): self.logger = self._get_logger() self.proxy = proxy - self.wazuh_api = wazuh_api + self.wazuh_dapi = wazuh_dapi self.sleep_time: int = options['sleep_time'] self.agent_reconnection_stability_time: int = options['agent_reconnection_stability_time'] @@ -36,7 +36,6 @@ def _get_logger() -> logging.Logger: async def initialize_components(self): try: - # self.wazuh_api.initialize() await self.proxy.initialize() self.logger.info('Main components were initialized') except (WazuhError, ProxyError) as init_exc: @@ -98,7 +97,7 @@ async def obtain_nodes_to_configure( remove_nodes.append(node_name) add_nodes.append(node_name) for node_name in proxy_backend_servers.keys() - wazuh_cluster_nodes.keys(): - if node_name in self.wazuh_api.excluded_nodes: + if node_name in self.wazuh_dapi.excluded_nodes: self.logger.info(f"Server '{node_name}' has been excluded but is currently active. Removing it") elif await self.check_node_to_delete(node_name): pass @@ -115,7 +114,7 @@ async def update_agent_connections(self, agent_list: list[str]): f'Total iterations: {ceil(len(agent_list) / self.agent_reconnection_chunk_size)}' ) for index in range(0, len(agent_list), self.agent_reconnection_chunk_size): - await self.wazuh_api.reconnect_agents(agent_list[index : index + self.agent_reconnection_chunk_size]) + await self.wazuh_dapi.reconnect_agents(agent_list[index : index + self.agent_reconnection_chunk_size]) self.logger.debug(f'Delay between agent reconnections. 
Sleeping {self.agent_reconnection_time}s...') await sleep(self.agent_reconnection_time) @@ -160,7 +159,7 @@ async def migrate_old_connections(self, new_servers: list[str], deleted_servers: wazuh_backend_stats = (await self.proxy.get_wazuh_backend_stats()).keys() self.logger.debug('All new servers are UP') - previous_agent_distribution = await self.wazuh_api.get_agents_node_distribution() + previous_agent_distribution = await self.wazuh_dapi.get_agents_node_distribution() previous_connection_distribution = await self.proxy.get_wazuh_backend_server_connections() | { server: len(previous_agent_distribution[server]) for server in previous_agent_distribution @@ -230,7 +229,7 @@ def check_for_balance(self, current_connections_distribution: dict) -> dict: async def calculate_agents_to_balance(self, affected_servers: dict) -> dict: agents_to_balance = {} for server_name, n_agents in affected_servers.items(): - agent_candidates = await self.wazuh_api.get_agents_belonging_to_node(node_name=server_name, limit=n_agents) + agent_candidates = await self.wazuh_dapi.get_agents_belonging_to_node(node_name=server_name, limit=n_agents) eligible_agents = WazuhAgent.get_agents_able_to_reconnect(agents_list=agent_candidates) if len(eligible_agents) != len(agent_candidates): self.logger.warning( @@ -253,7 +252,7 @@ async def manage_wazuh_cluster_nodes(self): try: await self.backend_servers_state_healthcheck() await self.check_proxy_processes(auto_mode=True) and await sleep(self.AGENT_STATUS_SYNC_TIME) - current_wazuh_cluster = await self.wazuh_api.get_cluster_nodes() + current_wazuh_cluster = await self.wazuh_dapi.get_cluster_nodes() current_proxy_backend = await self.proxy.get_current_backend_servers() nodes_to_add, nodes_to_remove = await self.obtain_nodes_to_configure( @@ -318,15 +317,11 @@ async def run(cls): resolver=configuration['proxy'].get('resolver', None), ) - wazuh_api = WazuhAPI( - address=configuration['wazuh']['api']['address'], - port=configuration['wazuh']['api']['port'], - username=configuration['wazuh']['api']['user'], - password=configuration['wazuh']['api']['password'], + wazuh_dapi = WazuhDAPI( excluded_nodes=configuration['wazuh']['excluded_nodes'], ) - helper = cls(proxy=proxy, wazuh_api=wazuh_api, options=configuration['hap_helper']) + helper = cls(proxy=proxy, wazuh_dapi=wazuh_dapi, options=configuration['hap_helper']) await helper.initialize_components() await helper.initialize_wazuh_cluster_configuration() diff --git a/framework/wazuh/core/cluster/hap_helper/wazuh.py b/framework/wazuh/core/cluster/hap_helper/wazuh.py index d3ceadbdaf3..881b525788b 100644 --- a/framework/wazuh/core/cluster/hap_helper/wazuh.py +++ b/framework/wazuh/core/cluster/hap_helper/wazuh.py @@ -33,24 +33,16 @@ def get_agents_able_to_reconnect(cls, agents_list: list[dict]) -> list[str]: return [agent['id'] for agent in agents_list if cls.can_reconnect(agent['version'])] -class WazuhAPI: +class WazuhDAPI: AGENTS_MAX_LIMIT = 100000 API_RETRIES = 5 TIMEOUT_ERROR_CODE = 3021 def __init__( self, - address: str, - port: int = 55000, - username: str = 'wazuh', - password: str = 'wazuh', excluded_nodes: list | None = None, ): self.logger = self._get_logger() - self.address = address - self.port = port - self.username = username - self.password = password self.excluded_nodes = excluded_nodes or [] self.token = '' From baba059bc98dfbd75c5c80f3b6bd27453229cee6 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Fri, 16 Feb 2024 15:57:19 -0300 Subject: [PATCH 329/419] Better error handling for DAPI calls --- 
.../core/cluster/hap_helper/data/configuration.yaml | 2 +- .../wazuh/core/cluster/hap_helper/exception.py | 12 ------------ .../wazuh/core/cluster/hap_helper/hap_helper.py | 13 +++++++------ 3 files changed, 8 insertions(+), 19 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml b/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml index 3c097438efd..160eee56886 100644 --- a/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml +++ b/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml @@ -49,5 +49,5 @@ hap_helper: # Agent imbalance tolerance agent_tolerance: 0.1 # Time in minutes before removing a disconnected Wazuh node from the backend - remove_disconnected_node_after: 60 + remove_disconnected_node_after: 3 log_level: debug diff --git a/framework/wazuh/core/cluster/hap_helper/exception.py b/framework/wazuh/core/cluster/hap_helper/exception.py index 71fb7f48eef..3c78ea8d128 100644 --- a/framework/wazuh/core/cluster/hap_helper/exception.py +++ b/framework/wazuh/core/cluster/hap_helper/exception.py @@ -17,18 +17,6 @@ class HAPHelperError(CustomException): ERRORS = {100: 'Server status check timed out after adding new servers', 101: 'User configuration is not valid'} -class WazuhError(CustomException): - PREFIX = 'W' - ERRORS = { - 99: 'Cannot initialize Wazuh API', - 100: 'Unexpected error trying to connect to the Wazuh API', - 101: 'Unexpected response from the Wazuh API', - 102: 'Invalid credentials for the Wazuh API', - 103: 'The given Wazuh API user does not have permissions to make the request', - 104: 'Too many API requests retries', - } - - class ProxyError(CustomException): PREFIX = 'P' ERRORS = { diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index e2835bf78d6..42493eb69b1 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -4,10 +4,11 @@ from math import ceil, floor from wazuh.core.cluster.hap_helper.configuration import parse_configuration -from wazuh.core.cluster.hap_helper.exception import HAPHelperError, ProxyError, WazuhError +from wazuh.core.cluster.hap_helper.exception import HAPHelperError, ProxyError from wazuh.core.cluster.hap_helper.proxy import Proxy, ProxyAPI, ProxyServerState from wazuh.core.cluster.hap_helper.wazuh import WazuhAgent, WazuhDAPI from wazuh.core.cluster.utils import ClusterFilter +from wazuh.core.exception import WazuhException class HAPHelper: @@ -37,9 +38,9 @@ def _get_logger() -> logging.Logger: async def initialize_components(self): try: await self.proxy.initialize() - self.logger.info('Main components were initialized') - except (WazuhError, ProxyError) as init_exc: - self.logger.critical('Cannot initialize main components') + self.logger.info('Proxy was initialized') + except ProxyError as init_exc: + self.logger.critical('Cannot initialize the proxy') self.logger.critical(init_exc) exit(1) @@ -292,7 +293,7 @@ async def manage_wazuh_cluster_nodes(self): self.logger.debug(f'Sleeping {self.sleep_time}s...') await sleep(self.sleep_time) - except (HAPHelperError, ProxyError, WazuhError) as handled_exc: + except (HAPHelperError, ProxyError, WazuhException) as handled_exc: self.logger.error(str(handled_exc)) self.logger.warning( f'Tasks may not perform as expected. Sleeping {self.sleep_time}s ' 'before continuing...' 
@@ -326,7 +327,7 @@ async def run(cls): await helper.initialize_components() await helper.initialize_wazuh_cluster_configuration() - helper.logger.info('Starting HAProxy Helper on auto mode') + helper.logger.info('Starting HAProxy Helper') await helper.manage_wazuh_cluster_nodes() except (HAPHelperError, ProxyError) as main_exc: helper.logger.error(str(main_exc)) From 53a8332f249b4b0c6844eed101a9936f4b17fd6d Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Mon, 19 Feb 2024 12:28:31 -0300 Subject: [PATCH 330/419] Remove unused class --- framework/wazuh/core/cluster/hap_helper/wazuh.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/wazuh.py b/framework/wazuh/core/cluster/hap_helper/wazuh.py index 881b525788b..9e80630b84a 100644 --- a/framework/wazuh/core/cluster/hap_helper/wazuh.py +++ b/framework/wazuh/core/cluster/hap_helper/wazuh.py @@ -1,7 +1,6 @@ import logging import re from collections import defaultdict -from enum import Enum from typing import Callable, Optional from wazuh.agent import get_agents, reconnect_agents @@ -11,13 +10,6 @@ from wazuh.core.cluster.utils import ClusterFilter -class WazuhAPIMethod(Enum): - GET = 'get' - POST = 'post' - PUT = 'put' - DELETE = 'delete' - - class WazuhAgent: RECONNECTION_VERSION_MAJOR = 4 RECONNECTION_VERSION_MINOR = 3 From cd1e5af8d2dcf7c9a9c39f6e9dc948089db4a478 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Mon, 19 Feb 2024 16:52:53 -0300 Subject: [PATCH 331/419] Add docstrings --- .../hap_helper/data/configuration.yaml | 2 +- .../core/cluster/hap_helper/hap_helper.py | 110 ++++- .../wazuh/core/cluster/hap_helper/proxy.py | 412 ++++++++++++++++-- .../wazuh/core/cluster/hap_helper/wazuh.py | 94 ++++ 4 files changed, 586 insertions(+), 32 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml b/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml index 160eee56886..7f1180ef577 100644 --- a/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml +++ b/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml @@ -31,7 +31,7 @@ proxy: # Defined Proxy backend (frontend will append '_front' to it) backend: wazuh_cluster - # Defines the list of DNS servers to translate DNS names to IP adresses. + # Defines the list of DNS servers to translate DNS names to IP addresses. # This configuration is recommended but not mandatory. # If it is configured, a resolvers section must be properly defined in # the haproxy.cfg file. diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index 42493eb69b1..527dea1659e 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -12,6 +12,8 @@ class HAPHelper: + """Helper to balance Wazuh agents through cluster calling HAProxy.""" + UPDATED_BACKEND_STATUS_TIMEOUT: int = 60 AGENT_STATUS_SYNC_TIME: int = 25 # Default agent notify time + cluster sync + 5s SERVER_ADMIN_STATE_DELAY: int = 5 @@ -30,21 +32,31 @@ def __init__(self, proxy: Proxy, wazuh_dapi: WazuhDAPI, options: dict): @staticmethod def _get_logger() -> logging.Logger: + """Returns the configured logger. + + Returns + ------- + logging.Logger + The configured logger. 
+ """ + logger = logging.getLogger('wazuh').getChild('HAPHelper') logger.addFilter(ClusterFilter(tag='Cluster', subtag='HAPHelper Main')) return logger - async def initialize_components(self): + async def initialize_proxy(self): + """Initialize HAProxy.""" try: await self.proxy.initialize() self.logger.info('Proxy was initialized') except ProxyError as init_exc: self.logger.critical('Cannot initialize the proxy') self.logger.critical(init_exc) - exit(1) + raise async def initialize_wazuh_cluster_configuration(self): + """Initialize main components of the Wazuh cluster.""" if not await self.proxy.exists_backend(self.proxy.wazuh_backend): self.logger.info(f"Could not find Wazuh backend '{self.proxy.wazuh_backend}'") await self.proxy.add_new_backend(name=self.proxy.wazuh_backend) @@ -60,6 +72,18 @@ async def initialize_wazuh_cluster_configuration(self): self.logger.info('Added Wazuh frontend') async def check_node_to_delete(self, node_name: str) -> bool: + """Checks if the given node can be deleted. + + Parameters + ---------- + node_name : str + The node to check. + + Returns + ------- + bool + True if the node can be deleted, else False. + """ node_downtime = (await self.proxy.get_wazuh_server_stats(server_name=node_name))['lastchg'] self.logger.debug2(f"Server '{node_name}' has been disconnected for {node_downtime}s") @@ -81,6 +105,7 @@ async def check_proxy_processes(self, auto_mode: bool = False, warn: bool = True return True async def backend_servers_state_healthcheck(self): + """Checks if any backend server is in DRAIN state and changes to READY.""" for server in (await self.proxy.get_current_backend_servers()).keys(): if await self.proxy.is_server_drain(server_name=server): self.logger.warning(f"Server '{server}' was found {ProxyServerState.DRAIN.value.upper()}. Fixing it") @@ -89,6 +114,20 @@ async def backend_servers_state_healthcheck(self): async def obtain_nodes_to_configure( self, wazuh_cluster_nodes: dict, proxy_backend_servers: dict ) -> tuple[list, list]: + """Returns the nodes able to add and delete. + + Parameters + ---------- + wazuh_cluster_nodes : dict + Wazuh cluster nodes to check. + proxy_backend_servers : dict + Proxy backend servers to check. + + Returns + ------- + tuple[list, list] + List with nodes to add and delete respectively. + """ add_nodes, remove_nodes = [], [] for node_name, node_address in wazuh_cluster_nodes.items(): @@ -109,6 +148,13 @@ async def obtain_nodes_to_configure( return add_nodes, remove_nodes async def update_agent_connections(self, agent_list: list[str]): + """Reconnects a list of given agents. + + Parameters + ---------- + agent_list : list[str] + Agents to reconnect. + """ self.logger.debug('Reconnecting agents') self.logger.debug( f'Agent reconnection chunk size is set to {self.agent_reconnection_chunk_size}. ' @@ -120,6 +166,15 @@ async def update_agent_connections(self, agent_list: list[str]): await sleep(self.agent_reconnection_time) async def force_agent_reconnection_to_server(self, chosen_server: str, agents_list: list[dict]): + """Force agents reconnection to a given server. + + Parameters + ---------- + chosen_server : str + The server for reconnecting the agents. + agents_list : list[dict] + Agents to be reconnected. 
+ """ current_servers = (await self.proxy.get_current_backend_servers()).keys() affected_servers = current_servers - {chosen_server} for server_name in affected_servers: @@ -147,6 +202,20 @@ async def manage_proxy_processes(self): self.logger.info('Managed proxy processes') async def migrate_old_connections(self, new_servers: list[str], deleted_servers: list[str]): + """Reconnects agents to new servers. + + Parameters + ---------- + new_servers : list[str] + List of servers to connect the agents. + deleted_servers : list[str] + List of servers to disconnect the agents. + + Raises + ------ + HAPHelperError + In case of any new server in not running. + """ wazuh_backend_stats = {} backend_stats_iteration = 1 while any([server not in wazuh_backend_stats for server in new_servers]): @@ -196,6 +265,18 @@ async def migrate_old_connections(self, new_servers: list[str], deleted_servers: await sleep(self.agent_reconnection_stability_time) def check_for_balance(self, current_connections_distribution: dict) -> dict: + """Checks if the Wazuh cluster is balanced. + + Parameters + ---------- + current_connections_distribution : dict + Information about the current connections. + + Returns + ------- + dict + Information about the unbalanced connections. + """ if not current_connections_distribution: self.logger.debug('There are not connections at the moment') return {} @@ -228,6 +309,18 @@ def check_for_balance(self, current_connections_distribution: dict) -> dict: return unbalanced_connections async def calculate_agents_to_balance(self, affected_servers: dict) -> dict: + """Returns the needed connections to be balanced. + + Parameters + ---------- + affected_servers : dict + Servers to check. + + Returns + ------- + dict + Agents to balance. + """ agents_to_balance = {} for server_name, n_agents in affected_servers.items(): agent_candidates = await self.wazuh_dapi.get_agents_belonging_to_node(node_name=server_name, limit=n_agents) @@ -242,6 +335,13 @@ async def calculate_agents_to_balance(self, affected_servers: dict) -> dict: return agents_to_balance async def balance_agents(self, affected_servers: dict): + """Performs agents balance. + + Parameters + ---------- + affected_servers : dict + Servers to obtain the agents to balance. 
+ """ self.logger.info('Attempting to balance agent connections') agents_to_balance = await self.calculate_agents_to_balance(affected_servers) for node_name, agent_ids in agents_to_balance.items(): @@ -249,6 +349,8 @@ async def balance_agents(self, affected_servers: dict): await self.update_agent_connections(agent_list=agent_ids) async def manage_wazuh_cluster_nodes(self): + """Main loop for check balance of Wazuh cluster.""" + while True: try: await self.backend_servers_state_healthcheck() @@ -302,6 +404,8 @@ async def manage_wazuh_cluster_nodes(self): @classmethod async def run(cls): + """Initialize and run HAPHelper.""" + try: configuration = parse_configuration() @@ -324,7 +428,7 @@ async def run(cls): helper = cls(proxy=proxy, wazuh_dapi=wazuh_dapi, options=configuration['hap_helper']) - await helper.initialize_components() + await helper.initialize_proxy() await helper.initialize_wazuh_cluster_configuration() helper.logger.info('Starting HAProxy Helper') diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index 960399a3fc7..c0a12187f53 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -35,6 +35,8 @@ class ProxyBalanceAlgorithm(Enum): class ProxyAPI: + """Wrapper for calling HAProxy REST API""" + HAPEE_ENDPOINT = '/hapee' def __init__(self, username: str, password: str, address: str = 'localhost', port: int = 7777): @@ -46,6 +48,13 @@ def __init__(self, username: str, password: str, address: str = 'localhost', por self.version = 0 async def initialize(self): + """Initialize the REST API client. + + Raises + ------ + ProxyError + In case of errors communicating with the HAProxy REST API. + """ try: async with httpx.AsyncClient(verify=False) as client: response = await client.post( @@ -67,6 +76,29 @@ async def _make_hapee_request( query_parameters: dict | None = None, json_body: dict | None = None, ) -> PROXY_API_RESPONSE: + """Wrapper to make REST API calls. + + Parameters + ---------- + endpoint : str + Endpoint to call. + method : ProxyAPIMethod, optional + Method to use, by default ProxyAPIMethod.GET + query_parameters : dict | None, optional + Query parameters to send in the request, by default None + json_body : dict | None, optional + Data to send within the request, by default None + + Returns + ------- + PROXY_API_RESPONSE + REST API response in JSON format. + + Raises + ------ + ProxyError + In case of errors communicating with the HAProxy REST API. + """ uri = f'https://{self.address}:{self.port}{self.HAPEE_ENDPOINT}' query_parameters = query_parameters or {} query_parameters.update({'version': self.version}) @@ -100,6 +132,7 @@ async def _make_hapee_request( else: raise ProxyError(101, extra_msg=f'Full response: {response.status_code} | {response.json()}') + # TODO: This must be deprecated async def _make_proxy_request( self, endpoint: str, @@ -129,13 +162,29 @@ async def _make_proxy_request( raise ProxyError(101, extra_msg=f'Full response: {response.status_code} | {response.json()}') async def update_configuration_version(self): + """Get the last version of the configuration schema and set it.""" + configuration_version = await self._make_hapee_request('/services/haproxy/configuration/version') self.version = configuration_version async def get_runtime_info(self) -> PROXY_API_RESPONSE: + """Returns the runtime information of the HAProxy instance. + + Returns + ------- + PROXY_API_RESPONSE + The runtime information. 
+ """ return (await self._make_hapee_request('/services/haproxy/runtime/info'))[0]['info'] async def get_backends(self) -> PROXY_API_RESPONSE: + """Returns the configured backends. + + Returns + ------- + PROXY_API_RESPONSE + Information of configured backends. + """ return await self._make_hapee_request(endpoint='/services/haproxy/configuration/backends') async def add_backend( @@ -144,6 +193,22 @@ async def add_backend( mode: CommunicationProtocol = CommunicationProtocol.TCP, algorithm: ProxyBalanceAlgorithm = ProxyBalanceAlgorithm.LEAST_CONNECTIONS, ) -> PROXY_API_RESPONSE: + """Adds a new backend to HAProxy instance. + + Parameters + ---------- + name : str + Name to set. + mode : CommunicationProtocol, optional + Protocol to use, by default CommunicationProtocol.TCP + algorithm : ProxyBalanceAlgorithm, optional + Load balancing algorithm to use, by default ProxyBalanceAlgorithm.LEAST_CONNECTIONS + + Returns + ------- + PROXY_API_RESPONSE + Information about the newly added backend. + """ query_params = {'force_reload': True} json_body = {'name': name, 'mode': mode.value, 'balance': {'algorithm': algorithm.value}} @@ -155,6 +220,19 @@ async def add_backend( ) async def get_backend_servers(self, backend: str) -> PROXY_API_RESPONSE: + """Returns the servers for the provided backend. + + Parameters + ---------- + backend : str + Backend name to query. + + Returns + ------- + PROXY_API_RESPONSE + The servers for the provided backend. + """ + return await self._make_hapee_request( '/services/haproxy/configuration/servers', query_parameters={'backend': backend} ) @@ -162,15 +240,35 @@ async def get_backend_servers(self, backend: str) -> PROXY_API_RESPONSE: async def add_server_to_backend( self, backend: str, server_name: str, server_address: str, port: int, resolver: Optional[str] ) -> PROXY_API_RESPONSE: + """Adds new server to the provided backend. + + Parameters + ---------- + backend : str + Backend to add the new server. + server_name : str + Name of the new server. + server_address : str + IP or DNS for the new server. + port : int + Port number to use with the new server. + resolver : Optional[str] + The name of the connection resolver to use. + + Returns + ------- + PROXY_API_RESPONSE + Information about the newly added server. + """ query_params = {'backend': backend, 'force_reload': True} json_body = {'check': 'enabled', 'name': server_name, 'address': server_address, 'port': port} - # check that server_address is in ip address format + is_ip_address = None try: is_ip_address = ipaddress.ip_address(server_address) and True except ValueError: - # the server_addr is not in ip address format is_ip_address = False + json_body.update( {'resolvers': resolver, 'init-addr': 'last,libc,none'} if resolver and not is_ip_address else {} ) @@ -183,6 +281,20 @@ async def add_server_to_backend( ) async def remove_server_from_backend(self, backend: str, server_name: str) -> PROXY_API_RESPONSE: + """Remove a server from the backend. + + Parameters + ---------- + backend : str + The backend to remove the server. + server_name : str + The server to remove. + + Returns + ------- + PROXY_API_RESPONSE + Information about the removed server. + """ query_params = {'backend': backend, 'force_reload': True} return await self._make_hapee_request( @@ -192,11 +304,36 @@ async def remove_server_from_backend(self, backend: str, server_name: str) -> PR ) async def get_frontends(self) -> PROXY_API_RESPONSE: + """Returns the frontends configured in the HAProxy instance. 
+ + Returns + ------- + PROXY_API_RESPONSE + Information of configured frontends. + """ return await self._make_hapee_request(endpoint='/services/haproxy/configuration/frontends') async def add_frontend( self, name: str, port: int, backend: str, mode: CommunicationProtocol = CommunicationProtocol.TCP ) -> PROXY_API_RESPONSE: + """Adds a new frontend to the HAProxy instance. + + Parameters + ---------- + name : str + Name of the new frontend. + port : int + Port number to use with the new frontend. + backend : str + Default backend to connect. + mode : CommunicationProtocol, optional + Communication protocol to use, by default CommunicationProtocol.TCP + + Returns + ------- + PROXY_API_RESPONSE + Information about the newly created frontend. + """ frontend_query_params = {'force_reload': True} frontend_json_body = {'name': name, 'mode': mode.value, 'default_backend': backend} @@ -221,6 +358,20 @@ async def add_frontend( return frontend_response async def get_backend_server_runtime_settings(self, backend_name: str, server_name: str) -> PROXY_API_RESPONSE: + """Returns the setting for a backend server. + + Parameters + ---------- + backend_name : str + Backend name to query. + server_name : str + Server name to query. + + Returns + ------- + PROXY_API_RESPONSE + Settings information for the server. + """ query_params = {'backend': backend_name, 'name': server_name} return await self._make_hapee_request( @@ -230,6 +381,22 @@ async def get_backend_server_runtime_settings(self, backend_name: str, server_na async def change_backend_server_state( self, backend_name: str, server_name: str, state: ProxyServerState ) -> PROXY_API_RESPONSE: + """Set the status of a backend server, + + Parameters + ---------- + backend_name : str + Backend name to query. + server_name : str + Server name to query. + state : ProxyServerState + New state to set it. + + Returns + ------- + PROXY_API_RESPONSE + Information about the new server state. + """ query_params = {'backend': backend_name} json_body = {'admin_state': state.value} @@ -241,15 +408,42 @@ async def change_backend_server_state( ) async def get_backend_stats(self, backend_name: str) -> PROXY_API_RESPONSE: + """Returns the statistics of the provided backend. + + Parameters + ---------- + backend_name : str + Backend name to query. + + Returns + ------- + PROXY_API_RESPONSE + Statistics of the backend. + """ query_params = {'type': 'backend', 'name': backend_name} return await self._make_hapee_request('/services/haproxy/stats/native', query_parameters=query_params) async def get_backend_server_stats(self, backend_name: str, server_name: str) -> PROXY_API_RESPONSE: + """Returns the statistics of the provided backend server. + + Parameters + ---------- + backend_name : str + Backend to query. + server_name : str + Server to query. + + Returns + ------- + PROXY_API_RESPONSE + Statistics of the server. 
+ """ query_params = {'type': 'server', 'parent': backend_name, 'name': server_name.lower()} return await self._make_hapee_request('/services/haproxy/stats/native', query_parameters=query_params) + # TODO: This must be deprecated async def get_proxy_processes(self) -> PROXY_API_RESPONSE: return await self._make_proxy_request('/haproxy/processes') @@ -262,16 +456,6 @@ async def kill_proxy_processes(self, pid_to_exclude: int = 0) -> PROXY_API_RESPO ) -def check_proxy_api(func): - async def wrapper(self, *args, **kwargs): - if self.api is None: - raise ProxyError(103) - - return await func(self, *args, **kwargs) - - return wrapper - - class Proxy: def __init__( self, @@ -288,65 +472,165 @@ def __init__( @staticmethod def _get_logger() -> logging.Logger: + """Returns the configured logger. + + Returns + ------- + logging.Logger + The configured logger. + """ + logger = logging.getLogger('wazuh').getChild('HAPHelper Proxy') logger.addFilter(ClusterFilter(tag='Cluster', subtag='HAPHelper Proxy')) return logger async def initialize(self): + """Initialize the ProxyAPI. + + Raises + ------ + ProxyError + In case of errors initializing ProxyAPI. + """ await self.api.initialize() try: (await self.api.get_runtime_info())['version'] except (KeyError, IndexError): raise ProxyError(104) - @check_proxy_api async def get_current_pid(self) -> int: + """Returns the current HAProxy PID + + Returns + ------- + int + Current PID. + """ return (await self.api.get_runtime_info())['pid'] - @check_proxy_api async def get_current_backends(self) -> dict: + """Returns current backends from the Proxy. + + Returns + ------- + dict + The backends. + """ api_response = await self.api.get_backends() self.logger.debug2('Obtained proxy backends') return {backend['name']: backend for backend in api_response['data']} async def exists_backend(self, backend_name: str) -> bool: + """Checks if the provided backend exists. + + Parameters + ---------- + backend_name : str + Backend to check. + + Returns + ------- + bool + True if exists else False. + """ return backend_name in await self.get_current_backends() - @check_proxy_api async def get_current_frontends(self) -> dict: + """Returns current frontends from the Proxy. + + Returns + ------- + dict + The frontends. + """ api_response = await self.api.get_frontends() self.logger.debug2('Obtained proxy frontends') return {frontend['name']: frontend for frontend in api_response['data'] if 'default_backend' in frontend} async def exists_frontend(self, frontend_name: str) -> bool: + """Checks if the provided frontend exists. + + Parameters + ---------- + frontend_name : str + Frontend to check. + + Returns + ------- + bool + True if exists else False. + """ return frontend_name in await self.get_current_frontends() - @check_proxy_api async def add_new_backend( self, name: str, mode: CommunicationProtocol = CommunicationProtocol.TCP, algorithm: ProxyBalanceAlgorithm = ProxyBalanceAlgorithm.LEAST_CONNECTIONS, ): + """Adds new backend to the Proxy. + + Parameters + ---------- + name : str + Name for the new backend. 
+ mode : CommunicationProtocol, optional + Communication protocol to use, by default CommunicationProtocol.TCP + algorithm : ProxyBalanceAlgorithm, optional + Load balancing algorithm to use, by default ProxyBalanceAlgorithm.LEAST_CONNECTIONS + """ await self.api.add_backend(name=name, mode=mode, algorithm=algorithm) self.logger.debug2(f"Added new proxy backend: '{name}'") - @check_proxy_api async def add_new_frontend( self, name: str, port: int, backend: str, mode: CommunicationProtocol = CommunicationProtocol.TCP ): + """Adds new frontend to the Proxy. + + Parameters + ---------- + name : str + Name for the new frontend. + port : int + Port number to use with the new frontend. + backend : str + Default backend to connect. + mode : CommunicationProtocol, optional + Communication protocol to use, by default CommunicationProtocol.TCP + """ await self.api.add_frontend(name=name, port=port, backend=backend, mode=mode) self.logger.debug2(f"Added new proxy frontend: '{name}'") - @check_proxy_api async def get_current_backend_servers(self) -> dict: + """Returns current backend servers from the Proxy. + + Returns + ------- + dict + The backend servers. + """ api_response = await self.api.get_backend_servers(self.wazuh_backend) self.logger.debug2('Obtained proxy servers') return {server['name']: server['address'] for server in api_response['data']} - @check_proxy_api - async def add_wazuh_manager(self, manager_name: str, manager_address: str, resolver: Optional[str]) -> dict: + async def add_wazuh_manager(self, manager_name: str, manager_address: str, resolver: Optional[str] = None) -> dict: + """Adds new Wazuh manager to the Proxy. + + Parameters + ---------- + manager_name : str + Name of the Wazuh manager. + manager_address : str + IP or DNS for the Wazuh manager. + resolver : Optional[str] + Name of the connection resolver to use, by default None. + + Returns + ------- + dict + Information about the newly added manager. + """ api_response = await self.api.add_server_to_backend( backend=self.wazuh_backend, server_name=manager_name, @@ -360,31 +644,75 @@ async def add_wazuh_manager(self, manager_name: str, manager_address: str, resol ) return api_response - @check_proxy_api async def remove_wazuh_manager(self, manager_name: str) -> dict: + """Deletes the given Wazuh manager from the Proxy. + + Parameters + ---------- + manager_name : str + Manager to remove. + + Returns + ------- + dict + Information about the removed manager. + """ api_response = await self.api.remove_server_from_backend(backend=self.wazuh_backend, server_name=manager_name) self.logger.debug2(f"Removed server {manager_name} from backend '{self.wazuh_backend}'") return api_response - @check_proxy_api async def restrain_server_new_connections(self, server_name: str) -> dict: + """Change the status of the given server to DRAIN to restrain new connections. + + Parameters + ---------- + server_name : str + The server to restrain. + + Returns + ------- + dict + Information about the server's new state. + """ api_response = await self.api.change_backend_server_state( backend_name=self.wazuh_backend, server_name=server_name, state=ProxyServerState.DRAIN ) self.logger.debug2(f"Changed Wazuh server '{server_name}' to {ProxyServerState.DRAIN.value.upper()} state") return api_response - @check_proxy_api async def allow_server_new_connections(self, server_name: str) -> dict: + """Change the status of the given server to READY to allow new connections. + + Parameters + ---------- + server_name : str + The server that allows connections. 
+ + Returns + ------- + dict + Information about the server's new state. + """ api_response = await self.api.change_backend_server_state( backend_name=self.wazuh_backend, server_name=server_name, state=ProxyServerState.READY ) self.logger.debug2(f"Changed Wazuh server '{server_name}' to {ProxyServerState.READY.value.upper()} state") return api_response - @check_proxy_api async def get_wazuh_server_stats(self, server_name: str) -> dict: + """Returns statistics of the given server. + + Parameters + ---------- + server_name : str + The server name to query + + Returns + ------- + dict + The statistics of the server. + """ server_stats = ( await self.api.get_backend_server_stats(backend_name=self.wazuh_backend, server_name=server_name) )[0]['stats'][0]['stats'] @@ -392,15 +720,37 @@ async def get_wazuh_server_stats(self, server_name: str) -> dict: self.logger.debug2(f"Obtained server '{server_name}' stats") return server_stats - @check_proxy_api async def is_server_drain(self, server_name: str) -> bool: + """Checks if the server is in DRAIN state. + + Parameters + ---------- + server_name : str + The server to check. + + Returns + ------- + bool + True if the server is in a DRAIN state, else False. + """ server_stats = await self.api.get_backend_server_runtime_settings( backend_name=self.wazuh_backend, server_name=server_name ) return server_stats['admin_state'] == ProxyServerState.DRAIN.value - @check_proxy_api async def get_wazuh_backend_stats(self, only_actives: bool = True) -> dict: + """Returns statistics of the Wazuh backend. + + Parameters + ---------- + only_actives : bool, optional + Only include running servers, by default True + + Returns + ------- + dict + The statistics of the Wazuh backend. + """ backend_servers = [ server['name'] for server in (await self.api.get_backend_servers(self.wazuh_backend))['data'] ] @@ -414,13 +764,19 @@ async def get_wazuh_backend_stats(self, only_actives: bool = True) -> dict: return stats - @check_proxy_api async def get_wazuh_backend_server_connections(self) -> dict: + """Returns the active connections of the Wazuh backend server. + + Returns + ------- + dict + Information about the current connections. + """ current_connections_key = 'scur' server_stats = await self.get_wazuh_backend_stats() return {server_name: server_stats[server_name][current_connections_key] for server_name in server_stats} - @check_proxy_api + # TODO: This must be deprecated async def is_proxy_process_single(self) -> bool: haproxy_processes = await self.api.get_proxy_processes() return len(haproxy_processes['data']['processes']) == 1 diff --git a/framework/wazuh/core/cluster/hap_helper/wazuh.py b/framework/wazuh/core/cluster/hap_helper/wazuh.py index 9e80630b84a..8e7ce4fbc12 100644 --- a/framework/wazuh/core/cluster/hap_helper/wazuh.py +++ b/framework/wazuh/core/cluster/hap_helper/wazuh.py @@ -11,21 +11,49 @@ class WazuhAgent: + """Tools for handle Wazuh agents connections.""" + RECONNECTION_VERSION_MAJOR = 4 RECONNECTION_VERSION_MINOR = 3 AGENT_VERSION_REGEX = re.compile(r'.*v(\d+)\.(\d+)\.\d+') @classmethod def can_reconnect(cls, agent_version: str) -> bool: + """Check if the agent can be re-connected. + + Parameters + ---------- + agent_version : str + The version of the agent to check. + + Returns + ------- + bool + True if the agent can be re-connected else False. 
+ """ major, minor = cls.AGENT_VERSION_REGEX.match(agent_version).groups() return int(major) >= cls.RECONNECTION_VERSION_MAJOR and int(minor) >= cls.RECONNECTION_VERSION_MINOR @classmethod def get_agents_able_to_reconnect(cls, agents_list: list[dict]) -> list[str]: + """Obtain agents that can be re-connected. + + Parameters + ---------- + agents_list : list[dict] + List of agents to analyze. + + Returns + ------- + list[str] + Agents that can be re-connected. + """ return [agent['id'] for agent in agents_list if cls.can_reconnect(agent['version'])] class WazuhDAPI: + """Class for call Wazuh DAPI functions.""" + AGENTS_MAX_LIMIT = 100000 API_RETRIES = 5 TIMEOUT_ERROR_CODE = 3021 @@ -41,12 +69,38 @@ def __init__( @staticmethod def _get_logger() -> logging.Logger: + """Returns the configured logger. + + Returns + ------- + logging.Logger + The configured logger. + """ logger = logging.getLogger('wazuh').getChild('HAPHelper DAPI') logger.addFilter(ClusterFilter(tag='Cluster', subtag='HAPHelper DAPI')) return logger async def _make_dapi_call(self, f: Callable, f_kwargs: Optional[dict] = None, **kwargs) -> dict: + """Wrapper to call DAPI functions. + + Parameters + ---------- + f : Callable + Function to be executed. + f_kwargs : Optional[dict], optional + Arguments to be passed to function `f`, by default None + + Returns + ------- + dict + The API response. + + Raises + ------ + WazuhException + Raise the exception returned by function `f`. + """ ret_val = await DistributedAPI(f=f, f_kwargs=f_kwargs, logger=self.logger, **kwargs).distribute_function() if isinstance(ret_val, Exception): self.logger.error(f'Unexpected error calling {f.__name__}') @@ -54,6 +108,13 @@ async def _make_dapi_call(self, f: Callable, f_kwargs: Optional[dict] = None, ** return ret_val async def get_cluster_nodes(self) -> dict: + """Get the nodes of the cluster. + + Returns + ------- + dict + Information about the cluster nodes. + """ data = await self._make_dapi_call( f=get_nodes_info, request_type='local_master', @@ -65,6 +126,18 @@ async def get_cluster_nodes(self) -> dict: return {item['name']: item['ip'] for item in data.affected_items if item['name'] not in self.excluded_nodes} async def reconnect_agents(self, agent_list: list = None) -> dict: + """Makes an API call to reconnect agents. + + Parameters + ---------- + agent_list : list, optional + The agents to be re-connected, by default None + + Returns + ------- + dict + Information about the re-connected agents. + """ data = await self._make_dapi_call( f=reconnect_agents, f_kwargs={'agent_list': agent_list}, @@ -74,6 +147,13 @@ async def reconnect_agents(self, agent_list: list = None) -> dict: return data.affected_items async def get_agents_node_distribution(self) -> dict: + """Get the distribution of connected agents. + + Returns + ------- + dict + The current distribution of the agents. + """ agent_distribution = defaultdict(list) f_kwargs = { @@ -96,6 +176,20 @@ async def get_agents_node_distribution(self) -> dict: return agent_distribution async def get_agents_belonging_to_node(self, node_name: str, limit: int = None) -> list[dict]: + """Get the agents that are connected to a specific node. + + Parameters + ---------- + node_name : str + The name of the node to check. + limit : int, optional + Max number of agents to retrieve, by default None + + Returns + ------- + list[dict] + The connected agents. 
+ """ f_kwargs = { 'select': ['version'], 'sort': {'fields': ['version', 'id'], 'order': 'desc'}, From e42bd0ef4998527a744214226f8b3964c58c4620 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Mon, 19 Feb 2024 17:45:45 -0300 Subject: [PATCH 332/419] Unify exceptions --- .../core/cluster/hap_helper/configuration.py | 4 +-- .../core/cluster/hap_helper/exception.py | 30 ---------------- .../core/cluster/hap_helper/hap_helper.py | 11 +++--- .../wazuh/core/cluster/hap_helper/proxy.py | 34 ++++++++++--------- framework/wazuh/core/exception.py | 19 ++++++++++- 5 files changed, 42 insertions(+), 56 deletions(-) delete mode 100644 framework/wazuh/core/cluster/hap_helper/exception.py diff --git a/framework/wazuh/core/cluster/hap_helper/configuration.py b/framework/wazuh/core/cluster/hap_helper/configuration.py index e411a1bb780..c5ccaebe834 100644 --- a/framework/wazuh/core/cluster/hap_helper/configuration.py +++ b/framework/wazuh/core/cluster/hap_helper/configuration.py @@ -3,7 +3,7 @@ import jsonschema import yaml -from wazuh.core.cluster.hap_helper.exception import HAPHelperError +from wazuh.core.exception import WazuhHAPHelperError def validate_custom_configuration(custom_configuration: dict): @@ -15,7 +15,7 @@ def validate_custom_configuration(custom_configuration: dict): try: jsonschema.validate(instance=custom_configuration, schema=json_schema) except jsonschema.ValidationError as validation_err: - raise HAPHelperError(101, extra_msg=f"({'> '.join(validation_err.path)}) {validation_err.message}") + raise WazuhHAPHelperError(3042, extra_message=f"({'> '.join(validation_err.path)}) {validation_err.message}") def merge_configurations(default: dict, config: dict) -> dict: diff --git a/framework/wazuh/core/cluster/hap_helper/exception.py b/framework/wazuh/core/cluster/hap_helper/exception.py deleted file mode 100644 index 3c78ea8d128..00000000000 --- a/framework/wazuh/core/cluster/hap_helper/exception.py +++ /dev/null @@ -1,30 +0,0 @@ -class CustomException(Exception): - PREFIX = 'U' - ERRORS = {} - - def __init__(self, code: int, extra_msg: str = ''): - self._code = code - self._message = self.ERRORS[self._code] - if extra_msg: - self._message += f' - {extra_msg}' - - def __str__(self): - return f'({self.PREFIX}{self._code}) {self._message}' - - -class HAPHelperError(CustomException): - PREFIX = 'C' - ERRORS = {100: 'Server status check timed out after adding new servers', 101: 'User configuration is not valid'} - - -class ProxyError(CustomException): - PREFIX = 'P' - ERRORS = { - 99: 'Cannot initialize Proxy API', - 100: 'Unexpected error trying to connect to Proxy API', - 101: 'Unexpected response from the Proxy API', - 102: 'Invalid credentials for the Proxy API', - 103: 'Invalid HAProxy Dataplane API specification configured', - 104: 'Cannot detect a valid HAProxy process linked to the Dataplane API', - 105: 'Unexpected response from HAProxy Dataplane API', - } diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index 527dea1659e..8203c743baf 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -4,11 +4,10 @@ from math import ceil, floor from wazuh.core.cluster.hap_helper.configuration import parse_configuration -from wazuh.core.cluster.hap_helper.exception import HAPHelperError, ProxyError from wazuh.core.cluster.hap_helper.proxy import Proxy, ProxyAPI, ProxyServerState from wazuh.core.cluster.hap_helper.wazuh import WazuhAgent, WazuhDAPI from 
wazuh.core.cluster.utils import ClusterFilter -from wazuh.core.exception import WazuhException +from wazuh.core.exception import WazuhException, WazuhHAPHelperError class HAPHelper: @@ -50,7 +49,7 @@ async def initialize_proxy(self): try: await self.proxy.initialize() self.logger.info('Proxy was initialized') - except ProxyError as init_exc: + except WazuhHAPHelperError as init_exc: self.logger.critical('Cannot initialize the proxy') self.logger.critical(init_exc) raise @@ -221,7 +220,7 @@ async def migrate_old_connections(self, new_servers: list[str], deleted_servers: while any([server not in wazuh_backend_stats for server in new_servers]): if backend_stats_iteration > self.UPDATED_BACKEND_STATUS_TIMEOUT: self.logger.error(f'Some of the new servers did not go UP: {set(new_servers) - wazuh_backend_stats}') - raise HAPHelperError(100) + raise WazuhHAPHelperError(3041) self.logger.debug('Waiting for new servers to go UP') time.sleep(1) @@ -395,7 +394,7 @@ async def manage_wazuh_cluster_nodes(self): self.logger.debug(f'Sleeping {self.sleep_time}s...') await sleep(self.sleep_time) - except (HAPHelperError, ProxyError, WazuhException) as handled_exc: + except WazuhException as handled_exc: self.logger.error(str(handled_exc)) self.logger.warning( f'Tasks may not perform as expected. Sleeping {self.sleep_time}s ' 'before continuing...' @@ -433,8 +432,6 @@ async def run(cls): helper.logger.info('Starting HAProxy Helper') await helper.manage_wazuh_cluster_nodes() - except (HAPHelperError, ProxyError) as main_exc: - helper.logger.error(str(main_exc)) except KeyboardInterrupt: pass except Exception as unexpected_exc: diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index c0a12187f53..641770e5cdf 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -4,8 +4,8 @@ from typing import Optional, TypeAlias import httpx -from wazuh.core.cluster.hap_helper.exception import ProxyError from wazuh.core.cluster.utils import ClusterFilter +from wazuh.core.exception import WazuhHAPHelperError JSON_TYPE: TypeAlias = dict | list[dict] PROXY_API_RESPONSE: TypeAlias = JSON_TYPE @@ -52,7 +52,7 @@ async def initialize(self): Raises ------ - ProxyError + WazuhHAPHelperError In case of errors communicating with the HAProxy REST API. """ try: @@ -61,13 +61,13 @@ async def initialize(self): f'https://{self.address}:{self.port}/', auth=(self.username, self.password) ) if response.status_code == 401: - raise ProxyError(102) + raise WazuhHAPHelperError(3046) elif response.status_code == 404: - raise ProxyError(103) + raise WazuhHAPHelperError(3047) except httpx.ConnectError: - raise ProxyError(99, extra_msg='Check connectivity and the configuration file') + raise WazuhHAPHelperError(3043, extra_message='Check connectivity and the configuration file') except httpx.RequestError as req_exc: - raise ProxyError(99, extra_msg=str(req_exc)) + raise WazuhHAPHelperError(3043, extra_message=str(req_exc)) async def _make_hapee_request( self, @@ -96,7 +96,7 @@ async def _make_hapee_request( Raises ------ - ProxyError + WazuhHAPHelperError In case of errors communicating with the HAProxy REST API. 
""" uri = f'https://{self.address}:{self.port}{self.HAPEE_ENDPOINT}' @@ -114,13 +114,15 @@ async def _make_hapee_request( async with httpx.AsyncClient(verify=False, follow_redirects=True) as client: response = await client.post(uri, auth=(self.username, self.password), json=hapee_json_body) except httpx.RequestError as request_exc: - raise ProxyError(100, extra_msg=str(request_exc)) + raise WazuhHAPHelperError(3044, extra_message=str(request_exc)) if response.status_code == 200: full_decoded_response = response.json() decoded_response = full_decoded_response['data']['response'] if full_decoded_response['error'] != 0: - raise ProxyError(105, extra_msg=f'Full response: {response.status_code} | {response.json()}') + raise WazuhHAPHelperError( + 3049, extra_message=f'Full response: {response.status_code} | {response.json()}' + ) if isinstance(decoded_response, dict) and '_version' in decoded_response: self.version = decoded_response['_version'] elif method != ProxyAPIMethod.GET and 'configuration' in endpoint: @@ -128,9 +130,9 @@ async def _make_hapee_request( return decoded_response elif response.status_code == 401: - raise ProxyError(102) + raise WazuhHAPHelperError(3046) else: - raise ProxyError(101, extra_msg=f'Full response: {response.status_code} | {response.json()}') + raise WazuhHAPHelperError(3045, extra_message=f'Full response: {response.status_code} | {response.json()}') # TODO: This must be deprecated async def _make_proxy_request( @@ -152,14 +154,14 @@ async def _make_proxy_request( json=json_body, ) except httpx.RequestError as request_exc: - raise ProxyError(100, extra_msg=str(request_exc)) + raise WazuhHAPHelperError(3044, extra_message=str(request_exc)) if response.status_code == 200: return response.json() elif response.status_code == 401: - raise ProxyError(102) + raise WazuhHAPHelperError(3046) else: - raise ProxyError(101, extra_msg=f'Full response: {response.status_code} | {response.json()}') + raise WazuhHAPHelperError(3045, extra_message=f'Full response: {response.status_code} | {response.json()}') async def update_configuration_version(self): """Get the last version of the configuration schema and set it.""" @@ -490,14 +492,14 @@ async def initialize(self): Raises ------ - ProxyError + WazuhHAPHelperError In case of errors initializing ProxyAPI. """ await self.api.initialize() try: (await self.api.get_runtime_info())['version'] except (KeyError, IndexError): - raise ProxyError(104) + raise WazuhHAPHelperError(3048) async def get_current_pid(self) -> int: """Returns the current HAProxy PID diff --git a/framework/wazuh/core/exception.py b/framework/wazuh/core/exception.py index 7ebe5cce7e9..1476fce69b9 100755 --- a/framework/wazuh/core/exception.py +++ b/framework/wazuh/core/exception.py @@ -6,7 +6,7 @@ from typing import Union from wazuh.core.cluster import __version__ -from wazuh.core.common import MAX_SOCKET_BUFFER_SIZE, AGENT_NAME_LEN_LIMIT, MAX_GROUPS_PER_MULTIGROUP +from wazuh.core.common import AGENT_NAME_LEN_LIMIT, MAX_GROUPS_PER_MULTIGROUP, MAX_SOCKET_BUFFER_SIZE GENERIC_ERROR_MSG = "Wazuh Internal Error. 
See log for more detail" DOCU_VERSION = 'current' if __version__ == '' else '.'.join(__version__.split('.')[:2]).lstrip('v') @@ -496,6 +496,15 @@ class WazuhException(Exception): 3038: "Error while processing extra-valid files", 3039: "Timeout while waiting to receive a file", 3040: "Error while waiting to receive a file", + 3041: "Server status check timed out after adding new servers", + 3042: "User configuration is not valid", + 3043: "Cannot initialize Proxy API", + 3044: "Unexpected error trying to connect to Proxy API", + 3045: "Unexpected response from the Proxy API", + 3046: "Invalid credentials for the Proxy API", + 3047: "Invalid HAProxy Dataplane API specification configured", + 3048: "Cannot detect a valid HAProxy process linked to the Dataplane API", + 3049: "Unexpected response from HAProxy Dataplane API", # RBAC exceptions # The messages of these exceptions are provisional until the RBAC documentation is published. @@ -778,6 +787,14 @@ class WazuhClusterError(WazuhInternalError): _default_title = "Wazuh Cluster Error" +class WazuhHAPHelperError(WazuhClusterError): + """ + This type of exception is raised inside the HAProxy Helper. + """ + _default_type = "about:blank" + _default_title = "HAProxy Helper Error" + + class WazuhError(WazuhException): """ This type of exception is raised as a controlled response to a bad request from user From 69e90f97782e0a510378fc49985b2e0720494883 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Tue, 20 Feb 2024 13:27:07 -0300 Subject: [PATCH 333/419] Suggestions from CR --- .../wazuh/core/cluster/hap_helper/proxy.py | 72 +++++++++---------- .../wazuh/core/cluster/hap_helper/wazuh.py | 14 ++-- framework/wazuh/core/exception.py | 2 + 3 files changed, 45 insertions(+), 43 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index 641770e5cdf..a0d2ccb9d67 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -83,11 +83,11 @@ async def _make_hapee_request( endpoint : str Endpoint to call. method : ProxyAPIMethod, optional - Method to use, by default ProxyAPIMethod.GET + Method to use, by default ProxyAPIMethod.GET. query_parameters : dict | None, optional - Query parameters to send in the request, by default None + Query parameters to send in the request, by default None. json_body : dict | None, optional - Data to send within the request, by default None + Data to send within the request, by default None. Returns ------- @@ -170,7 +170,7 @@ async def update_configuration_version(self): self.version = configuration_version async def get_runtime_info(self) -> PROXY_API_RESPONSE: - """Returns the runtime information of the HAProxy instance. + """Get the runtime information of the HAProxy instance. Returns ------- @@ -180,7 +180,7 @@ async def get_runtime_info(self) -> PROXY_API_RESPONSE: return (await self._make_hapee_request('/services/haproxy/runtime/info'))[0]['info'] async def get_backends(self) -> PROXY_API_RESPONSE: - """Returns the configured backends. + """Get the configured backends. Returns ------- @@ -195,16 +195,16 @@ async def add_backend( mode: CommunicationProtocol = CommunicationProtocol.TCP, algorithm: ProxyBalanceAlgorithm = ProxyBalanceAlgorithm.LEAST_CONNECTIONS, ) -> PROXY_API_RESPONSE: - """Adds a new backend to HAProxy instance. + """Add a new backend to HAProxy instance. Parameters ---------- name : str Name to set. 
mode : CommunicationProtocol, optional - Protocol to use, by default CommunicationProtocol.TCP + Protocol to use, by default CommunicationProtocol.TCP. algorithm : ProxyBalanceAlgorithm, optional - Load balancing algorithm to use, by default ProxyBalanceAlgorithm.LEAST_CONNECTIONS + Load balancing algorithm to use, by default ProxyBalanceAlgorithm.LEAST_CONNECTIONS. Returns ------- @@ -222,7 +222,7 @@ async def add_backend( ) async def get_backend_servers(self, backend: str) -> PROXY_API_RESPONSE: - """Returns the servers for the provided backend. + """Get the servers for the provided backend. Parameters ---------- @@ -242,7 +242,7 @@ async def get_backend_servers(self, backend: str) -> PROXY_API_RESPONSE: async def add_server_to_backend( self, backend: str, server_name: str, server_address: str, port: int, resolver: Optional[str] ) -> PROXY_API_RESPONSE: - """Adds new server to the provided backend. + """Add a new server to the provided backend. Parameters ---------- @@ -318,7 +318,7 @@ async def get_frontends(self) -> PROXY_API_RESPONSE: async def add_frontend( self, name: str, port: int, backend: str, mode: CommunicationProtocol = CommunicationProtocol.TCP ) -> PROXY_API_RESPONSE: - """Adds a new frontend to the HAProxy instance. + """Add a new frontend to the HAProxy instance. Parameters ---------- @@ -329,7 +329,7 @@ async def add_frontend( backend : str Default backend to connect. mode : CommunicationProtocol, optional - Communication protocol to use, by default CommunicationProtocol.TCP + Communication protocol to use, by default CommunicationProtocol.TCP. Returns ------- @@ -360,7 +360,7 @@ async def add_frontend( return frontend_response async def get_backend_server_runtime_settings(self, backend_name: str, server_name: str) -> PROXY_API_RESPONSE: - """Returns the setting for a backend server. + """Get the settings of a backend server. Parameters ---------- @@ -383,7 +383,7 @@ async def get_backend_server_runtime_settings(self, backend_name: str, server_na async def change_backend_server_state( self, backend_name: str, server_name: str, state: ProxyServerState ) -> PROXY_API_RESPONSE: - """Set the status of a backend server, + """Set the status of a backend server. Parameters ---------- @@ -410,7 +410,7 @@ async def change_backend_server_state( ) async def get_backend_stats(self, backend_name: str) -> PROXY_API_RESPONSE: - """Returns the statistics of the provided backend. + """Get the statistics of the provided backend. Parameters ---------- @@ -427,7 +427,7 @@ async def get_backend_stats(self, backend_name: str) -> PROXY_API_RESPONSE: return await self._make_hapee_request('/services/haproxy/stats/native', query_parameters=query_params) async def get_backend_server_stats(self, backend_name: str, server_name: str) -> PROXY_API_RESPONSE: - """Returns the statistics of the provided backend server. + """Get the statistics of the provided backend server. Parameters ---------- @@ -474,7 +474,7 @@ def __init__( @staticmethod def _get_logger() -> logging.Logger: - """Returns the configured logger. + """Get the configured logger. Returns ------- @@ -502,7 +502,7 @@ async def initialize(self): raise WazuhHAPHelperError(3048) async def get_current_pid(self) -> int: - """Returns the current HAProxy PID + """Get the current HAProxy PID. Returns ------- @@ -512,7 +512,7 @@ async def get_current_pid(self) -> int: return (await self.api.get_runtime_info())['pid'] async def get_current_backends(self) -> dict: - """Returns current backends from the Proxy. 
+ """Get the current backends from the Proxy. Returns ------- @@ -524,7 +524,7 @@ async def get_current_backends(self) -> dict: return {backend['name']: backend for backend in api_response['data']} async def exists_backend(self, backend_name: str) -> bool: - """Checks if the provided backend exists. + """Check if the provided backend exists. Parameters ---------- @@ -539,7 +539,7 @@ async def exists_backend(self, backend_name: str) -> bool: return backend_name in await self.get_current_backends() async def get_current_frontends(self) -> dict: - """Returns current frontends from the Proxy. + """Get the current frontends from the Proxy. Returns ------- @@ -551,7 +551,7 @@ async def get_current_frontends(self) -> dict: return {frontend['name']: frontend for frontend in api_response['data'] if 'default_backend' in frontend} async def exists_frontend(self, frontend_name: str) -> bool: - """Checks if the provided frontend exists. + """Check if the provided frontend exists. Parameters ---------- @@ -571,16 +571,16 @@ async def add_new_backend( mode: CommunicationProtocol = CommunicationProtocol.TCP, algorithm: ProxyBalanceAlgorithm = ProxyBalanceAlgorithm.LEAST_CONNECTIONS, ): - """Adds new backend to the Proxy. + """Add a new backend to the Proxy. Parameters ---------- name : str Name for the new backend. mode : CommunicationProtocol, optional - Communication protocol to use, by default CommunicationProtocol.TCP + Communication protocol to use, by default CommunicationProtocol.TCP. algorithm : ProxyBalanceAlgorithm, optional - Load balancing algorithm to use, by default ProxyBalanceAlgorithm.LEAST_CONNECTIONS + Load balancing algorithm to use, by default ProxyBalanceAlgorithm.LEAST_CONNECTIONS. """ await self.api.add_backend(name=name, mode=mode, algorithm=algorithm) self.logger.debug2(f"Added new proxy backend: '{name}'") @@ -588,7 +588,7 @@ async def add_new_backend( async def add_new_frontend( self, name: str, port: int, backend: str, mode: CommunicationProtocol = CommunicationProtocol.TCP ): - """Adds new frontend to the Proxy. + """Add a new frontend to the Proxy. Parameters ---------- @@ -599,13 +599,13 @@ async def add_new_frontend( backend : str Default backend to connect. mode : CommunicationProtocol, optional - Communication protocol to use, by default CommunicationProtocol.TCP + Communication protocol to use, by default CommunicationProtocol.TCP. """ await self.api.add_frontend(name=name, port=port, backend=backend, mode=mode) self.logger.debug2(f"Added new proxy frontend: '{name}'") async def get_current_backend_servers(self) -> dict: - """Returns current backend servers from the Proxy. + """Get the current backend servers from the Proxy. Returns ------- @@ -617,7 +617,7 @@ async def get_current_backend_servers(self) -> dict: return {server['name']: server['address'] for server in api_response['data']} async def add_wazuh_manager(self, manager_name: str, manager_address: str, resolver: Optional[str] = None) -> dict: - """Adds new Wazuh manager to the Proxy. + """Add a new Wazuh manager to the Proxy. Parameters ---------- @@ -647,7 +647,7 @@ async def add_wazuh_manager(self, manager_name: str, manager_address: str, resol return api_response async def remove_wazuh_manager(self, manager_name: str) -> dict: - """Deletes the given Wazuh manager from the Proxy. + """Delete the given Wazuh manager from the Proxy. 
Parameters ---------- @@ -703,12 +703,12 @@ async def allow_server_new_connections(self, server_name: str) -> dict: return api_response async def get_wazuh_server_stats(self, server_name: str) -> dict: - """Returns statistics of the given server. + """Get the statistics of the given server. Parameters ---------- server_name : str - The server name to query + The server name to query. Returns ------- @@ -723,7 +723,7 @@ async def get_wazuh_server_stats(self, server_name: str) -> dict: return server_stats async def is_server_drain(self, server_name: str) -> bool: - """Checks if the server is in DRAIN state. + """Check if the server is in DRAIN state. Parameters ---------- @@ -741,12 +741,12 @@ async def is_server_drain(self, server_name: str) -> bool: return server_stats['admin_state'] == ProxyServerState.DRAIN.value async def get_wazuh_backend_stats(self, only_actives: bool = True) -> dict: - """Returns statistics of the Wazuh backend. + """Get the statistics of the Wazuh backend. Parameters ---------- only_actives : bool, optional - Only include running servers, by default True + Only include running servers, by default True. Returns ------- @@ -767,7 +767,7 @@ async def get_wazuh_backend_stats(self, only_actives: bool = True) -> dict: return stats async def get_wazuh_backend_server_connections(self) -> dict: - """Returns the active connections of the Wazuh backend server. + """Get the active connections of the Wazuh backend server. Returns ------- diff --git a/framework/wazuh/core/cluster/hap_helper/wazuh.py b/framework/wazuh/core/cluster/hap_helper/wazuh.py index 8e7ce4fbc12..e2cf9a39bae 100644 --- a/framework/wazuh/core/cluster/hap_helper/wazuh.py +++ b/framework/wazuh/core/cluster/hap_helper/wazuh.py @@ -11,7 +11,7 @@ class WazuhAgent: - """Tools for handle Wazuh agents connections.""" + """Tools to handle Wazuh agents connections.""" RECONNECTION_VERSION_MAJOR = 4 RECONNECTION_VERSION_MINOR = 3 @@ -52,7 +52,7 @@ def get_agents_able_to_reconnect(cls, agents_list: list[dict]) -> list[str]: class WazuhDAPI: - """Class for call Wazuh DAPI functions.""" + """Class to call Wazuh DAPI functions.""" AGENTS_MAX_LIMIT = 100000 API_RETRIES = 5 @@ -69,7 +69,7 @@ def __init__( @staticmethod def _get_logger() -> logging.Logger: - """Returns the configured logger. + """Get the configured logger. Returns ------- @@ -89,7 +89,7 @@ async def _make_dapi_call(self, f: Callable, f_kwargs: Optional[dict] = None, ** f : Callable Function to be executed. f_kwargs : Optional[dict], optional - Arguments to be passed to function `f`, by default None + Arguments to be passed to function `f`, by default None. Returns ------- @@ -126,12 +126,12 @@ async def get_cluster_nodes(self) -> dict: return {item['name']: item['ip'] for item in data.affected_items if item['name'] not in self.excluded_nodes} async def reconnect_agents(self, agent_list: list = None) -> dict: - """Makes an API call to reconnect agents. + """Make an API call to reconnect agents. Parameters ---------- agent_list : list, optional - The agents to be re-connected, by default None + The agents to be re-connected, by default None. Returns ------- @@ -183,7 +183,7 @@ async def get_agents_belonging_to_node(self, node_name: str, limit: int = None) node_name : str The name of the node to check. limit : int, optional - Max number of agents to retrieve, by default None + Max number of agents to retrieve, by default None. 
Returns ------- diff --git a/framework/wazuh/core/exception.py b/framework/wazuh/core/exception.py index 1476fce69b9..18badb539e0 100755 --- a/framework/wazuh/core/exception.py +++ b/framework/wazuh/core/exception.py @@ -496,6 +496,8 @@ class WazuhException(Exception): 3038: "Error while processing extra-valid files", 3039: "Timeout while waiting to receive a file", 3040: "Error while waiting to receive a file", + + # HAProxy Helper exceptions 3041: "Server status check timed out after adding new servers", 3042: "User configuration is not valid", 3043: "Cannot initialize Proxy API", From 019268fa7a29d0627fb860c0c8a40900555787a3 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Tue, 27 Feb 2024 17:09:16 -0300 Subject: [PATCH 334/419] Fix logs tags and move HAPHelper start clusterd --- framework/scripts/wazuh_clusterd.py | 5 ++-- .../core/cluster/hap_helper/hap_helper.py | 26 +++++++++++++------ .../wazuh/core/cluster/hap_helper/proxy.py | 23 +++++++++++----- .../wazuh/core/cluster/hap_helper/wazuh.py | 18 +++++++++---- framework/wazuh/core/cluster/local_server.py | 3 +-- 5 files changed, 50 insertions(+), 25 deletions(-) diff --git a/framework/scripts/wazuh_clusterd.py b/framework/scripts/wazuh_clusterd.py index 85995d90b12..58539d9acda 100644 --- a/framework/scripts/wazuh_clusterd.py +++ b/framework/scripts/wazuh_clusterd.py @@ -11,10 +11,10 @@ import signal import sys +from wazuh.core.cluster.hap_helper.hap_helper import HAPHelper from wazuh.core.utils import clean_pid_files from wazuh.core.wlogging import WazuhLogger - # # Aux functions # @@ -95,7 +95,7 @@ async def master_main(args: argparse.Namespace, cluster_config: dict, cluster_it concurrency_test=args.concurrency_test, node=my_server, configuration=cluster_config, enable_ssl=args.ssl, cluster_items=cluster_items) - await asyncio.gather(my_server.start(), my_local_server.start()) + await asyncio.gather(my_server.start(), my_local_server.start(), HAPHelper.start()) # @@ -115,7 +115,6 @@ async def worker_main(args: argparse.Namespace, cluster_config: dict, cluster_it logger : WazuhLogger Cluster logger. 
""" - from wazuh.core.cluster import worker, local_server from concurrent.futures import ProcessPoolExecutor cluster_utils.context_tag.set('Worker') diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index 8203c743baf..9db86f4a450 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -6,7 +6,7 @@ from wazuh.core.cluster.hap_helper.configuration import parse_configuration from wazuh.core.cluster.hap_helper.proxy import Proxy, ProxyAPI, ProxyServerState from wazuh.core.cluster.hap_helper.wazuh import WazuhAgent, WazuhDAPI -from wazuh.core.cluster.utils import ClusterFilter +from wazuh.core.cluster.utils import ClusterFilter, context_tag from wazuh.core.exception import WazuhException, WazuhHAPHelperError @@ -17,8 +17,9 @@ class HAPHelper: AGENT_STATUS_SYNC_TIME: int = 25 # Default agent notify time + cluster sync + 5s SERVER_ADMIN_STATE_DELAY: int = 5 - def __init__(self, proxy: Proxy, wazuh_dapi: WazuhDAPI, options: dict): - self.logger = self._get_logger() + def __init__(self, proxy: Proxy, wazuh_dapi: WazuhDAPI, tag: str, options: dict): + self.tag = tag + self.logger = self._get_logger(self.tag) self.proxy = proxy self.wazuh_dapi = wazuh_dapi @@ -30,9 +31,14 @@ def __init__(self, proxy: Proxy, wazuh_dapi: WazuhDAPI, options: dict): self.remove_disconnected_node_after: int = options['remove_disconnected_node_after'] @staticmethod - def _get_logger() -> logging.Logger: + def _get_logger(tag: str) -> logging.Logger: """Returns the configured logger. + Parameters + ---------- + tag : str + Tag to use in log filter. + Returns ------- logging.Logger @@ -40,7 +46,7 @@ def _get_logger() -> logging.Logger: """ logger = logging.getLogger('wazuh').getChild('HAPHelper') - logger.addFilter(ClusterFilter(tag='Cluster', subtag='HAPHelper Main')) + logger.addFilter(ClusterFilter(tag=tag, subtag='Main')) return logger @@ -351,6 +357,7 @@ async def manage_wazuh_cluster_nodes(self): """Main loop for check balance of Wazuh cluster.""" while True: + context_tag.set(self.tag) try: await self.backend_servers_state_healthcheck() await self.check_proxy_processes(auto_mode=True) and await sleep(self.AGENT_STATUS_SYNC_TIME) @@ -402,15 +409,17 @@ async def manage_wazuh_cluster_nodes(self): await sleep(self.sleep_time) @classmethod - async def run(cls): + async def start(cls): """Initialize and run HAPHelper.""" try: configuration = parse_configuration() + tag = 'HAPHelper' proxy_api = ProxyAPI( username=configuration['proxy']['api']['user'], password=configuration['proxy']['api']['password'], + tag=tag, address=configuration['proxy']['api']['address'], port=configuration['proxy']['api']['port'], ) @@ -418,14 +427,15 @@ async def run(cls): wazuh_backend=configuration['proxy']['backend'], wazuh_connection_port=configuration['wazuh']['connection']['port'], proxy_api=proxy_api, + tag=tag, resolver=configuration['proxy'].get('resolver', None), ) wazuh_dapi = WazuhDAPI( - excluded_nodes=configuration['wazuh']['excluded_nodes'], + tag=tag, excluded_nodes=configuration['wazuh']['excluded_nodes'], ) - helper = cls(proxy=proxy, wazuh_dapi=wazuh_dapi, options=configuration['hap_helper']) + helper = cls(proxy=proxy, wazuh_dapi=wazuh_dapi, tag=tag, options=configuration['hap_helper']) await helper.initialize_proxy() await helper.initialize_wazuh_cluster_configuration() diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py 
index a0d2ccb9d67..5a192133587 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -4,7 +4,7 @@ from typing import Optional, TypeAlias import httpx -from wazuh.core.cluster.utils import ClusterFilter +from wazuh.core.cluster.utils import ClusterFilter, context_tag from wazuh.core.exception import WazuhHAPHelperError JSON_TYPE: TypeAlias = dict | list[dict] @@ -39,11 +39,12 @@ class ProxyAPI: HAPEE_ENDPOINT = '/hapee' - def __init__(self, username: str, password: str, address: str = 'localhost', port: int = 7777): + def __init__(self, username: str, password: str, tag: str, address: str = 'localhost', port: int = 7777): self.username = username self.password = password self.address = address self.port = port + self.tag = tag self.version = 0 @@ -99,6 +100,7 @@ async def _make_hapee_request( WazuhHAPHelperError In case of errors communicating with the HAProxy REST API. """ + context_tag.set(self.tag) uri = f'https://{self.address}:{self.port}{self.HAPEE_ENDPOINT}' query_parameters = query_parameters or {} query_parameters.update({'version': self.version}) @@ -142,6 +144,7 @@ async def _make_proxy_request( query_parameters: dict | None = None, json_body: dict | None = None, ) -> PROXY_API_RESPONSE: + context_tag.set(self.tag) uri = f'https://{self.address}:{self.port}{endpoint}' try: @@ -463,27 +466,33 @@ def __init__( self, wazuh_backend: str, proxy_api: ProxyAPI, + tag: str, wazuh_connection_port: int = 1514, resolver: str = None, ): - self.logger = self._get_logger() + self.tag = tag + self.logger = self._get_logger(self.tag) self.wazuh_backend = wazuh_backend self.wazuh_connection_port = wazuh_connection_port self.api = proxy_api self.resolver = resolver @staticmethod - def _get_logger() -> logging.Logger: + def _get_logger(tag: str) -> logging.Logger: """Get the configured logger. + Parameters + ---------- + tag : str + Tag to use in log filter. + Returns ------- logging.Logger The configured logger. """ - - logger = logging.getLogger('wazuh').getChild('HAPHelper Proxy') - logger.addFilter(ClusterFilter(tag='Cluster', subtag='HAPHelper Proxy')) + logger = logging.getLogger('wazuh').getChild('Proxy') + logger.addFilter(ClusterFilter(tag=tag, subtag='Proxy')) return logger diff --git a/framework/wazuh/core/cluster/hap_helper/wazuh.py b/framework/wazuh/core/cluster/hap_helper/wazuh.py index e2cf9a39bae..18045e217cd 100644 --- a/framework/wazuh/core/cluster/hap_helper/wazuh.py +++ b/framework/wazuh/core/cluster/hap_helper/wazuh.py @@ -7,7 +7,7 @@ from wazuh.cluster import get_nodes_info from wazuh.core.cluster.control import get_system_nodes from wazuh.core.cluster.dapi.dapi import DistributedAPI -from wazuh.core.cluster.utils import ClusterFilter +from wazuh.core.cluster.utils import ClusterFilter, context_tag class WazuhAgent: @@ -60,24 +60,31 @@ class WazuhDAPI: def __init__( self, + tag: str, excluded_nodes: list | None = None, ): - self.logger = self._get_logger() + self.tag = tag + self.logger = self._get_logger(self.tag) self.excluded_nodes = excluded_nodes or [] self.token = '' @staticmethod - def _get_logger() -> logging.Logger: + def _get_logger(tag: str) -> logging.Logger: """Get the configured logger. + Parameters + ---------- + tag : str + Tag to use in log filter. + Returns ------- logging.Logger The configured logger. 
""" - logger = logging.getLogger('wazuh').getChild('HAPHelper DAPI') - logger.addFilter(ClusterFilter(tag='Cluster', subtag='HAPHelper DAPI')) + logger = logging.getLogger('wazuh').getChild('WazuhDAPI') + logger.addFilter(ClusterFilter(tag=tag, subtag='DAPI')) return logger @@ -101,6 +108,7 @@ async def _make_dapi_call(self, f: Callable, f_kwargs: Optional[dict] = None, ** WazuhException Raise the exception returned by function `f`. """ + context_tag.set(self.tag) ret_val = await DistributedAPI(f=f, f_kwargs=f_kwargs, logger=self.logger, **kwargs).distribute_function() if isinstance(ret_val, Exception): self.logger.error(f'Unexpected error calling {f.__name__}') diff --git a/framework/wazuh/core/cluster/local_server.py b/framework/wazuh/core/cluster/local_server.py index 70feb97708d..360052bebe1 100644 --- a/framework/wazuh/core/cluster/local_server.py +++ b/framework/wazuh/core/cluster/local_server.py @@ -14,7 +14,6 @@ from wazuh.core.cluster import client, cluster, server from wazuh.core.cluster import common as c_common from wazuh.core.cluster.dapi import dapi -from wazuh.core.cluster.hap_helper import hap_helper from wazuh.core.cluster.utils import context_tag from wazuh.core.exception import WazuhClusterError @@ -352,7 +351,7 @@ def __init__(self, node: server.AbstractServer, **kwargs): self.dapi = dapi.APIRequestQueue(server=self) self.sendsync = dapi.SendSyncRequestQueue(server=self) - self.tasks.extend([self.dapi.run, self.sendsync.run, hap_helper.HAPHelper.run]) + self.tasks.extend([self.dapi.run, self.sendsync.run]) class LocalServerHandlerWorker(LocalServerHandler): From 0763f3b90b7c9ba1f7cc501d14b27b9dad5b11bd Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Mon, 4 Mar 2024 13:10:03 -0300 Subject: [PATCH 335/419] Suggestions from CR --- framework/scripts/tests/test_wazuh_clusterd.py | 17 ++++++++++++++--- framework/scripts/wazuh_clusterd.py | 4 +++- .../wazuh/core/cluster/hap_helper/wazuh.py | 3 ++- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/framework/scripts/tests/test_wazuh_clusterd.py b/framework/scripts/tests/test_wazuh_clusterd.py index 35cf275b89d..5ff39a5bedf 100644 --- a/framework/scripts/tests/test_wazuh_clusterd.py +++ b/framework/scripts/tests/test_wazuh_clusterd.py @@ -134,17 +134,28 @@ def __init__(self, performance_test, logger, concurrency_test, node, configurati def start(self): return 'LOCALSERVER_START' - async def gather(first, second): + class HAPHElperMock: + @classmethod + def start(cls): + return 'HAPHELPER_START' + + + async def gather(first, second, third): assert first == 'MASTER_START' assert second == 'LOCALSERVER_START' + assert third == 'HAPHELPER_START' + wazuh_clusterd.cluster_utils = cluster_utils args = Arguments(performance_test='test_performance', concurrency_test='concurrency_test', ssl=True) with patch('scripts.wazuh_clusterd.asyncio.gather', gather): with patch('wazuh.core.cluster.master.Master', MasterMock): with patch('wazuh.core.cluster.local_server.LocalServerMaster', LocalServerMasterMock): - await wazuh_clusterd.master_main(args=args, cluster_config={'test': 'config'}, - cluster_items={'node': 'item'}, logger='test_logger') + with patch('wazuh.core.cluster.hap_helper.hap_helper.HAPHelper', HAPHElperMock): + await wazuh_clusterd.master_main( + args=args, cluster_config={'test': 'config'}, cluster_items={'node': 'item'}, + logger='test_logger' + ) @pytest.mark.asyncio diff --git a/framework/scripts/wazuh_clusterd.py b/framework/scripts/wazuh_clusterd.py index 58539d9acda..7b89dcba691 100644 --- 
a/framework/scripts/wazuh_clusterd.py +++ b/framework/scripts/wazuh_clusterd.py @@ -11,7 +11,6 @@ import signal import sys -from wazuh.core.cluster.hap_helper.hap_helper import HAPHelper from wazuh.core.utils import clean_pid_files from wazuh.core.wlogging import WazuhLogger @@ -83,6 +82,8 @@ async def master_main(args: argparse.Namespace, cluster_config: dict, cluster_it Cluster logger. """ from wazuh.core.cluster import master, local_server + from wazuh.core.cluster.hap_helper.hap_helper import HAPHelper + cluster_utils.context_tag.set('Master') my_server = master.Master(performance_test=args.performance_test, concurrency_test=args.concurrency_test, configuration=cluster_config, enable_ssl=args.ssl, logger=logger, @@ -115,6 +116,7 @@ async def worker_main(args: argparse.Namespace, cluster_config: dict, cluster_it logger : WazuhLogger Cluster logger. """ + from wazuh.core.cluster import worker, local_server from concurrent.futures import ProcessPoolExecutor cluster_utils.context_tag.set('Worker') diff --git a/framework/wazuh/core/cluster/hap_helper/wazuh.py b/framework/wazuh/core/cluster/hap_helper/wazuh.py index 18045e217cd..1dbee6e6e1c 100644 --- a/framework/wazuh/core/cluster/hap_helper/wazuh.py +++ b/framework/wazuh/core/cluster/hap_helper/wazuh.py @@ -84,7 +84,7 @@ def _get_logger(tag: str) -> logging.Logger: The configured logger. """ logger = logging.getLogger('wazuh').getChild('WazuhDAPI') - logger.addFilter(ClusterFilter(tag=tag, subtag='DAPI')) + logger.addFilter(ClusterFilter(tag=tag, subtag='D API')) return logger @@ -150,6 +150,7 @@ async def reconnect_agents(self, agent_list: list = None) -> dict: f=reconnect_agents, f_kwargs={'agent_list': agent_list}, request_type='distributed_master', + wait_for_complete=True ) return data.affected_items From 68a4528b1f4cd8f2626c0601db2f5c0a9ff01a78 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Wed, 6 Mar 2024 14:20:38 -0300 Subject: [PATCH 336/419] Migrate from wrapper to Dataplane API --- .../hap_helper/data/configuration.yaml | 8 +- .../core/cluster/hap_helper/hap_helper.py | 24 +--- .../wazuh/core/cluster/hap_helper/proxy.py | 130 ++++++------------ 3 files changed, 53 insertions(+), 109 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml b/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml index 7f1180ef577..ddc2e0fa6f5 100644 --- a/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml +++ b/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml @@ -23,11 +23,13 @@ proxy: # Wazuh Proxy API address address: wazuh-proxy # Wazuh Proxy API port - port: 7777 + port: 5555 # Wazuh Proxy API username - user: wazuh + user: haproxy # Wazuh Proxy API password - password: wazuh + password: haproxy + # Protocol to use 'http' or 'https'. 
By default http + protocol: http # Defined Proxy backend (frontend will append '_front' to it) backend: wazuh_cluster diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index 9db86f4a450..3f50b49f9e8 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -100,15 +100,6 @@ async def check_node_to_delete(self, node_name: str) -> bool: ) return True - # TODO: This must be deprecated - async def check_proxy_processes(self, auto_mode: bool = False, warn: bool = True) -> bool: - if not await self.proxy.is_proxy_process_single(): - warn and self.logger.warning('Detected more than one Proxy processes') - if not auto_mode and input(' Do you wish to fix them? (y/N): ').lower() != 'y': - return False - await self.manage_proxy_processes() - return True - async def backend_servers_state_healthcheck(self): """Checks if any backend server is in DRAIN state and changes to READY.""" for server in (await self.proxy.get_current_backend_servers()).keys(): @@ -196,16 +187,6 @@ async def force_agent_reconnection_to_server(self, chosen_server: str, agents_li await self.proxy.allow_server_new_connections(server_name=server_name) await sleep(self.SERVER_ADMIN_STATE_DELAY) - # TODO: This must be deprecated - async def manage_proxy_processes(self): - current_proxy_pid = (await self.proxy.api.get_runtime_info())['pid'] - response = await self.proxy.api.kill_proxy_processes(pid_to_exclude=current_proxy_pid) - - if response['error'] > 0: - self.logger.error("Could not manage all proxy processes: " f"{response['data']}") - elif len(response['data']) > 0: - self.logger.info('Managed proxy processes') - async def migrate_old_connections(self, new_servers: list[str], deleted_servers: list[str]): """Reconnects agents to new servers. 
@@ -360,7 +341,6 @@ async def manage_wazuh_cluster_nodes(self): context_tag.set(self.tag) try: await self.backend_servers_state_healthcheck() - await self.check_proxy_processes(auto_mode=True) and await sleep(self.AGENT_STATUS_SYNC_TIME) current_wazuh_cluster = await self.wazuh_dapi.get_cluster_nodes() current_proxy_backend = await self.proxy.get_current_backend_servers() @@ -422,6 +402,7 @@ async def start(cls): tag=tag, address=configuration['proxy']['api']['address'], port=configuration['proxy']['api']['port'], + protocol=configuration['proxy']['api']['protocol'], ) proxy = Proxy( wazuh_backend=configuration['proxy']['backend'], @@ -432,7 +413,8 @@ async def start(cls): ) wazuh_dapi = WazuhDAPI( - tag=tag, excluded_nodes=configuration['wazuh']['excluded_nodes'], + tag=tag, + excluded_nodes=configuration['wazuh']['excluded_nodes'], ) helper = cls(proxy=proxy, wazuh_dapi=wazuh_dapi, tag=tag, options=configuration['hap_helper']) diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index 5a192133587..08f00daf267 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -1,7 +1,7 @@ import ipaddress import logging from enum import Enum -from typing import Optional, TypeAlias +from typing import Literal, Optional, TypeAlias import httpx from wazuh.core.cluster.utils import ClusterFilter, context_tag @@ -9,6 +9,7 @@ JSON_TYPE: TypeAlias = dict | list[dict] PROXY_API_RESPONSE: TypeAlias = JSON_TYPE +HTTP_PROTOCOL = Literal['http', 'https'] class ProxyAPIMethod(Enum): @@ -37,14 +38,23 @@ class ProxyBalanceAlgorithm(Enum): class ProxyAPI: """Wrapper for calling HAProxy REST API""" - HAPEE_ENDPOINT = '/hapee' + HAP_ENDPOINT = '/v2' - def __init__(self, username: str, password: str, tag: str, address: str = 'localhost', port: int = 7777): + def __init__( + self, + username: str, + password: str, + tag: str, + address: str = 'localhost', + port: int = 5555, + protocol: HTTP_PROTOCOL = 'http', + ): self.username = username self.password = password self.address = address self.port = port self.tag = tag + self.protocol = protocol self.version = 0 @@ -58,8 +68,9 @@ async def initialize(self): """ try: async with httpx.AsyncClient(verify=False) as client: - response = await client.post( - f'https://{self.address}:{self.port}/', auth=(self.username, self.password) + response = await client.get( + f'{self.protocol}://{self.address}:{self.port}/{self.HAP_ENDPOINT}/health', + auth=(self.username, self.password), ) if response.status_code == 401: raise WazuhHAPHelperError(3046) @@ -70,7 +81,7 @@ async def initialize(self): except httpx.RequestError as req_exc: raise WazuhHAPHelperError(3043, extra_message=str(req_exc)) - async def _make_hapee_request( + async def _make_hap_request( self, endpoint: str, method: ProxyAPIMethod = ProxyAPIMethod.GET, @@ -101,66 +112,32 @@ async def _make_hapee_request( In case of errors communicating with the HAProxy REST API. 
""" context_tag.set(self.tag) - uri = f'https://{self.address}:{self.port}{self.HAPEE_ENDPOINT}' + version_key = '_version' + uri = f'{self.protocol}://{self.address}:{self.port}{self.HAP_ENDPOINT}/{endpoint}' query_parameters = query_parameters or {} query_parameters.update({'version': self.version}) - hapee_json_body = { - 'method': method.value, - 'uri': endpoint, - 'query_parameters': query_parameters, - 'json_body': json_body or {}, - } - - try: - async with httpx.AsyncClient(verify=False, follow_redirects=True) as client: - response = await client.post(uri, auth=(self.username, self.password), json=hapee_json_body) - except httpx.RequestError as request_exc: - raise WazuhHAPHelperError(3044, extra_message=str(request_exc)) - - if response.status_code == 200: - full_decoded_response = response.json() - decoded_response = full_decoded_response['data']['response'] - if full_decoded_response['error'] != 0: - raise WazuhHAPHelperError( - 3049, extra_message=f'Full response: {response.status_code} | {response.json()}' - ) - if isinstance(decoded_response, dict) and '_version' in decoded_response: - self.version = decoded_response['_version'] - elif method != ProxyAPIMethod.GET and 'configuration' in endpoint: - await self.update_configuration_version() - - return decoded_response - elif response.status_code == 401: - raise WazuhHAPHelperError(3046) - else: - raise WazuhHAPHelperError(3045, extra_message=f'Full response: {response.status_code} | {response.json()}') - - # TODO: This must be deprecated - async def _make_proxy_request( - self, - endpoint: str, - method: ProxyAPIMethod = ProxyAPIMethod.GET, - query_parameters: dict | None = None, - json_body: dict | None = None, - ) -> PROXY_API_RESPONSE: - context_tag.set(self.tag) - uri = f'https://{self.address}:{self.port}{endpoint}' - try: async with httpx.AsyncClient(verify=False, follow_redirects=True) as client: response = await client.request( - method.value, - uri, + method=method.value, + url=uri, auth=(self.username, self.password), - params=query_parameters, json=json_body, + params=query_parameters, ) except httpx.RequestError as request_exc: raise WazuhHAPHelperError(3044, extra_message=str(request_exc)) - if response.status_code == 200: - return response.json() + if response.is_success: + response = response.json() + + if version_key in response: + self.version = response[version_key] + elif method != ProxyAPIMethod.GET and 'configuration' in endpoint: + await self.update_configuration_version() + + return response elif response.status_code == 401: raise WazuhHAPHelperError(3046) else: @@ -169,7 +146,7 @@ async def _make_proxy_request( async def update_configuration_version(self): """Get the last version of the configuration schema and set it.""" - configuration_version = await self._make_hapee_request('/services/haproxy/configuration/version') + configuration_version = await self._make_hap_request('/services/haproxy/configuration/version') self.version = configuration_version async def get_runtime_info(self) -> PROXY_API_RESPONSE: @@ -180,7 +157,7 @@ async def get_runtime_info(self) -> PROXY_API_RESPONSE: PROXY_API_RESPONSE The runtime information. """ - return (await self._make_hapee_request('/services/haproxy/runtime/info'))[0]['info'] + return (await self._make_hap_request('/services/haproxy/runtime/info'))[0]['info'] async def get_backends(self) -> PROXY_API_RESPONSE: """Get the configured backends. @@ -190,7 +167,7 @@ async def get_backends(self) -> PROXY_API_RESPONSE: PROXY_API_RESPONSE Information of configured backends. 
""" - return await self._make_hapee_request(endpoint='/services/haproxy/configuration/backends') + return await self._make_hap_request(endpoint='/services/haproxy/configuration/backends') async def add_backend( self, @@ -217,7 +194,7 @@ async def add_backend( query_params = {'force_reload': True} json_body = {'name': name, 'mode': mode.value, 'balance': {'algorithm': algorithm.value}} - return await self._make_hapee_request( + return await self._make_hap_request( '/services/haproxy/configuration/backends', method=ProxyAPIMethod.POST, query_parameters=query_params, @@ -238,7 +215,7 @@ async def get_backend_servers(self, backend: str) -> PROXY_API_RESPONSE: The servers for the provided backend. """ - return await self._make_hapee_request( + return await self._make_hap_request( '/services/haproxy/configuration/servers', query_parameters={'backend': backend} ) @@ -278,7 +255,7 @@ async def add_server_to_backend( {'resolvers': resolver, 'init-addr': 'last,libc,none'} if resolver and not is_ip_address else {} ) - return await self._make_hapee_request( + return await self._make_hap_request( '/services/haproxy/configuration/servers', method=ProxyAPIMethod.POST, query_parameters=query_params, @@ -302,7 +279,7 @@ async def remove_server_from_backend(self, backend: str, server_name: str) -> PR """ query_params = {'backend': backend, 'force_reload': True} - return await self._make_hapee_request( + return await self._make_hap_request( f'/services/haproxy/configuration/servers/{server_name}', method=ProxyAPIMethod.DELETE, query_parameters=query_params, @@ -316,7 +293,7 @@ async def get_frontends(self) -> PROXY_API_RESPONSE: PROXY_API_RESPONSE Information of configured frontends. """ - return await self._make_hapee_request(endpoint='/services/haproxy/configuration/frontends') + return await self._make_hap_request(endpoint='/services/haproxy/configuration/frontends') async def add_frontend( self, name: str, port: int, backend: str, mode: CommunicationProtocol = CommunicationProtocol.TCP @@ -342,7 +319,7 @@ async def add_frontend( frontend_query_params = {'force_reload': True} frontend_json_body = {'name': name, 'mode': mode.value, 'default_backend': backend} - frontend_response = await self._make_hapee_request( + frontend_response = await self._make_hap_request( '/services/haproxy/configuration/frontends', method=ProxyAPIMethod.POST, query_parameters=frontend_query_params, @@ -353,7 +330,7 @@ async def add_frontend( bind_query_params = {'force_reload': True, 'frontend': frontend_name} bind_json_body = {'port': port, 'name': f'{frontend_name}_bind'} - await self._make_hapee_request( + await self._make_hap_request( '/services/haproxy/configuration/binds', method=ProxyAPIMethod.POST, query_parameters=bind_query_params, @@ -379,7 +356,7 @@ async def get_backend_server_runtime_settings(self, backend_name: str, server_na """ query_params = {'backend': backend_name, 'name': server_name} - return await self._make_hapee_request( + return await self._make_hap_request( f'/services/haproxy/runtime/servers/{server_name}', query_parameters=query_params ) @@ -405,7 +382,7 @@ async def change_backend_server_state( query_params = {'backend': backend_name} json_body = {'admin_state': state.value} - return await self._make_hapee_request( + return await self._make_hap_request( f'/services/haproxy/runtime/servers/{server_name}', method=ProxyAPIMethod.PUT, query_parameters=query_params, @@ -427,7 +404,7 @@ async def get_backend_stats(self, backend_name: str) -> PROXY_API_RESPONSE: """ query_params = {'type': 'backend', 'name': 
backend_name} - return await self._make_hapee_request('/services/haproxy/stats/native', query_parameters=query_params) + return await self._make_hap_request('/services/haproxy/stats/native', query_parameters=query_params) async def get_backend_server_stats(self, backend_name: str, server_name: str) -> PROXY_API_RESPONSE: """Get the statistics of the provided backend server. @@ -446,19 +423,7 @@ async def get_backend_server_stats(self, backend_name: str, server_name: str) -> """ query_params = {'type': 'server', 'parent': backend_name, 'name': server_name.lower()} - return await self._make_hapee_request('/services/haproxy/stats/native', query_parameters=query_params) - - # TODO: This must be deprecated - async def get_proxy_processes(self) -> PROXY_API_RESPONSE: - return await self._make_proxy_request('/haproxy/processes') - - # TODO: This must be deprecated - async def kill_proxy_processes(self, pid_to_exclude: int = 0) -> PROXY_API_RESPONSE: - query_params = {'exclude_pid': pid_to_exclude} - - return await self._make_proxy_request( - '/haproxy/processes', method=ProxyAPIMethod.DELETE, query_parameters=query_params - ) + return await self._make_hap_request('/services/haproxy/stats/native', query_parameters=query_params) class Proxy: @@ -786,8 +751,3 @@ async def get_wazuh_backend_server_connections(self) -> dict: current_connections_key = 'scur' server_stats = await self.get_wazuh_backend_stats() return {server_name: server_stats[server_name][current_connections_key] for server_name in server_stats} - - # TODO: This must be deprecated - async def is_proxy_process_single(self) -> bool: - haproxy_processes = await self.api.get_proxy_processes() - return len(haproxy_processes['data']['processes']) == 1 From b786336c1c8f9638ba36bb9e3de4186f7245b3fd Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Thu, 14 Mar 2024 11:56:48 -0300 Subject: [PATCH 337/419] Check hard-stop-configuration or set it --- .../core/cluster/hap_helper/hap_helper.py | 38 +++++++- .../wazuh/core/cluster/hap_helper/proxy.py | 92 ++++++++++++++++++- 2 files changed, 125 insertions(+), 5 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index 3f50b49f9e8..b9942108f11 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -6,7 +6,7 @@ from wazuh.core.cluster.hap_helper.configuration import parse_configuration from wazuh.core.cluster.hap_helper.proxy import Proxy, ProxyAPI, ProxyServerState from wazuh.core.cluster.hap_helper.wazuh import WazuhAgent, WazuhDAPI -from wazuh.core.cluster.utils import ClusterFilter, context_tag +from wazuh.core.cluster.utils import ClusterFilter, context_tag, get_cluster_items from wazuh.core.exception import WazuhException, WazuhHAPHelperError @@ -244,8 +244,6 @@ async def migrate_old_connections(self, new_servers: list[str], deleted_servers: self.logger.info('Balancing exceeding connections after changes on the Wazuh backend') await self.update_agent_connections(agent_list=agents_to_balance) - await self.check_proxy_processes(auto_mode=True, warn=False) - self.logger.info('Waiting for agent connections stability') self.logger.debug(f'Sleeping {self.agent_reconnection_stability_time}s...') await sleep(self.agent_reconnection_stability_time) @@ -388,6 +386,29 @@ async def manage_wazuh_cluster_nodes(self): ) await sleep(self.sleep_time) + async def set_hard_stop_after(self): + """Check if HAProxy has the hard-stop-after configuration. 
If is not it will be set.""" + + cluster_items = get_cluster_items() + connection_retry = cluster_items['intervals']['worker']['connection_retry'] + 2 + + self.logger.debug(f'Waiting for workers connections {connection_retry}s...') + await sleep(connection_retry) + + self.logger.info('Setting a value for `hard-stop-after` configuration.') + agents_distribution = await self.wazuh_dapi.get_agents_node_distribution() + agents_id = [item['id'] for agents in agents_distribution.values() for item in agents] + + await self.proxy.set_hard_stop_after_value( + active_agents=len(agents_id), + chunk_size=self.agent_reconnection_chunk_size, + agent_reconnection_time=self.agent_reconnection_time, + ) + + if len(agents_id) > 0: + self.logger.info(f'Reconnecting {len(agents_id)} agents.') + await self.update_agent_connections(agent_list=agents_id) + @classmethod async def start(cls): """Initialize and run HAPHelper.""" @@ -420,8 +441,19 @@ async def start(cls): helper = cls(proxy=proxy, wazuh_dapi=wazuh_dapi, tag=tag, options=configuration['hap_helper']) await helper.initialize_proxy() + + if helper.proxy.hard_stop_after is not None: + helper.logger.info( + 'Ensuring only exists one HAProxy process. ' + f'Sleeping {helper.proxy.hard_stop_after}s before start...' + ) + await sleep(helper.proxy.hard_stop_after) + await helper.initialize_wazuh_cluster_configuration() + if helper.proxy.hard_stop_after is None: + await helper.set_hard_stop_after() + helper.logger.info('Starting HAProxy Helper') await helper.manage_wazuh_cluster_nodes() except KeyboardInterrupt: diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index 08f00daf267..3a5ebb818f3 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -12,6 +12,38 @@ HTTP_PROTOCOL = Literal['http', 'https'] +def _convert_to_seconds(milliseconds: int) -> float: + """Convert the given milliseconds to seconds. + + Parameters + ---------- + millisenconds : int + Milliseconds to convert. + + Returns + ------- + float + The amount of seconds. + """ + return milliseconds / 1000 + + +def _convert_to_milliseconds(seconds: int) -> int: + """Convert the given seconds to milliseconds. + + Parameters + ---------- + seconds : int + Seconds to convert. + + Returns + ------- + int + The amount of milliseconds. + """ + return seconds * 1000 + + class ProxyAPIMethod(Enum): GET = 'get' POST = 'post' @@ -131,8 +163,7 @@ async def _make_hap_request( if response.is_success: response = response.json() - - if version_key in response: + if isinstance(response, dict) and version_key in response: self.version = response[version_key] elif method != ProxyAPIMethod.GET and 'configuration' in endpoint: await self.update_configuration_version() @@ -159,6 +190,28 @@ async def get_runtime_info(self) -> PROXY_API_RESPONSE: """ return (await self._make_hap_request('/services/haproxy/runtime/info'))[0]['info'] + async def get_global_configuration(self) -> dict: + """Get the global configuration from HAProxy. + + Returns + ------- + dict + The current global configuration. + """ + return (await self._make_hap_request('/services/haproxy/configuration/global'))['data'] + + async def update_global_configuration(self, new_configuration: dict): + """Apply the new global configuration. + + Parameters + ---------- + new_configuration : str + New global configuration to apply. 
+ """ + await self._make_hap_request( + '/services/haproxy/configuration/global', json_body=new_configuration, method=ProxyAPIMethod.PUT + ) + async def get_backends(self) -> PROXY_API_RESPONSE: """Get the configured backends. @@ -441,6 +494,7 @@ def __init__( self.wazuh_connection_port = wazuh_connection_port self.api = proxy_api self.resolver = resolver + self.hard_stop_after = None @staticmethod def _get_logger(tag: str) -> logging.Logger: @@ -472,9 +526,43 @@ async def initialize(self): await self.api.initialize() try: (await self.api.get_runtime_info())['version'] + hard_stop_after = await self.get_hard_stop_after_value() + self.hard_stop_after = _convert_to_seconds(hard_stop_after) if hard_stop_after is not None else None except (KeyError, IndexError): raise WazuhHAPHelperError(3048) + async def get_hard_stop_after_value(self) -> Optional[str]: + """Get the `hard-stop-after` value from the global configurations. + + Returns + ------- + Optional[str] + The value of the configuration. + """ + return (await self.api.get_global_configuration()).get('hard_stop_after', None) + + async def set_hard_stop_after_value(self, active_agents: int, chunk_size: int, agent_reconnection_time: int): + """Calculate a dinamic value for `hard-stop-after` and set it. + + Parameters + ---------- + active_agents : int + Number of active agents. + chunk_size : int + Max number of agents to be reconnected at once. + agent_reconnection_time : int + Seconds to sleep after an agent chunk reconnection. + """ + number_of_chunks = active_agents / chunk_size if active_agents > chunk_size else 1 + hard_stop_after = number_of_chunks * agent_reconnection_time + + configuration = await self.api.get_global_configuration() + configuration['hard_stop_after'] = _convert_to_milliseconds(hard_stop_after) + + await self.api.update_global_configuration(new_configuration=configuration) + self.hard_stop_after = hard_stop_after + self.logger.info(f'Setted `hard-stop-after` with {hard_stop_after} seconds.') + async def get_current_pid(self) -> int: """Get the current HAProxy PID. From 11426ec140d1d0d9bd380da910390b093aef5c8e Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Wed, 20 Mar 2024 10:41:30 -0300 Subject: [PATCH 338/419] Fix messages and docstrings --- framework/wazuh/core/cluster/hap_helper/hap_helper.py | 4 ++-- framework/wazuh/core/cluster/hap_helper/proxy.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index b9942108f11..ffe4d0b2837 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -387,12 +387,12 @@ async def manage_wazuh_cluster_nodes(self): await sleep(self.sleep_time) async def set_hard_stop_after(self): - """Check if HAProxy has the hard-stop-after configuration. If is not it will be set.""" + """Check if HAProxy has the hard-stop-after configuration. 
If not, it will be set.""" cluster_items = get_cluster_items() connection_retry = cluster_items['intervals']['worker']['connection_retry'] + 2 - self.logger.debug(f'Waiting for workers connections {connection_retry}s...') + self.logger.debug(f'Waiting {connection_retry}s for workers connections...') await sleep(connection_retry) self.logger.info('Setting a value for `hard-stop-after` configuration.') diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index 3a5ebb818f3..4d28a3f3d48 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -532,7 +532,7 @@ async def initialize(self): raise WazuhHAPHelperError(3048) async def get_hard_stop_after_value(self) -> Optional[str]: - """Get the `hard-stop-after` value from the global configurations. + """Get the `hard-stop-after` value from the global configuration. Returns ------- @@ -542,7 +542,7 @@ async def get_hard_stop_after_value(self) -> Optional[str]: return (await self.api.get_global_configuration()).get('hard_stop_after', None) async def set_hard_stop_after_value(self, active_agents: int, chunk_size: int, agent_reconnection_time: int): - """Calculate a dinamic value for `hard-stop-after` and set it. + """Calculate a dynamic value for `hard-stop-after` and set it. Parameters ---------- @@ -561,7 +561,7 @@ async def set_hard_stop_after_value(self, active_agents: int, chunk_size: int, a await self.api.update_global_configuration(new_configuration=configuration) self.hard_stop_after = hard_stop_after - self.logger.info(f'Setted `hard-stop-after` with {hard_stop_after} seconds.') + self.logger.info(f'Set `hard-stop-after` with {hard_stop_after} seconds.') async def get_current_pid(self) -> int: """Get the current HAProxy PID. From bc386ff883730aade2d2778cdcaaeb430e7f45d3 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Mon, 25 Mar 2024 15:31:54 -0300 Subject: [PATCH 339/419] Improved server deletion --- framework/wazuh/core/cluster/hap_helper/hap_helper.py | 9 ++++++++- framework/wazuh/core/cluster/hap_helper/proxy.py | 11 +++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index ffe4d0b2837..6e6c985176a 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -89,7 +89,14 @@ async def check_node_to_delete(self, node_name: str) -> bool: bool True if the node can be deleted, else False. 
""" - node_downtime = (await self.proxy.get_wazuh_server_stats(server_name=node_name))['lastchg'] + node_stats = await self.proxy.get_wazuh_server_stats(server_name=node_name) + + node_status = node_stats['status'] + node_downtime = node_stats['lastchg'] + + if node_status == ProxyServerState.UP.value.upper(): + return False + self.logger.debug2(f"Server '{node_name}' has been disconnected for {node_downtime}s") if node_downtime < self.remove_disconnected_node_after * 60: diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index 4d28a3f3d48..27eb15e060c 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -1,4 +1,5 @@ import ipaddress +import json import logging from enum import Enum from typing import Literal, Optional, TypeAlias @@ -8,7 +9,7 @@ from wazuh.core.exception import WazuhHAPHelperError JSON_TYPE: TypeAlias = dict | list[dict] -PROXY_API_RESPONSE: TypeAlias = JSON_TYPE +PROXY_API_RESPONSE: TypeAlias = JSON_TYPE | None HTTP_PROTOCOL = Literal['http', 'https'] @@ -55,6 +56,8 @@ class ProxyServerState(Enum): READY = 'ready' MAINTENANCE = 'maint' DRAIN = 'drain' + DOWN = 'down' + UP = 'up' class CommunicationProtocol(Enum): @@ -162,7 +165,11 @@ async def _make_hap_request( raise WazuhHAPHelperError(3044, extra_message=str(request_exc)) if response.is_success: - response = response.json() + try: + response = response.json() + except json.JSONDecodeError: + response = None + if isinstance(response, dict) and version_key in response: self.version = response[version_key] elif method != ProxyAPIMethod.GET and 'configuration' in endpoint: From 13e47ea8895caf6544b397830dc9268daed9e5cd Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Mon, 25 Mar 2024 15:53:39 -0300 Subject: [PATCH 340/419] Improved docstring --- framework/wazuh/core/cluster/hap_helper/hap_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index 6e6c985176a..0f13ecbcb56 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -394,7 +394,7 @@ async def manage_wazuh_cluster_nodes(self): await sleep(self.sleep_time) async def set_hard_stop_after(self): - """Check if HAProxy has the hard-stop-after configuration. 
If not, it will be set.""" + """Calculate and set hard-stop-after configuration in HAProxy.""" cluster_items = get_cluster_items() connection_retry = cluster_items['intervals']['worker']['connection_retry'] + 2 From 384a6955bf5ab070f90899946eb8a96e42125d61 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Mon, 25 Mar 2024 16:14:45 -0300 Subject: [PATCH 341/419] Improved sleep time before start --- .../core/cluster/hap_helper/hap_helper.py | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index 0f13ecbcb56..f93726334a6 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -395,9 +395,7 @@ async def manage_wazuh_cluster_nodes(self): async def set_hard_stop_after(self): """Calculate and set hard-stop-after configuration in HAProxy.""" - - cluster_items = get_cluster_items() - connection_retry = cluster_items['intervals']['worker']['connection_retry'] + 2 + connection_retry = self.get_connection_retry() self.logger.debug(f'Waiting {connection_retry}s for workers connections...') await sleep(connection_retry) @@ -416,6 +414,18 @@ async def set_hard_stop_after(self): self.logger.info(f'Reconnecting {len(agents_id)} agents.') await self.update_agent_connections(agent_list=agents_id) + @staticmethod + def get_connection_retry() -> int: + """Returns the connection retry value, from cluster.json, plus two seconds. + + Returns + ------- + int + The seconds of connection retry. + """ + cluster_items = get_cluster_items() + return cluster_items['intervals']['worker']['connection_retry'] + 2 + @classmethod async def start(cls): """Initialize and run HAPHelper.""" @@ -450,11 +460,9 @@ async def start(cls): await helper.initialize_proxy() if helper.proxy.hard_stop_after is not None: - helper.logger.info( - 'Ensuring only exists one HAProxy process. ' - f'Sleeping {helper.proxy.hard_stop_after}s before start...' - ) - await sleep(helper.proxy.hard_stop_after) + sleep_time = max(helper.proxy.hard_stop_after, cls.get_connection_retry()) + helper.logger.info(f'Ensuring only exists one HAProxy process. 
Sleeping {sleep_time}s before start...') + await sleep(sleep_time) await helper.initialize_wazuh_cluster_configuration() From b088a8c8970485cbd0d968b0e1993d0dde78af3e Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Wed, 27 Mar 2024 09:50:07 -0300 Subject: [PATCH 342/419] Set hard-stop-after when the cluster changes --- .../core/cluster/hap_helper/hap_helper.py | 25 +++++++++++++------ .../wazuh/core/cluster/hap_helper/proxy.py | 17 +++++++------ 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index f93726334a6..c3afdd915fe 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -367,6 +367,8 @@ async def manage_wazuh_cluster_nodes(self): manager_address=current_wazuh_cluster[node_to_add], resolver=self.proxy.resolver, ) + + await self.set_hard_stop_after(wait_connection_retry=False, reconnect_agents=False) await self.migrate_old_connections(new_servers=nodes_to_add, deleted_servers=nodes_to_remove) continue @@ -393,12 +395,21 @@ async def manage_wazuh_cluster_nodes(self): ) await sleep(self.sleep_time) - async def set_hard_stop_after(self): - """Calculate and set hard-stop-after configuration in HAProxy.""" - connection_retry = self.get_connection_retry() + async def set_hard_stop_after(self, wait_connection_retry: bool = True, reconnect_agents: bool = True): + """Calculate and set hard-stop-after configuration in HAProxy. + + Parameters + ---------- + wait_connection_retry : bool, optional + Wait for the workers connections, by default True. + reconnect_agents : bool, optional + Reconnect agents after set the hard-stop-after, by default True. + """ - self.logger.debug(f'Waiting {connection_retry}s for workers connections...') - await sleep(connection_retry) + if wait_connection_retry: + connection_retry = self.get_connection_retry() + self.logger.debug(f'Waiting {connection_retry}s for workers connections...') + await sleep(connection_retry) self.logger.info('Setting a value for `hard-stop-after` configuration.') agents_distribution = await self.wazuh_dapi.get_agents_node_distribution() @@ -410,13 +421,13 @@ async def set_hard_stop_after(self): agent_reconnection_time=self.agent_reconnection_time, ) - if len(agents_id) > 0: + if reconnect_agents and len(agents_id) > 0: self.logger.info(f'Reconnecting {len(agents_id)} agents.') await self.update_agent_connections(agent_list=agents_id) @staticmethod def get_connection_retry() -> int: - """Returns the connection retry value, from cluster.json, plus two seconds. + """Return the connection retry value, from cluster.json, plus two seconds. Returns ------- diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index 27eb15e060c..9e478f60545 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -13,7 +13,7 @@ HTTP_PROTOCOL = Literal['http', 'https'] -def _convert_to_seconds(milliseconds: int) -> float: +def _convert_to_seconds(milliseconds: int) -> int: """Convert the given milliseconds to seconds. Parameters @@ -26,7 +26,7 @@ def _convert_to_seconds(milliseconds: int) -> float: float The amount of seconds. """ - return milliseconds / 1000 + return int(milliseconds / 1000) def _convert_to_milliseconds(seconds: int) -> int: @@ -42,7 +42,7 @@ def _convert_to_milliseconds(seconds: int) -> int: int The amount of milliseconds. 
""" - return seconds * 1000 + return int(seconds * 1000) class ProxyAPIMethod(Enum): @@ -563,12 +563,13 @@ async def set_hard_stop_after_value(self, active_agents: int, chunk_size: int, a number_of_chunks = active_agents / chunk_size if active_agents > chunk_size else 1 hard_stop_after = number_of_chunks * agent_reconnection_time - configuration = await self.api.get_global_configuration() - configuration['hard_stop_after'] = _convert_to_milliseconds(hard_stop_after) + if self.hard_stop_after is None or self.hard_stop_after != hard_stop_after: + configuration = await self.api.get_global_configuration() + configuration['hard_stop_after'] = _convert_to_milliseconds(hard_stop_after) - await self.api.update_global_configuration(new_configuration=configuration) - self.hard_stop_after = hard_stop_after - self.logger.info(f'Set `hard-stop-after` with {hard_stop_after} seconds.') + await self.api.update_global_configuration(new_configuration=configuration) + self.hard_stop_after = hard_stop_after + self.logger.info(f'Set `hard-stop-after` with {hard_stop_after} seconds.') async def get_current_pid(self) -> int: """Get the current HAProxy PID. From 9214e261a7f2ed770a0e955fd30badc78b84d38f Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Wed, 3 Apr 2024 17:14:21 -0300 Subject: [PATCH 343/419] Review hard-stop-after formula --- .../core/cluster/hap_helper/hap_helper.py | 6 ++++-- .../wazuh/core/cluster/hap_helper/proxy.py | 19 ++++++++++++++++--- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index c3afdd915fe..e464c14a9cc 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -1,5 +1,4 @@ import logging -import time from asyncio import sleep from math import ceil, floor @@ -217,7 +216,7 @@ async def migrate_old_connections(self, new_servers: list[str], deleted_servers: raise WazuhHAPHelperError(3041) self.logger.debug('Waiting for new servers to go UP') - time.sleep(1) + await sleep(1) backend_stats_iteration += 1 wazuh_backend_stats = (await self.proxy.get_wazuh_backend_stats()).keys() @@ -414,11 +413,14 @@ async def set_hard_stop_after(self, wait_connection_retry: bool = True, reconnec self.logger.info('Setting a value for `hard-stop-after` configuration.') agents_distribution = await self.wazuh_dapi.get_agents_node_distribution() agents_id = [item['id'] for agents in agents_distribution.values() for item in agents] + current_cluster = await self.wazuh_dapi.get_cluster_nodes() await self.proxy.set_hard_stop_after_value( active_agents=len(agents_id), chunk_size=self.agent_reconnection_chunk_size, agent_reconnection_time=self.agent_reconnection_time, + n_managers=len(current_cluster.keys()), + server_admin_state_delay=self.SERVER_ADMIN_STATE_DELAY, ) if reconnect_agents and len(agents_id) > 0: diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index 9e478f60545..b47135a3ed0 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -548,7 +548,14 @@ async def get_hard_stop_after_value(self) -> Optional[str]: """ return (await self.api.get_global_configuration()).get('hard_stop_after', None) - async def set_hard_stop_after_value(self, active_agents: int, chunk_size: int, agent_reconnection_time: int): + async def set_hard_stop_after_value( + self, + active_agents: int, + chunk_size: 
int, + agent_reconnection_time: int, + n_managers: int, + server_admin_state_delay: int, + ): """Calculate a dynamic value for `hard-stop-after` and set it. Parameters @@ -559,9 +566,15 @@ async def set_hard_stop_after_value(self, active_agents: int, chunk_size: int, a Max number of agents to be reconnected at once. agent_reconnection_time : int Seconds to sleep after an agent chunk reconnection. + n_manager : int + Number of managers in the cluster. + server_admin_state_delay : int + Delay of server administration. """ - number_of_chunks = active_agents / chunk_size if active_agents > chunk_size else 1 - hard_stop_after = number_of_chunks * agent_reconnection_time + + hard_stop_after = (active_agents / (n_managers * chunk_size)) * n_managers * agent_reconnection_time + ( + n_managers * server_admin_state_delay * 2 + ) if self.hard_stop_after is None or self.hard_stop_after != hard_stop_after: configuration = await self.api.get_global_configuration() From 1cc1caa6301ff50bbecc7cbeb5201eb99a9b8fd6 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Mon, 18 Mar 2024 16:24:48 -0300 Subject: [PATCH 344/419] Add validations for HAProxy helper parameters --- src/config/cluster-config.c | 75 ++++++++++++++++++++++++++++++++++--- src/config/config.c | 2 +- src/config/config.h | 2 +- 3 files changed, 71 insertions(+), 8 deletions(-) diff --git a/src/config/cluster-config.c b/src/config/cluster-config.c index b0c96f03a4f..5b8cfcd6163 100644 --- a/src/config/cluster-config.c +++ b/src/config/cluster-config.c @@ -14,7 +14,7 @@ #include "global-config.h" -int Read_Cluster(XML_NODE node, void *d1, __attribute__((unused)) void *d2) { +int Read_Cluster(const OS_XML *xml, XML_NODE node, void *d1, __attribute__((unused)) void *d2) { static const char *disabled = "disabled"; static const char *cluster_name = "name"; @@ -30,9 +30,27 @@ int Read_Cluster(XML_NODE node, void *d1, __attribute__((unused)) void *d2) { static const char *bind_addr = "bind_addr"; static const char *C_VALID = "!\"#$%&'-.0123456789:<=>?ABCDEFGHIJKLMNOPQRESTUVWXYZ[\\]^_abcdefghijklmnopqrstuvwxyz{|}~"; + xml_node **children = NULL; + static const char *haproxy_helper = "haproxy_helper"; + static const char *haproxy_address = "haproxy_address"; + static const char *haproxy_port = "haproxy_port"; + static const char *haproxy_user = "haproxy_user"; + static const char *haproxy_password = "haproxy_password"; + static const char *haproxy_resolver = "haproxy_resolver"; + static const char *haproxy_backend = "haproxy_backend"; + static const char *api_port = "api_port"; + static const char *excluded_nodes = "excluded_nodes"; + static const char *frequency = "frequency"; + static const char *agent_chunk_size = "agent_chunk_size"; + static const char *agent_reconnection_time = "agent_reconnection_time"; + static const char *agent_reconnection_stability_time = "agent_reconnection_stability_time"; + static const char *imbalance_tolerance = "imbalance_tolerance"; + static const char *remove_disconnected_node_after = "remove_disconnected_node_after"; + _Config *Config; Config = (_Config *)d1; int i; + int j; int disable_cluster_info = 0; Config->hide_cluster_info = 0; @@ -96,14 +114,59 @@ int Read_Cluster(XML_NODE node, void *d1, __attribute__((unused)) void *d2) { } else if (!strcmp(node[i]->element, nodes)) { } else if (!strcmp(node[i]->element, port)) { } else if (!strcmp(node[i]->element, bind_addr)) { - } else { - merror(XML_INVELEM, node[i]->element); - return OS_INVALID; - } + } else if (!strcmp(node[i]->element, haproxy_helper)) { + + if 
(!(children = OS_GetElementsbyNode(xml, node[i]))) { + continue; + } + + for (j = 0; children[j]; j++) { + if (!strcmp(children[j]->element, disabled)) { + if (strcmp(children[j]->content, "yes") && strcmp(children[j]->content, "no")) { + merror("Detected a not allowed value for disabled tag '%s'. Valid values are 'yes' and 'no'.", children[j]->element); + return OS_INVALID; + } + } else if (!strcmp(children[j]->element, frequency)) { + } else if (!strcmp(children[j]->element, haproxy_address)) { + if (!strlen(node[i]->content)) { + merror("HAProxy address is empty in configuration"); + return OS_INVALID; + } + } else if (!strcmp(children[j]->element, haproxy_port)) { + } else if (!strcmp(children[j]->element, haproxy_user)) { + if (!strlen(node[i]->content)) { + merror("HAProxy user is empty in configuration"); + return OS_INVALID; + } + } else if (!strcmp(children[j]->element, haproxy_password)) { + if (!strlen(node[i]->content)) { + merror("HAProxy password is empty in configuration"); + return OS_INVALID; + } + } else if (!strcmp(children[j]->element, haproxy_backend)) { + } else if (!strcmp(children[j]->element, haproxy_resolver)) { + } else if (!strcmp(children[j]->element, api_port)) { + } else if (!strcmp(children[j]->element, excluded_nodes)) { + } else if (!strcmp(children[j]->element, agent_chunk_size)) { + } else if (!strcmp(children[j]->element, agent_reconnection_time)) { + } else if (!strcmp(children[j]->element, agent_reconnection_stability_time)) { + } else if (!strcmp(children[j]->element, imbalance_tolerance)) { + } else if (!strcmp(children[j]->element, remove_disconnected_node_after)) { + } else { + merror(XML_INVELEM, children[i]->element); + return OS_INVALID; + } + + } + } else { + merror(XML_INVELEM, node[i]->element); + return OS_INVALID; } + if (disable_cluster_info) Config->hide_cluster_info = 1; + } return 0; - } +} \ No newline at end of file diff --git a/src/config/config.c b/src/config/config.c index e47f0668b44..cfff6a7892b 100644 --- a/src/config/config.c +++ b/src/config/config.c @@ -240,7 +240,7 @@ static int read_main_elements(const OS_XML *xml, int modules, } } else if (strcmp(node[i]->element, oslogging) == 0) { } else if (chld_node && (strcmp(node[i]->element, oscluster) == 0)) { - if ((modules & CCLUSTER) && (Read_Cluster(chld_node, d1, d2) < 0)) { + if ((modules & CCLUSTER) && (Read_Cluster(xml, chld_node, d1, d2) < 0)) { goto fail; } } else if (chld_node && (strcmp(node[i]->element, ossocket) == 0)) { diff --git a/src/config/config.h b/src/config/config.h index e8be3ab5317..4da73fa3afa 100644 --- a/src/config/config.h +++ b/src/config/config.h @@ -121,7 +121,7 @@ int wm_key_request_read(__attribute__((unused)) xml_node **nodes, __attribute__( #endif #endif int Read_Labels(XML_NODE node, void *d1, void *d2); -int Read_Cluster(XML_NODE node, void *d1, void *d2); +int Read_Cluster(const OS_XML *xml, XML_NODE node, void *d1, void *d2); int Read_LogCollecSocket(XML_NODE node, void *d1, void *d2); int Read_AnalysisdSocket(XML_NODE node, void *d1, void *d2); int Read_Vuln(const OS_XML *xml, xml_node **nodes, void *d1, char d2); From 12494cf4e2d7aee9f7e660c5cc7ff60512b02f73 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Wed, 20 Mar 2024 18:52:17 -0300 Subject: [PATCH 345/419] Get configuration from ossec.conf --- framework/scripts/wazuh_clusterd.py | 14 ++- .../core/cluster/hap_helper/hap_helper.py | 110 ++++++++++++++---- framework/wazuh/core/cluster/utils.py | 82 +++++++++++-- framework/wazuh/core/configuration.py | 22 ++-- 4 files changed, 181 
insertions(+), 47 deletions(-) diff --git a/framework/scripts/wazuh_clusterd.py b/framework/scripts/wazuh_clusterd.py index 7b89dcba691..2f42e41e4d4 100644 --- a/framework/scripts/wazuh_clusterd.py +++ b/framework/scripts/wazuh_clusterd.py @@ -42,7 +42,7 @@ def set_logging(foreground_mode=False, debug_mode=0) -> WazuhLogger: def print_version(): """Print Wazuh metadata.""" - from wazuh.core.cluster import __version__, __author__, __wazuh_name__, __licence__ + from wazuh.core.cluster import __author__, __licence__, __version__, __wazuh_name__ print(f"\n{__wazuh_name__} {__version__} - {__author__}\n\n{__licence__}") @@ -81,7 +81,7 @@ async def master_main(args: argparse.Namespace, cluster_config: dict, cluster_it logger : WazuhLogger Cluster logger. """ - from wazuh.core.cluster import master, local_server + from wazuh.core.cluster import local_server, master from wazuh.core.cluster.hap_helper.hap_helper import HAPHelper cluster_utils.context_tag.set('Master') @@ -96,7 +96,10 @@ async def master_main(args: argparse.Namespace, cluster_config: dict, cluster_it concurrency_test=args.concurrency_test, node=my_server, configuration=cluster_config, enable_ssl=args.ssl, cluster_items=cluster_items) - await asyncio.gather(my_server.start(), my_local_server.start(), HAPHelper.start()) + tasks = [my_server, my_local_server] + if not cluster_config.get(cluster_utils.HAPROXY_HELPER, {}).get(cluster_utils.DISABLED, True): + tasks.append(HAPHelper) + await asyncio.gather(*[task.start() for task in tasks]) # @@ -116,8 +119,9 @@ async def worker_main(args: argparse.Namespace, cluster_config: dict, cluster_it logger : WazuhLogger Cluster logger. """ - from wazuh.core.cluster import worker, local_server from concurrent.futures import ProcessPoolExecutor + + from wazuh.core.cluster import local_server, worker cluster_utils.context_tag.set('Worker') # Pool is defined here so the child process is not recreated when the connection with master node is broken. 
@@ -254,7 +258,7 @@ def main(): if __name__ == '__main__': import wazuh.core.cluster.utils as cluster_utils - from wazuh.core import pyDaemonModule, common, configuration + from wazuh.core import common, configuration, pyDaemonModule cluster_items = cluster_utils.get_cluster_items() original_sig_handler = signal.signal(signal.SIGTERM, exit_handler) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index e464c14a9cc..ce21bb5e52b 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -2,12 +2,42 @@ from asyncio import sleep from math import ceil, floor -from wazuh.core.cluster.hap_helper.configuration import parse_configuration from wazuh.core.cluster.hap_helper.proxy import Proxy, ProxyAPI, ProxyServerState from wazuh.core.cluster.hap_helper.wazuh import WazuhAgent, WazuhDAPI -from wazuh.core.cluster.utils import ClusterFilter, context_tag, get_cluster_items +from wazuh.core.cluster.utils import ( + AGENT_CHUNK_SIZE, + AGENT_RECONNECTION_STABILITY_TIME, + AGENT_RECONNECTION_TIME, + EXCLUDED_NODES, + FREQUENCY, + HAPROXY_BACKEND, + HAPROXY_PORT, + HAPROXY_PROTOCOL, + HAPROXY_RESOLVER, + IMBALANCE_TOLERANCE, + REMOVE_DISCONNECTED_NODE_AFTER, + ClusterFilter, + context_tag, + get_cluster_items, + read_cluster_config, +) +from wazuh.core.configuration import get_ossec_conf from wazuh.core.exception import WazuhException, WazuhHAPHelperError +HELPER_DEFAULTS = { + HAPROXY_PORT: 5555, + HAPROXY_PROTOCOL: 'http', + HAPROXY_BACKEND: 'wazuh_cluster', + HAPROXY_RESOLVER: None, + EXCLUDED_NODES: [], + FREQUENCY: 60, + AGENT_CHUNK_SIZE: 120, + AGENT_RECONNECTION_TIME: 5, + AGENT_RECONNECTION_STABILITY_TIME: 60, + IMBALANCE_TOLERANCE: 0.1, + REMOVE_DISCONNECTED_NODE_AFTER: 3, +} + class HAPHelper: """Helper to balance Wazuh agents through cluster calling HAProxy.""" @@ -16,18 +46,29 @@ class HAPHelper: AGENT_STATUS_SYNC_TIME: int = 25 # Default agent notify time + cluster sync + 5s SERVER_ADMIN_STATE_DELAY: int = 5 - def __init__(self, proxy: Proxy, wazuh_dapi: WazuhDAPI, tag: str, options: dict): + def __init__( + self, + proxy: Proxy, + wazuh_dapi: WazuhDAPI, + tag: str, + sleep_time: float, + agent_reconnection_stability_time: int, + agent_reconnection_time: int, + agent_reconnection_chunk_size: int, + agent_tolerance: float, + remove_disconnected_node_after: int, + ): self.tag = tag self.logger = self._get_logger(self.tag) self.proxy = proxy self.wazuh_dapi = wazuh_dapi - self.sleep_time: int = options['sleep_time'] - self.agent_reconnection_stability_time: int = options['agent_reconnection_stability_time'] - self.agent_reconnection_chunk_size: int = options['agent_reconnection_chunk_size'] - self.agent_reconnection_time: int = options['agent_reconnection_time'] - self.agent_tolerance: float = options['agent_tolerance'] - self.remove_disconnected_node_after: int = options['remove_disconnected_node_after'] + self.sleep_time = sleep_time + self.agent_reconnection_stability_time = agent_reconnection_stability_time + self.agent_reconnection_chunk_size = agent_reconnection_chunk_size + self.agent_reconnection_time = agent_reconnection_time + self.agent_tolerance = agent_tolerance + self.remove_disconnected_node_after = remove_disconnected_node_after @staticmethod def _get_logger(tag: str) -> logging.Logger: @@ -442,33 +483,52 @@ def get_connection_retry() -> int: @classmethod async def start(cls): """Initialize and run HAPHelper.""" + tag = 'HAPHelper' + 
context_tag.set(tag) + logger = HAPHelper._get_logger(tag) try: - configuration = parse_configuration() - tag = 'HAPHelper' + helper_config = read_cluster_config()['haproxy_helper'] + port_config = get_ossec_conf(section='remote', field='port') proxy_api = ProxyAPI( - username=configuration['proxy']['api']['user'], - password=configuration['proxy']['api']['password'], + username=helper_config['haproxy_user'], + password=helper_config['haproxy_password'], tag=tag, - address=configuration['proxy']['api']['address'], - port=configuration['proxy']['api']['port'], - protocol=configuration['proxy']['api']['protocol'], + address=helper_config['haproxy_address'], + port=helper_config.get(HAPROXY_PORT, HELPER_DEFAULTS[HAPROXY_PORT]), + protocol=helper_config.get(HAPROXY_PROTOCOL, HELPER_DEFAULTS[HAPROXY_PROTOCOL]), ) proxy = Proxy( - wazuh_backend=configuration['proxy']['backend'], - wazuh_connection_port=configuration['wazuh']['connection']['port'], + wazuh_backend=helper_config.get(HAPROXY_BACKEND, HELPER_DEFAULTS[HAPROXY_BACKEND]), + wazuh_connection_port=int(port_config.get('remote')[0].get('port')), proxy_api=proxy_api, tag=tag, - resolver=configuration['proxy'].get('resolver', None), + resolver=helper_config.get(HAPROXY_RESOLVER, HELPER_DEFAULTS[HAPROXY_RESOLVER]), ) wazuh_dapi = WazuhDAPI( tag=tag, - excluded_nodes=configuration['wazuh']['excluded_nodes'], + excluded_nodes=helper_config.get(EXCLUDED_NODES, HELPER_DEFAULTS[EXCLUDED_NODES]), ) - helper = cls(proxy=proxy, wazuh_dapi=wazuh_dapi, tag=tag, options=configuration['hap_helper']) + helper = cls( + proxy=proxy, + wazuh_dapi=wazuh_dapi, + tag=tag, + sleep_time=int(helper_config.get(FREQUENCY, HELPER_DEFAULTS[FREQUENCY])), + agent_reconnection_stability_time=helper_config.get( + AGENT_RECONNECTION_STABILITY_TIME, HELPER_DEFAULTS[AGENT_RECONNECTION_STABILITY_TIME] + ), + agent_reconnection_time=helper_config.get( + AGENT_RECONNECTION_TIME, HELPER_DEFAULTS[AGENT_RECONNECTION_TIME] + ), + agent_reconnection_chunk_size=helper_config.get(AGENT_CHUNK_SIZE, HELPER_DEFAULTS[AGENT_CHUNK_SIZE]), + agent_tolerance=helper_config.get(IMBALANCE_TOLERANCE, HELPER_DEFAULTS[IMBALANCE_TOLERANCE]), + remove_disconnected_node_after=helper_config.get( + REMOVE_DISCONNECTED_NODE_AFTER, HELPER_DEFAULTS[REMOVE_DISCONNECTED_NODE_AFTER] + ), + ) await helper.initialize_proxy() @@ -482,11 +542,13 @@ async def start(cls): if helper.proxy.hard_stop_after is None: await helper.set_hard_stop_after() - helper.logger.info('Starting HAProxy Helper') + logger.info('Starting HAProxy Helper') await helper.manage_wazuh_cluster_nodes() + except KeyError as exc: + logger.error(f'Missing configuration {exc}. 
The helper cannot start.') except KeyboardInterrupt: pass except Exception as unexpected_exc: - helper.logger.critical(f'Unexpected exception: {unexpected_exc}', exc_info=True) + logger.critical(f'Unexpected exception: {unexpected_exc}', exc_info=True) finally: - helper.logger.info('Process ended') + logger.info('Process ended') diff --git a/framework/wazuh/core/cluster/utils.py b/framework/wazuh/core/cluster/utils.py index 4d7b68b3a31..d060625ac0b 100644 --- a/framework/wazuh/core/cluster/utils.py +++ b/framework/wazuh/core/cluster/utils.py @@ -18,15 +18,79 @@ from wazuh.core import common, pyDaemonModule from wazuh.core.configuration import get_ossec_conf -from wazuh.core.exception import WazuhException, WazuhError, WazuhInternalError +from wazuh.core.exception import WazuhError, WazuhException, WazuhInternalError from wazuh.core.results import WazuhResult from wazuh.core.utils import temporary_cache from wazuh.core.wazuh_socket import create_wazuh_socket_message from wazuh.core.wlogging import WazuhLogger +NO = 'no' +YES = 'yes' +DISABLED = 'disabled' +HAPROXY_HELPER = 'haproxy_helper' +FREQUENCY = 'frequency' +HAPROXY_ADDRESS = 'haproxy_address' +HAPROXY_PORT = 'haproxy_port' +HAPROXY_PROTOCOL = 'haproxy_protocol' +HAPROXY_USER = 'haproxy_user' +HAPROXY_PASSWORD = 'haproxy_password' +HAPROXY_BACKEND = 'haproxy_backend' +HAPROXY_RESOLVER = 'haproxy_resolver' +EXCLUDED_NODES = 'excluded_nodes' +AGENT_CHUNK_SIZE = 'agent_chunk_size' +AGENT_RECONNECTION_TIME = 'agent_reconnection_time' +AGENT_RECONNECTION_STABILITY_TIME = 'agent_reconnection_stability_time' +IMBALANCE_TOLERANCE = 'imbalance_tolerance' +REMOVE_DISCONNECTED_NODE_AFTER = 'remove_disconnected_node_after' + logger = logging.getLogger('wazuh') execq_lockfile = os.path.join(common.WAZUH_PATH, "var", "run", ".api_execq_lock") +def validate_haproxy_helper_config(helper_config: dict) -> dict: + """Validate HAProxy helper configuration section. + + Parameters + ---------- + helper_config : dict + Configuration to validate. + + Returns + ------- + dict + Validated configuration for HAProxy Helper. + + Raises + ------ + WazuhError (3004) + If some value have an invalid type. + """ + if helper_config[DISABLED] == NO: + helper_config[DISABLED] = False + elif helper_config[DISABLED] == YES: + helper_config[DISABLED] = True + + for field in [ + FREQUENCY, + AGENT_RECONNECTION_STABILITY_TIME, + AGENT_RECONNECTION_TIME, + AGENT_CHUNK_SIZE, + REMOVE_DISCONNECTED_NODE_AFTER + ]: + if helper_config.get(field): + try: + helper_config[field] = int(helper_config[field]) + except ValueError: + raise WazuhError(3004, extra_message=f"HAProxy Helper {field} must be an integer.") + + for field in [IMBALANCE_TOLERANCE]: + if helper_config.get(field): + try: + helper_config[field] = float(helper_config[field]) + except ValueError: + raise WazuhError(3004, extra_message=f"HAProxy Helper {field} must be a float.") + + return helper_config + def read_cluster_config(config_file=common.OSSEC_CONF, from_import=False) -> typing.Dict: """Read cluster configuration from ossec.conf. 
@@ -79,11 +143,11 @@ def read_cluster_config(config_file=common.OSSEC_CONF, from_import=False) -> typ raise WazuhError(3004, extra_message="Cluster port must be an integer.") config_cluster['port'] = int(config_cluster['port']) - if config_cluster['disabled'] == 'no': - config_cluster['disabled'] = False - elif config_cluster['disabled'] == 'yes': - config_cluster['disabled'] = True - elif not isinstance(config_cluster['disabled'], bool): + if config_cluster[DISABLED] == 'no': + config_cluster[DISABLED] = False + elif config_cluster[DISABLED] == 'yes': + config_cluster[DISABLED] = True + elif not isinstance(config_cluster[DISABLED], bool): raise WazuhError(3004, extra_message=f"Allowed values for 'disabled' field are 'yes' and 'no'. " f"Found: '{config_cluster['disabled']}'") @@ -92,6 +156,9 @@ def read_cluster_config(config_file=common.OSSEC_CONF, from_import=False) -> typ logger.info("Deprecated node type 'client'. Using 'worker' instead.") config_cluster['node_type'] = 'worker' + if config_cluster.get(HAPROXY_HELPER): + config_cluster[HAPROXY_HELPER] = validate_haproxy_helper_config(config_cluster[HAPROXY_HELPER]) + return config_cluster @@ -125,7 +192,7 @@ def get_manager_status(cache=False) -> typing.Dict: pidfile = glob(os.path.join(run_dir, f"{process}-*.pid")) if os.path.exists(os.path.join(run_dir, f"{process}.failed")): data[process] = 'failed' - elif os.path.exists(os.path.join(run_dir, f".restart")): + elif os.path.exists(os.path.join(run_dir, ".restart")): data[process] = 'restarting' elif os.path.exists(os.path.join(run_dir, f"{process}.start")): data[process] = 'starting' @@ -372,6 +439,7 @@ async def forward_function(func: callable, f_kwargs: dict = None, request_type: import concurrent from asyncio import run + from wazuh.core.cluster.dapi.dapi import DistributedAPI dapi = DistributedAPI(f=func, f_kwargs=f_kwargs, request_type=request_type, is_async=False, wait_for_complete=True, logger=logger, nodes=nodes, diff --git a/framework/wazuh/core/configuration.py b/framework/wazuh/core/configuration.py index 94337dacec2..6031d4ee125 100755 --- a/framework/wazuh/core/configuration.py +++ b/framework/wazuh/core/configuration.py @@ -9,19 +9,17 @@ import subprocess import sys import tempfile -from configparser import RawConfigParser, NoOptionError +from configparser import NoOptionError, RawConfigParser from io import StringIO -from os import remove, path as os_path +from os import path as os_path +from os import remove from types import MappingProxyType -from typing import Union, List +from typing import List, Union from defusedxml.ElementTree import tostring from defusedxml.minidom import parseString - -from wazuh.core import common -from wazuh.core import wazuh_socket -from wazuh.core.exception import WazuhInternalError, WazuhError -from wazuh.core.exception import WazuhResourceNotFound +from wazuh.core import common, wazuh_socket +from wazuh.core.exception import WazuhError, WazuhInternalError, WazuhResourceNotFound from wazuh.core.utils import cut_array, load_wazuh_xml, safe_move logger = logging.getLogger('wazuh') @@ -231,12 +229,14 @@ def _read_option(section_name: str, opt: str) -> tuple: json_path = json_attribs.copy() json_path['path'] = path.strip() opt_value.append(json_path) - elif section_name == 'syscheck' and opt_name in ('synchronization', 'whodata'): + elif (section_name == 'syscheck' and opt_name in ('synchronization', 'whodata')) or \ + (section_name == 'cluster' and opt_name == 'haproxy_helper'): opt_value = {} for child in opt: child_section, child_config = 
_read_option(child.tag.lower(), child) opt_value[child_section] = child_config.split(',') if child_config.find(',') > 0 else child_config elif (section_name == 'cluster' and opt_name == 'nodes') or \ + (section_name == 'haproxy_helper' and opt_name == 'excluded_nodes') or \ (section_name == 'sca' and opt_name == 'policies') or \ (section_name == 'indexer' and opt_name == 'hosts') : opt_value = [child.text for child in opt] @@ -856,7 +856,7 @@ def get_file_conf(filename: str, group_id: str = None, type_conf: str = None, ra if not os_path.exists(file_path): raise WazuhError(1006, file_path) - + if raw: with open(file_path, 'r') as raw_data: data = raw_data.read() @@ -1342,7 +1342,7 @@ def update_check_is_enabled() -> bool: def get_cti_url() -> str: """Get the CTI service URL from the configuration. - + Returns ------- str From 0a8dd22b427a8a2312ce1531432d5cad305b9c44 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Fri, 5 Apr 2024 11:14:36 -0300 Subject: [PATCH 346/419] Add field for haproxy protocol --- framework/scripts/wazuh_clusterd.py | 2 +- framework/wazuh/core/cluster/utils.py | 11 +++--- src/config/cluster-config.c | 48 +++++++++++++++------------ 3 files changed, 34 insertions(+), 27 deletions(-) diff --git a/framework/scripts/wazuh_clusterd.py b/framework/scripts/wazuh_clusterd.py index 2f42e41e4d4..e059fac5bde 100644 --- a/framework/scripts/wazuh_clusterd.py +++ b/framework/scripts/wazuh_clusterd.py @@ -97,7 +97,7 @@ async def master_main(args: argparse.Namespace, cluster_config: dict, cluster_it configuration=cluster_config, enable_ssl=args.ssl, cluster_items=cluster_items) tasks = [my_server, my_local_server] - if not cluster_config.get(cluster_utils.HAPROXY_HELPER, {}).get(cluster_utils.DISABLED, True): + if not cluster_config.get(cluster_utils.HAPROXY_HELPER, {}).get(cluster_utils.HAPROXY_DISABLED, True): tasks.append(HAPHelper) await asyncio.gather(*[task.start() for task in tasks]) diff --git a/framework/wazuh/core/cluster/utils.py b/framework/wazuh/core/cluster/utils.py index d060625ac0b..3816aafeda7 100644 --- a/framework/wazuh/core/cluster/utils.py +++ b/framework/wazuh/core/cluster/utils.py @@ -28,7 +28,7 @@ YES = 'yes' DISABLED = 'disabled' HAPROXY_HELPER = 'haproxy_helper' -FREQUENCY = 'frequency' +HAPROXY_DISABLED = 'haproxy_disabled' HAPROXY_ADDRESS = 'haproxy_address' HAPROXY_PORT = 'haproxy_port' HAPROXY_PROTOCOL = 'haproxy_protocol' @@ -36,6 +36,7 @@ HAPROXY_PASSWORD = 'haproxy_password' HAPROXY_BACKEND = 'haproxy_backend' HAPROXY_RESOLVER = 'haproxy_resolver' +FREQUENCY = 'frequency' EXCLUDED_NODES = 'excluded_nodes' AGENT_CHUNK_SIZE = 'agent_chunk_size' AGENT_RECONNECTION_TIME = 'agent_reconnection_time' @@ -64,10 +65,10 @@ def validate_haproxy_helper_config(helper_config: dict) -> dict: WazuhError (3004) If some value have an invalid type. 
""" - if helper_config[DISABLED] == NO: - helper_config[DISABLED] = False - elif helper_config[DISABLED] == YES: - helper_config[DISABLED] = True + if helper_config[HAPROXY_DISABLED] == NO: + helper_config[HAPROXY_DISABLED] = False + elif helper_config[HAPROXY_DISABLED] == YES: + helper_config[HAPROXY_DISABLED] = True for field in [ FREQUENCY, diff --git a/src/config/cluster-config.c b/src/config/cluster-config.c index 5b8cfcd6163..99ba5865626 100644 --- a/src/config/cluster-config.c +++ b/src/config/cluster-config.c @@ -30,10 +30,12 @@ int Read_Cluster(const OS_XML *xml, XML_NODE node, void *d1, __attribute__((unus static const char *bind_addr = "bind_addr"; static const char *C_VALID = "!\"#$%&'-.0123456789:<=>?ABCDEFGHIJKLMNOPQRESTUVWXYZ[\\]^_abcdefghijklmnopqrstuvwxyz{|}~"; - xml_node **children = NULL; + xml_node **child = NULL; static const char *haproxy_helper = "haproxy_helper"; + static const char *haproxy_disabled = "haproxy_disabled"; static const char *haproxy_address = "haproxy_address"; static const char *haproxy_port = "haproxy_port"; + static const char *haproxy_protocol = "haproxy_protocol"; static const char *haproxy_user = "haproxy_user"; static const char *haproxy_password = "haproxy_password"; static const char *haproxy_resolver = "haproxy_resolver"; @@ -116,44 +118,48 @@ int Read_Cluster(const OS_XML *xml, XML_NODE node, void *d1, __attribute__((unus } else if (!strcmp(node[i]->element, bind_addr)) { } else if (!strcmp(node[i]->element, haproxy_helper)) { - if (!(children = OS_GetElementsbyNode(xml, node[i]))) { + if (!(child = OS_GetElementsbyNode(xml, node[i]))) { continue; } - for (j = 0; children[j]; j++) { - if (!strcmp(children[j]->element, disabled)) { - if (strcmp(children[j]->content, "yes") && strcmp(children[j]->content, "no")) { - merror("Detected a not allowed value for disabled tag '%s'. Valid values are 'yes' and 'no'.", children[j]->element); + for (j = 0; child[j]; j++) { + if (!strcmp(child[j]->element, haproxy_disabled)) { + if (strcmp(child[j]->content, "yes") && strcmp(child[j]->content, "no")) { + merror("Detected a not allowed value for disabled tag '%s'. Valid values are 'yes' and 'no'.", child[j]->element); return OS_INVALID; } - } else if (!strcmp(children[j]->element, frequency)) { - } else if (!strcmp(children[j]->element, haproxy_address)) { + } else if (!strcmp(child[j]->element, frequency)) { + } else if (!strcmp(child[j]->element, haproxy_address)) { if (!strlen(node[i]->content)) { merror("HAProxy address is empty in configuration"); return OS_INVALID; } - } else if (!strcmp(children[j]->element, haproxy_port)) { - } else if (!strcmp(children[j]->element, haproxy_user)) { + } else if (!strcmp(child[j]->element, haproxy_port)) { + } else if (!strcmp(child[j]->element, haproxy_protocol)) { + if (strcmp(child[j]->content, "http") && strcmp(child[j]->content, "https")) { + merror("Detected a not allowed value for haproxy_protocol tag '%s'. 
Valid values are 'http' and 'https'.", child[j]->element); + return OS_INVALID; + } + } else if (!strcmp(child[j]->element, haproxy_user)) { if (!strlen(node[i]->content)) { merror("HAProxy user is empty in configuration"); return OS_INVALID; } - } else if (!strcmp(children[j]->element, haproxy_password)) { + } else if (!strcmp(child[j]->element, haproxy_password)) { if (!strlen(node[i]->content)) { merror("HAProxy password is empty in configuration"); return OS_INVALID; } - } else if (!strcmp(children[j]->element, haproxy_backend)) { - } else if (!strcmp(children[j]->element, haproxy_resolver)) { - } else if (!strcmp(children[j]->element, api_port)) { - } else if (!strcmp(children[j]->element, excluded_nodes)) { - } else if (!strcmp(children[j]->element, agent_chunk_size)) { - } else if (!strcmp(children[j]->element, agent_reconnection_time)) { - } else if (!strcmp(children[j]->element, agent_reconnection_stability_time)) { - } else if (!strcmp(children[j]->element, imbalance_tolerance)) { - } else if (!strcmp(children[j]->element, remove_disconnected_node_after)) { + } else if (!strcmp(child[j]->element, haproxy_backend)) { + } else if (!strcmp(child[j]->element, haproxy_resolver)) { + } else if (!strcmp(child[j]->element, excluded_nodes)) { + } else if (!strcmp(child[j]->element, agent_chunk_size)) { + } else if (!strcmp(child[j]->element, agent_reconnection_time)) { + } else if (!strcmp(child[j]->element, agent_reconnection_stability_time)) { + } else if (!strcmp(child[j]->element, imbalance_tolerance)) { + } else if (!strcmp(child[j]->element, remove_disconnected_node_after)) { } else { - merror(XML_INVELEM, children[i]->element); + merror(XML_INVELEM, child[i]->element); return OS_INVALID; } From 8fad9b6feb7a898c5c514e925de5c2c15032b958 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Fri, 5 Apr 2024 16:09:44 -0300 Subject: [PATCH 347/419] Clean up old configuration utils --- .../core/cluster/hap_helper/configuration.py | 43 ------------ .../hap_helper/data/configuration.yaml | 55 --------------- .../hap_helper/data/configuration_schema.json | 68 ------------------- src/config/cluster-config.c | 2 +- 4 files changed, 1 insertion(+), 167 deletions(-) delete mode 100644 framework/wazuh/core/cluster/hap_helper/configuration.py delete mode 100644 framework/wazuh/core/cluster/hap_helper/data/configuration.yaml delete mode 100644 framework/wazuh/core/cluster/hap_helper/data/configuration_schema.json diff --git a/framework/wazuh/core/cluster/hap_helper/configuration.py b/framework/wazuh/core/cluster/hap_helper/configuration.py deleted file mode 100644 index c5ccaebe834..00000000000 --- a/framework/wazuh/core/cluster/hap_helper/configuration.py +++ /dev/null @@ -1,43 +0,0 @@ -import json -from os import path - -import jsonschema -import yaml -from wazuh.core.exception import WazuhHAPHelperError - - -def validate_custom_configuration(custom_configuration: dict): - with open( - path.join(path.abspath((path.dirname(__file__))), 'data', 'configuration_schema.json'), 'r' - ) as schema_file: - json_schema = json.loads(schema_file.read()) - - try: - jsonschema.validate(instance=custom_configuration, schema=json_schema) - except jsonschema.ValidationError as validation_err: - raise WazuhHAPHelperError(3042, extra_message=f"({'> '.join(validation_err.path)}) {validation_err.message}") - - -def merge_configurations(default: dict, config: dict) -> dict: - for key, value in config.items(): - if isinstance(value, dict): - default[key] = merge_configurations(default.get(key, {}), value) - else: - 
default[key] = value - return default - - -def parse_configuration(custom_configuration_path: str = '') -> dict: - with open( - path.join(path.abspath((path.dirname(__file__))), 'data', 'configuration.yaml'), 'r' - ) as default_conf_file: - default_configuration = yaml.safe_load(default_conf_file) - - if not custom_configuration_path: - return default_configuration - - with open(custom_configuration_path, 'r') as custom_conf_file: - custom_configuration = yaml.safe_load(custom_conf_file) - - validate_custom_configuration(custom_configuration) - return merge_configurations(default_configuration, custom_configuration) diff --git a/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml b/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml deleted file mode 100644 index ddc2e0fa6f5..00000000000 --- a/framework/wazuh/core/cluster/hap_helper/data/configuration.yaml +++ /dev/null @@ -1,55 +0,0 @@ ---- -wazuh: - # Wazuh API configuration - api: - # Wazuh API address - address: localhost - # Wazuh API port - port: 55000 - # Wazuh API username. It must have read permissions for cluster and agents and reconnect permissions for agents - user: wazuh - # Wazuh API password - password: wazuh - # Wazuh cluster configuration - connection: - # Wazuh agents connection service port (TCP) - port: 1514 - # Wazuh cluster nodes to exclude on auto mode - excluded_nodes: [] - -proxy: - # Wazuh Proxy API configuration - api: - # Wazuh Proxy API address - address: wazuh-proxy - # Wazuh Proxy API port - port: 5555 - # Wazuh Proxy API username - user: haproxy - # Wazuh Proxy API password - password: haproxy - # Protocol to use 'http' or 'https'. By default http - protocol: http - # Defined Proxy backend (frontend will append '_front' to it) - backend: wazuh_cluster - - # Defines the list of DNS servers to translate DNS names to IP addresses. - # This configuration is recommended but not mandatory. - # If it is configured, a resolvers section must be properly defined in - # the haproxy.cfg file. 
- # resolver: wazuh_resolver - -hap_helper: - # Seconds to sleep between each coordinator iteration - sleep_time: 60 - # Seconds to sleep after the end of the agent reconnection phase - agent_reconnection_stability_time: 60 - # Agent chunk size (each chunk defines the max number of agents to be reconnected at once) - agent_reconnection_chunk_size: 120 - # Seconds to sleep after an agent chunk reconnection (if there is more than one) - agent_reconnection_time: 5 - # Agent imbalance tolerance - agent_tolerance: 0.1 - # Time in minutes before removing a disconnected Wazuh node from the backend - remove_disconnected_node_after: 3 - log_level: debug diff --git a/framework/wazuh/core/cluster/hap_helper/data/configuration_schema.json b/framework/wazuh/core/cluster/hap_helper/data/configuration_schema.json deleted file mode 100644 index ac018281788..00000000000 --- a/framework/wazuh/core/cluster/hap_helper/data/configuration_schema.json +++ /dev/null @@ -1,68 +0,0 @@ - -{ - "type": "object", - "additionalProperties": false, - "properties": { - "wazuh": { - "type": "object", - "additionalProperties": false, - "properties": { - "api": { - "type": "object", - "additionalProperties": false, - "properties": { - "address": {"type": "string"}, - "port": {"type": "integer"}, - "user": {"type": "string"}, - "password": {"type": "string"} - } - }, - - "connection": { - "type": "object", - "additionalProperties": false, - "properties": { - "port": {"type": "integer"} - } - }, - - "excluded_nodes": {"type": "array", "items": {"type": "string"}} - } - }, - - - "proxy": { - "type": "object", - "additionalProperties": false, - "properties": { - "api": { - "type": "object", - "additionalProperties": false, - "properties": { - "address": {"type": "string"}, - "port": {"type": "integer"}, - "user": {"type": "string"}, - "password": {"type": "string"} - } - }, - - "backend": {"type": "string"}, - "resolver": {"type": "string"} - } - }, - - - "coordinator": { - "type": "object", - "additionalProperties": false, - "properties": { - "sleep_time": {"type": "integer", "minimum": 10}, - "agent_reconnection_stability_time": {"type": "integer", "minimum": 10}, - "agent_reconnection_chunk_size": {"type": "integer", "minimum": 100}, - "agent_reconnection_time": {"type": "integer", "minimum": 0}, - "agent_tolerance": {"type": "number", "minimum": 0, "exclusiveMinimum": true, "maximum": 1}, - "remove_disconnected_node_after": {"type": "integer", "minimum": 0} - } - } - } -} diff --git a/src/config/cluster-config.c b/src/config/cluster-config.c index 99ba5865626..745a2909bbe 100644 --- a/src/config/cluster-config.c +++ b/src/config/cluster-config.c @@ -175,4 +175,4 @@ int Read_Cluster(const OS_XML *xml, XML_NODE node, void *d1, __attribute__((unus } return 0; -} \ No newline at end of file +} From 960f49aac286cdbef321cbb1c7fbb7be21638586 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Fri, 5 Apr 2024 17:04:40 -0300 Subject: [PATCH 348/419] Fix UT --- framework/scripts/tests/test_wazuh_clusterd.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/framework/scripts/tests/test_wazuh_clusterd.py b/framework/scripts/tests/test_wazuh_clusterd.py index 5ff39a5bedf..553ac2ce57a 100644 --- a/framework/scripts/tests/test_wazuh_clusterd.py +++ b/framework/scripts/tests/test_wazuh_clusterd.py @@ -140,10 +140,11 @@ def start(cls): return 'HAPHELPER_START' - async def gather(first, second, third): + async def gather(first, second): assert first == 'MASTER_START' assert second == 'LOCALSERVER_START' - assert third == 
'HAPHELPER_START' + # FIXME: When write UT for the new components. + # assert third == 'HAPHELPER_START' wazuh_clusterd.cluster_utils = cluster_utils From 62413cf27648abed8c983f4fcaca7eba9b11b99a Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Wed, 10 Apr 2024 15:34:09 -0300 Subject: [PATCH 349/419] Suggestions from CR --- framework/scripts/tests/test_wazuh_clusterd.py | 2 +- framework/wazuh/core/cluster/hap_helper/proxy.py | 4 +++- framework/wazuh/core/cluster/utils.py | 7 ++++--- src/config/cluster-config.c | 10 +++++----- 4 files changed, 13 insertions(+), 10 deletions(-) diff --git a/framework/scripts/tests/test_wazuh_clusterd.py b/framework/scripts/tests/test_wazuh_clusterd.py index 553ac2ce57a..6e2bf168557 100644 --- a/framework/scripts/tests/test_wazuh_clusterd.py +++ b/framework/scripts/tests/test_wazuh_clusterd.py @@ -143,7 +143,7 @@ def start(cls): async def gather(first, second): assert first == 'MASTER_START' assert second == 'LOCALSERVER_START' - # FIXME: When write UT for the new components. + # FIXME: (20940) When write UT for the new components. # assert third == 'HAPHELPER_START' diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index b47135a3ed0..32bfbb04522 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -112,7 +112,9 @@ async def initialize(self): elif response.status_code == 404: raise WazuhHAPHelperError(3047) except httpx.ConnectError: - raise WazuhHAPHelperError(3043, extra_message='Check connectivity and the configuration file') + raise WazuhHAPHelperError( + 3043, extra_message='Check connectivity and the configuration in the `ossec.conf`' + ) except httpx.RequestError as req_exc: raise WazuhHAPHelperError(3043, extra_message=str(req_exc)) diff --git a/framework/wazuh/core/cluster/utils.py b/framework/wazuh/core/cluster/utils.py index 3816aafeda7..c8407aab22b 100644 --- a/framework/wazuh/core/cluster/utils.py +++ b/framework/wazuh/core/cluster/utils.py @@ -63,7 +63,7 @@ def validate_haproxy_helper_config(helper_config: dict) -> dict: Raises ------ WazuhError (3004) - If some value have an invalid type. + If some value has an invalid type. 
""" if helper_config[HAPROXY_DISABLED] == NO: helper_config[HAPROXY_DISABLED] = False @@ -71,6 +71,7 @@ def validate_haproxy_helper_config(helper_config: dict) -> dict: helper_config[HAPROXY_DISABLED] = True for field in [ + HAPROXY_PORT, FREQUENCY, AGENT_RECONNECTION_STABILITY_TIME, AGENT_RECONNECTION_TIME, @@ -144,9 +145,9 @@ def read_cluster_config(config_file=common.OSSEC_CONF, from_import=False) -> typ raise WazuhError(3004, extra_message="Cluster port must be an integer.") config_cluster['port'] = int(config_cluster['port']) - if config_cluster[DISABLED] == 'no': + if config_cluster[DISABLED] == NO: config_cluster[DISABLED] = False - elif config_cluster[DISABLED] == 'yes': + elif config_cluster[DISABLED] == YES: config_cluster[DISABLED] = True elif not isinstance(config_cluster[DISABLED], bool): raise WazuhError(3004, diff --git a/src/config/cluster-config.c b/src/config/cluster-config.c index 745a2909bbe..1a1ac82d37c 100644 --- a/src/config/cluster-config.c +++ b/src/config/cluster-config.c @@ -125,29 +125,29 @@ int Read_Cluster(const OS_XML *xml, XML_NODE node, void *d1, __attribute__((unus for (j = 0; child[j]; j++) { if (!strcmp(child[j]->element, haproxy_disabled)) { if (strcmp(child[j]->content, "yes") && strcmp(child[j]->content, "no")) { - merror("Detected a not allowed value for disabled tag '%s'. Valid values are 'yes' and 'no'.", child[j]->element); + merror("Detected an invalid value for the disabled tag '%s'. Valid values are 'yes' and 'no'.", child[j]->element); return OS_INVALID; } } else if (!strcmp(child[j]->element, frequency)) { } else if (!strcmp(child[j]->element, haproxy_address)) { if (!strlen(node[i]->content)) { - merror("HAProxy address is empty in configuration"); + merror("HAProxy address is missing in the configuration"); return OS_INVALID; } } else if (!strcmp(child[j]->element, haproxy_port)) { } else if (!strcmp(child[j]->element, haproxy_protocol)) { if (strcmp(child[j]->content, "http") && strcmp(child[j]->content, "https")) { - merror("Detected a not allowed value for haproxy_protocol tag '%s'. Valid values are 'http' and 'https'.", child[j]->element); + merror("Detected an invalid value for the haproxy_protocol tag '%s'. 
Valid values are 'http' and 'https'.", child[j]->element); return OS_INVALID; } } else if (!strcmp(child[j]->element, haproxy_user)) { if (!strlen(node[i]->content)) { - merror("HAProxy user is empty in configuration"); + merror("HAProxy user is missing in the configuration"); return OS_INVALID; } } else if (!strcmp(child[j]->element, haproxy_password)) { if (!strlen(node[i]->content)) { - merror("HAProxy password is empty in configuration"); + merror("HAProxy password is missing in the configuration"); return OS_INVALID; } } else if (!strcmp(child[j]->element, haproxy_backend)) { From 305fbf8b6abf1cdcf155b7aa65dacabeeccf4e1c Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Fri, 19 Apr 2024 13:58:07 -0300 Subject: [PATCH 350/419] Fill empty values with defaults --- .../core/cluster/hap_helper/hap_helper.py | 56 +++++++------------ framework/wazuh/core/cluster/utils.py | 19 +++++++ 2 files changed, 39 insertions(+), 36 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index ce21bb5e52b..a1804c567c6 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -10,10 +10,13 @@ AGENT_RECONNECTION_TIME, EXCLUDED_NODES, FREQUENCY, + HAPROXY_ADDRESS, HAPROXY_BACKEND, + HAPROXY_PASSWORD, HAPROXY_PORT, HAPROXY_PROTOCOL, HAPROXY_RESOLVER, + HAPROXY_USER, IMBALANCE_TOLERANCE, REMOVE_DISCONNECTED_NODE_AFTER, ClusterFilter, @@ -24,20 +27,7 @@ from wazuh.core.configuration import get_ossec_conf from wazuh.core.exception import WazuhException, WazuhHAPHelperError -HELPER_DEFAULTS = { - HAPROXY_PORT: 5555, - HAPROXY_PROTOCOL: 'http', - HAPROXY_BACKEND: 'wazuh_cluster', - HAPROXY_RESOLVER: None, - EXCLUDED_NODES: [], - FREQUENCY: 60, - AGENT_CHUNK_SIZE: 120, - AGENT_RECONNECTION_TIME: 5, - AGENT_RECONNECTION_STABILITY_TIME: 60, - IMBALANCE_TOLERANCE: 0.1, - REMOVE_DISCONNECTED_NODE_AFTER: 3, -} - +CONNECTION_PORT = 1514 class HAPHelper: """Helper to balance Wazuh agents through cluster calling HAProxy.""" @@ -489,45 +479,39 @@ async def start(cls): try: helper_config = read_cluster_config()['haproxy_helper'] - port_config = get_ossec_conf(section='remote', field='port') + port_config = get_ossec_conf(section='remote') proxy_api = ProxyAPI( - username=helper_config['haproxy_user'], - password=helper_config['haproxy_password'], + username=helper_config[HAPROXY_USER], + password=helper_config[HAPROXY_PASSWORD], tag=tag, - address=helper_config['haproxy_address'], - port=helper_config.get(HAPROXY_PORT, HELPER_DEFAULTS[HAPROXY_PORT]), - protocol=helper_config.get(HAPROXY_PROTOCOL, HELPER_DEFAULTS[HAPROXY_PROTOCOL]), + address=helper_config[HAPROXY_ADDRESS], + port=helper_config[HAPROXY_PORT], + protocol=helper_config[HAPROXY_PROTOCOL], ) proxy = Proxy( - wazuh_backend=helper_config.get(HAPROXY_BACKEND, HELPER_DEFAULTS[HAPROXY_BACKEND]), - wazuh_connection_port=int(port_config.get('remote')[0].get('port')), + wazuh_backend=helper_config[HAPROXY_BACKEND], + wazuh_connection_port=int(port_config.get('remote')[0].get('port', CONNECTION_PORT)), proxy_api=proxy_api, tag=tag, - resolver=helper_config.get(HAPROXY_RESOLVER, HELPER_DEFAULTS[HAPROXY_RESOLVER]), + resolver=helper_config[HAPROXY_RESOLVER], ) wazuh_dapi = WazuhDAPI( tag=tag, - excluded_nodes=helper_config.get(EXCLUDED_NODES, HELPER_DEFAULTS[EXCLUDED_NODES]), + excluded_nodes=helper_config[EXCLUDED_NODES], ) helper = cls( proxy=proxy, wazuh_dapi=wazuh_dapi, tag=tag, - 
sleep_time=int(helper_config.get(FREQUENCY, HELPER_DEFAULTS[FREQUENCY])), - agent_reconnection_stability_time=helper_config.get( - AGENT_RECONNECTION_STABILITY_TIME, HELPER_DEFAULTS[AGENT_RECONNECTION_STABILITY_TIME] - ), - agent_reconnection_time=helper_config.get( - AGENT_RECONNECTION_TIME, HELPER_DEFAULTS[AGENT_RECONNECTION_TIME] - ), - agent_reconnection_chunk_size=helper_config.get(AGENT_CHUNK_SIZE, HELPER_DEFAULTS[AGENT_CHUNK_SIZE]), - agent_tolerance=helper_config.get(IMBALANCE_TOLERANCE, HELPER_DEFAULTS[IMBALANCE_TOLERANCE]), - remove_disconnected_node_after=helper_config.get( - REMOVE_DISCONNECTED_NODE_AFTER, HELPER_DEFAULTS[REMOVE_DISCONNECTED_NODE_AFTER] - ), + sleep_time=helper_config[FREQUENCY], + agent_reconnection_stability_time=helper_config[AGENT_RECONNECTION_STABILITY_TIME], + agent_reconnection_time=helper_config[AGENT_RECONNECTION_TIME], + agent_reconnection_chunk_size=helper_config[AGENT_CHUNK_SIZE], + agent_tolerance=helper_config[IMBALANCE_TOLERANCE], + remove_disconnected_node_after=helper_config[REMOVE_DISCONNECTED_NODE_AFTER], ) await helper.initialize_proxy() diff --git a/framework/wazuh/core/cluster/utils.py b/framework/wazuh/core/cluster/utils.py index c8407aab22b..96bf5cb6899 100644 --- a/framework/wazuh/core/cluster/utils.py +++ b/framework/wazuh/core/cluster/utils.py @@ -47,6 +47,21 @@ logger = logging.getLogger('wazuh') execq_lockfile = os.path.join(common.WAZUH_PATH, "var", "run", ".api_execq_lock") +HELPER_DEFAULTS = { + HAPROXY_PORT: 5555, + HAPROXY_PROTOCOL: 'http', + HAPROXY_BACKEND: 'wazuh_cluster', + HAPROXY_RESOLVER: None, + EXCLUDED_NODES: [], + FREQUENCY: 60, + AGENT_CHUNK_SIZE: 120, + AGENT_RECONNECTION_TIME: 5, + AGENT_RECONNECTION_STABILITY_TIME: 60, + IMBALANCE_TOLERANCE: 0.1, + REMOVE_DISCONNECTED_NODE_AFTER: 3, +} + + def validate_haproxy_helper_config(helper_config: dict) -> dict: """Validate HAProxy helper configuration section. @@ -65,6 +80,10 @@ def validate_haproxy_helper_config(helper_config: dict) -> dict: WazuhError (3004) If some value has an invalid type. """ + # If any value is missing from user's cluster configuration, add the default one. 
+    for value_name in set(HELPER_DEFAULTS.keys()) - set(helper_config.keys()):
+        helper_config[value_name] = HELPER_DEFAULTS[value_name]
+
     if helper_config[HAPROXY_DISABLED] == NO:
         helper_config[HAPROXY_DISABLED] = False
     elif helper_config[HAPROXY_DISABLED] == YES:
         helper_config[HAPROXY_DISABLED] = True

From e8708848383df8ab35017bf8bf80a03ce850c52 Mon Sep 17 00:00:00 2001
From: Nico Stefani
Date: Fri, 19 Apr 2024 16:13:20 -0300
Subject: [PATCH 351/419] Set default number of managers in case all are
 excluded

---
 framework/wazuh/core/cluster/hap_helper/hap_helper.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py
index a1804c567c6..c2f00fc69ae 100644
--- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py
+++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py
@@ -29,6 +29,7 @@
 
 CONNECTION_PORT = 1514
 
+
 class HAPHelper:
     """Helper to balance Wazuh agents through cluster calling HAProxy."""
 
@@ -450,7 +451,7 @@ async def set_hard_stop_after(self, wait_connection_retry: bool = True, reconnec
             active_agents=len(agents_id),
             chunk_size=self.agent_reconnection_chunk_size,
             agent_reconnection_time=self.agent_reconnection_time,
-            n_managers=len(current_cluster.keys()),
+            n_managers=len(current_cluster.keys()) or 1,
             server_admin_state_delay=self.SERVER_ADMIN_STATE_DELAY,
         )
 
From 8b91c4ad1d87df54657655654208f1fe565bcdf8 Mon Sep 17 00:00:00 2001
From: Nico Stefani
Date: Fri, 19 Apr 2024 18:01:10 -0300
Subject: [PATCH 352/419] Improve configuration validation

---
 framework/wazuh/core/cluster/cluster.py       | 62 ++++++++++++++++-
 .../core/cluster/hap_helper/hap_helper.py     |  2 +-
 framework/wazuh/core/cluster/utils.py         | 71 +++++++++++++++----
 3 files changed, 115 insertions(+), 20 deletions(-)

diff --git a/framework/wazuh/core/cluster/cluster.py b/framework/wazuh/core/cluster/cluster.py
index e8451db2122..f65d4433c07 100644
--- a/framework/wazuh/core/cluster/cluster.py
+++ b/framework/wazuh/core/cluster/cluster.py
@@ -16,23 +16,73 @@
 from os import listdir, path, remove, stat, walk
 from uuid import uuid4
 
+from jsonschema import ValidationError, validate
 from wazuh import WazuhError, WazuhException, WazuhInternalError
 from wazuh.core import common
+from wazuh.core.cluster.utils import (
+    AGENT_CHUNK_SIZE,
+    AGENT_RECONNECTION_STABILITY_TIME,
+    AGENT_RECONNECTION_TIME,
+    FREQUENCY,
+    HAPROXY_HELPER,
+    HAPROXY_PORT,
+    HAPROXY_PROTOCOL,
+    IMBALANCE_TOLERANCE,
+    REMOVE_DISCONNECTED_NODE_AFTER,
+    get_cluster_items,
+    read_config,
+)
 from wazuh.core.InputValidator import InputValidator
-from wazuh.core.cluster.utils import get_cluster_items, read_config
-from wazuh.core.utils import blake2b, mkdir_with_mode, get_utc_now, get_date_from_timestamp, to_relative_path
+from wazuh.core.utils import blake2b, get_date_from_timestamp, get_utc_now, mkdir_with_mode, to_relative_path
 
 logger = logging.getLogger('wazuh')
 
 # Separators used in compression/decompression functions to delimit files.
 FILE_SEP = '|@@//@@|'
 PATH_SEP = '|//@@//|'
+MIN_PORT = 1024
+MAX_PÖRT = 65535
 
 
 #
 # Cluster
 #
 
 
+def validate_haproxy_helper_config(config: dict):
+    """Validate the values of the given HAProxy helper configuration.
+
+    Parameters
+    ----------
+    config : dict
+        Configuration to validate.
+
+    Raises
+    ------
+    WazuhError(3004)
+        If there is any invalid value.
+ """ + SCHEMA = { + 'type': 'object', + 'properties': { + HAPROXY_PORT: {'type': 'integer', 'minimum': MIN_PORT, 'maximum': MAX_PÖRT}, + HAPROXY_PROTOCOL: {'type': 'string', 'enum': ['http', 'https']}, + FREQUENCY: {'type': 'integer', 'minimum': 10}, + AGENT_RECONNECTION_STABILITY_TIME: {'type': 'integer', 'minimum': 10}, + AGENT_CHUNK_SIZE: {'type': 'integer', 'minimum': 100}, + AGENT_RECONNECTION_TIME: {'type': 'integer', 'minimum': 0}, + IMBALANCE_TOLERANCE: {'type': 'number', 'minimum': 0, 'exclusiveMinimum': True, 'maximum': 1}, + REMOVE_DISCONNECTED_NODE_AFTER: {'type': 'integer', 'minimum': 0}, + }, + } + + try: + validate(config, SCHEMA) + except ValidationError as error: + raise WazuhError( + 3004, + f'Invalid value for {error.path.pop()}. {error.message}' + ) + def check_cluster_config(config): """Verify that cluster configuration is correct. @@ -71,8 +121,8 @@ def check_cluster_config(config): elif not isinstance(config['port'], int): raise WazuhError(3004, "Port has to be an integer.") - elif not 1024 < config['port'] < 65535: - raise WazuhError(3004, "Port must be higher than 1024 and lower than 65535.") + elif not MIN_PORT < config['port'] < MAX_PÖRT: + raise WazuhError(3004, f"Port must be higher than {MIN_PORT} and lower than {MAX_PÖRT}.") if len(config['nodes']) > 1: logger.warning( @@ -84,6 +134,8 @@ def check_cluster_config(config): if len(invalid_elements) != 0: raise WazuhError(3004, f"Invalid elements in node fields: {', '.join(invalid_elements)}.") + validate_haproxy_helper_config(config.get(HAPROXY_HELPER, {})) + def get_node(): """Get dict with current active node information. @@ -374,7 +426,7 @@ def compress_files(name, list_path, cluster_control_json=None, max_zip_size=None except zlib.error as e: raise WazuhError(3001, str(e)) except Exception as e: - result_logs['debug'][file].append(f"Exception raised: " + str(WazuhException(3001, str(e)))) + result_logs['debug'][file].append("Exception raised: " + str(WazuhException(3001, str(e)))) update_cluster_control(file, cluster_control_json, exists=False) try: diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index c2f00fc69ae..d75cc9198fd 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -536,4 +536,4 @@ async def start(cls): except Exception as unexpected_exc: logger.critical(f'Unexpected exception: {unexpected_exc}', exc_info=True) finally: - logger.info('Process ended') + logger.info('Task ended') diff --git a/framework/wazuh/core/cluster/utils.py b/framework/wazuh/core/cluster/utils.py index 96bf5cb6899..4f5b0e4ab2d 100644 --- a/framework/wazuh/core/cluster/utils.py +++ b/framework/wazuh/core/cluster/utils.py @@ -62,33 +62,24 @@ } -def validate_haproxy_helper_config(helper_config: dict) -> dict: - """Validate HAProxy helper configuration section. +def parse_haproxy_helper_integer_values(helper_config: dict) -> dict: + """Parse HAProxy helper integer values. Parameters ---------- helper_config : dict - Configuration to validate. + Configuration to parse. Returns ------- dict - Validated configuration for HAProxy Helper. + Parsed configuration with integer values. Raises ------ WazuhError (3004) If some value has an invalid type. """ - # If any value is missing from user's cluster configuration, add the default one. 
- for value_name in set(HELPER_DEFAULTS.keys()) - set(helper_config.keys()): - helper_config[value_name] = HELPER_DEFAULTS[value_name] - - if helper_config[HAPROXY_DISABLED] == NO: - helper_config[HAPROXY_DISABLED] = False - elif helper_config[HAPROXY_DISABLED] == YES: - helper_config[HAPROXY_DISABLED] = True - for field in [ HAPROXY_PORT, FREQUENCY, @@ -102,13 +93,65 @@ def validate_haproxy_helper_config(helper_config: dict) -> dict: helper_config[field] = int(helper_config[field]) except ValueError: raise WazuhError(3004, extra_message=f"HAProxy Helper {field} must be an integer.") + return helper_config + + +def parse_haproxy_helper_float_values(helper_config: dict) -> dict: + """Parse HAProxy helper float values. + + Parameters + ---------- + helper_config : dict + Configuration to parse. + + Returns + ------- + dict + Parsed configuration with float values. + Raises + ------ + WazuhError (3004) + If some value has an invalid type. + """ for field in [IMBALANCE_TOLERANCE]: if helper_config.get(field): try: helper_config[field] = float(helper_config[field]) except ValueError: raise WazuhError(3004, extra_message=f"HAProxy Helper {field} must be a float.") + return helper_config + + +def parse_haproxy_helper_config(helper_config: dict) -> dict: + """Parse HAProxy helper configuration section. + + Parameters + ---------- + helper_config : dict + Configuration to parse. + + Returns + ------- + dict + Parsed configuration for HAProxy Helper. + + Raises + ------ + WazuhError (3004) + If some value has an invalid type. + """ + # If any value is missing from user's cluster configuration, add the default one. + for value_name in set(HELPER_DEFAULTS.keys()) - set(helper_config.keys()): + helper_config[value_name] = HELPER_DEFAULTS[value_name] + + if helper_config[HAPROXY_DISABLED] == NO: + helper_config[HAPROXY_DISABLED] = False + elif helper_config[HAPROXY_DISABLED] == YES: + helper_config[HAPROXY_DISABLED] = True + + helper_config = parse_haproxy_helper_integer_values(helper_config) + helper_config = parse_haproxy_helper_float_values(helper_config) return helper_config @@ -178,7 +221,7 @@ def read_cluster_config(config_file=common.OSSEC_CONF, from_import=False) -> typ config_cluster['node_type'] = 'worker' if config_cluster.get(HAPROXY_HELPER): - config_cluster[HAPROXY_HELPER] = validate_haproxy_helper_config(config_cluster[HAPROXY_HELPER]) + config_cluster[HAPROXY_HELPER] = parse_haproxy_helper_config(config_cluster[HAPROXY_HELPER]) return config_cluster From d23c8277be6456a290f0c66b4873716e6a020a5b Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Mon, 22 Apr 2024 20:17:08 -0300 Subject: [PATCH 353/419] Update default values --- framework/wazuh/core/cluster/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/framework/wazuh/core/cluster/utils.py b/framework/wazuh/core/cluster/utils.py index 4f5b0e4ab2d..12931bdc92f 100644 --- a/framework/wazuh/core/cluster/utils.py +++ b/framework/wazuh/core/cluster/utils.py @@ -54,11 +54,11 @@ HAPROXY_RESOLVER: None, EXCLUDED_NODES: [], FREQUENCY: 60, - AGENT_CHUNK_SIZE: 120, + AGENT_CHUNK_SIZE: 300, AGENT_RECONNECTION_TIME: 5, AGENT_RECONNECTION_STABILITY_TIME: 60, IMBALANCE_TOLERANCE: 0.1, - REMOVE_DISCONNECTED_NODE_AFTER: 3, + REMOVE_DISCONNECTED_NODE_AFTER: 240, } From 7291f722fdfbacdeda52d6b18b24cbe49ec6e543 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Tue, 23 Apr 2024 13:17:14 -0300 Subject: [PATCH 354/419] Fix constant name --- framework/wazuh/core/cluster/cluster.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 
deletions(-) diff --git a/framework/wazuh/core/cluster/cluster.py b/framework/wazuh/core/cluster/cluster.py index f65d4433c07..ca8a3d8cd77 100644 --- a/framework/wazuh/core/cluster/cluster.py +++ b/framework/wazuh/core/cluster/cluster.py @@ -41,7 +41,7 @@ FILE_SEP = '|@@//@@|' PATH_SEP = '|//@@//|' MIN_PORT = 1024 -MAX_PÖRT = 65535 +MAX_PORT = 65535 # @@ -64,7 +64,7 @@ def validate_haproxy_helper_config(config: dict): SCHEMA = { 'type': 'object', 'properties': { - HAPROXY_PORT: {'type': 'integer', 'minimum': MIN_PORT, 'maximum': MAX_PÖRT}, + HAPROXY_PORT: {'type': 'integer', 'minimum': MIN_PORT, 'maximum': MAX_PORT}, HAPROXY_PROTOCOL: {'type': 'string', 'enum': ['http', 'https']}, FREQUENCY: {'type': 'integer', 'minimum': 10}, AGENT_RECONNECTION_STABILITY_TIME: {'type': 'integer', 'minimum': 10}, @@ -121,8 +121,8 @@ def check_cluster_config(config): elif not isinstance(config['port'], int): raise WazuhError(3004, "Port has to be an integer.") - elif not MIN_PORT < config['port'] < MAX_PÖRT: - raise WazuhError(3004, f"Port must be higher than {MIN_PORT} and lower than {MAX_PÖRT}.") + elif not MIN_PORT < config['port'] < MAX_PORT: + raise WazuhError(3004, f"Port must be higher than {MIN_PORT} and lower than {MAX_PORT}.") if len(config['nodes']) > 1: logger.warning( From 59659ec0000d97f8d02b17f1a3ece97a6c361fc7 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Mon, 8 Apr 2024 14:55:50 -0300 Subject: [PATCH 355/419] Added and fixed test case for test_master_main --- .../scripts/tests/test_wazuh_clusterd.py | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/framework/scripts/tests/test_wazuh_clusterd.py b/framework/scripts/tests/test_wazuh_clusterd.py index 6e2bf168557..1bb78ca97de 100644 --- a/framework/scripts/tests/test_wazuh_clusterd.py +++ b/framework/scripts/tests/test_wazuh_clusterd.py @@ -8,6 +8,7 @@ import pytest import scripts.wazuh_clusterd as wazuh_clusterd +from wazuh.core.cluster.utils import HAPROXY_DISABLED, HAPROXY_HELPER def test_set_logging(): @@ -89,11 +90,12 @@ def original_sig_handler(signum, frame): delete_pid_mock.assert_called_once_with('wazuh-clusterd', 1001) original_sig_handler_mock.assert_not_called() - +@pytest.mark.parametrize('helper_disabled', (True, False)) @pytest.mark.asyncio -async def test_master_main(): +async def test_master_main(helper_disabled: bool): """Check and set the behavior of master_main function.""" import wazuh.core.cluster.utils as cluster_utils + cluster_config = {'test': 'config', HAPROXY_HELPER: {HAPROXY_DISABLED: helper_disabled}} class Arguments: def __init__(self, performance_test, concurrency_test, ssl): @@ -113,7 +115,7 @@ class MasterMock: def __init__(self, performance_test, concurrency_test, configuration, enable_ssl, logger, cluster_items): assert performance_test == 'test_performance' assert concurrency_test == 'concurrency_test' - assert configuration == {'test': 'config'} + assert configuration == cluster_config assert enable_ssl is True assert logger == 'test_logger' assert cluster_items == {'node': 'item'} @@ -127,7 +129,7 @@ def __init__(self, performance_test, logger, concurrency_test, node, configurati assert performance_test == 'test_performance' assert logger == 'test_logger' assert concurrency_test == 'concurrency_test' - assert configuration == {'test': 'config'} + assert configuration == cluster_config assert enable_ssl is True assert cluster_items == {'node': 'item'} @@ -140,11 +142,11 @@ def start(cls): return 'HAPHELPER_START' - async def gather(first, second): + async def 
gather(first, second, third=None): assert first == 'MASTER_START' assert second == 'LOCALSERVER_START' - # FIXME: (20940) When write UT for the new components. - # assert third == 'HAPHELPER_START' + if third is not None: + assert third == 'HAPHELPER_START' wazuh_clusterd.cluster_utils = cluster_utils @@ -154,7 +156,9 @@ async def gather(first, second): with patch('wazuh.core.cluster.local_server.LocalServerMaster', LocalServerMasterMock): with patch('wazuh.core.cluster.hap_helper.hap_helper.HAPHelper', HAPHElperMock): await wazuh_clusterd.master_main( - args=args, cluster_config={'test': 'config'}, cluster_items={'node': 'item'}, + args=args, + cluster_config=cluster_config, + cluster_items={'node': 'item'}, logger='test_logger' ) From 748de2b94b3a017406eaf7647010ce48ae6b002a Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Mon, 8 Apr 2024 14:58:47 -0300 Subject: [PATCH 356/419] Added main structure for HAP Helper tests --- framework/wazuh/core/cluster/hap_helper/tests/__init__.py | 0 framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py | 0 framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py | 0 framework/wazuh/core/cluster/hap_helper/tests/test_wazuh.py | 0 4 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 framework/wazuh/core/cluster/hap_helper/tests/__init__.py create mode 100644 framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py create mode 100644 framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py create mode 100644 framework/wazuh/core/cluster/hap_helper/tests/test_wazuh.py diff --git a/framework/wazuh/core/cluster/hap_helper/tests/__init__.py b/framework/wazuh/core/cluster/hap_helper/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py b/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py b/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/framework/wazuh/core/cluster/hap_helper/tests/test_wazuh.py b/framework/wazuh/core/cluster/hap_helper/tests/test_wazuh.py new file mode 100644 index 00000000000..e69de29bb2d From cde626457f88a5eb0253ca43713ff25dda5c1fe8 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Tue, 9 Apr 2024 16:35:21 -0300 Subject: [PATCH 357/419] Added tests for framework/wazuh/core/cluster/hap_helper/wazuh.py module --- .../cluster/hap_helper/tests/test_wazuh.py | 169 ++++++++++++++++++ 1 file changed, 169 insertions(+) diff --git a/framework/wazuh/core/cluster/hap_helper/tests/test_wazuh.py b/framework/wazuh/core/cluster/hap_helper/tests/test_wazuh.py index e69de29bb2d..4a9e3aa7e17 100644 --- a/framework/wazuh/core/cluster/hap_helper/tests/test_wazuh.py +++ b/framework/wazuh/core/cluster/hap_helper/tests/test_wazuh.py @@ -0,0 +1,169 @@ +from typing import Optional +from unittest import mock + +import pytest +from wazuh.agent import get_agents, reconnect_agents +from wazuh.cluster import get_nodes_info +from wazuh.core.results import AffectedItemsWazuhResult + +from framework.wazuh.core.cluster.hap_helper.wazuh import WazuhAgent, WazuhDAPI + + +class TestWazuhAgent: + @pytest.mark.parametrize('version,expected', [('v4.2.0', False), ('v4.3.0', True), ('v4.4.0', True)]) + def test_can_reconnect_returns_correct_value(self, version: str, expected: bool): + """Check the correct output of `can_reconnect` 
function.""" + + assert WazuhAgent.can_reconnect(version) == expected + + def test_get_agents_able_to_reconnect_returns_correct_items(self): + """Check the correct output of `get_agents_able_to_reconnect` function.""" + + agents = [ + {'id': 1, 'version': 'v4.2.0'}, + {'id': 2, 'version': 'v4.3.0'}, + {'id': 3, 'version': 'v4.4.0'}, + ] + + assert WazuhAgent.get_agents_able_to_reconnect(agents_list=agents) == [2, 3] + + +@mock.patch('framework.wazuh.core.cluster.hap_helper.wazuh.DistributedAPI', autospec=True) +class TestWazuhDAPI: + wazuh_dapi = WazuhDAPI(tag='test') + + @pytest.fixture + def fixture_affected_items_result(self): + return AffectedItemsWazuhResult() + + @pytest.mark.parametrize( + 'nodes_data,excluded_nodes', + ( + ( + [ + {'name': 'worker1', 'ip': '192.168.0.1'}, + {'name': 'worker2', 'ip': '192.168.0.2'}, + {'name': 'worker3', 'ip': '192.168.0.3'}, + ], + [], + ), + ( + [ + {'name': 'worker1', 'ip': '192.168.0.1'}, + {'name': 'worker2', 'ip': '192.168.0.2'}, + {'name': 'worker3', 'ip': '192.168.0.3'}, + ], + ['worker1'], + ), + ), + ) + @mock.patch('framework.wazuh.core.cluster.hap_helper.wazuh.get_system_nodes', return_value={}) + async def test_get_cluster_nodes_returns_correct_information( + self, + get_system_nodes_mock: mock.AsyncMock, + dapi_mock: mock.MagicMock, + fixture_affected_items_result: AffectedItemsWazuhResult, + nodes_data: list, + excluded_nodes: list, + ): + """Check the correct output of `get_cluster_nodes` function.""" + + self.wazuh_dapi.excluded_nodes = excluded_nodes + fixture_affected_items_result.affected_items = nodes_data + dapi_mock.return_value.distribute_function.return_value = fixture_affected_items_result + + ret_val = await self.wazuh_dapi.get_cluster_nodes() + + dapi_mock.assert_called_once_with( + f=get_nodes_info, + f_kwargs=None, + logger=self.wazuh_dapi.logger, + request_type='local_master', + is_async=True, + local_client_arg='lc', + nodes={}, + ) + assert ret_val == {item['name']: item['ip'] for item in nodes_data if item['name'] not in excluded_nodes} + + async def test_reconnect_agents_correct_information( + self, + dapi_mock: mock.MagicMock, + fixture_affected_items_result: AffectedItemsWazuhResult, + ): + """Check the correct output of `reconnect_agents` function.""" + + agent_list = [1, 2, 3] + fixture_affected_items_result.affected_items = agent_list + dapi_mock.return_value.distribute_function.return_value = fixture_affected_items_result + + ret_val = await self.wazuh_dapi.reconnect_agents(agent_list=agent_list) + + dapi_mock.assert_called_once_with( + f=reconnect_agents, + f_kwargs={'agent_list': agent_list}, + logger=self.wazuh_dapi.logger, + request_type='distributed_master', + wait_for_complete=True, + ) + assert ret_val == agent_list + + async def test_get_agents_node_distribution_returns_correct_information( + self, + dapi_mock: mock.MagicMock, + fixture_affected_items_result: AffectedItemsWazuhResult, + ): + """Check the correct output of `get_agents_node_distribution` function.""" + + agents_data = [ + {'id': 1, 'name': 'agent1', 'version': '4.9.0', 'node_name': 'worker1'}, + {'id': 2, 'name': 'agent2', 'version': '4.9.0', 'node_name': 'worker2'}, + ] + fixture_affected_items_result.affected_items = agents_data + dapi_mock.return_value.distribute_function.return_value = fixture_affected_items_result + + ret_val = await self.wazuh_dapi.get_agents_node_distribution() + + dapi_mock.assert_called_once_with( + f=get_agents, + f_kwargs={ + 'select': ['node_name', 'version'], + 'sort': {'fields': ['version', 'id'], 'order': 
'desc'}, + 'filters': {'status': 'active'}, + 'q': 'id!=000', + 'limit': self.wazuh_dapi.AGENTS_MAX_LIMIT, + }, + logger=self.wazuh_dapi.logger, + request_type='local_master', + ) + assert ret_val == {'worker1': [{'id': 1, 'version': '4.9.0'}], 'worker2': [{'id': 2, 'version': '4.9.0'}]} + + @pytest.mark.parametrize('limit', [100, None]) + async def test_get_agents_belonging_to_node_returns_correct_information( + self, dapi_mock: mock.MagicMock, fixture_affected_items_result: AffectedItemsWazuhResult, limit: Optional[int] + ): + """Check the correct output of `get_agents_belonging_to_node` function.""" + + agents_data = [ + {'id': 1, 'name': 'agent1', 'version': '4.9.0'}, + {'id': 2, 'name': 'agent2', 'version': '4.9.0'}, + ] + fixture_affected_items_result.affected_items = agents_data + dapi_mock.return_value.distribute_function.return_value = fixture_affected_items_result + + node_name = 'worker1' + + ret_val = await self.wazuh_dapi.get_agents_belonging_to_node(node_name=node_name, limit=limit) + + dapi_mock.assert_called_once_with( + f=get_agents, + f_kwargs={ + 'select': ['version'], + 'sort': {'fields': ['version', 'id'], 'order': 'desc'}, + 'filters': {'status': 'active', 'node_name': node_name}, + 'q': 'id!=000', + 'limit': limit or self.wazuh_dapi.AGENTS_MAX_LIMIT, + }, + logger=self.wazuh_dapi.logger, + request_type='local_master', + ) + assert ret_val == agents_data From ac4802bf2f29f8aa1a3d32d30a5024fc7a914594 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Thu, 11 Apr 2024 19:34:18 -0300 Subject: [PATCH 358/419] Added tests for framework/wazuh/core/cluster/hap_helper/proxy.py module --- .../wazuh/core/cluster/hap_helper/proxy.py | 41 +- .../cluster/hap_helper/tests/test_proxy.py | 757 ++++++++++++++++++ 2 files changed, 778 insertions(+), 20 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index 32bfbb04522..082e68ab64a 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -2,6 +2,7 @@ import json import logging from enum import Enum +from os.path import join from typing import Literal, Optional, TypeAlias import httpx @@ -73,7 +74,7 @@ class ProxyBalanceAlgorithm(Enum): class ProxyAPI: """Wrapper for calling HAProxy REST API""" - HAP_ENDPOINT = '/v2' + HAP_ENDPOINT = 'v2' def __init__( self, @@ -104,7 +105,7 @@ async def initialize(self): try: async with httpx.AsyncClient(verify=False) as client: response = await client.get( - f'{self.protocol}://{self.address}:{self.port}/{self.HAP_ENDPOINT}/health', + join(f'{self.protocol}://', f'{self.address}:{self.port}', self.HAP_ENDPOINT, 'health'), auth=(self.username, self.password), ) if response.status_code == 401: @@ -150,7 +151,7 @@ async def _make_hap_request( """ context_tag.set(self.tag) version_key = '_version' - uri = f'{self.protocol}://{self.address}:{self.port}{self.HAP_ENDPOINT}/{endpoint}' + uri = join(f'{self.protocol}://', f'{self.address}:{self.port}', self.HAP_ENDPOINT, endpoint) query_parameters = query_parameters or {} query_parameters.update({'version': self.version}) @@ -186,7 +187,7 @@ async def _make_hap_request( async def update_configuration_version(self): """Get the last version of the configuration schema and set it.""" - configuration_version = await self._make_hap_request('/services/haproxy/configuration/version') + configuration_version = await self._make_hap_request('services/haproxy/configuration/version') self.version = configuration_version async def 
get_runtime_info(self) -> PROXY_API_RESPONSE: @@ -197,7 +198,7 @@ async def get_runtime_info(self) -> PROXY_API_RESPONSE: PROXY_API_RESPONSE The runtime information. """ - return (await self._make_hap_request('/services/haproxy/runtime/info'))[0]['info'] + return (await self._make_hap_request('services/haproxy/runtime/info'))[0]['info'] async def get_global_configuration(self) -> dict: """Get the global configuration from HAProxy. @@ -207,7 +208,7 @@ async def get_global_configuration(self) -> dict: dict The current global configuration. """ - return (await self._make_hap_request('/services/haproxy/configuration/global'))['data'] + return (await self._make_hap_request('services/haproxy/configuration/global'))['data'] async def update_global_configuration(self, new_configuration: dict): """Apply the new global configuration. @@ -218,7 +219,7 @@ async def update_global_configuration(self, new_configuration: dict): New global configuration to apply. """ await self._make_hap_request( - '/services/haproxy/configuration/global', json_body=new_configuration, method=ProxyAPIMethod.PUT + 'services/haproxy/configuration/global', json_body=new_configuration, method=ProxyAPIMethod.PUT ) async def get_backends(self) -> PROXY_API_RESPONSE: @@ -229,7 +230,7 @@ async def get_backends(self) -> PROXY_API_RESPONSE: PROXY_API_RESPONSE Information of configured backends. """ - return await self._make_hap_request(endpoint='/services/haproxy/configuration/backends') + return await self._make_hap_request(endpoint='services/haproxy/configuration/backends') async def add_backend( self, @@ -257,7 +258,7 @@ async def add_backend( json_body = {'name': name, 'mode': mode.value, 'balance': {'algorithm': algorithm.value}} return await self._make_hap_request( - '/services/haproxy/configuration/backends', + 'services/haproxy/configuration/backends', method=ProxyAPIMethod.POST, query_parameters=query_params, json_body=json_body, @@ -278,11 +279,11 @@ async def get_backend_servers(self, backend: str) -> PROXY_API_RESPONSE: """ return await self._make_hap_request( - '/services/haproxy/configuration/servers', query_parameters={'backend': backend} + 'services/haproxy/configuration/servers', query_parameters={'backend': backend} ) async def add_server_to_backend( - self, backend: str, server_name: str, server_address: str, port: int, resolver: Optional[str] + self, backend: str, server_name: str, server_address: str, port: int, resolver: Optional[str] = None ) -> PROXY_API_RESPONSE: """Add a new server to the provided backend. @@ -318,7 +319,7 @@ async def add_server_to_backend( ) return await self._make_hap_request( - '/services/haproxy/configuration/servers', + 'services/haproxy/configuration/servers', method=ProxyAPIMethod.POST, query_parameters=query_params, json_body=json_body, @@ -342,7 +343,7 @@ async def remove_server_from_backend(self, backend: str, server_name: str) -> PR query_params = {'backend': backend, 'force_reload': True} return await self._make_hap_request( - f'/services/haproxy/configuration/servers/{server_name}', + f'services/haproxy/configuration/servers/{server_name}', method=ProxyAPIMethod.DELETE, query_parameters=query_params, ) @@ -355,7 +356,7 @@ async def get_frontends(self) -> PROXY_API_RESPONSE: PROXY_API_RESPONSE Information of configured frontends. 
""" - return await self._make_hap_request(endpoint='/services/haproxy/configuration/frontends') + return await self._make_hap_request(endpoint='services/haproxy/configuration/frontends') async def add_frontend( self, name: str, port: int, backend: str, mode: CommunicationProtocol = CommunicationProtocol.TCP @@ -382,7 +383,7 @@ async def add_frontend( frontend_json_body = {'name': name, 'mode': mode.value, 'default_backend': backend} frontend_response = await self._make_hap_request( - '/services/haproxy/configuration/frontends', + 'services/haproxy/configuration/frontends', method=ProxyAPIMethod.POST, query_parameters=frontend_query_params, json_body=frontend_json_body, @@ -393,7 +394,7 @@ async def add_frontend( bind_json_body = {'port': port, 'name': f'{frontend_name}_bind'} await self._make_hap_request( - '/services/haproxy/configuration/binds', + 'services/haproxy/configuration/binds', method=ProxyAPIMethod.POST, query_parameters=bind_query_params, json_body=bind_json_body, @@ -419,7 +420,7 @@ async def get_backend_server_runtime_settings(self, backend_name: str, server_na query_params = {'backend': backend_name, 'name': server_name} return await self._make_hap_request( - f'/services/haproxy/runtime/servers/{server_name}', query_parameters=query_params + f'services/haproxy/runtime/servers/{server_name}', query_parameters=query_params ) async def change_backend_server_state( @@ -445,7 +446,7 @@ async def change_backend_server_state( json_body = {'admin_state': state.value} return await self._make_hap_request( - f'/services/haproxy/runtime/servers/{server_name}', + f'services/haproxy/runtime/servers/{server_name}', method=ProxyAPIMethod.PUT, query_parameters=query_params, json_body=json_body, @@ -466,7 +467,7 @@ async def get_backend_stats(self, backend_name: str) -> PROXY_API_RESPONSE: """ query_params = {'type': 'backend', 'name': backend_name} - return await self._make_hap_request('/services/haproxy/stats/native', query_parameters=query_params) + return await self._make_hap_request('services/haproxy/stats/native', query_parameters=query_params) async def get_backend_server_stats(self, backend_name: str, server_name: str) -> PROXY_API_RESPONSE: """Get the statistics of the provided backend server. 
@@ -485,7 +486,7 @@ async def get_backend_server_stats(self, backend_name: str, server_name: str) -> """ query_params = {'type': 'server', 'parent': backend_name, 'name': server_name.lower()} - return await self._make_hap_request('/services/haproxy/stats/native', query_parameters=query_params) + return await self._make_hap_request('services/haproxy/stats/native', query_parameters=query_params) class Proxy: diff --git a/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py b/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py index e69de29bb2d..201df079702 100644 --- a/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py @@ -0,0 +1,757 @@ +import json +import random +from unittest import mock + +import httpx +import pytest +from wazuh.core.cluster.hap_helper.proxy import ( + CommunicationProtocol, + Proxy, + ProxyAPI, + ProxyAPIMethod, + ProxyBalanceAlgorithm, + ProxyServerState, +) +from wazuh.core.exception import WazuhHAPHelperError + + +class TestProxyAPI: + METHODS_KWARGS = ( + 'method,f_kwargs', + [ + ('update_configuration_version', {}), + ('get_runtime_info', {}), + ('get_global_configuration', {}), + ('update_global_configuration', {'new_configuration': {'foo': 1}}), + ('get_backends', {}), + ('add_backend', {'name': 'foo'}), + ('get_backend_servers', {'backend': 'foo'}), + ( + 'add_server_to_backend', + {'backend': 'foo', 'server_name': 'bar', 'server_address': '192.168.0.1', 'port': 1514}, + ), + ('remove_server_from_backend', {'backend': 'foo', 'server_name': 'bar'}), + ('get_frontends', {}), + ('add_frontend', {'backend': 'foo', 'name': 'bar', 'port': 1514}), + ('get_backend_server_runtime_settings', {'backend_name': 'foo', 'server_name': 'bar'}), + ( + 'change_backend_server_state', + {'backend_name': 'foo', 'server_name': 'bar', 'state': ProxyServerState.DRAIN}, + ), + ('get_backend_stats', {'backend_name': 'foo'}), + ('get_backend_server_stats', {'backend_name': 'foo', 'server_name': 'bar'}), + ], + ) + + @pytest.fixture + def proxy_api(self): + return ProxyAPI(username='test', password='test', tag='test') + + @pytest.fixture + def get_mock(self): + with mock.patch('framework.wazuh.core.cluster.hap_helper.proxy.httpx.AsyncClient.get') as get_mock: + yield get_mock + + @pytest.fixture + def request_mock(self): + with mock.patch('framework.wazuh.core.cluster.hap_helper.proxy.httpx.AsyncClient.request') as request_mock: + yield request_mock + + async def test_initialize_runs_ok(self, proxy_api: ProxyAPI, get_mock: mock.AsyncMock): + await proxy_api.initialize() + + get_mock.assert_called_once_with( + f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/health', + auth=(proxy_api.username, proxy_api.password), + ) + + @pytest.mark.parametrize( + 'status_code,side_effect,expected', + ( + [401, None, 3046], + [404, None, 3047], + [None, httpx.ConnectError, 3043], + [None, httpx.RequestError, 3043], + ), + ) + async def test_initialize_dont_raise_in_case_of_error( + self, + proxy_api: ProxyAPI, + get_mock: mock.AsyncMock, + status_code: int | None, + side_effect: Exception | None, + expected: int, + ): + if status_code is not None: + get_mock.return_value = mock.MagicMock(status_code=status_code) + + if side_effect is not None: + get_mock.side_effect = side_effect('Some error message') + + with pytest.raises(WazuhHAPHelperError, match=f'.*{expected}.*'): + await proxy_api.initialize() + + @pytest.mark.parametrize( + 'response,side_effect,expected', + ( + [{'status_code': 401, 
'is_success': False}, None, 3046], + [ + {'status_code': random.choice([403, 404, 500]), 'is_success': False, 'json.return_value': '{}'}, + None, + 3045, + ], + [{}, httpx.RequestError, 3044], + ), + ) + @pytest.mark.parametrize(*METHODS_KWARGS) + async def test_proxy_method_dont_raise_in_case_of_error( + self, + proxy_api: ProxyAPI, + request_mock: mock.AsyncMock, + response: dict, + side_effect: Exception | None, + expected: int, + method, + f_kwargs, + ): + if response: + request_mock.return_value = mock.MagicMock(**response) + if side_effect is not None: + request_mock.side_effect = side_effect('Some error message') + + with pytest.raises(WazuhHAPHelperError, match=f'.*{expected}.*'): + await getattr(proxy_api, method)(**f_kwargs) + + async def test_update_configuration_version_set_correct_version( + self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock + ): + endpoint = 'services/haproxy/configuration/version' + version = '1' + request_mock.return_value = mock.MagicMock( + **{'status_code': 200, 'is_success': True, 'json.return_value': version} + ) + + await proxy_api.update_configuration_version() + + request_mock.assert_called_once_with( + method=ProxyAPIMethod.GET.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'version': 0}, + ) + assert proxy_api.version == version + + async def test_get_runtime_info_returns_correct_information( + self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock + ): + endpoint = 'services/haproxy/runtime/info' + info = {'foo': 1, 'bar': 2} + request_mock.return_value = mock.MagicMock( + **{'status_code': 200, 'is_success': True, 'json.return_value': [{'info': info}]} + ) + + ret_val = await proxy_api.get_runtime_info() + + request_mock.assert_called_once_with( + method=ProxyAPIMethod.GET.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'version': 0}, + ) + assert ret_val == info + + async def test_get_global_configuration_returns_correct_information( + self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock + ): + endpoint = 'services/haproxy/configuration/global' + data = {'foo': 1, 'bar': 2} + request_mock.return_value = mock.MagicMock( + **{'status_code': 200, 'is_success': True, 'json.return_value': {'data': data}} + ) + + ret_val = await proxy_api.get_global_configuration() + + request_mock.assert_called_once_with( + method=ProxyAPIMethod.GET.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'version': 0}, + ) + assert ret_val == data + + async def test_update_global_configuration_makes_correct_request( + self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock + ): + endpoint = 'services/haproxy/configuration/global' + version_endpoint = 'services/haproxy/configuration/version' + new_configuration = {'foo': 1, 'bar': 2} + request_mock.return_value = mock.MagicMock( + **{ + 'status_code': 202, + 'is_success': True, + 'json.side_effect': json.JSONDecodeError(msg='Some Error', doc='', pos=1), + } + ) + + await proxy_api.update_global_configuration(new_configuration) + + assert request_mock.call_count == 2 + request_mock.assert_any_call( + method=ProxyAPIMethod.PUT.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}', + auth=(proxy_api.username, proxy_api.password), + 
json=new_configuration, + params={'version': 0}, + ) + request_mock.assert_called_with( + method=ProxyAPIMethod.GET.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{version_endpoint}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'version': 0}, + ) + + async def test_get_backends_returns_correct_information(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): + endpoint = 'services/haproxy/configuration/backends' + data = {'data': {'foo': 1, 'bar': 2}} + request_mock.return_value = mock.MagicMock( + **{'status_code': 200, 'is_success': True, 'json.return_value': data} + ) + + ret_val = await proxy_api.get_backends() + + request_mock.assert_called_once_with( + method=ProxyAPIMethod.GET.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'version': 0}, + ) + assert ret_val == data + + async def test_add_backend_makes_correct_request(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): + endpoint = 'services/haproxy/configuration/backends' + version_endpoint = 'services/haproxy/configuration/version' + + request_mock.return_value = mock.MagicMock(**{'status_code': 200, 'is_success': True}) + + name = 'foo' + await proxy_api.add_backend(name) + + assert request_mock.call_count == 2 + request_mock.assert_any_call( + method=ProxyAPIMethod.POST.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}', + auth=(proxy_api.username, proxy_api.password), + json={ + 'name': name, + 'mode': CommunicationProtocol.TCP.value, + 'balance': {'algorithm': ProxyBalanceAlgorithm.LEAST_CONNECTIONS.value}, + }, + params={'force_reload': True, 'version': 0}, + ) + request_mock.assert_called_with( + method=ProxyAPIMethod.GET.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{version_endpoint}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'version': 0}, + ) + + async def test_get_backend_servers_returns_correct_information( + self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock + ): + endpoint = 'services/haproxy/configuration/servers' + data = {'data': {'foo': 1, 'bar': 2}} + request_mock.return_value = mock.MagicMock( + **{'status_code': 200, 'is_success': True, 'json.return_value': data} + ) + + backend = 'foo' + ret_val = await proxy_api.get_backend_servers(backend) + + request_mock.assert_called_once_with( + method=ProxyAPIMethod.GET.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'backend': backend, 'version': 0}, + ) + assert ret_val == data + + @pytest.mark.parametrize( + 'server_address,is_ip_address,resolver', + (['192.168.0.1', True, None], ['192.168.0.1', True, 'some-resolver'], ['some-address', False, 'some-resolver']), + ) + async def test_add_server_to_backend_makes_correct_request( + self, + proxy_api: ProxyAPI, + request_mock: mock.AsyncMock, + server_address: str, + is_ip_address: bool, + resolver: str | None, + ): + endpoint = 'services/haproxy/configuration/servers' + version_endpoint = 'services/haproxy/configuration/version' + + request_mock.return_value = mock.MagicMock(**{'status_code': 201, 'is_success': True}) + + backend = 'foo' + server_name = 'bar' + port = 1514 + + json_body = {'check': 'enabled', 'name': server_name, 'address': server_address, 'port': port} + + json_body.update( + {'resolvers': resolver, 
'init-addr': 'last,libc,none'} if resolver is not None and not is_ip_address else {} + ) + + await proxy_api.add_server_to_backend(backend, server_name, server_address, port, resolver) + + assert request_mock.call_count == 2 + request_mock.assert_any_call( + method=ProxyAPIMethod.POST.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}', + auth=(proxy_api.username, proxy_api.password), + json=json_body, + params={'backend': backend, 'force_reload': True, 'version': 0}, + ) + request_mock.assert_called_with( + method=ProxyAPIMethod.GET.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{version_endpoint}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'version': 0}, + ) + + async def test_remove_server_from_backend_makes_correct_request( + self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock + ): + endpoint = 'services/haproxy/configuration/servers' + version_endpoint = 'services/haproxy/configuration/version' + + request_mock.return_value = mock.MagicMock(**{'status_code': 204, 'is_success': True}) + + backend = 'foo' + server_name = 'bar' + + await proxy_api.remove_server_from_backend(backend, server_name) + + assert request_mock.call_count == 2 + request_mock.assert_any_call( + method=ProxyAPIMethod.DELETE.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}/{server_name}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'backend': backend, 'force_reload': True, 'version': 0}, + ) + request_mock.assert_called_with( + method=ProxyAPIMethod.GET.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{version_endpoint}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'version': 0}, + ) + + async def test_get_frontends_returns_correct_information(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): + endpoint = 'services/haproxy/configuration/frontends' + data = {'data': {'foo': 1, 'bar': 2}} + request_mock.return_value = mock.MagicMock( + **{'status_code': 200, 'is_success': True, 'json.return_value': data} + ) + + ret_val = await proxy_api.get_frontends() + + request_mock.assert_called_once_with( + method=ProxyAPIMethod.GET.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'version': 0}, + ) + assert ret_val == data + + async def test_add_frontend_makes_correct_request(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): + endpoint = 'services/haproxy/configuration/frontends' + bind_endpoint = 'services/haproxy/configuration/binds' + # version_endpoint = 'services/haproxy/configuration/version' + name = 'bar' + + request_mock.side_effect = ( + mock.MagicMock(**{'status_code': 201, 'is_success': True, 'json.return_value': {'name': name}}), + mock.MagicMock(**{'status_code': 201, 'is_success': True, 'json.return_value': 1}), + mock.MagicMock(**{'status_code': 201, 'is_success': True, 'json.return_value': {'foo': 'baz'}}), + mock.MagicMock(**{'status_code': 201, 'is_success': True, 'json.return_value': 2}), + ) + + port = 1514 + backend = 'foo' + + await proxy_api.add_frontend(name, port, backend) + + assert request_mock.call_count == 4 + + request_mock.assert_any_call( + method=ProxyAPIMethod.POST.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}', + auth=(proxy_api.username, proxy_api.password), + json={'name': name, 'mode': 
CommunicationProtocol.TCP.value, 'default_backend': backend}, + params={'force_reload': True, 'version': 0}, + ) + request_mock.assert_any_call( + method=ProxyAPIMethod.POST.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{bind_endpoint}', + auth=(proxy_api.username, proxy_api.password), + json={'port': port, 'name': f'{name}_bind'}, + params={'force_reload': True, 'frontend': name, 'version': 1}, + ) + + async def test_get_backend_server_runtime_settings_returns_correct_information( + self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock + ): + endpoint = 'services/haproxy/runtime/servers' + data = {'data': {'foo': 1, 'bar': 2}} + request_mock.return_value = mock.MagicMock( + **{'status_code': 200, 'is_success': True, 'json.return_value': data} + ) + + backend_name = 'foo' + server_name = 'bar' + ret_val = await proxy_api.get_backend_server_runtime_settings(backend_name, server_name) + + request_mock.assert_called_once_with( + method=ProxyAPIMethod.GET.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}/{server_name}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'backend': backend_name, 'name': server_name, 'version': 0}, + ) + assert ret_val == data + + @pytest.mark.parametrize( + 'state', + [ + ProxyServerState.DOWN, + ProxyServerState.DRAIN, + ProxyServerState.MAINTENANCE, + ProxyServerState.READY, + ProxyServerState.UP, + ], + ) + async def test_change_backend_server_state_makes_correct_request( + self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock, state: ProxyServerState + ): + endpoint = 'services/haproxy/runtime/servers' + + request_mock.return_value = mock.MagicMock(**{'status_code': 200, 'is_success': True}) + + backend_name = 'foo' + server_name = 'bar' + + await proxy_api.change_backend_server_state(backend_name, server_name, state) + + request_mock.assert_called_once_with( + method=ProxyAPIMethod.PUT.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}/{server_name}', + auth=(proxy_api.username, proxy_api.password), + json={'admin_state': state.value}, + params={'backend': backend_name, 'version': 0}, + ) + + async def test_get_backend_stats_returns_correct_information( + self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock + ): + endpoint = 'services/haproxy/stats/native' + data = {'data': {'foo': 1, 'bar': 2}} + request_mock.return_value = mock.MagicMock( + **{'status_code': 200, 'is_success': True, 'json.return_value': data} + ) + + backend_name = 'foo' + ret_val = await proxy_api.get_backend_stats(backend_name) + + request_mock.assert_called_once_with( + method=ProxyAPIMethod.GET.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'type': 'backend', 'name': backend_name, 'version': 0}, + ) + assert ret_val == data + + async def test_get_backend_server_stats_returns_correct_information( + self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock + ): + endpoint = 'services/haproxy/stats/native' + data = {'data': {'foo': 1, 'bar': 2}} + request_mock.return_value = mock.MagicMock( + **{'status_code': 200, 'is_success': True, 'json.return_value': data} + ) + + backend_name = 'foo' + server_name = 'bar' + ret_val = await proxy_api.get_backend_server_stats(backend_name, server_name) + + request_mock.assert_called_once_with( + method=ProxyAPIMethod.GET.value, + 
url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'type': 'server', 'parent': backend_name, 'name': server_name, 'version': 0}, + ) + assert ret_val == data + + +class TestProxy: + @pytest.fixture + def proxy_api_mock(self): + with mock.patch('framework.wazuh.core.cluster.hap_helper.proxy.ProxyAPI', autospec=True) as proxy_api_mock: + yield proxy_api_mock + + @pytest.fixture + def proxy(self, proxy_api_mock: mock.MagicMock): + proxy = Proxy(wazuh_backend='test', proxy_api=proxy_api_mock, tag='test') + with mock.patch.object(proxy, 'logger'): + yield proxy + + @pytest.mark.parametrize('hard_stop_after,expected', ([3000, 3], [None, None])) + async def test_initialize_runs_ok( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy, hard_stop_after: int | None, expected: int | None + ): + proxy_api_mock.get_runtime_info.return_value = {'version': 1} + + with mock.patch.object( + proxy, 'get_hard_stop_after_value', return_value=hard_stop_after + ) as hard_stop_after_mock: + await proxy.initialize() + hard_stop_after_mock.assert_called_once() + proxy_api_mock.initialize.assert_called_once() + proxy_api_mock.get_runtime_info.assert_called_once() + assert proxy.hard_stop_after == expected + + @pytest.mark.parametrize('side_effect', [KeyError, IndexError]) + async def test_initialize_dont_rise_in_case_of_error( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy, side_effect: Exception + ): + proxy_api_mock.get_runtime_info.side_effect = side_effect + with pytest.raises(WazuhHAPHelperError, match='.*3048.*'): + await proxy.initialize() + + @pytest.mark.parametrize( + 'global_configuration,expected', ([{'hard_stop_after': 3000}, 3000], [{'foo': 'bar'}, None]) + ) + async def test_get_hard_stop_after_value_returns_correct_value( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy, global_configuration: int, expected: int | None + ): + proxy_api_mock.get_global_configuration.return_value = global_configuration + + assert (await proxy.get_hard_stop_after_value()) == expected + proxy_api_mock.get_global_configuration.assert_called_once() + + @pytest.mark.parametrize( + 'hard_stop_after,new_configuration', + ([None, {'hard_stop_after': 70000}], [50.0, {'hard_stop_after': 70000}], [70.0, {}]), + ) + async def test_set_hard_stop_after_value_calculate_and_set_correct_value( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy, hard_stop_after: float | None, new_configuration: dict + ): + proxy_api_mock.get_global_configuration.return_value = {} + proxy.hard_stop_after = hard_stop_after + await proxy.set_hard_stop_after_value( + active_agents=20, n_managers=3, chunk_size=5, agent_reconnection_time=10, server_admin_state_delay=5 + ) + + if new_configuration: + proxy_api_mock.update_global_configuration.assert_called_once_with(new_configuration=new_configuration) + else: + proxy_api_mock.update_global_configuration.assert_not_called() + + async def test_get_current_pid_returns_correct_value(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + pid = 10 + proxy_api_mock.get_runtime_info.return_value = {'pid': pid} + + assert (await proxy.get_current_pid()) == pid + + async def test_get_current_backends_returns_correct_information(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + backends = [ + {'name': 'backend1', 'mode': 'http', 'adv_check': 'httpchk', 'balance': {'algorithm': 'roundrobin'}}, + {'name': 'backend2', 'mode': 'http', 'adv_check': 'httpchk', 'balance': {'algorithm': 'roundrobin'}}, + ] + 
proxy_api_mock.get_backends.return_value = {'data': backends} + + ret_val = await proxy.get_current_backends() + + proxy_api_mock.get_backends.assert_called_once() + assert ret_val == {backend['name']: backend for backend in backends} + + @pytest.mark.parametrize( + 'current_backends,backend,expected', ([{'backend1': {}}, 'backend1', True], [{}, 'backend1', False]) + ) + async def test_exists_backend_returns_correct_value( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy, current_backends: dict, backend: str, expected: bool + ): + with mock.patch.object(proxy, 'get_current_backends', return_value=current_backends): + assert await proxy.exists_backend(backend) == expected + + async def test_get_current_frontends_returns_correct_information( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy + ): + frontends = [ + {'name': 'frontend1', 'mode': 'http', 'default_backend': 'backend1'}, + {'name': 'frontend2', 'mode': 'http'}, + ] + proxy_api_mock.get_frontends.return_value = {'data': frontends} + + ret_val = await proxy.get_current_frontends() + + proxy_api_mock.get_frontends.assert_called_once() + assert ret_val == {frontend['name']: frontend for frontend in frontends if 'default_backend' in frontend} + + @pytest.mark.parametrize( + 'current_frontends,frontend,expected', ([{'frontend1': {}}, 'frontend1', True], [{}, 'frontend1', False]) + ) + async def test_exists_frontend_returns_correct_value( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy, current_frontends: dict, frontend: str, expected: bool + ): + with mock.patch.object(proxy, 'get_current_frontends', return_value=current_frontends): + assert await proxy.exists_frontend(frontend) == expected + + async def test_add_new_backend_makes_correct_callback(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + parameters = { + 'name': 'foo', + 'mode': CommunicationProtocol.TCP, + 'algorithm': ProxyBalanceAlgorithm.LEAST_CONNECTIONS, + } + + await proxy.add_new_backend(**parameters) + + proxy_api_mock.add_backend.assert_called_once_with(**parameters) + + async def test_add_new_frontend_makes_correct_callback(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + parameters = {'name': 'foo', 'port': 1514, 'backend': 'bar', 'mode': CommunicationProtocol.TCP} + + await proxy.add_new_frontend(**parameters) + + proxy_api_mock.add_frontend.assert_called_once_with(**parameters) + + async def test_get_current_backend_servers_returns_correct_information( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy + ): + servers = [ + {'name': 'server1', 'address': '192.168.0.1'}, + {'name': 'server2', 'address': '192.168.0.2'}, + ] + proxy_api_mock.get_backend_servers.return_value = {'data': servers} + + ret_val = await proxy.get_current_backend_servers() + + proxy_api_mock.get_backend_servers.assert_called_once_with(backend=proxy.wazuh_backend) + assert ret_val == {server['name']: server['address'] for server in servers} + + async def test_add_wazuh_manager_makes_correct_callback(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + manager_name = 'foo' + manager_address = '192.168.0.1' + resolver = 'test-resolver' + + await proxy.add_wazuh_manager(manager_name, manager_address, resolver) + + proxy_api_mock.add_server_to_backend.assert_called_once_with( + backend=proxy.wazuh_backend, + server_name=manager_name, + server_address=manager_address, + port=proxy.wazuh_connection_port, + resolver=resolver, + ) + + async def test_remove_wazuh_manager_makes_correct_callback(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + manager_name = 'foo' + + 
await proxy.remove_wazuh_manager(manager_name) + + proxy_api_mock.remove_server_from_backend.assert_called_with( + backend=proxy.wazuh_backend, server_name=manager_name + ) + + async def test_restrain_server_new_connections_makes_correct_callback( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy + ): + server_name = 'foo' + + await proxy.restrain_server_new_connections(server_name) + + proxy_api_mock.change_backend_server_state.assert_called_once_with( + backend_name=proxy.wazuh_backend, server_name=server_name, state=ProxyServerState.DRAIN + ) + + async def test_allow_server_new_connections_makes_correct_callback( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy + ): + server_name = 'foo' + + await proxy.allow_server_new_connections(server_name) + + proxy_api_mock.change_backend_server_state.assert_called_once_with( + backend_name=proxy.wazuh_backend, server_name=server_name, state=ProxyServerState.READY + ) + + async def test_get_wazuh_server_stats_returns_correct_information( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy + ): + stats = {'foo': 'bar'} + proxy_api_mock.get_backend_server_stats.return_value = [{'stats': [{'stats': stats}]}] + server_name = 'foo' + assert (await proxy.get_wazuh_server_stats(server_name)) == stats + proxy_api_mock.get_backend_server_stats.assert_called_once_with( + backend_name=proxy.wazuh_backend, server_name=server_name + ) + + @pytest.mark.parametrize( + 'state,expected', + ( + [ProxyServerState.DRAIN.value, True], + [random.choice([ProxyServerState.READY.value, ProxyServerState.MAINTENANCE.value]), False], + ), + ) + async def test_is_server_drain_returns_correct_value( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy, state: ProxyServerState, expected: bool + ): + proxy_api_mock.get_backend_server_runtime_settings.return_value = {'admin_state': state} + + server_name = 'foo' + assert (await proxy.is_server_drain(server_name)) == expected + proxy_api_mock.get_backend_server_runtime_settings.assert_called_once_with( + backend_name=proxy.wazuh_backend, server_name=server_name + ) + + async def test_get_wazuh_backend_stats_returns_correct_information( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy + ): + servers = [ + {'name': 'server1', 'address': '192.168.0.1'}, + {'name': 'server2', 'address': '192.168.0.2'}, + ] + proxy_api_mock.get_backend_servers.return_value = {'data': servers} + + with mock.patch.object( + proxy, 'get_wazuh_server_stats', return_value={'status': ProxyServerState.UP.value.upper()} + ) as server_stats_mock: + stats = await proxy.get_wazuh_backend_stats() + assert len(stats.keys()) == len(servers) + server_stats_mock.call_count = len(servers) + for server in servers: + server_stats_mock.assert_any_call(server_name=server['name']) + + async def test_get_wazuh_backend_server_connections_returns_correct_information( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy + ): + stats = { + 'server1': {'scur': 10}, + 'server2': {'scur': 20}, + } + with mock.patch.object(proxy, 'get_wazuh_backend_stats', return_value=stats) as backend_stats_mock: + ret_stats = await proxy.get_wazuh_backend_server_connections() + backend_stats_mock.assert_called_once() + assert ret_stats == {'server1': 10, 'server2': 20} From b5232cc39a146695df5fee965faabf9529876ce0 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Fri, 12 Apr 2024 09:29:01 -0300 Subject: [PATCH 359/419] Add docstrings --- .../cluster/hap_helper/tests/test_proxy.py | 76 +++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git 
a/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py b/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py index 201df079702..576664caf0f 100644 --- a/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py @@ -58,6 +58,8 @@ def request_mock(self): yield request_mock async def test_initialize_runs_ok(self, proxy_api: ProxyAPI, get_mock: mock.AsyncMock): + """Check the correct function of `initialize` method.""" + await proxy_api.initialize() get_mock.assert_called_once_with( @@ -82,6 +84,8 @@ async def test_initialize_dont_raise_in_case_of_error( side_effect: Exception | None, expected: int, ): + """Check the correct error handling of `initialize` method.""" + if status_code is not None: get_mock.return_value = mock.MagicMock(status_code=status_code) @@ -114,6 +118,8 @@ async def test_proxy_method_dont_raise_in_case_of_error( method, f_kwargs, ): + """Check the correct error handling of all methods that call `_make_hap_request`.""" + if response: request_mock.return_value = mock.MagicMock(**response) if side_effect is not None: @@ -125,6 +131,8 @@ async def test_proxy_method_dont_raise_in_case_of_error( async def test_update_configuration_version_set_correct_version( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): + """Check that `update_configuration_version` method set the correct version.""" + endpoint = 'services/haproxy/configuration/version' version = '1' request_mock.return_value = mock.MagicMock( @@ -145,6 +153,8 @@ async def test_update_configuration_version_set_correct_version( async def test_get_runtime_info_returns_correct_information( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): + """Check the correct output of `get_runtime_info` method.""" + endpoint = 'services/haproxy/runtime/info' info = {'foo': 1, 'bar': 2} request_mock.return_value = mock.MagicMock( @@ -165,6 +175,8 @@ async def test_get_runtime_info_returns_correct_information( async def test_get_global_configuration_returns_correct_information( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): + """Check the correct output of `get_global_configuration` method.""" + endpoint = 'services/haproxy/configuration/global' data = {'foo': 1, 'bar': 2} request_mock.return_value = mock.MagicMock( @@ -185,6 +197,8 @@ async def test_get_global_configuration_returns_correct_information( async def test_update_global_configuration_makes_correct_request( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): + """Check that `update_globla_configuration` method makes the correct request.""" + endpoint = 'services/haproxy/configuration/global' version_endpoint = 'services/haproxy/configuration/version' new_configuration = {'foo': 1, 'bar': 2} @@ -215,6 +229,8 @@ async def test_update_global_configuration_makes_correct_request( ) async def test_get_backends_returns_correct_information(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): + """Check the correct output of `get_backends` method.""" + endpoint = 'services/haproxy/configuration/backends' data = {'data': {'foo': 1, 'bar': 2}} request_mock.return_value = mock.MagicMock( @@ -233,6 +249,8 @@ async def test_get_backends_returns_correct_information(self, proxy_api: ProxyAP assert ret_val == data async def test_add_backend_makes_correct_request(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): + """Check that `add_backend` method makes the correct request.""" + endpoint = 'services/haproxy/configuration/backends' version_endpoint = 
'services/haproxy/configuration/version' @@ -264,6 +282,8 @@ async def test_add_backend_makes_correct_request(self, proxy_api: ProxyAPI, requ async def test_get_backend_servers_returns_correct_information( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): + """Check the correct output of `get_backend_servers` method.""" + endpoint = 'services/haproxy/configuration/servers' data = {'data': {'foo': 1, 'bar': 2}} request_mock.return_value = mock.MagicMock( @@ -294,6 +314,8 @@ async def test_add_server_to_backend_makes_correct_request( is_ip_address: bool, resolver: str | None, ): + """Check that `add_server_to_backend` method makes the correct request.""" + endpoint = 'services/haproxy/configuration/servers' version_endpoint = 'services/haproxy/configuration/version' @@ -330,6 +352,8 @@ async def test_add_server_to_backend_makes_correct_request( async def test_remove_server_from_backend_makes_correct_request( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): + """Check that `remove_server_from_backend` method makes the correct request.""" + endpoint = 'services/haproxy/configuration/servers' version_endpoint = 'services/haproxy/configuration/version' @@ -357,6 +381,8 @@ async def test_remove_server_from_backend_makes_correct_request( ) async def test_get_frontends_returns_correct_information(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): + """Check the correct output of `get_frontends` method.""" + endpoint = 'services/haproxy/configuration/frontends' data = {'data': {'foo': 1, 'bar': 2}} request_mock.return_value = mock.MagicMock( @@ -375,6 +401,8 @@ async def test_get_frontends_returns_correct_information(self, proxy_api: ProxyA assert ret_val == data async def test_add_frontend_makes_correct_request(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): + """Check that `add_frontend` method makes the correct request.""" + endpoint = 'services/haproxy/configuration/frontends' bind_endpoint = 'services/haproxy/configuration/binds' # version_endpoint = 'services/haproxy/configuration/version' @@ -412,6 +440,8 @@ async def test_add_frontend_makes_correct_request(self, proxy_api: ProxyAPI, req async def test_get_backend_server_runtime_settings_returns_correct_information( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): + """Check the correct output of `get_backend_server` method.""" + endpoint = 'services/haproxy/runtime/servers' data = {'data': {'foo': 1, 'bar': 2}} request_mock.return_value = mock.MagicMock( @@ -444,6 +474,8 @@ async def test_get_backend_server_runtime_settings_returns_correct_information( async def test_change_backend_server_state_makes_correct_request( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock, state: ProxyServerState ): + """Check that `change_backend_server` method makes the correct request.""" + endpoint = 'services/haproxy/runtime/servers' request_mock.return_value = mock.MagicMock(**{'status_code': 200, 'is_success': True}) @@ -464,6 +496,8 @@ async def test_change_backend_server_state_makes_correct_request( async def test_get_backend_stats_returns_correct_information( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): + """Check the correct output of `get_backend_stats` method.""" + endpoint = 'services/haproxy/stats/native' data = {'data': {'foo': 1, 'bar': 2}} request_mock.return_value = mock.MagicMock( @@ -485,6 +519,8 @@ async def test_get_backend_stats_returns_correct_information( async def test_get_backend_server_stats_returns_correct_information( self, proxy_api: ProxyAPI, request_mock: 
mock.AsyncMock ): + """Check the correct output of `get_backend_server_stats` method.""" + endpoint = 'services/haproxy/stats/native' data = {'data': {'foo': 1, 'bar': 2}} request_mock.return_value = mock.MagicMock( @@ -521,6 +557,8 @@ def proxy(self, proxy_api_mock: mock.MagicMock): async def test_initialize_runs_ok( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, hard_stop_after: int | None, expected: int | None ): + """Check the correct function of `initialize` method.""" + proxy_api_mock.get_runtime_info.return_value = {'version': 1} with mock.patch.object( @@ -536,6 +574,8 @@ async def test_initialize_runs_ok( async def test_initialize_dont_rise_in_case_of_error( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, side_effect: Exception ): + """Check the correct error handling of `initialize` method.""" + proxy_api_mock.get_runtime_info.side_effect = side_effect with pytest.raises(WazuhHAPHelperError, match='.*3048.*'): await proxy.initialize() @@ -546,6 +586,8 @@ async def test_initialize_dont_rise_in_case_of_error( async def test_get_hard_stop_after_value_returns_correct_value( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, global_configuration: int, expected: int | None ): + """Check the correct output of `get_hard_stop_after` method.""" + proxy_api_mock.get_global_configuration.return_value = global_configuration assert (await proxy.get_hard_stop_after_value()) == expected @@ -558,6 +600,8 @@ async def test_get_hard_stop_after_value_returns_correct_value( async def test_set_hard_stop_after_value_calculate_and_set_correct_value( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, hard_stop_after: float | None, new_configuration: dict ): + """Check the correct function of `set_hard_stop_after` method.""" + proxy_api_mock.get_global_configuration.return_value = {} proxy.hard_stop_after = hard_stop_after await proxy.set_hard_stop_after_value( @@ -570,12 +614,16 @@ async def test_set_hard_stop_after_value_calculate_and_set_correct_value( proxy_api_mock.update_global_configuration.assert_not_called() async def test_get_current_pid_returns_correct_value(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + """Check the correct output of `get_current_pid` method.""" + pid = 10 proxy_api_mock.get_runtime_info.return_value = {'pid': pid} assert (await proxy.get_current_pid()) == pid async def test_get_current_backends_returns_correct_information(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + """Check the correct output of `get_current_backends` method.""" + backends = [ {'name': 'backend1', 'mode': 'http', 'adv_check': 'httpchk', 'balance': {'algorithm': 'roundrobin'}}, {'name': 'backend2', 'mode': 'http', 'adv_check': 'httpchk', 'balance': {'algorithm': 'roundrobin'}}, @@ -593,12 +641,16 @@ async def test_get_current_backends_returns_correct_information(self, proxy_api_ async def test_exists_backend_returns_correct_value( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, current_backends: dict, backend: str, expected: bool ): + """Check the correct output of `exists_backend` method.""" + with mock.patch.object(proxy, 'get_current_backends', return_value=current_backends): assert await proxy.exists_backend(backend) == expected async def test_get_current_frontends_returns_correct_information( self, proxy_api_mock: mock.MagicMock, proxy: Proxy ): + """Check the correct output of `get_current_frontends` method.""" + frontends = [ {'name': 'frontend1', 'mode': 'http', 'default_backend': 'backend1'}, {'name': 'frontend2', 'mode': 'http'}, @@ -616,10 +668,14 @@ async def 
test_get_current_frontends_returns_correct_information( async def test_exists_frontend_returns_correct_value( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, current_frontends: dict, frontend: str, expected: bool ): + """Check the correct output of `exists_frontend` method.""" + with mock.patch.object(proxy, 'get_current_frontends', return_value=current_frontends): assert await proxy.exists_frontend(frontend) == expected async def test_add_new_backend_makes_correct_callback(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + """Check that `add_new_backend` method makes the correct callback.""" + parameters = { 'name': 'foo', 'mode': CommunicationProtocol.TCP, @@ -631,6 +687,8 @@ async def test_add_new_backend_makes_correct_callback(self, proxy_api_mock: mock proxy_api_mock.add_backend.assert_called_once_with(**parameters) async def test_add_new_frontend_makes_correct_callback(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + """Check that `add_new_frontend` method makes the correct callback.""" + parameters = {'name': 'foo', 'port': 1514, 'backend': 'bar', 'mode': CommunicationProtocol.TCP} await proxy.add_new_frontend(**parameters) @@ -640,6 +698,8 @@ async def test_add_new_frontend_makes_correct_callback(self, proxy_api_mock: moc async def test_get_current_backend_servers_returns_correct_information( self, proxy_api_mock: mock.MagicMock, proxy: Proxy ): + """Check the correct output of `get_current_backend` method.""" + servers = [ {'name': 'server1', 'address': '192.168.0.1'}, {'name': 'server2', 'address': '192.168.0.2'}, @@ -652,6 +712,8 @@ async def test_get_current_backend_servers_returns_correct_information( assert ret_val == {server['name']: server['address'] for server in servers} async def test_add_wazuh_manager_makes_correct_callback(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + """Check that `add_wazuh_manager` method makes the correct callback.""" + manager_name = 'foo' manager_address = '192.168.0.1' resolver = 'test-resolver' @@ -667,6 +729,8 @@ async def test_add_wazuh_manager_makes_correct_callback(self, proxy_api_mock: mo ) async def test_remove_wazuh_manager_makes_correct_callback(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + """Check that `remove_wazuh_manager` method makes the correct callback.""" + manager_name = 'foo' await proxy.remove_wazuh_manager(manager_name) @@ -678,6 +742,8 @@ async def test_remove_wazuh_manager_makes_correct_callback(self, proxy_api_mock: async def test_restrain_server_new_connections_makes_correct_callback( self, proxy_api_mock: mock.MagicMock, proxy: Proxy ): + """Check that `restrain_server_new_connections` method makes the correct callback.""" + server_name = 'foo' await proxy.restrain_server_new_connections(server_name) @@ -689,6 +755,8 @@ async def test_restrain_server_new_connections_makes_correct_callback( async def test_allow_server_new_connections_makes_correct_callback( self, proxy_api_mock: mock.MagicMock, proxy: Proxy ): + """Check that `allow_server_new_connections` method makes the correct callback.""" + server_name = 'foo' await proxy.allow_server_new_connections(server_name) @@ -700,6 +768,8 @@ async def test_allow_server_new_connections_makes_correct_callback( async def test_get_wazuh_server_stats_returns_correct_information( self, proxy_api_mock: mock.MagicMock, proxy: Proxy ): + """Check the correct output of `get_wazuh_server_stats` method.""" + stats = {'foo': 'bar'} proxy_api_mock.get_backend_server_stats.return_value = [{'stats': [{'stats': stats}]}] server_name = 'foo' @@ -718,6 +788,8 
@@ async def test_get_wazuh_server_stats_returns_correct_information( async def test_is_server_drain_returns_correct_value( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, state: ProxyServerState, expected: bool ): + """Check the correct output of `is_server_drain` method.""" + proxy_api_mock.get_backend_server_runtime_settings.return_value = {'admin_state': state} server_name = 'foo' @@ -729,6 +801,8 @@ async def test_is_server_drain_returns_correct_value( async def test_get_wazuh_backend_stats_returns_correct_information( self, proxy_api_mock: mock.MagicMock, proxy: Proxy ): + """Check the correct output of `get_wazuh_backend_stats` method.""" + servers = [ {'name': 'server1', 'address': '192.168.0.1'}, {'name': 'server2', 'address': '192.168.0.2'}, @@ -747,6 +821,8 @@ async def test_get_wazuh_backend_stats_returns_correct_information( async def test_get_wazuh_backend_server_connections_returns_correct_information( self, proxy_api_mock: mock.MagicMock, proxy: Proxy ): + """Check the correct output of `get_wazuh_backend_server_connections` method.""" + stats = { 'server1': {'scur': 10}, 'server2': {'scur': 20}, From 740816abde0c67e5b88015f5f8209536da92f058 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Fri, 19 Apr 2024 09:48:37 -0300 Subject: [PATCH 360/419] Added tests for framework/wazuh/core/cluster/hap_helper/hap_helper.py module --- .../core/cluster/hap_helper/hap_helper.py | 22 +- .../hap_helper/tests/test_hap_helper.py | 653 ++++++++++++++++++ 2 files changed, 664 insertions(+), 11 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index d75cc9198fd..adcf9eb2e3d 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -1,5 +1,5 @@ +import asyncio import logging -from asyncio import sleep from math import ceil, floor from wazuh.core.cluster.hap_helper.proxy import Proxy, ProxyAPI, ProxyServerState @@ -197,7 +197,7 @@ async def update_agent_connections(self, agent_list: list[str]): for index in range(0, len(agent_list), self.agent_reconnection_chunk_size): await self.wazuh_dapi.reconnect_agents(agent_list[index : index + self.agent_reconnection_chunk_size]) self.logger.debug(f'Delay between agent reconnections. Sleeping {self.agent_reconnection_time}s...') - await sleep(self.agent_reconnection_time) + await asyncio.sleep(self.agent_reconnection_time) async def force_agent_reconnection_to_server(self, chosen_server: str, agents_list: list[dict]): """Force agents reconnection to a given server. 
@@ -213,7 +213,7 @@ async def force_agent_reconnection_to_server(self, chosen_server: str, agents_li affected_servers = current_servers - {chosen_server} for server_name in affected_servers: await self.proxy.restrain_server_new_connections(server_name=server_name) - await sleep(self.SERVER_ADMIN_STATE_DELAY) + await asyncio.sleep(self.SERVER_ADMIN_STATE_DELAY) eligible_agents = WazuhAgent.get_agents_able_to_reconnect(agents_list=agents_list) if len(eligible_agents) != len(agents_list): self.logger.warning( @@ -223,7 +223,7 @@ async def force_agent_reconnection_to_server(self, chosen_server: str, agents_li await self.update_agent_connections(agent_list=eligible_agents) for server_name in affected_servers: await self.proxy.allow_server_new_connections(server_name=server_name) - await sleep(self.SERVER_ADMIN_STATE_DELAY) + await asyncio.sleep(self.SERVER_ADMIN_STATE_DELAY) async def migrate_old_connections(self, new_servers: list[str], deleted_servers: list[str]): """Reconnects agents to new servers. @@ -248,7 +248,7 @@ async def migrate_old_connections(self, new_servers: list[str], deleted_servers: raise WazuhHAPHelperError(3041) self.logger.debug('Waiting for new servers to go UP') - await sleep(1) + await asyncio.sleep(1) backend_stats_iteration += 1 wazuh_backend_stats = (await self.proxy.get_wazuh_backend_stats()).keys() @@ -284,7 +284,7 @@ async def migrate_old_connections(self, new_servers: list[str], deleted_servers: self.logger.info('Waiting for agent connections stability') self.logger.debug(f'Sleeping {self.agent_reconnection_stability_time}s...') - await sleep(self.agent_reconnection_stability_time) + await asyncio.sleep(self.agent_reconnection_stability_time) def check_for_balance(self, current_connections_distribution: dict) -> dict: """Checks if the Wazuh cluster is balanced. @@ -414,17 +414,17 @@ async def manage_wazuh_cluster_nodes(self): self.logger.info('Load balancer backend is balanced') else: self.logger.info('Agent imbalance detected. Waiting for agent status sync...') - await sleep(self.AGENT_STATUS_SYNC_TIME) + await asyncio.sleep(self.AGENT_STATUS_SYNC_TIME) await self.balance_agents(affected_servers=unbalanced_connections) self.logger.debug(f'Sleeping {self.sleep_time}s...') - await sleep(self.sleep_time) + await asyncio.sleep(self.sleep_time) except WazuhException as handled_exc: self.logger.error(str(handled_exc)) self.logger.warning( f'Tasks may not perform as expected. Sleeping {self.sleep_time}s ' 'before continuing...' ) - await sleep(self.sleep_time) + await asyncio.sleep(self.sleep_time) async def set_hard_stop_after(self, wait_connection_retry: bool = True, reconnect_agents: bool = True): """Calculate and set hard-stop-after configuration in HAProxy. @@ -440,7 +440,7 @@ async def set_hard_stop_after(self, wait_connection_retry: bool = True, reconnec if wait_connection_retry: connection_retry = self.get_connection_retry() self.logger.debug(f'Waiting {connection_retry}s for workers connections...') - await sleep(connection_retry) + await asyncio.sleep(connection_retry) self.logger.info('Setting a value for `hard-stop-after` configuration.') agents_distribution = await self.wazuh_dapi.get_agents_node_distribution() @@ -520,7 +520,7 @@ async def start(cls): if helper.proxy.hard_stop_after is not None: sleep_time = max(helper.proxy.hard_stop_after, cls.get_connection_retry()) helper.logger.info(f'Ensuring only exists one HAProxy process. 
Sleeping {sleep_time}s before start...') - await sleep(sleep_time) + await asyncio.sleep(sleep_time) await helper.initialize_wazuh_cluster_configuration() diff --git a/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py b/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py index e69de29bb2d..8ecc12ed42a 100644 --- a/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py @@ -0,0 +1,653 @@ +from asyncio import TimeoutError, wait_for +from random import choice +from unittest import mock + +import pytest +from wazuh.core.cluster.hap_helper.hap_helper import HAPHelper, ProxyServerState, WazuhHAPHelperError +from wazuh.core.cluster.utils import ( + AGENT_CHUNK_SIZE, + AGENT_RECONNECTION_STABILITY_TIME, + AGENT_RECONNECTION_TIME, + EXCLUDED_NODES, + FREQUENCY, + HAPROXY_ADDRESS, + HAPROXY_BACKEND, + HAPROXY_HELPER, + HAPROXY_PASSWORD, + HAPROXY_PORT, + HAPROXY_PROTOCOL, + HAPROXY_RESOLVER, + HAPROXY_USER, + IMBALANCE_TOLERANCE, + REMOVE_DISCONNECTED_NODE_AFTER, +) +from wazuh.core.exception import WazuhException + + +class TestHAPHelper: + CONFIGURATION = { + 'sleep_time': 60, + 'agent_reconnection_stability_time': 10, + 'agent_reconnection_time': 1, + 'agent_reconnection_chunk_size': 5, + 'agent_tolerance': 0.1, + 'remove_disconnected_node_after': 3, + } + + @pytest.fixture + def dapi_mock(self): + with mock.patch('wazuh.core.cluster.hap_helper.hap_helper.WazuhDAPI', autospec=True) as dapi_mock: + yield dapi_mock + + @pytest.fixture + def proxy_api_mock(self): + with mock.patch('wazuh.core.cluster.hap_helper.hap_helper.ProxyAPI', autospec=True) as proxy_api_mock: + yield proxy_api_mock + + @pytest.fixture + def proxy_mock(self): + with mock.patch('wazuh.core.cluster.hap_helper.hap_helper.Proxy', autospec=True) as proxy_mock: + yield proxy_mock + + @pytest.fixture + def helper(self, proxy_mock: mock.MagicMock, dapi_mock: mock.MagicMock): + helper = HAPHelper(proxy=proxy_mock, wazuh_dapi=dapi_mock, tag='test', **self.CONFIGURATION) + with mock.patch.object(helper, 'logger'): + yield helper + + @pytest.fixture + def sleep_mock(self): + with mock.patch('wazuh.core.cluster.hap_helper.hap_helper.asyncio.sleep') as sleep_mock: + yield sleep_mock + + @pytest.fixture + def wazuh_agent_mock(self): + with mock.patch( + 'wazuh.core.cluster.hap_helper.hap_helper.WazuhAgent.get_agents_able_to_reconnect', autospec=True + ) as wazuh_agent_mock: + yield wazuh_agent_mock + + @pytest.fixture + def read_cluster_config_mock(self): + with mock.patch('wazuh.core.cluster.hap_helper.hap_helper.read_cluster_config') as read_cluster_config_mock: + yield read_cluster_config_mock + + @pytest.fixture + def get_ossec_conf(self): + with mock.patch('wazuh.core.cluster.hap_helper.hap_helper.get_ossec_conf') as get_ossec_conf: + yield get_ossec_conf + + async def test_initialize_cluster_runs_ok(self, helper: HAPHelper, proxy_mock: mock.MagicMock): + """Check the correct function of `initialize` method.""" + await helper.initialize_proxy() + proxy_mock.initialize.assert_called_once() + + async def test_initialize_raise_and_log_error(self, helper: HAPHelper, proxy_mock: mock.MagicMock): + """Check the correct error handling of `initialize` method.""" + proxy_mock.initialize.side_effect = WazuhHAPHelperError(3046) + + with pytest.raises(WazuhHAPHelperError): + await helper.initialize_proxy() + assert helper.logger.critical.call_count == 2 + + @pytest.mark.parametrize( + 'exists_backend,exists_frontend', ([True, True], [True, 
False], [False, True], [False, False]) + ) + async def test_initialize_wazuh_cluster_configuration_makes_correct_callbacks( + self, helper: HAPHelper, proxy_mock: mock.MagicMock, exists_backend: bool, exists_frontend: bool + ): + """Check that `initialize_wazuh_cluster_configuration` method makes the correct callbacks.""" + backend_name = 'test' + frontend_name = f'{backend_name}_front' + port = 1514 + + proxy_mock.wazuh_backend = backend_name + proxy_mock.wazuh_connection_port = port + proxy_mock.exists_backend.return_value = exists_backend + proxy_mock.exists_frontend.return_value = exists_frontend + + await helper.initialize_wazuh_cluster_configuration() + + proxy_mock.exists_backend.assert_called_once_with(backend_name) + if not exists_backend: + proxy_mock.add_new_backend.assert_called_once_with(backend_name) + else: + proxy_mock.add_new_backend.assert_not_called() + + proxy_mock.exists_frontend.assert_called_once_with(frontend_name) + if not exists_frontend: + proxy_mock.add_new_frontend.assert_called_once_with(name=frontend_name, port=port, backend=backend_name) + else: + proxy_mock.add_new_frontend.assert_not_called() + + @pytest.mark.parametrize( + 'stats,expected', + ( + [ + { + 'status': ProxyServerState.UP.value.upper(), + 'lastchg': CONFIGURATION['remove_disconnected_node_after'] * 60, + }, + False, + ], + [ + { + 'status': ProxyServerState.UP.value.upper(), + 'lastchg': (CONFIGURATION['remove_disconnected_node_after'] + 1) * 60, + }, + False, + ], + [ + { + 'status': choice([ProxyServerState.DOWN.value.upper(), ProxyServerState.MAINTENANCE.value.upper()]), + 'lastchg': (CONFIGURATION['remove_disconnected_node_after'] - 1) * 60, + }, + False, + ], + [ + { + 'status': choice([ProxyServerState.DOWN.value.upper(), ProxyServerState.MAINTENANCE.value.upper()]), + 'lastchg': CONFIGURATION['remove_disconnected_node_after'] * 60, + }, + True, + ], + [ + { + 'status': choice([ProxyServerState.DOWN.value.upper(), ProxyServerState.MAINTENANCE.value.upper()]), + 'lastchg': (CONFIGURATION['remove_disconnected_node_after'] + 1) * 60, + }, + True, + ], + ), + ) + async def test_check_node_to_delete_returns_correct_information( + self, helper: HAPHelper, proxy_mock: mock.MagicMock, stats: dict, expected: bool + ): + """Check the correct output of `check_node_to_delete` method.""" + proxy_mock.get_wazuh_server_stats.return_value = stats + node_name = 'test_node' + + ret_val = await helper.check_node_to_delete(node_name) + + proxy_mock.get_wazuh_server_stats.assert_called_once_with(server_name=node_name) + assert ret_val == expected + + async def test_backend_servers_state_healthcheck_makes_correct_callbacks( + self, helper: HAPHelper, proxy_mock: mock.MagicMock + ): + """Check that `backend_servers_state_healthcheck` method makes the correct callbacks.""" + WORKER1 = 'worker1' + WORKER2 = 'worker2' + BACKEND_DRAIN = {WORKER1: True, WORKER2: False} + + proxy_mock.get_current_backend_servers.return_value = {WORKER1: {}, WORKER2: {}} + proxy_mock.is_server_drain.side_effect = list(BACKEND_DRAIN.values()) + + await helper.backend_servers_state_healthcheck() + + for server in BACKEND_DRAIN.keys(): + proxy_mock.is_server_drain.assert_any_call(server) + + proxy_mock.allow_server_new_connections.assert_called_once_with(WORKER1) + + @pytest.mark.parametrize('check_node_to_delete', [True, False]) + async def test_obtain_nodes_to_configure_servers_returns_correct_information( + self, helper: HAPHelper, proxy_mock: mock.MagicMock, dapi_mock: mock.MagicMock, check_node_to_delete: bool + ): + """Check the 
correct output of `obtain_nodes_to_configure` method.""" + WORKER1 = 'worker1' + WORKER2 = 'worker2' + WORKER3 = 'worker3' + WORKER4 = 'worker4' + + expected_add_nodes = [WORKER1, WORKER2] + expected_remove_nodes = [WORKER2, WORKER4] + if check_node_to_delete: + expected_remove_nodes.append(WORKER3) + + wazuh_cluster_nodes = {WORKER1: '192.168.0.1', WORKER2: '192.168.0.2'} + proxy_backend_servers = {WORKER2: '192.168.0.3', WORKER3: '192.168.0.4', WORKER4: '192.168.0.5'} + + dapi_mock.excluded_nodes = [WORKER4] + + with mock.patch.object(helper, 'check_node_to_delete', return_value=check_node_to_delete) as check_mock: + add_nodes, remove_nodes = await helper.obtain_nodes_to_configure(wazuh_cluster_nodes, proxy_backend_servers) + check_mock.assert_called_once_with(WORKER3) + + assert add_nodes == expected_add_nodes + assert not set(remove_nodes) - set(expected_remove_nodes) + + @pytest.mark.parametrize('agents_count,expected', ([6, 1], [11, 2])) + @pytest.mark.asyncio + async def test_update_agent_connections_makes_correct_callbacks( + self, helper: HAPHelper, dapi_mock: mock.MagicMock, sleep_mock: mock.AsyncMock, agents_count: int, expected: int + ): + """Check that `update_agent_connections` method makes the correct callbacks.""" + agent_list = [f'{n:03}' for n in range(1, agents_count)] + + await helper.update_agent_connections(agent_list) + + assert dapi_mock.reconnect_agents.call_count == expected + for index in range(0, len(agent_list), helper.agent_reconnection_chunk_size): + dapi_mock.reconnect_agents.assert_any_call(agent_list[index : index + helper.agent_reconnection_chunk_size]) + assert sleep_mock.call_count == expected + + @pytest.mark.parametrize( + 'agent_list,elegible_agents', + ( + [ + [ + {'id': '001', 'version': '4.9.0'}, + {'id': '002', 'version': '4.9.0'}, + {'id': '003', 'version': '4.9.0'}, + ], + ['001', '002', '003'], + ], + [ + [ + {'id': '001', 'version': '4.9.0'}, + {'id': '002', 'version': '4.9.0'}, + {'id': '003', 'version': '4.2.0'}, + ], + ['001', '002'], + ], + ), + ) + async def test_force_agent_reconnection_to_server_makes_correct_callbacks( + self, + wazuh_agent_mock: mock.MagicMock, + helper: HAPHelper, + proxy_mock: mock.MagicMock, + sleep_mock: mock.AsyncMock, + agent_list: list, + elegible_agents: list, + ): + """Check that `force_agent_reconnection_to_server` method makes the correct callbacks.""" + WORKER1 = 'worker1' + WORKER2 = 'worker2' + WORKER3 = 'worker3' + + proxy_mock.get_current_backend_servers.return_value = { + WORKER1: {}, + WORKER2: {}, + WORKER3: {}, + } + wazuh_agent_mock.return_value = elegible_agents + + with mock.patch.object(helper, 'update_agent_connections') as update_agent_connections_mock: + await helper.force_agent_reconnection_to_server(WORKER1, agent_list) + update_agent_connections_mock.assert_called_once_with(agent_list=elegible_agents) + + for server in [WORKER2, WORKER3]: + proxy_mock.restrain_server_new_connections.assert_any_call(server) + proxy_mock.allow_server_new_connections.assert_any_call(server) + + assert sleep_mock.call_count == 2 + sleep_mock.assert_called_with(helper.SERVER_ADMIN_STATE_DELAY) + + async def test_migrate_old_connections_makes_correct_callbacks( + self, helper: HAPHelper, proxy_mock: mock.MagicMock, dapi_mock: mock.MagicMock, sleep_mock: mock.AsyncMock + ): + """Check that `migrate_old_connections` method makes the correct callbacks.""" + WORKER1 = 'worker1' + WORKER2 = 'worker2' + WORKER3 = 'worker3' + + NEW_SERVERS = [WORKER2] + OLD_SERVERS = [WORKER3] + + AGENTS_TO_FORCE = [{'id': '001', 
'version': 'v4.9.0'}] + AGENTS_TO_UPDATE = [{'id': '002', 'version': 'v4.9.0'}] + + PREVIOUSE_CONNECTION_DIST = {WORKER1: 1, WORKER3: 1} + + proxy_mock.get_wazuh_backend_stats.return_value = {WORKER1: {}, WORKER2: {}} + dapi_mock.get_agents_node_distribution.return_value = { + WORKER1: AGENTS_TO_FORCE, + WORKER3: AGENTS_TO_UPDATE, + } + proxy_mock.get_wazuh_backend_server_connections.return_value = PREVIOUSE_CONNECTION_DIST + with mock.patch.object(helper, 'check_for_balance', return_value={WORKER3: 1}) as check_for_balance_mock: + with mock.patch.object( + helper, 'force_agent_reconnection_to_server' + ) as force_agent_reconnection_to_server_mock: + with mock.patch.object(helper, 'update_agent_connections') as update_agent_connections_mock: + await helper.migrate_old_connections(NEW_SERVERS, OLD_SERVERS) + check_for_balance_mock.assert_called_once_with( + current_connections_distribution=PREVIOUSE_CONNECTION_DIST + ) + force_agent_reconnection_to_server_mock.assert_called_once_with( + chosen_server=WORKER1, agents_list=AGENTS_TO_FORCE + ) + update_agent_connections_mock.assert_called_once_with( + agent_list=[item['id'] for item in AGENTS_TO_UPDATE] + ) + sleep_mock.assert_any_call(helper.agent_reconnection_stability_time) + + async def test_migrate_old_connections_raises_when_exceed_timeout( + self, helper: HAPHelper, proxy_mock: mock.MagicMock, sleep_mock: mock.AsyncMock + ): + """Check that `migrate_old_connections` method makes the correct callbacks.""" + WORKER1 = 'worker1' + WORKER2 = 'worker2' + WORKER3 = 'worker3' + + NEW_SERVERS = [WORKER2] + OLD_SERVERS = [WORKER3] + + proxy_mock.get_wazuh_backend_stats.return_value = {WORKER1: {}} + + with pytest.raises(WazuhHAPHelperError, match='.*3041.*'): + await helper.migrate_old_connections(NEW_SERVERS, OLD_SERVERS) + + assert sleep_mock.call_count == helper.UPDATED_BACKEND_STATUS_TIMEOUT + + @pytest.mark.parametrize( + 'distribution,expected', + ( + [{}, {}], + [{'worker1': 1, 'worker2': 2, 'worker3': 1}, {}], + [{'worker1': 0, 'worker2': 2, 'worker3': 1}, {'worker2': 1}], + [{'worker1': 0, 'worker2': 2, 'worker3': 2}, {'worker2': 1, 'worker3': 1}], + [{'worker1': 0, 'worker2': 4, 'worker3': 0}, {'worker2': 3}], + ), + ) + async def test_check_for_balance_returns_correct_information( + self, helper: HAPHelper, distribution: dict, expected: dict + ): + """Check the correct output of `check_for_balance` method.""" + assert helper.check_for_balance(current_connections_distribution=distribution) == expected + + @pytest.mark.parametrize( + 'agent_list,elegible_agents', + ( + [[{'id': '001'}, {'id': '002'}, {'id': '003'}], ['001', '002', '003']], + [[{'id': '001'}, {'id': '002'}, {'id': '003'}], ['001', '002']], + ), + ) + async def test_calculate_agents_to_balance_returns_correct_information( + self, + helper: HAPHelper, + dapi_mock: mock.MagicMock, + wazuh_agent_mock: mock.MagicMock, + agent_list: list, + elegible_agents: list, + ): + """Check the correct output of `calculate_agents_to_balance` method.""" + WORKER1 = 'worker1' + + affected_servers = {WORKER1: 3} + dapi_mock.get_agents_belonging_to_node.return_value = agent_list + wazuh_agent_mock.return_value = elegible_agents + + assert (await helper.calculate_agents_to_balance(affected_servers)) == {WORKER1: elegible_agents} + if len(elegible_agents) != len(agent_list): + helper.logger.warning.assert_called_once() + + async def test_balance_agents_makes_correct_callbacks(self, helper: HAPHelper, proxy_mock: mock.MagicMock): + """Check that `balance_agents` method makes the correct 
callbacks.""" + WORKER1 = 'worker1' + + affected_servers = {WORKER1: 3} + agent_list = ['001', '002', '003'] + agents_to_balance = {WORKER1: agent_list} + + with mock.patch.object( + helper, 'calculate_agents_to_balance', return_value=agents_to_balance + ) as calculate_agents_to_balance_mock: + with mock.patch.object(helper, 'update_agent_connections') as update_agent_connections_mock: + await helper.balance_agents(affected_servers) + calculate_agents_to_balance_mock.assert_called_once_with(affected_servers) + update_agent_connections_mock.assert_called_once_with(agent_list=agent_list) + + @pytest.mark.parametrize( + 'nodes_to_add,nodes_to_remove,unbalanced_connections', + [ + ([], ['worker2'], {}), + (['worker2'], [], {}), + (['worker1', 'worker2'], ['worker3'], {}), + ([], [], {'worker1': 10, 'worker2': 8}), + ], + ) + async def test_manage_wazuh_cluster_nodes_makes_correct_callbacks( + self, + helper: HAPHelper, + proxy_mock: mock.MagicMock, + dapi_mock: mock.MagicMock, + sleep_mock: mock.AsyncMock, + nodes_to_add: list, + nodes_to_remove: list, + unbalanced_connections: dict, + ): + """Check that `manage_wazuh_cluster_nodes` method makes the correct callbacks.""" + WORKER1 = 'worker1' + WORKER2 = 'worker2' + WORKER3 = 'worker3' + + nodes = {WORKER1: '192.168.0.1', WORKER2: '192.168.0.2', WORKER3: '192.168.0.3'} + nodes_to_configure = [(nodes_to_add, nodes_to_remove)] + if any([nodes_to_add, nodes_to_remove]): + nodes_to_configure.append(([], [])) + + dapi_mock.get_cluster_nodes.return_value = nodes + proxy_mock.get_current_backend_servers.return_value = nodes + proxy_mock.resolver = 'test' + + with mock.patch.object(helper, 'backend_servers_state_healthcheck'): + with mock.patch.object(helper, 'obtain_nodes_to_configure', side_effect=nodes_to_configure): + with mock.patch.object(helper, 'set_hard_stop_after'): + with mock.patch.object(helper, 'migrate_old_connections'): + with mock.patch.object(helper, 'check_for_balance', return_value=unbalanced_connections): + with mock.patch.object(helper, 'balance_agents'): + try: + await wait_for(helper.manage_wazuh_cluster_nodes(), 0.5) + except (TimeoutError, StopAsyncIteration): + pass + + assert helper.backend_servers_state_healthcheck.call_count + + if nodes_to_add or nodes_to_remove: + for node_to_remove in nodes_to_remove: + proxy_mock.remove_wazuh_manager.assert_any_call(manager_name=node_to_remove) + + for node_to_add in nodes_to_add: + proxy_mock.add_wazuh_manager.assert_any_call( + manager_name=node_to_add, + manager_address=nodes[node_to_add], + resolver=proxy_mock.resolver, + ) + helper.set_hard_stop_after.assert_called_once_with( + wait_connection_retry=False, reconnect_agents=False + ) + helper.migrate_old_connections.assert_called_once_with( + new_servers=nodes_to_add, deleted_servers=nodes_to_remove + ) + if unbalanced_connections: + sleep_mock.assert_any_call(helper.AGENT_STATUS_SYNC_TIME) + helper.balance_agents.assert_called_once_with( + affected_servers=unbalanced_connections + ) + else: + helper.logger.info.assert_any_call('Load balancer backend is balanced') + sleep_mock.assert_any_call(helper.sleep_time) + + async def test_manage_wazuh_cluster_nodes_dont_raise_in_case_of_error( + self, + helper: HAPHelper, + sleep_mock: mock.AsyncMock, + ): + """Check the correct error handling of `manage_wazuh_cluster_nodes` method.""" + error = WazuhException(3000, 'Some test exception') + + with mock.patch.object(helper, 'backend_servers_state_healthcheck', side_effect=[error]): + try: + await 
wait_for(helper.manage_wazuh_cluster_nodes(), 0.5) + except (TimeoutError, StopAsyncIteration): + pass + helper.logger.error.assert_called_once_with(str(error)) + sleep_mock.assert_any_call(helper.sleep_time) + + @pytest.mark.parametrize('wait_connection_retry,reconnect_agents', ([True, False], [False, False], [False, True])) + @pytest.mark.parametrize('agent_ids', (['001', '002'], [])) + async def test_set_hard_stop_after_makes_correct_callbacks( + self, + helper: HAPHelper, + proxy_mock: mock.MagicMock, + dapi_mock: mock.MagicMock, + sleep_mock: mock.AsyncMock, + wait_connection_retry: bool, + reconnect_agents: bool, + agent_ids: list, + ): + """Check that `set_hard_stop_after` method makes the correct callbacks.""" + WORKER1 = 'worker1' + + AGENT_NODE_DISTRIBUTION = {WORKER1: [{'id': agent_id} for agent_id in agent_ids]} + CLUSTER_NODES = {WORKER1: '192.168.0.1'} + + dapi_mock.get_agents_node_distribution.return_value = AGENT_NODE_DISTRIBUTION + dapi_mock.get_cluster_nodes.return_value = CLUSTER_NODES + connection_retry = 10 + with mock.patch.object( + helper, 'get_connection_retry', return_value=connection_retry + ) as get_connection_retry_mock: + with mock.patch.object(helper, 'update_agent_connections') as update_agent_connections_mock: + await helper.set_hard_stop_after(wait_connection_retry, reconnect_agents) + + if wait_connection_retry: + get_connection_retry_mock.assert_called_once() + sleep_mock.assert_called_once_with(connection_retry) + else: + get_connection_retry_mock.assert_not_called() + + proxy_mock.set_hard_stop_after_value.assert_called_once_with( + active_agents=len(agent_ids), + chunk_size=helper.agent_reconnection_chunk_size, + agent_reconnection_time=helper.agent_reconnection_time, + n_managers=len(CLUSTER_NODES.keys()), + server_admin_state_delay=helper.SERVER_ADMIN_STATE_DELAY, + ) + + if reconnect_agents and len(agent_ids) > 0: + update_agent_connections_mock.assert_called_once_with(agent_list=agent_ids) + else: + update_agent_connections_mock.assert_not_called() + + async def test_get_connection_retry_returns_correct_information( + self, helper: HAPHelper, proxy_mock: mock.MagicMock + ): + """Check the correct output of `get_connection_retry` method.""" + CONNECTION_RETRY = 10 + + with mock.patch( + 'wazuh.core.cluster.hap_helper.hap_helper.get_cluster_items', + return_value={'intervals': {'worker': {'connection_retry': CONNECTION_RETRY}}}, + ): + assert helper.get_connection_retry() == CONNECTION_RETRY + 2 + + @pytest.mark.parametrize('hard_stop_after', [None, 8, 12]) + async def test_start_makes_correct_callbacks( + self, + read_cluster_config_mock: mock.MagicMock, + get_ossec_conf: mock.MagicMock, + proxy_api_mock: mock.MagicMock, + proxy_mock: mock.MagicMock, + dapi_mock: mock.MagicMock, + sleep_mock: mock.AsyncMock, + hard_stop_after: int | None, + ): + """Check that `start` method makes the correct callbacks.""" + HAPROXY_USER_VALUE = 'test' + HAPROXY_PASSWORD_VALUE = 'test' + HAPROXY_ADDRESS_VALUE = 'wazuh-proxy' + HAPROXY_PORT_VALUE = 5555 + HAPROXY_PROTOCOL_VALUE = 'http' + HAPROXY_BACKEND_VALUE = 'wazuh_test' + HAPROXY_RESOLVER_VALUE = 'resolver_test' + EXCLUDED_NODES_VALUE = ['worker1'] + FREQUENCY_VALUE = 60 + AGENT_RECONNECTION_STABILITY_TIME_VALUE = 10 + AGENT_RECONNECTION_TIME_VALUE = 1 + AGENT_CHUNK_SIZE_VALUE = 10 + IMBALANCE_TOLERANCE_VALUE = 0.1 + REMOVE_DISCONNECTED_NODE_AFTER_VALUE = 3 + WAZUH_PORT = 1514 + TAG = 'HAPHelper' + + HELPER_CONFIG = { + HAPROXY_USER: HAPROXY_USER_VALUE, + HAPROXY_PASSWORD: HAPROXY_PASSWORD_VALUE, + 
HAPROXY_ADDRESS: HAPROXY_ADDRESS_VALUE, + HAPROXY_PORT: HAPROXY_PORT_VALUE, + HAPROXY_PROTOCOL: HAPROXY_PROTOCOL_VALUE, + HAPROXY_BACKEND: HAPROXY_BACKEND_VALUE, + HAPROXY_RESOLVER: HAPROXY_RESOLVER_VALUE, + EXCLUDED_NODES: EXCLUDED_NODES_VALUE, + FREQUENCY: FREQUENCY_VALUE, + AGENT_RECONNECTION_STABILITY_TIME: AGENT_RECONNECTION_STABILITY_TIME_VALUE, + AGENT_RECONNECTION_TIME: AGENT_RECONNECTION_TIME_VALUE, + AGENT_CHUNK_SIZE: AGENT_CHUNK_SIZE_VALUE, + IMBALANCE_TOLERANCE: IMBALANCE_TOLERANCE_VALUE, + REMOVE_DISCONNECTED_NODE_AFTER: REMOVE_DISCONNECTED_NODE_AFTER_VALUE, + } + + proxy_api = mock.MagicMock() + proxy_api_mock.return_value = proxy_api + + proxy = mock.MagicMock(hard_stop_after=hard_stop_after) + proxy_mock.return_value = proxy + + dapi = mock.MagicMock() + dapi_mock.return_value = dapi + + read_cluster_config_mock.return_value = {HAPROXY_HELPER: HELPER_CONFIG} + get_ossec_conf.return_value = {'remote': [{'port': WAZUH_PORT}]} + + connection_retry = 10 + with mock.patch.object(HAPHelper, 'initialize_proxy', new=mock.AsyncMock()): + with mock.patch.object(HAPHelper, 'get_connection_retry', return_value=connection_retry): + with mock.patch.object(HAPHelper, 'initialize_wazuh_cluster_configuration', new=mock.AsyncMock()): + with mock.patch.object(HAPHelper, 'set_hard_stop_after', new=mock.AsyncMock()): + with mock.patch.object(HAPHelper, 'manage_wazuh_cluster_nodes', new=mock.AsyncMock()): + await HAPHelper.start() + + proxy_api_mock.assert_called_once_with( + username=HAPROXY_USER_VALUE, + password=HAPROXY_PASSWORD_VALUE, + tag=TAG, + address=HAPROXY_ADDRESS_VALUE, + port=HAPROXY_PORT_VALUE, + protocol=HAPROXY_PROTOCOL_VALUE, + ) + + proxy_mock.assert_called_once_with( + wazuh_backend=HAPROXY_BACKEND_VALUE, + wazuh_connection_port=WAZUH_PORT, + proxy_api=proxy_api, + tag=TAG, + resolver=HAPROXY_RESOLVER_VALUE, + ) + + dapi_mock.assert_called_once_with(tag=TAG, excluded_nodes=EXCLUDED_NODES_VALUE) + + HAPHelper.initialize_proxy.assert_called_once() + if hard_stop_after is not None: + sleep_mock.assert_called_once_with(max(hard_stop_after, connection_retry)) + + HAPHelper.initialize_wazuh_cluster_configuration.assert_called_once() + + if hard_stop_after is None: + HAPHelper.set_hard_stop_after.assert_called_once() + + HAPHelper.manage_wazuh_cluster_nodes.assert_called_once() + + @pytest.mark.parametrize('exception', [KeyError(), KeyboardInterrupt(), WazuhHAPHelperError(3046)]) + async def test_start_dont_raise_in_case_of_error( + self, read_cluster_config_mock: mock.MagicMock, exception: Exception + ): + """Check the correct error handling of `start` method.""" + read_cluster_config_mock.side_effect = exception + logger_mock = mock.MagicMock() + with mock.patch.object(HAPHelper, '_get_logger', return_value=logger_mock): + await HAPHelper.start() + logger_mock.info.assert_called_once_with('Task ended') From 5aafdb70b07aed6dcfe78c1638c7031880712a36 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Mon, 22 Apr 2024 20:06:22 -0300 Subject: [PATCH 361/419] Added test for cluster and utils functions --- framework/wazuh/core/cluster/cluster.py | 29 ++++---- .../wazuh/core/cluster/tests/test_cluster.py | 31 ++++++-- .../wazuh/core/cluster/tests/test_utils.py | 74 ++++++++++++++++++- framework/wazuh/core/cluster/utils.py | 8 +- 4 files changed, 115 insertions(+), 27 deletions(-) diff --git a/framework/wazuh/core/cluster/cluster.py b/framework/wazuh/core/cluster/cluster.py index ca8a3d8cd77..9262645dd1c 100644 --- a/framework/wazuh/core/cluster/cluster.py +++ 
b/framework/wazuh/core/cluster/cluster.py @@ -43,6 +43,19 @@ MIN_PORT = 1024 MAX_PORT = 65535 +HAPROXY_HELPER_SCHEMA = { + 'type': 'object', + 'properties': { + HAPROXY_PORT: {'type': 'integer', 'minimum': MIN_PORT, 'maximum': MAX_PORT}, + HAPROXY_PROTOCOL: {'type': 'string', 'enum': ['http', 'https']}, + FREQUENCY: {'type': 'integer', 'minimum': 10}, + AGENT_RECONNECTION_STABILITY_TIME: {'type': 'integer', 'minimum': 10}, + AGENT_CHUNK_SIZE: {'type': 'integer', 'minimum': 100}, + AGENT_RECONNECTION_TIME: {'type': 'integer', 'minimum': 0}, + IMBALANCE_TOLERANCE: {'type': 'number', 'minimum': 0, 'exclusiveMinimum': True, 'maximum': 1}, + REMOVE_DISCONNECTED_NODE_AFTER: {'type': 'integer', 'minimum': 0}, + }, +} # # Cluster @@ -61,22 +74,8 @@ def validate_haproxy_helper_config(config: dict): WazuhError(3004) If there any invalid value. """ - SCHEMA = { - 'type': 'object', - 'properties': { - HAPROXY_PORT: {'type': 'integer', 'minimum': MIN_PORT, 'maximum': MAX_PORT}, - HAPROXY_PROTOCOL: {'type': 'string', 'enum': ['http', 'https']}, - FREQUENCY: {'type': 'integer', 'minimum': 10}, - AGENT_RECONNECTION_STABILITY_TIME: {'type': 'integer', 'minimum': 10}, - AGENT_CHUNK_SIZE: {'type': 'integer', 'minimum': 100}, - AGENT_RECONNECTION_TIME: {'type': 'integer', 'minimum': 0}, - IMBALANCE_TOLERANCE: {'type': 'number', 'minimum': 0, 'exclusiveMinimum': True, 'maximum': 1}, - REMOVE_DISCONNECTED_NODE_AFTER: {'type': 'integer', 'minimum': 0}, - }, - } - try: - validate(config, SCHEMA) + validate(config, HAPROXY_HELPER_SCHEMA) except ValidationError as error: raise WazuhError( 3004, diff --git a/framework/wazuh/core/cluster/tests/test_cluster.py b/framework/wazuh/core/cluster/tests/test_cluster.py index fd8c106a0d5..a789bfd073e 100644 --- a/framework/wazuh/core/cluster/tests/test_cluster.py +++ b/framework/wazuh/core/cluster/tests/test_cluster.py @@ -2,18 +2,16 @@ # Created by Wazuh, Inc. . 
# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 -import io import os import sys -import zipfile import zlib from collections import defaultdict +from concurrent.futures import ProcessPoolExecutor from time import time -from unittest.mock import MagicMock, mock_open, patch, call, ANY +from unittest.mock import ANY, MagicMock, call, mock_open, patch import pytest from wazuh.core import common -from concurrent.futures import ProcessPoolExecutor with patch('wazuh.common.wazuh_uid'): with patch('wazuh.common.wazuh_gid'): @@ -254,7 +252,7 @@ def test_get_files_status(mock_get_cluster_items): with patch('wazuh.core.cluster.cluster.walk_dir', side_effect=Exception): _, logs = cluster.get_files_status() - assert logs['warning']['etc/'] == [f"Error getting file status: ."] + assert logs['warning']['etc/'] == ["Error getting file status: ."] @patch('wazuh.core.cluster.cluster.get_cluster_items', return_value={ @@ -491,7 +489,7 @@ def test_compare_files_ko(logger_mock, mock_get_cluster_items): with pytest.raises(Exception): cluster.compare_files(seq, condition, 'worker1') logger_mock.assert_called_once_with( - f"Error getting agent IDs while verifying which extra-valid files are required: ") + "Error getting agent IDs while verifying which extra-valid files are required: ") mock_get_cluster_items.assert_called_once_with() wazuh_db_query_mock.assert_called_once_with() @@ -603,3 +601,24 @@ def mock_callable(*args, **kwargs): # Test the second condition assert await cluster.run_in_pool(event_loop, None, mock_callable, None) == "Mock callable" + + +def test_validate_haproxy_helper_config_calls_validate(): + """Verify that validate_haproxy_helper_config function calls validate function.""" + + config = {cluster.AGENT_CHUNK_SIZE: 120, cluster.AGENT_RECONNECTION_TIME: 10} + + with patch.object(cluster, 'validate') as validate_mock: + cluster.validate_haproxy_helper_config(config) + + validate_mock.assert_called_once_with(config, cluster.HAPROXY_HELPER_SCHEMA) + + +def test_validate_haproxy_helper_config_raises_wazuh_error(): + """Verify that validate_haproxy_helper_config raises WazuhError when the is validation error.""" + + config = {cluster.AGENT_CHUNK_SIZE: 120, cluster.AGENT_RECONNECTION_TIME: 10} + + with patch.object(cluster, 'validate', side_effect=cluster.ValidationError(message='Error test', path=['test'])): + with pytest.raises(cluster.WazuhError, match='.* 3004 .*'): + cluster.validate_haproxy_helper_config(config) diff --git a/framework/wazuh/core/cluster/tests/test_utils.py b/framework/wazuh/core/cluster/tests/test_utils.py index f077e7df1b1..2c79d1d5502 100644 --- a/framework/wazuh/core/cluster/tests/test_utils.py +++ b/framework/wazuh/core/cluster/tests/test_utils.py @@ -5,7 +5,7 @@ import logging import os import sys -from unittest.mock import patch, MagicMock, call +from unittest.mock import MagicMock, patch import pytest @@ -15,8 +15,8 @@ with patch('wazuh.core.common.wazuh_gid'): sys.modules['wazuh.rbac.orm'] = MagicMock() - from wazuh.core.cluster import utils from wazuh import WazuhError, WazuhException, WazuhInternalError + from wazuh.core.cluster import utils from wazuh.core.results import WazuhResult default_cluster_config = { @@ -77,6 +77,76 @@ def test_read_cluster_config(): utils.read_cluster_config() +@pytest.mark.parametrize( + 'config', + ( + { + utils.HAPROXY_DISABLED: 'no', + utils.HAPROXY_ADDRESS: 'test', + utils.HAPROXY_PASSWORD: 'test', + utils.HAPROXY_USER: 'test' + }, + { + utils.HAPROXY_DISABLED: 'no', + 
utils.HAPROXY_ADDRESS: 'test', + utils.HAPROXY_PASSWORD: 'test', + utils.HAPROXY_USER: 'test', + utils.FREQUENCY: '60', + utils.AGENT_CHUNK_SIZE: '120', + utils.IMBALANCE_TOLERANCE: '0.1' + } + ) +) +def test_parse_haproxy_helper_config_return_correct_values(config: dict): + """Verify that parse_haproxy_helper_config function returns the default configuration.""" + + ret_val = utils.parse_haproxy_helper_config(config) + + for key in ((config.keys()) | utils.HELPER_DEFAULTS.keys()): + assert key in ret_val + + assert isinstance(ret_val[utils.HAPROXY_DISABLED], bool) + + if key in [ + utils.FREQUENCY, + utils.AGENT_CHUNK_SIZE, + utils.AGENT_RECONNECTION_STABILITY_TIME, + utils.AGENT_RECONNECTION_TIME, + utils.REMOVE_DISCONNECTED_NODE_AFTER, + utils.HAPROXY_PORT + ]: + assert isinstance(ret_val[key], int) + + if key in [utils.IMBALANCE_TOLERANCE]: + assert isinstance(ret_val[key], float) + + +@pytest.mark.parametrize( + 'config', + ( + { + utils.HAPROXY_DISABLED: 'no', + utils.HAPROXY_ADDRESS: 'test', + utils.HAPROXY_PASSWORD: 'test', + utils.HAPROXY_USER: 'test', + utils.FREQUENCY: 'bad', + }, + { + utils.HAPROXY_DISABLED: 'no', + utils.HAPROXY_ADDRESS: 'test', + utils.HAPROXY_PASSWORD: 'test', + utils.HAPROXY_USER: 'test', + utils.IMBALANCE_TOLERANCE: 'bad' + } + ) +) +def test_parse_haproxy_helper_config_raises_with_an_invalid_type(config: dict): + """Verify that parse_haproxy_helper_config function raises when config has an invalid type.""" + + with pytest.raises(WazuhError, match='.* 3004 .*'): + utils.parse_haproxy_helper_config(config) + + def test_get_manager_status(): """Check that get_manager_status function returns the manager status. diff --git a/framework/wazuh/core/cluster/utils.py b/framework/wazuh/core/cluster/utils.py index 12931bdc92f..32f1747315e 100644 --- a/framework/wazuh/core/cluster/utils.py +++ b/framework/wazuh/core/cluster/utils.py @@ -62,7 +62,7 @@ } -def parse_haproxy_helper_integer_values(helper_config: dict) -> dict: +def _parse_haproxy_helper_integer_values(helper_config: dict) -> dict: """Parse HAProxy helper integer values. Parameters @@ -96,7 +96,7 @@ def parse_haproxy_helper_integer_values(helper_config: dict) -> dict: return helper_config -def parse_haproxy_helper_float_values(helper_config: dict) -> dict: +def _parse_haproxy_helper_float_values(helper_config: dict) -> dict: """Parse HAProxy helper float values. 
Parameters @@ -150,8 +150,8 @@ def parse_haproxy_helper_config(helper_config: dict) -> dict: elif helper_config[HAPROXY_DISABLED] == YES: helper_config[HAPROXY_DISABLED] = True - helper_config = parse_haproxy_helper_integer_values(helper_config) - helper_config = parse_haproxy_helper_float_values(helper_config) + helper_config = _parse_haproxy_helper_integer_values(helper_config) + helper_config = _parse_haproxy_helper_float_values(helper_config) return helper_config From 76b3ac2bafa4c11f68e656f038681bef6957db52 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Tue, 23 Apr 2024 16:30:37 -0300 Subject: [PATCH 362/419] Suggestions from CR --- .../hap_helper/tests/test_hap_helper.py | 48 +++++------ .../cluster/hap_helper/tests/test_proxy.py | 85 +++++++++---------- .../cluster/hap_helper/tests/test_wazuh.py | 12 +-- .../wazuh/core/cluster/tests/test_cluster.py | 6 +- .../wazuh/core/cluster/tests/test_utils.py | 4 +- 5 files changed, 77 insertions(+), 78 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py b/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py index 8ecc12ed42a..266d0d0563b 100644 --- a/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py @@ -77,13 +77,13 @@ def get_ossec_conf(self): with mock.patch('wazuh.core.cluster.hap_helper.hap_helper.get_ossec_conf') as get_ossec_conf: yield get_ossec_conf - async def test_initialize_cluster_runs_ok(self, helper: HAPHelper, proxy_mock: mock.MagicMock): - """Check the correct function of `initialize` method.""" + async def test_initialize_proxy(self, helper: HAPHelper, proxy_mock: mock.MagicMock): + """Check the correct function of `initialize_proxy` method.""" await helper.initialize_proxy() proxy_mock.initialize.assert_called_once() - async def test_initialize_raise_and_log_error(self, helper: HAPHelper, proxy_mock: mock.MagicMock): - """Check the correct error handling of `initialize` method.""" + async def test_initialize_proxy_ko(self, helper: HAPHelper, proxy_mock: mock.MagicMock): + """Check the correct error handling of `initialize_proxy` method.""" proxy_mock.initialize.side_effect = WazuhHAPHelperError(3046) with pytest.raises(WazuhHAPHelperError): @@ -93,7 +93,7 @@ async def test_initialize_raise_and_log_error(self, helper: HAPHelper, proxy_moc @pytest.mark.parametrize( 'exists_backend,exists_frontend', ([True, True], [True, False], [False, True], [False, False]) ) - async def test_initialize_wazuh_cluster_configuration_makes_correct_callbacks( + async def test_initialize_wazuh_cluster_configuration( self, helper: HAPHelper, proxy_mock: mock.MagicMock, exists_backend: bool, exists_frontend: bool ): """Check that `initialize_wazuh_cluster_configuration` method makes the correct callbacks.""" @@ -160,7 +160,7 @@ async def test_initialize_wazuh_cluster_configuration_makes_correct_callbacks( ], ), ) - async def test_check_node_to_delete_returns_correct_information( + async def test_check_node_to_delete( self, helper: HAPHelper, proxy_mock: mock.MagicMock, stats: dict, expected: bool ): """Check the correct output of `check_node_to_delete` method.""" @@ -172,7 +172,7 @@ async def test_check_node_to_delete_returns_correct_information( proxy_mock.get_wazuh_server_stats.assert_called_once_with(server_name=node_name) assert ret_val == expected - async def test_backend_servers_state_healthcheck_makes_correct_callbacks( + async def test_backend_servers_state_healthcheck( self, helper: HAPHelper, 
proxy_mock: mock.MagicMock ): """Check that `backend_servers_state_healthcheck` method makes the correct callbacks.""" @@ -191,7 +191,7 @@ async def test_backend_servers_state_healthcheck_makes_correct_callbacks( proxy_mock.allow_server_new_connections.assert_called_once_with(WORKER1) @pytest.mark.parametrize('check_node_to_delete', [True, False]) - async def test_obtain_nodes_to_configure_servers_returns_correct_information( + async def test_obtain_nodes_to_configure_servers( self, helper: HAPHelper, proxy_mock: mock.MagicMock, dapi_mock: mock.MagicMock, check_node_to_delete: bool ): """Check the correct output of `obtain_nodes_to_configure` method.""" @@ -219,7 +219,7 @@ async def test_obtain_nodes_to_configure_servers_returns_correct_information( @pytest.mark.parametrize('agents_count,expected', ([6, 1], [11, 2])) @pytest.mark.asyncio - async def test_update_agent_connections_makes_correct_callbacks( + async def test_update_agent_connections( self, helper: HAPHelper, dapi_mock: mock.MagicMock, sleep_mock: mock.AsyncMock, agents_count: int, expected: int ): """Check that `update_agent_connections` method makes the correct callbacks.""" @@ -253,7 +253,7 @@ async def test_update_agent_connections_makes_correct_callbacks( ], ), ) - async def test_force_agent_reconnection_to_server_makes_correct_callbacks( + async def test_force_agent_reconnection_to_server( self, wazuh_agent_mock: mock.MagicMock, helper: HAPHelper, @@ -285,7 +285,7 @@ async def test_force_agent_reconnection_to_server_makes_correct_callbacks( assert sleep_mock.call_count == 2 sleep_mock.assert_called_with(helper.SERVER_ADMIN_STATE_DELAY) - async def test_migrate_old_connections_makes_correct_callbacks( + async def test_migrate_old_connections( self, helper: HAPHelper, proxy_mock: mock.MagicMock, dapi_mock: mock.MagicMock, sleep_mock: mock.AsyncMock ): """Check that `migrate_old_connections` method makes the correct callbacks.""" @@ -299,14 +299,14 @@ async def test_migrate_old_connections_makes_correct_callbacks( AGENTS_TO_FORCE = [{'id': '001', 'version': 'v4.9.0'}] AGENTS_TO_UPDATE = [{'id': '002', 'version': 'v4.9.0'}] - PREVIOUSE_CONNECTION_DIST = {WORKER1: 1, WORKER3: 1} + PREVIOUS_CONNECTION_DIST = {WORKER1: 1, WORKER3: 1} proxy_mock.get_wazuh_backend_stats.return_value = {WORKER1: {}, WORKER2: {}} dapi_mock.get_agents_node_distribution.return_value = { WORKER1: AGENTS_TO_FORCE, WORKER3: AGENTS_TO_UPDATE, } - proxy_mock.get_wazuh_backend_server_connections.return_value = PREVIOUSE_CONNECTION_DIST + proxy_mock.get_wazuh_backend_server_connections.return_value = PREVIOUS_CONNECTION_DIST with mock.patch.object(helper, 'check_for_balance', return_value={WORKER3: 1}) as check_for_balance_mock: with mock.patch.object( helper, 'force_agent_reconnection_to_server' @@ -314,7 +314,7 @@ async def test_migrate_old_connections_makes_correct_callbacks( with mock.patch.object(helper, 'update_agent_connections') as update_agent_connections_mock: await helper.migrate_old_connections(NEW_SERVERS, OLD_SERVERS) check_for_balance_mock.assert_called_once_with( - current_connections_distribution=PREVIOUSE_CONNECTION_DIST + current_connections_distribution=PREVIOUS_CONNECTION_DIST ) force_agent_reconnection_to_server_mock.assert_called_once_with( chosen_server=WORKER1, agents_list=AGENTS_TO_FORCE @@ -324,7 +324,7 @@ async def test_migrate_old_connections_makes_correct_callbacks( ) sleep_mock.assert_any_call(helper.agent_reconnection_stability_time) - async def test_migrate_old_connections_raises_when_exceed_timeout( + async def 
test_migrate_old_connections_ko( self, helper: HAPHelper, proxy_mock: mock.MagicMock, sleep_mock: mock.AsyncMock ): """Check that `migrate_old_connections` method makes the correct callbacks.""" @@ -352,7 +352,7 @@ async def test_migrate_old_connections_raises_when_exceed_timeout( [{'worker1': 0, 'worker2': 4, 'worker3': 0}, {'worker2': 3}], ), ) - async def test_check_for_balance_returns_correct_information( + async def test_check_for_balance( self, helper: HAPHelper, distribution: dict, expected: dict ): """Check the correct output of `check_for_balance` method.""" @@ -365,7 +365,7 @@ async def test_check_for_balance_returns_correct_information( [[{'id': '001'}, {'id': '002'}, {'id': '003'}], ['001', '002']], ), ) - async def test_calculate_agents_to_balance_returns_correct_information( + async def test_calculate_agents_to_balance( self, helper: HAPHelper, dapi_mock: mock.MagicMock, @@ -384,7 +384,7 @@ async def test_calculate_agents_to_balance_returns_correct_information( if len(elegible_agents) != len(agent_list): helper.logger.warning.assert_called_once() - async def test_balance_agents_makes_correct_callbacks(self, helper: HAPHelper, proxy_mock: mock.MagicMock): + async def test_balance_agents(self, helper: HAPHelper, proxy_mock: mock.MagicMock): """Check that `balance_agents` method makes the correct callbacks.""" WORKER1 = 'worker1' @@ -409,7 +409,7 @@ async def test_balance_agents_makes_correct_callbacks(self, helper: HAPHelper, p ([], [], {'worker1': 10, 'worker2': 8}), ], ) - async def test_manage_wazuh_cluster_nodes_makes_correct_callbacks( + async def test_manage_wazuh_cluster_nodes( self, helper: HAPHelper, proxy_mock: mock.MagicMock, @@ -471,7 +471,7 @@ async def test_manage_wazuh_cluster_nodes_makes_correct_callbacks( helper.logger.info.assert_any_call('Load balancer backend is balanced') sleep_mock.assert_any_call(helper.sleep_time) - async def test_manage_wazuh_cluster_nodes_dont_raise_in_case_of_error( + async def test_manage_wazuh_cluster_nodes_ko( self, helper: HAPHelper, sleep_mock: mock.AsyncMock, @@ -489,7 +489,7 @@ async def test_manage_wazuh_cluster_nodes_dont_raise_in_case_of_error( @pytest.mark.parametrize('wait_connection_retry,reconnect_agents', ([True, False], [False, False], [False, True])) @pytest.mark.parametrize('agent_ids', (['001', '002'], [])) - async def test_set_hard_stop_after_makes_correct_callbacks( + async def test_set_hard_stop_after( self, helper: HAPHelper, proxy_mock: mock.MagicMock, @@ -533,7 +533,7 @@ async def test_set_hard_stop_after_makes_correct_callbacks( else: update_agent_connections_mock.assert_not_called() - async def test_get_connection_retry_returns_correct_information( + async def test_get_connection_retry( self, helper: HAPHelper, proxy_mock: mock.MagicMock ): """Check the correct output of `get_connection_retry` method.""" @@ -546,7 +546,7 @@ async def test_get_connection_retry_returns_correct_information( assert helper.get_connection_retry() == CONNECTION_RETRY + 2 @pytest.mark.parametrize('hard_stop_after', [None, 8, 12]) - async def test_start_makes_correct_callbacks( + async def test_start( self, read_cluster_config_mock: mock.MagicMock, get_ossec_conf: mock.MagicMock, @@ -642,7 +642,7 @@ async def test_start_makes_correct_callbacks( HAPHelper.manage_wazuh_cluster_nodes.assert_called_once() @pytest.mark.parametrize('exception', [KeyError(), KeyboardInterrupt(), WazuhHAPHelperError(3046)]) - async def test_start_dont_raise_in_case_of_error( + async def test_start_ko( self, read_cluster_config_mock: mock.MagicMock, 
exception: Exception ): """Check the correct error handling of `start` method.""" diff --git a/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py b/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py index 576664caf0f..2332fb35931 100644 --- a/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py @@ -57,7 +57,7 @@ def request_mock(self): with mock.patch('framework.wazuh.core.cluster.hap_helper.proxy.httpx.AsyncClient.request') as request_mock: yield request_mock - async def test_initialize_runs_ok(self, proxy_api: ProxyAPI, get_mock: mock.AsyncMock): + async def test_initialize(self, proxy_api: ProxyAPI, get_mock: mock.AsyncMock): """Check the correct function of `initialize` method.""" await proxy_api.initialize() @@ -76,7 +76,7 @@ async def test_initialize_runs_ok(self, proxy_api: ProxyAPI, get_mock: mock.Asyn [None, httpx.RequestError, 3043], ), ) - async def test_initialize_dont_raise_in_case_of_error( + async def test_initialize_ko( self, proxy_api: ProxyAPI, get_mock: mock.AsyncMock, @@ -108,7 +108,7 @@ async def test_initialize_dont_raise_in_case_of_error( ), ) @pytest.mark.parametrize(*METHODS_KWARGS) - async def test_proxy_method_dont_raise_in_case_of_error( + async def test_proxy_method_ko( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock, @@ -128,10 +128,10 @@ async def test_proxy_method_dont_raise_in_case_of_error( with pytest.raises(WazuhHAPHelperError, match=f'.*{expected}.*'): await getattr(proxy_api, method)(**f_kwargs) - async def test_update_configuration_version_set_correct_version( + async def test_update_configuration_version( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): - """Check that `update_configuration_version` method set the correct version.""" + """Check that `update_configuration_version` method sets the correct version.""" endpoint = 'services/haproxy/configuration/version' version = '1' @@ -150,7 +150,7 @@ async def test_update_configuration_version_set_correct_version( ) assert proxy_api.version == version - async def test_get_runtime_info_returns_correct_information( + async def test_get_runtime_info( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): """Check the correct output of `get_runtime_info` method.""" @@ -172,7 +172,7 @@ async def test_get_runtime_info_returns_correct_information( ) assert ret_val == info - async def test_get_global_configuration_returns_correct_information( + async def test_get_global_configuration( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): """Check the correct output of `get_global_configuration` method.""" @@ -194,7 +194,7 @@ async def test_get_global_configuration_returns_correct_information( ) assert ret_val == data - async def test_update_global_configuration_makes_correct_request( + async def test_update_global_configuration( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): """Check that `update_globla_configuration` method makes the correct request.""" @@ -228,7 +228,7 @@ async def test_update_global_configuration_makes_correct_request( params={'version': 0}, ) - async def test_get_backends_returns_correct_information(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): + async def test_get_backends(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): """Check the correct output of `get_backends` method.""" endpoint = 'services/haproxy/configuration/backends' @@ -248,7 +248,7 @@ async def test_get_backends_returns_correct_information(self, proxy_api: ProxyAP ) assert 
ret_val == data - async def test_add_backend_makes_correct_request(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): + async def test_add_backend(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): """Check that `add_backend` method makes the correct request.""" endpoint = 'services/haproxy/configuration/backends' @@ -279,7 +279,7 @@ async def test_add_backend_makes_correct_request(self, proxy_api: ProxyAPI, requ params={'version': 0}, ) - async def test_get_backend_servers_returns_correct_information( + async def test_get_backend_servers( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): """Check the correct output of `get_backend_servers` method.""" @@ -306,7 +306,7 @@ async def test_get_backend_servers_returns_correct_information( 'server_address,is_ip_address,resolver', (['192.168.0.1', True, None], ['192.168.0.1', True, 'some-resolver'], ['some-address', False, 'some-resolver']), ) - async def test_add_server_to_backend_makes_correct_request( + async def test_add_server_to_backend( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock, @@ -349,7 +349,7 @@ async def test_add_server_to_backend_makes_correct_request( params={'version': 0}, ) - async def test_remove_server_from_backend_makes_correct_request( + async def test_remove_server_from_backend( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): """Check that `remove_server_from_backend` method makes the correct request.""" @@ -380,7 +380,7 @@ async def test_remove_server_from_backend_makes_correct_request( params={'version': 0}, ) - async def test_get_frontends_returns_correct_information(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): + async def test_get_frontends(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): """Check the correct output of `get_frontends` method.""" endpoint = 'services/haproxy/configuration/frontends' @@ -400,12 +400,11 @@ async def test_get_frontends_returns_correct_information(self, proxy_api: ProxyA ) assert ret_val == data - async def test_add_frontend_makes_correct_request(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): + async def test_add_frontend(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): """Check that `add_frontend` method makes the correct request.""" endpoint = 'services/haproxy/configuration/frontends' bind_endpoint = 'services/haproxy/configuration/binds' - # version_endpoint = 'services/haproxy/configuration/version' name = 'bar' request_mock.side_effect = ( @@ -437,10 +436,10 @@ async def test_add_frontend_makes_correct_request(self, proxy_api: ProxyAPI, req params={'force_reload': True, 'frontend': name, 'version': 1}, ) - async def test_get_backend_server_runtime_settings_returns_correct_information( + async def test_get_backend_server_runtime_settings( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): - """Check the correct output of `get_backend_server` method.""" + """Check the correct output of `get_backend_server_runtime_settings` method.""" endpoint = 'services/haproxy/runtime/servers' data = {'data': {'foo': 1, 'bar': 2}} @@ -471,10 +470,10 @@ async def test_get_backend_server_runtime_settings_returns_correct_information( ProxyServerState.UP, ], ) - async def test_change_backend_server_state_makes_correct_request( + async def test_change_backend_server_state( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock, state: ProxyServerState ): - """Check that `change_backend_server` method makes the correct request.""" + """Check that `change_backend_server_state` method makes the correct request.""" 
endpoint = 'services/haproxy/runtime/servers' @@ -493,7 +492,7 @@ async def test_change_backend_server_state_makes_correct_request( params={'backend': backend_name, 'version': 0}, ) - async def test_get_backend_stats_returns_correct_information( + async def test_get_backend_stats( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): """Check the correct output of `get_backend_stats` method.""" @@ -516,7 +515,7 @@ async def test_get_backend_stats_returns_correct_information( ) assert ret_val == data - async def test_get_backend_server_stats_returns_correct_information( + async def test_get_backend_server_stats( self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock ): """Check the correct output of `get_backend_server_stats` method.""" @@ -554,7 +553,7 @@ def proxy(self, proxy_api_mock: mock.MagicMock): yield proxy @pytest.mark.parametrize('hard_stop_after,expected', ([3000, 3], [None, None])) - async def test_initialize_runs_ok( + async def test_initialize( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, hard_stop_after: int | None, expected: int | None ): """Check the correct function of `initialize` method.""" @@ -571,7 +570,7 @@ async def test_initialize_runs_ok( assert proxy.hard_stop_after == expected @pytest.mark.parametrize('side_effect', [KeyError, IndexError]) - async def test_initialize_dont_rise_in_case_of_error( + async def test_initialize_ko( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, side_effect: Exception ): """Check the correct error handling of `initialize` method.""" @@ -583,7 +582,7 @@ async def test_initialize_dont_rise_in_case_of_error( @pytest.mark.parametrize( 'global_configuration,expected', ([{'hard_stop_after': 3000}, 3000], [{'foo': 'bar'}, None]) ) - async def test_get_hard_stop_after_value_returns_correct_value( + async def test_get_hard_stop_after_value( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, global_configuration: int, expected: int | None ): """Check the correct output of `get_hard_stop_after` method.""" @@ -597,7 +596,7 @@ async def test_get_hard_stop_after_value_returns_correct_value( 'hard_stop_after,new_configuration', ([None, {'hard_stop_after': 70000}], [50.0, {'hard_stop_after': 70000}], [70.0, {}]), ) - async def test_set_hard_stop_after_value_calculate_and_set_correct_value( + async def test_set_hard_stop_after_value( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, hard_stop_after: float | None, new_configuration: dict ): """Check the correct function of `set_hard_stop_after` method.""" @@ -613,7 +612,7 @@ async def test_set_hard_stop_after_value_calculate_and_set_correct_value( else: proxy_api_mock.update_global_configuration.assert_not_called() - async def test_get_current_pid_returns_correct_value(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + async def test_get_current_pid(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check the correct output of `get_current_pid` method.""" pid = 10 @@ -621,7 +620,7 @@ async def test_get_current_pid_returns_correct_value(self, proxy_api_mock: mock. 
assert (await proxy.get_current_pid()) == pid - async def test_get_current_backends_returns_correct_information(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + async def test_get_current_backends(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check the correct output of `get_current_backends` method.""" backends = [ @@ -638,7 +637,7 @@ async def test_get_current_backends_returns_correct_information(self, proxy_api_ @pytest.mark.parametrize( 'current_backends,backend,expected', ([{'backend1': {}}, 'backend1', True], [{}, 'backend1', False]) ) - async def test_exists_backend_returns_correct_value( + async def test_exists_backend( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, current_backends: dict, backend: str, expected: bool ): """Check the correct output of `exists_backend` method.""" @@ -646,7 +645,7 @@ async def test_exists_backend_returns_correct_value( with mock.patch.object(proxy, 'get_current_backends', return_value=current_backends): assert await proxy.exists_backend(backend) == expected - async def test_get_current_frontends_returns_correct_information( + async def test_get_current_frontends( self, proxy_api_mock: mock.MagicMock, proxy: Proxy ): """Check the correct output of `get_current_frontends` method.""" @@ -665,7 +664,7 @@ async def test_get_current_frontends_returns_correct_information( @pytest.mark.parametrize( 'current_frontends,frontend,expected', ([{'frontend1': {}}, 'frontend1', True], [{}, 'frontend1', False]) ) - async def test_exists_frontend_returns_correct_value( + async def test_exists_frontend( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, current_frontends: dict, frontend: str, expected: bool ): """Check the correct output of `exists_frontend` method.""" @@ -673,7 +672,7 @@ async def test_exists_frontend_returns_correct_value( with mock.patch.object(proxy, 'get_current_frontends', return_value=current_frontends): assert await proxy.exists_frontend(frontend) == expected - async def test_add_new_backend_makes_correct_callback(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + async def test_add_new_backend(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check that `add_new_backend` method makes the correct callback.""" parameters = { @@ -686,7 +685,7 @@ async def test_add_new_backend_makes_correct_callback(self, proxy_api_mock: mock proxy_api_mock.add_backend.assert_called_once_with(**parameters) - async def test_add_new_frontend_makes_correct_callback(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + async def test_add_new_frontend(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check that `add_new_frontend` method makes the correct callback.""" parameters = {'name': 'foo', 'port': 1514, 'backend': 'bar', 'mode': CommunicationProtocol.TCP} @@ -695,10 +694,10 @@ async def test_add_new_frontend_makes_correct_callback(self, proxy_api_mock: moc proxy_api_mock.add_frontend.assert_called_once_with(**parameters) - async def test_get_current_backend_servers_returns_correct_information( + async def test_get_current_backend_servers( self, proxy_api_mock: mock.MagicMock, proxy: Proxy ): - """Check the correct output of `get_current_backend` method.""" + """Check the correct output of `get_current_backend_servers` method.""" servers = [ {'name': 'server1', 'address': '192.168.0.1'}, @@ -711,7 +710,7 @@ async def test_get_current_backend_servers_returns_correct_information( proxy_api_mock.get_backend_servers.assert_called_once_with(backend=proxy.wazuh_backend) assert ret_val == {server['name']: server['address'] for 
server in servers} - async def test_add_wazuh_manager_makes_correct_callback(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + async def test_add_wazuh_manager(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check that `add_wazuh_manager` method makes the correct callback.""" manager_name = 'foo' @@ -728,7 +727,7 @@ async def test_add_wazuh_manager_makes_correct_callback(self, proxy_api_mock: mo resolver=resolver, ) - async def test_remove_wazuh_manager_makes_correct_callback(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): + async def test_remove_wazuh_manager(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check that `remove_wazuh_manager` method makes the correct callback.""" manager_name = 'foo' @@ -739,7 +738,7 @@ async def test_remove_wazuh_manager_makes_correct_callback(self, proxy_api_mock: backend=proxy.wazuh_backend, server_name=manager_name ) - async def test_restrain_server_new_connections_makes_correct_callback( + async def test_restrain_server_new_connections( self, proxy_api_mock: mock.MagicMock, proxy: Proxy ): """Check that `restrain_server_new_connections` method makes the correct callback.""" @@ -752,7 +751,7 @@ async def test_restrain_server_new_connections_makes_correct_callback( backend_name=proxy.wazuh_backend, server_name=server_name, state=ProxyServerState.DRAIN ) - async def test_allow_server_new_connections_makes_correct_callback( + async def test_allow_server_new_connections( self, proxy_api_mock: mock.MagicMock, proxy: Proxy ): """Check that `allow_server_new_connections` method makes the correct callback.""" @@ -765,7 +764,7 @@ async def test_allow_server_new_connections_makes_correct_callback( backend_name=proxy.wazuh_backend, server_name=server_name, state=ProxyServerState.READY ) - async def test_get_wazuh_server_stats_returns_correct_information( + async def test_get_wazuh_server_stats( self, proxy_api_mock: mock.MagicMock, proxy: Proxy ): """Check the correct output of `get_wazuh_server_stats` method.""" @@ -785,7 +784,7 @@ async def test_get_wazuh_server_stats_returns_correct_information( [random.choice([ProxyServerState.READY.value, ProxyServerState.MAINTENANCE.value]), False], ), ) - async def test_is_server_drain_returns_correct_value( + async def test_is_server_drain( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, state: ProxyServerState, expected: bool ): """Check the correct output of `is_server_drain` method.""" @@ -798,7 +797,7 @@ async def test_is_server_drain_returns_correct_value( backend_name=proxy.wazuh_backend, server_name=server_name ) - async def test_get_wazuh_backend_stats_returns_correct_information( + async def test_get_wazuh_backend_stats( self, proxy_api_mock: mock.MagicMock, proxy: Proxy ): """Check the correct output of `get_wazuh_backend_stats` method.""" @@ -818,7 +817,7 @@ async def test_get_wazuh_backend_stats_returns_correct_information( for server in servers: server_stats_mock.assert_any_call(server_name=server['name']) - async def test_get_wazuh_backend_server_connections_returns_correct_information( + async def test_get_wazuh_backend_server_connections( self, proxy_api_mock: mock.MagicMock, proxy: Proxy ): """Check the correct output of `get_wazuh_backend_server_connections` method.""" diff --git a/framework/wazuh/core/cluster/hap_helper/tests/test_wazuh.py b/framework/wazuh/core/cluster/hap_helper/tests/test_wazuh.py index 4a9e3aa7e17..f6b7752f1de 100644 --- a/framework/wazuh/core/cluster/hap_helper/tests/test_wazuh.py +++ b/framework/wazuh/core/cluster/hap_helper/tests/test_wazuh.py @@ 
-11,12 +11,12 @@ class TestWazuhAgent: @pytest.mark.parametrize('version,expected', [('v4.2.0', False), ('v4.3.0', True), ('v4.4.0', True)]) - def test_can_reconnect_returns_correct_value(self, version: str, expected: bool): + def test_can_reconnect(self, version: str, expected: bool): """Check the correct output of `can_reconnect` function.""" assert WazuhAgent.can_reconnect(version) == expected - def test_get_agents_able_to_reconnect_returns_correct_items(self): + def test_get_agents_able_to_reconnect(self): """Check the correct output of `get_agents_able_to_reconnect` function.""" agents = [ @@ -58,7 +58,7 @@ def fixture_affected_items_result(self): ), ) @mock.patch('framework.wazuh.core.cluster.hap_helper.wazuh.get_system_nodes', return_value={}) - async def test_get_cluster_nodes_returns_correct_information( + async def test_get_cluster_nodes( self, get_system_nodes_mock: mock.AsyncMock, dapi_mock: mock.MagicMock, @@ -85,7 +85,7 @@ async def test_get_cluster_nodes_returns_correct_information( ) assert ret_val == {item['name']: item['ip'] for item in nodes_data if item['name'] not in excluded_nodes} - async def test_reconnect_agents_correct_information( + async def test_reconnect_agents( self, dapi_mock: mock.MagicMock, fixture_affected_items_result: AffectedItemsWazuhResult, @@ -107,7 +107,7 @@ async def test_reconnect_agents_correct_information( ) assert ret_val == agent_list - async def test_get_agents_node_distribution_returns_correct_information( + async def test_get_agents_node_distribution( self, dapi_mock: mock.MagicMock, fixture_affected_items_result: AffectedItemsWazuhResult, @@ -138,7 +138,7 @@ async def test_get_agents_node_distribution_returns_correct_information( assert ret_val == {'worker1': [{'id': 1, 'version': '4.9.0'}], 'worker2': [{'id': 2, 'version': '4.9.0'}]} @pytest.mark.parametrize('limit', [100, None]) - async def test_get_agents_belonging_to_node_returns_correct_information( + async def test_get_agents_belonging_to_node( self, dapi_mock: mock.MagicMock, fixture_affected_items_result: AffectedItemsWazuhResult, limit: Optional[int] ): """Check the correct output of `get_agents_belonging_to_node` function.""" diff --git a/framework/wazuh/core/cluster/tests/test_cluster.py b/framework/wazuh/core/cluster/tests/test_cluster.py index a789bfd073e..8062b7c824c 100644 --- a/framework/wazuh/core/cluster/tests/test_cluster.py +++ b/framework/wazuh/core/cluster/tests/test_cluster.py @@ -603,7 +603,7 @@ def mock_callable(*args, **kwargs): assert await cluster.run_in_pool(event_loop, None, mock_callable, None) == "Mock callable" -def test_validate_haproxy_helper_config_calls_validate(): +def test_validate_haproxy_helper_config(): """Verify that validate_haproxy_helper_config function calls validate function.""" config = {cluster.AGENT_CHUNK_SIZE: 120, cluster.AGENT_RECONNECTION_TIME: 10} @@ -614,8 +614,8 @@ def test_validate_haproxy_helper_config_calls_validate(): validate_mock.assert_called_once_with(config, cluster.HAPROXY_HELPER_SCHEMA) -def test_validate_haproxy_helper_config_raises_wazuh_error(): - """Verify that validate_haproxy_helper_config raises WazuhError when the is validation error.""" +def test_validate_haproxy_helper_config_ko(): + """Verify that validate_haproxy_helper_config raises WazuhError when there is a validation error.""" config = {cluster.AGENT_CHUNK_SIZE: 120, cluster.AGENT_RECONNECTION_TIME: 10} diff --git a/framework/wazuh/core/cluster/tests/test_utils.py b/framework/wazuh/core/cluster/tests/test_utils.py index 2c79d1d5502..c27a6f80232 100644 --- 
a/framework/wazuh/core/cluster/tests/test_utils.py +++ b/framework/wazuh/core/cluster/tests/test_utils.py @@ -97,7 +97,7 @@ def test_read_cluster_config(): } ) ) -def test_parse_haproxy_helper_config_return_correct_values(config: dict): +def test_parse_haproxy_helper_config(config: dict): """Verify that parse_haproxy_helper_config function returns the default configuration.""" ret_val = utils.parse_haproxy_helper_config(config) @@ -140,7 +140,7 @@ def test_parse_haproxy_helper_config_return_correct_values(config: dict): } ) ) -def test_parse_haproxy_helper_config_raises_with_an_invalid_type(config: dict): +def test_parse_haproxy_helper_config_ko(config: dict): """Verify that parse_haproxy_helper_config function raises when config has an invalid type.""" with pytest.raises(WazuhError, match='.* 3004 .*'): From 727cdcdd5dd52682176fe91b97cbae88347d4dff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Palomeque?= Date: Mon, 29 Apr 2024 12:53:29 -0300 Subject: [PATCH 363/419] Add HAProxy files --- framework/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/framework/Makefile b/framework/Makefile index 8b23d568cb7..b1fb9e6c779 100644 --- a/framework/Makefile +++ b/framework/Makefile @@ -34,12 +34,14 @@ install: $(INSTALL_DIR) $(INSTALLDIR)/framework/wazuh $(INSTALL_DIR) $(INSTALLDIR)/framework/wazuh/core/cluster $(INSTALL_DIR) $(INSTALLDIR)/framework/wazuh/core/cluster/dapi + $(INSTALL_DIR) $(INSTALLDIR)/framework/wazuh/core/cluster/hap_helper $(INSTALL_FILE) scripts/*.py ${INSTALLDIR}/framework/scripts $(INSTALL_FILE) wazuh/*.py ${INSTALLDIR}/framework/wazuh $(INSTALL_FILE) wazuh/core/cluster/*.json ${INSTALLDIR}/framework/wazuh/core/cluster $(INSTALL_FILE) wazuh/core/cluster/*.py ${INSTALLDIR}/framework/wazuh/core/cluster $(INSTALL_FILE) wazuh/core/cluster/dapi/*.py ${INSTALLDIR}/framework/wazuh/core/cluster/dapi + $(INSTALL_FILE) wazuh/core/cluster/hap_helper/*.py ${INSTALLDIR}/framework/wazuh/core/cluster/hap_helper # Remove update_ruleset script when upgrading to >=4.2.0 (deprecated) [ ! 
-e ${INSTALLDIR}/bin/update_ruleset ] || $(RM_FILE) ${INSTALLDIR}/bin/update_ruleset From 2e7e44eaf20e971941d56c20682b34f11a4e04a1 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Thu, 16 May 2024 20:27:18 -0300 Subject: [PATCH 364/419] Improved log messages and exceptions handling --- .../wazuh/core/cluster/hap_helper/hap_helper.py | 17 +++++++---------- .../wazuh/core/cluster/hap_helper/proxy.py | 2 +- framework/wazuh/core/exception.py | 8 ++++---- 3 files changed, 12 insertions(+), 15 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index adcf9eb2e3d..5953ae47019 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -83,13 +83,8 @@ def _get_logger(tag: str) -> logging.Logger: async def initialize_proxy(self): """Initialize HAProxy.""" - try: - await self.proxy.initialize() - self.logger.info('Proxy was initialized') - except WazuhHAPHelperError as init_exc: - self.logger.critical('Cannot initialize the proxy') - self.logger.critical(init_exc) - raise + await self.proxy.initialize() + self.logger.info('Proxy was initialized') async def initialize_wazuh_cluster_configuration(self): """Initialize main components of the Wazuh cluster.""" @@ -283,7 +278,7 @@ async def migrate_old_connections(self, new_servers: list[str], deleted_servers: await self.update_agent_connections(agent_list=agents_to_balance) self.logger.info('Waiting for agent connections stability') - self.logger.debug(f'Sleeping {self.agent_reconnection_stability_time}s...') + self.logger.debug(f'Sleeping {self.agent_reconnection_stability_time}s, waiting for agents reconnection...') await asyncio.sleep(self.agent_reconnection_stability_time) def check_for_balance(self, current_connections_distribution: dict) -> dict: @@ -417,12 +412,12 @@ async def manage_wazuh_cluster_nodes(self): await asyncio.sleep(self.AGENT_STATUS_SYNC_TIME) await self.balance_agents(affected_servers=unbalanced_connections) - self.logger.debug(f'Sleeping {self.sleep_time}s...') + self.logger.debug(f'Sleeping {self.sleep_time}s before next cycle...') await asyncio.sleep(self.sleep_time) except WazuhException as handled_exc: self.logger.error(str(handled_exc)) self.logger.warning( - f'Tasks may not perform as expected. Sleeping {self.sleep_time}s ' 'before continuing...' + f'Tasks may not perform as expected. Sleeping {self.sleep_time}s before trying again...' ) await asyncio.sleep(self.sleep_time) @@ -531,6 +526,8 @@ async def start(cls): await helper.manage_wazuh_cluster_nodes() except KeyError as exc: logger.error(f'Missing configuration {exc}. 
The helper cannot start.') + except WazuhHAPHelperError as exc: + logger.error(exc) except KeyboardInterrupt: pass except Exception as unexpected_exc: diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index 082e68ab64a..f9073c1066d 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -182,7 +182,7 @@ async def _make_hap_request( elif response.status_code == 401: raise WazuhHAPHelperError(3046) else: - raise WazuhHAPHelperError(3045, extra_message=f'Full response: {response.status_code} | {response.json()}') + raise WazuhHAPHelperError(3045, extra_message=response.json()['message']) async def update_configuration_version(self): """Get the last version of the configuration schema and set it.""" diff --git a/framework/wazuh/core/exception.py b/framework/wazuh/core/exception.py index 18badb539e0..b5b68b9ad67 100755 --- a/framework/wazuh/core/exception.py +++ b/framework/wazuh/core/exception.py @@ -500,12 +500,12 @@ class WazuhException(Exception): # HAProxy Helper exceptions 3041: "Server status check timed out after adding new servers", 3042: "User configuration is not valid", - 3043: "Cannot initialize Proxy API", - 3044: "Unexpected error trying to connect to Proxy API", - 3045: "Unexpected response from the Proxy API", + 3043: "Could not initialize Proxy API", + 3044: "Could not connect to the HAProxy dataplane API", + 3045: "Could not connect to HAProxy", 3046: "Invalid credentials for the Proxy API", 3047: "Invalid HAProxy Dataplane API specification configured", - 3048: "Cannot detect a valid HAProxy process linked to the Dataplane API", + 3048: "Could not detect a valid HAProxy process linked to the Dataplane API", 3049: "Unexpected response from HAProxy Dataplane API", # RBAC exceptions From 2f100e066c82b24e4bb2964299244149802222a9 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Thu, 16 May 2024 21:25:09 -0300 Subject: [PATCH 365/419] Fix UT --- .../cluster/hap_helper/tests/test_proxy.py | 74 ++++++------------- 1 file changed, 22 insertions(+), 52 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py b/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py index 2332fb35931..e227ffc2f48 100644 --- a/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py @@ -100,7 +100,11 @@ async def test_initialize_ko( ( [{'status_code': 401, 'is_success': False}, None, 3046], [ - {'status_code': random.choice([403, 404, 500]), 'is_success': False, 'json.return_value': '{}'}, + { + 'status_code': random.choice([403, 404, 500]), + 'is_success': False, + 'json.return_value': {'message': 'error'}, + }, None, 3045, ], @@ -128,9 +132,7 @@ async def test_proxy_method_ko( with pytest.raises(WazuhHAPHelperError, match=f'.*{expected}.*'): await getattr(proxy_api, method)(**f_kwargs) - async def test_update_configuration_version( - self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock - ): + async def test_update_configuration_version(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): """Check that `update_configuration_version` method sets the correct version.""" endpoint = 'services/haproxy/configuration/version' @@ -150,9 +152,7 @@ async def test_update_configuration_version( ) assert proxy_api.version == version - async def test_get_runtime_info( - self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock - ): + async def test_get_runtime_info(self, proxy_api: 
ProxyAPI, request_mock: mock.AsyncMock): """Check the correct output of `get_runtime_info` method.""" endpoint = 'services/haproxy/runtime/info' @@ -172,9 +172,7 @@ async def test_get_runtime_info( ) assert ret_val == info - async def test_get_global_configuration( - self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock - ): + async def test_get_global_configuration(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): """Check the correct output of `get_global_configuration` method.""" endpoint = 'services/haproxy/configuration/global' @@ -194,9 +192,7 @@ async def test_get_global_configuration( ) assert ret_val == data - async def test_update_global_configuration( - self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock - ): + async def test_update_global_configuration(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): """Check that `update_globla_configuration` method makes the correct request.""" endpoint = 'services/haproxy/configuration/global' @@ -279,9 +275,7 @@ async def test_add_backend(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMo params={'version': 0}, ) - async def test_get_backend_servers( - self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock - ): + async def test_get_backend_servers(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): """Check the correct output of `get_backend_servers` method.""" endpoint = 'services/haproxy/configuration/servers' @@ -349,9 +343,7 @@ async def test_add_server_to_backend( params={'version': 0}, ) - async def test_remove_server_from_backend( - self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock - ): + async def test_remove_server_from_backend(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): """Check that `remove_server_from_backend` method makes the correct request.""" endpoint = 'services/haproxy/configuration/servers' @@ -436,9 +428,7 @@ async def test_add_frontend(self, proxy_api: ProxyAPI, request_mock: mock.AsyncM params={'force_reload': True, 'frontend': name, 'version': 1}, ) - async def test_get_backend_server_runtime_settings( - self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock - ): + async def test_get_backend_server_runtime_settings(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): """Check the correct output of `get_backend_server_runtime_settings` method.""" endpoint = 'services/haproxy/runtime/servers' @@ -492,9 +482,7 @@ async def test_change_backend_server_state( params={'backend': backend_name, 'version': 0}, ) - async def test_get_backend_stats( - self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock - ): + async def test_get_backend_stats(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): """Check the correct output of `get_backend_stats` method.""" endpoint = 'services/haproxy/stats/native' @@ -515,9 +503,7 @@ async def test_get_backend_stats( ) assert ret_val == data - async def test_get_backend_server_stats( - self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock - ): + async def test_get_backend_server_stats(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): """Check the correct output of `get_backend_server_stats` method.""" endpoint = 'services/haproxy/stats/native' @@ -570,9 +556,7 @@ async def test_initialize( assert proxy.hard_stop_after == expected @pytest.mark.parametrize('side_effect', [KeyError, IndexError]) - async def test_initialize_ko( - self, proxy_api_mock: mock.MagicMock, proxy: Proxy, side_effect: Exception - ): + async def test_initialize_ko(self, proxy_api_mock: mock.MagicMock, proxy: Proxy, side_effect: Exception): """Check 
the correct error handling of `initialize` method.""" proxy_api_mock.get_runtime_info.side_effect = side_effect @@ -645,9 +629,7 @@ async def test_exists_backend( with mock.patch.object(proxy, 'get_current_backends', return_value=current_backends): assert await proxy.exists_backend(backend) == expected - async def test_get_current_frontends( - self, proxy_api_mock: mock.MagicMock, proxy: Proxy - ): + async def test_get_current_frontends(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check the correct output of `get_current_frontends` method.""" frontends = [ @@ -694,9 +676,7 @@ async def test_add_new_frontend(self, proxy_api_mock: mock.MagicMock, proxy: Pro proxy_api_mock.add_frontend.assert_called_once_with(**parameters) - async def test_get_current_backend_servers( - self, proxy_api_mock: mock.MagicMock, proxy: Proxy - ): + async def test_get_current_backend_servers(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check the correct output of `get_current_backend_servers` method.""" servers = [ @@ -738,9 +718,7 @@ async def test_remove_wazuh_manager(self, proxy_api_mock: mock.MagicMock, proxy: backend=proxy.wazuh_backend, server_name=manager_name ) - async def test_restrain_server_new_connections( - self, proxy_api_mock: mock.MagicMock, proxy: Proxy - ): + async def test_restrain_server_new_connections(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check that `restrain_server_new_connections` method makes the correct callback.""" server_name = 'foo' @@ -751,9 +729,7 @@ async def test_restrain_server_new_connections( backend_name=proxy.wazuh_backend, server_name=server_name, state=ProxyServerState.DRAIN ) - async def test_allow_server_new_connections( - self, proxy_api_mock: mock.MagicMock, proxy: Proxy - ): + async def test_allow_server_new_connections(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check that `allow_server_new_connections` method makes the correct callback.""" server_name = 'foo' @@ -764,9 +740,7 @@ async def test_allow_server_new_connections( backend_name=proxy.wazuh_backend, server_name=server_name, state=ProxyServerState.READY ) - async def test_get_wazuh_server_stats( - self, proxy_api_mock: mock.MagicMock, proxy: Proxy - ): + async def test_get_wazuh_server_stats(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check the correct output of `get_wazuh_server_stats` method.""" stats = {'foo': 'bar'} @@ -797,9 +771,7 @@ async def test_is_server_drain( backend_name=proxy.wazuh_backend, server_name=server_name ) - async def test_get_wazuh_backend_stats( - self, proxy_api_mock: mock.MagicMock, proxy: Proxy - ): + async def test_get_wazuh_backend_stats(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check the correct output of `get_wazuh_backend_stats` method.""" servers = [ @@ -817,9 +789,7 @@ async def test_get_wazuh_backend_stats( for server in servers: server_stats_mock.assert_any_call(server_name=server['name']) - async def test_get_wazuh_backend_server_connections( - self, proxy_api_mock: mock.MagicMock, proxy: Proxy - ): + async def test_get_wazuh_backend_server_connections(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check the correct output of `get_wazuh_backend_server_connections` method.""" stats = { From 2178dc7cb950c724ea83d3a356dc2f4e16ef3f1a Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Fri, 17 May 2024 16:01:56 -0300 Subject: [PATCH 366/419] Rename default HAProxy backend --- framework/wazuh/core/cluster/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/framework/wazuh/core/cluster/utils.py b/framework/wazuh/core/cluster/utils.py index 32f1747315e..b62f5479bf0 100644 --- a/framework/wazuh/core/cluster/utils.py +++ b/framework/wazuh/core/cluster/utils.py @@ -50,7 +50,7 @@ HELPER_DEFAULTS = { HAPROXY_PORT: 5555, HAPROXY_PROTOCOL: 'http', - HAPROXY_BACKEND: 'wazuh_cluster', + HAPROXY_BACKEND: 'wazuh_reporting', HAPROXY_RESOLVER: None, EXCLUDED_NODES: [], FREQUENCY: 60, From 581363af3689a7c62372913a346d51cd2a4a2334 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Tue, 21 May 2024 15:33:37 -0300 Subject: [PATCH 367/419] Add warning for multiple frontends binding the same port --- .../core/cluster/hap_helper/hap_helper.py | 10 ++++- .../wazuh/core/cluster/hap_helper/proxy.py | 42 +++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index 5953ae47019..b91003f0d02 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -476,6 +476,7 @@ async def start(cls): try: helper_config = read_cluster_config()['haproxy_helper'] port_config = get_ossec_conf(section='remote') + connection_port = int(port_config.get('remote')[0].get('port', CONNECTION_PORT)) proxy_api = ProxyAPI( username=helper_config[HAPROXY_USER], @@ -487,7 +488,7 @@ async def start(cls): ) proxy = Proxy( wazuh_backend=helper_config[HAPROXY_BACKEND], - wazuh_connection_port=int(port_config.get('remote')[0].get('port', CONNECTION_PORT)), + wazuh_connection_port=connection_port, proxy_api=proxy_api, tag=tag, resolver=helper_config[HAPROXY_RESOLVER], @@ -512,6 +513,13 @@ async def start(cls): await helper.initialize_proxy() + if await helper.proxy.check_multiple_frontends(port=connection_port): + logger.warning( + f'Exists several frontends binding the port "{connection_port}". ' + 'To ensure the proper function of the helper, ' + f'keep only the one related to the backend "{helper_config[HAPROXY_BACKEND]}".', + ) + if helper.proxy.hard_stop_after is not None: sleep_time = max(helper.proxy.hard_stop_after, cls.get_connection_retry()) helper.logger.info(f'Ensuring only exists one HAProxy process. Sleeping {sleep_time}s before start...') diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index f9073c1066d..417ad473c82 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -488,6 +488,19 @@ async def get_backend_server_stats(self, backend_name: str, server_name: str) -> return await self._make_hap_request('services/haproxy/stats/native', query_parameters=query_params) + async def get_binds(self, frontend: str) -> PROXY_API_RESPONSE: + """Returns the binds configured for the given frontend. + + Returns + ------- + PROXY_API_RESPONSE + Information of configured frontends. 
+        """ +        query_parameters = {'frontend': frontend} +        return await self._make_hap_request( +            endpoint='services/haproxy/configuration/binds', query_parameters=query_parameters +        ) +   class Proxy: def __init__( @@ -634,6 +647,7 @@ async def get_current_frontends(self) -> dict: """ api_response = await self.api.get_frontends() self.logger.debug2('Obtained proxy frontends') + return {frontend['name']: frontend for frontend in api_response['data'] if 'default_backend' in frontend} async def exists_frontend(self, frontend_name: str) -> bool: @@ -651,6 +665,34 @@ async def exists_frontend(self, frontend_name: str) -> bool: """ return frontend_name in await self.get_current_frontends() + async def check_multiple_frontends(self, port: int) -> bool: + """Check if there are multiple frontends binding the given port. + + Parameters + ---------- + port : int + Port number to check. + + Returns + ------- + bool + True if multiple frontends exist, else False. + """ + self.logger.debug(f'Checking multiple frontends for port {port}') + frontends = await self.get_current_frontends() + port_bind_exists = False + + for frontend in frontends.keys(): + data = (await self.api.get_binds(frontend=frontend))['data'] + binds = [bind for bind in data if bind.get('port') == port] + + if binds and port_bind_exists: + return True + elif binds: + port_bind_exists = True + + return False + async def add_new_backend( self, name: str, From 587da73295843b46da2a674d9f753f0751b00e33 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Tue, 21 May 2024 17:09:04 -0300 Subject: [PATCH 368/419] Add unit tests --- .../hap_helper/tests/test_hap_helper.py | 83 ++++++++++--------- .../cluster/hap_helper/tests/test_proxy.py | 38 +++++++++ 2 files changed, 82 insertions(+), 39 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py b/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py index 266d0d0563b..76f1b037dad 100644 --- a/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py @@ -172,9 +172,7 @@ async def test_check_node_to_delete( proxy_mock.get_wazuh_server_stats.assert_called_once_with(server_name=node_name) assert ret_val == expected -    async def test_backend_servers_state_healthcheck( -        self, helper: HAPHelper, proxy_mock: mock.MagicMock -    ): +    async def test_backend_servers_state_healthcheck(self, helper: HAPHelper, proxy_mock: mock.MagicMock): """Check that `backend_servers_state_healthcheck` method makes the correct callbacks.""" WORKER1 = 'worker1' WORKER2 = 'worker2' @@ -352,9 +350,7 @@ async def test_migrate_old_connections_ko( [{'worker1': 0, 'worker2': 4, 'worker3': 0}, {'worker2': 3}], ), ) -    async def test_check_for_balance( -        self, helper: HAPHelper, distribution: dict, expected: dict -    ): +    async def test_check_for_balance(self, helper: HAPHelper, distribution: dict, expected: dict): """Check the correct output of `check_for_balance` method.""" assert helper.check_for_balance(current_connections_distribution=distribution) == expected @@ -533,9 +529,7 @@ async def test_set_hard_stop_after( else: update_agent_connections_mock.assert_not_called() -    async def test_get_connection_retry( -        self, helper: HAPHelper, proxy_mock: mock.MagicMock -    ): +    async def test_get_connection_retry(self, helper: HAPHelper, proxy_mock: mock.MagicMock): """Check the correct output of `get_connection_retry` method.""" CONNECTION_RETRY = 10 @@ -546,6 +540,7 @@ async def test_get_connection_retry( assert 
helper.get_connection_retry() == CONNECTION_RETRY + 2 @pytest.mark.parametrize('hard_stop_after', [None, 8, 12]) + @pytest.mark.parametrize('multiple_frontends', [True, False]) async def test_start( self, read_cluster_config_mock: mock.MagicMock, @@ -555,6 +550,7 @@ async def test_start( dapi_mock: mock.MagicMock, sleep_mock: mock.AsyncMock, hard_stop_after: int | None, + multiple_frontends: bool, ): """Check that `start` method makes the correct callbacks.""" HAPROXY_USER_VALUE = 'test' @@ -595,6 +591,7 @@ async def test_start( proxy_api_mock.return_value = proxy_api proxy = mock.MagicMock(hard_stop_after=hard_stop_after) + proxy.check_multiple_frontends = mock.AsyncMock(return_value=multiple_frontends) proxy_mock.return_value = proxy dapi = mock.MagicMock() @@ -603,48 +600,56 @@ async def test_start( read_cluster_config_mock.return_value = {HAPROXY_HELPER: HELPER_CONFIG} get_ossec_conf.return_value = {'remote': [{'port': WAZUH_PORT}]} + logger_mock = mock.MagicMock() + connection_retry = 10 with mock.patch.object(HAPHelper, 'initialize_proxy', new=mock.AsyncMock()): with mock.patch.object(HAPHelper, 'get_connection_retry', return_value=connection_retry): with mock.patch.object(HAPHelper, 'initialize_wazuh_cluster_configuration', new=mock.AsyncMock()): with mock.patch.object(HAPHelper, 'set_hard_stop_after', new=mock.AsyncMock()): with mock.patch.object(HAPHelper, 'manage_wazuh_cluster_nodes', new=mock.AsyncMock()): - await HAPHelper.start() - - proxy_api_mock.assert_called_once_with( - username=HAPROXY_USER_VALUE, - password=HAPROXY_PASSWORD_VALUE, - tag=TAG, - address=HAPROXY_ADDRESS_VALUE, - port=HAPROXY_PORT_VALUE, - protocol=HAPROXY_PROTOCOL_VALUE, - ) - - proxy_mock.assert_called_once_with( - wazuh_backend=HAPROXY_BACKEND_VALUE, - wazuh_connection_port=WAZUH_PORT, - proxy_api=proxy_api, - tag=TAG, - resolver=HAPROXY_RESOLVER_VALUE, - ) - - dapi_mock.assert_called_once_with(tag=TAG, excluded_nodes=EXCLUDED_NODES_VALUE) + with mock.patch.object(HAPHelper, '_get_logger', return_value=logger_mock): + await HAPHelper.start() + + proxy_api_mock.assert_called_once_with( + username=HAPROXY_USER_VALUE, + password=HAPROXY_PASSWORD_VALUE, + tag=TAG, + address=HAPROXY_ADDRESS_VALUE, + port=HAPROXY_PORT_VALUE, + protocol=HAPROXY_PROTOCOL_VALUE, + ) + + proxy_mock.assert_called_once_with( + wazuh_backend=HAPROXY_BACKEND_VALUE, + wazuh_connection_port=WAZUH_PORT, + proxy_api=proxy_api, + tag=TAG, + resolver=HAPROXY_RESOLVER_VALUE, + ) + + dapi_mock.assert_called_once_with(tag=TAG, excluded_nodes=EXCLUDED_NODES_VALUE) + + HAPHelper.initialize_proxy.assert_called_once() + + proxy.check_multiple_frontends.assert_called_once_with(port=WAZUH_PORT) + if multiple_frontends: + logger_mock.warning.assert_called_once() + else: + assert logger_mock.call_count == 0 - HAPHelper.initialize_proxy.assert_called_once() - if hard_stop_after is not None: - sleep_mock.assert_called_once_with(max(hard_stop_after, connection_retry)) + if hard_stop_after is not None: + sleep_mock.assert_called_once_with(max(hard_stop_after, connection_retry)) - HAPHelper.initialize_wazuh_cluster_configuration.assert_called_once() + HAPHelper.initialize_wazuh_cluster_configuration.assert_called_once() - if hard_stop_after is None: - HAPHelper.set_hard_stop_after.assert_called_once() + if hard_stop_after is None: + HAPHelper.set_hard_stop_after.assert_called_once() - HAPHelper.manage_wazuh_cluster_nodes.assert_called_once() + HAPHelper.manage_wazuh_cluster_nodes.assert_called_once() @pytest.mark.parametrize('exception', [KeyError(), 
KeyboardInterrupt(), WazuhHAPHelperError(3046)]) - async def test_start_ko( - self, read_cluster_config_mock: mock.MagicMock, exception: Exception - ): + async def test_start_ko(self, read_cluster_config_mock: mock.MagicMock, exception: Exception): """Check the correct error handling of `start` method.""" read_cluster_config_mock.side_effect = exception logger_mock = mock.MagicMock() diff --git a/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py b/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py index e227ffc2f48..8dbfa3b6bb3 100644 --- a/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py @@ -525,6 +525,27 @@ async def test_get_backend_server_stats(self, proxy_api: ProxyAPI, request_mock: ) assert ret_val == data + async def test_get_binds(self, proxy_api: ProxyAPI, request_mock: mock.AsyncMock): + """Check the correct output of `get_binds` method.""" + + endpoint = 'services/haproxy/configuration/binds' + data = {'data': {'foo': 1, 'bar': 2}} + request_mock.return_value = mock.MagicMock( + **{'status_code': 200, 'is_success': True, 'json.return_value': data} + ) + frontend = 'baz' + + ret_val = await proxy_api.get_binds(frontend=frontend) + + request_mock.assert_called_once_with( + method=ProxyAPIMethod.GET.value, + url=f'{proxy_api.protocol}://{proxy_api.address}:{proxy_api.port}/v2/{endpoint}', + auth=(proxy_api.username, proxy_api.password), + json=None, + params={'version': 0, 'frontend': frontend}, + ) + assert ret_val == data + class TestProxy: @pytest.fixture @@ -654,6 +675,23 @@ async def test_exists_frontend( with mock.patch.object(proxy, 'get_current_frontends', return_value=current_frontends): assert await proxy.exists_frontend(frontend) == expected + @pytest.mark.parametrize( + 'binds,expected', + ( + [({'data': [{'port': '1514'}]}, {'data': [{'port': '1514'}]}), True], + [({'data': [{'port': '1514'}]}, {'data': [{'port': '2000'}]}), False], + ), + ) + async def test_check_multiple_frontends( + self, proxy_api_mock: mock.MagicMock, proxy: Proxy, binds: tuple, expected: bool + ): + """Check the correct output of `check_multiple_frontends` method.""" + current_frontends = {'frontend1': {}, 'frontend2': {}} + proxy_api_mock.get_binds.side_effect = binds + + with mock.patch.object(proxy, 'get_current_frontends', return_value=current_frontends): + assert await proxy.check_multiple_frontends('1514') == expected + async def test_add_new_backend(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check that `add_new_backend` method makes the correct callback.""" From 18029a84ebc9776a604c144875ea607be6f6eabf Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Wed, 22 May 2024 11:10:57 -0300 Subject: [PATCH 369/419] Suggestions from CR --- framework/wazuh/core/cluster/hap_helper/hap_helper.py | 2 +- framework/wazuh/core/cluster/hap_helper/proxy.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index b91003f0d02..c37d80f112a 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -515,7 +515,7 @@ async def start(cls): if await helper.proxy.check_multiple_frontends(port=connection_port): logger.warning( - f'Exists several frontends binding the port "{connection_port}". ' + f'Several frontends exist binding the port "{connection_port}". 
' 'To ensure the proper function of the helper, ' f'keep only the one related to the backend "{helper_config[HAPROXY_BACKEND]}".', ) diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index 417ad473c82..3ea0e4224c3 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -491,6 +491,11 @@ async def get_backend_server_stats(self, backend_name: str, server_name: str) -> async def get_binds(self, frontend: str) -> PROXY_API_RESPONSE: """Returns the binds configured for the given frontend. + Parameters + ---------- + frontend : str + Frontend to query. + Returns ------- PROXY_API_RESPONSE From 23e3dbb1e2c4a7e083a04a7d60e796b6314899d9 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Thu, 23 May 2024 13:40:41 -0300 Subject: [PATCH 370/419] Fix check behavior --- .../core/cluster/hap_helper/hap_helper.py | 4 +++- .../wazuh/core/cluster/hap_helper/proxy.py | 14 ++++++----- .../hap_helper/tests/test_hap_helper.py | 6 +++-- .../cluster/hap_helper/tests/test_proxy.py | 23 +++++++++++++++---- 4 files changed, 34 insertions(+), 13 deletions(-) diff --git a/framework/wazuh/core/cluster/hap_helper/hap_helper.py b/framework/wazuh/core/cluster/hap_helper/hap_helper.py index c37d80f112a..0e03c2d0286 100644 --- a/framework/wazuh/core/cluster/hap_helper/hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/hap_helper.py @@ -513,7 +513,9 @@ async def start(cls): await helper.initialize_proxy() - if await helper.proxy.check_multiple_frontends(port=connection_port): + if await helper.proxy.check_multiple_frontends( + port=connection_port, frontend_to_skip=f'{helper.proxy.wazuh_backend}_front' + ): logger.warning( f'Several frontends exist binding the port "{connection_port}". ' 'To ensure the proper function of the helper, ' f'keep only the one related to the backend "{helper_config[HAPROXY_BACKEND]}".', ) diff --git a/framework/wazuh/core/cluster/hap_helper/proxy.py b/framework/wazuh/core/cluster/hap_helper/proxy.py index 3ea0e4224c3..e23a1c8d303 100644 --- a/framework/wazuh/core/cluster/hap_helper/proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/proxy.py @@ -670,13 +670,16 @@ async def exists_frontend(self, frontend_name: str) -> bool: """ return frontend_name in await self.get_current_frontends() - async def check_multiple_frontends(self, port: int) -> bool: + async def check_multiple_frontends(self, port: int, frontend_to_skip: str) -> bool: """Check if there are multiple frontends binding the given port. Parameters ---------- port : int Port number to check. + frontend_to_skip: str + Skip the check for the given frontend name. 
+ Returns ------- @@ -685,16 +688,15 @@ async def check_multiple_frontends(self, port: int) -> bool: """ self.logger.debug(f'Checking multiple frontends for port {port}') frontends = await self.get_current_frontends() - port_bind_exists = False for frontend in frontends.keys(): + if frontend == frontend_to_skip: + continue + data = (await self.api.get_binds(frontend=frontend))['data'] binds = [bind for bind in data if bind.get('port') == port] - - if binds and port_bind_exists: + if binds: return True - elif binds: - port_bind_exists = True return False diff --git a/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py b/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py index 76f1b037dad..1617d7cca2b 100644 --- a/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py +++ b/framework/wazuh/core/cluster/hap_helper/tests/test_hap_helper.py @@ -590,7 +590,7 @@ async def test_start( proxy_api = mock.MagicMock() proxy_api_mock.return_value = proxy_api - proxy = mock.MagicMock(hard_stop_after=hard_stop_after) + proxy = mock.MagicMock(hard_stop_after=hard_stop_after, wazuh_backend=HAPROXY_BACKEND_VALUE) proxy.check_multiple_frontends = mock.AsyncMock(return_value=multiple_frontends) proxy_mock.return_value = proxy @@ -632,7 +632,9 @@ async def test_start( HAPHelper.initialize_proxy.assert_called_once() - proxy.check_multiple_frontends.assert_called_once_with(port=WAZUH_PORT) + proxy.check_multiple_frontends.assert_called_once_with( + port=WAZUH_PORT, frontend_to_skip=f'{HAPROXY_BACKEND_VALUE}_front' + ) if multiple_frontends: logger_mock.warning.assert_called_once() else: diff --git a/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py b/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py index 8dbfa3b6bb3..d35df8a1651 100644 --- a/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py +++ b/framework/wazuh/core/cluster/hap_helper/tests/test_proxy.py @@ -678,19 +678,34 @@ async def test_exists_frontend( @pytest.mark.parametrize( 'binds,expected', ( - [({'data': [{'port': '1514'}]}, {'data': [{'port': '1514'}]}), True], - [({'data': [{'port': '1514'}]}, {'data': [{'port': '2000'}]}), False], + [ + ({'data': [{'name': 'bar_bind', 'port': '1514'}]}, {'data': [{'name': 'baz_bind'}]}), + True, + ], + [ + ({'data': [{'name': 'bar_bind', 'port': '2000'}]}, {'data': [{'name': 'baz_bind'}]}), + False, + ], + [ + ({'data': [{'name': 'bar_bind', 'port': '2000'}]}, {'data': [{'name': 'baz_bind', 'port': '1516'}]}), + False, + ], + [ + ({'data': [{'name': 'bar_bind'}]}, {'data': [{'name': 'baz_bind', 'port': '1514'}]}), + True, + ], ), ) async def test_check_multiple_frontends( self, proxy_api_mock: mock.MagicMock, proxy: Proxy, binds: tuple, expected: bool ): """Check the correct output of `check_multiple_frontends` method.""" - current_frontends = {'frontend1': {}, 'frontend2': {}} + FRONTEND1 = 'foo' + current_frontends = {FRONTEND1: {}, 'bar': {}, 'baz': {}} proxy_api_mock.get_binds.side_effect = binds with mock.patch.object(proxy, 'get_current_frontends', return_value=current_frontends): - assert await proxy.check_multiple_frontends('1514') == expected + assert await proxy.check_multiple_frontends('1514', frontend_to_skip=FRONTEND1) == expected async def test_add_new_backend(self, proxy_api_mock: mock.MagicMock, proxy: Proxy): """Check that `add_new_backend` method makes the correct callback.""" From efb7bc307cc08a2f9a2a1d89fc11f0950fc21a36 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Fri, 24 May 2024 11:06:44 -0300 Subject: [PATCH 371/419] 
Update AIT mapping --- .../integration_test_api_endpoints.json | 87 ++++++++++++------- 1 file changed, 56 insertions(+), 31 deletions(-) diff --git a/api/test/integration/mapping/integration_test_api_endpoints.json b/api/test/integration/mapping/integration_test_api_endpoints.json index 14350e827c9..e26de375bc3 100644 --- a/api/test/integration/mapping/integration_test_api_endpoints.json +++ b/api/test/integration/mapping/integration_test_api_endpoints.json @@ -329,15 +329,6 @@ "test_rbac_white_task_endpoints.tavern.yaml", "test_task_endpoints.tavern.yaml" ] - }, - { - "name": "event_controller.py", - "tag": "webhook", - "tests": [ - "test_rbac_black_webhook_endpoints.tavern.yaml", - "test_rbac_white_webhook_endpoints.tavern.yaml", - "test_webhook_endpoints.tavern.yaml" - ] } ] }, @@ -672,15 +663,6 @@ "test_rbac_white_task_endpoints.tavern.yaml", "test_task_endpoints.tavern.yaml" ] - }, - { - "name": "event.py", - "tag": "webhook", - "tests": [ - "test_rbac_black_webhook_endpoints.tavern.yaml", - "test_rbac_white_webhook_endpoints.tavern.yaml", - "test_webhook_endpoints.tavern.yaml" - ] } ] }, @@ -750,17 +732,6 @@ "test_cluster_endpoints.tavern.yaml" ] }, - { - "name": "database.py", - "tag": "database", - "tests": [ - "test_agent_DELETE_endpoints.tavern.yaml", - "test_agent_GET_endpoints.tavern.yaml", - "test_agent_POST_endpoints.tavern.yaml", - "test_agent_PUT_endpoints.tavern.yaml", - "test_cluster_endpoints.tavern.yaml" - ] - }, { "name": "decoder.py", "tag": "decoder", @@ -1095,6 +1066,44 @@ } ] }, + { + "path": "framework/wazuh/core/cluster/hap_helper", + "files": [ + { + "name": "hap_helper.py", + "tag": "hap", + "tests": [ + "test_agent_DELETE_endpoints.tavern.yaml", + "test_agent_GET_endpoints.tavern.yaml", + "test_agent_POST_endpoints.tavern.yaml", + "test_agent_PUT_endpoints.tavern.yaml", + "test_cluster_endpoints.tavern.yaml" + ] + }, + { + "name": "proxy.py", + "tag": "proxy", + "tests": [ + "test_agent_DELETE_endpoints.tavern.yaml", + "test_agent_GET_endpoints.tavern.yaml", + "test_agent_POST_endpoints.tavern.yaml", + "test_agent_PUT_endpoints.tavern.yaml", + "test_cluster_endpoints.tavern.yaml" + ] + }, + { + "name": "wazuh.py", + "tag": "wazuh", + "tests": [ + "test_agent_DELETE_endpoints.tavern.yaml", + "test_agent_GET_endpoints.tavern.yaml", + "test_agent_POST_endpoints.tavern.yaml", + "test_agent_PUT_endpoints.tavern.yaml", + "test_cluster_endpoints.tavern.yaml" + ] + } + ] + }, { "path": "framework/wazuh/core/cluster/tests", "files": [ @@ -2123,7 +2132,23 @@ ] }, { - "path": "api/test/integration/env/base/manager", + "path": "api/test/integration/env/base/cti", + "files": [ + { + "name": "http_server.py", + "tag": "http", + "tests": [ + "test_agent_DELETE_endpoints.tavern.yaml", + "test_agent_GET_endpoints.tavern.yaml", + "test_agent_POST_endpoints.tavern.yaml", + "test_agent_PUT_endpoints.tavern.yaml", + "test_cluster_endpoints.tavern.yaml" + ] + } + ] + }, + { + "path": "api/test/integration/env/base/haproxy-lb", "files": [ { "name": "entrypoint.sh", @@ -2139,7 +2164,7 @@ ] }, { - "path": "api/test/integration/env/base/haproxy-lb", + "path": "api/test/integration/env/base/manager", "files": [ { "name": "entrypoint.sh", From 0921c234f33f037b7ae8b17fd90c57e58503987e Mon Sep 17 00:00:00 2001 From: Tomas Turina Date: Wed, 29 May 2024 16:19:24 +0000 Subject: [PATCH 372/419] build: bump revision to 40720 --- api/api/spec/spec.yaml | 2 +- framework/wazuh/core/cluster/__init__.py | 2 +- src/Doxyfile | 2 +- src/REVISION | 2 +- src/init/wazuh-client.sh | 2 +- 
src/init/wazuh-local.sh | 2 +- src/init/wazuh-server.sh | 2 +- src/win32/wazuh-installer.nsi | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api/api/spec/spec.yaml b/api/api/spec/spec.yaml index d4ac5f0e4b4..516b137171d 100644 --- a/api/api/spec/spec.yaml +++ b/api/api/spec/spec.yaml @@ -41,7 +41,7 @@ info: version: '4.7.5' - x-revision: '40719' + x-revision: '40720' title: 'Wazuh API REST' license: name: 'GPL 2.0' diff --git a/framework/wazuh/core/cluster/__init__.py b/framework/wazuh/core/cluster/__init__.py index c49a33b76f0..4b2dc343dda 100644 --- a/framework/wazuh/core/cluster/__init__.py +++ b/framework/wazuh/core/cluster/__init__.py @@ -5,7 +5,7 @@ # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 __version__ = '4.7.5' -__revision__ = '40719' +__revision__ = '40720' __author__ = "Wazuh Inc" __wazuh_name__ = "Wazuh" __licence__ = "\ diff --git a/src/Doxyfile b/src/Doxyfile index 599e3fd3f7f..7cf81896ba3 100644 --- a/src/Doxyfile +++ b/src/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = "WAZUH" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = "v4.7.5-40719" +PROJECT_NUMBER = "v4.7.5-40720" # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/src/REVISION b/src/REVISION index bcc76d3f7a1..6e0f9dad2ee 100644 --- a/src/REVISION +++ b/src/REVISION @@ -1 +1 @@ -40719 +40720 diff --git a/src/init/wazuh-client.sh b/src/init/wazuh-client.sh index 7742369b336..63b7634b2c5 100755 --- a/src/init/wazuh-client.sh +++ b/src/init/wazuh-client.sh @@ -12,7 +12,7 @@ DIR=`dirname $PWD`; # Installation info VERSION="v4.7.5" -REVISION="40719" +REVISION="40720" TYPE="agent" ### Do not modify below here ### diff --git a/src/init/wazuh-local.sh b/src/init/wazuh-local.sh index be49ea3496c..b80427858c6 100644 --- a/src/init/wazuh-local.sh +++ b/src/init/wazuh-local.sh @@ -14,7 +14,7 @@ PLIST=${DIR}/bin/.process_list; # Installation info VERSION="v4.7.5" -REVISION="40719" +REVISION="40720" TYPE="local" ### Do not modify below here ### diff --git a/src/init/wazuh-server.sh b/src/init/wazuh-server.sh index a1800dcd007..f6fd30de6a2 100755 --- a/src/init/wazuh-server.sh +++ b/src/init/wazuh-server.sh @@ -14,7 +14,7 @@ PLIST=${DIR}/bin/.process_list; # Installation info VERSION="v4.7.5" -REVISION="40719" +REVISION="40720" TYPE="server" ### Do not modify below here ### diff --git a/src/win32/wazuh-installer.nsi b/src/win32/wazuh-installer.nsi index 06e15139504..e88876deea3 100644 --- a/src/win32/wazuh-installer.nsi +++ b/src/win32/wazuh-installer.nsi @@ -21,7 +21,7 @@ !define MUI_ICON install.ico !define MUI_UNICON uninstall.ico !define VERSION "4.7.5" -!define REVISION "40719" +!define REVISION "40720" !define NAME "Wazuh" !define SERVICE "WazuhSvc" From 12af0aa8b31fbb3167b607186b03abd2b77b2e4a Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Wed, 29 May 2024 18:03:18 -0300 Subject: [PATCH 373/419] Add hap_helper to RPM SPEC --- packages/rpms/SPECS/wazuh-manager.spec | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/rpms/SPECS/wazuh-manager.spec b/packages/rpms/SPECS/wazuh-manager.spec index 97b1ac0553d..56e66f2f9d3 100644 --- a/packages/rpms/SPECS/wazuh-manager.spec +++ b/packages/rpms/SPECS/wazuh-manager.spec @@ -698,6 +698,8 @@ rm -fr %{buildroot} %dir %attr(750, root, wazuh) %{_localstatedir}/framework/wazuh/core/cluster %attr(640, root, wazuh) 
%{_localstatedir}/framework/wazuh/core/cluster/*.py %attr(640, root, wazuh) %{_localstatedir}/framework/wazuh/core/cluster/*.json +%dir %attr(750, root, wazuh) %{_localstatedir}/framework/wazuh/core/cluster/hap_helper +%attr(640, root, wazuh) %{_localstatedir}/framework/wazuh/core/cluster/hap_helper/*.py %dir %attr(750, root, wazuh) %{_localstatedir}/framework/wazuh/core/cluster/dapi %attr(640, root, wazuh) %{_localstatedir}/framework/wazuh/core/cluster/dapi/*.py %dir %attr(750, root, wazuh) %{_localstatedir}/integrations From 107e46a2942df48128865fe9d52b595044f8409b Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Thu, 30 May 2024 00:27:38 -0300 Subject: [PATCH 374/419] Fixes for coverity issues (uncheck return value and unhandled exception) --- src/shared_modules/keystore/src/main.cpp | 12 ++++++------ .../src/vulnerabilityScannerFacade.cpp | 5 ++++- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/shared_modules/keystore/src/main.cpp b/src/shared_modules/keystore/src/main.cpp index 5fe4c42bb5b..90ba7011a50 100644 --- a/src/shared_modules/keystore/src/main.cpp +++ b/src/shared_modules/keystore/src/main.cpp @@ -23,16 +23,16 @@ namespace Log int main(int argc, char* argv[]) { - // Define current working directory - std::filesystem::path home_path = Utils::findHomeDirectory(); - std::filesystem::current_path(home_path); - std::string family; std::string key; std::string value; try { + // Define current working directory + std::filesystem::path home_path = Utils::findHomeDirectory(); + std::filesystem::current_path(home_path); + CmdLineArgs args(argc, argv); family = args.getColumnFamily(); @@ -43,13 +43,13 @@ int main(int argc, char* argv[]) } catch (const CmdLineArgsException& e) { - std::cerr << e.what() << std::endl; + std::cerr << e.what() << "\n"; CmdLineArgs::showHelp(); return 1; } catch (const std::exception& e) { - std::cerr << e.what() << std::endl; + std::cerr << e.what() << "\n"; return 1; } diff --git a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp index a85996b110e..51ad316191b 100644 --- a/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp +++ b/src/wazuh_modules/vulnerability_scanner/src/vulnerabilityScannerFacade.cpp @@ -379,7 +379,10 @@ void VulnerabilityScannerFacade::start( // Query the current database version. std::string databaseVersion; - stateDB->get(VD_DATABASE_VERSION_KEY, databaseVersion); + if (stateDB->get(VD_DATABASE_VERSION_KEY, databaseVersion)) + { + logDebug1(WM_VULNSCAN_LOGTAG, "Database version: %s", databaseVersion.c_str()); + } // Decompress database content. if (decompressDatabase(databaseVersion) && !m_shouldStop.load()) From 10a462da325c131b512d466711692d24c907dc26 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Thu, 30 May 2024 11:59:13 -0300 Subject: [PATCH 375/419] Added new logs to gcloud.py. 
--- wodles/gcloud/gcloud.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/wodles/gcloud/gcloud.py b/wodles/gcloud/gcloud.py index fcaafab9c68..40adaaa6c51 100644 --- a/wodles/gcloud/gcloud.py +++ b/wodles/gcloud/gcloud.py @@ -29,6 +29,7 @@ def main(): num_processed_messages = 0 if arguments.integration_type == "pubsub": + logger.info("Working with Google Cloud Pub/Sub") if arguments.subscription_id is None: raise exceptions.GCloudError(1200) if arguments.project is None: @@ -61,6 +62,7 @@ def main(): # check permissions subscriber_client = WazuhGCloudSubscriber(credentials_file, project, logger, subscription_id) + logger.info("Checking credentials") subscriber_client.check_permissions() messages_per_thread = max_messages // n_threads remaining_messages = max_messages % n_threads @@ -74,6 +76,7 @@ def main(): num_processed_messages = sum([future.result() for future in futures]) elif arguments.integration_type == "access_logs": + logger.info("Working with Google Cloud Access Logs") if not arguments.bucket_name: raise exceptions.GCloudError(1103) @@ -83,6 +86,7 @@ def main(): "only_logs_after": arguments.only_logs_after, "reparse": arguments.reparse} integration = GCSAccessLogs(arguments.credentials_file, logger, **f_kwargs) + logger.info("Checking credentials") integration.check_permissions() num_processed_messages = integration.process_data() From 3cc564716dd4f009d9e18594bd58a0c8d19aa004 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Thu, 30 May 2024 12:31:44 -0300 Subject: [PATCH 376/419] Added new logs to azure_services. --- wodles/azure/azure_services/analytics.py | 11 ++++++++++- wodles/azure/azure_services/graph.py | 14 +++++++++++++- wodles/azure/azure_services/storage.py | 10 ++++++++-- 3 files changed, 31 insertions(+), 4 deletions(-) diff --git a/wodles/azure/azure_services/analytics.py b/wodles/azure/azure_services/analytics.py index ebbecebb51e..44ac62a9700 100644 --- a/wodles/azure/azure_services/analytics.py +++ b/wodles/azure/azure_services/analytics.py @@ -39,10 +39,12 @@ def start_log_analytics(args): # Read credentials if args.la_auth_path and args.la_tenant_domain: + logging.info(f"Log Analytics: Using the auth file {args.la_auth_path} for authentication") client, secret = read_auth_file( auth_path=args.la_auth_path, fields=('application_id', 'application_key') ) elif args.la_id and args.la_key and args.la_tenant_domain: + logging.info(f"Log Analytics: Using id and key from configuration for authentication") logging.warning( DEPRECATED_MESSAGE.format( name='la_id and la_key', release='4.4', url=CREDENTIALS_URL @@ -140,17 +142,21 @@ def build_log_analytics_query( else: # Build the filter taking into account the min and max values if desired_datetime < min_datetime: + logging.info(f"Log Analytics: Making request query for the following intervals: " + f"from {desired_str} to {min_str} and from {max_str}") filter_value = ( f'( TimeGenerated < {min_str} and TimeGenerated >= {desired_str}) or ' f'( TimeGenerated > {max_str})' ) elif desired_datetime > max_datetime: + logging.info(f"Log Analytics: Making request for the following interval: from {desired_str}") filter_value = f'TimeGenerated >= {desired_str}' else: + logging.info(f"Log Analytics: Making request for the following interval: from {max_str}") filter_value = f'TimeGenerated > {max_str}' query = f'{query} | order by TimeGenerated asc | where {filter_value} ' - logging.info(f'Log Analytics: The search starts for query: "{query}"') + logging.debug(f'Log Analytics: The search starts for query: "{query}"') return 
{'query': query} @@ -176,6 +182,7 @@ def get_log_analytics_events( If the response for the request is not 200 OK. """ logging.info('Log Analytics: Sending a request to the Log Analytics API.') + logging.debug(f"Log Analytics request - URL: {url} - Params: {body} - Headers: {headers}") response = get(url, params=body, headers=headers) if response.status_code == 200: try: @@ -202,6 +209,7 @@ def get_log_analytics_events( f'Error: It was not possible to obtain the columns and rows from the event: "{e}".' ) else: + logging.error(f"Error with Log Analytics request: {response.json()}") response.raise_for_status() @@ -249,4 +257,5 @@ def iter_log_analytics_events(columns: list, rows: list, tag: str): for c in range(0, len(columns)): event[columns[c]['name']] = row[c] logging.info('Log Analytics: Sending event by socket.') + logging.debug(f"Event send to socket: {event}") send_message(dumps(event)) diff --git a/wodles/azure/azure_services/graph.py b/wodles/azure/azure_services/graph.py index 6f69b6ec246..6f1d6919e5a 100644 --- a/wodles/azure/azure_services/graph.py +++ b/wodles/azure/azure_services/graph.py @@ -38,10 +38,12 @@ def start_graph(args): # Read credentials if args.graph_auth_path and args.graph_tenant_domain: + logging.info(f"Graph: Using the auth file {args.graph_auth_path} for authentication") client, secret = read_auth_file( auth_path=args.graph_auth_path, fields=('application_id', 'application_key') ) elif args.graph_id and args.graph_key and args.graph_tenant_domain: + logging.info(f"Graph: Using id and key from configuration for authentication") logging.warning( DEPRECATED_MESSAGE.format( name='graph_id and graph_key', release='4.4', url=CREDENTIALS_URL @@ -135,16 +137,20 @@ def build_graph_url(query: str, offset: str, reparse: bool, md5_hash: str): # Build the filter taking into account the min and max values from the file else: if desired_datetime < min_datetime: + logging.info(f"Graph: Making request query for the following intervals: " + f"from {desired_str} to {min_str} and from {max_str}") filter_value = ( f'({filtering_condition}+lt+{min_str}+and+{filtering_condition}+ge+{desired_str})' f'+or+({filtering_condition}+gt+{max_str})' ) elif desired_datetime > max_datetime: + logging.info(f"Graph: Making request for the following interval: from {desired_str}") filter_value = f'{filtering_condition}+ge+{desired_str}' else: + logging.info(f"Graph: Making request for the following interval: from {max_str}") filter_value = f'{filtering_condition}+gt+{max_str}' - logging.info(f'Graph: The search starts for query: "{query}" using {filter_value}') + logging.debug(f'Graph: The search starts for query: "{query}" using {filter_value}') return f'{URL_GRAPH}/v1.0/{query}{"?" if "?" not in query else ""}&$filter={filter_value}' @@ -165,6 +171,9 @@ def get_graph_events(url: str, headers: dict, md5_hash: str, query: str, tag: st HTTPError If the response for the request is not 200 OK. 
""" + + logging.debug(f"Graph request - URL: {url} - Headers: {headers}") + logging.info("Graph: Requesting data") response = get(url=url, headers=headers) if response.status_code == 200: @@ -194,6 +203,8 @@ def get_graph_events(url: str, headers: dict, md5_hash: str, query: str, tag: st next_url = response_json.get('@odata.nextLink') if next_url: + logging.info(f"Graph: Requesting data from next page") + logging.debug(f"Iterating to next url: {next_url}") get_graph_events( url=next_url, headers=headers, md5_hash=md5_hash, query=query, tag=tag ) @@ -203,4 +214,5 @@ def get_graph_events(url: str, headers: dict, md5_hash: str, query: str, tag: st f'Ensure the URL is valid and there is data available for the specified datetime.' ) else: + logging.error(f"Error with Graph request: {response.json()}") response.raise_for_status() diff --git a/wodles/azure/azure_services/storage.py b/wodles/azure/azure_services/storage.py index b63bc3e6546..7ef613a507b 100644 --- a/wodles/azure/azure_services/storage.py +++ b/wodles/azure/azure_services/storage.py @@ -38,10 +38,12 @@ def start_storage(args): # Read credentials logging.info('Storage: Authenticating.') if args.storage_auth_path: + logging.info(f"Storage: Using path {args.storage_auth_path} for authentication") name, key = read_auth_file( auth_path=args.storage_auth_path, fields=('account_name', 'account_key') ) elif args.account_name and args.account_key: + logging.info(f"Storage: Using path account name and account key for authentication") logging.warning( DEPRECATED_MESSAGE.format( name='account_name and account_key', release='4.4', url=CREDENTIALS_URL @@ -67,6 +69,7 @@ def start_storage(args): f'Storage: The "{args.container}" container does not exists.' ) sys.exit(1) + logging.info(f"Storage: Getting the specified containers: {args.container}") containers = [args.container] except AzureException: logging.error( @@ -75,7 +78,7 @@ def start_storage(args): sys.exit(1) else: try: - logging.info('Storage: Getting containers.') + logging.info("Storage: Getting all containers.") containers = [ container.name for container in block_blob_service.list_containers() ] @@ -171,7 +174,7 @@ def get_blobs( """ try: # Get the blob list - logging.info('Storage: Getting blobs.') + logging.info(f"Storage: Getting blobs from container {container_name}.") blobs = blob_service.list_blobs( container_name, prefix=prefix, marker=next_marker ) @@ -208,10 +211,12 @@ def get_blobs( last_modified < desired_datetime or (min_datetime <= last_modified <= max_datetime) ): + logging.info(f"Storage: Skipping blob {blob.name} due to being already processed") continue # Get the blob data try: + logging.info(f"Getting data from blob {blob.name}") data = blob_service.get_blob_to_text(container_name, blob.name) except (ValueError, AzureException, AzureHttpError) as e: logging.error(f'Storage: Error reading the blob data: "{e}".') @@ -265,6 +270,7 @@ def get_blobs( # Continue until no marker is returned if blobs.next_marker: + logging.debug(f"Iteration to next marker: {blobs.next_marker}") get_blobs( container_name=container_name, blob_service=blob_service, From 3dbf7ed30dfa291496957b537ebe73f1b9a719cd Mon Sep 17 00:00:00 2001 From: RamosFe Date: Thu, 30 May 2024 15:45:14 -0300 Subject: [PATCH 377/419] Added the error function to the AWS wodle. 
--- wodles/aws/aws_tools.py | 8 ++++++++ wodles/aws/buckets_s3/aws_bucket.py | 18 +++++++++--------- wodles/aws/buckets_s3/config.py | 6 +++++- wodles/aws/buckets_s3/guardduty.py | 2 +- wodles/aws/buckets_s3/server_access.py | 8 ++++---- wodles/aws/buckets_s3/umbrella.py | 6 +++++- wodles/aws/buckets_s3/vpcflow.py | 4 ++-- wodles/aws/buckets_s3/waf.py | 8 ++++++-- wodles/aws/services/cloudwatchlogs.py | 8 ++++---- wodles/aws/subscribers/s3_log_handler.py | 2 +- wodles/aws/subscribers/sqs_queue.py | 2 +- wodles/aws/tests/test_cloudwatchlogs.py | 2 +- .../azure/tests/azure_services/test_graph.py | 3 --- 13 files changed, 47 insertions(+), 30 deletions(-) diff --git a/wodles/aws/aws_tools.py b/wodles/aws/aws_tools.py index 39e0efb68ba..a8a158e9e49 100644 --- a/wodles/aws/aws_tools.py +++ b/wodles/aws/aws_tools.py @@ -122,6 +122,14 @@ def debug(msg, msg_level): print('DEBUG: {debug_msg}'.format(debug_msg=msg)) +def error(msg): + print('ERROR: {error_msg}'.format(error_msg=msg)) + + +def info(msg): + print('INFO: {msg}'.format(msg=msg)) + + def arg_valid_date(arg_string): try: parsed_date = datetime.strptime(arg_string, "%Y-%b-%d") diff --git a/wodles/aws/buckets_s3/aws_bucket.py b/wodles/aws/buckets_s3/aws_bucket.py index 98abdea8802..2af59cfbe00 100644 --- a/wodles/aws/buckets_s3/aws_bucket.py +++ b/wodles/aws/buckets_s3/aws_bucket.py @@ -284,7 +284,7 @@ def db_maintenance(self, aws_account_id=None, aws_region=None): 'aws_region': aws_region, 'retain_db_records': self.retain_db_records}) except Exception as e: - print(f"ERROR: Failed to execute DB cleanup - AWS Account ID: {aws_account_id} Region: {aws_region}: {e}") + aws_tools.error(f"Failed to execute DB cleanup - AWS Account ID: {aws_account_id} Region: {aws_region}: {e}") def marker_custom_date(self, aws_region: str, aws_account_id: str, date: datetime) -> str: """ @@ -362,7 +362,7 @@ def find_account_ids(self): sys.exit(1) except KeyError: - print(f"ERROR: No logs found in '{self.get_base_prefix()}'. Check the provided prefix and the location of " + aws_tools.error(f"No logs found in '{self.get_base_prefix()}'. Check the provided prefix and the location of " f"the logs for the bucket type '{aws_tools.get_script_arguments().type.lower()}'") sys.exit(18) @@ -480,7 +480,7 @@ def exception_handler(error_txt, error_code): except: aws_tools.debug("++ Failed to send message to Wazuh", 1) else: - print("ERROR: {}".format(error_txt)) + aws_tools.error(error_txt) sys.exit(error_code) try: @@ -635,7 +635,7 @@ def iter_files_in_bucket(self, aws_account_id=None, aws_region=None, **kwargs): else: error_message = f'ERROR: The "iter_files_in_bucket" request failed: {error}' exit_number = 1 - print(f"ERROR: {error_message}") + aws_tools.error(f"{error_message}") exit(exit_number) except Exception as err: @@ -643,7 +643,7 @@ def iter_files_in_bucket(self, aws_account_id=None, aws_region=None, **kwargs): aws_tools.debug(f"+++ Unexpected error: {err.message}", 2) else: aws_tools.debug(f"+++ Unexpected error: {err}", 2) - print(f"ERROR: Unexpected error querying/working with objects in S3: {err}") + aws_tools.error(f"Unexpected error querying/working with objects in S3: {err}") sys.exit(7) def check_bucket(self): @@ -655,7 +655,7 @@ def check_bucket(self): if 'CommonPrefixes' in page: break else: - print("ERROR: No files were found in '{0}'. No logs will be processed.".format(self.bucket_path)) + aws_tools.error("No files were found in '{0}'. 
No logs will be processed.".format(self.bucket_path)) exit(14) except botocore.exceptions.ClientError as error: @@ -674,10 +674,10 @@ def check_bucket(self): error_message = UNKNOWN_ERROR_MESSAGE.format(error=error) exit_number = 1 - print(f"ERROR: {error_message}") + aws_tools.error(f"{error_message}") exit(exit_number) except botocore.exceptions.EndpointConnectionError as e: - print(f"ERROR: {str(e)}") + aws_tools.error(f"{str(e)}") exit(15) @@ -947,4 +947,4 @@ def db_maintenance(self, aws_account_id=None, **kwargs): 'aws_account_id': aws_account_id if aws_account_id else self.aws_account_id, 'retain_db_records': self.retain_db_records}) except Exception as e: - print(f"ERROR: Failed to execute DB cleanup - Path: {self.bucket_path}: {e}") + aws_tools.error(f"ERROR: Failed to execute DB cleanup - Path: {self.bucket_path}: {e}") diff --git a/wodles/aws/buckets_s3/config.py b/wodles/aws/buckets_s3/config.py index 08ab85ce57a..cef2ac6f1b8 100644 --- a/wodles/aws/buckets_s3/config.py +++ b/wodles/aws/buckets_s3/config.py @@ -10,6 +10,10 @@ import aws_bucket +sys.path.insert(0, path.dirname(path.dirname(path.abspath(__file__)))) +import aws_tools + + class AWSConfigBucket(aws_bucket.AWSLogsBucket): """ @@ -76,7 +80,7 @@ def _remove_padding_zeros_from_marker(self, marker: str) -> str: parsed_date = re.sub(self._leading_zero_regex, r'/\g', date) return marker.replace(date, parsed_date) except AttributeError: - print(f"ERROR: There was an error while trying to extract a date from the marker '{marker}'") + aws_tools.error(f"There was an error while trying to extract a date from the marker '{marker}'") sys.exit(16) def marker_only_logs_after(self, aws_region: str, aws_account_id: str) -> str: diff --git a/wodles/aws/buckets_s3/guardduty.py b/wodles/aws/buckets_s3/guardduty.py index e3438ac56c7..746b3714d2e 100644 --- a/wodles/aws/buckets_s3/guardduty.py +++ b/wodles/aws/buckets_s3/guardduty.py @@ -39,7 +39,7 @@ def check_guardduty_type(self): aws_tools.debug(f"+++ Unexpected error: {err.message}", 2) else: aws_tools.debug(f"+++ Unexpected error: {err}", 2) - print(f"ERROR: Unexpected error querying/working with objects in S3: {err}") + aws_tools.error(f"Unexpected error querying/working with objects in S3: {err}") sys.exit(7) def get_service_prefix(self, account_id): diff --git a/wodles/aws/buckets_s3/server_access.py b/wodles/aws/buckets_s3/server_access.py index faf93a0ce8a..4360a91b3c1 100644 --- a/wodles/aws/buckets_s3/server_access.py +++ b/wodles/aws/buckets_s3/server_access.py @@ -56,7 +56,7 @@ def iter_files_in_bucket(self, aws_account_id: str = None, aws_region: str = Non "skipping it.", 1) continue else: - print(f"ERROR: The filename of {bucket_file['Key']} doesn't have the valid format.") + aws_tools.error(f"The filename of {bucket_file['Key']} doesn't have the valid format.") sys.exit(17) if not self._same_prefix(match_start, aws_account_id, aws_region): @@ -97,7 +97,7 @@ def iter_files_in_bucket(self, aws_account_id: str = None, aws_region: str = Non aws_tools.debug(f"+++ Unexpected error: {err.message}", 2) else: aws_tools.debug(f"+++ Unexpected error: {err}", 2) - print(f"ERROR: Unexpected error querying/working with objects in S3: {err}") + aws_tools.error(f"Unexpected error querying/working with objects in S3: {err}") sys.exit(7) def marker_only_logs_after(self, aws_region: str, aws_account_id: str) -> str: @@ -124,7 +124,7 @@ def check_bucket(self): try: bucket_objects = self.client.list_objects_v2(Bucket=self.bucket, Prefix=self.prefix, Delimiter='/') if not 'CommonPrefixes' in 
bucket_objects and not 'Contents' in bucket_objects: - print("ERROR: No files were found in '{0}'. No logs will be processed.".format(self.bucket_path)) + aws_tools.error("No files were found in '{0}'. No logs will be processed.".format(self.bucket_path)) exit(14) except botocore.exceptions.ClientError as error: error_code = error.response.get("Error", {}).get("Code") @@ -142,7 +142,7 @@ def check_bucket(self): error_message = UNKNOWN_ERROR_MESSAGE.format(error=error) exit_number = 1 - print(f"ERROR: {error_message}") + aws_tools.error(error_message) exit(exit_number) def load_information_from_file(self, log_key): diff --git a/wodles/aws/buckets_s3/umbrella.py b/wodles/aws/buckets_s3/umbrella.py index a32f0aae38f..1ab939ea3ad 100644 --- a/wodles/aws/buckets_s3/umbrella.py +++ b/wodles/aws/buckets_s3/umbrella.py @@ -3,8 +3,12 @@ # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 import csv +import sys +from os import path from aws_bucket import AWSCustomBucket +sys.path.insert(0, path.dirname(path.dirname(path.abspath(__file__)))) +import aws_tools class CiscoUmbrella(AWSCustomBucket): @@ -40,7 +44,7 @@ def load_information_from_file(self, log_key): 'destination_port', 'categories' ) else: - print("ERROR: Only 'dnslogs', 'proxylogs' or 'iplogs' are allowed for Cisco Umbrella") + aws_tools.error("Only 'dnslogs', 'proxylogs' or 'iplogs' are allowed for Cisco Umbrella") exit(12) csv_file = csv.DictReader(f, fieldnames=fieldnames, delimiter=',') diff --git a/wodles/aws/buckets_s3/vpcflow.py b/wodles/aws/buckets_s3/vpcflow.py index e0d56ef9643..8dc241faa47 100644 --- a/wodles/aws/buckets_s3/vpcflow.py +++ b/wodles/aws/buckets_s3/vpcflow.py @@ -163,7 +163,7 @@ def get_ec2_client(self, access_key, secret_key, region, profile_name=None): try: ec2_client = boto_session.client(service_name='ec2', **self.connection_config) except Exception as e: - print("Error getting EC2 client: {}".format(e)) + aws_tools.error("Error getting EC2 client: {}".format(e)) sys.exit(3) return ec2_client @@ -242,7 +242,7 @@ def db_maintenance(self, aws_account_id=None, aws_region=None, flow_log_id=None) 'flow_log_id': flow_log_id, 'retain_db_records': self.retain_db_records}) except Exception as e: - print(f"ERROR: Failed to execute DB cleanup - AWS Account ID: {aws_account_id} Region: {aws_region}: {e}") + aws_tools.error(f"Failed to execute DB cleanup - AWS Account ID: {aws_account_id} Region: {aws_region}: {e}") def _filter_bucket_files(self, bucket_files: list, **kwargs) -> Iterator[dict]: """Filter bucket files that contain the flow_log_id in the filename. 
diff --git a/wodles/aws/buckets_s3/waf.py b/wodles/aws/buckets_s3/waf.py index 7ca3904929c..cd73106be2b 100644 --- a/wodles/aws/buckets_s3/waf.py +++ b/wodles/aws/buckets_s3/waf.py @@ -4,8 +4,12 @@ import json import sys +from os import path from aws_bucket import AWSCustomBucket +sys.path.insert(0, path.dirname(path.dirname(path.abspath(__file__)))) +import aws_tools + class AWSWAFBucket(AWSCustomBucket): standard_http_headers = ['a-im', 'accept', 'accept-charset', 'accept-encoding', 'accept-language', @@ -45,13 +49,13 @@ def json_event_generator(data): headers[name] = element["value"] event['httpRequest']['headers'] = headers except (KeyError, TypeError): - print(f"ERROR: the {log_key} file doesn't have the expected structure.") + aws_tools.error(f"The {log_key} file doesn't have the expected structure.") if not self.skip_on_error: sys.exit(9) content.append(event) except json.JSONDecodeError: - print("ERROR: Events from {} file could not be loaded.".format(log_key.split('/')[-1])) + aws_tools.error("Events from {} file could not be loaded.".format(log_key.split('/')[-1])) if not self.skip_on_error: sys.exit(9) diff --git a/wodles/aws/services/cloudwatchlogs.py b/wodles/aws/services/cloudwatchlogs.py index 35a3911b36e..428d8fd905f 100644 --- a/wodles/aws/services/cloudwatchlogs.py +++ b/wodles/aws/services/cloudwatchlogs.py @@ -228,10 +228,10 @@ def remove_aws_log_stream(self, log_group, log_stream): aws_tools.debug('Removing log stream "{}" from log group "{}"'.format(log_group, log_stream), 1) self.client.delete_log_stream(logGroupName=log_group, logStreamName=log_stream) except botocore.exceptions.ClientError as err: - aws_tools.debug(f'ERROR: The "remove_aws_log_stream" request failed: {err}', 1) + aws_tools.debug(f'The "remove_aws_log_stream" request failed: {err}', 1) sys.exit(16) except Exception: - aws_tools.debug('Error trying to remove "{}" log stream from "{}" log group.'.format(log_stream, log_group), + aws_tools.debug('ERROR: Error trying to remove "{}" log stream from "{}" log group.'.format(log_stream, log_group), 0) def get_alerts_within_range(self, log_group, log_stream, token, start_time, end_time): @@ -493,9 +493,9 @@ def get_log_streams(self, log_group): aws_tools.debug('No log streams were found for log group "{}"'.format(log_group), 1) except botocore.exceptions.EndpointConnectionError as e: - print(f'ERROR: {str(e)}') + aws_tools.error(f'{str(e)}') except botocore.exceptions.ClientError as err: - aws_tools.debug(f'ERROR: The "get_log_streams" request failed: {err}', 1) + aws_tools.debug(f'The "get_log_streams" request failed: {err}', 1) sys.exit(16) except Exception: aws_tools.debug( diff --git a/wodles/aws/subscribers/s3_log_handler.py b/wodles/aws/subscribers/s3_log_handler.py index 64834bfadfc..c72565faab9 100644 --- a/wodles/aws/subscribers/s3_log_handler.py +++ b/wodles/aws/subscribers/s3_log_handler.py @@ -188,7 +188,7 @@ def obtain_logs(self, bucket: str, log_path: str) -> List[dict]: try: return [dict(full_log=event, source="custom") for event in f.read().splitlines()] except OSError: - print(f"ERROR: Data in the file does not seem to be plain text either.") + aws_tools.error(f"Data in the file does not seem to be plain text either.") sys.exit(9) def process_file(self, message_body: dict) -> None: diff --git a/wodles/aws/subscribers/sqs_queue.py b/wodles/aws/subscribers/sqs_queue.py index 44182736991..3f370652b77 100644 --- a/wodles/aws/subscribers/sqs_queue.py +++ b/wodles/aws/subscribers/sqs_queue.py @@ -81,7 +81,7 @@ def _get_sqs_url(self) -> str: 
aws_tools.debug(f'The SQS queue is: {url}', 2) return url except botocore.exceptions.ClientError: - print('ERROR: Queue does not exist, verify the given name') + aws_tools.error('Queue does not exist, verify the given name') sys.exit(20) def delete_message(self, message: dict) -> None: diff --git a/wodles/aws/tests/test_cloudwatchlogs.py b/wodles/aws/tests/test_cloudwatchlogs.py index 68cf23bf23d..17964602f9e 100644 --- a/wodles/aws/tests/test_cloudwatchlogs.py +++ b/wodles/aws/tests/test_cloudwatchlogs.py @@ -151,7 +151,7 @@ def test_aws_cloudwatchlogs_remove_aws_log_stream_handles_exceptions(mock_debug, instance.remove_aws_log_stream(TEST_LOG_GROUP, TEST_LOG_STREAM) mock_debug.assert_any_call( - 'Error trying to remove "{}" log stream from "{}" log group.'.format(TEST_LOG_STREAM, TEST_LOG_GROUP), 0) + 'ERROR: Error trying to remove "{}" log stream from "{}" log group.'.format(TEST_LOG_STREAM, TEST_LOG_GROUP), 0) mock_delete_log_stream.side_effect = botocore.exceptions.ClientError( {'Error': {'Code': utils.THROTTLING_ERROR_CODE}}, "name") diff --git a/wodles/azure/tests/azure_services/test_graph.py b/wodles/azure/tests/azure_services/test_graph.py index 728d0573dbc..6a833fd6946 100644 --- a/wodles/azure/tests/azure_services/test_graph.py +++ b/wodles/azure/tests/azure_services/test_graph.py @@ -127,7 +127,6 @@ def test_start_graph_ko_credentials(mock_logging): (PAST_DATE, PAST_DATE, PRESENT_DATE, True), ], ) -@patch('azure_utils.logging.info') @patch('azure_services.graph.offset_to_datetime') @patch('azure_services.graph.create_new_row') @patch('db.orm.get_row', return_value=None) @@ -135,7 +134,6 @@ def test_build_graph_url( mock_get, mock_create, mock_datetime, - mock_logging, min_date, max_date, desired_date, @@ -167,7 +165,6 @@ def test_build_graph_url( expected_str = f'{filtering_condition}+ge+{desired_date}' else: expected_str = f'{filtering_condition}+gt+{max_date}' - mock_logging.assert_called_once() assert URL_GRAPH in result assert query in result assert expected_str in result From 7abe5470fb540f11473debce8088054651a62ce8 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Thu, 30 May 2024 16:06:12 -0300 Subject: [PATCH 378/419] Replaced the last prints in the AWS wodle. --- wodles/aws/aws_s3.py | 8 +++--- wodles/aws/aws_tools.py | 6 ++--- wodles/aws/subscribers/s3_log_handler.py | 2 +- wodles/aws/wazuh_integration.py | 32 ++++++++++++------------ 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/wodles/aws/aws_s3.py b/wodles/aws/aws_s3.py index 736b66220d5..49b4f983b1e 100755 --- a/wodles/aws/aws_s3.py +++ b/wodles/aws/aws_s3.py @@ -152,8 +152,8 @@ def main(argv): elif options.subscriber: if options.subscriber.lower() == "security_lake": if options.aws_profile: - print( - "+++ ERROR: The AWS Security Lake integration does not make use of the Profile authentication " + aws_tools.error( + "The AWS Security Lake integration does not make use of the Profile authentication " f"method. 
Check the available ones for it in " f"{aws_tools.SECURITY_LAKE_IAM_ROLE_AUTHENTICATION_URL}") sys.exit(3) @@ -188,7 +188,7 @@ def main(argv): aws_tools.debug("+++ Error: {}".format(err), 2) if aws_tools.debug_level > 0: raise - print("ERROR: {}".format(err)) + aws_tools.error(str(err)) sys.exit(12) @@ -199,7 +199,7 @@ def main(argv): main(sys.argv[1:]) sys.exit(0) except Exception as e: - print("Unknown error: {}".format(e)) + aws_tools.error("Unknown error: {}".format(e)) if aws_tools.debug_level > 0: raise sys.exit(1) diff --git a/wodles/aws/aws_tools.py b/wodles/aws/aws_tools.py index a8a158e9e49..87ba9005f1e 100644 --- a/wodles/aws/aws_tools.py +++ b/wodles/aws/aws_tools.py @@ -305,13 +305,13 @@ def arg_validate_security_lake_auth_params(external_id: Optional[str], name: Opt """ if iam_role_arn is None: - print('ERROR: Used a subscriber but no --iam_role_arn provided.') + error('Used a subscriber but no --iam_role_arn provided.') sys.exit(21) if name is None: - print('ERROR: Used a subscriber but no --queue provided.') + error('Used a subscriber but no --queue provided.') sys.exit(21) if external_id is None: - print('ERROR: Used a subscriber but no --external_id provided.') + error('Used a subscriber but no --external_id provided.') sys.exit(21) diff --git a/wodles/aws/subscribers/s3_log_handler.py b/wodles/aws/subscribers/s3_log_handler.py index c72565faab9..3d769b0e5bd 100644 --- a/wodles/aws/subscribers/s3_log_handler.py +++ b/wodles/aws/subscribers/s3_log_handler.py @@ -360,7 +360,7 @@ def obtain_logs(self, bucket: str, log_path: str) -> List[dict]: return extracted_events except (json.JSONDecodeError, AttributeError): - print(f"ERROR: Data in the file does not contain JSON objects.") + aws_tools.error(f"Data in the file does not contain JSON objects.") sys.exit(9) def process_file(self, message_body: dict) -> None: diff --git a/wodles/aws/wazuh_integration.py b/wodles/aws/wazuh_integration.py index 3e3914a671b..90be89ef89c 100644 --- a/wodles/aws/wazuh_integration.py +++ b/wodles/aws/wazuh_integration.py @@ -134,7 +134,7 @@ def default_config(profile: str) -> dict: profile_config = {option: aws_config.get(profile, option) for option in aws_config.options(profile)} except configparser.NoSectionError: - print(f"No profile named: '{profile}' was found in the user config file") + aws_tools.error(f"No profile named: '{profile}' was found in the user config file") sys.exit(23) # Map Primary Botocore Config parameters with profile config file @@ -170,7 +170,7 @@ def default_config(profile: str) -> dict: profile_config=profile_config) except (KeyError, ValueError) as e: - print('Invalid key or value found in config '.format(e)) + aws_tools.error('Invalid key or value found in config '.format(e)) sys.exit(17) aws_tools.debug(f"Created Config object using profile: '{profile}' configuration", 2) @@ -235,7 +235,7 @@ def get_client(self, access_key, secret_key, profile, iam_role_arn, service_name **self.connection_config) except (botocore.exceptions.ClientError, botocore.exceptions.NoCredentialsError) as e: - print("ERROR: Access error: {}".format(e)) + aws_tools.error("Access error: {}".format(e)) sys.exit(3) return client @@ -252,7 +252,7 @@ def get_sts_client(self, access_key, secret_key, profile=None): try: sts_client = boto_session.client(service_name='sts', **self.connection_config) except Exception as e: - print("Error getting STS client: {}".format(e)) + aws_tools.error("Error getting STS client: {}".format(e)) sys.exit(3) return sts_client @@ -301,17 +301,17 @@ def send_msg(self, 
msg, dump_json=True): s.close() except socket.error as e: if e.errno == 111: - print("ERROR: Wazuh must be running.") + aws_tools.error("Wazuh must be running.") sys.exit(11) elif e.errno == 90: - print("ERROR: Message too long to send to Wazuh. Skipping message...") + aws_tools.error("Message too long to send to Wazuh. Skipping message...") aws_tools.debug('+++ ERROR: Message longer than buffer socket for Wazuh. Consider increasing rmem_max. ' 'Skipping message...', 1) else: - print("ERROR: Error sending message to wazuh: {}".format(e)) + aws_tools.error("Error sending message to wazuh: {}".format(e)) sys.exit(13) except Exception as e: - print("ERROR: Error sending message to wazuh: {}".format(e)) + aws_tools.error("Error sending message to wazuh: {}".format(e)) sys.exit(13) def _decompress_gzip(self, raw_object: io.BytesIO): @@ -334,7 +334,7 @@ def _decompress_gzip(self, raw_object: io.BytesIO): gzip_file.seek(0) return gzip_file except (gzip.BadGzipFile, zlib.error, TypeError): - print(f'ERROR: invalid gzip file received.') + aws_tools.error(f'Invalid gzip file received.') if not self.skip_on_error: sys.exit(8) @@ -355,7 +355,7 @@ def _decompress_zip(self, raw_object: io.BytesIO): zipfile_object = zipfile.ZipFile(raw_object, compression=zipfile.ZIP_DEFLATED) return io.TextIOWrapper(zipfile_object.open(zipfile_object.namelist()[0])) except zipfile.BadZipFile: - print('ERROR: invalid zip file received.') + aws_tools.error('Invalid zip file received.') if not self.skip_on_error: sys.exit(8) @@ -375,7 +375,7 @@ def decompress_file(self, bucket: str, log_key: str): elif log_key[-4:] == '.zip': return self._decompress_zip(raw_object) elif log_key[-7:] == '.snappy': - print(f"ERROR: couldn't decompress the {log_key} file, snappy compression is not supported.") + aws_tools.error(f"Couldn't decompress the {log_key} file, snappy compression is not supported.") if not self.skip_on_error: sys.exit(8) else: @@ -479,7 +479,7 @@ def create_table(self, sql_create_table): aws_tools.debug('+++ Table does not exist; create', 1) self.db_cursor.execute(sql_create_table) except Exception as e: - print("ERROR: Unable to create SQLite DB: {}".format(e)) + aws_tools.error("Unable to create SQLite DB: {}".format(e)) sys.exit(6) def init_db(self, sql_create_table): @@ -489,7 +489,7 @@ def init_db(self, sql_create_table): try: tables = set(map(operator.itemgetter(0), self.db_cursor.execute(self.sql_find_table_names))) except Exception as e: - print("ERROR: Unexpected error accessing SQLite DB: {}".format(e)) + aws_tools.error("Unexpected error accessing SQLite DB: {}".format(e)) sys.exit(5) # if table does not exist, create a new table if self.db_table_name not in tables: @@ -509,7 +509,7 @@ def check_metadata_version(self): if metadata_version != self.wazuh_version: self.db_cursor.execute(self.sql_update_version_metadata, {'wazuh_version': self.wazuh_version}) except (sqlite3.IntegrityError, sqlite3.OperationalError, sqlite3.Error) as err: - print(f'ERROR: Error attempting to update the metadata table: {err}') + aws_tools.error(f'Error attempting to update the metadata table: {err}') sys.exit(5) else: # The table does not exist; create it and insert the metadata value @@ -518,11 +518,11 @@ def check_metadata_version(self): self.db_cursor.execute(self.sql_insert_version_metadata, {'wazuh_version': self.wazuh_version}) self.delete_deprecated_tables() except (sqlite3.IntegrityError, sqlite3.OperationalError, sqlite3.Error) as err: - print(f'ERROR: Error attempting to create the metadata table: {err}') + 
aws_tools.error(f'Error attempting to create the metadata table: {err}') sys.exit(5) self.db_connector.commit() except (sqlite3.IntegrityError, sqlite3.OperationalError, sqlite3.Error) as err: - print(f'ERROR: Error attempting to operate with the {self.db_path} database: {err}') + aws_tools.error(f'Error attempting to operate with the {self.db_path} database: {err}') sys.exit(5) def delete_deprecated_tables(self): From d1b89b3788aaf4b0cd70ea058f00733b4367798a Mon Sep 17 00:00:00 2001 From: Dwordcito Date: Fri, 31 May 2024 00:51:29 -0300 Subject: [PATCH 379/419] Fix test, homebrew is unsupported at this moment. --- .../vulnerability_scanner/qa/test_data/011/expected_004.out | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_004.out b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_004.out index 6455d06940a..060852eb496 100644 --- a/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_004.out +++ b/src/wazuh_modules/vulnerability_scanner/qa/test_data/011/expected_004.out @@ -1,4 +1,3 @@ [ - "Initiating a vulnerability scan for package 'brotli' (pkg) ( ) with CVE Numbering Authorities (CNA) 'nvd' on Agent '' (ID: '002', Version: '').", - "The vendor information is not available for Package: brotli, Version: 1.1.0, CVE: CVE-2020-8927, Content vendor: google" + "Initiating a vulnerability scan for package 'brotli' (pkg) ( ) with CVE Numbering Authorities (CNA) 'homebrew' on Agent '' (ID: '002', Version: '')." ] From 43f61d2e234457f6d9b21a90f70d7021387f2524 Mon Sep 17 00:00:00 2001 From: Vikman Fernandez-Castro Date: Fri, 31 May 2024 09:59:08 +0200 Subject: [PATCH 380/419] docu: fix a PR confused between 4.7.5 and 4.8.0 --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c62898a3a3a..a42919ff0c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,8 +22,10 @@ All notable changes to this project will be documented in this file. - Improved Wazuh-DB performance by adjusting SQLite synchronization policy. ([#22774](https://github.com/wazuh/wazuh/pull/22774)) #### Fixed + - Updated cluster connection cleanup to remove temporary files when the connection between a worker and a master is broken. ([#17886](https://github.com/wazuh/wazuh/pull/17886)) - Added a mechanism to avoid cluster errors to raise from expected wazuh-db exceptions. ([#23371](https://github.com/wazuh/wazuh/pull/23371)) +- Fixed race condition when creating agent database files from a template. ([#23216](https://github.com/wazuh/wazuh/pull/23216)) ### Agent @@ -143,7 +145,7 @@ All notable changes to this project will be documented in this file. #### Fixed - Fixed an issue in a cluster task where full group synchronization was constantly triggered. ([#23447](https://github.com/wazuh/wazuh/pull/23447)) -- Fixed race condition when creating agent database files from a template. ([#23216](https://github.com/wazuh/wazuh/pull/23216)) +- Fixed a race condition in wazuh-db that might create corrupted database files. ([#23467](https://github.com/wazuh/wazuh/pull/23467)) ### Agent From 4d252e0d0970133acdd272967ec183530592e0b8 Mon Sep 17 00:00:00 2001 From: RamosFe Date: Fri, 31 May 2024 10:09:21 -0300 Subject: [PATCH 381/419] Changed info lvl msg to debug lvl. 
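
The messages downgraded here are per-request details (auth file in use, query
intervals, marker/page iteration); they are now emitted only when the logging
level is lowered to DEBUG, while higher-level progress messages stay at INFO.
A small illustrative sketch of that split, with the logger name, function and
interval values assumed for the example:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("wodle-sketch")

    def request_interval(desired_str: str, max_str: str) -> None:
        # high-level progress remains visible at the default INFO level
        logger.info("Log Analytics: Sending a request to the Log Analytics API.")
        # request internals are suppressed unless the level is lowered to DEBUG
        logger.debug("Making request for the following interval: from %s to %s",
                     desired_str, max_str)

    request_interval("2024-05-01T00:00:00Z", "2024-05-31T00:00:00Z")
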
--- wodles/aws/buckets_s3/config.py | 1 - wodles/aws/buckets_s3/umbrella.py | 1 + wodles/aws/services/cloudwatchlogs.py | 6 +++--- wodles/azure/azure_services/analytics.py | 16 ++++++++-------- wodles/azure/azure_services/graph.py | 12 ++++++------ wodles/azure/azure_services/storage.py | 4 ++-- wodles/gcloud/gcloud.py | 4 ++-- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/wodles/aws/buckets_s3/config.py b/wodles/aws/buckets_s3/config.py index cef2ac6f1b8..50231b5679b 100644 --- a/wodles/aws/buckets_s3/config.py +++ b/wodles/aws/buckets_s3/config.py @@ -14,7 +14,6 @@ import aws_tools - class AWSConfigBucket(aws_bucket.AWSLogsBucket): """ Represents a bucket with AWS Config logs diff --git a/wodles/aws/buckets_s3/umbrella.py b/wodles/aws/buckets_s3/umbrella.py index 1ab939ea3ad..5d8475cc112 100644 --- a/wodles/aws/buckets_s3/umbrella.py +++ b/wodles/aws/buckets_s3/umbrella.py @@ -10,6 +10,7 @@ sys.path.insert(0, path.dirname(path.dirname(path.abspath(__file__)))) import aws_tools + class CiscoUmbrella(AWSCustomBucket): def __init__(self, **kwargs): diff --git a/wodles/aws/services/cloudwatchlogs.py b/wodles/aws/services/cloudwatchlogs.py index 428d8fd905f..4b862d927a4 100644 --- a/wodles/aws/services/cloudwatchlogs.py +++ b/wodles/aws/services/cloudwatchlogs.py @@ -228,7 +228,7 @@ def remove_aws_log_stream(self, log_group, log_stream): aws_tools.debug('Removing log stream "{}" from log group "{}"'.format(log_group, log_stream), 1) self.client.delete_log_stream(logGroupName=log_group, logStreamName=log_stream) except botocore.exceptions.ClientError as err: - aws_tools.debug(f'The "remove_aws_log_stream" request failed: {err}', 1) + aws_tools.error(f'The "remove_aws_log_stream" request failed: {err}') sys.exit(16) except Exception: aws_tools.debug('ERROR: Error trying to remove "{}" log stream from "{}" log group.'.format(log_stream, log_group), @@ -289,7 +289,7 @@ def get_alerts_within_range(self, log_group, log_stream, token, start_time, end_ f'available. 
Attempting again.', 1) continue # Needed to make the get_log_events request again except botocore.exceptions.ClientError as err: - aws_tools.debug(f'ERROR: The "get_log_events" request failed: {err}', 1) + aws_tools.error(f'The "get_log_events" request failed: {err}') sys.exit(16) # Update token @@ -495,7 +495,7 @@ def get_log_streams(self, log_group): except botocore.exceptions.EndpointConnectionError as e: aws_tools.error(f'{str(e)}') except botocore.exceptions.ClientError as err: - aws_tools.debug(f'The "get_log_streams" request failed: {err}', 1) + aws_tools.error(f'The "get_log_streams" request failed: {err}') sys.exit(16) except Exception: aws_tools.debug( diff --git a/wodles/azure/azure_services/analytics.py b/wodles/azure/azure_services/analytics.py index 44ac62a9700..2581513026c 100644 --- a/wodles/azure/azure_services/analytics.py +++ b/wodles/azure/azure_services/analytics.py @@ -39,12 +39,12 @@ def start_log_analytics(args): # Read credentials if args.la_auth_path and args.la_tenant_domain: - logging.info(f"Log Analytics: Using the auth file {args.la_auth_path} for authentication") + logging.debug(f"Log Analytics: Using the auth file {args.la_auth_path} for authentication") client, secret = read_auth_file( auth_path=args.la_auth_path, fields=('application_id', 'application_key') ) elif args.la_id and args.la_key and args.la_tenant_domain: - logging.info(f"Log Analytics: Using id and key from configuration for authentication") + logging.debug(f"Log Analytics: Using id and key from configuration for authentication") logging.warning( DEPRECATED_MESSAGE.format( name='la_id and la_key', release='4.4', url=CREDENTIALS_URL @@ -94,7 +94,7 @@ def start_log_analytics(args): def build_log_analytics_query( - query: str, offset: str, reparse: bool, md5_hash: str + query: str, offset: str, reparse: bool, md5_hash: str ) -> dict: """Prepare and make the request, building the query based on the time of event generation. @@ -142,17 +142,17 @@ def build_log_analytics_query( else: # Build the filter taking into account the min and max values if desired_datetime < min_datetime: - logging.info(f"Log Analytics: Making request query for the following intervals: " - f"from {desired_str} to {min_str} and from {max_str}") + logging.debug(f"Log Analytics: Making request query for the following intervals: " + f"from {desired_str} to {min_str} and from {max_str}") filter_value = ( f'( TimeGenerated < {min_str} and TimeGenerated >= {desired_str}) or ' f'( TimeGenerated > {max_str})' ) elif desired_datetime > max_datetime: - logging.info(f"Log Analytics: Making request for the following interval: from {desired_str}") + logging.debug(f"Log Analytics: Making request for the following interval: from {desired_str}") filter_value = f'TimeGenerated >= {desired_str}' else: - logging.info(f"Log Analytics: Making request for the following interval: from {max_str}") + logging.debug(f"Log Analytics: Making request for the following interval: from {max_str}") filter_value = f'TimeGenerated > {max_str}' query = f'{query} | order by TimeGenerated asc | where {filter_value} ' @@ -161,7 +161,7 @@ def build_log_analytics_query( def get_log_analytics_events( - url: str, body: dict, headers: dict, md5_hash: str, query: str, tag: str + url: str, body: dict, headers: dict, md5_hash: str, query: str, tag: str ): """Get the logs, process the response and iterate the events. 
diff --git a/wodles/azure/azure_services/graph.py b/wodles/azure/azure_services/graph.py index 6f1d6919e5a..3c6e3caada4 100644 --- a/wodles/azure/azure_services/graph.py +++ b/wodles/azure/azure_services/graph.py @@ -38,12 +38,12 @@ def start_graph(args): # Read credentials if args.graph_auth_path and args.graph_tenant_domain: - logging.info(f"Graph: Using the auth file {args.graph_auth_path} for authentication") + logging.debug(f"Graph: Using the auth file {args.graph_auth_path} for authentication") client, secret = read_auth_file( auth_path=args.graph_auth_path, fields=('application_id', 'application_key') ) elif args.graph_id and args.graph_key and args.graph_tenant_domain: - logging.info(f"Graph: Using id and key from configuration for authentication") + logging.debug(f"Graph: Using id and key from configuration for authentication") logging.warning( DEPRECATED_MESSAGE.format( name='graph_id and graph_key', release='4.4', url=CREDENTIALS_URL @@ -137,17 +137,17 @@ def build_graph_url(query: str, offset: str, reparse: bool, md5_hash: str): # Build the filter taking into account the min and max values from the file else: if desired_datetime < min_datetime: - logging.info(f"Graph: Making request query for the following intervals: " - f"from {desired_str} to {min_str} and from {max_str}") + logging.debug(f"Graph: Making request query for the following intervals: " + f"from {desired_str} to {min_str} and from {max_str}") filter_value = ( f'({filtering_condition}+lt+{min_str}+and+{filtering_condition}+ge+{desired_str})' f'+or+({filtering_condition}+gt+{max_str})' ) elif desired_datetime > max_datetime: - logging.info(f"Graph: Making request for the following interval: from {desired_str}") + logging.debug(f"Graph: Making request for the following interval: from {desired_str}") filter_value = f'{filtering_condition}+ge+{desired_str}' else: - logging.info(f"Graph: Making request for the following interval: from {max_str}") + logging.debug(f"Graph: Making request for the following interval: from {max_str}") filter_value = f'{filtering_condition}+gt+{max_str}' logging.debug(f'Graph: The search starts for query: "{query}" using {filter_value}') diff --git a/wodles/azure/azure_services/storage.py b/wodles/azure/azure_services/storage.py index 7ef613a507b..4621bdae945 100644 --- a/wodles/azure/azure_services/storage.py +++ b/wodles/azure/azure_services/storage.py @@ -38,12 +38,12 @@ def start_storage(args): # Read credentials logging.info('Storage: Authenticating.') if args.storage_auth_path: - logging.info(f"Storage: Using path {args.storage_auth_path} for authentication") + logging.debug(f"Storage: Using path {args.storage_auth_path} for authentication") name, key = read_auth_file( auth_path=args.storage_auth_path, fields=('account_name', 'account_key') ) elif args.account_name and args.account_key: - logging.info(f"Storage: Using path account name and account key for authentication") + logging.debug(f"Storage: Using path account name and account key for authentication") logging.warning( DEPRECATED_MESSAGE.format( name='account_name and account_key', release='4.4', url=CREDENTIALS_URL diff --git a/wodles/gcloud/gcloud.py b/wodles/gcloud/gcloud.py index 40adaaa6c51..e0687714bfa 100644 --- a/wodles/gcloud/gcloud.py +++ b/wodles/gcloud/gcloud.py @@ -62,7 +62,7 @@ def main(): # check permissions subscriber_client = WazuhGCloudSubscriber(credentials_file, project, logger, subscription_id) - logger.info("Checking credentials") + logger.debug("Checking credentials") subscriber_client.check_permissions() 
messages_per_thread = max_messages // n_threads remaining_messages = max_messages % n_threads @@ -86,7 +86,7 @@ def main(): "only_logs_after": arguments.only_logs_after, "reparse": arguments.reparse} integration = GCSAccessLogs(arguments.credentials_file, logger, **f_kwargs) - logger.info("Checking credentials") + logger.debug("Checking credentials") integration.check_permissions() num_processed_messages = integration.process_data() From 4c5c836336b5823b33bf7551ba28bc1fa2334032 Mon Sep 17 00:00:00 2001 From: Nico Stefani Date: Fri, 31 May 2024 10:55:40 -0300 Subject: [PATCH 382/419] Specify validator and fix exclusive minimum --- framework/wazuh/core/cluster/cluster.py | 6 +++--- framework/wazuh/core/cluster/tests/test_cluster.py | 5 ++++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/framework/wazuh/core/cluster/cluster.py b/framework/wazuh/core/cluster/cluster.py index 9262645dd1c..cbada2fc290 100644 --- a/framework/wazuh/core/cluster/cluster.py +++ b/framework/wazuh/core/cluster/cluster.py @@ -16,7 +16,7 @@ from os import listdir, path, remove, stat, walk from uuid import uuid4 -from jsonschema import ValidationError, validate +from jsonschema import ValidationError, validate, validators from wazuh import WazuhError, WazuhException, WazuhInternalError from wazuh.core import common from wazuh.core.cluster.utils import ( @@ -52,7 +52,7 @@ AGENT_RECONNECTION_STABILITY_TIME: {'type': 'integer', 'minimum': 10}, AGENT_CHUNK_SIZE: {'type': 'integer', 'minimum': 100}, AGENT_RECONNECTION_TIME: {'type': 'integer', 'minimum': 0}, - IMBALANCE_TOLERANCE: {'type': 'number', 'minimum': 0, 'exclusiveMinimum': True, 'maximum': 1}, + IMBALANCE_TOLERANCE: {'type': 'number', 'exclusiveMinimum': 0, 'maximum': 1}, REMOVE_DISCONNECTED_NODE_AFTER: {'type': 'integer', 'minimum': 0}, }, } @@ -75,7 +75,7 @@ def validate_haproxy_helper_config(config: dict): If there any invalid value. 
""" try: - validate(config, HAPROXY_HELPER_SCHEMA) + validate(config, HAPROXY_HELPER_SCHEMA, cls=validators.Draft202012Validator) except ValidationError as error: raise WazuhError( 3004, diff --git a/framework/wazuh/core/cluster/tests/test_cluster.py b/framework/wazuh/core/cluster/tests/test_cluster.py index 8062b7c824c..3ab1602d9dd 100644 --- a/framework/wazuh/core/cluster/tests/test_cluster.py +++ b/framework/wazuh/core/cluster/tests/test_cluster.py @@ -11,6 +11,7 @@ from unittest.mock import ANY, MagicMock, call, mock_open, patch import pytest +from jsonschema import validators from wazuh.core import common with patch('wazuh.common.wazuh_uid'): @@ -611,7 +612,9 @@ def test_validate_haproxy_helper_config(): with patch.object(cluster, 'validate') as validate_mock: cluster.validate_haproxy_helper_config(config) - validate_mock.assert_called_once_with(config, cluster.HAPROXY_HELPER_SCHEMA) + validate_mock.assert_called_once_with( + config, cluster.HAPROXY_HELPER_SCHEMA, cls=validators.Draft202012Validator + ) def test_validate_haproxy_helper_config_ko(): From 563fa7fb61b918e9e13a795847b867f9206a7191 Mon Sep 17 00:00:00 2001 From: Openime Oniagbi Date: Fri, 31 May 2024 18:34:49 +0300 Subject: [PATCH 383/419] Update cis_centos8_linux.yml --- ruleset/sca/centos/8/cis_centos8_linux.yml | 6222 ++++++++++++-------- 1 file changed, 3649 insertions(+), 2573 deletions(-) diff --git a/ruleset/sca/centos/8/cis_centos8_linux.yml b/ruleset/sca/centos/8/cis_centos8_linux.yml index 92328a2bf18..14a6c9d0b5d 100644 --- a/ruleset/sca/centos/8/cis_centos8_linux.yml +++ b/ruleset/sca/centos/8/cis_centos8_linux.yml @@ -8,13 +8,13 @@ # Foundation # # Based on: -# Center for Internet Security CentOS Linux 8 Benchmark v1.0.0 - 10-31-2019 +# Center for Internet Security CentOS Linux 8 Benchmark v2.0.0 - 02-23-2022 policy: id: "cis_centos8_linux" file: "cis_centos8_linux.yml" - name: "CIS CentOS Linux 8 Benchmark v1.0.0" - description: "This document provides prescriptive guidance for establishing a secure configuration posture for CentOS Linux 8 systems running on x86 and x64 platforms. This document was tested against CentOS Linux 8" + name: "CIS CentOS Linux 8 Benchmark v2.0.0" + description: "This document provides prescriptive guidance for establishing a secure configuration posture for CentOS Linux 8 systems running on x86 and x64 platforms. This document was tested against CentOS Linux 8." references: - https://www.cisecurity.org/cis-benchmarks/ @@ -29,3347 +29,4423 @@ variables: $sshd_file: /etc/ssh/sshd_config checks: - ############################################### - # 1 Initial setup - ############################################### - ############################################### - # 1.1 Filesystem Configuration - ############################################### - # 1.1.1.1 cramfs: filesystem + # 1.1.1.1 Ensure mounting of cramfs filesystems is disabled. (Automated) - id: 6500 - title: "Ensure mounting of cramfs filesystems is disabled" + title: "Ensure mounting of cramfs filesystems is disabled." description: "The cramfs filesystem type is a compressed read-only Linux filesystem embedded in small footprint systems. A cramfs image can be used without having to first decompress the image." - rationale: "Removing support for unneeded filesystem types reduces the local attack surface of the server. If this filesystem type is not needed, disable it." 
- remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf .Example: vim /etc/modprobe.d/cramfs.conf: and add the following line: install cramfs /bin/true. Run the following command to unload the cramfs module: # rmmod cramfs" + rationale: "Removing support for unneeded filesystem types reduces the local attack surface of the system. If this filesystem type is not needed, disable it." + remediation: 'Edit or create a file in the /etc/modprobe.d/ directory ending in .conf with a line that reads install cramfs /bin/false and a line the reads blacklist cramfs. Example: # printf "install cramfs /bin/false blacklist cramfs " >> /etc/modprobe.d/cramfs.conf Run the following command to unload the cramfs module: # modprobe -r cramfs.' compliance: - cis: ["1.1.1.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.5"] - - tsc: ["CC6.3"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:modprobe -n -v cramfs -> r:install /bin/true|Module cramfs not found" + - "c:modprobe -n -v cramfs -> r:^install|Module cramfs not found" - "not c:lsmod -> r:cramfs" + - 'd:/etc/modprobe.d -> r:\.+ -> r:^blacklist\s*\t*cramfs' - # 1.1.1.2 vFAT: filesystem + # 1.1.1.2 Ensure mounting of squashfs filesystems is disabled. (Automated) - id: 6501 - title: "Ensure mounting of FAT filesystems is limited" - description: "The VFAT filesystem format is primarily used on older windows systems and portable USB drives or flash modules. It comes in three types FAT12 , FAT16 , and FAT32 all of which are supported by the vfat kernel module." + title: "Ensure mounting of squashfs filesystems is disabled." + description: "The squashfs filesystem type is a compressed read-only Linux filesystem embedded in small footprint systems. A squashfs image can be used without having to first decompress the image." rationale: "Removing support for unneeded filesystem types reduces the local attack surface of the system. If this filesystem type is not needed, disable it." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf . Example: vim /etc/modprobe.d/vfat.conf: install vfat /bin/true. Run the following command to unload the vfat module: # rmmod vfat" + impact: 'As Snap packages utilizes squashfs as a compressed filesystem, disabling squashfs will cause Snap packages to fail. Snap application packages of software are self-contained and work across a range of Linux distributions. This is unlike traditional Linux package management approaches, like APT or RPM, which require specifically adapted packages per Linux distribution on an application update and delay therefore application deployment from developers to their software''s end-user. Snaps themselves have no dependency on any external store ("App store"), can be obtained from any source and can be therefore used for upstream software deployment.' + remediation: 'Edit or create a file in the /etc/modprobe.d/ directory ending in .conf with the lines that reads install squashfs /bin/false and blacklist squashfs. Example: # printf "install squashfs /bin/false blacklist squashfs " >> /etc/modprobe.d/squashfs.conf Run the following command to unload the squashfs module: # modprobe -r squashfs.' 
compliance: - cis: ["1.1.1.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.5"] - - tsc: ["CC6.3"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:modprobe -n -v vfat -> r:install /bin/true|Module vfat not found" - - "not c:lsmod -> r:vfat" + - "c:modprobe -n -v squashfs -> r:^install|Module squashfs not found" + - "not c:lsmod -> r:squashfs" + - 'd:/etc/modprobe.d -> r:\.+ -> r:^blacklist\s*\t*squashfs' - # 1.1.1.3 squashfs: filesystem + # 1.1.1.3 Ensure mounting of udf filesystems is disabled. (Automated) - id: 6502 - title: "Ensure mounting of squashfs filesystems is disabled" - description: "The squashfs filesystem type is a compressed read-only Linux filesystem embedded in small footprint systems (similar to cramfs ). A squashfs image can be used without having to first decompress the image." + title: "Ensure mounting of udf filesystems is disabled." + description: "The udf filesystem type is the universal disk format used to implement ISO/IEC 13346 and ECMA-167 specifications. This is an open vendor filesystem type for data storage on a broad range of media. This filesystem type is necessary to support writing DVDs and newer optical disc formats." rationale: "Removing support for unneeded filesystem types reduces the local attack surface of the system. If this filesystem type is not needed, disable it." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf . Example: vim /etc/modprobe.d/squashfs.conf and add the following line: install squashfs /bin/true. Run the following command to unload the squashfs module: rmmod squashfs" + impact: "Microsoft Azure requires the usage of udf. udf should not be disabled on systems run on Microsoft Azure." + remediation: 'Edit or create a file in the /etc/modprobe.d/ directory ending in .conf with a line that reads install udf /bin/false. Example: # printf "install udf /bin/false blacklist udf " >> /etc/modprobe.d/udf.conf Run the following command to unload the udf module: # modprobe -r udf.' compliance: - cis: ["1.1.1.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.5"] - - tsc: ["CC6.3"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:modprobe -n -v squashfs -> r:install /bin/true|Module squashfs not found" - - "not c:lsmod -> r:squashfs" + - "c:modprobe -n -v udf -> r:^install|Module udf not found" + - "not c:lsmod -> r:udf" + - 'd:/etc/modprobe.d -> r:\.+ -> r:^blacklist\s*\t*udf' - # 1.1.1.4 udfs: filesystem + # 1.1.2.1 Ensure /tmp is a separate partition. (Automated) - id: 6503 - title: "Ensure mounting of udf filesystems is disabled" - description: "The udf filesystem type is the universal disk format used to implement ISO/IEC 13346 and ECMA-167 specifications. This is an open vendor filesystem type for data storage on a broad range of media. This filesystem type is necessary to support writing DVDs and newer optical disc formats." - rationale: "Removing support for unneeded filesystem types reduces the local attack surface of the system. If this filesystem type is not needed, disable it." 
- remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf. Example: vim /etc/modprobe.d/udf.conf and add the following line: install udf /bin/true. Run the following command to unload the udf module: # rmmod udf" - compliance: - - cis: ["1.1.1.4"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.5"] - - tsc: ["CC6.3"] - references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ - condition: all - rules: - - "c:modprobe -n -v udf -> r:install /bin/true|Module udf not found" - - "not c:lsmod -> r:udf" - # 1.1.2 /tmp: partition - - id: 6504 - title: "Ensure /tmp is configured" + title: "Ensure /tmp is a separate partition." description: "The /tmp directory is a world-writable directory used for temporary storage by all users and some applications." - rationale: "Making /tmp its own file system allows an administrator to set the noexec option on the mount, making /tmp useless for an attacker to install executable code. It would also prevent an attacker from establishing a hardlink to a system setuid program and wait for it to be updated. Once the program was updated, the hardlink would be broken and the attacker would have his own copy of the program. If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw. This can be accomplished by either mounting tmpfs to /tmp, or creating a separate partition for /tmp." - remediation: 'Configure /etc/fstab as appropriate. example: "tmpfs /tmp tmpfs defaults,rw,nosuid,nodev,noexec,relatime 0 0" OR Run the following commands to enable systemd /tmp mounting: # systemctl unmask tmp.mount # systemctl enable tmp.mount Edit /etc/systemd/system/local-fs.target.wants/tmp.mount to configure the /tmp mount: [Mount] What=tmpfs Where=/tmp Type=tmpfs Options=mode=1777,strictatime,noexec,nodev,nosuid' - compliance: - - cis: ["1.1.2"] - - cis_csc: ["9.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + rationale: "Making /tmp its own file system allows an administrator to set additional mount options such as the noexec option on the mount, making /tmp useless for an attacker to install executable code. It would also prevent an attacker from establishing a hard link to a system setuid program and wait for it to be updated. Once the program was updated, the hard link would be broken and the attacker would have his own copy of the program. If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw. This can be accomplished by either mounting tmpfs to /tmp, or creating a separate partition for /tmp." + impact: "Since the /tmp directory is intended to be world-writable, there is a risk of resource exhaustion if it is not bound to a separate partition. Running out of /tmp space is a problem regardless of what kind of filesystem lies under it, but in a configuration where /tmp is not a separate file system it will essentially have the whole disk available, as the default installation only creates a single / partition. On the other hand, a RAM-based /tmp (as with tmpfs) will almost certainly be much smaller, which can lead to applications filling up the filesystem much more easily. Another alternative is to create a dedicated partition for /tmp from a separate volume or disk. One of the downsides of a disk-based dedicated partition is that it will be slower than tmpfs which is RAM-based. /tmp utilizing tmpfs can be resized using the size={size} parameter in the relevant entry in /etc/fstab." 
+ remediation: "First ensure that systemd is correctly configured to ensure that /tmp will be mounted at boot time. # systemctl unmask tmp.mount For specific configuration requirements of the /tmp mount for your environment, modify /etc/fstab. Example of using tmpfs with specific mount options: tmpfs /tmp 0 tmpfs defaults,rw,nosuid,nodev,noexec,relatime,size=2G 0 Example of using a volume or disk with specific mount options. The source location of the volume or disk will vary depending on your environment. /tmp defaults,nodev,nosuid,noexec 0 0." references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - "https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems/" + - "https://www.freedesktop.org/software/systemd/man/systemd-fstab-generator.html" + compliance: + - cis: ["1.1.2.1"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - 'c:mount -> r:\s/tmp\s' - # 1.1.3 /tmp: nodev - - id: 6505 - title: "Ensure nodev option set on /tmp partition" + # 1.1.2.2 Ensure nodev option set on /tmp partition. (Automated) + - id: 6504 + title: "Ensure nodev option set on /tmp partition." description: "The nodev mount option specifies that the filesystem cannot contain special devices." - rationale: "Since the /tmp filesystem is not intended to support devices, set this option to ensure that users cannot attempt to create block or character special devices in /tmp." - remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /tmp partition. See the fstab(5) manual page for more information. Run the following command to remount /tmp : # mount -o remount,nodev /tmp OR Edit /etc/systemd/system/local-fs.target.wants/tmp.mount to add nodev to the /tmp mount options: [Mount] Options=mode=1777,strictatime,noexec,nodev,nosuid Run the following command to remount /tmp : # mount -o remount,nodev /tmp" - compliance: - - cis: ["1.1.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + rationale: "Since the /tmp filesystem is not intended to support devices, set this option to ensure that users cannot create a block or character special devices in /tmp." + remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /tmp partition. Example: /tmp defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /tmp with the configured options: # mount -o remount /tmp." + compliance: + - cis: ["1.1.2.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: none rules: - 'c:mount -> r:\s/tmp\s && !r:nodev' - # 1.1.4 /tmp: nosuid + # 1.1.2.3 Ensure noexec option set on /tmp partition. (Automated) + - id: 6505 + title: "Ensure noexec option set on /tmp partition." + description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." + rationale: "Since the /tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot run executable binaries from /tmp." 
+ remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /tmp partition. Example: /tmp defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /tmp with the configured options: # mount -o remount /tmp." + compliance: + - cis: ["1.1.2.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: none + rules: + - 'c:mount -> r:\s/tmp\s && !r:noexec' + + # 1.1.2.4 Ensure nosuid option set on /tmp partition. (Automated) - id: 6506 - title: "Ensure nosuid option set on /tmp partition" + title: "Ensure nosuid option set on /tmp partition." description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." rationale: "Since the /tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot create setuid files in /tmp." - remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /tmp partition. See the fstab(5) manual page for more information. Run the following command to remount /tmp : # mount -o remount,nosuid /tmp OR Edit /etc/systemd/system/local-fs.target.wants/tmp.mount to add nosuid to the /tmp mount options: [Mount] Options=mode=1777,strictatime,noexec,nodev,nosuid Run the following command to remount /tmp : # mount -o remount,nosuid /tmp" - compliance: - - cis: ["1.1.4"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /tmp partition. Example: /tmp defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /tmp with the configured options: # mount -o remount /tmp." + compliance: + - cis: ["1.1.2.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - 'c:mount -> r:\s/tmp\s && !r:nosuid' - # 1.1.5 /tmp: noexec + # 1.1.3.1 Ensure separate partition exists for /var. (Automated) - id: 6507 - title: "Ensure noexec option set on /tmp partition" - description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." - rationale: "Since the /tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot run executable binaries from /tmp." - remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /tmp partition. See the fstab(5) manual page for more information. 
Run the following command to remount /tmp : # mount -o remount,noexec /tmp OR Edit /etc/systemd/system/local-fs.target.wants/tmp.mount to add noexec to the /tmp mount options: [Mount] Options=mode=1777,strictatime,noexec,nodev,nosuid Run the following command to remount /tmp : # mount -o remount,noexec /tmp" - compliance: - - cis: ["1.1.5"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none - rules: - - 'c:mount -> r:\s/tmp\s && !r:noexec' - - # 1.1.6 Build considerations - Partition scheme. - - id: 6508 - title: "Ensure separate partition exists for /var" + title: "Ensure separate partition exists for /var." description: "The /var directory is used by daemons and other system services to temporarily store dynamic data. Some directories created by these processes may be world-writable." - rationale: "Since the /var directory may contain world-writable files and directories, there is a risk of resource exhaustion if it is not bound to a separate partition." + rationale: "The reasoning for mounting /var on a separate partition is as follow. Protection from resource exhaustion The default installation only creates a single / partition. Since the /var directory may contain world-writable files and directories, there is a risk of resource exhaustion. It will essentially have the whole disk available to fill up and impact the system as a whole. In addition, other operations on the system could fill up the disk unrelated to /var and cause unintended behavior across the system as the disk is full. See man auditd.conf for details. Fine grained control over the mount Configuring /var as its own file system allows an administrator to set additional mount options such as noexec/nosuid/nodev. These options limits an attackers ability to create exploits on the system. Other options allow for specific behaviour. See man mount for exact details regarding filesystem-independent and filesystem-specific options. Protection from exploitation An example of exploiting /var may be an attacker establishing a hard-link to a system setuid program and wait for it to be updated. Once the program was updated, the hard-link would be broken and the attacker would have his own copy of the program. If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." 
- compliance: - - cis: ["1.1.6"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - "http://tldp.org/HOWTO/LVM-HOWTO/" + compliance: + - cis: ["1.1.3.1"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:mount -> r:\s/var\s' - # 1.1.7 bind mount /var/tmp to /tmp + # 1.1.3.2 Ensure nodev option set on /var partition. (Automated) + - id: 6508 + title: "Ensure nodev option set on /var partition." + description: "The nodev mount option specifies that the filesystem cannot contain special devices." + rationale: "Since the /var filesystem is not intended to support devices, set this option to ensure that users cannot create a block or character special devices in /var." + remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /var partition. Example: /var defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var with the configured options: # mount -o remount /var." + compliance: + - cis: ["1.1.3.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: none + rules: + - 'c:mount -> r:\s/var\s && !r:nodev' + + # 1.1.3.3 Ensure noexec option set on /var partition. (Automated) - id: 6509 - title: "Ensure separate partition exists for /var/tmp" - description: "The /var/tmp directory is a world-writable directory used for temporary storage by all users and some applications." - rationale: "Since the /var/tmp directory is intended to be world-writable, there is a risk of resource exhaustion if it is not bound to a separate partition. In addition, making /var/tmp its own file system allows an administrator to set the noexec option on the mount, making /var/tmp useless for an attacker to install executable code." - remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/tmp. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." - compliance: - - cis: ["1.1.7"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all + title: "Ensure noexec option set on /var partition." + description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." + rationale: "Since the /var filesystem is only intended for variable files such as logs, set this option to ensure that users cannot run executable binaries from /var." + remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /var partition. Example: /var defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var with the configured options: # mount -o remount /var." 
+ compliance: + - cis: ["1.1.3.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: none rules: - - 'c:mount -> r:\s/var/tmp\s' + - 'c:mount -> r:\s/var\s && !r:noexec' - # 1.1.8 nodev set on /var/tmp + # 1.1.3.4 Ensure nosuid option set on /var partition. (Automated) - id: 6510 - title: "Ensure nodev option set on /var/tmp partition" - description: "The nodev mount option specifies that the filesystem cannot contain special devices." - rationale: "Since the /var/tmp filesystem is not intended to support devices, set this option to ensure that users cannot attempt to create block or character special devices in /var/tmp ." - remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /var/tmp partition. See the fstab(5) manual page for more information." - compliance: - - cis: ["1.1.8"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + title: "Ensure nosuid option set on /var partition." + description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." + rationale: "Since the /var filesystem is only intended for variable files such as logs, set this option to ensure that users cannot create setuid files in /var." + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /var partition. Example: /var defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var with the configured options: # mount -o remount /var." + compliance: + - cis: ["1.1.3.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - - 'c:mount -> r:\s/var/tmp\s && !r:nodev' + - 'c:mount -> r:\s/var\s && !r:nosuid' - # 1.1.9 nosuid set on /var/tmp + # 1.1.4.1 Ensure separate partition exists for /var/tmp. (Automated) - id: 6511 - title: "Ensure nosuid option set on /var/tmp partition" - description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." - rationale: "Since the /var/tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot create setuid files in /var/tmp." - remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /var/tmp partition. See the fstab(5) manual page for more information." + title: "Ensure separate partition exists for /var/tmp." + description: "The /var/tmp directory is a world-writable directory used for temporary storage by all users and some applications. Temporary file residing in /var/tmp is to be preserved between reboots." + rationale: "The reasoning for mounting /var/tmp on a separate partition is as follow. Protection from resource exhaustion The default installation only creates a single / partition. 
Since the /var/tmp directory may contain world-writable files and directories, there is a risk of resource exhaustion. It will essentially have the whole disk available to fill up and impact the system as a whole. In addition, other operations on the system could fill up the disk unrelated to /var/tmp and cause the potential disruption to daemons as the disk is full. Fine grained control over the mount Configuring /var/tmp as its own file system allows an administrator to set additional mount options such as noexec/nosuid/nodev. These options limits an attackers ability to create exploits on the system. Other options allow for specific behavior. See man mount for exact details regarding filesystem-independent and filesystem-specific options. Protection from exploitation An example of exploiting /var/tmp may be an attacker establishing a hard-link to a system setuid program and wait for it to be updated. Once the program was updated, the hard-link would be broken and the attacker would have his own copy of the program. If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." + remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/tmp. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + references: + - "http://tldp.org/HOWTO/LVM-HOWTO/" compliance: - - cis: ["1.1.9"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.1.4.1"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - 'c:mount -> r:\s/var/tmp\s && !r:nosuid' + - 'c:mount -> r:\s/var/tmp\s' - # 1.1.10 noexec set on /var/tmp + # 1.1.4.2 Ensure noexec option set on /var/tmp partition. (Automated) - id: 6512 - title: "Ensure noexec option set on /var/tmp partition" + title: "Ensure noexec option set on /var/tmp partition." description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." rationale: "Since the /var/tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot run executable binaries from /var/tmp." - remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /var/tmp partition. See the fstab(5) manual page for more information." - compliance: - - cis: ["1.1.10"] - - cis_csc: ["5.1"] - - cis_csc: ["2"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /var/tmp partition. Example: /var/tmp defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/tmp with the configured options: # mount -o remount /var/tmp." 
+ compliance: + - cis: ["1.1.4.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - 'c:mount -> r:\s/var/tmp\s && !r:noexec' - # 1.1.11 /var/log: partition + # 1.1.4.3 Ensure nosuid option set on /var/tmp partition. (Automated) - id: 6513 - title: "Ensure separate partition exists for /var/log" - description: "The /var/log directory is used by system services to store log data ." - rationale: "There are two important reasons to ensure that system logs are stored on a separate partition: protection against resource exhaustion (since logs can grow quite large) and protection of audit data." - remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/log. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." - compliance: - - cis: ["1.1.11"] - - cis_csc: ["6.4"] - - pci_dss: ["2.2.4", "10.7"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + title: "Ensure nosuid option set on /var/tmp partition." + description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." + rationale: "Since the /var/tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot create setuid files in /var/tmp." + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /var/tmp partition. Example: /var/tmp defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/tmp with the configured options: # mount -o remount /var/tmp." + compliance: + - cis: ["1.1.4.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: none + rules: + - 'c:mount -> r:\s/var/tmp\s && !r:nosuid' + + # 1.1.4.4 Ensure nodev option set on /var/tmp partition. (Automated) + - id: 6514 + title: "Ensure nodev option set on /var/tmp partition." + description: "The nodev mount option specifies that the filesystem cannot contain special devices." + rationale: "Since the /var/tmp filesystem is not intended to support devices, set this option to ensure that users cannot create a block or character special devices in /var/tmp." + remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /var/tmp partition. Example: /var/tmp defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/tmp with the configured options: # mount -o remount /var/tmp." 
+ compliance: + - cis: ["1.1.4.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: none + rules: + - 'c:mount -> r:\s/var/tmp\s && !r:nodev' + + # 1.1.5.1 Ensure separate partition exists for /var/log. (Automated) + - id: 6515 + title: "Configure /var/log The /var/log directory is used by system services to store log data. 1.1.5.1 Ensure separate partition exists for /var/log." + description: "The /var/log directory is used by system services to store log data." + rationale: "The reasoning for mounting /var/log on a separate partition is as follow. Protection from resource exhaustion The default installation only creates a single / partition. Since the /var/log directory contain the log files that can grow quite large, there is a risk of resource exhaustion. It will essentially have the whole disk available to fill up and impact the system as a whole. Fine grained control over the mount Configuring /var/log as its own file system allows an administrator to set additional mount options such as noexec/nosuid/nodev. These options limits an attackers ability to create exploits on the system. Other options allow for specific behavior. See man mount for exact details regarding filesystem-independent and filesystem-specific options. Protection of log data As /var/log contains log files, care should be taken to ensure the security and integrity of the data and mount point." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." + remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/log . For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - "http://tldp.org/HOWTO/LVM-HOWTO/" + compliance: + - cis: ["1.1.5"] + - cis_csc_v8: ["8.3"] + - cis_csc_v7: ["6.4"] + - iso_27001-2013: ["A.12.4.1"] + - pci_dss_v3.2.1: ["10.7"] + - soc_2: ["A1.1"] condition: all rules: - 'c:mount -> r:\s/var/log\s' - # 1.1.12 /var/log/audit: partition - - id: 6514 - title: "Ensure separate partition exists for /var/log/audit" - description: "The auditing daemon, auditd , stores log data in the /var/log/audit directory." - rationale: "There are two important reasons to ensure that data gathered by auditd is stored on a separate partition: protection against resource exhaustion (since the audit.log file can grow quite large) and protection of audit data." + # 1.1.5.2 Ensure nodev option set on /var/log partition. (Automated) + - id: 6516 + title: "Ensure nodev option set on /var/log partition." + description: "The nodev mount option specifies that the filesystem cannot contain special devices." + rationale: "Since the /var/log filesystem is not intended to support devices, set this option to ensure that users cannot create a block or character special devices in /var/log." 
+ remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /var/log partition. Example: /var/log defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/log with the configured options: # mount -o remount /var/log." + compliance: + - cis: ["1.1.5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: none + rules: + - 'c:mount -> r:\s/var/log\s && !r:nodev' + + # 1.1.5.3 Ensure noexec option set on /var/log partition. (Automated) + - id: 6517 + title: "Ensure noexec option set on /var/log partition." + description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." + rationale: "Since the /var/log filesystem is only intended for log files, set this option to ensure that users cannot run executable binaries from /var/log." + remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /var/log partition. Example: /var/log defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/log with the configured options: # mount -o remount /var/log." + compliance: + - cis: ["1.1.5.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: none + rules: + - 'c:mount -> r:\s/var/log\s && !r:noexec' + + # 1.1.5.4 Ensure nosuid option set on /var/log partition. (Automated) + - id: 6518 + title: "Ensure nosuid option set on /var/log partition." + description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." + rationale: "Since the /var/log filesystem is only intended for log files, set this option to ensure that users cannot create setuid files in /var/log." + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /var/log partition. Example: /var/log defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/log with the configured options: # mount -o remount /var/log." + compliance: + - cis: ["1.1.5.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: none + rules: + - 'c:mount -> r:\s/var/log\s && !r:noexec' + + # 1.1.6.1 Ensure separate partition exists for /var/log/audit. (Automated) + - id: 6519 + title: "Ensure separate partition exists for /var/log/audit." + description: "The auditing daemon, auditd, stores log data in the /var/log/audit directory." 
+ rationale: "The reasoning for mounting /var/log/audit on a separate partition is as follow. Protection from resource exhaustion The default installation only creates a single / partition. Since the /var/log/audit directory contain the audit.log file that can grow quite large, there is a risk of resource exhaustion. It will essentially have the whole disk available to fill up and impact the system as a whole. In addition, other operations on the system could fill up the disk unrelated to /var/log/audit and cause auditd to trigger it's space_left_action as the disk is full. See man auditd.conf for details. Fine grained control over the mount Configuring /var/log/audit as its own file system allows an administrator to set additional mount options such as noexec/nosuid/nodev. These options limits an attackers ability to create exploits on the system. Other options allow for specific behavior. See man mount for exact details regarding filesystem-independent and filesystem-specific options. Protection of audit data As /var/log/audit contains audit logs, care should be taken to ensure the security and integrity of the data and mount point." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/log/audit. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." - compliance: - - cis: ["1.1.12"] - - cis_csc: ["6.4"] - - pci_dss: ["2.2.4", "10.7"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - "http://tldp.org/HOWTO/LVM-HOWTO/" + compliance: + - cis: ["1.1.6.1"] + - cis_csc_v8: ["8.3"] + - cis_csc_v7: ["6.4"] + - iso_27001-2013: ["A.12.4.1"] + - pci_dss_v3.2.1: ["10.7"] + - soc_2: ["A1.1"] condition: all rules: - 'c:mount -> r:\s/var/log/audit\s' - # 1.1.13 /home: partition - - id: 6515 - title: "Ensure separate partition exists for /home" + # 1.1.6.2 Ensure noexec option set on /var/log/audit partition. (Automated) + - id: 6520 + title: "Ensure noexec option set on /var/log/audit partition." + description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." + rationale: "Since the /var/log/audit filesystem is only intended for audit logs, set this option to ensure that users cannot run executable binaries from /var/log/audit." + remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /var partition. Example: /var/log/audit defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/log/audit with the configured options: # mount -o remount /var/log/audit." 
+ compliance: + - cis: ["1.1.6.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: none + rules: + - 'c:mount -> r:\s/var/log/audit\s && !r:noexec' + + # 1.1.6.3 Ensure nodev option set on /var/log/audit partition. (Automated) + - id: 6521 + title: "Ensure nodev option set on /var/log/audit partition." + description: "The nodev mount option specifies that the filesystem cannot contain special devices." + rationale: "Since the /var/log/audit filesystem is not intended to support devices, set this option to ensure that users cannot create a block or character special devices in /var/log/audit." + remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /var/log/audit partition. Example: /var/log/audit defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/log/audit with the configured options: # mount -o remount /var/log/audit." + compliance: + - cis: ["1.1.6.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: none + rules: + - 'c:mount -> r:\s/var/log/audit\s && !r:nodev' + + # 1.1.6.4 Ensure nosuid option set on /var/log/audit partition. (Automated) + - id: 6522 + title: "Ensure nosuid option set on /var/log/audit partition." + description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." + rationale: "Since the /var/log/audit filesystem is only intended for variable files such as logs, set this option to ensure that users cannot create setuid files in /var/log/audit." + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /var/log/audit partition. Example: /var/log/audit defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/log/audit with the configured options: # mount -o remount /var/log/audit." + compliance: + - cis: ["1.1.6.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: none + rules: + - 'c:mount -> r:\s/var/log/audit\s && !r:nosuid' + + # 1.1.7.1 Ensure separate partition exists for /home. (Automated) + - id: 6523 + title: "Ensure separate partition exists for /home." description: "The /home directory is used to support disk storage needs of local users." - rationale: "If the system is intended to support local users, create a separate partition for the /home directory to protect against resource exhaustion and restrict the type of files that can be stored under /home." 
+ rationale: "The reasoning for mounting /home on a separate partition is as follow. Protection from resource exhaustion The default installation only creates a single / partition. Since the /home directory contains user generated data, there is a risk of resource exhaustion. It will essentially have the whole disk available to fill up and impact the system as a whole. In addition, other operations on the system could fill up the disk unrelated to /home and impact all local users. Fine grained control over the mount Configuring /home as its own file system allows an administrator to set additional mount options such as noexec/nosuid/nodev. These options limits an attackers ability to create exploits on the system. In the case of /home options such as usrquota/grpquota may be considered to limit the impact that users can have on each other with regards to disk resource exhaustion. Other options allow for specific behavior. See man mount for exact details regarding filesystem-independent and filesystem-specific options. Protection of user data As /home contains user data, care should be taken to ensure the security and integrity of the data and mount point." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /home. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." - compliance: - - cis: ["1.1.13"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - "http://tldp.org/HOWTO/LVM-HOWTO/" + compliance: + - cis: ["1.1.7.1"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:mount -> r:\s/home\s' - # 1.1.14 /home: nodev - - id: 6516 - title: "Ensure nodev option set on /home partition" + # 1.1.7.2 Ensure nodev option set on /home partition. (Automated) + - id: 6524 + title: "Ensure nodev option set on /home partition." description: "The nodev mount option specifies that the filesystem cannot contain special devices." - rationale: "Since the user partitions are not intended to support devices, set this option to ensure that users cannot attempt to create block or character special devices." - remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /home partition. # mount -o remount,nodev /home" - compliance: - - cis: ["1.1.14"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + rationale: "Since the /home filesystem is not intended to support devices, set this option to ensure that users cannot create a block or character special devices in /var." + remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /home partition. 
Example: /home defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /home with the configured options: # mount -o remount /home." + compliance: + - cis: ["1.1.7.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - 'c:mount -> r:\s/home\s && !r:nodev' - # 1.1.15 /dev/shm: nodev - - id: 6517 - title: "Ensure nodev option set on /dev/shm partition" + # 1.1.7.3 Ensure nosuid option set on /home partition. (Automated) + - id: 6525 + title: "Ensure nosuid option set on /home partition." + description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." + rationale: "Since the /home filesystem is only intended for user file storage, set this option to ensure that users cannot create setuid files in /home." + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /home partition. Example: /home defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /home with the configured options: # mount -o remount /home." + compliance: + - cis: ["1.1.7.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: none + rules: + - 'c:mount -> r:\s/home\s && !r:nosuid' + + # 1.1.7.4 Ensure usrquota option set on /home partition. (Automated) + - id: 6526 + title: "Ensure usrquota option set on /home partition." + description: "The usrquota mount option allows for the filesystem to have disk quotas configured." + rationale: "To ensure the availability of disk space on /home, it is important to limit the impact a single user or group can cause for other users (or the wider system) by accidentally filling up the partition. Quotas can also be applied to inodes for filesystems where inode exhaustion is a concern." + remediation: "Edit the /etc/fstab file and add usrquota to the fourth field (mounting options) for the /home partition. Example: /home defaults,rw,usrquota,grpquota,nodev,relatime 0 0 Run the following command to remount /home with the configured options: # mount -o remount /home Create the quota database. This example will ignore any existing quota files. # quotacheck -cugv /home quotacheck: Your kernel probably supports journaled quota but you are not using it. Consider switching to journaled quota to avoid running quotacheck after an unclean shutdown. quotacheck: Scanning /dev/sdb [/home] done quotacheck: Cannot stat old user quota file /home/aquota.user: No such file or directory. Usage will not be subtracted. quotacheck: Cannot stat old group quota file /home/aquota.group: No such file or directory. Usage will not be subtracted. quotacheck: Cannot stat old user quota file /home/aquota.user: No such file or directory. Usage will not be subtracted. quotacheck: Cannot stat old group quota file /home/aquota.group: No such file or directory.
Usage will not be subtracted. quotacheck: Checked 8 directories and 0 files quotacheck: Old file not found. quotacheck: Old file not found. Restore SELinux context on the quota database files. Order of operations is important as quotaon will set the immutable attribute on the files and thus restorecon will fail. # restorecon /home/aquota.user Enable quotas on the partition: # quotaon -vug /home /dev/sdb [/home]: group quotas turned on /dev/sdb [/home]: user quotas turned on." + compliance: + - cis: ["1.1.7.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: none + rules: + - 'c:mount -> r:\s/home\s && !r:usrquota' + - "c:quotaon -p user -> r:user" + + # 1.1.7.5 Ensure grpquota option set on /home partition. (Automated) + - id: 6527 + title: "Ensure grpquota option set on /home partition." + description: "The grpquota mount option allows for the filesystem to have disk quotas configured." + rationale: "To ensure the availability of disk space on /home, it is important to limit the impact a single user or group can cause for other users (or the wider system) by accidentally filling up the partition. Quotas can also be applied to inodes for filesystems where inode exhaustion is a concern." + remediation: "Edit the /etc/fstab file and add grpquota to the fourth field (mounting options) for the /home partition. Example: /home defaults,rw,usrquota,grpquota,nodev,relatime 0 0 Run the following command to remount /home with the configured options: # mount -o remount /home Create the quota database. This example will ignore any existing quota files. # quotacheck -cugv /home quotacheck: Your kernel probably supports journaled quota but you are not using it. Consider switching to journaled quota to avoid running quotacheck after an unclean shutdown. quotacheck: Scanning /dev/sdb [/home] done quotacheck: Cannot stat old user quota file /home/aquota.user: No such file or directory. Usage will not be subtracted. quotacheck: Cannot stat old group quota file /home/aquota.group: No such file or directory. Usage will not be subtracted. quotacheck: Cannot stat old user quota file /home/aquota.user: No such file or directory. Usage will not be subtracted. quotacheck: Cannot stat old group quota file /home/aquota.group: No such file or directory. Usage will not be subtracted. quotacheck: Checked 8 directories and 0 files quotacheck: Old file not found. quotacheck: Old file not found. Restore SELinux context on the quota database files. Order of operations is important as quotaon will set the immutable attribute on the files and thus restorecon will fail. # restorecon /home/aquota.group Enable quotas on the partition: # quotaon -vug /home /dev/sdb [/home]: group quotas turned on /dev/sdb [/home]: user quotas turned on." 
+ compliance: + - cis: ["1.1.7.5"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - 'c:mount -> r:\s/home\s && !r:grpquota' + - "c:quotaon -p group -> r:user" + + # 1.1.8.1 Ensure nodev option set on /dev/shm partition. (Automated) + - id: 6528 + title: "Configure /dev/shm 1.1.8.1 Ensure nodev option set on /dev/shm partition." description: "The nodev mount option specifies that the filesystem cannot contain special devices." rationale: "Since the /dev/shm filesystem is not intended to support devices, set this option to ensure that users cannot attempt to create special devices in /dev/shm partitions." - remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /dev/shm partition. Run the following command to remount /dev/shm: # mount -o remount,nodev /dev/shm" + remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /dev/shm partition. See the fstab(5) manual page for more information. Run the following command to remount /dev/shm using the updated options from /etc/fstab: # mount -o remount /dev/shm." compliance: - - cis: ["1.1.15"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.1.8"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - 'c:mount -> r:\s/dev/shm\s && !r:nodev' - # 1.1.16 /dev/shm: nosuid - - id: 6518 - title: "Ensure nosuid option set on /dev/shm partition" - description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." - rationale: "Setting this option on a file system prevents users from introducing privileged programs onto the system and allowing non-root users to execute them." - remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /dev/shm partition. Run the following command to remount /dev/shm: # mount -o remount,nosuid /dev/shm" - compliance: - - cis: ["1.1.16"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + # 1.1.8.2 Ensure noexec option set on /dev/shm partition. (Automated) + - id: 6529 + title: "Ensure noexec option set on /dev/shm partition." + description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." + rationale: "Setting this option on a file system prevents users from executing programs from shared memory. This deters users from introducing potentially malicious software on the system." + remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /dev/shm partition. 
Example: /dev/shm defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /dev/shm with the configured options: # mount -o remount /dev/shm NOTE It is recommended to use tmpfs as the device/filesystem type as /dev/shm is used as shared memory space by applications." + compliance: + - cis: ["1.1.8.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - 'c:mount -> r:\s/dev/shm\s && !r:noexec' - # 1.1.17 /dev/shm: noexec - - id: 6519 - title: "Ensure noexec option set on /dev/shm partition" - description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." - rationale: "Setting this option on a file system prevents users from executing programs from shared memory. This deters users from introducing potentially malicious software on the system." - remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /dev/shm partition. Run the following command to remount /dev/shm: # mount -o remount,noexec /dev/shm" - compliance: - - cis: ["1.1.17"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + # 1.1.8.3 Ensure nosuid option set on /dev/shm partition. (Automated) + - id: 6530 + title: "Ensure nosuid option set on /dev/shm partition." + description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." + rationale: "Setting this option on a file system prevents users from introducing privileged programs onto the system and allowing non-root users to execute them." + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /dev/shm partition. See the fstab(5) manual page for more information. Run the following command to remount /dev/shm using the updated options from /etc/fstab: # mount -o remount /dev/shm." + compliance: + - cis: ["1.1.8.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - 'c:mount -> r:\s/dev/shm\s && !r:nosuid' - # 1.1.22 Disable Automounting - - id: 6520 - title: "Disable Automounting" + # 1.1.9 Disable Automounting. (Automated) + - id: 6531 + title: "Disable Automounting." description: "autofs allows automatic mounting of devices, typically including CD/DVDs and USB drives." rationale: "With automounting enabled anyone with physical access could attach a USB drive or disc and have its contents available in system even if they lacked permissions to mount it themselves." - remediation: "Run the following command to disable autofs : systemctl disable autofs" + impact: "The use of portable hard drives is very common for workstation users.
If your organization allows the use of portable storage or media on workstations and physical access controls to workstations is considered adequate there is little value add in turning off automounting." + remediation: "If there are no other packages that depends on autofs, remove the package with: # dnf remove autofs Run the following command to disable autofs if it is required: # systemctl --now disable autofs." compliance: - - cis: ["1.1.22"] - - cis_csc: ["8.4", "8.5"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.1.9"] + - cis_csc_v8: ["10.3"] + - cis_csc_v7: ["8.5"] + - cmmc_v2.0: ["MP.L2-3.8.7"] + - hipaa: ["164.310(d)(1)"] + - iso_27001-2013: ["A.12.2.1"] condition: none rules: - - "c:systemctl is-enabled autofs -> enabled" + - "c:systemctl is-enabled autofs -> r:^enabled" - # 1.1.23 Disable USB Storage (Scored) - - id: 6521 - title: "Disable USB Storage" + # 1.1.10 Disable USB Storage. (Automated) + - id: 6532 + title: "Disable USB Storage." description: "USB storage provides a means to transfer and store files insuring persistence and availability of the files independent of network connection status. Its popularity and utility has led to USB-based malware being a simple and common means for network infiltration and a first step to establishing a persistent threat within a networked environment." rationale: "Restricting USB access on the system will decrease the physical attack surface for a device and diminish the possible vectors to introduce malware." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: vim /etc/modprobe.d/usb-storage.conf and add the following line: install usb-storage /bin/true Run the following command to unload the usb-storage module: # rmmod usb-storage" + remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: vim /etc/modprobe.d/usb_storage.conf and add the following line: install usb-storage /bin/true Run the following command to unload the usb-storage module: rmmod usb-storage." compliance: - - cis: ["1.1.23"] - - cis_csc: ["8.4", "8.5"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.1.10"] + - cis_csc_v8: ["10.3"] + - cis_csc_v7: ["13.7"] + - cmmc_v2.0: ["MP.L2-3.8.7"] + - hipaa: ["164.310(d)(1)"] + - iso_27001-2013: ["A.8.3.1"] condition: all rules: - "c:modprobe -n -v usb-storage -> r:install /bin/true" - "not c:lsmod -> r:usb-storage" - ############################################### - # 1.2 Configure Software Updates - ############################################### + # 1.2.1 Ensure GPG keys are configured. (Manual) - Not Implemented - # 1.2.2 Activate gpgcheck - - id: 6522 - title: "Ensure gpgcheck is globally activated" - description: "The gpgcheck option, found in the main section of the /etc/yum.conf and individual /etc/yum/repos.d/* files determines if an RPM package's signature is checked prior to its installation." + # 1.2.2 Ensure gpgcheck is globally activated. (Automated) + - id: 6533 + title: "Ensure gpgcheck is globally activated." + description: "The gpgcheck option, found in the main section of the /etc/dnf/dnf.conf and individual /etc/yum.repos.d/* files, determines if an RPM package's signature is checked prior to its installation." rationale: "It is important to ensure that an RPM's package signature is always checked prior to installation to ensure that the software is obtained from a trusted source." 
- remediation: "Edit /etc/yum.conf and set ' gpgcheck=1 ' in the [main] section. Edit any failing files in /etc/yum.repos.d/* and set all instances of gpgcheck to ' 1 '." + remediation: "Edit /etc/dnf/dnf.conf and set gpgcheck=1 in the [main] section. Example: # sed -i 's/^gpgcheck\\s*=\\s*.*/gpgcheck=1/' /etc/dnf/dnf.conf Edit any failing files in /etc/yum.repos.d/* and set all instances starting with gpgcheck to 1. Example: # find /etc/yum.repos.d/ -name \"*.repo\" -exec echo \"Checking:\" {} \\; -exec sed -i 's/^gpgcheck\\s*=\\s*.*/gpgcheck=1/' {} \\;." compliance: - cis: ["1.2.2"] - - cis_csc: ["3.4"] - - pci_dss: ["6.2"] - - nist_800_53: ["SI.2", "SA.11", "SI.4"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["A1.2", "CC6.8"] - condition: all - rules: - - "f:/etc/yum.conf -> r:gpgcheck=1" - - "not c:grep -Rh ^gpgcheck /etc/yum.repos.d/ -> r:gpgcheck=0" - - ############################################### - # 1.3 Configure sudo - ############################################### - - # 1.3.1 install sudo - - id: 6523 - title: "Ensure sudo is installed" - description: "sudo allows a permitted user to execute a command as the superuser or another user, as specified by the security policy. The invoking user's real (not effective) user ID is used to determine the user name with which to query the security policy." - rationale: "sudo supports a plugin architecture for security policies and input/output logging. Third parties can develop and distribute their own policy and I/O logging plugins to work seamlessly with the sudo front end. The default security policy is sudoers, which is configured via the file /etc/sudoers. The security policy determines what privileges, if any, a user has to run sudo. The policy may require that users authenticate themselves with a password or another authentication mechanism. If authentication is required, sudo will exit if the user's password is not entered within a configurable time limit. This limit is policy-specific." - remediation: "Run the following command to install sudo: # dnf install sudo" - compliance: - - cis: ["1.3.1"] - - cis_csc: ["4.3"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["7.3"] + - cis_csc_v7: ["3.4"] + - cmmc_v2.0: ["SI.L1-3.14.1"] + - nist_sp_800-53: ["SI-2(2)"] + - pci_dss_v3.2.1: ["6.2"] + - soc_2: ["CC7.1"] condition: all rules: - - 'c:rpm -q sudo -> r:sudo-\S*' - - # 1.3.2 Ensure sudo commands use pty (Scored) - - id: 6524 - title: "Ensure sudo commands use pty" - description: "sudo can be configured to run only from a pseudo-pty" - rationale: "Attackers can run a malicious program using sudo which would fork a background process that remains even when the main program has finished executing." 
- remediation: "edit the file /etc/sudoers or a file in /etc/sudoers.d/ with visudo -f and add the following line: Defaults use_pty" - compliance: - - cis: ["1.3.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: "AIDE stable manual: http://aide.sourceforge.net/stable/manual.html" - condition: any - rules: - - 'f:/etc/sudoers -> r:^\s*Defaults\s+use_pty' - - 'c:grep -r Default /etc/sudoers.d/ -> !r:# && r:\s*Defaults\s+use_pty' + - 'f:/etc/dnf/dnf.conf -> r:^gpgcheck\s*\t*=\s*\t*1' + - 'not d:/etc/yum.repos.d/ -> r:\.+ -> r:gpgcheck=0' - # 1.3.3 Ensure sudo log file exists (Scored) - - id: 6525 - title: "Ensure sudo log file exists" - description: "sudo can use a custom log file" - rationale: "A sudo log file simplifies auditing of sudo commands" - remediation: 'edit the file /etc/sudoers or a file in /etc/sudoers.d/ with visudo -f and add the following line: Defaults logfile=""' - compliance: - - cis: ["1.3.3"] - - cis_csc: ["6.3"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: "AIDE stable manual: http://aide.sourceforge.net/stable/manual.html" - condition: any - rules: - - 'f:/etc/sudoers -> r:^\s*Defaults\s+logfile=' - - 'c:grep -r Default /etc/sudoers.d/ -> !r:# && r:\s*Defaults\s+logfile' + # 1.2.3 Ensure package manager repositories are configured. (Manual) - Not Implemented - ############################################### - # 1.4 Filesystem Integrity Checking - ############################################### - - # 1.4.1 install AIDE - - id: 6526 - title: "Ensure AIDE is installed" - description: "AIDE takes a snapshot of filesystem state including modification times, permissions, and file hashes which can then be used to compare against the current state of the filesystem to detect modifications to the system." + # 1.3.1 Ensure AIDE is installed. (Automated) + - id: 6534 + title: "Ensure AIDE is installed." + description: "Advanced Intrusion Detection Environment (AIDE) is a intrusion detection tool that uses predefined rules to check the integrity of files and directories in the Linux operating system. AIDE has its own database to check the integrity of files and directories. AIDE takes a snapshot of files and directories including modification times, permissions, and file hashes which can then be used to compare against the current state of the filesystem to detect modifications to the system." rationale: "By monitoring the filesystem state compromised files can be detected to prevent or limit the exposure of accidental or malicious misconfigurations or modified binaries." - remediation: "Run the following command to install aide: # dnf install aide || Configure AIDE as appropriate for your environment. Consult the AIDE documentation for options. Initialize AIDE: #aide --init && mv /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz" + remediation: "Run the following command to install AIDE: # dnf install aide Configure AIDE as appropriate for your environment. Consult the AIDE documentation for options. Initialize AIDE: Run the following commands: # aide --init # mv /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz." 
+ references: + - "http://aide.sourceforge.net/stable/manual.html" compliance: - cis: ["1.3.1"] - - cis_csc: ["14.9"] - - pci_dss: ["11.5"] - - tsc: ["PI1.4", "PI1.5", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - references: - - "AIDE stable manual: http://aide.sourceforge.net/stable/manual.html" + - cis_csc_v8: ["3.14"] + - cis_csc_v7: ["14.9"] + - cmmc_v2.0: ["AC.L2-3.1.7"] + - hipaa: ["164.312(b)", "164.312(c)(1)", "164.312(c)(2)"] + - iso_27001-2013: ["A.12.4.3"] + - nist_sp_800-53: ["AC-6(9)"] + - pci_dss_v3.2.1: ["10.2.1", "11.5"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1"] + - soc_2: ["CC6.1"] condition: all rules: - - 'c:rpm -q aide -> r:aide-\S*' + - "c:rpm -q aide -> r:aide-" - # 1.4.2 AIDE regular checks - - id: 6527 - title: "Ensure filesystem integrity is regularly checked" + # 1.3.2 Ensure filesystem integrity is regularly checked. (Automated) + - id: 6535 + title: "Ensure filesystem integrity is regularly checked." description: "Periodic checking of the filesystem integrity is needed to detect changes to the filesystem." rationale: "Periodic file checking allows the system administrator to determine on a regular basis if critical files have been changed in an unauthorized fashion." - remediation: " Run the following commands: # cp ./config/aidecheck.service /etc/systemd/system/aidecheck.service # cp ./config/aidecheck.timer /etc/systemd/system/aidecheck.timer # chmod 0644 /etc/systemd/system/aidecheck.* # systemctl reenable aidecheck.timer # systemctl restart aidecheck.timer # systemctl daemon-reload. OR Run the following command: crontab -u root -e // Add the following line to the crontab: 0 5 * * * /usr/sbin/aide --check // Notes: The checking in this recommendation occurs every day at 5am. Alter the frequency and time of the checks in compliance with site policy. " - compliance: - - cis: ["1.3.2"] - - cis_csc: ["3.5"] - - pci_dss: ["11.5"] - - tsc: ["PI1.4", "PI1.5", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + remediation: "If cron will be used to schedule and run aide check Run the following command: # crontab -u root -e Add the following line to the crontab: 0 5 * * * /usr/sbin/aide --check OR if aidecheck.service and aidecheck.timer will be used to schedule and run aide check: Create or edit the file /etc/systemd/system/aidecheck.service and add the following lines: [Unit] Description=Aide Check [Service] Type=simple ExecStart=/usr/sbin/aide --check [Install] WantedBy=multi-user.target Create or edit the file /etc/systemd/system/aidecheck.timer and add the following lines: [Unit] Description=Aide check every day at 5AM [Timer] OnCalendar=*-*-* 05:00:00 Unit=aidecheck.service [Install] WantedBy=multi-user.target Run the following commands: # chown root:root /etc/systemd/system/aidecheck.* # chmod 0644 /etc/systemd/system/aidecheck.* # systemctl daemon-reload # systemctl enable aidecheck.service # systemctl --now enable aidecheck.timer." 
references: - "https://github.com/konstruktoid/hardening/blob/master/config/aidecheck.service" - "https://github.com/konstruktoid/hardening/blob/master/config/aidecheck.timer" - condition: any + compliance: + - cis: ["1.3.2"] + - cis_csc_v8: ["3.14"] + - cis_csc_v7: ["14.9"] + - cmmc_v2.0: ["AC.L2-3.1.7"] + - hipaa: ["164.312(b)", "164.312(c)(1)", "164.312(c)(2)"] + - iso_27001-2013: ["A.12.4.3"] + - nist_sp_800-53: ["AC-6(9)"] + - pci_dss_v3.2.1: ["10.2.1", "11.5"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1"] + - soc_2: ["CC6.1"] + condition: all + rules: + - "c:systemctl is-enabled aidecheck.service -> r:enabled" + - "c:systemctl is-enabled aidecheck.timer -> r:enabled" + - "c:systemctl status aidecheck.timer -> r:active" + + # 1.4.1 Ensure bootloader password is set. (Automated) + - id: 6536 + title: "Ensure bootloader password is set." + description: "Setting the boot loader password will require that anyone rebooting the system must enter a password before being able to set command line boot parameters." + rationale: "Requiring a boot password upon execution of the boot loader will prevent an unauthorized user from entering boot parameters or changing the boot partition. This prevents users from weakening security (e.g. turning off SELinux at boot time)." + impact: 'If password protection is enabled, only the designated superuser can edit a Grub 2 menu item by pressing "e" or access the GRUB 2 command line by pressing "c" If GRUB 2 is set up to boot automatically to a password-protected menu entry the user has no option to back out of the password prompt to select another menu entry. Holding the SHIFT key will not display the menu in this case. The user must enter the correct username and password. If unable, the configuration files will have to be edited via the LiveCD or other means to fix the problem You can add --unrestricted to the menu entries to allow the system to boot without entering a password. Password will still be required to edit menu items.' + remediation: "Create an encrypted password with grub2-setpassword: # grub2-setpassword Enter password: Confirm password: Run the following command to update the grub2 configuration: # grub2-mkconfig -o \"$(dirname \"$(find /boot -type f \\( -name 'grubenv' -o - name 'grub.conf' -o -name 'grub.cfg' \\) -exec grep -Pl '^\\h*(kernelopts=|linux|kernel)' {} \\;)\")/grub.cfg\"." + compliance: + - cis: ["1.4.1"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - "c:crontab -u root -l -> r:aide" - - "c:grep -r aide /etc/cron.* /etc/crontab -> r:aide" + - 'f:/boot/grub/grub.cfg -> r:^\s*\t*set superusers' + - 'f:/boot/grub/grub.cfg -> r:^\s*\t*password' - ############################################### - # 1.5 Secure Boot Settings - ############################################### - # 1.5.1 Configure bootloader - - id: 6528 - title: "Ensure permissions on bootloader config are configured" - description: "The grub configuration file contains information on boot settings and passwords for unlocking boot options. The grub configuration is usually grub.cfg and grubenv stored in /boot/grub2/" + # 1.4.2 Ensure permissions on bootloader config are configured. 
(Automated) + - id: 6537 + title: "Ensure permissions on bootloader config are configured." + description: "The grub files contain information on boot settings and passwords for unlocking boot options. The grub2 configuration is usually grub.cfg. On newer grub2 systems the encrypted bootloader password is contained in user.cfg. If the system uses UEFI, /boot/efi is a vfat filesystem. The vfat filesystem itself doesn't have the concept of permissions but can be mounted under Linux with whatever permissions desired." rationale: "Setting the permissions to read and write for root only prevents non-root users from seeing the boot parameters or changing them. Non-root users who read the boot parameters may be able to identify weaknesses in security upon boot and be able to exploit them." - remediation: "Run the following commands to set permissions on your grub configuration: # chown root:root /boot/grub2/grub.cfg # chmod og-rwx /boot/grub2/grub.cfg # chown root:root /boot/grub2/grubenv # chmod og-rwx /boot/grub2/grubenv" + remediation: "Run the following commands to set ownership and permissions on your grub configuration file(s): # [ -f /boot/grub2/grub.cfg ] && chown root:root /boot/grub2/grub.cfg # [ -f /boot/grub2/grub.cfg ] && chmod og-rwx /boot/grub2/grub.cfg # [ -f /boot/grub2/grubenv ] && chown root:root /boot/grub2/grubenv # [ -f /boot/grub2/grubenv ] && chmod og-rwx /boot/grub2/grubenv # [ -f /boot/grub2/user.cfg ] && chown root:root /boot/grub2/user.cfg # [ -f /boot/grub2/user.cfg ] && chmod og-rwx /boot/grub2/user.cfg OR If the system uses UEFI, edit /etc/fstab and add the fmask=0077, uid=0, and gid=0 options: Example: /boot/efi vfat defaults,umask=0027,fmask=0077,uid=0,gid=0 0 0 Note: This may require a re-boot to enable the change." compliance: - - cis: ["1.5.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.4.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /boot/grub2/grub.cfg -> r:Access:\s*\(0\d00/-\w\w\w------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - 'c:stat -L /boot/grub2/grubenv -> r:Access:\s*\(0\d00/-\w\w\w------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' + - 'c:stat -L /boot/grub2/user.cfg -> r:Access:\s*\(0600/-r--------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 1.5.2 Set Boot Loader Password (Scored) - - id: 6529 - title: "Ensure bootloader password is set" - description: "Setting the boot loader password will require that anyone rebooting the system must enter a password before being able to set command line boot parameters." - rationale: "Requiring a boot password upon execution of the boot loader will prevent an unauthorized user from entering boot parameters or changing the boot partition. This prevents users from weakening security (e.g. turning off SELinux at boot time)." 
- remediation: "Create an encrypted password with grub2-setpassword: # grub2-setpassword || Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg" + # 1.4.3 Ensure authentication is required when booting into rescue mode. (Automated) + - id: 6538 + title: "Ensure authentication is required when booting into rescue mode." + description: "Rescue mode (former single user mode) is used for recovery when the system detects an issue during boot or by manual selection from the bootloader." + rationale: "Requiring authentication in rescue mode (former single user mode) prevents an unauthorized user from rebooting the system into rescue mode to gain root privileges without credentials." + remediation: "The systemd drop-in files must be created if it is necessary to change the default settings: Create the file /etc/systemd/system/rescue.service.d/00-require-auth.conf which contains only the configuration to be overridden: [Service] ExecStart=-/usr/lib/systemd/systemd-sulogin-shell rescue." compliance: - - cis: ["1.5.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.4.3"] + - cis_csc_v8: ["5.2"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["IA.L2-3.5.7"] + - iso_27001-2013: ["A.9.4.3"] + - pci_dss_v4.0: ["2.2.2", "8.3.5", "8.3.6", "8.6.3"] + - soc_2: ["CC6.1"] condition: all rules: - - 'f:/boot/grub2/user.cfg -> r:^GRUB2_PASSWORD\s*=\.+' + - "f:/usr/lib/systemd/system/rescue.service -> r: systemd-sulogin-shell" + - 'd:/etc/systemd/system/rescue.service.d/ -> r:\.+ -> r: systemd-sulogin-shell' - # 1.5.3 Single user authentication - - id: 6530 - title: "Ensure authentication required for single user mode" - description: "Single user mode (rescue mode) is used for recovery when the system detects an issue during boot or by manual selection from the bootloader." - rationale: "Requiring authentication in single user mode (rescue mode) prevents an unauthorized user from rebooting the system into single user to gain root privileges without credentials." - remediation: "Edit /usr/lib/systemd/system/rescue.service and add/modify the following line: ExecStart=-/usr/lib/systemd/systemd-sulogin-shell rescue Edit /usr/lib/systemd/system/emergency.service and add/modify the following line: ExecStart=-/usr/lib/systemd/systemd-sulogin-shell emergency" + # 1.5.1 Ensure core dump storage is disabled. (Automated) + - id: 6539 + title: "Additional Process Hardening 1.5.1 Ensure core dump storage is disabled." + description: "A core dump is the memory of an executable program. It is generally used to determine why a program aborted. It can also be used to glean confidential information from a core file." + rationale: "A core dump includes a memory image taken at the time the operating system terminates an application. The memory image could contain sensitive data and is generally useful only for developers trying to debug problems." + remediation: "Edit /etc/systemd/coredump.conf and edit or add the following line: Storage=none." 
+ references: + - "https://www.freedesktop.org/software/systemd/man/coredump.conf.html" compliance: - - cis: ["1.5.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.5"] condition: all rules: - - "f:/usr/lib/systemd/system/rescue.service -> r:ExecStart=-/usr/lib/systemd/systemd-sulogin-shell rescue" - - "f:/usr/lib/systemd/system/emergency.service -> r:ExecStart=-/usr/lib/systemd/systemd-sulogin-shell emergency" + - 'f:/etc/systemd/coredump.conf -> r:^\s*Storage\s*=\s*none' - ############################################### - # 1.6 Additional Process Hardening - ############################################### - # 1.6.1 Restrict Core Dumps (Scored) - - id: 6531 - title: "Ensure core dumps are restricted" - description: "A core dump is the memory of an executable program. It is generally used to determine why a program aborted. It can also be used to glean confidential information from a core file.The system provides the ability to set a soft limit for core dumps, but this can be overridden by the user." - rationale: "Setting a hard limit on core dumps prevents users from overriding the soft variable. If core dumps are required, consider setting limits for user groups (see limits.conf(5)). In addition, setting the fs.suid_dumpable variable to 0 will prevent setuid programs from dumping core." - remediation: "Add the following line to /etc/security/limits.conf or a /etc/security/limits.d/* file: * hard core 0. Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: fs.suid_dumpable = 0. Run the following command to set the active kernel parameter: # sysctl -w fs.suid_dumpable=0. If systemd-coredump is installed: edit /etc/systemd/coredump.conf and add/modify the following lines: Storage=none ProcessSizeMax=0 Run the command: # systemctl daemon-reload" + # 1.5.2 Ensure core dump backtraces are disabled. (Automated) + - id: 6540 + title: "Ensure core dump backtraces are disabled." + description: "A core dump is the memory of an executable program. It is generally used to determine why a program aborted. It can also be used to glean confidential information from a core file." + rationale: "A core dump includes a memory image taken at the time the operating system terminates an application. The memory image could contain sensitive data and is generally useful only for developers trying to debug problems, increasing the risk to the system." + remediation: "Edit or add the following line in /etc/systemd/coredump.conf: ProcessSizeMax=0." + references: + - "https://www.freedesktop.org/software/systemd/man/coredump.conf.html" compliance: - - cis: ["1.6.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.5.2"] condition: all rules: - - 'c:grep -Rh ^*[[:space:]]*hard[[:space:]][[:space:]]*core[[:space:]][[:space:]]* /etc/security/limits.conf /etc/security/limits.d -> r:\s*\t*0$' - - 'c:sysctl fs.suid_dumpable -> r:^fs.suid_dumpable\s*=\s*0\s*$' - - 'c:grep -Rh fs\.suid_dumpable /etc/sysctl.conf /etc/sysctl.d -> r:^\s*fs.suid_dumpable\s*=\s*0\s*$' + - 'f:/etc/systemd/coredump.conf -> r:^\s*ProcessSizeMax\s*=\s*0' - # 1.6.2 Ensure address space layout randomization (ASLR) is enabled (Scored) - - id: 6532 - title: "Ensure address space layout randomization (ASLR) is enabled" + # 1.5.3 Ensure address space layout randomization (ASLR) is enabled. (Automated) + - id: 6541 + title: "Ensure address space layout randomization (ASLR) is enabled." 
description: "Address space layout randomization (ASLR) is an exploit mitigation technique which randomly arranges the address space of key data areas of a process." rationale: "Randomly placing virtual memory regions will make it difficult to write memory page exploits as the memory placement will be consistently shifting." - remediation: "Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: kernel.randomize_va_space = 2 Run the following command to set the active kernel parameter: # sysctl -w kernel.randomize_va_space=2" + remediation: 'Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: Example: # printf " kernel.randomize_va_space = 2 " >> /etc/sysctl.d/60-kernel_sysctl.conf Run the following command to set the active kernel parameter: # sysctl -w kernel.randomize_va_space=2.' compliance: - - cis: ["1.6.2"] - - cis_csc: ["8.3"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all + - cis: ["1.5.3"] + - cis_csc_v8: ["10.5"] + - cis_csc_v7: ["8.3"] + - nist_sp_800-53: ["SI-16"] + - pci_dss_v3.2.1: ["1.4"] + - soc_2: ["CC6.8"] + condition: any rules: - - 'c:grep -Rh ^kernel\.randomize_va_space /etc/sysctl.conf /etc/sysctl.d -> r:^\s*kernel.randomize_va_space\s*=\s*2$' - 'c:sysctl kernel.randomize_va_space -> r:^\s*kernel.randomize_va_space\s*=\s*2' + - 'f:/etc/sysctl.conf -> r:^\s*kernel.randomize_va_space\s*=\s*2' + - 'd:/etc/sysctl.d/ -> r:\.+ -> r:^\s*kernel.randomize_va_space\s*=\s*2' - ############################################### - # 1.7 Configure SELinux - ############################################### - # 1.7.1.1 Ensure SELinux is installed(Scored) - - id: 6533 - title: "Ensure SELinux is installed" + # 1.6.1.1 Ensure SELinux is installed. (Automated) + - id: 6542 + title: "Ensure SELinux is installed." description: "SELinux provides Mandatory Access Control." rationale: "Without a Mandatory Access Control system installed only the default Discretionary Access Control system will be available." - remediation: "Run the following command to install SELinux : # dnf install libselinux" + remediation: "Run the following command to install SELinux: # dnf install libselinux." compliance: - - cis: ["1.7.1.1"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.6.1.1"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - "c:rpm -q libselinux -> r:libselinux-" - # 1.7.1.2 SELinux not disabled - - id: 6534 - title: "Ensure SELinux is not disabled in bootloader configuration" + # 1.6.1.2 Ensure SELinux is not disabled in bootloader configuration. (Automated) + - id: 6543 + title: "Ensure SELinux is not disabled in bootloader configuration." description: "Configure SELINUX to be enabled at boot time and verify that it has not been overwritten by the grub boot parameters." rationale: "SELinux must be enabled at boot time in your grub configuration to ensure that the controls it provides are not overridden." 
- remediation: 'Edit /etc/default/grub and remove all instances of selinux=0 and enforcing=0 from all CMDLINE_LINUX parameters: GRUB_CMDLINE_LINUX_DEFAULT="quiet" GRUB_CMDLINE_LINUX="" || Run the following command to update the grub2 configuration: grub2-mkconfig -o /boot/grub2/grub.cfg' + impact: "Files created while SELinux is disabled are not labeled at all. This behavior causes problems when changing to enforcing mode because files are labeled incorrectly or are not labeled at all. To prevent incorrectly labeled and unlabeled files from causing problems, file systems are automatically relabeled when changing from the disabled state to permissive or enforcing mode. This can be a long running process that should be accounted for as it may extend downtime during initial re-boot." + remediation: "Run the following command to remove all instances of selinux=0 and enforcing=0 from all CMDLINE_LINUX parameters: grubby --update-kernel ALL --remove-args 'selinux=0 enforcing=0'." compliance: - - cis: ["1.7.1.2"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.6.1.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - 'f:/boot/grub2/grubenv -> r:kernelopts=\.*selinux=0|kernelopts=\.*enforcing=0' + - 'not f:/boot/grub2/grubenv -> r:kernelopts=\.*selinux=0|kernelopts=\.*enforcing=0' - # 1.7.1.3 Set selinux policy - - id: 6535 - title: "Ensure SELinux policy is configured" + # 1.6.1.3 Ensure SELinux policy is configured. (Automated) + - id: 6544 + title: "Ensure SELinux policy is configured." description: "Configure SELinux to meet or exceed the default targeted policy, which constrains daemons and system software only." rationale: "Security configuration requirements vary from site to site. Some sites may mandate a policy that is stricter than the default policy, which is perfectly acceptable. This item is intended to ensure that at least the default recommendations are met." - remediation: "Edit the /etc/selinux/config file to set the SELINUXTYPE parameter: SELINUXTYPE=targeted" + remediation: "Edit the /etc/selinux/config file to set the SELINUXTYPE parameter: SELINUXTYPE=targeted." compliance: - - cis: ["1.7.1.3"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.6.1.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:sestatus -> r:^Loaded policy name:\s+targeted$|^Loaded policy name:\s+mls$' - 'f:/etc/selinux/config -> r:^\s*SELINUXTYPE\s*=\s*targeted|^\s*SELINUXTYPE\s*=\s*mls' - # 1.7.1.4 Set selinux state - - id: 6536 - title: "Ensure the SELinux state is enforcing" - description: "Set SELinux to enable when the system is booted." 
- rationale: "SELinux must be enabled at boot time in to ensure that the controls it provides are in effect at all times." - remediation: "Edit the /etc/selinux/config file to set the SELINUX parameter: SELINUX=enforcing" + # 1.6.1.4 Ensure the SELinux mode is not disabled. (Automated) + - id: 6545 + title: "Ensure the SELinux mode is not disabled." + description: "SELinux can run in one of three modes: disabled, permissive, or enforcing: - Enforcing - Is the default, and recommended, mode of operation; in enforcing mode SELinux operates normally, enforcing the loaded security policy on the entire system. - Permissive - The system acts as if SELinux is enforcing the loaded security policy, including labeling objects and emitting access denial entries in the logs, but it does not actually deny any operations. While not recommended for production systems, permissive mode can be helpful for SELinux policy development. - Disabled - Is strongly discouraged; not only does the system avoid enforcing the SELinux policy, it also avoids labeling any persistent objects such as files, making it difficult to enable SELinux in the future Note: you can set individual domains to permissive mode while the system runs in enforcing mode. For example, to make the httpd_t domain permissive: # semanage permissive -a httpd_t." + rationale: "Running SELinux in disabled mode is strongly discouraged; not only does the system avoid enforcing the SELinux policy, it also avoids labeling any persistent objects such as files, making it difficult to enable SELinux in the future." + remediation: "Run one of the following commands to set SELinux's running mode: To set SELinux mode to Enforcing: # setenforce 1 OR To set SELinux mode to Permissive: # setenforce 0 Edit the /etc/selinux/config file to set the SELINUX parameter: For Enforcing mode: SELINUX=enforcing OR For Permissive mode: SELINUX=permissive." + references: + - "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/selinux_users_and_administrators_guide/sect-security-enhanced_linux-introduction-selinux_modes" compliance: - - cis: ["1.7.1.4"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.6.1.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:sestatus -> r:^SELinux status:\s+enabled$' - - 'c:sestatus -> r:^Current mode:\s+enforcing$' - - 'c:sestatus -> r:^Mode from config file:\s+enforcing$' - - 'f:/etc/selinux/config -> r:^\s*SELINUX\s*=\s*enforcing' + - "c:getenforce -> r:^Enforcing$|^Permissive$" + - 'f:/etc/selinux/config -> r:^\s*SELINUX\s*=\s*enforcing|\s*SELINUX\s*=\s*permisive' - # 1.7.1.5 Ensure no unconfined services exist (Scored) - - id: 6537 - title: "Ensure no unconfined services exist" - description: "Unconfined processes run in unconfined domains" - rationale: "For unconfined processes, SELinux policy rules are applied, but policy rules exist that allow processes running in unconfined domains almost all access. Processes running in unconfined domains fall back to using DAC rules exclusively. 
If an unconfined process is compromised, SELinux does not prevent an attacker from gaining access to system resources and data, but of course, DAC rules are still used. SELinux is a security enhancement on top of DAC rules – it does not replace them" - remediation: "Investigate any unconfined processes found during the audit action. They may need to have an existing security context assigned to them or a policy built for them. Notes: Occasionally certain daemons such as backup or centralized management software may require running unconfined. Any such software should be carefully analyzed and documented before such an exception is made." - compliance: - - cis: ["1.7.1.5"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + # 1.6.1.5 Ensure the SELinux mode is enforcing. (Automated) + - id: 6546 + title: "Ensure the SELinux mode is enforcing." + description: "SELinux can run in one of three modes: disabled, permissive, or enforcing: - Enforcing - Is the default, and recommended, mode of operation; in enforcing mode SELinux operates normally, enforcing the loaded security policy on the entire system. - Permissive - The system acts as if SELinux is enforcing the loaded security policy, including labeling objects and emitting access denial entries in the logs, but it does not actually deny any operations. While not recommended for production systems, permissive mode can be helpful for SELinux policy development. - Disabled - Is strongly discouraged; not only does the system avoid enforcing the SELinux policy, it also avoids labeling any persistent objects such as files, making it difficult to enable SELinux in the future Note: you can set individual domains to permissive mode while the system runs in enforcing mode. For example, to make the httpd_t domain permissive: # semanage permissive -a httpd_t." + rationale: "Running SELinux in disabled mode the system not only avoids enforcing the SELinux policy, it also avoids labeling any persistent objects such as files, making it difficult to enable SELinux in the future. Running SELinux in Permissive mode, though helpful for developing SELinux policy, only logs access denial entries, but does not deny any operations." + remediation: "Run the following command to set SELinux's running mode: # setenforce 1 Edit the /etc/selinux/config file to set the SELINUX parameter: For Enforcing mode: SELINUX=enforcing." + references: + - "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/selinux_users_and_administrators_guide/sect-security-enhanced_linux-introduction-selinux_modes" + compliance: + - cis: ["1.6.1.5"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - "c:ps -eZ -> r:unconfined_service_t" + - "c:getenforce -> r:^Enforcing$" + - 'f:/etc/selinux/config -> r:^\s*SELINUX\s*=\s*enforcing' - # 1.7.1.6 Remove SETroubleshoot - - id: 6538 - title: "Ensure SETroubleshoot is not installed" + # 1.6.1.6 Ensure no unconfined services exist. (Automated) + - id: 6547 + title: "Ensure no unconfined services exist." + description: "Unconfined processes run in unconfined domains." 
+ rationale: "For unconfined processes, SELinux policy rules are applied, but policy rules exist that allow processes running in unconfined domains almost all access. Processes running in unconfined domains fall back to using DAC rules exclusively. If an unconfined process is compromised, SELinux does not prevent an attacker from gaining access to system resources and data, but of course, DAC rules are still used. SELinux is a security enhancement on top of DAC rules it does not replace them." + remediation: "Investigate any unconfined processes found during the audit action. They may need to have an existing security context assigned to them or a policy built for them." + compliance: + - cis: ["1.6.1.6"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.13.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - "not c:ps -eZ -> r:unconfined_service_t" + + # 1.6.1.7 Ensure SETroubleshoot is not installed. (Automated) + - id: 6548 + title: "Ensure SETroubleshoot is not installed." description: "The SETroubleshoot service notifies desktop users of SELinux denials through a user-friendly interface. The service provides important information around configuration errors, unauthorized intrusions, and other potential errors." rationale: "The SETroubleshoot service is an unnecessary daemon to have running on a server, especially if X Windows is disabled." - remediation: "Run the following command to uninstall setroubleshoot: # dnf remove setroubleshoot" + remediation: "Run the following command to uninstall setroubleshoot: # dnf remove setroubleshoot." compliance: - - cis: ["1.7.1.6"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.6.1.7"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.9.1.1"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "c:rpm -qa setroubleshoot -> r:setroubleshoot" + - "not c:rpm -qa setroubleshoot -> r:^setroubleshoot" - # 1.7.1.7 Disable MCS Translation service mcstrans - - id: 6539 - title: "Ensure the MCS Translation Service (mcstrans) is not installed" - description: "The mcstransd daemon provides category label information to client processes requesting information. The label translations are defined in /etc/selinux/targeted/setrans.conf" + # 1.6.1.8 Ensure the MCS Translation Service (mcstrans) is not installed. (Automated) + - id: 6549 + title: "Ensure the MCS Translation Service (mcstrans) is not installed." + description: "The mcstransd daemon provides category label information to client processes requesting information. The label translations are defined in /etc/selinux/targeted/setrans.conf." rationale: "Since this service is not used very often, remove it to reduce the amount of potentially vulnerable code running on the system." 
- remediation: "Run the following command to uninstall mcstrans: # dnf remove mcstrans" - compliance: - - cis: ["1.7.1.5"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none - rules: - - "c:rpm -qa mcstrans -> r:mcstrans" - - # 1.11 Ensure system-wide crypto policy is FUTURE or FIPS (Scored) - - id: 6540 - title: "Ensure system-wide crypto policy is FUTURE or FIPS" - description: "The system-wide crypto-policies followed by the crypto core components allow consistently deprecating and disabling algorithms system-wide. The individual policy levels (DEFAULT, LEGACY, FUTURE, and FIPS) are included in the crypto-policies(7) package." - rationale: "If the Legacy system-wide crypto policy is selected, it includes support for TLS 1.0, TLS 1.1, and SSH2 protocols or later. The algorithms DSA, 3DES, and RC4 are allowed, while RSA and Diffie-Hellman parameters are accepted if larger than 1023-bits. These legacy protocols and algorithms can make the system vulnerable to attacks, including those listed in RFC 7457 FUTURE: Is a conservative security level that is believed to withstand any near-term future attacks. This level does not allow the use of SHA-1 in signature algorithms. The RSA and Diffie-Hellman parameters are accepted if larger than 3071 bits. The level provides at least 128-bit security FIPS: Conforms to the FIPS 140-2 requirements. This policy is used internally by the fips-mode-setup(8) tool which can switch the system into the FIPS 140-2 compliance mode. The level provides at least 112-bit security" - remediation: "Run the following command to change the system-wide crypto policy # update-crypto-policies --set FUTURE OR To switch the system to FIPS mode, run the following command: # fips-mode-setup --enable" + remediation: "Run the following command to uninstall mcstrans: # dnf remove mcstrans." compliance: - - cis: ["1.11"] - - cis_csc: ["14.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.6.1.8"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'f:/etc/crypto-policies/config -> r:^\s*FUTURE|^\s*FIPS' + - "not c:rpm -qa mcstrans -> r:^mcstrans" - ############################################### - # 1.8 Warning Banners - ############################################### - # 1.8.1.1 Configure message of the day (Scored) - - id: 6541 - title: "Ensure message of the day is configured properly" - description: "The contents of the /etc/motd file are displayed to users after login and function as a message of the day for authenticated users. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version" + # 1.7.1 Ensure message of the day is configured properly. (Automated) + - id: 6550 + title: "Ensure message of the day is configured properly." + description: "The contents of the /etc/motd file are displayed to users after login and function as a message of the day for authenticated users. 
Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version." rationale: 'Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. Displaying OS and patch level information in login banners also has the side effect of providing detailed system information to attackers attempting to target specific exploits of a system. Authorized users can easily get this information by running the " uname -a " command once they have logged in.' - remediation: "Edit the /etc/motd file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform OR If the motd is not used, this file can be removed. Run the following command to remove the motd file: # rm /etc/motd" - compliance: - - cis: ["1.8.1.1"] - - cis_csc: ["5.1"] - - pci_dss: ["7.1"] - - tsc: ["CC6.4"] - condition: none + remediation: "Edit the /etc/motd file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform OR If the motd is not used, this file can be removed. Run the following command to remove the motd file: # rm /etc/motd." + compliance: + - cis: ["1.7.1"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: any rules: - - 'f:/etc/motd -> r:\\v|\\r|\\m|\\s' + - "not f:/etc/motd" + - 'not f:/etc/motd -> r:\\v|\\r|\\m|\\s' - # 1.8.1.2 Configure local login warning banner (Scored) - - id: 6542 - title: "Ensure local login warning banner is configured properly" - description: "The contents of the /etc/issue file are displayed to users prior to login for local terminals. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version" + # 1.7.2 Ensure local login warning banner is configured properly. (Automated) + - id: 6551 + title: "Ensure local login warning banner is configured properly." + description: "The contents of the /etc/issue file are displayed to users prior to login for local terminals. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. 
If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version - or the operating system's name." rationale: 'Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. Displaying OS and patch level information in login banners also has the side effect of providing detailed system information to attackers attempting to target specific exploits of a system. Authorized users can easily get this information by running the " uname -a " command once they have logged in.' - remediation: "Edit the /etc/issue file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform: # echo \"Authorized uses only. All activity may be monitored and reported.\" > /etc/issue" + remediation: "Edit the /etc/issue file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform # echo \"Authorized uses only. All activity may be monitored and reported.\" > /etc/issue." compliance: - - cis: ["1.8.1.2"] - - cis_csc: ["5.1"] - - pci_dss: ["7.1"] - - tsc: ["CC6.4"] - condition: none + - cis: ["1.7.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all rules: - - 'f:/etc/issue -> r:\\v|\\r|\\m|\\s' + - 'not f:/etc/issue -> r:\\v|\\r|\\m|\\s' - # 1.8.1.3 Configure remote login warning banner (Scored) - - id: 6543 - title: "Ensure remote login warning banner is configured properly" - description: "The contents of the /etc/issue.net file are displayed to users prior to login for remote connections from configured services. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version" + # 1.7.3 Ensure remote login warning banner is configured properly. (Automated) + - id: 6552 + title: "Ensure remote login warning banner is configured properly." + description: "The contents of the /etc/issue.net file are displayed to users prior to login for remote connections from configured services. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version." 
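    # Illustrative only: the mingetty escapes listed above are what the banner checks look for.
    # A hypothetical distribution-default banner line such as:
    #   Kernel \r on an \m
    # would expose the kernel release and machine architecture, while the plain text suggested
    # in the remediations ("Authorized uses only. All activity may be monitored and reported.")
    # contains no escape sequences.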
rationale: 'Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. Displaying OS and patch level information in login banners also has the side effect of providing detailed system information to attackers attempting to target specific exploits of a system. Authorized users can easily get this information by running the " uname -a " command once they have logged in.' - remediation: "Edit the /etc/issue.net file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform: # echo \"Authorized uses only. All activity may be monitored and reported.\" > /etc/issue.net" + remediation: "Edit the /etc/issue.net file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform # echo \"Authorized uses only. All activity may be monitored and reported.\" > /etc/issue.net." compliance: - - cis: ["1.8.1.3"] - - cis_csc: ["5.1"] - - pci_dss: ["7.1"] - - tsc: ["CC6.4"] - condition: none + - cis: ["1.7.3"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all rules: - 'f:/etc/issue.net -> r:\\v|\\r|\\m|\\s' - # 1.8.1.4 Configure /etc/motd permissions (Scored) - - id: 6544 - title: "Ensure permissions on /etc/motd are configured" + # 1.7.4 Ensure permissions on /etc/motd are configured. (Automated) + - id: 6553 + title: "Ensure permissions on /etc/motd are configured." description: "The contents of the /etc/motd file are displayed to users after login and function as a message of the day for authenticated users." rationale: "If the /etc/motd file does not have the correct ownership it could be modified by unauthorized users with incorrect or misleading information." - remediation: "Run the following commands to set permissions on /etc/motd: # chown root:root /etc/motd # chmod 644 /etc/motd" + remediation: "Run the following commands to set permissions on /etc/motd : # chown root:root /etc/motd # chmod u-x,go-wx /etc/motd." compliance: - - cis: ["1.8.1.4"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["1.7.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/motd -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 1.8.1.5 Configure /etc/issue permissions (Scored) - - id: 6545 - title: "Ensure permissions on /etc/issue are configured" + # 1.7.5 Ensure permissions on /etc/issue are configured. 
(Automated) + - id: 6554 + title: "Ensure permissions on /etc/issue are configured." description: "The contents of the /etc/issue file are displayed to users prior to login for local terminals." rationale: "If the /etc/issue file does not have the correct ownership it could be modified by unauthorized users with incorrect or misleading information." - remediation: "Run the following commands to set permissions on /etc/issue: # chown root:root /etc/issue # chmod 644 /etc/issue" + remediation: "Run the following commands to set permissions on /etc/issue : # chown root:root /etc/issue # chmod u-x,go-wx /etc/issue." compliance: - - cis: ["1.8.1.5"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["1.7.5"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/issue -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 1.8.1.6 Configure /etc/issue.net permissions (Scored) - - id: 6546 - title: "Ensure permissions on /etc/issue.net are configured" + # 1.7.6 Ensure permissions on /etc/issue.net are configured. (Automated) + - id: 6555 + title: "Ensure permissions on /etc/issue.net are configured." description: "The contents of the /etc/issue.net file are displayed to users prior to login for remote connections from configured services." rationale: "If the /etc/issue.net file does not have the correct ownership it could be modified by unauthorized users with incorrect or misleading information." - remediation: "Run the following commands to set permissions on /etc/issue.net: # chown root:root /etc/issue.net # chmod 644 /etc/issue.net" + remediation: "Run the following commands to set permissions on /etc/issue.net : # chown root:root /etc/issue.net # chmod u-x,go-wx /etc/issue.net." compliance: - - cis: ["1.8.1.6"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["1.7.6"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/issue.net -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 1.8.2 Ensure GDM login banner is configured (Scored) - - id: 6547 - title: "Ensure GDM login banner is configured" - description: "GDM is the GNOME Display Manager which handles graphical login for GNOME based systems." 
- rationale: "Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place." - remediation: "Edit or create the file /etc/gdm3/greeter.dconf-defaults and add the following in 3 lines: (1) [org/gnome/login-screen] (2) banner-message-enable=true (3) banner-message-text='Authorized uses only. All activity may be monitored and reported.'" - compliance: - - cis: ["1.8.2"] - - cis_csc: ["5.1"] - - pci_dss: ["7.1"] - - tsc: ["CC6.4"] - condition: all - rules: - - "f:/etc/gdm3/greeter.dconf-defaults -> r:[org/gnome/login-screen]" - - "f:/etc/gdm3/greeter.dconf-defaults -> r:banner-message-enable=true" - - "f:/etc/gdm3/greeter.dconf-defaults -> r:banner-message-text=" - - # 1.9 Ensure updates, patches, and additional security software are installed (Not Scored) - - id: 6548 - title: "Ensure updates, patches, and additional security software are installed" - description: "Periodically patches are released for included software either due to security flaws or to include additional functionality." - rationale: "Newer patches may contain security enhancements that would not be available through the latest full update. As a result, it is recommended that the latest software patches be used to take advantage of the latest functionality. As with any software installation, organizations need to determine if a given update meets their requirements and verify the compatibility and supportability of any additional software against the update revision that is selected." - remediation: "Use your package manager to update all packages on the system according to site policy. The following command will install all available security updates: # dnf update --security . Site policy may mandate a testing period before install onto production systems for available updates. The audit and remediation here only cover security updates. Non-security updates can be audited with and comparing against site policy: # dnf check-update" - compliance: - - cis: ["1.9"] - - cis_csc: ["3.4"] - - pci_dss: ["5.2"] - - nist_800_53: ["AU.6", "SI.4"] - - gpg_13: ["4.2"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["A1.2"] - condition: any - rules: - - "c:dnf check-update -> r:No packages needed for security" - - "c:dnf check-update -> r:No security updates needed" - - # 1.10 Ensure system-wide crypto policy is not legacy (Scored) - - id: 6549 - title: "Ensure system-wide crypto policy is not legacy" - description: "The system-wide crypto-policies followed by the crypto core components allow consistently deprecating and disabling algorithms system-wide. The individual policy levels (DEFAULT, LEGACY, FUTURE, and FIPS) are included in the crypto-policies(7) package." - rationale: "If the Legacy system-wide crypto policy is selected, it includes support for TLS 1.0, TLS 1.1, and SSH2 protocols or later. The algorithms DSA, 3DES, and RC4 are allowed, while RSA and Diffie-Hellman parameters are accepted if larger than 1023-bits. 
These legacy protocols and algorithms can make the system vulnerable to attacks, including those listed in RFC 7457" - remediation: "Run the following command to change the system-wide crypto policy # update-crypto-policies --set Example: # update-crypto-policies --set DEFAULT Run the following to make the updated system-wide crypto policy active # update-crypto-policies" - compliance: - - cis: ["1.10"] - - cis_csc: ["14.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + # 1.8.1 Ensure GNOME Display Manager is removed. (Manual) + - id: 6556 + title: "Ensure GNOME Display Manager is removed." + description: "The GNOME Display Manager (GDM) is a program that manages graphical display servers and handles graphical user logins." + rationale: "If a Graphical User Interface (GUI) is not required, it should be removed to reduce the attack surface of the system." + impact: "Removing the GNOME Display manager will remove the GUI from the system." + remediation: "Run the following command to remove the gdm package # dnf remove gdm." references: - - "https://access.redhat.com/articles/3642912#what-polices-are-provided-1" - condition: none - rules: - - 'f:/etc/crypto-policies/config -> r:^\s*LEGACY' - - ############################################### - # 2 OS Services - ############################################### - ############################################### - # 2.1 inetd Services - ############################################### - # 2.1.1 Ensure xinetd is not installed (Scored) - - id: 6550 - title: "Ensure xinetd is not installed" - description: "The eXtended InterNET Daemon ( xinetd ) is an open source super daemon that replaced the original inetd daemon. The xinetd daemon listens for well known services and dispatches the appropriate daemon to properly respond to service requests." - rationale: "If there are no xinetd services required, it is recommended that the daemon be disabled." - remediation: "Run the following command to remove xinetd: # dnf remove xinetd" + - "https://wiki.gnome.org/Projects/GDM" compliance: - - cis: ["2.1.1"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis: ["1.8.1"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:rpm -q xinetd -> r:^package xinetd is not installed" - - ############################################### - # 2.2 Remove Legacy Services - ############################################### - - # 2.2.1.1 Ensure time synchronization is in use (Not Scored) - - id: 6551 - title: "Ensure time synchronization is in use" - description: "System time should be synchronized between all systems in an environment. This is typically done by establishing an authoritative time server or set of servers and having all systems synchronize their clocks to them." - rationale: "Time synchronization is important to support time sensitive security mechanisms like Kerberos and also ensures log files have consistent time records across the enterprise, which aids in forensic investigations." 
- remediation: "On physical systems or virtual systems where host based time synchronization is not available install chrony: # dnf install chrony On virtual systems where host based time synchronization is available consult your virtualization software documentation and verify that host based synchronization is in use." - compliance: - - cis: ["2.2.2.1"] - - cis_csc: ["6.1"] - - pci_dss: ["10.4"] - - nist_800_53: ["AU.8"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - "c:rpm -q chrony -> r:^chrony-" - - # 2.2.1.2 Configure Network Time Protocol (Chrony) (Scored) - - id: 6552 - title: "Ensure chrony is configured" - description: "chrony is a daemon which implements the Network Time Protocol (NTP). It is designed to synchronize system clocks across a variety of systems and use a source that is highly accurate. More information on NTP can be found at https://www.ntp.org. ntp can be configured to be a client and/or a server." - rationale: "If chrony is in use on the system proper configuration is vital to ensuring time synchronization is working properly." - remediation: "Add or edit server or pool lines to /etc/chrony.conf as appropriate: server Configure chrony to run as the chrony user" - compliance: - - cis: ["2.2.1.2"] - - cis_csc: ["6.1"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - "http://chrony.tuxfamily.org/" - condition: all - rules: - - 'f:/etc/chrony.conf -> r:^server\s*\t*\.+|^pool\s*\t*\.+' - - 'not c:ps -ef -> r:\.+/chronyd\s*\t*$ && !r:^\s*\t*chrony\s*\t*' - - # 2.2.2 Ensure X Window System is not installed (Scored) - - id: 6553 - title: "Ensure X Window System is not installed" - description: "The X Window System provides a Graphical User Interface (GUI) where users can have multiple windows in which to run programs and various add on. The X Windows system is typically used on workstations where users login, but not on servers where users typically do not login." - rationale: "Unless your organization specifically requires graphical login access via X Windows, remove it to reduce the potential attack surface." - remediation: "Run the following command to remove the X Windows System packages: # dnf remove xorg-x11*" - compliance: - - cis: ["2.2.2"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none - rules: - - "c:rpm -qa xorg-x11* -> r:^xorg-x11" - - # 2.2.3 Remove rsync service (Scored) - - id: 6554 - title: "Ensure rsync service is not enabled" - description: "The rsyncd service can be used to synchronize files between systems over network links." - rationale: "The rsyncd service presents a security risk as it uses unencrypted protocols for communication." - remediation: "Run the following command to disable rsync: # systemctl --now disable rsyncd" - compliance: - - cis: ["2.2.3"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: none - rules: - - "c:systemctl is-enabled rsyncd -> enabled" + - "f:rpm -q gdm -> r:is not installed" - # 2.2.4 Disable Avahi Server (Scored) - - id: 6555 - title: "Ensure Avahi Server is not enabled" - description: "Avahi is a free zeroconf implementation, including a system for multicast DNS/DNS-SD service discovery. Avahi allows programs to publish and discover services and hosts running on a local network with no specific configuration. 
For example, a user can plug a computer into a network and Avahi automatically finds printers to print to, files to look at and people to talk to, as well as network services running on the machine." - rationale: "Automatic discovery of network services is not normally required for system functionality. It is recommended to disable the service to reduce the potential attack surface." - remediation: "Run the following command to disable avahi-daemon: # systemctl --now disable avahi-daemon" - compliance: - - cis: ["2.2.4"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none - rules: - - "c:systemctl is-enabled avahi-daemon -> enabled" - - # 2.2.5 Remove SNMP Server (Scored) - - id: 6556 - title: "Ensure SNMP Server is not enabled" - description: "The Simple Network Management Protocol (SNMP) server is used to listen for SNMP commands from an SNMP management system, execute the commands or collect the information and then send results back to the requesting system." - rationale: "The SNMP server can communicate using SNMP v1, which transmits data in the clear and does not require authentication to execute commands. Unless absolutely necessary, it is recommended that the SNMP service not be used. If SNMP is required the server should be configured to disallow SNMP v1." - remediation: "Run the following command to disable snmpd: # systemctl --now disable snmpd" - compliance: - - cis: ["2.2.5"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none - rules: - - "c:systemctl is-enabled snmpd -> enabled" - - # 2.2.6 Remove HTTP Proxy Server (Scored) + # 1.8.2 Ensure GDM login banner is configured. (Automated) - id: 6557 - title: "Ensure HTTP Proxy Server is not enabled" - description: "Squid is a standard proxy server used in many distributions and environments." - rationale: "If there is no need for a proxy server, it is recommended that the squid proxy be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable squid: # systemctl --now disable squid" + title: "Ensure GDM login banner is configured." + description: "GDM is the GNOME Display Manager which handles graphical login for GNOME based systems." + rationale: "Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. Note: If a graphical login is not required, it should be removed to reduce the attack surface of the system." + remediation: "Edit or create the file /etc/dconf/profile/gdm and add the following: user-db:user system-db:gdm file-db:/usr/share/gdm/greeter-dconf-defaults Edit or create the file /etc/dconf/db/gdm.d/ and add the following: (This is typically /etc/dconf/db/gdm.d/01-banner-message) [org/gnome/login-screen] banner-message-enable=true banner-message-text='' Example Banner Text: 'Authorized users only. All activity may be monitored and reported.' Run the following command to update the system databases: # dconf update." 
compliance: - - cis: ["2.2.6"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none - rules: - - "c:systemctl is-enabled squid -> enabled" - - # 2.2.7 Remove Samba (Scored) + - cis: ["1.8.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all + rules: + - "f:/etc/dconf/profile/gdm" + - "f:/etc/dconf/profile/gdm -> r:user-db:user" + - "f:/etc/dconf/profile/gdm -> r:system-db:gdm" + - "f:/etc/dconf/profile/gdm -> r:file-db:/usr/share/gdm/greeter-dconf-defaults" + - 'd:/etc/dconf/db/gdm.d -> r:\.+ -> r:banner-message-enable=true' + - 'd:/etc/dconf/db/gdm.d -> r:\.+ -> r:banner-message-text=' + + # 1.8.3 Ensure last logged in user display is disabled. (Automated) - id: 6558 - title: "Ensure Samba is not enabled" - description: "The Samba daemon allows system administrators to configure their Linux systems to share file systems and directories with Windows desktops. Samba will advertise the file systems and directories via the Small Message Block (SMB) protocol. Windows desktop users will be able to mount these directories and file systems as letter drives on their systems." - rationale: "If there is no need to mount directories and file systems to Windows systems, then this service can be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable smb: # systemctl --now disable smb" - compliance: - - cis: ["2.2.7"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none - rules: - - "c:systemctl is-enabled smb -> enabled" - - # 2.2.8 Remove Dovecot (IMAP and POP3 services) (Scored) + title: "Ensure last logged in user display is disabled." + description: "GDM is the GNOME Display Manager which handles graphical login for GNOME based systems." + rationale: "Displaying the last logged in user eliminates half of the Userid/Password equation that an unauthorized person would need to log on. Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. Notes: - - If a graphical login is not required, it should be removed to reduce the attack surface of the system. If a different GUI login service is in use and required on the system, consult your documentation to disable displaying the last logged on user." + remediation: "Edit or create the file /etc/dconf/profile/gdm and add the following: user-db:user system-db:gdm file-db:/usr/share/gdm/greeter-dconf-defaults Edit or create the file /etc/dconf/db/gdm.d/ and add the following: (This is typically /etc/dconf/db/gdm.d/00-login-screen) [org/gnome/login-screen] # Do not show the user list disable-user-list=true Run the following command to update the system databases: # dconf update." 
+ compliance: + - cis: ["1.8.3"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all + rules: + - "f:/etc/dconf/profile/gdm" + - "f:/etc/dconf/profile/gdm -> r:user-db:user" + - "f:/etc/dconf/profile/gdm -> r:system-db:gdm" + - "f:/etc/dconf/profile/gdm -> r:file-db:/usr/share/gdm/greeter-dconf-defaults" + - 'd:/etc/dconf/db/gdm.d -> r:\.+ -> r:disable-user-list=true' + + # 1.8.4 Ensure XDMCP is not enabled. (Automated) - id: 6559 - title: "Ensure IMAP and POP3 server is not enabled" - description: "dovecot is an open source IMAP and POP3 server for Linux based systems." - rationale: "Unless POP3 and/or IMAP servers are to be provided by this system, it is recommended that the service be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable dovecot: # systemctl --now disable dovecot" + title: "Ensure XDMCP is not enabled." + description: "X Display Manager Control Protocol (XDMCP) is designed to provide authenticated access to display management services for remote displays." + rationale: "XDMCP is inherently insecure. - XDMCP is not a ciphered protocol. This may allow an attacker to capture keystrokes entered by a user - XDMCP is vulnerable to man-in-the-middle attacks. This may allow an attacker to steal the credentials of legitimate users by impersonating the XDMCP server." + remediation: "Edit the file /etc/gdm/custom.conf and remove the line Enable=true." + references: + - "https://help.gnome.org/admin/gdm/2.32/configuration.html.en" compliance: - - cis: ["2.2.8"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.8.4"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "c:systemctl is-enabled dovecot -> enabled" + - 'not f:/etc/gdm/custom.conf -> r:^\s*Enable\s*=\s*true' - # 2.2.9 Remove HTTP Server (Scored) + # 1.8.5 Ensure automatic mounting of removable media is disabled. (Automated) - id: 6560 - title: "Ensure HTTP server is not enabled" - description: "HTTP or web servers provide the ability to host web site content." - rationale: "Unless there is a need to run the system as a web server, it is recommended that the service be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable httpd: # systemctl --now disable httpd" + title: "Ensure automatic mounting of removable media is disabled." + description: "By default GNOME automatically mounts removable media when inserted as a convenience to the user." + rationale: "With automounting enabled anyone with physical access could attach a USB drive or disc and have its contents available in system even if they lacked permissions to mount it themselves." + impact: "The use of portable hard drives is very common for workstation users. 
If your organization allows the use of portable storage or media on workstations and physical access controls to workstations is considered adequate there is little value add in turning off automounting." + remediation: "Ensure that automatic mounting of media is disabled for all GNOME users: # cat << EOF >> /etc/dconf/db/local.d/00-media-automount [org/gnome/desktop/media-handling] automount=false automount-open=false EOF Apply the changes with: # dconf update." + references: + - "https://access.redhat.com/solutions/20107" compliance: - - cis: ["2.2.9"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.8.5"] + - cis_csc_v8: ["10.3"] + - cis_csc_v7: ["8.5"] + - cmmc_v2.0: ["MP.L2-3.8.7"] + - hipaa: ["164.310(d)(1)"] + - iso_27001-2013: ["A.12.2.1"] + condition: all rules: - - "c:systemctl is-enabled httpd -> enabled" + - "c:gsettings get org.gnome.desktop.media-handling automount -> r:^false" + - "c:gsettings get org.gnome.desktop.media-handling automount-open -> r:^false" - # 2.2.10 Remove FTP Server (Scored) + # 1.9 Ensure updates, patches, and additional security software are installed. (Manual) - id: 6561 - title: "Ensure FTP Server is not enabled" - description: "The File Transfer Protocol (FTP) provides networked computers with the ability to transfer files." - rationale: "FTP does not protect the confidentiality of data or authentication credentials. It is recommended sftp be used if file transfer is required. Unless there is a need to run the system as a FTP server (for example, to allow anonymous downloads), it is recommended that the service be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable vsftpd: # systemctl --now disable vsftpd" + title: "Ensure updates, patches, and additional security software are installed." + description: "Periodically patches are released for included software either due to security flaws or to include additional functionality." + rationale: "Newer patches may contain security enhancements that would not be available through the latest full update. As a result, it is recommended that the latest software patches be used to take advantage of the latest functionality. As with any software installation, organizations need to determine if a given update meets their requirements and verify the compatibility and supportability of any additional software against the update revision that is selected." + remediation: "Use your package manager to update all packages on the system according to site policy. The following command will install all available updates: # dnf update." compliance: - - cis: ["2.2.10"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.9"] + - cis_csc_v8: ["7.3", "7.4"] + - cis_csc_v7: ["3.4"] + - cmmc_v2.0: ["SI.L1-3.14.1"] + - nist_sp_800-53: ["SI-2(2)"] + - pci_dss_v3.2.1: ["6.2"] + - soc_2: ["CC7.1"] + condition: all rules: - - "c:systemctl is-enabled vsftpd -> enabled" + - 'not c:sh -c "dnf check-update | egrep -v \"Updating|Last metadata|^$\"" -> r:^\w' - # 2.2.11 Ensure DNS Server is not enabled (Scored) + # 1.10 Ensure system-wide crypto policy is not legacy. (Automated) - id: 6562 - title: "Ensure DNS Server is not enabled" - description: "The Domain Name System (DNS) is a hierarchical naming system that maps names to IP addresses for computers, services and other resources connected to a network." 
- rationale: "Unless a system is specifically designated to act as a DNS server, it is recommended that the service be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable named : # systemctl --now disable named" + title: "Ensure system-wide crypto policy is not legacy." + description: "The system-wide crypto-policies followed by the crypto core components allow consistently deprecating and disabling algorithms system-wide. The individual policy levels (DEFAULT, LEGACY, FUTURE, and FIPS) are included in the crypto-policies(7) package." + rationale: "If the Legacy system-wide crypto policy is selected, it includes support for TLS 1.0, TLS 1.1, and SSH2 protocols or later. The algorithms DSA, 3DES, and RC4 are allowed, while RSA and Diffie-Hellman parameters are accepted if larger than 1023-bits. These legacy protocols and algorithms can make the system vulnerable to attacks, including those listed in RFC 7457." + impact: "Environments that require compatibility with older insecure protocols may require the use of the less secure LEGACY policy level." + remediation: "Run the following command to change the system-wide crypto policy # update-crypto-policies --set Example: # update-crypto-policies --set DEFAULT Run the following to make the updated system-wide crypto policy active # update-crypto-policies." + references: + - "https://access.redhat.com/articles/3642912#what-polices-are-provided-1" compliance: - - cis: ["2.2.11"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.10"] + - cis_csc_v8: ["3.10"] + - cis_csc_v7: ["14.4"] + - cmmc_v2.0: ["AC.L2-3.1.13", "AC.L2-3.1.17", "IA.L2-3.5.10", "SC.L2-3.13.11", "SC.L2-3.13.15", "SC.L2-3.13.8"] + - hipaa: ["164.312(a)(2)(iv)", "164.312(e)(1)", "164.312(e)(2)(i)", "164.312(e)(2)(ii)"] + - iso_27001-2013: ["A.10.1.1", "A.13.1.1"] + - nist_sp_800-53: ["AC-17(2)", "SC-8", "SC-8(1)"] + - pci_dss_v3.2.1: ["2.1.1", "4.1", "4.1.1", "8.2.1"] + - pci_dss_v4.0: ["2.2.7", "4.1.1", "4.2.1", "4.2.1.2", "4.2.2", "8.3.2"] + condition: all rules: - - "c:systemctl is-enabled named -> enabled" + - 'f:/etc/crypto-policies/config -> r:^\s*LEGACY' - # 2.2.12 Ensure NFS is not enabled (Scored) + # 2.1.1 Ensure time synchronization is in use. (Automated) - id: 6563 - title: "Ensure NFS is not enabled" - description: "The Network File System (NFS) is one of the first and most widely distributed file systems in the UNIX environment. It provides the ability for systems to mount file systems of other servers through the network." - rationale: "If the system does not export NFS shares, it is recommended that the NFS be disabled to reduce remote attack surface." - remediation: "Run the following commands to disable nfs: # systemctl --now disable nfs" + title: "Ensure time synchronization is in use." + description: "System time should be synchronized between all systems in an environment. This is typically done by establishing an authoritative time server or set of servers and having all systems synchronize their clocks to them. Note: If another method for time synchronization is being used, this section may be skipped." + rationale: "Time synchronization is important to support time sensitive security mechanisms like Kerberos and also ensures log files have consistent time records across the enterprise, which aids in forensic investigations." + remediation: "Run the following command to install chrony: # dnf install chrony." 
compliance: - - cis: ["2.2.12"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["2.1.1"] + - cis_csc_v8: ["8.4"] + - cis_csc_v7: ["6.1"] + - cmmc_v2.0: ["AU.L2-3.3.7"] + - iso_27001-2013: ["A.12.4.4"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.4"] + - pci_dss_v4.0: ["10.6", "10.6.1", "10.6.2", "10.6.3"] + - soc_2: ["CC4.1", "CC5.2"] + condition: all rules: - - "c:systemctl is-enabled nfs -> enabled" + - "c:rpm -q chrony -> r:^chrony-" - # 2.2.13 Ensure RPC is not enabled (Scored) + # 2.1.2 Ensure chrony is configured. (Automated) - id: 6564 - title: "Ensure RPC is not enabled" - description: "The rpcbind service maps Remote Procedure Call (RPC) services to the ports on which they listen. RPC processes notify rpcbind when they start, registering the ports they are listening on and the RPC program numbers they expect to serve. The client system then contacts rpcbind on the server with a particular RPC program number. The rpcbind service redirects the client to the proper port number so it can communicate with the requested service." - rationale: "If the system does not require rpc based services, it is recommended that rpcbind be disabled to reduce the remote attack surface." - remediation: "Run the following commands to disable nfs: # systemctl --now disable rpcbind" + title: "Ensure chrony is configured." + description: "chrony is a daemon which implements the Network Time Protocol (NTP) and is designed to synchronize system clocks across a variety of systems and use a source that is highly accurate. More information on chrony can be found at http://chrony.tuxfamily.org/. chrony can be configured to be a client and/or a server." + rationale: "If chrony is in use on the system proper configuration is vital to ensuring time synchronization is working properly." + remediation: 'Add or edit server or pool lines to /etc/chrony.conf as appropriate: server Add or edit the OPTIONS in /etc/sysconfig/chronyd to include ''-u chrony'': OPTIONS="-u chrony".' compliance: - - cis: ["2.2.13"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["2.1.2"] + - cis_csc_v8: ["8.4"] + - cis_csc_v7: ["6.1"] + - cmmc_v2.0: ["AU.L2-3.3.7"] + - iso_27001-2013: ["A.12.4.4"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.4"] + - pci_dss_v4.0: ["10.6", "10.6.1", "10.6.2", "10.6.3"] + - soc_2: ["CC4.1", "CC5.2"] + condition: all rules: - - "c:systemctl is-enabled rpcbind -> enabled" + - "f:/etc/chrony.conf" + - 'f:/etc/chrony.conf -> r:^\s*\t*server|^\s*\t*pool' + - 'f:/etc/sysconfig/chronyd -> r:^\s*\t*OPTIONS\.*-u chrony' - # 2.2.14 Remove LDAP Server (Scored) + # 2.2.1 Ensure xinetd is not installed. (Automated) - id: 6565 - title: "Ensure LDAP Server is not enabled" - description: "The Lightweight Directory Access Protocol (LDAP) was introduced as a replacement for NIS/YP. It is a service that provides a method for looking up information from a central database." - rationale: "If the system will not need to act as an LDAP server, it is recommended that the software be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable slapd: # systemctl --now disable slapd" + title: "Ensure xinetd is not installed." + description: "The eXtended InterNET Daemon (xinetd) is an open source super daemon that replaced the original inetd daemon. 
The xinetd daemon listens for well known services and dispatches the appropriate daemon to properly respond to service requests." + rationale: "If there are no xinetd services required, it is recommended that the package be removed to reduce the attack surface are of the system. Note: If an xinetd service or services are required, ensure that any xinetd service not required is stopped and disabled." + remediation: "Run the following command to remove xinetd: # dnf remove xinetd." compliance: - - cis: ["2.2.14"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - More detailed documentation on OpenLDAP is available at https://www.openldap.org - condition: none + - cis: ["2.2.1"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6", "9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2", "A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "c:systemctl is-enabled slapd -> enabled" + - "c:rpm -q xinetd -> r:^package xinetd is not installed" - # 2.2.15 Remove DHCP Server (Scored) + # 2.2.2 Ensure xorg-x11-server-common is not installed. (Automated) - id: 6566 - title: "Ensure DHCP Server is not enabled" - description: "The Dynamic Host Configuration Protocol (DHCP) is a service that allows machines to be dynamically assigned IP addresses." - rationale: "Unless a system is specifically set up to act as a DHCP server, it is recommended that this service be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable dhcpd: # systemctl --now disable dhcpd" + title: "Ensure xorg-x11-server-common is not installed." + description: "The X Window System provides a Graphical User Interface (GUI) where users can have multiple windows in which to run programs and various add on. The X Windows system is typically used on workstations where users login, but not on servers where users typically do not login." + rationale: "Unless your organization specifically requires graphical login access via X Windows, remove it to reduce the potential attack surface." + impact: 'Many Linux systems run applications which require a Java runtime. Some Linux Java packages have a dependency on specific X Windows xorg-x11-fonts. One workaround to avoid this dependency is to use the "headless" Java packages for your specific Java runtime.' + remediation: "Run the following command to remove the X Windows Server packages: # dnf remove xorg-x11-server-common." compliance: - - cis: ["2.2.15"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - More detailed documentation on DHCP is available at https://www.isc.org/software/dhcp - condition: none + - cis: ["2.2.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "c:systemctl is-enabled dhcpd -> enabled" + - "c:rpm -q xorg-x11-server-common -> r:^package xorg-x11-server-common is not installed" - # 2.2.16 Ensure CUPS is not enabled (Scored) + # 2.2.3 Ensure Avahi Server is not installed. 
(Automated) - id: 6567 - title: "Ensure CUPS is not enabled" - description: "The Common Unix Print System (CUPS) provides the ability to print to both local and network printers. A system running CUPS can also accept print jobs from remote systems and print them to local printers. It also provides a web based remote administration capability." - rationale: "If the system does not need to print jobs or accept print jobs from other systems, it is recommended that CUPS be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable cups : # systemctl --now disable cups" + title: "Ensure Avahi Server is not installed." + description: "Avahi is a free zeroconf implementation, including a system for multicast DNS/DNS-SD service discovery. Avahi allows programs to publish and discover services and hosts running on a local network with no specific configuration. For example, a user can plug a computer into a network and Avahi automatically finds printers to print to, files to look at and people to talk to, as well as network services running on the machine." + rationale: "Automatic discovery of network services is not normally required for system functionality. It is recommended to remove this package to reduce the potential attack surface." + remediation: "Run the following commands to stop, mask and remove avahi-autoipd and avahi: # systemctl stop avahi-daemon.socket avahi-daemon.service # dnf remove avahi-autoipd avahi." compliance: - - cis: ["2.2.16"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - "More detailed documentation on CUPS is available at the project homepage at http://www.cups.org." - condition: none + - cis: ["2.2.3"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "c:systemctl is-enabled cups -> enabled" + - "c:rpm -q avahi -> r:^package avahi is not installed" + - "c:rpm -q avahi-autoipd -> r:^package avahi-autoipd is not installed" - # 2.2.17 Remove NIS Server (Scored) + # 2.2.4 Ensure CUPS is not installed. (Automated) - id: 6568 - title: "Ensure NIS Server is not enabled" - description: "The Network Information Service (NIS) (formally known as Yellow Pages) is a client-server directory service protocol for distributing system configuration files. The NIS server is a collection of programs that allow for the distribution of configuration files." - rationale: "The NIS service is inherently an insecure system that has been vulnerable to DOS attacks, buffer overflows and has poor authentication for querying NIS maps. NIS generally been replaced by such protocols as Lightweight Directory Access Protocol (LDAP). It is recommended that the service be disabled and other, more secure services be used" - remediation: "Run the following command to disable ypserv: # systemctl --now disable ypserv" + title: "Ensure CUPS is not installed." + description: "The Common Unix Print System (CUPS) provides the ability to print to both local and network printers. A system running CUPS can also accept print jobs from remote systems and print them to local printers. It also provides a web based remote administration capability." 
+ rationale: "If the system does not need to print jobs or accept print jobs from other systems, it is recommended that CUPS be removed to reduce the potential attack surface. Note: Removing CUPS will prevent printing from the system." + impact: "Disabling CUPS will prevent printing from the system, a common task for workstation systems." + remediation: "Run the following command to remove cups: # dnf remove cups." + references: + - "http://www.cups.org." compliance: - - cis: ["2.2.17"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: none + - cis: ["2.2.4"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "c:systemctl is-enabled ypserv -> enabled" + - "c:rpm -q cups -> r:^package cups is not installed" - # 2.2.18 Ensure mail transfer agent is configured for local-only mode (Scored) + # 2.2.5 Ensure DHCP Server is not installed. (Automated) - id: 6569 - title: "Ensure mail transfer agent is configured for local-only mode" - description: "Mail Transfer Agents (MTA), such as sendmail and Postfix, are used to listen for incoming mail and transfer the messages to the appropriate user or mail server. If the system is not intended to be a mail server, it is recommended that the MTA be configured to only process local mail." - rationale: "Mail Transfer Agents (MTA), such as sendmail and Postfix, are used to listen for incoming mail and transfer the messages to the appropriate user or mail server. If the system is not intended to be a mail server, it is recommended that the MTA be configured to only process local mail." - remediation: "Edit /etc/postfix/main.cf and add the following line to the RECEIVING MAIL section. If the line already exists, change it to look like the line below: inet_interfaces = loopback-only . Restart postfix: # systemctl restart postfix" + title: "Ensure DHCP Server is not installed." + description: "The Dynamic Host Configuration Protocol (DHCP) is a service that allows machines to be dynamically assigned IP addresses." + rationale: "Unless a system is specifically set up to act as a DHCP server, it is recommended that the rpm -q dhcp-server package be removed to reduce the potential attack surface." + remediation: "Run the following command to remove dhcp: # dnf remove dhcp-server." compliance: - - cis: ["2.2.18"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1", "AC.4", "SC.7"] - - tsc: ["CC5.2", "CC6.4", "CC6.6", "CC6.7"] - condition: none + - cis: ["2.2.5"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - 'c:ss -lntu -> r:\.*:25\.* && !r:\s*127.0.0.1:25\s*|\s*::1:25\s*' + - "c:rpm -q dhcp-server -> r:^package dhcp-server is not installed" - ############################################### - # 2.3 Service Clients - ############################################### - - # 2.3.1 Remove NIS Client (Scored) + # 2.2.6 Ensure DNS Server is not installed. 
(Automated) - id: 6570 - title: "Ensure NIS Client is not installed" - description: "The Network Information Service (NIS), formerly known as Yellow Pages, is a client-server directory service protocol used to distribute system configuration files. The NIS client ( ypbind ) was used to bind a machine to an NIS server and receive the distributed configuration files." - rationale: "The NIS service is inherently an insecure system that has been vulnerable to DOS attacks, buffer overflows and has poor authentication for querying NIS maps. NIS generally has been replaced by such protocols as Lightweight Directory Access Protocol (LDAP). It is recommended that the service be removed." - remediation: "Run the following command to uninstall ypbind: # dnf remove ypbind" + title: "Ensure DNS Server is not installed." + description: "The Domain Name System (DNS) is a hierarchical naming system that maps names to IP addresses for computers, services and other resources connected to a network." + rationale: "Unless a system is specifically designated to act as a DNS server, it is recommended that the package be removed to reduce the potential attack surface." + remediation: "Run the following command to remove bind: # dnf remove bind." compliance: - - cis: ["2.3.1"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: none + - cis: ["2.2.6"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "c:rpm -qa ypbind -> r:ypbind" + - "c:rpm -q bind -> r:^package bind is not installed" - # 2.3.2 Ensure telnet client is not installed (Scored) + # 2.2.7 Ensure FTP Server is not installed. (Automated) - id: 6571 - title: "Ensure telnet client is not installed" - description: "The telnet package contains the telnet client, which allows users to start connections to other systems via the telnet protocol." - rationale: "The telnet protocol is insecure and unencrypted. The use of an unencrypted transmission medium could allow an unauthorized user to steal credentials. The ssh package provides an encrypted session and stronger security and is included in most Linux distributions." - remediation: "Run the following command to uninstall telnet : # dnf remove telnet" + title: "Ensure FTP Server is not installed." + description: "FTP (File Transfer Protocol) is a traditional and widely used standard tool for transferring files between a server and clients over a network, especially where no authentication is necessary (permits anonymous users to connect to a server)." + rationale: "FTP does not protect the confidentiality of data or authentication credentials. It is recommended SFTP be used if file transfer is required. Unless there is a need to run the system as a FTP server (for example, to allow anonymous downloads), it is recommended that the package be removed to reduce the potential attack surface." + remediation: "Run the following command to remove ftp: # dnf remove ftp." 
compliance: - - cis: ["2.3.2"] - - cis_csc: ["4.5"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis: ["2.2.7"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:rpm -q telnet -> r:^package telnet is not installed" + - "c:rpm -q ftp -> r:^package ftp is not installed" - # 2.3.3 Ensure LDAP client is not installed (Scored) + # 2.2.8 Ensure VSFTP Server is not installed. (Automated) - id: 6572 - title: "Ensure LDAP client is not installed" - description: "The Lightweight Directory Access Protocol (LDAP) was introduced as a replacement for NIS/YP. It is a service that provides a method for looking up information from a central database." - rationale: "If the system will not need to act as an LDAP client, it is recommended that the software be removed to reduce the potential attack surface." - remediation: "Run the following command to uninstall openldap-clients : # dnf remove openldap-clients" + title: "Ensure VSFTP Server is not installed." + description: "FTP (File Transfer Protocol) is a traditional and widely used standard tool for transferring files between a server and clients over a network, especially where no authentication is necessary (permits anonymous users to connect to a server)." + rationale: "Unless there is a need to run the system as a FTP server, it is recommended that the package be removed to reduce the potential attack surface." + remediation: "Run the following command to remove vsftpd: # dnf remove vsftpd." compliance: - - cis: ["2.3.3"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis: ["2.2.8"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:rpm -q openldap-clients -> r:^package openldap-clients is not installed" + - "c:rpm -q vsftpd -> r:^package vsftpd is not installed" - ############################################### - # 3 Network Configuration - ############################################### - ############################################### - # 3.1 Network Parameters (Host Only) - ############################################### - # 3.1.1 Ensure IP forwarding is disabled (Scored) + # 2.2.9 Ensure TFTP Server is not installed. (Automated) - id: 6573 - title: "Ensure IP forwarding is disabled" - description: "The net.ipv4.ip_forward and net.ipv6.conf.all.forwarding flags are used to tell the system whether it can forward packets or not." - rationale: "Setting the flags to 0 ensures that a system with multiple interfaces (for example, a hard proxy), will never be able to forward packets, and therefore, never serve as a router." 
- remediation: "Run the following commands to restore the default parameters and set the active kernel parameters: # grep -Els \"^\\s*net\\.ipv4\\.ip_forward\\s*=\\s*1\" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf | while read filename; do sed -ri \"s/^\\s*(net\\.ipv4\\.ip_forward\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" \"s/^\\s*(net\\.ipv4\\.ip_forward\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" $filename; done; sysctl -w net.ipv4.ip_forward=0; sysctl -w net.ipv4.route.flush=1 && # grep -Els \"^\\s*net\\.ipv6\\.conf\\.all\\.forwarding\\s*=\\s*1\" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf | while read filename; do sed -ri \"s/^\\s*(net\\.ipv6\\.conf\\.all\\.forwarding\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" $filename; done; sysctl -w net.ipv6.conf.all.forwarding=0; sysctl -w net.ipv6.route.flush=1" - compliance: - - cis: ["3.1.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + title: "Ensure TFTP Server is not installed." + description: "Trivial File Transfer Protocol (TFTP) is a simple protocol for exchanging files between two TCP/IP machines. TFTP servers allow connections from a TFTP Client for sending and receiving files." + rationale: "TFTP does not have built-in encryption, access control or authentication. This makes it very easy for an attacker to exploit TFTP to gain access to files." + remediation: "Run the following command to remove tftp-server: # dnf remove tftp-server." + compliance: + - cis: ["2.2.9"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - 'c:grep -Rh -E -s ^\s*net.ipv4.ip_forward /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:^\s*net.ipv4.ip_forward\s*=\s*1' - - 'c:grep -Rh -E -s ^\s*net.ipv6.conf.all.forwarding /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:^\s*net.ipv6.conf.all.forwarding\s*=\s*1' - - 'c:sysctl net.ipv4.ip_forward -> r:^\s*net.ipv4.ip_forward\s*=\s*1' - - 'c:sysctl net.ipv6.conf.all.forwarding -> r:^\s*net.ipv6.conf.all.forwarding\s*=\s*1' + - "c:rpm -q tftp-server -> r:^package tftp-server is not installed" - # 3.1.2 Ensure packet redirect sending is disabled (Scored) + # 2.2.10 Ensure a web server is not installed. (Automated) - id: 6574 - title: "Ensure packet redirect sending is disabled" - description: "ICMP Redirects are used to send routing information to other hosts. As a host itself does not act as a router (in a host only configuration), there is no need to send redirects." - rationale: "An attacker could use a compromised host to send invalid ICMP redirects to other router devices in an attempt to corrupt routing and have users access a system set up by the attacker as opposed to a valid system." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.send_redirects = 0 net.ipv4.conf.default.send_redirects = 0 .Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.send_redirects=0; # sysctl -w net.ipv4.conf.default.send_redirects=0; # sysctl -w net.ipv4.route.flush=1" + title: "Ensure a web server is not installed." 
+ description: "Web servers provide the ability to host web site content." + rationale: "Unless there is a need to run the system as a web server, it is recommended that the packages be removed to reduce the potential attack surface. Note: Several http servers exist. They should also be audited, and removed, if not required." + remediation: "Run the following command to remove httpd and nginx: # dnf remove httpd nginx." compliance: - - cis: ["3.1.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["2.2.10"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:grep -Rh net.ipv4.conf.all.send_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.all.send_redirects\s*=\s*0' - - 'c:grep -Rh net.ipv4.conf.default.send_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.default.send_redirects\s*=\s*0' - - 'c:sysctl net.ipv4.conf.all.send_redirects -> r:^\s*net.ipv4.conf.all.send_redirects\s*=\s*0' - - 'c:sysctl net.ipv4.conf.default.send_redirects -> r:^\s*net.ipv4.conf.default.send_redirects\s*=\s*0' + - "c:rpm -q nginx -> r:^package nginx is not installed" + - "c:rpm -q httpd -> r:^package httpd is not installed" - ############################################### - # 3.2 Network Parameters (Host and Router) - ############################################### - # 3.2.1 Ensure source routed packets are not accepted (Scored) + # 2.2.11 Ensure IMAP and POP3 server is not installed. (Automated) - id: 6575 - title: "Ensure source routed packets are not accepted" - description: "In networking, source routing allows a sender to partially or fully specify the route packets take through a network. In contrast, non-source routed packets travel a path determined by routers in the network. In some cases, systems may not be routable or reachable from some locations (e.g. private addresses vs. Internet routable), and so source routed packets would need to be used." - rationale: "Setting net.ipv4.conf.all.accept_source_route, net.ipv4.conf.default.accept_source_route, net.ipv6.conf.all.accept_source_route and net.ipv6.conf.default.accept_source_route to 0 disables the system from accepting source routed packets. Assume this system was capable of routing packets to Internet routable addresses on one interface and private addresses on another interface. Assume that the private addresses were not routable to the Internet routable addresses and vice versa. Under normal routing circumstances, an attacker from the Internet routable addresses could not use the system as a way to reach the private address systems. If, however, source routed packets were allowed, they could be used to gain access to the private address systems as the route could be specified, rather than rely on routing protocols that did not allow this routing." 
- remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.accept_source_route = 0 net.ipv4.conf.default.accept_source_route = 0 net.ipv6.conf.all.accept_source_route = 0 net.ipv6.conf.default.accept_source_route = 0 and Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.accept_source_route=0; # sysctl -w net.ipv4.conf.default.accept_source_route=0; # sysctl -w net.ipv6.conf.all.accept_source_route=0; # sysctl -w net.ipv6.conf.default.accept_source_route=0; # sysctl -w net.ipv4.route.flush=1; # sysctl -w net.ipv6.route.flush=1" - compliance: - - cis: ["3.2.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:sysctl net.ipv4.conf.all.accept_source_route -> r:^\s*net.ipv4.conf.all.accept_source_route\s*=\s*0' - - 'c:sysctl net.ipv4.conf.default.accept_source_route -> r:^\s*net.ipv4.conf.default.accept_source_route\s*=\s*0' - - 'c:grep -Rh net.ipv4.conf.all.accept_source_route /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.all.accept_source_route\s*=\s*0' - - 'c:grep -Rh net.ipv4.conf.default.accept_source_route /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.default.accept_source_route\s*=\s*0' - - 'c:sysctl net.ipv6.conf.all.accept_source_route -> r:^\s*net.ipv6.conf.all.accept_source_route\s*=\s*0|No such file or directory' - - 'c:sysctl net.ipv6.conf.default.accept_source_route -> r:^\s*net.ipv6.conf.default.accept_source_route\s*=\s*0|No such file or directory' - - 'c:grep -Rh net.ipv6.conf.all.accept_source_route /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv6.conf.all.accept_source_route\s*=\s*0' - - 'c:grep -Rh net.ipv6.conf.default.accept_source_route /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv6.conf.default.accept_source_route\s*=\s*0' - - # 3.2.2 Ensure ICMP redirects are not accepted (Scored) + title: "Ensure IMAP and POP3 server is not installed." + description: "dovecot is an open source IMAP and POP3 server for Linux based systems." + rationale: "Unless POP3 and/or IMAP servers are to be provided by this system, it is recommended that the package be removed to reduce the potential attack surface. Note: Several IMAP/POP3 servers exist and can use other service names. These should also be audited and the packages removed if not required." + remediation: "Run the following command to remove dovecot and cyrus-imapd: # dnf remove dovecot cyrus-imapd." + compliance: + - cis: ["2.2.11"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - "c:rpm -q dovecot -> r:^package dovecot is not installed" + - "c:rpm -q cyrus-imapd -> r:^package cyrus-imapd is not installed" + + # 2.2.12 Ensure Samba is not installed. (Automated) - id: 6576 - title: "Ensure ICMP redirects are not accepted" - description: "ICMP redirect messages are packets that convey routing information and tell your host (acting as a router) to send packets via an alternate path. It is a way of allowing an outside routing device to update your system routing tables. By setting net.ipv4.conf.all.accept_redirects and net.ipv6.conf.all.accept_redirects to 0, the system will not accept any ICMP redirect messages, and therefore, won't allow outsiders to update the system's routing tables." 
- rationale: "Attackers could use bogus ICMP redirect messages to maliciously alter the system routing tables and get them to send packets to incorrect networks and allow your system packets to be captured." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.accept_redirects = 0 net.ipv4.conf.default.accept_redirects = 0 net.ipv6.conf.all.accept_redirects = 0 net.ipv6.conf.default.accept_redirects = 0. Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.accept_redirects=0; # sysctl -w net.ipv4.conf.default.accept_redirects=0; # sysctl -w net.ipv6.conf.all.accept_redirects=0; # sysctl -w net.ipv6.conf.default.accept_redirects=0; # sysctl -w net.ipv4.route.flush=1 and # sysctl -w net.ipv6.route.flush=1" - compliance: - - cis: ["3.2.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:sysctl net.ipv4.conf.all.accept_redirects -> r:^\s*net.ipv4.conf.all.accept_redirects\s*=\s*0' - - 'c:sysctl net.ipv4.conf.default.accept_redirects -> r:^\s*net.ipv4.conf.default.accept_redirects\s*=\s*0' - - 'c:grep -Rh net.ipv4.conf.all.accept_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.all.accept_redirects\s*=\s*0' - - 'c:grep -Rh net.ipv4.conf.default.accept_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.default.accept_redirects\s*=\s*0' - - 'c:sysctl net.ipv6.conf.all.accept_redirects -> r:^\s*net.ipv6.conf.all.accept_redirects\s*=\s*0' - - 'c:sysctl net.ipv6.conf.default.accept_redirects -> r:^\s*net.ipv6.conf.default.accept_redirects\s*=\s*0' - - 'c:grep -Rh net.ipv6.conf.all.accept_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv6.conf.all.accept_redirects\s*=\s*0' - - 'c:grep -Rh net.ipv6.conf.default.accept_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv6.conf.default.accept_redirects\s*=\s*0' - - # 3.2.3 Ensure secure ICMP redirects are not accepted (Scored) + title: "Ensure Samba is not installed." + description: "The Samba daemon allows system administrators to configure their Linux systems to share file systems and directories with Windows desktops. Samba will advertise the file systems and directories via the Server Message Block (SMB) protocol. Windows desktop users will be able to mount these directories and file systems as letter drives on their systems." + rationale: "If there is no need to mount directories and file systems to Windows systems, then this package can be removed to reduce the potential attack surface." + remediation: "Run the following command to remove samba: # dnf remove samba." + compliance: + - cis: ["2.2.12"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - "c:rpm -q samba -> r:^package samba is not installed" + + # 2.2.13 Ensure HTTP Proxy Server is not installed. (Automated) - id: 6577 - title: "Ensure secure ICMP redirects are not accepted" - description: "Secure ICMP redirects are the same as ICMP redirects, except they come from gateways listed on the default gateway list. It is assumed that these gateways are known to your system, and that they are likely to be secure." - rationale: "It is still possible for even known gateways to be compromised. 
Setting net.ipv4.conf.all.secure_redirects to 0 protects the system from routing table updates by possibly compromised known gateways." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.secure_redirects = 0 and net.ipv4.conf.default.secure_redirects = 0. Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.secure_redirects=0; # sysctl -w net.ipv4.conf.default.secure_redirects=0 and # sysctl -w net.ipv4.route.flush=1" + title: "Ensure HTTP Proxy Server is not installed." + description: "Squid is a standard proxy server used in many distributions and environments." + rationale: "Unless a system is specifically set up to act as a proxy server, it is recommended that the squid package be removed to reduce the potential attack surface. Note: Several HTTP proxy servers exist. These should be checked and removed unless required." + remediation: "Run the following command to remove the squid package: # dnf remove squid." compliance: - - cis: ["3.2.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["2.2.13"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:sysctl net.ipv4.conf.all.secure_redirects -> r:^\s*net.ipv4.conf.all.secure_redirects\s*=\s*0' - - 'c:sysctl net.ipv4.conf.default.secure_redirects -> r:^\s*net.ipv4.conf.default.secure_redirects\s*=\s*0' - - 'c:grep -Rh net.ipv4.conf.all.secure_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.all.secure_redirects\s*=\s*0' - - 'c:grep -Rh net.ipv4.conf.default.secure_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.default.secure_redirects\s*=\s*0' + - "c:rpm -q squid -> r:^package squid is not installed" - # 3.2.4 Ensure suspicious packets are logged (Scored) + # 2.2.14 Ensure net-snmp is not installed. (Automated) - id: 6578 - title: "Ensure suspicious packets are logged" - description: "When enabled, this feature logs packets with un-routable source addresses to the kernel log." - rationale: "Enabling this feature and logging these packets allows an administrator to investigate the possibility that an attacker is sending spoofed packets to their system." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.log_martians = 1 and net.ipv4.conf.default.log_martians = 1. Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.log_martians=1; # sysctl -w net.ipv4.conf.default.log_martians=1 and # sysctl -w net.ipv4.route.flush=1" + title: "Ensure net-snmp is not installed." + description: 'Simple Network Management Protocol (SNMP) is a widely used protocol for monitoring the health and welfare of network equipment, computer equipment and devices like UPSs. Net-SNMP is a suite of applications used to implement SNMPv1 (RFC 1157), SNMPv2 (RFCs 1901-1908), and SNMPv3 (RFCs 3411-3418) using both IPv4 and IPv6. Support for SNMPv2 classic (a.k.a. "SNMPv2 historic" - RFCs 1441-1452) was dropped with the 4.0 release of the UCD-snmp package. 
The Simple Network Management Protocol (SNMP) server is used to listen for SNMP commands from an SNMP management system, execute the commands or collect the information and then send results back to the requesting system.' +    rationale: "The SNMP server can communicate using SNMPv1, which transmits data in the clear and does not require authentication to execute commands. SNMPv3 replaces the simple/clear text password sharing used in SNMPv2 with more securely encoded parameters. If the SNMP service is not required, the net-snmp package should be removed to reduce the attack surface of the system. Note: If SNMP is required: - The server should be configured for SNMP v3 only. User Authentication and Message Encryption should be configured. If SNMP v2 is absolutely necessary, modify the community strings' values." +    remediation: "Run the following command to remove net-snmpd: # dnf remove net-snmp." compliance: -      - cis: ["3.2.4"] -      - cis_csc: ["6.2", "6.3"] -      - pci_dss: ["2.2.4"] -      - nist_800_53: ["CM.1"] -      - tsc: ["CC5.2"] +      - cis: ["2.2.14"] +      - cis_csc_v8: ["4.8"] +      - cis_csc_v7: ["2.6", "9.2"] +      - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] +      - iso_27001-2013: ["A.12.5.1", "A.12.6.2", "A.13.1.3"] +      - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] +      - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] +      - soc_2: ["CC6.3", "CC6.6"] condition: all rules: -      - 'c:sysctl net.ipv4.conf.all.log_martians -> r:^\s*net.ipv4.conf.all.log_martians\s*=\s*1' -      - 'c:sysctl net.ipv4.conf.default.log_martians -> r:^\s*net.ipv4.conf.default.log_martians\s*=\s*1' -      - 'c:grep -Rh net.ipv4.conf.all.log_martians /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.all.log_martians\s*=\s*1' -      - 'c:grep -Rh net.ipv4.conf.default.log_martians /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.default.log_martians\s*=\s*1' +      - "c:rpm -q net-snmp -> r:^package net-snmp is not installed" -  # 3.2.5 Ensure broadcast ICMP requests are ignored (Scored) +  # 2.2.15 Ensure NIS server is not installed. (Automated) - id: 6579 -    title: "Ensure broadcast ICMP requests are ignored" -    description: "Setting net.ipv4.icmp_echo_ignore_broadcasts to 1 will cause the system to ignore all ICMP echo and timestamp requests to broadcast and multicast addresses." -    rationale: "Accepting ICMP echo and timestamp requests with broadcast or multicast destinations for your network could be used to trick your host into starting (or participating) in a Smurf attack. A Smurf attack relies on an attacker sending large amounts of ICMP broadcast messages with a spoofed source address. All hosts receiving this message and responding would send echo-reply messages back to the spoofed address, which is probably not routable. If many hosts respond to the packets, the amount of traffic on the network could be significantly multiplied." -    remediation: "Run the following command to restore the default parameters and set the active kernel parameters: # grep -Els \"^\\s*net\\.ipv4\\.icmp_echo_ignore_broadcasts\\s*=\\s*0\" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf \"s/^\\s*(net\\.ipv4\\.icmp_echo_ignore_broadcasts\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" $filename; done; sysctl -w net.icmp_echo_ignore_broadcasts=1; sysctl -w net.ipv4.route.flush=1" +    title: "Ensure NIS server is not installed." +    description: "The ypserv package provides the Network Information Service (NIS).
This service, formally known as Yellow Pages, is a client-server directory service protocol for distributing system configuration files. The NIS server is a collection of programs that allow for the distribution of configuration files." + rationale: "The NIS service is inherently an insecure system that has been vulnerable to DOS attacks, buffer overflows and has poor authentication for querying NIS maps. NIS generally has been replaced by such protocols as Lightweight Directory Access Protocol (LDAP). It is recommended that the ypserv package be removed, and if required a more secure services be used." + remediation: "Run the following command to remove ypserv: # dnf remove ypserv." compliance: - - cis: ["3.2.5"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["2.2.15"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6", "9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2", "A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:sysctl net.ipv4.icmp_echo_ignore_broadcasts -> r:^\s*net.ipv4.icmp_echo_ignore_broadcasts\s*=\s*1' - - 'not c:grep -E -s -Rh ^\s*net.ipv4.icmp_echo_ignore_broadcasts /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:^\s*net.ipv4.icmp_echo_ignore_broadcasts\s*=\s*0' + - "c:rpm -q ypserv -> r:^package ypserv is not installed" - # 3.2.6 Ensure bogus ICMP responses are ignored (Scored) + # 2.2.16 Ensure telnet-server is not installed. (Automated) - id: 6580 - title: "Ensure bogus ICMP responses are ignored" - description: "Setting icmp_ignore_bogus_error_responses to 1 prevents the kernel from logging bogus responses (RFC-1122 non-compliant) from broadcast reframes, keeping file systems from filling up with useless log messages." - rationale: "Some routers (and some attackers) will send responses that violate RFC-1122 and attempt to fill up a log file system with many useless error messages." - remediation: "Run the following commands to restore the default parameters and set the active kernel parameters: # grep -Els \"^\\s*net\\.ipv4\\.icmp_ignore_bogus_error_responses\\s*=\\s*0 /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf | while read filename; do sed -ri \"s/^\\s*(net\\.ipv4\\.icmp_ignore_bogus_error_responses\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" $filename; done; sysctl -w net.ipv4.icmp_ignore_bogus_error_responses=1; sysctl -w net.ipv4.route.flush=1\"" + title: "Ensure telnet-server is not installed." + description: "The telnet-server package contains the telnet daemon, which accepts connections from users from other systems via the telnet protocol." + rationale: "The telnet protocol is insecure and unencrypted. The use of an unencrypted transmission medium could allow a user with access to sniff network traffic the ability to steal credentials. The ssh package provides an encrypted session and stronger security." + remediation: "Run the following command to remove the telnet-server package: # dnf remove telnet-server." 
compliance: - - cis: ["3.2.6"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["2.2.16"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6", "9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2", "A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:sysctl net.ipv4.icmp_ignore_bogus_error_responses -> r:^\s*net.ipv4.icmp_ignore_bogus_error_responses\s*=\s*1' - - 'not c:grep -E -s -Rh ^\s*net.ipv4.icmp_ignore_bogus_error_responses /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:^\s*net.ipv4.icmp_ignore_bogus_error_responses\s*=\s*0' + - "c:rpm -q telnet-server -> r:^package telnet-server is not installed" - # 3.2.7 Ensure Reverse Path Filtering is enabled (Scored) + # 2.2.17 Ensure mail transfer agent is configured for local-only mode. (Automated) - id: 6581 - title: "Ensure Reverse Path Filtering is enabled" - description: "Setting net.ipv4.conf.all.rp_filter and net.ipv4.conf.default.rp_filter to 1 forces the Linux kernel to utilize reverse path filtering on a received packet to determine if the packet was valid. Essentially, with reverse path filtering, if the return packet does not go out the same interface that the corresponding source packet came from, the packet is dropped (and logged if log_martians is set)." - rationale: "Setting these flags is a good way to deter attackers from sending your system bogus packets that cannot be responded to. One instance where this feature breaks down is if asymmetrical routing is employed. This would occur when using dynamic routing protocols (bgp, ospf, etc) on your system. If you are using asymmetrical routing on your system, you will not be able to enable this feature without breaking the routing." - remediation: "Run the following command to restore the default net.ipv4.conf.all.rp_filter = 1 parameter and set the active kernel parameter: # grep -Els \"^\\s*net\\.ipv4\\.conf\\.all\\.rp_filter\\s*=\\s*0\" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf | while read filename; do sed -ri \"s/^\\s*(net\\.ipv4\\.net.ipv4.conf\\.all\\.rp_filter\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" $filename; done; sysctl -w net.ipv4.conf.all.rp_filter=1; sysctl -w net.ipv4.route.flush=1 .Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.default.rp_filter=1 and Run the following commands to set the active kernel parameter: # sysctl -w net.ipv4.conf.default.rp_filter=1 and # sysctl -w net.ipv4.route.flush=1" + title: "Ensure mail transfer agent is configured for local-only mode." + description: "Mail Transfer Agents (MTA), such as sendmail and Postfix, are used to listen for incoming mail and transfer the messages to the appropriate user or mail server. If the system is not intended to be a mail server, it is recommended that the MTA be configured to only process local mail." + rationale: "The software for all Mail Transfer Agents is complex and most have a long history of security issues. While it is important to ensure that the system can process local mail messages, it is not necessary to have the MTA's daemon listening on a port unless the server is intended to be a mail server that receives and processes mail from other systems. Notes: - This recommendation is designed around the postfix mail server. 
- Depending on your environment you may have an alternative MTA installed such as sendmail. If this is the case consult the documentation for your installed MTA to configure the recommended state." + remediation: "Edit /etc/postfix/main.cf and add the following line to the RECEIVING MAIL section. If the line already exists, change it to look like the line below: inet_interfaces = loopback-only Run the following command to restart postfix: # systemctl restart postfix." compliance: - - cis: ["3.2.7"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["2.2.17"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:sysctl net.ipv4.conf.all.rp_filter -> r:^\s*net.ipv4.conf.all.rp_filter\s*=\s*1' - - 'c:sysctl net.ipv4.conf.default.rp_filter -> r:^\s*net.ipv4.conf.default.rp_filter\s*=\s*1' - - 'not c:grep -E -s -Rh ^\s*net.ipv4.conf.all.rp_filter /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:^\s*net.ipv4.conf.all.rp_filter\s*=\s*0' - - 'c:grep -E -s -Rh ^\s*net.ipv4.conf.default.rp_filter /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:^\s*net.ipv4.conf.default.rp_filter\s*=\s*1' + - 'not c:ss -lntu -> r:\s*127.0.0.1:25\s*|\s*::1:25\s*' - # 3.2.8 Ensure TCP SYN Cookies is enabled (Scored) + # 2.2.18 Ensure nfs-utils is not installed or the nfs-server service is masked. (Automated) - id: 6582 - title: "Ensure TCP SYN Cookies is enabled" - description: "When tcp_syncookies is set, the kernel will handle TCP SYN packets normally until the half-open connection queue is full, at which time, the SYN cookie functionality kicks in. SYN cookies work by not using the SYN queue at all. Instead, the kernel simply replies to the SYN with a SYN|ACK, but will include a specially crafted TCP sequence number that encodes the source and destination IP address and port number and the time the packet was sent. A legitimate connection would send the ACK packet of the three way handshake with the specially crafted sequence number. This allows the system to verify that it has received a valid response to a SYN cookie and allow the connection, even though there is no corresponding SYN in the queue." - rationale: "Attackers use SYN flood attacks to perform a denial of service attacked on a system by sending many SYN packets without completing the three way handshake. This will quickly use up slots in the kernel's half-open connection queue and prevent legitimate connections from succeeding. SYN cookies allow the system to keep accepting valid connections, even if under a denial of service attack." - remediation: "Run the following command to restore the default parameter and set the active kernel parameters: grep -Els \"^\\s*net\\.ipv4\\.tcp_syncookies\\s*=\\s*[02]*\" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf | while read filename; do sed -ri \"s/^\\s*(net\\.ipv4\\.tcp_syncookies\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" $filename; done; sysctl -w net.ipv4.tcp_syncookies=1; sysctl -w net.ipv4.route.flush=1" + title: "Ensure nfs-utils is not installed or the nfs-server service is masked." 
+ description: "The Network File System (NFS) is one of the first and most widely distributed file systems in the UNIX environment. It provides the ability for systems to mount file systems of other servers through the network." + rationale: "If the system does not require network shares, it is recommended that the nfs-utils package be removed to reduce the attack surface of the system." + impact: "Many of the libvirt packages used by Enterprise Linux virtualization are dependent on the nfs-utils package. If the nfs-package is required as a dependency, the nfs-server should be disabled and masked to reduce the attack surface of the system." + remediation: "Run the following command to remove nfs-utils: # dnf remove nfs-utils OR If the nfs-package is required as a dependency, run the following command to stop and mask the nfs-server service: # systemctl --now mask nfs-server." compliance: - - cis: ["3.2.8"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all + - cis: ["2.2.18"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: any rules: - - 'c:sysctl net.ipv4.tcp_syncookies -> r:^\s*net.ipv4.tcp_syncookies\s*=\s*1' - - 'not c:grep -E -r -Rh ^\s*net.ipv4.tcp_syncookies /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:^\s*net.ipv4.tcp_syncookies\s*=\s*[02]' + - "c:rpm -q nfs-utils -> r:^package nfs-utils is not installed" + - "c:systemctl is-enabled nfs-server -> r:masked|No such file or directory" - # 3.2.9 Ensure IPv6 router advertisements are not accepted (Scored) + # 2.2.19 Ensure rpcbind is not installed or the rpcbind services are masked. (Automated) - id: 6583 - title: "Ensure IPv6 router advertisements are not accepted" - description: "This setting disables the system's ability to accept IPv6 router advertisements." - rationale: "It is recommended that systems do not accept router advertisements as they could be tricked into routing traffic to compromised machines. Setting hard routes within the system (usually a single default route to a trusted router) protects the system from bad routes." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv6.conf.all.accept_ra = 0 and net.ipv6.conf.default.accept_ra = 0 . 
Run the following commands to set the active kernel parameters: # sysctl -w net.ipv6.conf.all.accept_ra=0; # sysctl -w net.ipv6.conf.default.accept_ra=0 and # sysctl -w net.ipv6.route.flush=1;" -    compliance: -      - cis: ["3.2.9"] -      - cis_csc: ["5.1"] -      - pci_dss: ["2.2.3"] -      - nist_800_53: ["CM.1"] -      - gpg_13: ["4.3"] -      - gdpr_IV: ["35.7.d"] -      - hipaa: ["164.312.b"] -      - tsc: ["CC5.2"] -    condition: all -    rules: -      - 'c:sysctl net.ipv6.conf.all.accept_ra -> r:^\s*net.ipv6.conf.all.accept_ra\s*=\s*0' -      - 'c:sysctl net.ipv6.conf.default.accept_ra -> r:^\s*net.ipv6.conf.default.accept_ra\s*=\s*0' -      - 'c:grep -Rh net.ipv6.conf.all.accept_ra /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv6.conf.all.accept_ra\s*=\s*0' -      - 'c:grep -Rh net.ipv6.conf.default.accept_ra /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv6.conf.default.accept_ra\s*=\s*0' - -  ############################################### -  # 3.3 Uncommon Network Protocols -  ############################################### -  # 3.3.1 Ensure DCCP is disabled (Scored) +    title: "Ensure rpcbind is not installed or the rpcbind services are masked." +    description: "The rpcbind utility maps RPC services to the ports on which they listen. RPC processes notify rpcbind when they start, registering the ports they are listening on and the RPC program numbers they expect to serve. The client system then contacts rpcbind on the server with a particular RPC program number. The rpcbind service redirects the client to the proper port number so it can communicate with the requested service. Portmapper is an RPC service, which always listens on tcp and udp 111, and is used to map other RPC services (such as nfs, nlockmgr, quotad, mountd, etc.) to their corresponding port number on the server. When a remote host makes an RPC call to that server, it first consults with portmap to determine where the RPC server is listening." +    rationale: "A small request (~82 bytes via UDP) sent to the Portmapper generates a large response (7x to 28x amplification), which makes it a suitable tool for DDoS attacks. If rpcbind is not required, it is recommended that the rpcbind package be removed to reduce the attack surface of the system." +    impact: "Many of the libvirt packages used by Enterprise Linux virtualization, and the nfs-utils package used for The Network File System (NFS), are dependent on the rpcbind package. If the rpcbind package is required as a dependency, the services rpcbind.service and rpcbind.socket should be stopped and masked to reduce the attack surface of the system." +    remediation: "Run the following command to remove rpcbind: # dnf remove rpcbind OR If the rpcbind package is required as a dependency, run the following commands to stop and mask the rpcbind and rpcbind.socket services: # systemctl --now mask rpcbind # systemctl --now mask rpcbind.socket." +    compliance: +      - cis: ["2.2.19"] +      - cis_csc_v8: ["4.8"] +      - cis_csc_v7: ["9.2"] +      - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] +      - iso_27001-2013: ["A.13.1.3"] +      - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] +      - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] +      - soc_2: ["CC6.3", "CC6.6"] +    condition: any +    rules: +      - "c:rpm -q rpcbind -> r:^package rpcbind is not installed" +      - "c:systemctl is-enabled rpcbind -> r:masked|No such file or directory" +      - "c:systemctl is-enabled rpcbind.socket -> r:masked|No such file or directory" + +  # 2.2.20 Ensure rsync is not installed or the rsyncd service is masked.
(Automated) - id: 6584 - title: "Ensure DCCP is disabled" - description: "The Datagram Congestion Control Protocol (DCCP) is a transport layer protocol that supports streaming media and telephony. DCCP provides a way to gain access to congestion control, without having to do it at the application layer, but does not provide in-sequence delivery" - rationale: "If the protocol is not required, it is recommended that the drivers not be installed to reduce the potential attack surface." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf. Example: vim /etc/modprobe.d/dccp.conf and add the following line: install dccp /bin/true" - compliance: - - cis: ["3.3.1"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: all + title: "Ensure rsync is not installed or the rsyncd service is masked." + description: "The rsyncd service can be used to synchronize files between systems over network links." + rationale: "Unless required, the rsync package should be removed to reduce the attack surface area of the system. The rsyncd service presents a security risk as it uses unencrypted protocols for communication. Note: If a required dependency exists for the rsync package, but the rsyncd service is not required, the service should be masked." + impact: "There are packages that are dependent on the rsync package. If the rsync package is removed, these packages will be removed as well. Before removing the rsync package, review any dependent packages to determine if they are required on the system. If a dependent package is required, mask the rsyncd service and leave the rsync package installed." + remediation: "Run the following command to remove the rsync package: # dnf remove rsync OR Run the following command to mask the rsyncd service: # systemctl --now mask rsyncd." + compliance: + - cis: ["2.2.20"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: any rules: - - 'c:modprobe -n -v dccp -> r:^\s*install\s*/bin/true|Module dccp not found' - - "not c:lsmod -> r:dccp" + - "c:rpm -q rsync -> r:^package rsync is not installed" + - "c:systemctl is-enabled rsyncd -> r:masked|No such file or directory" - # 3.3.2 Ensure SCTP is disabled (Scored) + # 2.3.1 Ensure NIS Client is not installed. (Automated) - id: 6585 - title: "Ensure SCTP is disabled" - description: "The Stream Control Transmission Protocol (SCTP) is a transport layer protocol used to support message oriented communication, with several streams of messages in one connection. It serves a similar function as TCP and UDP, incorporating features of both. It is message-oriented like UDP, and ensures reliable in-sequence transport of messages with congestion control like TCP." - rationale: "If the protocol is not being used, it is recommended that kernel module not be loaded, disabling the service to reduce the potential attack surface." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf .Example: vim /etc/modprobe.d/sctp.conf and add the following line: install sctp /bin/true" + title: "Ensure NIS Client is not installed." 
+ description: "The Network Information Service (NIS), formerly known as Yellow Pages, is a client-server directory service protocol used to distribute system configuration files. The NIS client ( ypbind) was used to bind a machine to an NIS server and receive the distributed configuration files." + rationale: "The NIS service is inherently an insecure system that has been vulnerable to DOS attacks, buffer overflows and has poor authentication for querying NIS maps. NIS generally has been replaced by such protocols as Lightweight Directory Access Protocol (LDAP). It is recommended that the service be removed." + impact: "Many insecure service clients are used as troubleshooting tools and in testing environments. Uninstalling them can inhibit capability to test and troubleshoot. If they are required it is advisable to remove the clients after use to prevent accidental or intentional misuse." + remediation: "Run the following command to remove the ypbind package: # dnf remove ypbind." compliance: - - cis: ["3.3.2"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis: ["2.3.1"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:modprobe -n -v sctp -> r:^\s*install\s*/bin/true|Module sctp not found' - - "not c:lsmod -> r:sctp" + - "c:rpm -q ypbind -> r:^package ypbind is not installed" - # 3.3.3 Ensure RDS is disabled (Scored) + # 2.3.2 Ensure rsh client is not installed. (Automated) - id: 6586 - title: "Ensure RDS is disabled" - description: "The Reliable Datagram Sockets (RDS) protocol is a transport layer protocol designed to provide low-latency, high-bandwidth communications between cluster nodes. It was developed by the Oracle Corporation." - rationale: "If the protocol is not being used, it is recommended that kernel module not be loaded, disabling the service to reduce the potential attack surface." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf .Example: vim /etc/modprobe.d/rds.conf and add the following line: install rds /bin/true" + title: "Ensure rsh client is not installed." + description: "The rsh package contains the client commands for the rsh services." + rationale: "These legacy clients contain numerous security exposures and have been replaced with the more secure SSH package. Even if the server is removed, it is best to ensure the clients are also removed to prevent users from inadvertently attempting to use these commands and therefore exposing their credentials. Note that removing the rsh package removes the clients for rsh, rcp and rlogin." + impact: "Many insecure service clients are used as troubleshooting tools and in testing environments. Uninstalling them can inhibit capability to test and troubleshoot. If they are required it is advisable to remove the clients after use to prevent accidental or intentional misuse." + remediation: "Run the following command to remove the rsh package: # dnf remove rsh." 
compliance: - - cis: ["3.3.3"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis: ["2.3.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:modprobe -n -v rds -> r:^\s*install\s*/bin/true|Module rds not found' - - "not c:lsmod -> r:rds" + - "c:rpm -q rsh -> r:^package rsh is not installed" - # 3.3.4 Ensure TIPC is disabled (Scored) + # 2.3.3 Ensure talk client is not installed. (Automated) - id: 6587 - title: "Ensure TIPC is disabled" - description: "The Transparent Inter-Process Communication (TIPC) protocol is designed to provide communication between cluster nodes." - rationale: "If the protocol is not being used, it is recommended that kernel module not be loaded, disabling the service to reduce the potential attack surface." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf .Example: vim /etc/modprobe.d/tipc.conf and add the following line: install tipc /bin/true" + title: "Ensure talk client is not installed." + description: "The talk software makes it possible for users to send and receive messages across systems through a terminal session. The talk client, which allows initialization of talk sessions, is installed by default." + rationale: "The software presents a security risk as it uses unencrypted protocols for communication." + impact: "Many insecure service clients are used as troubleshooting tools and in testing environments. Uninstalling them can inhibit capability to test and troubleshoot. If they are required it is advisable to remove the clients after use to prevent accidental or intentional misuse." + remediation: "Run the following command to remove the talk package: # dnf remove talk." compliance: - - cis: ["3.3.4"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis: ["2.3.3"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:modprobe -n -v tipc -> r:^\s*install\s*/bin/true|Module tipc not found' - - "not c:lsmod -> r:tipc" + - "c:rpm -q talk -> r:^package talk is not installed" - # 3.6 Disable IPv6 (Not Scored) + # 2.3.4 Ensure telnet client is not installed. (Automated) - id: 6588 - title: "Disable IPv6" - description: "Although IPv6 has many advantages over IPv4, not all organizations have IPv6 or dual stack configurations implemented." - rationale: "If IPv6 or dual stack is not to be used, it is recommended that IPv6 be disabled to reduce the attack surface of the system." 
- remediation: 'Edit /etc/default/grub and add ipv6.disable=1 to the GRUB_CMDLINE_LINUX parameters: GRUB_CMDLINE_LINUX="ipv6.disable=1" .Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg' - compliance: - - cis: ["3.6"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.6", "CC5.2"] - condition: all - rules: - - 'f:/boot/grub2/grubenv -> r:^\s*kernelopts=\.+ipv6.disable=1' - - ############################################### - # 3.4 Firewall Configuration - ############################################### - ############################################### - # 3.4.1 Ensure Firewall software is installed - ############################################### - # 3.4.1.1 Ensure a Firewall package is installed (Scored) - - id: 6589 - title: "Ensure a Firewall package is installed" - description: "A Firewall package should be selected. Most firewall configuration utilities operate as a front end to nftables or iptables." - rationale: "A Firewall package is required for firewall management and configuration." - remediation: "Run one of the following commands to install a Firewall package. For firewalld: dnf install firewalld .For nftables: # dnf install nftables. For iptables: # dnf install iptables" + title: "Ensure telnet client is not installed." + description: "The telnet package contains the telnet client, which allows users to start connections to other systems via the telnet protocol." + rationale: "The telnet protocol is insecure and unencrypted. The use of an unencrypted transmission medium could allow an unauthorized user to steal credentials. The ssh package provides an encrypted session and stronger security and is included in most Linux distributions." + impact: "Many insecure service clients are used as troubleshooting tools and in testing environments. Uninstalling them can inhibit capability to test and troubleshoot. If they are required it is advisable to remove the clients after use to prevent accidental or intentional misuse." + remediation: "Run the following command to remove the telnet package: # dnf remove telnet." compliance: - - cis: ["3.4.1.1"] - - cis_csc: ["9.4"] - - pci_dss: ["1.1"] - - tsc: ["CC6.6"] - condition: any + - cis: ["2.3.4"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "c:rpm -q firewalld -> r:^firewalld-" - - "c:rpm -q nftables -> r:^nftables-" - - "c:rpm -q iptables -> r:^iptables-" + - "c:rpm -q telnet -> r:^package telnet is not installed" - ############################################### - # 3.4.2 Configure firewalld - ############################################### + # 2.3.5 Ensure LDAP client is not installed. (Automated) + - id: 6589 + title: "Ensure LDAP client is not installed." + description: "The Lightweight Directory Access Protocol (LDAP) was introduced as a replacement for NIS/YP. It is a service that provides a method for looking up information from a central database." + rationale: "If the system will not need to act as an LDAP client, it is recommended that the software be removed to reduce the potential attack surface." 
+ impact: "Removing the LDAP client will prevent or inhibit using LDAP for authentication in your environment." + remediation: "Run the following command to remove the openldap-clients package: # dnf remove openldap-clients." + compliance: + - cis: ["2.3.5"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - "c:rpm -q openldap-clients -> r:^package openldap-clients is not installed" - # 3.4.2.1 Ensure firewalld service is enabled and running (Scored) + # 2.3.6 Ensure TFTP client is not installed. (Automated) - id: 6590 - title: "Ensure firewalld service is enabled and running" - description: "Ensure that the firewalld service is enabled to protect your system" - rationale: "firewalld (Dynamic Firewall Manager) tool provides a dynamically managed firewall. The tool enables network/firewall zones to define the trust level of network connections and/or interfaces. It has support both for IPv4 and IPv6 firewall settings. Also, it supports Ethernet bridges and allow you to separate between runtime and permanent configuration options. Finally, it supports an interface for services or applications to add firewall rules directly" - remediation: "Run the following command to enable and start firewalld: # systemctl --now enable firewalld" + title: "Ensure TFTP client is not installed." + description: "Trivial File Transfer Protocol (TFTP) is a simple protocol for exchanging files between two TCP/IP machines. TFTP servers allow connections from a TFTP Client for sending and receiving files." + rationale: "TFTP does not have built-in encryption, access control or authentication. This makes it very easy for an attacker to exploit TFTP to gain access to files." + remediation: "Run the following command to remove tftp: # dnf remove tftp." compliance: - - cis: ["3.4.2.1"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2"] - - tsc: ["CC6.6"] + - cis: ["2.3.6"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:systemctl is-enabled firewalld -> enabled" - - "c:firewall-cmd --state -> r:running" + - "c:rpm -q tftp -> r:^package tftp is not installed" - # 3.4.2.3 Ensure nftables is not enabled (Scored) + # 2.4 Ensure nonessential services are removed or masked. (Manual) - Not Implemented + + # 3.1.1 Verify if IPv6 is enabled on the system. (Manual) - Not Implemented + + # 3.1.2 Ensure SCTP is disabled. (Automated) - id: 6591 - title: "Ensure nftables is not enabled" - description: "nftables is a subsystem of the Linux kernel providing filtering and classification of network packets/datagrams/frames and is the successor to iptables. nftables are installed as a dependency with firewalld." - rationale: "Running firewalld and nftables concurrently may lead to conflict, therefore nftables should be stopped and masked when using firewalld." - remediation: "Run the following command to mask and stop nftables: systemctl --now mask nftables" + title: "Ensure SCTP is disabled." 
+ description: "The Stream Control Transmission Protocol (SCTP) is a transport layer protocol used to support message oriented communication, with several streams of messages in one connection. It serves a similar function as TCP and UDP, incorporating features of both. It is message-oriented like UDP, and ensures reliable in-sequence transport of messages with congestion control like TCP." + rationale: "If the protocol is not being used, it is recommended that kernel module not be loaded, disabling the service to reduce the potential attack surface." + remediation: 'Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: printf " install sctp /bin/true " >> /etc/modprobe.d/sctp.conf.' compliance: - - cis: ["3.4.2.3"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2"] - - tsc: ["CC6.6"] + - cis: ["3.1.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:systemctl status nftables -> r:Loaded:\s*disabled|Loaded:\s*masked|could not be found' - - 'c:systemctl status nftables -> r:Active:\s*inactive\s*\(dead\)|could not be found' - - "c:systemctl is-enabled nftables -> !r:^enabled" + - 'c:modprobe -n -v sctp -> r:^install\s*\t*/bin/true' + - "not c:lsmod -> r:^sctp" - # 3.4.2.6 Ensure iptables is not enabled (Scored) + # 3.1.3 Ensure DCCP is disabled. (Automated) - id: 6592 - title: "Ensure iptables is not enabled" - description: "IPtables is an application that allows a system administrator to configure the IPv4 and IPv6 tables, chains and rules provided by the Linux kernel firewall. IPtables is installed as a dependency with firewalld." - rationale: "Running firewalld and IPtables concurrently may lead to conflict, therefore IPtables should be stopped and masked when using firewalld." - remediation: "Run the following command to stop and mask iptables: systemctl --now mask iptables" + title: "Ensure DCCP is disabled." + description: "The Datagram Congestion Control Protocol (DCCP) is a transport layer protocol that supports streaming media and telephony. DCCP provides a way to gain access to congestion control, without having to do it at the application layer, but does not provide in-sequence delivery." + rationale: "If the protocol is not required, it is recommended that the drivers not be installed to reduce the potential attack surface." + remediation: 'Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: printf " install dccp /bin/true " >> /etc/modprobe.d/dccp.conf.' + compliance: + - cis: ["3.1.3"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - 'c:modprobe -n -v dccp -> r:^install\s*\t*/bin/true' + - "not c:lsmod -> r:^dccp" + + # 3.1.4 Ensure wireless interfaces are disabled. (Automated) - Not Implemented + + # 3.2.1 Ensure IP forwarding is disabled. (Automated) - Not Implemented + # 3.2.2 Ensure packet redirect sending is disabled. (Automated) - Not Implemented + # 3.3.1 Ensure source routed packets are not accepted. (Automated) - Not Implemented + # 3.3.2 Ensure ICMP redirects are not accepted. 
(Automated) - Not Implemented + # 3.3.3 Ensure secure ICMP redirects are not accepted. (Automated) - Not Implemented + # 3.3.4 Ensure suspicious packets are logged. (Automated) - Not Implemented + # 3.3.5 Ensure broadcast ICMP requests are ignored. (Automated) - Not Implemented + # 3.3.6 Ensure bogus ICMP responses are ignored. (Automated) - Not Implemented + # 3.3.7 Ensure Reverse Path Filtering is enabled. (Automated) - Not Implemented + # 3.3.8 Ensure TCP SYN Cookies is enabled. (Automated) - Not Implemented + # 3.3.9 Ensure IPv6 router advertisements are not accepted. (Automated) - Not Implemented + + # 3.4.1.1 Ensure firewalld is installed. (Automated) + - id: 6593 + title: "Ensure firewalld is installed." + description: "firewalld is a firewall management tool for Linux operating systems. It provides firewall features by acting as a front-end for the Linux kernel's netfilter framework via the iptables backend or provides firewall features by acting as a front-end for the Linux kernel's netfilter framework via the nftables utility. firewalld replaces iptables as the default firewall management tool. Use the firewalld utility to configure a firewall for less complex firewalls. The utility is easy to use and covers the typical use cases scenario. FirewallD supports both IPv4 and IPv6 networks and can administer separate firewall zones with varying degrees of trust as defined in zone profiles. Note: Starting in v0.6.0, FirewallD added support for acting as a front-end for the Linux kernel's netfilter framework via the nftables userspace utility, acting as an alternative to the nft command line program." + rationale: "A firewall utility is required to configure the Linux kernel's netfilter framework via the iptables or nftables back-end. The Linux kernel's netfilter framework host-based firewall can protect against threats originating from within a corporate network to include malicious mobile code and poorly configured software on a host. Note: Only one firewall utility should be installed and configured. FirewallD is dependent on the iptables package." + impact: "Changing firewall settings while connected over the network can result in being locked out of the system." + remediation: "Run the following command to install FirewallD and iptables: # dnf install firewalld iptables." compliance: - - cis: ["3.4.2.6"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2"] - - tsc: ["CC6.6"] + - cis: ["3.4.1.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - - 'c:systemctl status iptables -> r:Loaded:\s*disabled|Loaded:\s*masked|could not be found' - - 'c:systemctl status iptables -> r:Active:\s*inactive\s*\(dead\)|could not be found' - - "c:systemctl is-enabled iptables -> !r:enabled" + - "c:rpm -q firewalld -> r:^firewalld-" + - "c:rpm -q iptables -> r:^iptables-" - ############################################### - # 3.4.3 Configure nftables - ############################################### + # 3.4.1.2 Ensure iptables-services not installed with firewalld. (Automated) + - id: 6594 + title: "Ensure iptables-services not installed with firewalld." + description: "The iptables-services package contains the iptables.service and ip6tables.service. These services allow for management of the Host Based Firewall provided by the iptables package." 
+ rationale: "iptables.service and ip6tables.service are still supported and can be installed with the iptables-services package. Running both firewalld and the services included in the iptables-services package may lead to conflict." + impact: "Running both firewalld and iptables/ip6tables service may lead to conflict." + remediation: "Run the following commands to stop the services included in the iptables-services package and remove the iptables-services package # systemctl stop iptables # systemctl stop ip6tables # dnf remove iptables-services." + compliance: + - cis: ["3.4.1.2"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - "c:rpm -q iptables-services -> r:^package iptables-services is not installed" + + # 3.4.1.3 Ensure nftables either not installed or masked with firewalld. (Automated) + - id: 6595 + title: "Ensure nftables either not installed or masked with firewalld." + description: "nftables is a subsystem of the Linux kernel providing filtering and classification of network packets/datagrams/frames and is the successor to iptables. _Note: Support for using nftables as the back-end for firewalld was added in release v0.6.0. In Fedora 19 Linux derivatives, firewalld utilizes iptables as its back-end by default." + rationale: "Running both firewalld and nftables may lead to conflict. Note: firewalld may configured as the front-end to nftables. If this case, nftables should be stopped and masked instead of removed." + remediation: 'Run the following command to remove nftables: # dnf remove nftables OR Run the following command to stop and mask nftables" systemctl --now mask nftables.' + compliance: + - cis: ["3.4.1.3"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: any + rules: + - "c:rpm -q nftables -> r:^package nftables is not installed" + - "c:systemctl is-active nftables -> r:^inactive" + - "c:systemctl is-enabled nftables -> r:^masked" - # 3.4.3.1 Ensure iptables are flushed (Not Scored) - - id: 6593 - title: "Ensure iptables are flushed" - description: "nftables is a replacement for iptables, ip6tables, ebtables and arptables" - rationale: "It is possible to mix iptables and nftables. However, this increases complexity and also the chance to introduce errors. For simplicity flush out all iptables rules, and ensure it is not loaded." - remediation: "Run the following commands to flush iptables: For iptables: # iptables -F and For ip6tables: # ip6tables -F" - compliance: - - cis: ["3.4.3.1"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2"] - - tsc: ["CC6.6"] - condition: none + # 3.4.1.4 Ensure firewalld service enabled and running. (Automated) + - id: 6596 + title: "Ensure firewalld service enabled and running." + description: "firewalld.service enables the enforcement of firewall rules configured through firewalld." 
+ rationale: "Ensure that the firewalld.service is enabled and running to enforce firewall rules configured through firewalld." + impact: "Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following command to unmask firewalld # systemctl unmask firewalld Run the following command to enable and start firewalld # systemctl --now enable firewalld." + compliance: + - cis: ["3.4.1.4"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: all rules: - - 'c:iptables -L -> !r:^\s*Chain|^\s*target && r:\s*\S+' - - 'c:ip6tables -L -> !r:^\s*Chain|^\s*target && r:\s*\S+' + - "c:systemctl is-enabled firewalld -> r:^enabled" + - "c:firewall-cmd --state -> r:^running" - # 3.4.3.2 Ensure a table exists (Scored) - - id: 6594 - title: "Ensure a table exists" + # 3.4.1.5 Ensure firewalld default zone is set. (Automated) - Not Implemented + # 3.4.1.6 Ensure network interfaces are assigned to appropriate zone. (Manual) - Not Implemented + + # 3.4.1.7 Ensure firewalld drops unnecessary services and ports. (Manual) - Not Implemented + + # 3.4.2.1 Ensure nftables is installed. (Automated) + - id: 6597 + title: "Ensure nftables is installed." + description: "nftables provides a new in-kernel packet classification framework that is based on a network-specific Virtual Machine (VM) and a new nft userspace command line tool. nftables reuses the existing Netfilter subsystems such as the existing hook infrastructure, the connection tracking system, NAT, userspace queuing and logging subsystem. Note: - nftables is available in Linux kernel 3.13 and newer. - Only one firewall utility should be installed and configured." + rationale: "nftables is a subsystem of the Linux kernel that can protect against threats originating from within a corporate network to include malicious mobile code and poorly configured software on a host." + impact: "Changing firewall settings while connected over the network can result in being locked out of the system." + remediation: "Run the following command to install nftables # dnf install nftables." + compliance: + - cis: ["3.4.2.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: all + rules: + - "c:rpm -q nftables -> r:^nftables-" + + # 3.4.2.2 Ensure firewalld is either not installed or masked with nftables. (Automated) + - id: 6598 + title: "Ensure firewalld is either not installed or masked with nftables." + description: 'firewalld (Dynamic Firewall Manager) provides a dynamically managed firewall with support for network/firewall "zones" to assign a level of trust to a network and its associated connections, interfaces or sources. It has support for IPv4, IPv6, Ethernet bridges and also for IPSet firewall settings. There is a separation of the runtime and permanent configuration options.' + rationale: "Running both nftables.service and firewalld.service may lead to conflict and unexpected results." 
+ remediation: "Run the following command to remove firewalld # dnf remove firewalld OR Run the following command to stop and mask firewalld # systemctl --now mask firewalld." + compliance: + - cis: ["3.4.2.2"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: any + rules: + - "c:rpm -q firewalld -> r:^package firewalld is not installed" + - "not c:firewall-cmd --state -> r:^running" + - "c:systemctl is-enabled firewalld -> r:^masked" + + # 3.4.2.3 Ensure iptables-services not installed with nftables. (Automated) + - id: 6599 + title: "Ensure iptables-services not installed with nftables." + description: "The iptables-services package contains the iptables.service and ip6tables.service. These services allow for management of the Host Based Firewall provided by the iptables package." + rationale: "iptables.service and ip6tables.service are still supported and can be installed with the iptables-services package. Running both nftables and the services included in the iptables-services package may lead to conflict." + remediation: "Run the following commands to stop the services included in the iptables-services package and remove the iptables-services package # systemctl stop iptables # systemctl stop ip6tables # dnf remove iptables-services." + compliance: + - cis: ["3.4.2.3"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - "c:rpm -q iptables-services -> r:^package iptables-services is not installed" + + # 3.4.2.4 Ensure iptables are flushed with nftables. (Manual) - Not Implemented + + # 3.4.2.5 Ensure an nftables table exists. (Automated) + - id: 6600 + title: "Ensure an nftables table exists." description: "Tables hold chains. Each table only has one address family and only applies to packets of this family. Tables can have one of five families." rationale: "nftables doesn't have any default tables. Without a table being build, nftables will not filter network traffic." - remediation: "Run the following command to create a table in nftables: # nft create table inet .Example: # nft create table inet filter" + impact: "Adding rules to a running nftables can cause loss of connectivity to the system." + remediation: "Run the following command to create a table in nftables # nft create table inet
Example: # nft create table inet filter." compliance: - - cis: ["3.4.3.2"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2"] - - tsc: ["CC6.6"] + - cis: ["3.4.2.5"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - 'c:nft list tables -> r:\w+' - # 3.4.3.3 Ensure base chains exist (Scored) - - id: 6595 - title: "Ensure base chains exist" + # 3.4.2.6 Ensure nftables base chains exist. (Automated) + - id: 6601 + title: "Ensure nftables base chains exist." description: "Chains are containers for rules. They exist in two kinds, base chains and regular chains. A base chain is an entry point for packets from the networking stack, a regular chain may be used as jump target and is used for better rule organization." rationale: "If a base chain doesn't exist with a hook for input, forward, and delete, packets that would flow through those chains will not be touched by nftables." - remediation: "Run the following command to create the base chains: # nft create chain inet
{ type filter hook <(input|forward|output)> priority 0 \\; } . Example: # nft create chain inet filter input { type filter hook input priority 0 \\; } # nft create chain inet filter forward { type filter hook forward priority 0\\; } # nft create chain inet filter output { type filter hook output priority 0 \\; }" + impact: "If configuring nftables over ssh, creating a base chain with a policy of drop will cause loss of connectivity. Ensure that a rule allowing ssh has been added to the base chain prior to setting the base chain's policy to drop." + remediation: "Run the following command to create the base chains: # nft create chain inet
{ type filter hook <(input|forward|output)> priority 0 \\; } Example: # nft create chain inet filter input { type filter hook input priority 0 \\; } # nft create chain inet filter forward { type filter hook forward priority 0 \\; } # nft create chain inet filter output { type filter hook output priority 0 \\; }." compliance: - - cis: ["3.4.3.3"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2"] - - tsc: ["CC6.6"] + - cis: ["3.4.2.6"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - "c:nft list ruleset -> r:hook input" - "c:nft list ruleset -> r:hook forward" - "c:nft list ruleset -> r:hook output" - # 3.4.3.6 Ensure default deny firewall policy (Scored) - - id: 6596 - title: "Ensure default deny firewall policy" + # 3.4.2.7 Ensure nftables loopback traffic is configured. (Automated) + - id: 6602 + title: "Ensure nftables loopback traffic is configured." + description: "Configure the loopback interface to accept traffic. Configure all other interfaces to deny traffic to the loopback network." + rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure." + remediation: "Run the following commands to implement the loopback rules: # nft add rule inet filter input iif lo accept # nft create rule inet filter input ip saddr 127.0.0.0/8 counter drop IF IPv6 is enabled: Run the following command to implement the IPv6 loopback rules: # nft add rule inet filter input ip6 saddr ::1 counter drop." + compliance: + - cis: ["3.4.2.7"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: all + rules: + - 'c:sh -c "nft list ruleset | awk ''/hook input/,/}/''" -> r:iif "lo" accept' + - 'c:sh -c "nft list ruleset | awk ''/hook input/,/}/''" -> r:ip saddr|ip6 saddr' + + # 3.4.2.8 Ensure nftables outbound and established connections are configured. (Manual) -Not Implemented + + # 3.4.2.9 Ensure nftables default deny firewall policy. (Automated) + - id: 6603 + title: "Ensure nftables default deny firewall policy." description: "Base chain policy is the default verdict that will be applied to packets reaching the end of the chain." - rationale: "There are two policies: accept (Default) and drop. If the policy is set to accept , the firewall will accept any packet that is not configured to be denied and the packet will continue transversing the network stack. It is easier to white list acceptable usage than to black list unacceptable usage." - remediation: "Run the following command for the base chains with the input, forward, and output hooks to implement a default DROP policy: # nft chain
{ policy drop \\; } . Example: # nft chain inet filter input { policy drop \\; } ; # nft chain inet filter forward { policy drop \\; } and # nft chain inet filter output { policy drop \\; }" + rationale: "There are two policies: accept (Default) and drop. If the policy is set to accept, the firewall will accept any packet that is not configured to be denied and the packet will continue traversing the network stack. It is easier to white list acceptable usage than to black list unacceptable usage. Note: Changing firewall settings while connected over the network can result in being locked out of the system." + impact: "If configuring nftables over ssh, creating a base chain with a policy of drop will cause loss of connectivity. Ensure that a rule allowing ssh has been added to the base chain prior to setting the base chain's policy to drop." + remediation: "Run the following command for the base chains with the input, forward, and output hooks to implement a default DROP policy: # nft chain
{ policy drop \\; } Example: # nft chain inet filter input { policy drop \\; } # nft chain inet filter forward { policy drop \\; } # nft chain inet filter output { policy drop \\; }." compliance: - - cis: ["3.4.3.6"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC6.6"] + - cis: ["3.4.2.9"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - "c:nft list ruleset -> r:hook input && r:policy drop" - "c:nft list ruleset -> r:hook forward && r:policy drop" - "c:nft list ruleset -> r:hook output && r:policy drop" - # 3.4.3.7 Ensure nftables service is enabled (Scored) - - id: 6597 - title: "Ensure nftables service is enabled" - description: "The nftables service allows for the loading of nftables rulesets during boot, or starting of the nftables service." + # 3.4.2.10 Ensure nftables service is enabled. (Automated) + - id: 6604 + title: "Ensure nftables service is enabled." + description: "The nftables service allows for the loading of nftables rulesets during boot, or starting on the nftables service." rationale: "The nftables service restores the nftables rules from the rules files referenced in the /etc/sysconfig/nftables.conf file during boot or the starting of the nftables service." - remediation: "Run the following command to enable the nftables service: # systemctl --now enable nftables" + remediation: "Run the following command to enable the nftables service: # systemctl enable nftables." compliance: - - cis: ["3.4.3.7"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2"] - - tsc: ["CC6.6"] + - cis: ["3.4.2.10"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - "c:systemctl is-enabled nftables -> r:^enabled" - ############################################### - # 3.4.4 Configure iptables - ############################################### - ############################################### - # 3.4.4.1 Configure IPv4 iptables - ############################################### - # 3.4.4.1.1 Ensure default deny firewall policy (Scored) - - id: 6598 - title: "Configure IPv4 iptables" - description: "A default deny all policy on connections ensures that any unconfigured network usage will be rejected." - rationale: "With a default accept policy the firewall will accept any packet that is not configured to be denied. It is easier to white list acceptable usage than to black list unacceptable usage." - remediation: "Run the following commands to implement a default DROP policy: # iptables -P INPUT DROP; # iptables -P OUTPUT DROP; # iptables -P FORWARD DROP" + # 3.4.2.11 Ensure nftables rules are permanent. (Automated) + - id: 6605 + title: "Ensure nftables rules are permanent." + description: "nftables is a subsystem of the Linux kernel providing filtering and classification of network packets/datagrams/frames. The nftables service reads the /etc/sysconfig/nftables.conf file for a nftables file or files to include in the nftables ruleset. A nftables ruleset containing the input, forward, and output base chains allow network traffic to be filtered." 
+ rationale: "Changes made to nftables ruleset only affect the live system, you will also need to configure the nftables ruleset to apply on boot." + remediation: 'Edit the /etc/sysconfig/nftables.conf file and un-comment or add a line with include for each nftables file you want included in the nftables ruleset on boot: Example: include "/etc/nftables/nftables.rules".' compliance: - - cis: ["3.4.4.1.1"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC6.6"] + - cis: ["3.4.2.11"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - - 'c:iptables -L -> r:Chain INPUT \(policy DROP\)' - - 'c:iptables -L -> r:Chain FORWARD \(policy DROP\)' - - 'c:iptables -L -> r:Chain OUTPUT \(policy DROP\)' + - "f:/etc/nftables.conf -> r:include *" - # 3.4.4.1.2 Ensure loopback traffic is configured (Scored) - - id: 6599 - title: "Ensure loopback traffic is configured" + # 3.4.3.1.1 Ensure iptables packages are installed. (Automated) + - id: 6606 + title: "Ensure iptables packages are installed." + description: "iptables is a utility program that allows a system administrator to configure the tables provided by the Linux kernel firewall, implemented as different Netfilter modules, and the chains and rules it stores. Different kernel modules and programs are used for different protocols; iptables applies to IPv4, ip6tables to IPv6, arptables to ARP, and ebtables to Ethernet frames." + rationale: "A method of configuring and maintaining firewall rules is necessary to configure a Host Based Firewall." + remediation: "Run the following command to install iptables and iptables-services # dnf install iptables iptables-services." + compliance: + - cis: ["3.4.3.1.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: all + rules: + - "c:rpm -q iptables -> r:^iptables-" + - "c:rpm -q iptables-services -> r:^iptables-services-" + + # 3.4.3.1.2 Ensure nftables is not installed with iptables. (Automated) + - id: 6607 + title: "Ensure nftables is not installed with iptables." + description: "nftables is a subsystem of the Linux kernel providing filtering and classification of network packets/datagrams/frames and is the successor to iptables." + rationale: "Running both iptables and nftables may lead to conflict." + remediation: "Run the following command to remove nftables: # dnf remove nftables." + compliance: + - cis: ["3.4.3.1.2"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - "c:rpm -q nftables -> r:package nftables is not installed" + + # 3.4.3.1.3 Ensure firewalld is either not installed or masked with iptables. (Automated) + - id: 6608 + title: "Ensure firewalld is either not installed or masked with iptables." 
+ description: 'firewalld (Dynamic Firewall Manager) provides a dynamically managed firewall with support for network/firewall "zones" to assign a level of trust to a network and its associated connections, interfaces or sources. It has support for IPv4, IPv6, Ethernet bridges and also for IPSet firewall settings. There is a separation of the runtime and permanent configuration options.' + rationale: "Running iptables.service and\\or ip6tables.service with firewalld.service may lead to conflict and unexpected results." + remediation: "Run the following command to remove firewalld # yum remove firewalld OR Run the following command to stop and mask firewalld # systemctl --now mask firewalld." + compliance: + - cis: ["3.4.3.1.3"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - "c:rpm -q firewalld -> r:^package firewalld is not installed" + - "not c:systemctl status firewalld -> r:active && r:running" + - "c:systemctl is-enabled firewalld -> r:^masked" + + # 3.4.3.2.1 Ensure iptables loopback traffic is configured. (Automated) + - id: 6609 + title: "Ensure iptables loopback traffic is configured." description: "Configure the loopback interface to accept traffic. Configure all other interfaces to deny traffic to the loopback network (127.0.0.0/8)." - rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network (127.0.0.0/8) traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure." - remediation: "Run the following commands to implement the loopback rules: # iptables -A INPUT -i lo -j ACCEPT # iptables -A OUTPUT -o lo -j ACCEPT # iptables -A INPUT -s 127.0.0.0/8 -j DROP" + rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network (127.0.0.0/8) traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure. Note: Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following commands to implement the loopback rules: # iptables -A INPUT -i lo -j ACCEPT # iptables -A OUTPUT -o lo -j ACCEPT # iptables -A INPUT -s 127.0.0.0/8 -j DROP." 
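+ # Illustrative note (not part of the benchmark text): after the three iptables commands in the
+ # remediation above are applied, `iptables -L INPUT -v -n` and `iptables -L OUTPUT -v -n` are
+ # expected to list entries similar to the following, which is what the regex checks below look
+ # for (column spacing and packet counters vary by iptables version):
+ #   0     0 ACCEPT     all  --  lo     *       0.0.0.0/0            0.0.0.0/0
+ #   0     0 DROP       all  --  *      *       127.0.0.0/8          0.0.0.0/0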
compliance: - - cis: ["3.4.4.1.2"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC6.6"] + - cis: ["3.4.3.2.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - 'c:iptables -L INPUT -v -n -> r:\.*ACCEPT\.*all\.*lo\.**\.*0.0.0.0/0\.*0.0.0.0/0' - 'c:iptables -L INPUT -v -n -> r:\.*DROP\.*all\.**\.**\.*127.0.0.0/8\.*0.0.0.0/0' - 'c:iptables -L OUTPUT -v -n -> r:\.*ACCEPT\.*all\.**\.*lo\.*0.0.0.0/0\.*0.0.0.0/0' - ############################################### - # 3.4.4.2 Configure IPv6 ip6tables - ############################################### - # 3.4.4.2.1 Ensure IPv6 default deny firewall policy (Scored) - - id: 6600 - title: "Ensure IPv6 default deny firewall policy" + # 3.4.3.2.2 Ensure iptables outbound and established connections are configured. (Manual) - Not Implemented + # 3.4.3.2.3 Ensure iptables rules exist for all open ports. (Automated) - Not Implemented + + # 3.4.3.2.4 Ensure iptables default deny firewall policy. (Automated) + - id: 6610 + title: "Ensure iptables default deny firewall policy." description: "A default deny all policy on connections ensures that any unconfigured network usage will be rejected." - rationale: "With a default accept policy the firewall will accept any packet that is not configured to be denied. It is easier to white list acceptable usage than to black list unacceptable usage." - remediation: "Run the following commands to implement a default DROP policy: # ip6tables -P INPUT DROP; # ip6tables -P OUTPUT DROP; # ip6tables -P FORWARD DROP" + rationale: "With a default accept policy the firewall will accept any packet that is not configured to be denied. It is easier to white list acceptable usage than to black list unacceptable usage. Note: Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following commands to implement a default DROP policy: # iptables -P INPUT DROP # iptables -P OUTPUT DROP # iptables -P FORWARD DROP." compliance: - - cis: ["3.4.4.2.1"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC6.6"] + - cis: ["3.4.3.2.4"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - - 'c:ip6tables -L -> r:Chain INPUT \(policy DROP\)' - - 'c:ip6tables -L -> r:Chain FORWARD \(policy DROP\)' - - 'c:ip6tables -L -> r:Chain OUTPUT \(policy DROP\)' + - "c:iptables -L -> r:^Chain INPUT && r:policy DROP" + - "c:iptables -L -> r:^Chain FORWARD && r:policy DROP" + - "c:iptables -L -> r:^Chain OUTPUT && r:policy DROP" - # 3.4.4.2.2 Ensure IPv6 loopback traffic is configured (Scored) - - id: 6601 - title: "Ensure loopback traffic is configured" + # 3.4.3.2.5 Ensure iptables rules are saved. (Automated) - Not Implemented + + # 3.4.3.2.6 Ensure iptables is enabled and active. (Automated) + - id: 6611 + title: "Ensure iptables is enabled and active." + description: "iptables.service is a utility for configuring and maintaining iptables." 
+ rationale: "iptables.service will load the iptables rules saved in the file /etc/sysconfig/iptables at boot, otherwise the iptables rules will be cleared during a re-boot of the system." + remediation: "Run the following command to enable and start iptables: # systemctl --now enable iptables." + compliance: + - cis: ["3.4.3.2.6"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: all + rules: + - "c:systemctl is-enabled iptables -> r:^enabled" + - "c:systemctl is-active iptables -> r:^active" + + # 3.4.3.3.1 Ensure ip6tables loopback traffic is configured. (Automated) + - id: 6612 + title: "Ensure ip6tables loopback traffic is configured." description: "Configure the loopback interface to accept traffic. Configure all other interfaces to deny traffic to the loopback network (::1)." - rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network (::1) traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure." - remediation: "Run the following commands to implement the loopback rules: # ip6tables -A INPUT -i lo -j ACCEPT # ip6tables -A OUTPUT -o lo -j ACCEPT # ip6tables -A INPUT -s ::1 -j DROP" + rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network (::1) traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure. Note: Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following commands to implement the loopback rules: # ip6tables -A INPUT -i lo -j ACCEPT # ip6tables -A OUTPUT -o lo -j ACCEPT # ip6tables -A INPUT -s ::1 -j DROP." compliance: - - cis: ["3.4.4.2.2"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC6.6"] + - cis: ["3.4.3.3.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - - 'c:ip6tables -L INPUT -v -n -> r:\.*ACCEPT\.*all\.*lo\.**\.*::/0\.*::/0' - - 'c:ip6tables -L INPUT -v -n -> r:\.*DROP\.*all\.**\.**\.*::1\.*::/0' - - 'c:ip6tables -L OUTPUT -v -n -> r:\.*ACCEPT\.*all\.**\.*lo\.*::/0\.*::/0' + - 'c:ip6tables -L INPUT -v -n -> r:ACCEPT\s*\t*all\s*\t*lo && r:\s*::/0' + - 'c:ip6tables -L INPUT -v -n -> r:DROP\s*\t*all\s*\t*lo && r:\s*::/0' + - 'c:ip6tables -L OUTPUT -v -n -> r:ACCEPT\s*\t*all\s*\t*lo && r:\s*::/0' - # 3.5 Ensure wireless interfaces are disabled (Scored) - - id: 6602 - title: "Ensure wireless interfaces are disabled" - description: "Wireless networking is used when wired networks are unavailable. CentOS Linux contains a wireless tool kit to allow system administrators to configure and use wireless networks." - rationale: "If wireless is not to be used, wireless devices can be disabled to reduce the potential attack surface." 
- remediation: "Run the following command to disable any wireless interfaces: # nmcli radio all off . Disable any wireless interfaces in your network configuration." - compliance: - - cis: ["3.5"] - - cis_csc: ["15.4", "15.5"] - - pci_dss: ["1.2.3"] - - tsc: ["CC6.6"] - references: - - nmcli(1) - Linux man page + # 3.4.3.3.2 Ensure ip6tables outbound and established connections are configured. (Manual) - Not Implemented + # 3.4.3.3.3 Ensure ip6tables firewall rules exist for all open ports. (Automated) - Not Implemented + + # 3.4.3.3.4 Ensure ip6tables default deny firewall policy. (Automated) + - id: 6613 + title: "Ensure ip6tables default deny firewall policy." + description: "A default deny all policy on connections ensures that any unconfigured network usage will be rejected." + rationale: "With a default accept policy the firewall will accept any packet that is not configured to be denied. It is easier to white list acceptable usage than to black list unacceptable usage. Note: Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following commands to implement a default DROP policy: # ip6tables -P INPUT DROP # ip6tables -P OUTPUT DROP # ip6tables -P FORWARD DROP." + compliance: + - cis: ["3.4.3.3.4"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - - "c:nmcli radio wifi -> r:^disabled" - - "c:nmcli radio wwan -> r:^disabled" + - "c:ip6tables -L -> r:^Chain INPUT && r:policy DROP" + - "c:ip6tables -L -> r:^Chain FORWARD && r:policy DROP" + - "c:ip6tables -L -> r:^Chain OUTPUT && r:policy DROP" - ############################################### - # 4 Logging and Auditing - ############################################### - ############################################### - # 4.1 Configure System Accounting (auditd) - ############################################### + # 3.4.3.3.5 Ensure ip6tables rules are saved. (Automated) - Not Implemented - # 4.1.1.1 Ensure auditd is installed (Scored) - - id: 6603 - title: "Ensure auditd is installed" + # 3.4.3.3.6 Ensure ip6tables is enabled and active. (Automated) + - id: 6614 + title: "Ensure ip6tables is enabled and active." + description: "ip6tables.service is a utility for configuring and maintaining ip6tables." + rationale: "ip6tables.service will load the iptables rules saved in the file /etc/sysconfig/ip6tables at boot, otherwise the ip6tables rules will be cleared during a re-boot of the system." + remediation: "Run the following command to enable and start ip6tables: # systemctl --now start ip6tables." + compliance: + - cis: ["3.4.3.3.6"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: all + rules: + - "c:systemctl is-enabled ip6tables -> r:^enabled" + - "c:systemctl is-active ip6tables -> r:^active" + + # 4.1.1.1 Ensure auditd is installed. (Automated) + - id: 6615 + title: "Ensure auditd is installed." description: "auditd is the userspace component to the Linux Auditing System. It's responsible for writing audit records to the disk." 
rationale: "The capturing of system events provides system administrators with information to allow them to determine if unauthorized access to their system is occurring." - remediation: "Run the following command to Install auditd # dnf install audit audit-libs" + remediation: "Run the following command to Install auditd # dnf install audit." compliance: - cis: ["4.1.1.1"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.1"] - - nist_800_53: ["AU.2"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.2", "CC6.3", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.2", "8.5"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - - "c:rpm -q audit audit-libs -> r:^audit-" + - "c:rpm -q audit -> r:^audit-" - # 4.1.1.2 Ensure auditd service is enabled (Scored) - - id: 6604 - title: "Ensure auditd service is enabled" + # 4.1.1.2 Ensure auditd service is enabled. (Automated) + - id: 6616 + title: "Ensure auditd service is enabled." description: "Turn on the auditd daemon to record system events." rationale: "The capturing of system events provides system administrators with information to allow them to determine if unauthorized access to their system is occurring." - remediation: "Run the following command to enable auditd : # systemctl --now enable auditd" + remediation: "Run the following command to enable auditd: # systemctl --now enable auditd." compliance: - - cis: ["4.1.2"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.1", "10.7"] - - nist_800_53: ["AU.2"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.2", "CC6.3", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["4.1.1.2"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - - "c:systemctl is-enabled auditd -> enabled" + - "c:systemctl is-enabled auditd -> r:^enabled" - # 4.1.1.3 Ensure auditing for processes that start prior to auditd is enabled (Scored) - - id: 6605 - title: "Ensure auditing for processes that start prior to auditd is enabled" + # 4.1.1.3 Ensure auditing for processes that start prior to auditd is enabled. (Automated) + - id: 6617 + title: "Ensure auditing for processes that start prior to auditd is enabled." description: "Configure grub2 so that processes that are capable of being audited can be audited even if they start up prior to auditd startup." - rationale: "The capturing of system events provides system administrators with information to allow them to determine if unauthorized access to their system is occurring." - remediation: 'Edit /etc/default/grub and add audit=1 to GRUB_CMDLINE_LINUX : GRUB_CMDLINE_LINUX="audit=1" . Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg' + rationale: "Audit events need to be captured on processes that start up prior to auditd , so that potential malicious activity cannot go undetected." 
+ remediation: "Run the following command to add audit=1 to GRUB_CMDLINE_LINUX: # grubby --update-kernel ALL --args 'audit=1'." compliance: - cis: ["4.1.1.3"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.2.6", "10.7"] - - nist_800_53: ["AU.2"] - - gpg_13: ["7.9"] - - gdpr_IV: ["35.7.d", "32.2"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: none + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all rules: - - "f:/boot/grub2/grubenv -> r:kernelopts= && !r:audit=1" + - "not f:/boot/grub2/grubenv -> r:kernelopts= && !r:audit=1" - # 4.1.1.4 Ensure auditing for processes that start prior to auditd is enabled (Scored) - - id: 6606 - title: "Ensure audit_backlog_limit is sufficient" + # 4.1.1.4 Ensure audit_backlog_limit is sufficient. (Automated) + - id: 6618 + title: "Ensure audit_backlog_limit is sufficient." description: "The backlog limit has a default setting of 64." - rationale: "During boot if audit=1, then the backlog will hold 64 records. If more than 64 records are created during boot, auditd records will be lost and potential malicious activity could go undetected." - remediation: "Edit /etc/default/grub and add audit_backlog_limit= to GRUB_CMDLINE_LINUX: Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg" + rationale: "During boot if audit=1, then the backlog will hold 64 records. If more that 64 records are created during boot, auditd records will be lost and potential malicious activity could go undetected." + remediation: "Run the following command to add audit_backlog_limit= to GRUB_CMDLINE_LINUX: # grubby --update-kernel ALL --args 'audit_backlog_limit=' Example: # grubby --update-kernel ALL --args 'audit_backlog_limit=8192'." compliance: - cis: ["4.1.1.4"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.7"] - - nist_800_53: ["AU.4"] - - hipaa: ["164.312.b"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - 'f:/boot/grub2/grubenv -> r:kernelopts= && n:audit_backlog_limit=(\d+) compare >= 8192' - # 4.1.2.1 Ensure audit log storage size is configured (Not Scored) - - id: 6607 - title: "Ensure audit log storage size is configured" + # 4.1.2.1 Ensure audit log storage size is configured. (Automated) + - id: 6619 + title: "Ensure audit log storage size is configured." description: "Configure the maximum size of the audit log file. Once the log reaches the maximum size, it will be rotated and a new log file will be started." rationale: "It is important that an appropriate size is determined for log files so that they do not impact the system and audit data is not lost." - remediation: "Set the following parameter in /etc/audit/auditd.conf in accordance with site policy: max_log_file = " + remediation: "Set the following parameter in /etc/audit/auditd.conf in accordance with site policy: max_log_file = ." 
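+ # Illustrative example (not part of the benchmark text): max_log_file takes the maximum size of a
+ # single audit log file in megabytes, and the actual value is a site-policy decision. For instance:
+ #   max_log_file = 32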
compliance: - cis: ["4.1.2.1"] - - cis_csc: ["6.4"] - - pci_dss: ["10.7"] - - nist_800_53: ["AU.4"] - - hipaa: ["164.312.b"] + - cis_csc_v8: ["8.3"] + - cis_csc_v7: ["6.4"] + - iso_27001-2013: ["A.12.4.1"] + - pci_dss_v3.2.1: ["10.7"] + - soc_2: ["A1.1"] condition: all rules: - - 'f:/etc/audit/auditd.conf -> r:^max_log_file\s*=\s*\d+' + - "f:/etc/audit/auditd.conf" + - 'f:/etc/audit/auditd.conf -> r:^\s*\t*max_log_file\s*\t*=\s*\t*\d+' - # 4.1.2.2 Ensure audit logs are not automatically deleted (Scored) - - id: 6608 - title: "Ensure audit logs are not automatically deleted" + # 4.1.2.2 Ensure audit logs are not automatically deleted. (Automated) + - id: 6620 + title: "Ensure audit logs are not automatically deleted." description: "The max_log_file_action setting determines how to handle the audit log file reaching the max file size. A value of keep_logs will rotate the logs but never delete old logs." rationale: "In high security contexts, the benefits of maintaining a long audit history exceed the cost of storing the audit history." - remediation: "Set the following parameter in /etc/audit/auditd.conf: max_log_file_action = keep_logs" + remediation: "Set the following parameter in /etc/audit/auditd.conf: max_log_file_action = keep_logs." compliance: - cis: ["4.1.2.2"] - - cis_csc: ["6.4"] - - pci_dss: ["10.7"] - - nist_800_53: ["AU.9"] - - hipaa: ["164.312.b"] + - cis_csc_v8: ["8.3"] + - cis_csc_v7: ["6.4"] + - iso_27001-2013: ["A.12.4.1"] + - pci_dss_v3.2.1: ["10.7"] + - soc_2: ["A1.1"] condition: all rules: - - 'f:/etc/audit/auditd.conf -> r:^max_log_file_action\s*=\s*keep_logs' + - "f:/etc/audit/auditd.conf" + - 'f:/etc/audit/auditd.conf -> r:^\s*\t*max_log_file_action\s*\t*=\s*\t*keep_logs' - # 4.1.2.3 Ensure system is disabled when audit logs are full (Scored) - - id: 6609 - title: "Ensure system is disabled when audit logs are full" - description: "The auditd daemon can be configured to halt the system when the audit logs are full." + # 4.1.2.3 Ensure system is disabled when audit logs are full. (Automated) + - id: 6621 + title: "Ensure system is disabled when audit logs are full." + description: "The auditd daemon can be configured to halt the system when the audit logs are full. The admin_space_left_action parameter tells the system what action to take when the system has detected that it is low on disk space. Valid values are ignore, syslog, suspend, single, and halt. - ignore, the audit daemon does nothing - Syslog, the audit daemon will issue a warning to syslog - Suspend, the audit daemon will stop writing records to the disk - single, the audit daemon will put the computer system in single user mode - halt, the audit daemon will shutdown the system." rationale: "In high security contexts, the risk of detecting unauthorized access or nonrepudiation exceeds the benefit of the system's availability." 
- remediation: "Set the following parameters in /etc/audit/auditd.conf: space_left_action = email action_mail_acct = root admin_space_left_action = halt" - compliance: - - cis: ["4.1.1.2"] - - cis_csc: ["6.3"] - - pci_dss: ["10.7"] - condition: all - rules: - - 'f:/etc/audit/auditd.conf -> r:^space_left_action\s*=\s*email' - - 'f:/etc/audit/auditd.conf -> r:^action_mail_acct\s*=\s*root' - - 'f:/etc/audit/auditd.conf -> r:^admin_space_left_action\s*=\s*halt' - - ## 4.1.3 Ensure changes to system administration scope (sudoers) is collected (Scored) - - id: 6610 - title: "Ensure changes to system administration scope (sudoers) is collected" - description: 'Monitor scope changes for system administrations. If the system has been properly configured to force system administrators to log in as themselves first and then use the sudo command to execute privileged commands, it is possible to monitor changes in scope. The file /etc/sudoers will be written to when the file or its attributes have changed. The audit records will be tagged with the identifier "scope."' - rationale: "Changes in the /etc/sudoers file can indicate that an unauthorized change has been made to scope of system administrator activity." - remediation: "Add the following line to the /etc/audit/audit.rules file: -w /etc/sudoers -p wa -k scope -w /etc/sudoers.d/ -p wa -k scope" - compliance: - - cis: ["4.1.15"] - - cis_csc: ["4.8"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - "d:/etc/audit/rules.d -> .rules -> r:-w /etc/sudoers && r:-p wa && r:-k scope" - - "d:/etc/audit/rules.d -> .rules -> r:-w /etc/sudoers.d/ && r:-p wa && r:-k scope" - - # 4.1.4 Ensure login and logout events are collected (Scored) - - id: 6611 - title: "Ensure login and logout events are collected" - description: "Monitor login and logout events. The parameters below track changes to files associated with login/logout events. The file /var/log/faillog tracks failed events from login. The file /var/log/lastlog maintain records of the last time a user successfully logged in." - rationale: "Monitoring login/logout events could provide a system administrator with information associated with brute force attacks against user logins." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -w /var/log/lastlog -p wa -k logins -w /var/run/faillock/ -p wa -k logins" + impact: "If the admin_space_left_action parameter is set to halt the audit daemon will shutdown the system when the disk partition containing the audit logs becomes full." + remediation: "Set the following parameters in /etc/audit/auditd.conf: space_left_action = email action_mail_acct = root set admin_space_left_action to either halt or single in /etc/audit/auditd.conf. Example: admin_space_left_action = halt." 
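+ # Illustrative example (not part of the benchmark text): in /etc/audit/auditd.conf the settings
+ # named in the remediation are three separate lines, e.g.:
+ #   space_left_action = email
+ #   action_mail_acct = root
+ #   admin_space_left_action = halt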
compliance: - - cis: ["4.1.4"] - - cis_csc: ["4.9", "16.13"] - - pci_dss: ["10.2.1", "10.2.4", "10.3"] - - nist_800_53: ["AC.7", "AU.14"] - - gpg_13: ["7.8"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["4.1.2.3"] + - cis_csc_v8: ["8.2", "8.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3", "10.7"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + - soc_2: ["A1.1"] condition: all rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/log/lastlog -p wa -k logins' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/run/faillock/ -p wa -k logins' + - 'f:/etc/audit/auditd.conf -> r:^\s*space_left_action\s*=\s*email' + - 'f:/etc/audit/auditd.conf -> r:^\s*action_mail_acct\s*=\s*root' + - 'f:/etc/audit/auditd.conf -> r:^\s*admin_space_left_action\s*=\s*halt|^\s*admin_space_left_action\s*=\s*single' - # 4.1.5 Ensure session initiation information is collected (Scored) - - id: 6612 - title: "Ensure session initiation information is collected" - description: 'Monitor session initiation events. The parameters in this section track changes to the files associated with session events. The file /var/run/utmp file tracks all currently logged in users. All audit records will be tagged with the identifier "session." The /var/log/wtmp file tracks logins, logouts, shutdown, and reboot events. The file /var/log/btmp keeps track of failed login attempts and can be read by entering the command /usr/bin/last -f /var/log/btmp . All audit records will be tagged with the identifier "logins.".' - rationale: "Monitoring these files for changes could alert a system administrator to logins occurring at unusual hours, which could indicate intruder activity (i.e. a user logging in at a time when they do not normally log in)." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -w /var/run/utmp -p wa -k session -w /var/log/wtmp -p wa -k logins -w /var/log/btmp -p wa -k logins" + # 4.1.3.1 Ensure changes to system administration scope (sudoers) is collected. (Automated) + - id: 6622 + title: "Ensure changes to system administration scope (sudoers) is collected." + description: 'Monitor scope changes for system administrators. If the system has been properly configured to force system administrators to log in as themselves first and then use the sudo command to execute privileged commands, it is possible to monitor changes in scope. The file /etc/sudoers, or files in /etc/sudoers.d, will be written to when the file(s) or related attributes have changed. The audit records will be tagged with the identifier "scope".' + rationale: "Changes in the /etc/sudoers and /etc/sudoers.d files can indicate that an unauthorized change has been made to the scope of system administrator activity." + remediation: "Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor scope changes for system administrators. Example: # printf \" -w /etc/sudoers -p wa -k scope -w /etc/sudoers.d -p wa -k scope \" >> /etc/audit/rules.d/50-scope.rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi." 
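+ # Illustrative note (assumed output, not benchmark text): once the example 50-scope.rules file is
+ # loaded with `augenrules --load`, `auditctl -l` should report lines equivalent to the following,
+ # which the checks below match both in the rules files and in the running configuration:
+ #   -w /etc/sudoers -p wa -k scope
+ #   -w /etc/sudoers.d -p wa -k scope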
compliance: - - cis: ["4.1.5"] - - cis_csc: ["4.9", "16.13"] - - pci_dss: ["10.3"] - - nist_800_53: ["AC.7", "AU.14"] - - hipaa: ["164.312.b"] + - cis: ["4.1.3.1"] + - cis_csc_v7: ["4.8"] + - iso_27001-2013: ["A.12.4.3"] condition: all rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/run/utmp -p wa -k session' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/log/wtmp -p wa -k logins' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/log/btmp -p wa -k logins' + - "d:/etc/audit/rules.d" + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/sudoers && r:-p wa && r:-k scope|key=\\s*\t*scope' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/sudoers.d && r:-p wa && r:-k scope|key=\\s*\t*scope' + - 'c:auditctl -l -> r:^-w && r:/etc/sudoers && r:-p wa && r:-k scope|key=\\s*\t*scope' + - 'c:auditctl -l -> r:^-w && r:/etc/sudoers.d && r:-p wa && r:-k scope|key=\\s*\t*scope' - # 4.1.6 Ensure events that modify date and time information are collected (Scored) - - id: 6613 - title: "Ensure events that modify date and time information are collected" - description: 'Capture events where the system date and/or time has been modified. The parameters in this section are set to determine if the adjtimex (tune kernel clock), settimeofday (Set time, using timeval and timezone structures) stime (using seconds since 1/1/1970) or clock_settime (allows for the setting of several internal clocks and timers) system calls have been executed and always write an audit record to the /var/log/audit.log file upon exit, tagging the records with the identifier "time-change".' + # 4.1.3.2 Ensure actions as another user are always logged. (Automated) + - id: 6623 + title: "Ensure actions as another user are always logged." + description: "sudo provides users with temporary elevated privileges to perform operations, either as the superuser or another user." + rationale: "Creating an audit log of users with temporary elevated privileges and the operation(s) they performed is essential to reporting. Administrators will want to correlate the events written to the audit trail with the records written to sudo's logfile to verify if unauthorized commands have been executed." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor elevated privileges. 64 Bit systems Example: # printf \" -a always,exit -F arch=b64 -C euid!=uid -F auid!=unset -S execve -k user_emulation -a always,exit -F arch=b32 -C euid!=uid -F auid!=unset -S execve -k user_emulation \" >> /etc/audit/rules.d/50-user_emulation.rules Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." 
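+ # Clarifying note (not part of the benchmark text): in the example rules above, `-C euid!=uid`
+ # matches execve calls where the effective UID differs from the real UID (a user acting as another
+ # user, for example via sudo), and `-F auid!=unset` excludes daemons that have no login UID, so
+ # only interactive privilege emulation is tagged with the user_emulation key.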
+ compliance: + - cis: ["4.1.3.2"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["4.9"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.9.4.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:exit,always|always,exit && r:-F arch=b64 && r:-C euid!=uid|-C uid!=euid && r:-F auid!=unset|-F auid!=-1|-F auid!=4294967295 && r:-S execve && r:-k user_emulation|key=user_emulation' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:exit,always|always,exit && r:-F arch=b32 && r:-C euid!=uid|-C uid!=euid && r:-F auid!=unset|-F auid!=-1|-F auid!=4294967295 && r:-S execve && r:-k user_emulation|key=user_emulation' + - "c:auditctl -l -> r:^-a && r:exit,always|always,exit && r:-F arch=b64 && r:-C euid!=uid|-C uid!=euid && r:-F auid!=unset|-F auid!=-1|-F auid!=4294967295 && r:-S execve && r:-k user_emulation|key=user_emulation" + - "c:auditctl -l -> r:^-a && r:exit,always|always,exit && r:-F arch=b32 && r:-C euid!=uid|-C uid!=euid && r:-F auid!=unset|-F auid!=-1|-F auid!=4294967295 && r:-S execve && r:-k user_emulation|key=user_emulation" + + # 4.1.3.3 Ensure events that modify the sudo log file are collected. (Automated) - Not Implemented + + # 4.1.3.4 Ensure events that modify date and time information are collected. (Automated) + - id: 6624 + title: "Ensure events that modify date and time information are collected." + description: 'Capture events where the system date and/or time has been modified. The parameters in this section are set to determine if the; - adjtimex - tune kernel clock - settimeofday - set time using timeval and timezone structures - stime - using seconds since 1/1/1970 - clock_settime - allows for the setting of several internal clocks and timers system calls have been executed. Further, ensure to write an audit record to the configured audit log file upon exit, tagging the records with a unique identifier such as "time-change".' rationale: "Unexpected changes in system date and/or time could be a sign of malicious activity on the system." 
- remediation: "For 32 bit systems add the following lines to the /etc/audit/audit.rules file: -a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change -a always,exit -F arch=b32 -S clock_settime -k time-change -w /etc/localtime -p wa -k time-change For 64 bit systems add the following lines to the /etc/audit/audit.rules file: -a always,exit -F arch=b64 -S adjtimex -S settimeofday -k time-change -a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change -a always,exit -F arch=b64 -S clock_settime -k time-change -a always,exit -Farch=b32 -S clock_settime -k time-change -w /etc/localtime -p wa -k time-change" - compliance: - - cis: ["4.1.6"] - - cis_csc: ["5.5"] - - pci_dss: ["10.4.2", "10.2.7"] - - nist_800_53: ["AU.14", "AU.6"] - - gpg_13: ["7.9"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-a always,exit -F arch=b32 -S clock_settime -k time-change' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-a always,exit -F arch=b64 -S adjtimex -S settimeofday -S stime -k time-change' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-a always,exit -F arch=b64 -S clock_settime -k time-change' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /etc/localtime -p wa -k time-change' - - # 4.1.7 Ensure events that modify the system's Mandatory Access Controls are collected (Scored) - - id: 6614 - title: "Ensure events that modify the system's Mandatory Access Controls are collected" - description: "Monitor SELinux mandatory access controls. The parameters below monitor any write access (potential additional, deletion or modification of files in the directory) or attribute changes to the /etc/selinux or /etc/apparmor and /etc/apparmor.d directories." - rationale: "Changes to files in these directories could indicate that an unauthorized user is attempting to modify access controls and change security contexts, leading to a compromise of the system." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -w /etc/selinux/ -p wa -k MAC-policy -w /usr/share/selinux/ -p wa -k MAC-policy" - compliance: - - cis: ["4.1.7"] - - cis_csc: ["5.5"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /etc/selinux/ -p wa -k MAC-policy' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /usr/share/selinux/ -p wa -k MAC-policy' - - # 4.1.8 Ensure events that modify the system's network environment are collected (Scored) - - id: 6615 - title: "Ensure events that modify the system's network environment are collected" - description: "Record changes to network environment files or system calls. The below parameters monitor the sethostname (set the systems host name) or setdomainname (set the systems domainname) system calls, and write an audit event on system call exit. 
The other parameters monitor the /etc/issue and /etc/issue.net files (messages displayed pre-login), /etc/hosts (file containing host names and associated IP addresses), /etc/sysconfig/network file and /etc/sysconfig/network-scripts/ directory (containing network interface scripts and configurations)." - rationale: 'Monitoring sethostname and setdomainname will identify potential unauthorized changes to host and domainname of a system. The changing of these names could potentially break security parameters that are set based on those names. The /etc/hosts file is monitored for changes in the file that can indicate an unauthorized intruder is trying to change machine associations with IP addresses and trick users and processes into connecting to unintended machines. Monitoring /etc/issue and /etc/issue.net is important, as intruders could put disinformation into those files and trick users into providing information to the intruder. Monitoring /etc/sysconfig/network and /etc/sysconfig/network-scripts/ is important as it can show if network interfaces or scripts are being modified in a way that can lead to the machine becoming unavailable or compromised. All audit records will be tagged with the identifier "system-locale."' - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -a always,exit -F arch=b64 -S sethostname -S setdomainname -k system-locale -a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale -w /etc/issue -p wa -k system-locale -w /etc/issue.net -p wa -k system-locale -w /etc/hosts -p wa -k system-locale -w /etc/sysconfig/network -p wa -k system-locale -w /etc/sysconfig/network-scripts/ -p wa -k system-locale" - compliance: - - cis: ["4.1.8"] - - cis_csc: ["5.5"] - - pci_dss: ["10.2.7"] - - nist_800_53: ["AU.14", "AU.6"] - - gpg_13: ["7.9"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-a always,exit -F arch=b64 -S sethostname -S setdomainname -k system-locale' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /etc/issue -p wa -k system-locale' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /etc/issue.net -p wa -k system-locale' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /etc/hosts -p wa -k system-locale' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /etc/sysconfig/network -p wa -k system-locale' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /etc/sysconfig/network-scripts/ -p wa -k system-locale' - - # 4.1.9 Ensure discretionary access control permission modification events are collected (Scored) - - id: 6616 - title: "Ensure discretionary access control permission modification events are collected" - description: 'Monitor changes to file permissions, attributes, ownership and group. The parameters in this section track changes for system calls that affect file permissions and attributes. The chmod , fchmod and fchmodat system calls affect the permissions associated with a file. The chown , fchown , fchownat and lchown system calls affect owner and group attributes on a file. The setxattr , lsetxattr , fsetxattr (set extended file attributes) and removexattr , lremovexattr , fremovexattr (remove extended file attributes) control extended file attributes. 
In all cases, an audit record will only be written for non-system user ids (auid >= 1000) and will ignore Daemon events (auid = 4294967295). All audit records will be tagged with the identifier "perm_mod."' - rationale: "Monitoring for changes in file attributes could alert a system administrator to activity that could indicate intruder activity or policy violation." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -a always,exit -F arch=b64 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b32 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b64 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b32 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b64 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b32 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lrem" - compliance: - - cis: ["4.1.9"] - - cis_csc: ["5.5"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b32 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b32 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b32 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b64 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b64 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b64 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod' - - # 4.1.10 Ensure unsuccessful unauthorized file access attempts are collected (Scored) - - id: 6617 - title: "Ensure unsuccessful unauthorized file access attempts are collected" - description: 'Monitor for unsuccessful attempts to access files. The parameters below are associated with system calls that control creation ( creat ), opening ( open , openat ) and truncation ( truncate , ftruncate ) of files. An audit log record will only be written if the user is a non- privileged user (auid > = 1000), is not a Daemon event (auid=4294967295) and if the system call returned EACCES (permission denied to the file) or EPERM (some other permanent error associated with the specific system call). All audit records will be tagged with the identifier "access."' - rationale: "Failed attempts to open, create or truncate files could be an indication that an individual or process is trying to gain unauthorized access to the system." 
- remediation: "For 32 bit systems add the following lines to the /etc/audit/audit.rules file: -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access For 64 bit systems add the following lines to the /etc/audit/audit.rules file: -a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access -a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access" - compliance: - - cis: ["4.1.10"] - - cis_csc: ["14.9"] - - pci_dss: ["10.2.4"] - - nist_800_53: ["AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access' - - # 4.1.11 Ensure events that modify user/group information are collected (Scored) - - id: 6618 - title: "Ensure events that modify user/group information are collected" - description: 'Record events affecting the group , passwd (user IDs), shadow and gshadow (passwords) or /etc/security/opasswd (old passwords, based on remember parameter in the PAM configuration) files. The parameters in this section will watch the files to see if they have been opened for write or have had attribute changes (e.g. permissions) and tag them with the identifier "identity" in the audit log file.' + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor events that modify date and time information. 64 Bit systems Example: # printf \" -a always,exit -F arch=b64 -S adjtimex,settimeofday,clock_settime -k time-change -a always,exit -F arch=b32 -S adjtimex,settimeofday,clock_settime -k time-change -w /etc/localtime -p wa -k time-change \" >> /etc/audit/rules.d/50-time-change.rules Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64. In addition, add stime to the system call audit. Example: -a always,exit -F arch=b32 -S adjtimex,settimeofday,clock_settime,stime -k time-change." 
+ compliance: + - cis: ["4.1.3.4"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["5.5"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.1.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F arch=b64 && r:-S && r:adjtimex && r:settimeofday && r:clock_settime && r:-k time-change|key=time-change' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S && r:adjtimex && r:settimeofday && r:clock_settime && r:-k time-change|key=time-change' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/localtime && r:-p wa && r:-k time-change|key=time-change' + - "c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b64 && r:-S && r:adjtimex && r:settimeofday && r:clock_settime && r:-k time-change|key=time-change" + - "c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S && r:adjtimex && r:settimeofday && r:clock_settime && r:-k time-change|key=time-change" + - "c:auditctl -l -> r:^-w && r:/etc/localtime && r:-p wa && r:-k time-change|key=time-change" + + # 4.1.3.5 Ensure events that modify the system's network environment are collected. (Automated) + - id: 6625 + title: "Ensure events that modify the system's network environment are collected." + description: "Record changes to network environment files or system calls. The parameters below monitor the following system calls, and write an audit event on system call exit: - sethostname - set the system's host name - setdomainname - set the system's domain name The files being monitored are: - /etc/issue and /etc/issue.net - messages displayed pre-login - /etc/hosts - file containing host names and associated IP addresses - /etc/sysconfig/network - additional information that is valid to all network interfaces - /etc/sysconfig/network-scripts/ - directory containing network interface scripts and configuration files." + rationale: "Monitoring sethostname and setdomainname will identify potential unauthorized changes to host and domainname of a system. The changing of these names could potentially break security parameters that are set based on those names. The /etc/hosts file is monitored for changes that can indicate an unauthorized intruder is trying to change machine associations with IP addresses and trick users and processes into connecting to unintended machines. Monitoring /etc/issue and /etc/issue.net is important, as intruders could put disinformation into those files and trick users into providing information to the intruder. Monitoring /etc/sysconfig/network is important as it can show if network interfaces or scripts are being modified in a way that can lead to the machine becoming unavailable or compromised. All audit records should have a relevant tag associated with them." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor events that modify the system's network environment.
64 Bit systems Example: # printf \" -a always,exit -F arch=b64 -S sethostname,setdomainname -k system-locale -a always,exit -F arch=b32 -S sethostname,setdomainname -k system-locale -w /etc/issue -p wa -k system-locale -w /etc/issue.net -p wa -k system-locale -w /etc/hosts -p wa -k system-locale -w /etc/sysconfig/network -p wa -k system-locale -w /etc/sysconfig/network-scripts/ -p wa -k system-locale \" >> /etc/audit/rules.d/50-system_local.rules Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." + compliance: + - cis: ["4.1.3.5"] + - cis_csc_v7: ["5.5"] + - iso_27001-2013: ["A.12.1.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:exit,always|always,exit && r:-F arch=b32 && r:-S && r:sethostname && r:setdomainname && r:-k system-locale|key=system-locale' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:exit,always|always,exit && r:-F arch=b64 && r:-S && r:sethostname && r:setdomainname && r:-k system-locale|key=system-locale' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/issue && r:-p wa && r:-k system-locale|key=system-locale' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/issue.net && r:-p wa && r:-k system-locale|key=system-locale' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/hosts && r:-p wa && r:-k system-locale|key=system-locale' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/sysconfig/network && r:-p wa && r:-k system-locale|key=system-locale' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/sysconfig/network-scripts/ && r:-p wa && r:-k system-locale|key=system-locale' + - "c:auditctl -l -> r:^-a && r:exit,always|always,exit && r:-F arch=b64 && r:-S && r:sethostname && r:setdomainname && r:-k system-locale|-F key=system-locale" + - "c:auditctl -l -> r:^-a && r:exit,always|always,exit && r:-F arch=b32 && r:-S && r:sethostname && r:setdomainname && r:-k system-locale|-F key=system-locale" + - "c:auditctl -l -> r:^-w && r:/etc/issue && r:-p wa && r:-k system-locale|key=system-locale" + - "c:auditctl -l -> r:^-w && r:/etc/issue.net && r:-p wa && r:-k system-locale|key=system-locale" + - "c:auditctl -l -> r:^-w && r:/etc/hosts && r:-p wa && r:-k system-locale|key=system-locale" + - "c:auditctl -l -> r:^-w && r:/etc/sysconfig/network && r:-p wa && r:-k system-locale|key=system-locale" + - "c:auditctl -l -> r:^-w && r:/etc/sysconfig/network-scripts/ && r:-p wa && r:-k system-locale|key=system-locale" + + # 4.1.3.6 Ensure use of privileged commands are collected. (Automated) - Not Implemented + + # 4.1.3.7 Ensure unsuccessful file access attempts are collected. (Automated) - Not Implemented + + # 4.1.3.8 Ensure events that modify user/group information are collected. (Automated) + - id: 6626 + title: "Ensure events that modify user/group information are collected." + description: 'Record events affecting the modification of user or group information, including that of passwords and old passwords if in use. 
- /etc/group - system groups - /etc/passwd - system users - /etc/gshadow - encrypted password for each group - /etc/shadow - system user passwords - /etc/security/opasswd - storage of old passwords if the relevant PAM module is in use The parameters in this section will watch the files to see if they have been opened for write or have had attribute changes (e.g. permissions) and tag them with the identifier "identity" in the audit log file.' rationale: "Unexpected changes to these files could be an indication that the system has been compromised and that an unauthorized user is attempting to hide their activities or compromise additional accounts." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -w /etc/group -p wa -k identity -w /etc/passwd -p wa -k identity -w /etc/gshadow -p wa -k identity -w /etc/shadow -p wa -k identity -w /etc/security/opasswd -p wa -k identity" + remediation: "Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor events that modify user/group information. Example: # printf \" -w /etc/group -p wa -k identity -w /etc/passwd -p wa -k identity -w /etc/gshadow -p wa -k identity -w /etc/shadow -p wa -k identity -w /etc/security/opasswd -p wa -k identity \" >> /etc/audit/rules.d/50-identity.rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi." compliance: - - cis: ["4.1.11"] - - cis_csc: ["4.8"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["4.1.3.8"] + - cis_csc_v7: ["4.8"] + - iso_27001-2013: ["A.12.4.3"] condition: all rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /etc/group -p wa -k identity' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /etc/passwd -p wa -k identity' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /etc/gshadow -p wa -k identity' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /etc/shadow -p wa -k identity' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /etc/security/opasswd -p wa -k identity' + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/group && r:-p wa && r:-k identity|key=identity' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/passwd && r:-p wa && r:-k identity|key=identity' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/gshadow && r:-p wa && r:-k identity|key=identity' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/shadow && r:-p wa && r:-k identity|key=identity' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/security/opasswd && r:-p wa && r:-k identity|key=identity' + - "c:auditctl -l -> r:^-w && r:/etc/group && r:-p wa && r:-k identity|key=identity" + - "c:auditctl -l -> r:^-w && r:/etc/passwd && r:-p wa && r:-k identity|key=identity" + - "c:auditctl -l -> r:^-w && r:/etc/gshadow && r:-p wa && r:-k identity|key=identity" + - "c:auditctl -l -> r:^-w && r:/etc/shadow && r:-p wa && r:-k identity|key=identity" + - "c:auditctl -l -> r:^-w && r:/etc/security/opasswd && r:-p wa && r:-k identity|key=identity" - # 4.1.12 Ensure successful file system mounts are collected (Scored) - - id: 6619 - title: "Ensure successful file system mounts are collected" - description: "Monitor the 
use of the mount system call. The mount (and umount ) system call controls the mounting and unmounting of file systems. The parameters below configure the system to create an audit record when the mount system call is used by a non-privileged user." - rationale: "It is highly unusual for a non privileged user to mount file systems to the system. While tracking mount commands gives the system administrator evidence that external media may have been mounted (based on a review of the source of the mount and confirming it's an external media type), it does not conclusively indicate that data was exported to the media. System administrators who wish to determine if data were exported, would also have to track successful open , creat and truncate system calls requiring write access to a file under the mount point of the external media file system. This could give a fair indication that a write occurred. The only way to truly prove it, would be to track successful writes to the external media. Tracking write system calls could quickly fill up the audit log and is not recommended. Recommendations on configuration options to track data export to media is beyond the scope of this document." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts -a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts" + # 4.1.3.9 Ensure discretionary access control permission modification events are collected. (Automated) - Not Implemented + + # 4.1.3.10 Ensure successful file system mounts are collected. (Automated) + - id: 6627 + title: "Ensure successful file system mounts are collected." + description: "Monitor the use of the mount system call. The mount (and umount) system call controls the mounting and unmounting of file systems. The parameters below configure the system to create an audit record when the mount system call is used by a non-privileged user." + rationale: "It is highly unusual for a non privileged user to mount file systems to the system. While tracking mount commands gives the system administrator evidence that external media may have been mounted (based on a review of the source of the mount and confirming it's an external media type), it does not conclusively indicate that data was exported to the media. System administrators who wish to determine if data were exported, would also have to track successful open, creat and truncate system calls requiring write access to a file under the mount point of the external media file system. This could give a fair indication that a write occurred. The only way to truly prove it, would be to track successful writes to the external media. Tracking write system calls could quickly fill up the audit log and is not recommended. Recommendations on configuration options to track data export to media is beyond the scope of this document." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor successful file system mounts. 
64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=unset -k mounts -a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=unset -k mounts \" >> /etc/audit/rules.d/50-perm_mod.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." compliance: - - cis: ["4.1.12"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.7"] - - nist_800_53: ["AU.14", "AU.6"] - - gpg_13: ["7.9"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["4.1.3.10"] + - cis_csc_v7: ["5.1"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] condition: all rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts' + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S mount && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k mounts|key=mounts' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F arch=b64 && r:-S mount && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k mounts|key=mounts' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b64 && r:-S mount && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k mounts|key=mounts' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S mount && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k mounts|key=mounts' - # 4.1.14 Ensure file deletion events by users are collected (Scored) - - id: 6620 - title: "Ensure file deletion events by users are collected" - description: 'Monitor the use of system calls associated with the deletion or renaming of files and file attributes. This configuration statement sets up monitoring for the unlink (remove a file), unlinkat (remove a file attribute), rename (rename a file) and renameat (rename a file attribute) system calls and tags them with the identifier "delete".' + # 4.1.3.11 Ensure session initiation information is collected. (Automated) + - id: 6628 + title: "Ensure session initiation information is collected." + description: 'Monitor session initiation events. The parameters in this section track changes to the files associated with session events. - /var/run/utmp - tracks all currently logged in users. - /var/log/wtmp - file tracks logins, logouts, shutdown, and reboot events. - /var/log/btmp - keeps track of failed login attempts and can be read by entering the command /usr/bin/last -f /var/log/btmp. All audit records will be tagged with the identifier "session.".' + rationale: "Monitoring these files for changes could alert a system administrator to logins occurring at unusual hours, which could indicate intruder activity (i.e. a user logging in at a time when they do not normally log in)." 
+ remediation: "Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor session initiation information. Example: # printf \" -w /var/run/utmp -p wa -k session -w /var/log/wtmp -p wa -k session -w /var/log/btmp -p wa -k session \" >> /etc/audit/rules.d/50-session.rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi." + compliance: + - cis: ["4.1.3.11"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["4.9", "16.13"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.9.4.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/var/run/utmp && r:-p wa && r:-k session|key=session' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/var/log/wtmp && r:-p wa && r:-k session|key=session' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/var/log/btmp && r:-p wa && r:-k session|key=session' + - "c:auditctl -l -> r:^-w && r:/var/run/utmp && r:-p wa && r:-k session|key=session" + - "c:auditctl -l -> r:^-w && r:/var/log/wtmp && r:-p wa && r:-k session|key=session" + - "c:auditctl -l -> r:^-w && r:/var/log/btmp && r:-p wa && r:-k session|key=session" + + # 4.1.3.12 Ensure login and logout events are collected. (Automated) + - id: 6629 + title: "Ensure login and logout events are collected." + description: "Monitor login and logout events. The parameters below track changes to files associated with login/logout events. - /var/log/lastlog - maintain records of the last time a user successfully logged in. - /var/run/faillock - directory maintains records of login failures via the pam_faillock module." + rationale: "Monitoring login/logout events could provide a system administrator with information associated with brute force attacks against user logins." + remediation: "Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor login and logout events. Example: # printf \" -w /var/log/lastlog -p wa -k logins -w /var/run/faillock -p wa -k logins \" >> /etc/audit/rules.d/50-login.rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi." 
+ compliance: + - cis: ["4.1.3.12"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["4.9", "16.11", "16.13"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.8.1.3", "A.9.4.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/var/log/lastlog && r:-p wa && r:-k logins|key=logins' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/var/run/faillock && r:-p wa && r:-k logins|key=logins' + - "c:auditctl -l -> r:^-w && r:/var/log/lastlog && r:-p wa && r:-k logins|key=logins" + - "c:auditctl -l -> r:^-w && r:/var/run/faillock && r:-p wa && r:-k logins|key=logins" + + # 4.1.3.13 Ensure file deletion events by users are collected. (Automated) + - id: 6630 + title: "Ensure file deletion events by users are collected." + description: 'Monitor the use of system calls associated with the deletion or renaming of files and file attributes. This configuration statement sets up monitoring for: - unlink - remove a file - unlinkat - remove a file attribute - rename - rename a file - renameat rename a file attribute system calls and tags them with the identifier "delete".' rationale: "Monitoring these calls from non-privileged users could provide a system administrator with evidence that inappropriate removal of files and file attributes associated with protected files is occurring. While this audit option will look at all events, system administrators will want to look for specific privileged files that are being deleted or altered." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -a always,exit -F arch=b64 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete -a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete" + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor file deletion events by users. 64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F arch=b64 -S rename,unlink,unlinkat,renameat -F auid>=${UID_MIN} -F auid!=unset -F key=delete -a always,exit -F arch=b32 -S rename,unlink,unlinkat,renameat -F auid>=${UID_MIN} -F auid!=unset -F key=delete \" >> /etc/audit/rules.d/50-delete.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." 
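+ # Illustrative note (not part of the benchmark text): once the example rules above have been
+ # merged with "augenrules --load", the loaded configuration can be spot-checked with
+ # "auditctl -l | grep -w delete", which should list the b32 and b64 rules the checks below expect.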
compliance: - - cis: ["4.1.14"] - - cis_csc: ["13"] - - pci_dss: ["10.5.5"] - - nist_800_53: ["AU.14"] - - hipaa: ["164.312.b"] - - tsc: ["PI1.4", "PI1.5", "CC7.1", "CC7.2", "CC7.3", "CC8.1"] + - cis: ["4.1.3.13"] + - cis_csc_v7: ["13"] condition: all rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b64 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete' + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F arch=b64 && r:-S && r:unlink && r:unlinkat && r:rename && r:renameat && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k delete|key=delete' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S && r:unlink && r:unlinkat && r:rename && r:renameat && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k delete|key=delete' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b64 && r:-S && r:unlink && r:unlinkat && r:rename && r:renameat && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k delete|key=delete' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S && r:unlink && r:unlinkat && r:rename && r:renameat && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k delete|key=delete' - # 4.1.15 Ensure kernel module loading and unloading is collected (Scored) - - id: 6621 - title: "Ensure kernel module loading and unloading is collected" - description: 'Monitor the loading and unloading of kernel modules. The programs insmod (install a kernel module), rmmod (remove a kernel module), and modprobe (a more sophisticated program to load and unload modules, as well as some other features) control loading and unloading of modules. The init_module (load a module) and delete_module (delete a module) system calls control loading and unloading of modules. Any execution of the loading and unloading module programs and system calls will trigger an audit record with an identifier of "modules".' - rationale: "Monitoring the use of insmod , rmmod and modprobe could provide system administrators with evidence that an unauthorized user loaded or unloaded a kernel module, possibly compromising the security of the system. Monitoring of the init_module and delete_module system calls would reflect an unauthorized user attempting to use a different program to load and unload modules." 
- remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -w /sbin/insmod -p x -k modules -w /sbin/rmmod -p x -k modules -w /sbin/modprobe -p x -k modules -a always,exit -F arch=b32 -S init_module -S delete_module -k modules -a always,exit -F arch=b64 -S init_module -S delete_module -k modules" - compliance: - - cis: ["4.1.15"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.7"] - - nist_800_53: ["AU.14", "AU.6"] - - gpg_13: ["7.9"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /sbin/insmod -p x -k modules' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /sbin/rmmod -p x -k modules' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /sbin/modprobe -p x -k modules' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-a always,exit -F arch=b64 -S init_module -S delete_module -k modules' - - # 4.1.16 Ensure system administrator actions (sudolog) are collected (Scored) - - id: 6622 - title: "Ensure system administrator actions (sudolog) are collected" - description: "Monitor the sudo log file. If the system has been properly configured to disable the use of the su command and force all administrators to have to log in first and then use sudo to execute privileged commands, then all administrator commands will be logged to /var/log/sudo.log . Any time a command is executed, an audit event will be triggered as the /var/log/sudo.log file will be opened for write and the executed administration command will be written to the log." - rationale: "Changes in /var/log/sudo.log indicate that an administrator has executed a command or the log file itself has been tampered with. Administrators will want to correlate the events written to the audit trail with the records written to /var/log/sudo.log to verify if unauthorized commands have been executed." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -w /var/log/sudo.log -p wa -k actions" + # 4.1.3.14 Ensure events that modify the system's Mandatory Access Controls are collected. (Automated) + - id: 6631 + title: "Ensure events that modify the system's Mandatory Access Controls are collected." + description: "Monitor SELinux, an implementation of mandatory access controls. The parameters below monitor any write access (potential additional, deletion or modification of files in the directory) or attribute changes to the /etc/selinux/ and /usr/share/selinux/ directories. Note: If a different Mandatory Access Control method is used, changes to the corresponding directories should be audited." + rationale: "Changes to files in the /etc/selinux/ and /usr/share/selinux/ directories could indicate that an unauthorized user is attempting to modify access controls and change security contexts, leading to a compromise of the system." + remediation: "Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor events that modify the system's Mandatory Access Controls. Example: # printf \" -w /etc/selinux -p wa -k MAC-policy -w /usr/share/selinux -p wa -k MAC-policy \" >> /etc/audit/rules.d/50-MAC-policy.rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi." 
+ compliance: + - cis: ["4.1.3.14"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["5.5"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.1.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/selinux && r:-p wa && r:-k MAC-policy|key=MAC-policy' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/usr/share/selinux && r:-p wa && r:-k MAC-policy|key=MAC-policy' + - "c:auditctl -l -> r:^-w && r:/etc/selinux && r:-p wa && r:-k MAC-policy|key=MAC-policy" + - "c:auditctl -l -> r:^-w && r:/usr/share/selinux && r:-p wa && r:-k MAC-policy|key=MAC-policy" + + # 4.1.3.15 Ensure successful and unsuccessful attempts to use the chcon command are recorded. (Automated) + - id: 6632 + title: "Ensure successful and unsuccessful attempts to use the chcon command are recorded." + description: "The operating system must generate audit records for successful/unsuccessful uses of the chcon command." + rationale: "Without generating audit records that are specific to the security and mission needs of the organization, it would be difficult to establish, correlate, and investigate the events relating to an incident or identify those responsible for one. Audit records can be generated from various components within the information system (e.g., module or policy filter)." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor successful and unsuccessful attempts to use the chcon command. 64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F path=/usr/bin/chcon -F perm=x -F auid>=${UID_MIN} -F auid!=unset -k perm_chng \" >> /etc/audit/rules.d/50-perm_chng.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." + compliance: + - cis: ["4.1.3.15"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/chcon && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k perm_chng|key=perm_chng' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/chcon && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k perm_chng|key=perm_chng' + + # 4.1.3.16 Ensure successful and unsuccessful attempts to use the setfacl command are recorded. (Automated) + - id: 6633 + title: "Ensure successful and unsuccessful attempts to use the setfacl command are recorded." 
+ description: "The operating system must generate audit records for successful/unsuccessful uses of the setfacl command." + rationale: "Without generating audit records that are specific to the security and mission needs of the organization, it would be difficult to establish, correlate, and investigate the events relating to an incident or identify those responsible for one. Audit records can be generated from various components within the information system (e.g., module or policy filter)." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor successful and unsuccessful attempts to use the setfacl command. 64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F path=/usr/bin/setfacl -F perm=x -F auid>=${UID_MIN} -F auid!=unset -k perm_chng \" >> /etc/audit/rules.d/50-priv_cmd.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." + compliance: + - cis: ["4.1.3.16"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/setfacl && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k perm_chng|-F key=perm_chng' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/setfacl && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k perm_chng|-F key=perm_chng' + + # 4.1.3.17 Ensure successful and unsuccessful attempts to use the chacl command are recorded. (Automated) + - id: 6634 + title: "Ensure successful and unsuccessful attempts to use the chacl command are recorded." + description: "The operating system must generate audit records for successful/unsuccessful uses of the chacl command." + rationale: "Without generating audit records that are specific to the security and mission needs of the organization, it would be difficult to establish, correlate, and investigate the events relating to an incident or identify those responsible for one. Audit records can be generated from various components within the information system (e.g., module or policy filter)." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor successful and unsuccessful attempts to use the chacl command. 
64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F path=/usr/bin/chacl -F perm=x -F auid>=${UID_MIN} -F auid!=unset -k perm_chng \" >> /etc/audit/rules.d/50-perm_chng.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." + compliance: + - cis: ["4.1.3.17"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/chacl && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k perm_chng|-F key=perm_chng' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/chacl && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k perm_chng|-F key=perm_chng' + + # 4.1.3.18 Ensure successful and unsuccessful attempts to use the usermod command are recorded. (Automated) + - id: 6635 + title: "Ensure successful and unsuccessful attempts to use the usermod command are recorded." + description: "The operating system must generate audit records for successful/unsuccessful uses of the usermod command." + rationale: "Without generating audit records that are specific to the security and mission needs of the organization, it would be difficult to establish, correlate, and investigate the events relating to an incident or identify those responsible for one. Audit records can be generated from various components within the information system (e.g., module or policy filter)." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor successful and unsuccessful attempts to use the usermod command. 64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F path=/usr/sbin/usermod -F perm=x -F auid>=${UID_MIN} -F auid!=unset -k usermod \" >> /etc/audit/rules.d/50-usermod.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." 
+ compliance: + - cis: ["4.1.3.18"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/sbin/usermod && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k usermod|-F key=usermod' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/sbin/usermod && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k usermod|-F key=usermod' + + # 4.1.3.19 Ensure kernel module loading unloading and modification is collected. (Automated) + - id: 6636 + title: "Ensure kernel module loading unloading and modification is collected." + description: "Monitor the loading and unloading of kernel modules. All the loading / listing / dependency checking of modules is done by kmod via symbolic links. The following system calls control loading and unloading of modules: - init_module - load a module - finit_module - load a module (used when the overhead of using cryptographically signed modules to determine the authenticity of a module can be avoided) - delete_module - delete a module - create_module - create a loadable module entry - query_module - query the kernel for various bits pertaining to modules Any execution of the loading and unloading module programs and system calls will trigger an audit record with an identifier of modules." + rationale: "Monitoring the use of all the various ways to manipulate kernel modules could provide system administrators with evidence that an unauthorized change was made to a kernel module, possibly compromising the security of the system." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor kernel module modification. 64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F arch=b64 -S init_module,finit_module,delete_module,create_module,query_module -F auid>=${UID_MIN} -F auid!=unset -k kernel_modules -a always,exit -F path=/usr/bin/kmod -F perm=x -F auid>=${UID_MIN} -F auid!=unset -k kernel_modules \" >> /etc/audit/rules.d/50-kernel_modules.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi." 
+ compliance: + - cis: ["4.1.3.19"] + - cis_csc_v7: ["5.1"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F arch=b64|-F arch=b32 && r:-S && r:init_module && r:finit_module && r:delete_module && r:create_module && r:query_module && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k kernel_modules|-F key=kernel_modules' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/kmod && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k kernel_modules|-F key=kernel_modules' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b64|-F arch=b32 && r:-S && r:init_module && r:finit_module && r:delete_module && r:create_module && r:query_module && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k kernel_modules|-F key=kernel_modules' + - 'c:auditctl -l-> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/kmod && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k kernel_modules|-F key=kernel_modules' + - "c:ls -l /usr/sbin/lsmod -> r:/bin/kmod" + - "c:ls -l /usr/sbin/rmmod -> r:/bin/kmod" + - "c:ls -l /usr/sbin/insmod -> r:/bin/kmod" + - "c:ls -l /usr/sbin/modinfo -> r:/bin/kmod" + - "c:ls -l /usr/sbin/modprobe -> r:/bin/kmod" + - "c:ls -l /usr/sbin/depmod -> r:/bin/kmod" + + # 4.1.3.20 Ensure the audit configuration is immutable. (Automated) + - id: 6637 + title: "Ensure the audit configuration is immutable." + description: 'Set system audit so that audit rules cannot be modified with auditctl. Setting the flag "-e 2" forces audit to be put in immutable mode. Audit changes can only be made on system reboot. Note: This setting will require the system to be rebooted to update the active auditd configuration settings.' + rationale: "In immutable mode, unauthorized users cannot execute changes to the audit system to potentially hide malicious activity and then put the audit rules back. Users would most likely notice a system reboot and that could alert administrators of an attempt to make unauthorized audit changes." + remediation: "Edit or create the file /etc/audit/rules.d/99-finalize.rules and add the following line at the end of the file: -e 2." compliance: - - cis: ["4.1.16"] - - cis_csc: ["4.9"] - - pci_dss: ["10.2.2"] - - nist_800_53: ["AU.14", "AC.6", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["4.1.3.20"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /var/log/sudo.log -p wa -k actions' + - 'c:sh -c "grep -iEh ''^\s*\t*-e 2\s*$'' /etc/audit/rules.d/*.rules | tail -1" -> r:^\s*\t*-e 2\s*$' - # 4.1.17 Ensure the audit configuration is immutable (Scored) - - id: 6623 - title: "Ensure the audit configuration is immutable" - description: 'Set system audit so that audit rules cannot be modified with auditctl . 
Setting the flag "-e 2" forces audit to be put in immutable mode. Audit changes can only be made on system reboot.' - rationale: "In immutable mode, unauthorized users cannot execute changes to the audit system to potentially hide malicious activity and then put the audit rules back. Users would most likely notice a system reboot and that could alert administrators of an attempt to make unauthorized audit changes." - remediation: "Edit or create the file /etc/audit/rules.d/99-finalize.rules and add the line: -e 2 at the end of the file" + # 4.1.3.21 Ensure the running and on disk configuration is the same. (Manual) + - id: 6638 + title: "Ensure the running and on disk configuration is the same." + description: "The audit system has both an on-disk and a running configuration. It is possible for these configuration settings to differ. Note: Due to the limitations of augenrules and auditctl, it is not absolutely guaranteed that loading the rule sets via augenrules --load will result in all rules being loaded or even that the user will be informed if there was a problem loading the rules." + rationale: "Configuration differences between what is currently running and what is on disk could cause unexpected problems or may give a false impression of compliance requirements." + remediation: 'If the rules are not aligned across all three () areas, run the following command to merge and load all rules: # augenrules --load Check if reboot is required. if [[ $(auditctl -s | grep "enabled") =~ "2" ]]; then echo "Reboot required to load rules"; fi.' compliance: - - cis: ["4.1.17"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.5"] - - nist_800_53: ["AU.9"] - - hipaa: ["164.312.b"] + - cis: ["4.1.3.21"] + - cis_csc_v8: ["8.5"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - - "c:tail -1 /etc/audit/rules.d/99-finalize.rules -> -e 2" + - 'c:augenrules --check -> r:^\s*/usr/sbin/augenrules && r:No change$' - # 4.2.1.1 Ensure rsyslog or syslog-ng is installed (Scored) - - id: 6624 - title: "Ensure rsyslog is installed" - description: "The rsyslog software is a recommended replacement to the original syslogd daemon which provide improvements over syslogd , such as connection-oriented (i.e. TCP) transmission of logs, the option to log to database formats, and the encryption of log data en route to a central logging server." + # 4.2.1.1 Ensure rsyslog is installed. (Automated) + - id: 6639 + title: "Ensure rsyslog is installed." + description: "The rsyslog software is recommended in environments where journald does not meet operation requirements." rationale: "The security enhancements of rsyslog such as connection-oriented (i.e. TCP) transmission of logs, the option to log to database formats, and the encryption of log data en route to a central logging server) justify installing and configuring the package." - remediation: "Run the following command to install rsyslog: # dnf install rsyslog" + remediation: "Run the following command to install rsyslog: # dnf install rsyslog."
compliance: - cis: ["4.2.1.1"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.1"] - - nist_800_53: ["CM.1"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.2", "CC6.3", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - "c:rpm -q rsyslog -> r:^rsyslog-" - # 4.2.1.2 Ensure rsyslog Service is enabled (Scored) - - id: 6625 - title: "Ensure rsyslog Service is enabled" - description: "Once the rsyslog package is installed it needs to be activated." - rationale: "If the rsyslog service is not activated the system may default to the syslogd service or lack logging instead." - remediation: "Run the following command to enable rsyslog : # systemctl --now enable rsyslog" + # 4.2.1.2 Ensure rsyslog service is enabled. (Automated) + - id: 6640 + title: "Ensure rsyslog service is enabled." + description: "Once the rsyslog package is installed, ensure that the service is enabled." + rationale: "If the rsyslog service is not enabled to start on boot, the system will not capture logging events." + remediation: "Run the following command to enable rsyslog: # systemctl --now enable rsyslog." compliance: - cis: ["4.2.1.2"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.1"] - - nist_800_53: ["CM.1"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.2", "CC6.3", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - - "c:systemctl is-enabled rsyslog -> enabled" + - "c:systemctl is-enabled rsyslog -> r:^enabled" - # 4.2.1.3 Ensure rsyslog default file permissions configured (Scored) - - id: 6626 - title: "Ensure rsyslog default file permissions configured" - description: "rsyslog will create logfiles that do not already exist on the system. This setting controls what permissions will be applied to these newly created files." - rationale: "It is important to ensure that log files have the correct permissions to ensure that sensitive data is archived and protected." - remediation: "Edit the /etc/rsyslog.conf and /etc/rsyslog.d/*.conf files and set $FileCreateMode to 0640 or more restrictive: $FileCreateMode 0640" + # 4.2.1.3 Ensure journald is configured to send logs to rsyslog. (Manual) + - id: 6641 + title: "Ensure journald is configured to send logs to rsyslog." + description: "Data from journald may be stored in volatile memory or persisted locally on the server. Utilities exist to accept remote export of journald logs, however, use of the RSyslog service provides a consistent means of log collection and export." + rationale: "IF RSyslog is the preferred method for capturing logs, all logs of the system should be sent to it for further processing." + remediation: "Edit the /etc/systemd/journald.conf file and add the following line: ForwardToSyslog=yes Restart the service: # systemctl restart rsyslog." 
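+ # Illustrative verification (assumption, not benchmark text): after adding ForwardToSyslog=yes,
+ # "grep -E '^\s*ForwardToSyslog' /etc/systemd/journald.conf" should show the new value, which is
+ # the same condition the file check below evaluates.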
compliance: + - cis: ["4.2.1.3"] + - cis_csc_v8: ["8.2", "8.9"] + - cis_csc_v7: ["6.2", "6.3", "6.5"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-6(3)", "AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3", "10.5.3", "10.5.4"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "10.3.3", "5.3.4", "6.4.1", "6.4.2"] + - soc_2: ["PL1.4"] + condition: all + rules: + - 'f:/etc/systemd/journald.conf -> r:^\s*\t*ForwardToSyslog\s*=\s*yes' + + # 4.2.1.4 Ensure rsyslog default file permissions are configured. (Automated) + - id: 6642 + title: "Ensure rsyslog default file permissions are configured." + description: "RSyslog will create logfiles that do not already exist on the system. This setting controls what permissions will be applied to these newly created files." + rationale: "It is important to ensure that log files have the correct permissions to ensure that sensitive data is archived and protected." + impact: "The system's global umask could override, but only making the file permissions stricter, what is configured in RSyslog with the FileCreateMode directive. RSyslog also has its own $umask directive that can alter the intended file creation mode. In addition, consideration should be given to how FileCreateMode is used. Thus it is critical to ensure that the intended file creation mode is not overridden with less restrictive settings in /etc/rsyslog.conf, /etc/rsyslog.d/*conf files and that FileCreateMode is set before any file is created." + remediation: "Edit either /etc/rsyslog.conf or a dedicated .conf file in /etc/rsyslog.d/ and set $FileCreateMode to 0640 or more restrictive: $FileCreateMode 0640 Restart the service: # systemctl restart rsyslog."
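+ # Illustrative drop-in (the file name is hypothetical, not from the benchmark); an early-loading
+ # file keeps FileCreateMode set before any log file is created:
+ # printf '$FileCreateMode 0640\n' > /etc/rsyslog.d/00-filecreatemode.conf && systemctl restart rsyslog
+ # The checks below accept 0640 or a more restrictive mode.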
+ compliance: + - cis: ["4.2.1.4"] + - cis_csc_v8: ["3.3", "8.2"] + - cis_csc_v7: ["5.1", "6.2", "6.3"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "AU.L2-3.3.1", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)", "164.312(b)"] + - iso_27001-2013: ["A.12.4.1", "A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6", "AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3", "7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: any rules: - - 'f:/etc/rsyslog.conf -> r:^\$FileCreateMode 06\d0|^\$FileCreateMode 04\d0|^\$FileCreateMode 02\d0|^\$FileCreateMode 00\d0' - - 'f:/etc/rsyslog.conf -> r:^\$FileCreateMode 0\d40|^\$FileCreateMode 0\d20|^\$FileCreateMode 0\d00' + - 'f:/etc/rsyslog.conf -> r:^\$FileCreateMode 06\d0|^\$FileCreateMode 04\d0|^\$FileCreateMode 02\d0|^\$FileCreateMode 00\d0 && r:^\$FileCreateMode 0\d40|^\$FileCreateMode 0\d00' + - 'd:/etc/rsyslog.d/ -> r:\.*.conf -> r:^\$FileCreateMode 06\d0|^\$FileCreateMode 04\d0|^\$FileCreateMode 02\d0|^\$FileCreateMode 00\d0 && r:^\$FileCreateMode 0\d40|^\$FileCreateMode 0\d00' - # 4.2.1.5 Ensure rsyslog is configured to send logs to a remote log host (Scored) - - id: 6627 - title: "Ensure rsyslog is configured to send logs to a remote log host" - description: "The rsyslog utility supports the ability to send logs it gathers to a remote log host running syslogd(8) or to receive messages from remote hosts, reducing administrative overhead." + # 4.2.1.5 Ensure logging is configured. (Manual) - Not Implemented + + # 4.2.1.6 Ensure rsyslog is configured to send logs to a remote log host. (Manual) - Not Implemented + + # 4.2.1.7 Ensure rsyslog is not configured to receive logs from a remote client. (Automated) + - id: 6643 + title: "Ensure rsyslog is not configured to receive logs from a remote client." + description: "RSyslog supports the ability to receive messages from remote hosts, thus acting as a log server. Clients should not receive data from other hosts." + rationale: "If a client is configured to also receive data, thus turning it into a server, the client system is acting outside its operational boundary." + remediation: 'Should there be any active log server configuration found in the auditing section, modify those files and remove the specific lines highlighted by the audit. Ensure none of the following entries are present in any of /etc/rsyslog.conf or /etc/rsyslog.d/*.conf. Old format $ModLoad imtcp $InputTCPServerRun New format module(load="imtcp") input(type="imtcp" port="514") Restart the service: # systemctl restart rsyslog.'
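For clarity, the listener directives named in the remediation above, none of which should be present in /etc/rsyslog.conf or /etc/rsyslog.d/*.conf on a client, are shown below as a sketch; 514 is the port used in the remediation's new-format example:
    # old (legacy) format - must not be present
    $ModLoad imtcp
    $InputTCPServerRun 514
    # new (RainerScript) format - must not be present
    module(load="imtcp")
    input(type="imtcp" port="514")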
+ compliance: + - cis: ["4.2.1.7"] + - cis_csc_v8: ["4.8", "8.2"] + - cis_csc_v7: ["6.2", "6.3", "9.2"] + - cmmc_v2.0: ["AU.L2-3.3.1", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1", "A.13.1.3"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "10.2", "10.3", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "2.2.4", "5.3.4", "6.4.1", "6.4.2"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - 'not d:/etc/rsyslog.d -> r:\.+.conf$ -> r:^\s*\t*\$ModLoad imtcp|^\s*\t*\$InputTCPServerRun|^\s*\t*module load="imtcp"|^\s*\t*input type="imtcp" port="514"' + - 'not f:/etc/rsyslog.conf -> r:^\s*\t*\$ModLoad imtcp|^\s*\t*\$InputTCPServerRun|^\s*\t*module load="imtcp"|^\s*\t*input type="imtcp" port="514"' + + # 4.2.2.1.1 Ensure systemd-journal-remote is installed. (Manual) + - id: 6644 + title: "Ensure systemd-journal-remote is installed." + description: "Journald (via systemd-journal-remote) supports the ability to send log events it gathers to a remote log host or to receive messages from remote hosts, thus enabling centralised log management." rationale: "Storing log data on a remote host protects log integrity from local attacks. If an attacker gains root access on the local system, they could tamper with or remove log data that is stored on the local system." - remediation: "Edit the /etc/rsyslog.conf and /etc/rsyslog.d/*.conf files and add the following line (where loghost.example.com is the name of your central log host). *.* @@loghost.example.com Run the following command to reload the rsyslogd configuration: # pkill -HUP rsyslogd" + remediation: "Run the following command to install systemd-journal-remote: # dnf install systemd-journal-remote." compliance: - cis: ["4.2.2.1"] - - cis_csc: ["6.6", "6.8"] - - pci_dss: ["10.5.3"] - - nist_800_53: ["CM.1", "AU.4"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - "c:rpm -q systemd-journal-remote -> r:^systemd-journal-remote-" - # 4.2.2.1.2 Ensure systemd-journal-remote is configured. (Manual) + - id: 6645 + title: "Ensure systemd-journal-remote is configured." + description: "Journald (via systemd-journal-remote) supports the ability to send log events it gathers to a remote log host or to receive messages from remote hosts, thus enabling centralised log management." + rationale: "Storing log data on a remote host protects log integrity from local attacks.
If an attacker gains root access on the local system, they could tamper with or remove log data that is stored on the local system." - remediation: "Edit the /etc/systemd/journald.conf file and add the following line: ForwardToSyslog=yes" + remediation: "Edit the /etc/systemd/journal-upload.conf file and ensure the following lines are set per your environment: URL=192.168.50.42 ServerKeyFile=/etc/ssl/private/journal-upload.pem ServerCertificateFile=/etc/ssl/certs/journal-upload.pem TrustedCertificateFile=/etc/ssl/ca/trusted.pem Restart the service: # systemctl restart systemd-journal-upload." compliance: - - cis: ["4.2.2.1"] - - cis_csc: ["6.5"] - - pci_dss: ["10.5.3"] - - nist_800_53: ["CM.1", "AU.9", "AU.4"] - - tsc: ["CC5.2", "CC7.2"] - references: - - "https://github.com/konstruktoid/hardening/blob/master/systemd.adoc#etcsystemdjournaldconf" + - cis: ["4.2.2.1.2"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all + rules: + - "f:/etc/systemd/journal-upload.conf -> r:URL=" + - "f:/etc/systemd/journal-upload.conf -> r:ServerKeyFile=" + - "f:/etc/systemd/journal-upload.conf -> r:ServerCertificateFile=" + - "f:/etc/systemd/journal-upload.conf -> r:TrustedCertificateFile=" + + # 4.2.2.1.3 Ensure systemd-journal-remote is enabled. (Manual) + - id: 6646 + title: "Ensure systemd-journal-remote is enabled." + description: "Journald (via systemd-journal-remote) supports the ability to send log events it gathers to a remote log host or to receive messages from remote hosts, thus enabling centralised log management." + rationale: "Storing log data on a remote host protects log integrity from local attacks. If an attacker gains root access on the local system, they could tamper with or remove log data that is stored on the local system." + remediation: "Run the following command to enable systemd-journal-remote: # systemctl --now enable systemd-journal-upload.service." + compliance: + - cis: ["4.2.2.1.3"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - - 'f:/etc/systemd/journald.conf -> r:^\s*\t*ForwardToSyslog\s*=\s*yes' + - "c:systemctl is-enabled systemd-journal-upload.service -> r:^enabled" - # 4.2.2.2 Ensure journald is configured to compress large log files (Scored) - - id: 6629 - title: "Ensure journald is configured to compress large log files" - description: "The journald system includes the capability of compressing overly large files to avoid filling up the system with logs or making the logs unmanageably large." - rationale: "Uncompressed large files may unexpectedly fill a filesystem leading to resource unavailability. Compressing logs prior to write can prevent sudden, unexpected filesystem impacts." - remediation: "Edit the /etc/systemd/journald.conf file and add the following line: Compress=yes" + # 4.2.2.1.4 Ensure journald is not configured to recieve logs from a remote client. 
(Automated) + - id: 6647 + title: "Ensure journald is not configured to receive logs from a remote client." + description: "Journald supports the ability to receive messages from remote hosts, thus acting as a log server. Clients should not receive data from other hosts. NOTE: - The same package, systemd-journal-remote, is used for both sending logs to remote hosts and receiving incoming logs. - With regards to receiving logs, there are two services; systemd-journal-remote.socket and systemd-journal-remote.service." + rationale: "If a client is configured to also receive data, thus turning it into a server, the client system is acting outside its operational boundary." + remediation: "Run the following command to disable systemd-journal-remote.socket: # systemctl --now mask systemd-journal-remote.socket." + compliance: + - cis: ["4.2.2.1.4"] + - cis_csc_v8: ["4.8", "8.2"] + - cis_csc_v7: ["6.2", "6.3", "9.2"] + - cmmc_v2.0: ["AU.L2-3.3.1", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1", "A.13.1.3"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "10.2", "10.3", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "2.2.4", "5.3.4", "6.4.1", "6.4.2"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - "c:systemctl is-enabled systemd-journal-remote.socket -> r:^masked" + + # 4.2.2.2 Ensure journald service is enabled. (Automated) + - id: 6648 + title: "Ensure journald service is enabled." + description: "Ensure that the systemd-journald service is enabled to allow capturing of logging events." + rationale: "If the systemd-journald service is not enabled to start on boot, the system will not capture logging events." + remediation: "By default the systemd-journald service does not have an [Install] section and thus cannot be enabled / disabled. It is meant to be referenced as Requires or Wants by other unit files. As such, if the status of systemd-journald is not static, investigate why." compliance: - cis: ["4.2.2.2"] - - cis_csc: ["6.4"] - - pci_dss: ["10.7"] - - nist_800_53: ["CM.1", "AU.4"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - - 'f:/etc/systemd/journald.conf -> r:^\s*\t*Compress\s*=\s*yes' + - "c:systemctl is-enabled systemd-journald.service -> r:^static" - # 4.2.2.3 Ensure journald is configured to write logfiles to persistent disk (Scored) - - id: 6630 - title: "Ensure journald is configured to write logfiles to persistent disk" - description: "Data from journald may be stored in volatile memory or persisted locally on the server. Logs in memory will be lost upon a system reboot. By persisting logs to local disk on the server they are protected from loss." - rationale: "Writing log data to disk will provide the ability to forensically reconstruct events which may have impacted the operations or security of a system even after a system crash or reboot." - remediation: "Edit the /etc/systemd/journald.conf file and add the following line: Compress=yes" + # 4.2.2.3 Ensure journald is configured to compress large log files.
(Automated) + - id: 6649 + title: "Ensure journald is configured to compress large log files." + description: "The journald system includes the capability of compressing overly large files to avoid filling up the system with logs or making the logs unmanageably large." + rationale: "Uncompressed large files may unexpectedly fill a filesystem leading to resource unavailability. Compressing logs prior to write can prevent sudden, unexpected filesystem impacts." + remediation: "Edit the /etc/systemd/journald.conf file and add the following line: Compress=yes Restart the service: # systemctl restart systemd-journal-upload." compliance: - cis: ["4.2.2.3"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.7"] - - nist_800_53: ["CM.1", "AU.4"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["8.2", "8.3"] + - cis_csc_v7: ["6.2", "6.3", "6.4"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3", "10.7"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + - soc_2: ["A1.1"] condition: all rules: - - 'f:/etc/systemd/journald.conf -> r:^\s*\t*Storage\s*=\s*persistent' + - "f:/etc/systemd/journald.conf -> r:^Compress=yes" - # 4.2.3 Ensure permissions on all logfiles are configured (Scored) - - id: 6631 - title: "Ensure permissions on all logfiles are configured" - description: "Log files stored in /var/log/ contain logged information from many services on the system, or on log hosts others as well." - rationale: "It is important to ensure that log files have the correct permissions to ensure that sensitivebdata is archived and protected." - remediation: "Run the following command to set permissions on all existing log files: # find /var/log -type f -exec chmod g-wx,o-rwx {} +" - compliance: - - cis: ["4.2.3"] - - cis_csc: ["5.1"] - - pci_dss: ["10.5.1", "10.5.2"] - - nist_800_53: ["CM.1", "AU.9"] - - tsc: ["CC5.2", "CC7.2"] - condition: none + # 4.2.2.4 Ensure journald is configured to write logfiles to persistent disk. (Automated) + - id: 6650 + title: "Ensure journald is configured to write logfiles to persistent disk." + description: "Data from journald may be stored in volatile memory or persisted locally on the server. Logs in memory will be lost upon a system reboot. By persisting logs to local disk on the server they are protected from loss due to a reboot." + rationale: "Writing log data to disk will provide the ability to forensically reconstruct events which may have impacted the operations or security of a system even after a system crash or reboot." + remediation: "Edit the /etc/systemd/journald.conf file and add the following line: Storage=persistent Restart the service: # systemctl restart systemd-journal-upload." 
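For reference only, a /etc/systemd/journald.conf fragment satisfying both the compression and persistent-storage checks above could look like this; the [Journal] header is the stock section name:
    [Journal]
    Compress=yes
    Storage=persistent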
+ compliance: + - cis: ["4.2.2.4"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all rules: - - 'c:find /var/log -type f -ls -> r:-\w\w\w\ww\w\w\w\w|-\w\w\w\w\wx\w\w\w|-\w\w\w\w\w\w\ww\w|-\w\w\w\w\w\wr\w\w|-\w\w\w\w\w\w\w\wx' + - "f:/etc/systemd/journald.conf -> r:^Storage=persistent" - ############################################### - # 5 Access, Authentication and Authorization - ############################################### - ############################################### - # 5.1 Configure cron - ############################################### - # 5.1.1 Ensure cron daemon is enabled (Scored) - - id: 6632 - title: "Ensure cron daemon is enabled" + # 4.2.2.5 Ensure journald is not configured to send logs to rsyslog. (Manual) + - id: 6651 + title: "Ensure journald is not configured to send logs to rsyslog." + description: "Data from journald should be kept in the confines of the service and not forwarded on to other services." + rationale: "IF journald is the method for capturing logs, all logs of the system should be handled by journald and not forwarded to other logging mechanisms." + remediation: "Edit the /etc/systemd/journald.conf file and ensure that ForwardToSyslog=yes is removed. Restart the service: # systemctl restart systemd-journal-upload." + compliance: + - cis: ["4.2.2.5"] + - cis_csc_v8: ["8.2", "8.9"] + - cis_csc_v7: ["6.2", "6.3", "6.5"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-6(3)", "AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3", "10.5.3", "10.5.4"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "10.3.3", "5.3.4", "6.4.1", "6.4.2"] + - soc_2: ["PL1.4"] + condition: all + rules: + - 'not f:/etc/systemd/journald.conf -> !r:^# && r:ForwardToSyslog\s*=\s*yes' + + # 4.2.2.6 Ensure journald log rotation is configured per site policy. (Manual) + - id: 6652 + title: "Ensure journald log rotation is configured per site policy." + description: "Journald includes the capability of rotating log files regularly to avoid filling up the system with logs or making the logs unmanageably large. The file /etc/systemd/journald.conf is the configuration file used to specify how logs generated by Journald should be rotated." + rationale: "By keeping the log files smaller and more manageable, a system administrator can easily archive these files to another system and spend less time looking through inordinately large log files." + remediation: "Review /etc/systemd/journald.conf and verify logs are rotated according to site policy. The settings should be carefully understood as there are specific edge cases and prioritisation of parameters. The specific parameters for log rotation are: SystemMaxUse= SystemKeepFree= RuntimeMaxUse= RuntimeKeepFree= MaxFileSec=." 
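As a sketch only, with placeholder values that must be adapted to site policy, the rotation keys listed in the remediation above could be set in /etc/systemd/journald.conf like this:
    [Journal]
    SystemMaxUse=1G
    SystemKeepFree=500M
    RuntimeMaxUse=200M
    RuntimeKeepFree=100M
    MaxFileSec=1month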
+ compliance: + - cis: ["4.2.2.6"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all + rules: + - "f:/etc/systemd/journald.conf -> r:SystemMaxUse=" + - "f:/etc/systemd/journald.conf -> r:SystemKeepFree=" + - "f:/etc/systemd/journald.conf -> r:RuntimeMaxUse=" + - "f:/etc/systemd/journald.conf -> r:RuntimeKeepFree=" + - "f:/etc/systemd/journald.conf -> r:MaxFileSec=" + + # 4.2.2.7 Ensure journald default file permissions configured. (Manual) - Not Implemented + # 4.2.3 Ensure permissions on all logfiles are configured. (Automated) - Not Implemented + # 4.3 Ensure logrotate is configured. (Manual) - Not Implemented + + # 5.1.1 Ensure cron daemon is enabled. (Automated) + - id: 6653 + title: "Ensure cron daemon is enabled." description: "The cron daemon is used to execute batch jobs on the system." rationale: "While there may not be user jobs that need to be run on the system, the system does have maintenance jobs that may include security monitoring that have to run, and cron is used to execute them." - remediation: "Run the following command to enable cron : # systemctl --now enable crond" + remediation: "Run the following command to enable cron: # systemctl --now enable crond." compliance: - cis: ["5.1.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - "c:systemctl is-enabled crond -> enabled" + - "c:systemctl is-enabled crond -> r:^enabled" - # 5.1.2 Ensure permissions on /etc/crontab are configured (Scored) - - id: 6633 - title: "Ensure permissions on /etc/crontab are configured" + # 5.1.2 Ensure permissions on /etc/crontab are configured. (Automated) + - id: 6654 + title: "Ensure permissions on /etc/crontab are configured." description: "The /etc/crontab file is used by cron to control its own jobs. The commands in this item make sure that root is the user and group owner of the file and that only the owner can access the file." rationale: "This file contains information on what system jobs are run by cron. Write access to these files could provide unprivileged users with the ability to elevate their privileges. Read access to these files could provide users with the ability to gain insight on system jobs that run on the system and could provide them a way to gain unauthorized privileged access." - remediation: "Run the following commands to set ownership and permissions on /etc/crontab : # chown root:root /etc/crontab # chmod og-rwx /etc/crontab" + remediation: "Run the following commands to set ownership and permissions on /etc/crontab : # chown root:root /etc/crontab # chmod og-rwx /etc/crontab." 
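For reference, the /etc/crontab remediation above followed by the same stat invocation the rule uses would look like this on a compliant system (expected output shown as a comment):
    chown root:root /etc/crontab
    chmod og-rwx /etc/crontab
    stat -Lc '%a %A %u %U %g %G' /etc/crontab
    # expected: 600 -rw------- 0 root 0 root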
compliance: - cis: ["5.1.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/crontab -> r:^Access: \(0\d00/-\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + - 'c:sh -c "stat -Lc ''%a %A %u %U %g %G'' /etc/crontab" -> r:600\s*\t*-rw------- && r:0\s*\t*root\s*\t*0\s*\t*root' - # 5.1.3 Ensure permissions on /etc/cron.hourly are configured (Scored) - - id: 6634 - title: "Ensure permissions on /etc/cron.hourly are configured" + # 5.1.3 Ensure permissions on /etc/cron.hourly are configured. (Automated) + - id: 6655 + title: "Ensure permissions on /etc/cron.hourly are configured." description: "This directory contains system cron jobs that need to run on an hourly basis. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." - remediation: "Run the following commands to set ownership and permissions on /etc/cron.hourly : # chown root:root /etc/cron.hourly # chmod og-rwx /etc/cron.hourly" + remediation: "Run the following commands to set ownership and permissions on /etc/cron.hourly : # chown root:root /etc/cron.hourly # chmod og-rwx /etc/cron.hourly." compliance: - cis: ["5.1.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/cron.hourly -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + - 'c:stat -Lc "%a %A %u %U %g %G" /etc/cron.hourly/ -> r:700\s*\t*drwx------\s*\t*0\s*\t*root\s*\t*0\s*\t*root' - # 5.1.4 Ensure permissions on /etc/cron.daily are configured (Scored) - - id: 6635 - title: "Ensure permissions on /etc/cron.daily are configured" + # 5.1.4 Ensure permissions on /etc/cron.daily are configured. (Automated) + - id: 6656 + title: "Ensure permissions on /etc/cron.daily are configured." description: "The /etc/cron.daily directory contains system cron jobs that need to run on a daily basis. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. 
The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." - remediation: "Run the following commands to set ownership and permissions on /etc/cron.daily : # chown root:root /etc/cron.daily # chmod og-rwx /etc/cron.daily" + remediation: "Run the following commands to set ownership and permissions on /etc/cron.daily : # chown root:root /etc/cron.daily # chmod og-rwx /etc/cron.daily." compliance: - cis: ["5.1.4"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/cron.daily -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + - 'c:stat -Lc "%a %A %u %U %g %G" /etc/cron.daily/ -> r:700\s*\t*drwx------\s*\t*0\s*\t*root\s*\t*0\s*\t*root' - # 5.1.5 Ensure permissions on /etc/cron.weekly are configured (Scored) - - id: 6636 - title: "Ensure permissions on /etc/cron.weekly are configured" + # 5.1.5 Ensure permissions on /etc/cron.weekly are configured. (Automated) + - id: 6657 + title: "Ensure permissions on /etc/cron.weekly are configured." description: "The /etc/cron.weekly directory contains system cron jobs that need to run on a weekly basis. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." - remediation: "Run the following commands to set ownership and permissions on /etc/cron.weekly : # chown root:root /etc/cron.weekly # chmod og-rwx /etc/cron.weekly" + remediation: "Run the following commands to set ownership and permissions on /etc/cron.weekly : # chown root:root /etc/cron.weekly # chmod og-rwx /etc/cron.weekly." 
compliance: - cis: ["5.1.5"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/cron.weekly -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + - 'c:stat -Lc "%a %A %u %U %g %G" /etc/cron.weekly/ -> r:700\s*\t*drwx------\s*\t*0\s*\t*root\s*\t*0\s*\t*root' - # 5.1.6 Ensure permissions on /etc/cron.monthly are configured (Scored) - - id: 6637 - title: "Ensure permissions on /etc/cron.monthly are configured" + # 5.1.6 Ensure permissions on /etc/cron.monthly are configured. (Automated) + - id: 6658 + title: "Ensure permissions on /etc/cron.monthly are configured." description: "The /etc/cron.monthly directory contains system cron jobs that need to run on a monthly basis. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." - remediation: "Run the following commands to set ownership and permissions on /etc/cron.monthly : # chown root:root /etc/cron.monthly # chmod og-rwx /etc/cron.monthly" + remediation: "Run the following commands to set ownership and permissions on /etc/cron.monthly : # chown root:root /etc/cron.monthly # chmod og-rwx /etc/cron.monthly." compliance: - cis: ["5.1.6"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/cron.monthly -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + - 'c:stat -Lc "%a %A %u %U %g %G" /etc/cron.monthly/ -> r:700\s*\t*drwx------\s*\t*0\s*\t*root\s*\t*0\s*\t*root' - # 5.1.7 Ensure permissions on /etc/cron.d are configured (Scored) - - id: 6638 - title: "Ensure permissions on /etc/cron.d are configured" - description: "Configure /etc/cron.allow and /etc/at.allow to allow specific users to use these services. If /etc/cron.allow or /etc/at.allow do not exist, then /etc/at.deny and /etc/cron.deny are checked. Any user not specifically defined in those files is allowed to use at and cron. By removing the files, only users in /etc/cron.allow and /etc/at.allow are allowed to use at and cron. Note that even though a given user is not listed in cron.allow , cron jobs can still be run as that user. 
The cron.allow file only controls administrative access to the crontab command for scheduling and modifying cron jobs." - rationale: "On many systems, only the system administrator is authorized to schedule cron jobs. Using the cron.allow file to control who can run cron jobs enforces this policy. It is easier to manage an allow list than a deny list. In a deny list, you could potentially add a user ID to the system and forget to add it to the deny files." - remediation: "Run the following commands to set ownership and permissions on /etc/cron.d : # chown root:root /etc/cron.d # chmod og-rwx /etc/cron.d" + # 5.1.7 Ensure permissions on /etc/cron.d are configured. (Automated) + - id: 6659 + title: "Ensure permissions on /etc/cron.d are configured." + description: "The /etc/cron.d directory contains system cron jobs that need to run in a similar manner to the hourly, daily weekly and monthly jobs from /etc/crontab , but require more granular control as to when they run. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." + rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." + remediation: "Run the following commands to set ownership and permissions on /etc/cron.d : # chown root:root /etc/cron.d # chmod og-rwx /etc/cron.d." compliance: - cis: ["5.1.7"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/cron.d -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + - 'c:stat -Lc "%a %A %u %U %g %G" /etc/cron.d/ -> r:700\s*\t*drwx------\s*\t*0\s*\t*root\s*\t*0\s*\t*root' - # 5.1.8 Ensure at/cron is restricted to authorized users (Scored) - - id: 6639 - title: "Ensure at/cron is restricted to authorized users" - description: "Configure /etc/cron.allow and /etc/at.allow to allow specific users to use these services. If /etc/cron.allow or /etc/at.allow do not exist, then /etc/at.deny and /etc/cron.deny are checked. Any user not specifically defined in those files is allowed to use at and cron. By removing the files, only users in /etc/cron.allow and /etc/at.allow are allowed to use at and cron. Note that even though a given user is not listed in cron.allow , cron jobs can still be run as that user. The cron.allow file only controls administrative access to the crontab command for scheduling and modifying cron jobs." + # 5.1.8 Ensure cron is restricted to authorized users. (Automated) + - id: 6660 + title: "Ensure cron is restricted to authorized users." + description: "If cron is installed in the system, configure /etc/cron.allow to allow specific users to use these services. 
If /etc/cron.allow does not exist, then /etc/cron.deny is checked. Any user not specifically defined in those files is allowed to use cron. By removing the file, only users in /etc/cron.allow are allowed to use cron. Note: Even though a given user is not listed in cron.allow, cron jobs can still be run as that user. The cron.allow file only controls administrative access to the crontab command for scheduling and modifying cron jobs." rationale: "On many systems, only the system administrator is authorized to schedule cron jobs. Using the cron.allow file to control who can run cron jobs enforces this policy. It is easier to manage an allow list than a deny list. In a deny list, you could potentially add a user ID to the system and forget to add it to the deny files." - remediation: "Run the following commands to remove /etc/cron.deny and /etc/at.deny and create and set permissions and ownership for /etc/cron.allow and /etc/at.allow : # rm /etc/cron.deny # rm /etc/at.deny # touch /etc/cron.allow # touch /etc/at.allow # chmod og-rwx /etc/cron.allow # chmod og-rwx /etc/at.allow # chown root:root /etc/cron.allow" + remediation: 'Run the following script to remove /etc/cron.deny, create /etc/cron.allow, and set the file mode on /etc/cron.allow: #!/usr/bin/env bash cron_fix() { if rpm -q cronie >/dev/null; then [ -e /etc/cron.deny ] && rm -f /etc/cron.deny [ ! -e /etc/cron.allow ] && touch /etc/cron.allow chown root:root /etc/cron.allow chmod u-x,go-rwx /etc/cron.allow else echo "cron is not installed on the system" fi } cron_fix OR Run the following command to remove cron: # dnf remove cronie.' compliance: - cis: ["5.1.8"] - - cis_csc: ["16"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - "c:stat -L /etc/cron.deny -> r:No such file or directory$" - - "c:stat -L /etc/at.deny -> r:No such file or directory$" - - 'c:stat -L /etc/cron.allow -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - - 'c:stat -L /etc/at.allow -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + - "f:/etc/cron.allow" + - "not f:/etc/cron.deny" + - 'c:stat -Lc "%a %A %u %U %g %G" /etc/cron.allow -> r:640\s*\t*-rw-r-----\s*\t*0\s*\t*root\s*\t*0\s*\t*crontab' + + # 5.1.9 Ensure at is restricted to authorized users. (Automated) + - id: 6661 + title: "Ensure at is restricted to authorized users." + description: "If at is installed in the system, configure /etc/at.allow to allow specific users to use these services. If /etc/at.allow does not exist, then /etc/at.deny is checked. Any user not specifically defined in those files is allowed to use at. By removing the file, only users in /etc/at.allow are allowed to use at. Note: Even though a given user is not listed in at.allow, at jobs can still be run as that user.
The at.allow file only controls administrative access to the at command for scheduling and modifying at jobs." + rationale: "On many systems, only the system administrator is authorized to schedule at jobs. Using the at.allow file to control who can run at jobs enforces this policy. It is easier to manage an allow list than a deny list. In a deny list, you could potentially add a user ID to the system and forget to add it to the deny files." + remediation: 'Run the following script to remove /etc/at.deny, create /etc/at.allow, and set the file mode for /etc/at.allow: #!/usr/bin/env bash at_fix() { if rpm -q at >/dev/null; then [ -e /etc/at.deny ] && rm -f /etc/at.deny [ ! -e /etc/at.allow ] && touch /etc/at.allow chown root:root /etc/at.allow chmod u-x,go-rwx /etc/at.allow else echo "at is not installed on the system" fi } at_fix OR Run the following command to remove at: # dnf remove at.' + compliance: + - cis: ["5.1.9"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - "f:/etc/at.allow" + - "not f:/etc/at.deny" + - 'c:stat -Lc "%a %A %u %U %g %G" /etc/at.allow -> r:640\s*\t*-rw-r-----\s*\t*0\s*\t*root\s*\t*0\s*\t*root' + + # 5.2.1 Ensure permissions on /etc/ssh/sshd_config are configured. (Automated) + - id: 6662 + title: "Ensure permissions on /etc/ssh/sshd_config are configured." description: "The /etc/ssh/sshd_config file contains configuration specifications for sshd. The command below sets the owner and group of the file to root." rationale: "The /etc/ssh/sshd_config file needs to be protected from unauthorized changes by non-privileged users." - remediation: "Run the following commands to set ownership and permissions on /etc/ssh/sshd_config : # chown root:root /etc/ssh/sshd_config # chmod og-rwx /etc/ssh/sshd_config" + remediation: "Run the following commands to set ownership and permissions on /etc/ssh/sshd_config: # chown root:root /etc/ssh/sshd_config # chmod og-rwx /etc/ssh/sshd_config." compliance: - cis: ["5.2.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/ssh/sshd_config -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + - 'c:sh -c "stat -Lc ''%a %A %u %U %g %G'' /etc/ssh/sshd_config" -> r:600\s*\t*-rw------- && r:0\s*\t*root\s*\t*0\s*\t*root' - # 5.2.2 Ensure SSH access is limited (Scored) - - id: 6641 - title: "Ensure SSH access is limited" - description: "There are several options available to limit which users and group can access the system via SSH. It is recommended that at least one of the following options be leveraged: AllowUsers The AllowUsers variable gives the system administrator the option of allowing specific users to ssh into the system. The list consists of space separated user names. 
Numeric user IDs are not recognized with this variable. If a system administrator wants to restrict user access further by only allowing the allowed users to log in from a particular host, the entry can be specified in the form of user@host. AllowGroups The AllowGroups variable gives the system administrator the option of allowing specific groups of users to ssh into the system. The list consists of space separated group names. Numeric group IDs are not recognized with this variable. DenyUsers The DenyUsers variable gives the system administrator the option of denying specific users to ssh into the system. The list consists of space separated user names. Numeric user IDs are not recognized with this variable. If a system administrator wants to restrict user access further by specifically denying a user's access from a particular host, the entry can be specified in the form of user@host. DenyGroups The DenyGroups variable gives the system administrator the option of denying specific groups of users to ssh into the system. The list consists of space separated group names. Numeric group IDs are not recognized with this variable." - rationale: "Restricting which users can remotely access the system via SSH will help ensure that only authorized users access the system." - remediation: "Edit the /etc/ssh/sshd_config file to set one or more of the parameter as follows: AllowUsers ; AllowGroups ; DenyUsers and DenyGroups " - compliance: - - cis: ["5.2.2"] - - cis_csc: ["4.3"] - - pci_dss: ["8.1"] - - tsc: ["CC6.1"] - condition: any - rules: - - 'f:/etc/ssh/sshd_config -> r:^\s*AllowUsers' - - 'f:/etc/ssh/sshd_config -> r:^\s*AllowGroups' - - 'f:/etc/ssh/sshd_config -> r:^\s*DenyUsers' - - 'f:/etc/ssh/sshd_config -> r:^\s*DenyGroups' + # 5.2.2 Ensure permissions on SSH private host key files are configured. (Automated) - Not Implemented - # 5.2.3 Ensure permissions on SSH private host key files are configured (Scored) - - id: 6642 - title: "Ensure permissions on SSH private host key files are configured" - description: "An SSH private key is one of two files used in SSH public key authentication. In this authentication method, The possession of the private key is proof of identity. Only a private key that corresponds to a public key will be able to authenticate successfully. The private keys need to be stored and handled carefully, and no copies of the private key should be distributed." - rationale: "If an unauthorized user obtains the private SSH host key file, the host could be impersonated" - remediation: "Run the following commands to set ownership and permissions on the private SSH host key files: # find /etc/ssh -xdev -type f -name 'ssh_host_*_key' -exec chown root:root {} \\; # find /etc/ssh -xdev -type f -name 'ssh_host_*_key' -exec chmod 0600 {} \\;" - compliance: - - cis: ["5.2.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:stat -L /etc/ssh/ssh_host_rsa_key -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - - 'c:stat -L /etc/ssh/ssh_host_ecdsa_key -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - - 'c:stat -L /etc/ssh/ssh_host_ed25519_key -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + # 5.2.3 Ensure permissions on SSH public host key files are configured. 
(Automated) - Not Implemented - # 5.2.4 Ensure permissions on SSH public host key files are configured (Scored) - - id: 6643 - title: "Ensure permissions on SSH public host key files are configured" - description: "An SSH public key is one of two files used in SSH public key authentication. In this authentication method, a public key is a key that can be used for verifying digital signatures generated using a corresponding private key. Only a public key that corresponds to a private key will be able to authenticate successfully." - rationale: "If a public host key file is modified by an unauthorized user, the SSH service may be compromised." - remediation: "Run the following commands to set permissions and ownership on the SSH host public key files: # find /etc/ssh -xdev -type f -name 'ssh_host_*_key.pub' -exec chmod 0644 {} \\; #find /etc/ssh -xdev -type f -name 'ssh_host_*_key.pub' -exec chown root:root {} \\;" + # 5.2.4 Ensure SSH access is limited. (Automated) + - id: 6663 + title: "Ensure SSH access is limited." + description: "There are several options available to limit which users and group can access the system via SSH. It is recommended that at least one of the following options be leveraged: - AllowUsers: o The AllowUsers variable gives the system administrator the option of allowing specific users to ssh into the system. The list consists of space separated user names. Numeric user IDs are not recognized with this variable. If a system administrator wants to restrict user access further by only allowing the allowed users to log in from a particular host, the entry can be specified in the form of user@host. - AllowGroups: o The AllowGroups variable gives the system administrator the option of allowing specific groups of users to ssh into the system. The list consists of space separated group names. Numeric group IDs are not recognized with this variable. - DenyUsers: o The DenyUsers variable gives the system administrator the option of denying specific users to ssh into the system. The list consists of space separated user names. Numeric user IDs are not recognized with this variable. If a system administrator wants to restrict user access further by specifically denying a user's access from a particular host, the entry can be specified in the form of user@host. - DenyGroups: o The DenyGroups variable gives the system administrator the option of denying specific groups of users to ssh into the system. The list consists of space separated group names. Numeric group IDs are not recognized with this variable." + rationale: "Restricting which users can remotely access the system via SSH will help ensure that only authorized users access the system." + remediation: "Edit the /etc/ssh/sshd_config file to set one or more of the parameter as follows: AllowUsers OR AllowGroups OR DenyUsers OR DenyGroups ." 
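As an illustration only, with placeholder user and group names, any one of the four directives from the remediation above placed in /etc/ssh/sshd_config would satisfy the check, e.g.:
    AllowUsers alice bob@203.0.113.10
    AllowGroups sshusers
    # or, deny-based:
    DenyUsers baduser
    DenyGroups nossh
Only one of the four directives is required, and sshd must be reloaded afterwards for the change to take effect.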
compliance: - cis: ["5.2.4"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["4.3"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.2.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/ssh/ssh_host_rsa_key.pub -> r:^Access: \(0\d\d\d/\w\w\w\w\w\w-\w\w-\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - - 'c:stat -L /etc/ssh/ssh_host_ecdsa_key.pub -> r:^Access: \(0\d\d\d/\w\w\w\w\w\w-\w\w-\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - - 'c:stat -L /etc/ssh/ssh_host_ed25519_key.pub -> r:^Access: \(0\d\d\d/\w\w\w\w\w\w-\w\w-\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + - 'c:sshd -T -> r:^\s*AllowUsers\s+\w+|^\s*AllowGroups\s+\w+|^\s*DenyUsers\s+\w+|^\s*DenyGroups\s+\w+' + - 'f:/etc/ssh/sshd_config -> r:^\s*AllowUsers\s+\w+|^\s*AllowGroups\s+\w+|^\s*DenyUsers\s+\w+|^\s*DenyGroups\s+\w+' - # 5.2.5 Ensure SSH LogLevel is appropriate (Scored) - - id: 6644 - title: "Ensure SSH LogLevel is appropriate" + # 5.2.5 Ensure SSH LogLevel is appropriate. (Automated) + - id: 6664 + title: "Ensure SSH LogLevel is appropriate." description: "INFO level is the basic level that only records login activity of SSH users. In many situations, such as Incident Response, it is important to determine when a particular user was active on a system. The logout record can eliminate those users who disconnected, which helps narrow the field. VERBOSE level specifies that login and logout activity as well as the key fingerprint for any SSH key used for login will be logged. This information is important for SSH key management, especially in legacy environments." rationale: "SSH provides several logging levels with varying amounts of verbosity. DEBUG is specifically not recommended other than strictly for debugging SSH communications since it provides so much data that it is difficult to identify important security information." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: LogLevel VERBOSE or LogLevel INFO" + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: LogLevel VERBOSE OR LogLevel INFO." 
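For reference, a minimal sshd_config fragment for the remediation above, plus a quick way to confirm the effective value through the same sshd -T interface the rule relies on, could be:
    # /etc/ssh/sshd_config
    LogLevel VERBOSE
    # verify (as root):
    sshd -T | grep -i loglevel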
+ references: + - "https://www.ssh.com/ssh/sshd_config/" compliance: - cis: ["5.2.5"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] - references: - - https://www.ssh.com/ssh/sshd_config/ + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - - 'c:sshd -T -> r:^\s*LogLevel\s+VERBOSE|^\s*loglevel\s+INFO' + - 'c:sshd -T -> r:^\s*LogLevel\s+INFO|^\s*LogLevel\s*VERBOSE' + - 'not f:/etc/ssh/sshd_config -> !r:^\s*LogLevel\s+INFO|^\s*LogLevel\s+VERBOSE' + - 'not d:/etc/ssh/sshd_config.d/ -> r:\.*.conf$ -> !r:^\s*LogLevel\s+INFO|^\s*LogLevel\s+VERBOSE' - # 5.2.6 Ensure SSH X11 forwarding is disabled (Scored) - - id: 6645 - title: "Ensure SSH X11 forwarding is disabled" - description: "The X11Forwarding parameter provides the ability to tunnel X11 traffic through the connection to enable remote graphic connections." - rationale: "Disable X11 forwarding unless there is an operational requirement to use X11 applications directly. There is a small risk that the remote X11 servers of users who are logged in via SSH with X11 forwarding could be compromised by other users on the X11 server. Note that even if X11 forwarding is disabled, users can always install their own forwarders." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: X11Forwarding no" + # 5.2.6 Ensure SSH PAM is enabled. (Automated) + - id: 6665 + title: "Ensure SSH PAM is enabled." + description: 'UsePAM Enables the Pluggable Authentication Module interface. If set to "yes" this will enable PAM authentication using ChallengeResponseAuthentication and PasswordAuthentication in addition to PAM account and session module processing for all authentication types.' + rationale: "When usePAM is set to yes, PAM runs through account and session types properly. This is important if you want to restrict access to services based off of IP, time or other factors of the account. Additionally, you can make sure users inherit certain environment variables on login or disallow access to the server." + impact: "If UsePAM is enabled, you will not be able to run sshd(8) as a non-root user." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: UsePAM yes." 
compliance: - cis: ["5.2.6"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -> r:^\s*X11Forwarding\s*\t*no' + - 'c:sshd -T -> r:^\s*usepam\s+yes' + - 'not f:/etc/ssh/sshd_config -> r:^\s*usepam\s+no' - # 5.2.7 Ensure SSH MaxAuthTries is set to 4 or less (Scored) - - id: 6646 - title: "Ensure SSH MaxAuthTries is set to 4 or less" - description: "The MaxAuthTries parameter specifies the maximum number of authentication attempts permitted per connection. When the login failure count reaches half the number, error messages will be written to the syslog file detailing the login failure." - rationale: "Setting the MaxAuthTries parameter to a low number will minimize the risk of successful brute force attacks to the SSH server. While the recommended setting is 4, set the number based on site policy." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: MaxAuthTries 4" + # 5.2.7 Ensure SSH root login is disabled. (Automated) + - id: 6666 + title: "Ensure SSH root login is disabled." + description: "The PermitRootLogin parameter specifies if the root user can log in using ssh. The default is no." + rationale: "Disallowing root logins over SSH requires system admins to authenticate using their own individual account, then escalating to root via sudo or su. This in turn limits opportunity for non-repudiation and provides a clear audit trail in the event of a security incident." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitRootLogin no." compliance: - cis: ["5.2.7"] - - cis_csc: ["16.13"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["5.4"] + - cis_csc_v7: ["4.3"] + - cmmc_v2.0: ["AC.L2-3.1.5", "AC.L2-3.1.6", "AC.L2-3.1.7", "SC.L2-3.13.3"] + - iso_27001-2013: ["A.9.2.3"] + - nist_sp_800-53: ["AC-6(2)", "AC-6(5)"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - soc_2: ["CC6.1", "CC6.3"] condition: all rules: - - 'c:sshd -T -> !r:^# && n:^MaxAuthTries\s*\t*(\d+) compare <= 4' + - 'c:sshd -T -> r:^\s*PermitRootLogin\s+no' + - 'not f:/etc/ssh/sshd_config -> r:^\s*PermitRootLogin\s+yes' - # 5.2.8 Ensure SSH IgnoreRhosts is enabled (Scored) - - id: 6647 - title: "Ensure SSH IgnoreRhosts is enabled" - description: "The IgnoreRhosts parameter specifies that .rhosts and .shosts files will not be used in RhostsRSAAuthentication or HostbasedAuthentication." - rationale: "Setting this parameter forces users to enter a password when authenticating with ssh." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: Ignorerhosts yes" + # 5.2.8 Ensure SSH HostbasedAuthentication is disabled. (Automated) + - id: 6667 + title: "Ensure SSH HostbasedAuthentication is disabled." + description: "The HostbasedAuthentication parameter specifies if authentication is allowed through trusted hosts via the user of .rhosts, or /etc/hosts.equiv, along with successful public key client host authentication. 
This option only applies to SSH Protocol Version 2." + rationale: "Even though the .rhosts files are ineffective if support is disabled in /etc/pam.conf, disabling the ability to use .rhosts files in SSH provides an additional layer of protection." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: HostbasedAuthentication no." compliance: - cis: ["5.2.8"] - - cis_csc: ["9.2"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["16.3"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -> !r:^# && r:ignorerhosts\s*\t*yes' + - 'c:sshd -T -> r:^\s*HostbasedAuthentication\s+no' + - 'not f:/etc/ssh/sshd_config -> r:^\s*HostbasedAuthentication\s+yes' - # 5.2.9 Ensure SSH HostbasedAuthentication is disabled (Scored) - - id: 6648 - title: "Ensure SSH HostbasedAuthentication is disabled" - description: "The HostbasedAuthentication parameter specifies if authentication is allowed through trusted hosts via the user of .rhosts , or /etc/hosts.equiv, along with successful public key client host authentication. This option only applies to SSH Protocol Version 2." - rationale: "Even though the .rhosts files are ineffective if support is disabled in /etc/pam.conf, disabling the ability to use .rhosts files in SSH provides an additional layer of protection." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: HostbasedAuthentication no" + # 5.2.9 Ensure SSH PermitEmptyPasswords is disabled. (Automated) + - id: 6668 + title: "Ensure SSH PermitEmptyPasswords is disabled." + description: "The PermitEmptyPasswords parameter specifies if the SSH server allows login to accounts with empty password strings." + rationale: "Disallowing remote shell access to accounts that have an empty password reduces the probability of unauthorized access to the system." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitEmptyPasswords no." compliance: - cis: ["5.2.9"] - - cis_csc: ["16.3"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["16.3"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -> !r:^# && r:HostbasedAuthentication\s*\t*no' + - 'c:sshd -T -> r:^\s*PermitEmptyPasswords\s+no' + - 'not f:/etc/ssh/sshd_config -> r:^\s*PermitEmptyPasswords\s+yes' - # 5.2.10 Ensure SSH root login is disabled (Scored) - - id: 6649 - title: "Ensure SSH root login is disabled" - description: "The PermitRootLogin parameter specifies if the root user can log in using ssh. The default is no." - rationale: "Disallowing root logins over SSH requires system admins to authenticate using their own individual account, then escalating to root via sudo or su . 
This in turn limits opportunity for non-repudiation and provides a clear audit trail in the event of a security incident." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitRootLogin no" + # 5.2.10 Ensure SSH PermitUserEnvironment is disabled. (Automated) + - id: 6669 + title: "Ensure SSH PermitUserEnvironment is disabled." + description: "The PermitUserEnvironment option allows users to present environment options to the ssh daemon." + rationale: "Permitting users the ability to set environment variables through the SSH daemon could potentially allow users to bypass security controls (e.g. setting an execution path that has ssh executing trojan'd programs)." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitUserEnvironment no." compliance: - cis: ["5.2.10"] - - cis_csc: ["4.3"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -> !r:^# && r:PermitRootLogin\s*\t*no' + - 'c:sshd -T -> r:^\s*PermitUserEnvironment\s+no' + - 'not f:/etc/ssh/sshd_config -> r:^\s*PermitUserEnvironment\s+yes' - # 5.2.11 Ensure SSH PermitEmptyPasswords is disabled (Scored) - - id: 6650 - title: "Ensure SSH PermitEmptyPasswords is disabled" - description: "The PermitEmptyPasswords parameter specifies if the SSH server allows login to accounts with empty password strings." - rationale: "Disallowing remote shell access to accounts that have an empty password reduces the probability of unauthorized access to the system." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitEmptyPasswords no" + # 5.2.11 Ensure SSH IgnoreRhosts is enabled. (Automated) + - id: 6670 + title: "Ensure SSH IgnoreRhosts is enabled." + description: "The IgnoreRhosts parameter specifies that .rhosts and .shosts files will not be used in RhostsRSAAuthentication or HostbasedAuthentication." + rationale: "Setting this parameter forces users to enter a password when authenticating with ssh." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: IgnoreRhosts yes." 
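The SSH checks in this block all follow the same two-part audit: the effective value reported by sshd -T must match the benchmark, and /etc/ssh/sshd_config must not carry a contradicting directive. The standalone sketch below is not part of the policy file or of the SCA engine; the helper name effective_sshd_option is illustrative only, and it assumes sshd is on PATH and the script runs with enough privilege to execute sshd -T (typically root).

#!/usr/bin/env python3
# Illustrative sketch only: mirror the first half of the audits above by reading
# the effective sshd configuration with `sshd -T` and comparing directive values.
import subprocess

def effective_sshd_option(name: str) -> str:
    """Return the effective value of an sshd directive as reported by `sshd -T`."""
    out = subprocess.run(["sshd", "-T"], capture_output=True, text=True, check=True).stdout
    for line in out.splitlines():
        key, _, value = line.partition(" ")
        if key.lower() == name.lower():
            return value.strip()
    return ""

if __name__ == "__main__":
    expectations = [("permitrootlogin", "no"), ("permitemptypasswords", "no"),
                    ("hostbasedauthentication", "no"), ("ignorerhosts", "yes")]
    for directive, expected in expectations:
        actual = effective_sshd_option(directive)
        status = "OK" if actual == expected else "FAIL"
        print(f"{directive}: {actual!r} (expected {expected!r}) -> {status}")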
compliance: - cis: ["5.2.11"] - - cis_csc: ["16.3"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.13.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -> !r:^# && r:PermitEmptyPasswords\s*\t*no' + - 'c:sshd -T -> r:^\s*IgnoreRhosts\s+yes' + - 'not f:/etc/ssh/sshd_config -> r:^\s*IgnoreRhosts\s+no' - # 5.2.12 Ensure SSH PermitUserEnvironment is disabled (Scored) - - id: 6651 - title: "Ensure SSH PermitUserEnvironment is disabled" - description: "The PermitUserEnvironment option allows users to present environment options to the ssh daemon." - rationale: "Permitting users the ability to set environment variables through the SSH daemon could potentially allow users to bypass security controls (e.g. setting an execution path that has ssh executing trojan'd programs)" - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitUserEnvironment no" + # 5.2.12 Ensure SSH X11 forwarding is disabled. (Automated) + - id: 6671 + title: "Ensure SSH X11 forwarding is disabled." + description: "The X11Forwarding parameter provides the ability to tunnel X11 traffic through the connection to enable remote graphic connections." + rationale: "Disable X11 forwarding unless there is an operational requirement to use X11 applications directly. There is a small risk that the remote X11 servers of users who are logged in via SSH with X11 forwarding could be compromised by other users on the X11 server. Note that even if X11 forwarding is disabled, users can always install their own forwarders." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: X11Forwarding no." compliance: - cis: ["5.2.12"] - - cis_csc: ["5.1"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:sshd -T -> r:^\s*PermitUserEnvironment\s*\t*no' + - 'c:sshd -T -> r:^\s*X11Forwarding\s+no' + - 'not f:/etc/ssh/sshd_config -> r:^\s*X11Forwarding\s+yes' - # 5.2.13 Ensure SSH Idle Timeout Interval is configured (Scored) - - id: 6652 - title: "Ensure SSH Idle Timeout Interval is configured" - description: "The two options ClientAliveInterval and ClientAliveCountMax control the timeout of ssh sessions. When the ClientAliveInterval variable is set, ssh sessions that have no activity for the specified length of time are terminated. When the ClientAliveCountMax variable is set, sshd will send client alive messages at every ClientAliveInterval interval. When the number of consecutive client alive messages are sent with no response from the client, the ssh session is terminated. 
For example, if the ClientAliveInterval is set to 15 seconds and the ClientAliveCountMax is set to 3, the client ssh session will be terminated after 45 seconds of idle time." - rationale: "Having no timeout value associated with a connection could allow an unauthorized user access to another user's ssh session (e.g. user walks away from their computer and doesn't lock the screen). Setting a timeout value at least reduces the risk of this happening. While the recommended setting is 300 seconds (5 minutes), set this timeout value based on site policy. The recommended setting for ClientAliveCountMax is 0. In this case, the client session will be terminated after 5 minutes of idle time and no keepalive messages will be sent." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameters according to site policy: ClientAliveInterval 300 and ClientAliveCountMax 0" + # 5.2.13 Ensure SSH AllowTcpForwarding is disabled. (Automated) + - id: 6672 + title: "Ensure SSH AllowTcpForwarding is disabled." + description: "SSH port forwarding is a mechanism in SSH for tunneling application ports from the client to the server, or servers to clients. It can be used for adding encryption to legacy applications, going through firewalls, and some system administrators and IT professionals use it for opening backdoors into the internal network from their home machines." + rationale: "Leaving port forwarding enabled can expose the organization to security risks and back-doors. SSH connections are protected with strong encryption. This makes their contents invisible to most deployed network monitoring and traffic filtering solutions. This invisibility carries considerable risk potential if it is used for malicious purposes such as data exfiltration. Cybercriminals or malware could exploit SSH to hide their unauthorized communications, or to exfiltrate stolen data from the target network." + impact: "SSH tunnels are widely used in many corporate environments that employ mainframe systems as their application backends. In those environments the applications themselves may have very limited native support for security. By utilizing tunneling, compliance with SOX, HIPAA, PCI-DSS, and other standards can be achieved without having to modify the applications." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: AllowTcpForwarding no." + references: + - "https://www.ssh.com/ssh/tunneling/example" compliance: - cis: ["5.2.13"] - - cis_csc: ["16.11"] - - pci_dss: ["12.3.8"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.13.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -> n:^\s*ClientAliveInterval\s*\t*(\d+) compare <= 300' - - 'c:sshd -T -> n:^\s*ClientAliveCountMax\s*\t*(\d+) compare <= 3' + - 'c:sshd -T -> r:^\s*AllowTcpForwarding\s+no' + - 'not f:/etc/ssh/sshd_config -> r:^\s*AllowTcpForwarding\s+yes' - # 5.2.14 Ensure SSH LoginGraceTime is set to one minute or less (Scored) - - id: 6653 - title: "Ensure SSH LoginGraceTime is set to one minute or less" - description: "The LoginGraceTime parameter specifies the time allowed for successful authentication to the SSH server. The longer the Grace period is the more open unauthenticated connections can exist. 
Like other session controls in this session the Grace Period should be limited to appropriate organizational limits to ensure the service is available for needed access." - rationale: "Setting the LoginGraceTime parameter to a low number will minimize the risk of successful brute force attacks to the SSH server. It will also limit the number of concurrent unauthenticated connections While the recommended setting is 60 seconds (1 Minute), set the number based on site policy." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: LoginGraceTime 60" + # 5.2.14 Ensure system-wide crypto policy is not over-ridden. (Automated) + - id: 6673 + title: "Ensure system-wide crypto policy is not over-ridden." + description: "System-wide Crypto policy can be over-ridden or opted out of for openSSH." + rationale: "Over-riding or opting out of the system-wide crypto policy could allow for the use of less secure Ciphers, MACs, KexAlgorithms and GSSAPIKexAlgorithm." + remediation: "Run the following commands: # sed -ri \"s/^\\s*(CRYPTO_POLICY\\s*=.*)$/# \\1/\" /etc/sysconfig/sshd # systemctl reload sshd." compliance: - cis: ["5.2.14"] - - cis_csc: ["5.1"] - - pci_dss: ["8.1"] - - tsc: ["CC6.1"] + - cis_csc_v8: ["3.10"] + - cis_csc_v7: ["14.4"] + - cmmc_v2.0: ["AC.L2-3.1.13", "AC.L2-3.1.17", "IA.L2-3.5.10", "SC.L2-3.13.11", "SC.L2-3.13.15", "SC.L2-3.13.8"] + - hipaa: ["164.312(a)(2)(iv)", "164.312(e)(1)", "164.312(e)(2)(i)", "164.312(e)(2)(ii)"] + - iso_27001-2013: ["A.10.1.1", "A.13.1.1"] + - nist_sp_800-53: ["AC-17(2)", "SC-8", "SC-8(1)"] + - pci_dss_v3.2.1: ["2.1.1", "4.1", "4.1.1", "8.2.1"] + - pci_dss_v4.0: ["2.2.7", "4.1.1", "4.2.1", "4.2.1.2", "4.2.2", "8.3.2"] condition: all rules: - - 'c:sshd -T -> n:^\s*LoginGraceTime\s*\t*(\d+) compare <= 60' + - 'not f:/etc/sysconfig/sshd -> ^\s*CRYPTO_POLICY=' - # 5.2.15 Ensure SSH warning banner is configured (Scored) - - id: 6654 - title: "Ensure SSH warning banner is configured" + # 5.2.15 Ensure SSH warning banner is configured. (Automated) + - id: 6674 + title: "Ensure SSH warning banner is configured." description: "The Banner parameter specifies a file whose contents must be sent to the remote user before authentication is permitted. By default, no banner is displayed." rationale: "Banners are used to warn connecting users of the particular site's policy regarding connection. Presenting a warning message prior to the normal user login may assist the prosecution of trespassers on the computer system." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: Banner /etc/issue.net" + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: Banner /etc/issue.net." compliance: - cis: ["5.2.15"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -> r:^\s*Banner\s*\t*/etc/issue.net' + - 'c:sshd -T -> r:^\s*banner\s*\t*/etc/issue.net' - # 5.2.16 Ensure SSH PAM is enabled (Scored) - - id: 6655 - title: "Ensure SSH PAM is enabled" - description: "UsePAM Enables the Pluggable Authentication Module interface. 
If set to “yes” this will enable PAM authentication using ChallengeResponseAuthentication and PasswordAuthentication in addition to PAM account and session module processing for all authentication types." - rationale: "When usePAM is set to yes, PAM runs through account and session types properly. This is important if you want to restrict access to services based off of IP, time or other factors of the account. Additionally, you can make sure users inherit certain environment variables on login or disallow access to the server." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: UsePAM yes" + # 5.2.16 Ensure SSH MaxAuthTries is set to 4 or less. (Automated) + - id: 6675 + title: "Ensure SSH MaxAuthTries is set to 4 or less." + description: "The MaxAuthTries parameter specifies the maximum number of authentication attempts permitted per connection. When the login failure count reaches half the number, error messages will be written to the syslog file detailing the login failure." + rationale: "Setting the MaxAuthTries parameter to a low number will minimize the risk of successful brute force attacks to the SSH server. While the recommended setting is 4, set the number based on site policy." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: MaxAuthTries 4." compliance: - cis: ["5.2.16"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["16.13"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - - 'c:sshd -T -> r:^\s*usepam\s+yes' + - 'c:sshd -T -> n:^MaxAuthTries\s*\t*(\d+) compare <= 4' + - 'not f:/etc/ssh/sshd_config -> n:^MaxAuthTries\s*\t*(\d+) compare > 4' - # 5.2.17 Ensure SSH AllowTcpForwarding is disabled (Scored) - - id: 6656 - title: "Ensure SSH AllowTcpForwarding is disabled" - description: "SSH port forwarding is a mechanism in SSH for tunneling application ports from the client to the server, or servers to clients. It can be used for adding encryption to legacy applications, going through firewalls, and some system administrators and IT professionals use it for opening backdoors into the internal network from their home machines." - rationale: "Leaving port forwarding enabled can expose the organization to security risks and back-doors. SSH connections are protected with strong encryption. This makes their contents invisible to most deployed network monitoring and traffic filtering solutions. This invisibility carries considerable risk potential if it is used for malicious purposes such as data exfiltration. Cybercriminals or malware could exploit SSH to hide their unauthorized communications, or to exfiltrate stolen data from the target network." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: AllowTcpForwarding no" + # 5.2.17 Ensure SSH MaxStartups is configured. (Automated) + - id: 6676 + title: "Ensure SSH MaxStartups is configured." + description: "The MaxStartups parameter specifies the maximum number of concurrent unauthenticated connections to the SSH daemon." 
+ rationale: "To protect a system from denial of service due to a large number of pending authentication connection attempts, use the rate limiting function of MaxStartups to protect availability of sshd logins and prevent overwhelming the daemon." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: maxstartups 10:30:60." compliance: - cis: ["5.2.17"] - - cis_csc: ["9.2"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] - references: - - https://www.ssh.com/ssh/tunneling/example + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -> r:^\s*AllowTcpForwarding\s+no' + - 'c:sshd -T -> r:^\s*maxstartups\s+10:30:60' + - 'not f:/etc/ssh/sshd_config -> r:^\s*maxstartups\s+(((1[1-9]|[1-9][0-9][0-9]+):([0-9]+):([0-9]+))|(([0-9]+):(3[1-9]|[4-9][0-9]|[1-9][0-9][0-9]+):([0-9]+))|(([0-9]+):([0-9]+):(6[1-9]|[7-9][0-9]|[1-9][0-9][0-9]+)))' - # 5.2.19 Ensure SSH MaxSessions is set to 4 or less (Scored) - - id: 6657 - title: "Ensure SSH MaxSessions is set to 4 or less" + # 5.2.18 Ensure SSH MaxSessions is set to 10 or less. (Automated) + - id: 6677 + title: "Ensure SSH MaxSessions is set to 10 or less." description: "The MaxSessions parameter specifies the maximum number of open sessions permitted from a given connection." rationale: "To protect a system from denial of service due to a large number of concurrent sessions, use the rate limiting function of MaxSessions to protect availability of sshd logins and prevent overwhelming the daemon." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: MaxSessions 4" + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: MaxSessions 10." 
compliance: - - cis: ["5.2.19"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["5.2.18"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -> n:^\s*MaxSessions\s+(\d+) compare <= 4' + - 'c:sshd -T -> n:^\s*maxsessions\s+(\d+) compare <= 10' + - 'not f:/etc/ssh/sshd_config -> r:^\s*MaxSessions\s+(1[1-9]|[2-9][0-9]|[1-9][0-9][0-9]+)' - # 5.2.20 Ensure system-wide crypto policy is not over-ridden (Scored) - - id: 6658 - title: "Ensure system-wide crypto policy is not over-ridden" - description: "System-wide Crypto policy can be over-ridden or opted out of for openSSH" - rationale: "Over-riding or opting out of the system-wide crypto policy could allow for the use of less secure Ciphers, MACs, KexAlgoritms and GSSAPIKexAlgorithsm" - remediation: "Run the following commands: # sed -ri \"s/^\\s*(CRYPTO_POLICY\\s*=.*)$/# \\1/\" /etc/sysconfig/sshd; # systemctl reload sshd" + # 5.2.19 Ensure SSH LoginGraceTime is set to one minute or less. (Automated) + - id: 6678 + title: "Ensure SSH LoginGraceTime is set to one minute or less." + description: "The LoginGraceTime parameter specifies the time allowed for successful authentication to the SSH server. The longer the Grace period is the more open unauthenticated connections can exist. Like other session controls in this session the Grace Period should be limited to appropriate organizational limits to ensure the service is available for needed access." + rationale: "Setting the LoginGraceTime parameter to a low number will minimize the risk of successful brute force attacks to the SSH server. It will also limit the number of concurrent unauthenticated connections While the recommended setting is 60 seconds (1 Minute), set the number based on site policy." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: LoginGraceTime 60." compliance: - - cis: ["5.2.20"] - - cis_csc: ["14.4"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis: ["5.2.19"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:/etc/sysconfig/sshd -> !r:^\s*CRYPTO_POLICY=' + - 'c:sshd -T -> n:logingracetime\s*\t*(\d+) compare <= 60 && n:LoginGraceTime\s*\t*(\d+) compare != 0' + - 'not f:/etc/ssh/sshd_config -> r:\s*LoginGraceTime\s+(0|6[1-9]|[7-9][0-9]|[1-9][0-9][0-9]+|[^1]m)' - ############################################### - # 5.3 Configure authselect - ############################################### - # 5.3.1 Create custom authselect profile (Scored) - - id: 6659 - title: "Create custom authselect profile" - description: "A custom profile can be created by copying and customizing one of the default profiles. 
The default profiles include: sssd, winbind, or the nis." - rationale: "A custom profile is required to customize many of the pam options" - remediation: "Run the following command to create a custom authselect profile: # authselect create-profile -b .Example: # authselect create-profile custom-profile -b sssd --symlink-meta" + # 5.2.20 Ensure SSH Idle Timeout Interval is configured. (Automated) + - id: 6679 + title: "Ensure SSH Idle Timeout Interval is configured." + description: "The two options ClientAliveInterval and ClientAliveCountMax control the timeout of ssh sessions. - ClientAliveInterval sets a timeout interval in seconds after which if no data has been received from the client, sshd will send a message through the encrypted channel to request a response from the client. The default is 0, indicating that these messages will not be sent to the client. - ClientAliveCountMax sets the number of client alive messages which may be sent without sshd receiving any messages back from the client. If this threshold is reached while client alive messages are being sent, sshd will disconnect the client, terminating the session. The default value is 3. o The client alive messages are sent through the encrypted channel o Setting ClientAliveCountMax to 0 disables connection termination Example: The default value is 3. If ClientAliveInterval is set to 15, and ClientAliveCountMax is left at the default, unresponsive SSH clients will be disconnected after approximately 45 seconds." + rationale: "Having no timeout value associated with a connection could allow an unauthorized user access to another user's ssh session (e.g. user walks away from their computer and doesn't lock the screen). Setting a timeout value reduces this risk. - The recommended ClientAliveInterval setting is no greater than 900 seconds (15 minutes) - The recommended ClientAliveCountMax setting is 0 - At the 15 minute interval, if the ssh session is inactive, the session will be terminated." + impact: "In some cases this setting may cause termination of long-running scripts over SSH or remote automation tools which rely on SSH. In developing the local site policy, the requirements of such scripts should be considered and appropriate ServerAliveInterval and ClientAliveInterval settings should be calculated to insure operational continuity." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameters according to site policy. This should include ClientAliveInterval between 1 and 900 and ClientAliveCountMax of 0: ClientAliveInterval 900 ClientAliveCountMax 0." + references: + - "https://man.openbsd.org/sshd_config" + compliance: + - cis: ["5.2.20"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["16.11"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all + rules: + - 'c:sshd -T -> n:ClientAliveInterval\s*\t*(\d+) compare > 0' + - 'c:sshd -T -> n:ClientAliveInterval\s*\t*(\d+) compare <= 900' + - 'c:sshd -T -> r:clientalivecountmax\s*\t*0' + - 'not f:/etc/ssh/sshd_config -> r:^ClientAliveInterval\s*\t*0' + - 'not f:/etc/ssh/sshd_config -> n:^ClientAliveInterval\s*\t*(\d+) compare > 900' + - 'not f:/etc/ssh/sshd_config -> n:^clientalivecountmax\s*\t*(\d+) compare > 0' + + # 5.3.1 Ensure sudo is installed.
(Automated) + - id: 6680 + title: "Ensure sudo is installed." + description: "sudo allows a permitted user to execute a command as the superuser or another user, as specified by the security policy. The invoking user's real (not effective) user ID is used to determine the user name with which to query the security policy." + rationale: "sudo supports a plug-in architecture for security policies and input/output logging. Third parties can develop and distribute their own policy and I/O logging plug-ins to work seamlessly with the sudo front end. The default security policy is sudoers, which is configured via the file /etc/sudoers and any entries in /etc/sudoers.d. The security policy determines what privileges, if any, a user has to run sudo. The policy may require that users authenticate themselves with a password or another authentication mechanism. If authentication is required, sudo will exit if the user's password is not entered within a configurable time limit. This limit is policy-specific." + remediation: "Run the following command to install sudo # dnf install sudo." compliance: - cis: ["5.3.1"] - - pci_dss: ["8.1"] - - tsc: ["CC6.1"] + - cis_csc_v8: ["5.4"] + - cis_csc_v7: ["4.3"] + - cmmc_v2.0: ["AC.L2-3.1.5", "AC.L2-3.1.6", "AC.L2-3.1.7", "SC.L2-3.13.3"] + - iso_27001-2013: ["A.9.2.3"] + - nist_sp_800-53: ["AC-6(2)", "AC-6(5)"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - soc_2: ["CC6.1", "CC6.3"] condition: all rules: - - "c:authselect current -> r:^Profile ID: custom" + - "c:dnf list sudo -> r:^sudo.x86_64" - # 5.3.3 Ensure authselect includes with-faillock (Scored) - - id: 6660 - title: "Ensure authselect includes with-faillock" - description: "The pam_faillock.so module maintains a list of failed authentication attempts per user during a specified interval and locks the account in case there were more than deny consecutive failed authentications. It stores the failure records into per-user files in the tally directory." - rationale: "Locking out user IDs after n unsuccessful consecutive login attempts mitigates brute force password attacks against your systems." - remediation: "Run the following command to include the with-faillock option: # authselect select with-faillock Example: # authselect select custom/custom-profile with-sudo with-faillock without-nullok" + # 5.3.2 Ensure sudo commands use pty. (Automated) + - id: 6681 + title: "Ensure sudo commands use pty." + description: "sudo can be configured to run only from a pseudo terminal (pseudo-pty)." + rationale: "Attackers can run a malicious program using sudo which would fork a background process that remains even when the main program has finished executing." + impact: "WARNING: Editing the sudo configuration incorrectly can cause sudo to stop functioning. Always use visudo to modify sudo configuration files." + remediation: "Edit the file /etc/sudoers with visudo or a file in /etc/sudoers.d/ with visudo -f and add the following line: Defaults use_pty." + compliance: + - cis: ["5.3.2"] + - cis_csc_v8: ["5.4"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L2-3.1.5", "AC.L2-3.1.6", "AC.L2-3.1.7", "SC.L2-3.13.3"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-6(2)", "AC-6(5)"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - soc_2: ["CC6.1", "CC6.3"] + condition: any + rules: + - 'f:/etc/sudoers -> r:^\s*\t*Defaults\s*\t*use_pty' + - 'd:/etc/sudoers.d -> r:\.* -> r:^\s*\t*Defaults\s*\t*use_pty' + + # 5.3.3 Ensure sudo log file exists. 
(Automated) + - id: 6682 + title: "Ensure sudo log file exists." + description: "sudo can use a custom log file." + rationale: "A sudo log file simplifies auditing of sudo commands." + impact: "WARNING: Editing the sudo configuration incorrectly can cause sudo to stop functioning. Always use visudo to modify sudo configuration files." + remediation: 'Edit the file /etc/sudoers or a file in /etc/sudoers.d/ with visudo or visudo -f and add the following line: Defaults logfile="" Example Defaults logfile="/var/log/sudo.log".' compliance: - cis: ["5.3.3"] - - pci_dss: ["8.1"] - - tsc: ["CC6.1"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: any + rules: + - 'f:/etc/sudoers -> r:^\s*\t*Defaults\s*\t*logfile=' + - 'd:/etc/sudoers.d -> r:\.* -> r:^\s*\t*Defaults\s*\t*logfile=' + + # 5.3.4 Ensure users must provide password for escalation. (Automated) + - id: 6683 + title: "Ensure users must provide password for escalation." + description: "The operating system must be configured so that users must provide a password for privilege escalation." + rationale: "Without re-authentication, users may access resources or perform tasks for which they do not have authorization. When operating systems provide the capability to escalate a functional capability, it is critical the user re-authenticate." + impact: "This will prevent automated processes from being able to elevate privileges. To include Ansible and AWS builds." + remediation: "Based on the outcome of the audit procedure, use visudo -f to edit the relevant sudoers file. Remove any line with occurrences of NOPASSWD tags in the file." + compliance: + - cis: ["5.3.4"] + - cis_csc_v8: ["5.4"] + - cis_csc_v7: ["4.3"] + - cmmc_v2.0: ["AC.L2-3.1.5", "AC.L2-3.1.6", "AC.L2-3.1.7", "SC.L2-3.13.3"] + - iso_27001-2013: ["A.9.2.3"] + - nist_sp_800-53: ["AC-6(2)", "AC-6(5)"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - soc_2: ["CC6.1", "CC6.3"] + condition: all + rules: + - "not f:/etc/sudoers -> !r:^# && r:*NOPASSWD" + - 'not d:/etc/sudoers.d -> r:\.* -> !r:^# && r:*NOPASSWD' + + # 5.3.5 Ensure re-authentication for privilege escalation is not disabled globally. (Automated) + - id: 6684 + title: "Ensure re-authentication for privilege escalation is not disabled globally." + description: "The operating system must be configured so that users must re-authenticate for privilege escalation." + rationale: "Without re-authentication, users may access resources or perform tasks for which they do not have authorization. When operating systems provide the capability to escalate a functional capability, it is critical the user re-authenticate." + remediation: "Configure the operating system to require users to reauthenticate for privilege escalation. Based on the outcome of the audit procedure, use visudo -f to edit the relevant sudoers file. Remove any occurrences of !authenticate tags in the file(s)." 
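Both sudoers checks above reduce to the same scan: every non-comment line of /etc/sudoers and of the files under /etc/sudoers.d is searched for a NOPASSWD or !authenticate tag. A rough equivalent in Python is sketched below, assuming the script runs as root so both locations are readable; offending_lines is an illustrative name, not a Wazuh API.

#!/usr/bin/env python3
# Illustrative sketch only: report non-comment sudoers lines that contain a
# NOPASSWD or !authenticate tag, mirroring the audit of the two checks above.
from pathlib import Path

def offending_lines(path):
    hits = []
    for line in path.read_text().splitlines():
        stripped = line.strip()
        if stripped.startswith("#"):
            continue
        if "NOPASSWD" in stripped or "!authenticate" in stripped:
            hits.append(f"{path}: {stripped}")
    return hits

if __name__ == "__main__":
    files = [Path("/etc/sudoers"), *Path("/etc/sudoers.d").glob("*")]
    findings = [hit for f in files if f.is_file() for hit in offending_lines(f)]
    print("\n".join(findings) or "no NOPASSWD / !authenticate entries found")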
+ compliance: + - cis: ["5.3.5"] + - cis_csc_v8: ["5.4"] + - cis_csc_v7: ["4.3"] + - cmmc_v2.0: ["AC.L2-3.1.5", "AC.L2-3.1.6", "AC.L2-3.1.7", "SC.L2-3.13.3"] + - iso_27001-2013: ["A.9.2.3"] + - nist_sp_800-53: ["AC-6(2)", "AC-6(5)"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - soc_2: ["CC6.1", "CC6.3"] condition: all rules: - - "c:authselect current -> r:with-faillock" - - "f:/etc/authselect/authselect.conf -> r:with-faillock" + - 'not f:/etc/sudoers -> !r:^# && r:*\!authenticate' + - 'not d:/etc/sudoers.d -> r:\.* -> !r:^# && r:*\!authenticate' - ############################################### - # 5.4 Configure PAM - ############################################### - # 5.4.1 Ensure password creation requirements are configured (Scored) - - id: 6661 - title: "Ensure password creation requirements are configured" - description: "The pam_pwquality.so module checks the strength of passwords. It performs checks such as making sure a password is not a dictionary word, it is a certain length, contains a mix of characters (e.g. alphabet, numeric, other) and more. The following are definitions of the pam_pwquality.so options. try_first_pass - retrieve the password from a previous stacked PAM module. If not available, then prompt the user for a password. retry=3 - Allow 3 tries before sending back a failure. minlen=14 - password must be 14 characters or more Either of the following can be used to enforce complex passwords: minclass=4 - provide at least four classes of characters for the new password OR dcredit=-1 - provide at least one digit ucredit=-1 - provide at least one uppercase character ocredit=-1 - provide at least one special character lcredit=-1 - provide at least one lowercase character The settings shown above are one possible policy. Alter these values to conform to your own organization's password policies" - rationale: "Strong passwords protect systems from being hacked through brute force methods." - remediation: "Edit the file /etc/security/pwquality.conf and add or modify the following line for password length to conform to site policy: minlen = 14 Edit the file /etc/security/pwquality.conf and add or modify the following line for password complexity to conform to site policy: minclass = 4 OR dcredit = -1 ucredit = -1 ocredit = -1 -1 = -1 Run the following to update the system-auth and password-auth files: CP=$(authselect current | awk 'NR == 1 {print $3}' | grep custom/) for FN in system-auth password-auth; do [[ -n $CP ]] && PTF=/etc/authselect/$CP/$FN || PTF=/etc/authselect/$FN [[ -z $(grep -E '^\\s*password\\s+requisite\\s+pam_pwquality.so\\s+.*enforce-for-root\\s*.*$' $PTF) ]] && sed -ri 's/^\\s*(password\\s+requisite\\s+pam_pwquality.so\\s+)(.*)$/\\1\\2 enforce-for-root/' $PTF [[ -n $(grep -E '^\\s*password\\s+requisite\\s+pam_pwquality.so\\s+.*\\s+retry=\\S+\\s*.*$' $PTF) ]] && sed -ri '/pwquality/s/retry=\\S+/retry=3/' $PTF || sed -ri 's/^\\s*(password\\s+requisite\\s+pam_pwquality.so\\s+)(.*)$/\\1\\2 retry=3/' $PTF done authselect apply-changes" + # 5.3.6 Ensure sudo authentication timeout is configured correctly. (Automated) + - id: 6685 + title: "Ensure sudo authentication timeout is configured correctly." + description: "sudo caches used credentials for a default of 5 minutes. This is for ease of use when there are multiple administrative tasks to perform. The timeout can be modified to suit local security policies." + rationale: "Setting a timeout value reduces the window of opportunity for unauthorized privileged access to another user." 
+ remediation: "If the currently configured timeout is larger than 15 minutes, edit the file listed in the audit section with visudo -f and modify the entry timestamp_timeout= to 15 minutes or less as per your site policy. The value is in minutes. This particular entry may appear on it's own, or on the same line as env_reset. See the following two examples: Defaults env_reset, timestamp_timeout=15 Defaults timestamp_timeout=15 Defaults env_reset." + references: + - "https://www.sudo.ws/man/1.9.0/sudoers.man.html" + compliance: + - cis: ["5.3.6"] + - cis_csc_v8: ["5.4"] + - cis_csc_v7: ["4.3"] + - cmmc_v2.0: ["AC.L2-3.1.5", "AC.L2-3.1.6", "AC.L2-3.1.7", "SC.L2-3.13.3"] + - iso_27001-2013: ["A.9.2.3"] + - nist_sp_800-53: ["AC-6(2)", "AC-6(5)"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - soc_2: ["CC6.1", "CC6.3"] + condition: any + rules: + - 'f:/etc/sudoers -> n:timestamp_timeout=(\d+) compare =< 15 && n:timestamp_timeout=(\d+) compare != -1' + - 'c:sudo -V -> r:Authentication timestamp timeout:\s*\t*5.0 minutes' + + # 5.3.7 Ensure access to the su command is restricted. (Automated) + - id: 6686 + title: "Ensure access to the su command is restricted." + description: "The su command allows a user to run a command or shell as another user. The program has been superseded by sudo, which allows for more granular control over privileged access. Normally, the su command can be executed by any user. By uncommenting the pam_wheel.so statement in /etc/pam.d/su, the su command will only allow users in a specific groups to execute su. This group should be empty to reinforce the use of sudo for privileged access." + rationale: "Restricting the use of su , and using sudo in its place, provides system administrators better control of the escalation of user privileges to execute privileged commands. The sudo utility also provides a better logging and audit mechanism, as it can log each command executed via sudo , whereas su can only record that a user executed the su program." + remediation: "Create an empty group that will be specified for use of the su command. The group should be named according to site policy. Example: # groupadd sugroup Add the following line to the /etc/pam.d/su file, specifying the empty group: auth required pam_wheel.so use_uid group=sugroup." compliance: - - cis: ["5.4.1"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2.3"] - - tsc: ["CC6.1"] + - cis: ["5.3.7"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - "f:/etc/pam.d/password-auth -> r:pam_pwquality.so && r:try_first_pass" - - "f:/etc/pam.d/system-auth -> r:pam_pwquality.so && r:try_first_pass" - - 'f:/etc/security/pwquality.conf -> n:^\s*minlen\s+\t*=\s+\t*(\d+) compare >= 14' + - 'f:/etc/pam.d/su -> !r:^\s*\t*# && r:auth\s*\t*required\s*\t*pam_wheel.so && r:use_uid && r:group=\w+' - # 5.4.2 Ensure lockout for failed password attempts is configured (Scored) - - id: 6662 - title: "Ensure lockout for failed password attempts is configured" - description: "Lock out users after n unsuccessful consecutive login attempts. deny= - Number of attempts before the account is locked. unlock_time= - Time in seconds before the account is unlocked. 
Set the lockout number and unlock time to follow local site policy." + # 5.4.1 Ensure custom authselect profile is used. (Manual) - Not Implemented + + # 5.4.2 Ensure authselect includes with-faillock. (Automated) + - id: 6687 + title: "Ensure authselect includes with-faillock." + description: "The pam_faillock.so module maintains a list of failed authentication attempts per user during a specified interval and locks the account in case there were more than deny consecutive failed authentications. It stores the failure records into per-user files in the tally directory." rationale: "Locking out user IDs after n unsuccessful consecutive login attempts mitigates brute force password attacks against your systems." - remediation: "Set password lockouts and unlock times to conform to site policy. Run the following to update the system-auth and password-auth files. This script will update/add the deny=5 and unlock_time=900 options. This script should be modified as needed to follow local site policy.CP=$(authselect current | awk \"NR == 1 {print $3}\" | grep custom/) for FN in system-auth password-auth; do [[ -n $CP ]] && PTF=/etc/authselect/$CP/$FN || PTF=/etc/authselect/$FN [[ -n $(grep -E \"^\\s*auth\\s+required\\s+pam_faillock.so\\s+.*deny=\\S+\\s*.*$\" $PTF) ]] && sed -ri \"/pam_faillock.so/s/deny=\\S+/deny=5/g\" $PTF || sed -ri \"s/^\\^\\s*(auth\\s+required\\s+pam_faillock\\.so\\s+)(.*[^{}])(\\{.*\\}|)$/\\1\\2 deny=5 \\3/\" $PTF [[ -n $(grep -E \"^\\s*auth\\s+required\\s+pam_faillock.so\\s+.*unlock_time=\\S+\\s*.*$\" $PTF) ]] && sed -ri \"/pam_faillock.so/s/unlock_time=\\S+/unlock_time=900/g\" $PTF || sed -ri \"s/^\\s*(auth\\s+required\\s+pam_faillock\\.so\\s+)(.*[^{}])(\\{.*\\}|)$/\\1\\2 unlock_time=900 \\3/\" $PTF done authselect apply-changes" + remediation: "Run the following commands to include the with-faillock option to the current authselect profile: # authselect enable-feature with-faillock # authselect apply-changes." compliance: - cis: ["5.4.2"] - - cis_csc: ["16.7"] - - pci_dss: ["8.2.5"] - - tsc: ["CC6.1"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["16.7"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.9.2.6"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all + rules: + - "f:/etc/pam.d/password-auth -> r:required && r:pam_faillock.so" + - "f:/etc/pam.d/system-auth -> r:required && r:pam_faillock.so" + + # 5.5.1 Ensure password creation requirements are configured. (Automated) + - id: 6688 + title: "Ensure password creation requirements are configured." + description: "The pam_pwquality.so module checks the strength of passwords. It performs checks such as making sure a password is not a dictionary word, it is a certain length, contains a mix of characters (e.g. alphabet, numeric, other) and more. The following are definitions of the pam_pwquality.so options. - try_first_pass - retrieve the password from a previous stacked PAM module. If not available, then prompt the user for a password. - retry=3 - Allow 3 tries before sending back a failure. 
- minlen=14 - password must be 14 characters or more ** Either of the following can be used to enforce complex passwords:** - minclass=4 - provide at least four classes of characters for the new password OR - dcredit=-1 - provide at least one digit - ucredit=-1 - provide at least one uppercase character - ocredit=-1 - provide at least one special character - lcredit=-1 - provide at least one lowercase character The settings shown above are one possible policy. Alter these values to conform to your own organization's password policies." + rationale: "Strong passwords protect systems from being hacked through brute force methods." + remediation: "Edit the file /etc/security/pwquality.conf and add or modify the following line for password length to conform to site policy minlen = 14 Edit the file /etc/security/pwquality.conf and add or modify the following line for password complexity to conform to site policy minclass = 4 OR dcredit = -1 ucredit = -1 ocredit = -1 lcredit = -1 Run the following script to update the system-auth and password-auth files #!/usr/bin/env bash for fn in system-auth password-auth; do file=\"/etc/authselect/$(head -1 /etc/authselect/authselect.conf | grep 'custom/')/$fn\" if ! grep -Pq -- '^\\h*password\\h+requisite\\h+pam_pwquality.so(\\h+[^#\\n\\r]+)?\\h+.*enforce_for_r oot\\b.*$' \"$file\"; then sed -ri 's/^\\s*(password\\s+requisite\\s+pam_pwquality.so\\s+)(.*)$/\\1\\2 enforce_for_root/' \"$file\" fi if grep -Pq -- '^\\h*password\\h+requisite\\h+pam_pwquality.so(\\h+[^#\\n\\r]+)?\\h+retry=([4- 9]|[1-9][0-9]+)\\b.*$' \"$file\"; then sed -ri '/pwquality/s/retry=\\S+/retry=3/' \"$file\" elif ! grep -Pq -- '^\\h*password\\h+requisite\\h+pam_pwquality.so(\\h+[^#\\n\\r]+)?\\h+retry=\\d+\\b.*$' \"$file\"; then sed -ri 's/^\\s*(password\\s+requisite\\s+pam_pwquality.so\\s+)(.*)$/\\1\\2 retry=3/' \"$file\" fi done authselect apply-changes." + compliance: + - cis: ["5.5.1"] + - cis_csc_v8: ["5.2"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["IA.L2-3.5.7"] + - iso_27001-2013: ["A.9.4.3"] + - pci_dss_v4.0: ["2.2.2", "8.3.5", "8.3.6", "8.6.3"] + - soc_2: ["CC6.1"] condition: all rules: - - 'f:/etc/pam.d/password-auth -> r:^\s*auth\.+required\.+pam_faillock.so\.+ && n:deny=(\d+) compare <= 5 && n:unlock_time=(\d+) compare >= 900' - - 'f:/etc/pam.d/system-auth -> r:^\s*auth\.+required\.+pam_faillock.so\.+ && n:deny=(\d+) compare <= 5 && n:unlock_time=(\d+) compare >= 900' + - "f:/etc/pam.d/password-auth -> r:pam_pwquality.so && r:try_first_pass && r:retry=" + - "f:/etc/pam.d/system-auth -> r:pam_pwquality.so && r:try_first_pass && r:retry=" + - 'f:/etc/security/pwquality.conf -> n:^\s*minlen\s+\t*=\s+\t*(\d+) compare >= 14' + - 'f:/etc/security/pwquality.conf -> r:^\s*minclass|^\s*\Scredit' - # 5.4.3 Ensure password reuse is limited (Scored) - - id: 6663 - title: "Ensure password reuse is limited" - description: 'The /etc/security/opasswd file stores the users" old passwords and can be checked to ensure that users are not recycling recent passwords. remember=<5> - Number of old passwords to remember' - rationale: "Forcing users not to reuse their past 5 passwords make it less likely that an attacker will be able to guess the password. Note that these change only apply to accounts configured on the local system." - remediation: "Set remembered password history to conform to site policy. 
Run the following script to add or modify the pam_pwhistory.so and pam_unix.so lines to include the remember option: CP=$(authselect current | awk \"NR == 1 {print $3}\" | grep custom/) [[ -n $CP ]] && PTF=/etc/authselect/$CP/system-auth || PTF=/etc/authselect/system-auth [[ -n $(grep -E \"^\\s*password\\s+(sufficient\\s+pam_unix|requi(red|site)\\s+pam_pwhistory).so\\s+ ([^#]+\\s+)*remember=\\S+\\s*.*$\" $PTF) ]] && sed -ri \"s/^\\s*(password\\s+(requisite|sufficient)\\s+(pam_pwquality\\.so|pam_unix\\.so)\\s+)(.*)(remember=\\S+\\s*)(.*)$/\\1\\4 remember=5 \\6/\" $PTF || sed -ri \"s/^\\s*(password\\s+(requisite|sufficient)\\s+(pam_pwquality\\.so|pam_unix\\.so)\\s+)(.*)$/\\1\\4 remember=5/\" $PTF authselect apply-changes" + # 5.5.2 Ensure lockout for failed password attempts is configured. (Automated) - Not Implemented + + # 5.5.3 Ensure password reuse is limited. (Automated) + - id: 6689 + title: "Ensure password reuse is limited." + description: "The /etc/security/opasswd file stores the users' old passwords and can be checked to ensure that users are not recycling recent passwords. - remember=<5> - Number of old passwords to remember." + rationale: "Forcing users not to reuse their past 5 passwords make it less likely that an attacker will be able to guess the password. Note: These change only apply to accounts configured on the local system." + remediation: "Set remembered password history to conform to site policy. Run the following script to add or modify the pam_pwhistory.so and pam_unix.so lines to include the remember option: #!/usr/bin/env bash { file=\"/etc/authselect/$(head -1 /etc/authselect/authselect.conf | grep 'custom/')/system-auth\" if ! grep -Pq -- '^\\h*password\\h+(requisite|required|sufficient)\\h+pam_pwhistory\\.so\\h+([^#\\n\\ r]+\\h+)?remember=([5-9]|[1-9][0-9]+)\\b.*$' \"$file\"; then if grep -Pq -- '^\\h*password\\h+(requisite|required|sufficient)\\h+pam_pwhistory\\.so\\h+([^#\\n\\ r]+\\h+)?remember=\\d+\\b.*$' \"$file\"; then sed -ri 's/^\\s*(password\\s+(requisite|required|sufficient)\\s+pam_pwhistory\\.so\\s+([^# \\n\\r]+\\s+)?)(remember=\\S+\\s*)(\\s+.*)?$/\\1 remember=5 \\5/' $file elif grep -Pq -- '^\\h*password\\h+(requisite|required|sufficient)\\h+pam_pwhistory\\.so\\h+([^#\\n\\ r]+\\h+)?.*$' \"$file\"; then sed -ri '/^\\s*password\\s+(requisite|required|sufficient)\\s+pam_pwhistory\\.so/ s/$/ remember=5/' $file else sed -ri '/^\\s*password\\s+(requisite|required|sufficient)\\s+pam_unix\\.so/i password required pam_pwhistory.so remember=5 use_authtok' $file fi fi if ! grep -Pq -- '^\\h*password\\h+(requisite|required|sufficient)\\h+pam_unix\\.so\\h+([^#\\n\\r]+\\h +)?remember=([5-9]|[1-9][0-9]+)\\b.*$' \"$file\"; then if grep -Pq -- '^\\h*password\\h+(requisite|required|sufficient)\\h+pam_unix\\.so\\h+([^#\\n\\r]+\\h +)?remember=\\d+\\b.*$' \"$file\"; then sed -ri 's/^\\s*(password\\s+(requisite|required|sufficient)\\s+pam_unix\\.so\\s+([^#\\n\\r] +\\s+)?)(remember=\\S+\\s*)(\\s+.*)?$/\\1 remember=5 \\5/' $file else sed -ri '/^\\s*password\\s+(requisite|required|sufficient)\\s+pam_unix\\.so/ s/$/ remember=5/' $file fi fi authselect apply-changes }." 
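The audit behind this remediation is narrow: some password line for pam_pwhistory.so or pam_unix.so in the authselect-generated system-auth file must carry remember= set to 5 or more. A small sketch of that check follows, assuming /etc/pam.d/system-auth is the file to inspect (as in the rule that follows) and that it is readable; remember_ok is an illustrative name.

#!/usr/bin/env python3
# Illustrative sketch only: verify that a password line for pam_pwhistory.so or
# pam_unix.so carries remember=5 or higher, which is what the rule below expresses.
import re
from pathlib import Path

PATTERN = re.compile(
    r"^\s*password\s+(requisite|required|sufficient)\s+"
    r"(pam_pwhistory\.so|pam_unix\.so).*\bremember=(\d+)", re.MULTILINE)

def remember_ok(pam_file="/etc/pam.d/system-auth", minimum=5):
    text = Path(pam_file).read_text()
    return any(int(m.group(3)) >= minimum for m in PATTERN.finditer(text))

if __name__ == "__main__":
    print("password reuse limit configured:", remember_ok())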
compliance: - - cis: ["5.4.3"] - - cis_csc: ["16"] - - pci_dss: ["8.2.5"] - - tsc: ["CC6.1"] + - cis: ["5.5.3"] + - cis_csc_v8: ["5.2"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["IA.L2-3.5.7"] + - iso_27001-2013: ["A.9.4.3"] + - pci_dss_v4.0: ["2.2.2", "8.3.5", "8.3.6", "8.6.3"] + - soc_2: ["CC6.1"] condition: all rules: - - 'f:/etc/pam.d/system-auth -> r:^\s*password\.+requisite\.+pam_pwquality\.so\.+ && n:remember=(\d+) compare >= 5' - - 'f:/etc/pam.d/system-auth -> r:^\s*password\.+sufficient\.+pam_unix\.so\.+ && n:remember=(\d+) compare >= 5' + - 'f:/etc/pam.d/system-auth -> !r:^\s*\t*# && r:password\s*\t*requisite|password\s*\t*sufficient && r:pam_pwhistory.so|pam_unix.so && n:remember\s*\t*=\s*\t*(\d+) compare => 5' - # 5.4.4 Ensure password hashing algorithm is SHA-512 (Scored) - - id: 6664 - title: "Ensure password hashing algorithm is SHA-512" - description: "The commands below change password encryption from md5 to sha512 (a much stronger hashing algorithm). All existing accounts will need to perform a password change to upgrade the stored hashes to the new algorithm." - rationale: "The SHA-512 algorithm provides much stronger hashing than MD5, thus providing additional protection to the system by increasing the level of effort for an attacker to successfully determine passwords. Note that these change only apply to accounts configured on the local system." - remediation: "Set password hashing algorithm to sha512. Run the following script to dd or modify the pam_unix.so lines in the password-auth and system-auth files to include the sha512 option: CP=$(authselect current | awk 'NR == 1 {print $3}' | grep custom/) for FN in system-auth password-auth; do [[ -z $(grep -E '^\\s*password\\s+sufficient\\s+pam_unix.so\\s+.*sha512\\s*.*$' $PTF) ]] && sed -ri 's/^\\s*(password\\s+sufficient\\s+pam_unix.so\\s+)(.*)$/\\1\\2 sha512/' $PTF done authselect apply-changes" - compliance: - - cis: ["5.4.4"] - - cis_csc: ["16.4"] - - pci_dss: ["3.6.1", "8.2.1"] - - tsc: ["CC6.1", "CC6.7"] - condition: all - rules: - - 'f:/etc/pam.d/password-auth -> r:^\s*password\.+sufficient\.+pam_unix\.so && r:sha512' - - 'f:/etc/pam.d/system-auth -> r:^\s*password\.+sufficient\.+pam_unix\.so && r:sha512' - - ############################################### - # 5.5 User Accounts and Environment - ############################################### - ############################################### - # 5.5.1 Set Shadow Password Suite Parameters - ############################################### - # 5.5.1.1 Ensure password expiration is 365 days or less (Scored) - - id: 6665 - title: "Ensure password expiration is 365 days or less" + # 5.5.4 Ensure password hashing algorithm is SHA-512. (Automated) + - id: 6690 + title: "Ensure password hashing algorithm is SHA-512." + description: "A cryptographic hash function converts an arbitrary-length input into a fixed length output. Password hashing performs a one-way transformation of a password, turning the password into another string, called the hashed password." + rationale: "The SHA-512 algorithm provides stronger hashing than other hashing algorithms used for password hashing with Linux, providing additional protection to the system by increasing the level of effort for an attacker to successfully determine passwords. Note: These changes only apply to accounts configured on the local system." + remediation: "Set password hashing algorithm to sha512. 
Edit /etc/libuser.conf and edit of add the following line: crypt_style = sha512 Edit /etc/login.defs and edit or add the following line: ENCRYPT_METHOD SHA512 Run the following script to configure pam_unix.so to use the sha512 hashing algorithm: #!/usr/bin/env bash for fn in system-auth password-auth; do file=\"/etc/authselect/$(head -1 /etc/authselect/authselect.conf | grep 'custom/')/$fn\" if ! grep -Pq -- '^\\h*password\\h+(requisite|required|sufficient)\\h+pam_unix\\.so(\\h+[^#\\n\\r]+)? \\h+sha512\\b.*$' \"$file\"; then if grep -Pq -- '^\\h*password\\h+(requisite|required|sufficient)\\h+pam_unix\\.so(\\h+[^#\\n\\r]+)? \\h+(md5|blowfish|bigcrypt|sha256)\\b.*$' \"$file\"; then sed -ri 's/(md5|blowfish|bigcrypt|sha256)/sha512/' \"$file\" else sed -ri 's/(^\\s*password\\s+(requisite|required|sufficient)\\s+pam_unix.so\\s+)(.*)$/\\1s ha512 \\3/' $file fi fi done authselect apply-changes Note: This only effects local users and passwords created after updating the files to use sha512. If it is determined that the password algorithm being used is not SHA-512, once it is changed, it is recommended that all user ID's be immediately expired and forced to change their passwords on next login." + compliance: + - cis: ["5.5.4"] + - cis_csc_v8: ["3.11"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L2-3.1.19", "IA.L2-3.5.10", "MP.L2-3.8.1", "SC.L2-3.13.11", "SC.L2-3.13.16"] + - hipaa: ["164.312(a)(2)(iv)", "164.312(e)(2)(ii)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["SC-28", "SC-28(1)"] + - pci_dss_v3.2.1: ["3.4", "3.4.1", "8.2.1"] + - pci_dss_v4.0: ["3.1.1", "3.3.2", "3.3.3", "3.5.1", "3.5.1.2", "3.5.1.3", "8.3.2"] + - soc_2: ["CC6.1"] + condition: all + rules: + - 'f:/etc/login.defs -> r:^\s*ENCRYPT_METHOD SHA512' + - 'f:/etc/libuser.conf -> r:^\s*crypt_style = sha512' + - 'f:/etc/pam.d/password-auth -> r:\s*password && r:requisite|required|sufficient && r:pam_unix.so && r:sha512' + - 'f:/etc/pam.d/system-auth -> r:\s*password && r:requisite|required|sufficient && r:pam_unix.so && r:sha512' + + # 5.6.1.1 Ensure password expiration is 365 days or less. (Automated) + - id: 6691 + title: "Ensure password expiration is 365 days or less." description: "The PASS_MAX_DAYS parameter in /etc/login.defs allows an administrator to force passwords to expire once they reach a defined age. It is recommended that the PASS_MAX_DAYS parameter be set to less than or equal to 365 days." rationale: "The window of opportunity for an attacker to leverage compromised credentials or successfully compromise credentials via an online brute force attack is limited by the age of the password. Therefore, reducing the maximum age of a password also reduces an attacker's window of opportunity." - remediation: "Set the PASS_MAX_DAYS parameter to conform to site policy in /etc/login.defs : PASS_MAX_DAYS 90 and modify user parameters for all users with a password set to match: chage --maxdays 90 " + remediation: "Set the PASS_MAX_DAYS parameter to conform to site policy in /etc/login.defs : PASS_MAX_DAYS 365 Modify user parameters for all users with a password set to match: # chage --maxdays 365 ." 
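The corresponding audit checks two places: PASS_MAX_DAYS in /etc/login.defs must be 365 or less, and no account with a password hash in /etc/shadow may carry a larger value in its fifth (maximum age) field. A sketch of both halves is shown below, assuming the script runs as root so /etc/shadow is readable; the function names are illustrative only.

#!/usr/bin/env python3
# Illustrative sketch only: PASS_MAX_DAYS <= 365 in /etc/login.defs and no user
# in /etc/shadow with a max-age (5th field) above 365, as the rules below check.
from pathlib import Path

def pass_max_days_ok(limit=365):
    for line in Path("/etc/login.defs").read_text().splitlines():
        fields = line.split()
        if len(fields) >= 2 and fields[0] == "PASS_MAX_DAYS":
            return fields[1].isdigit() and int(fields[1]) <= limit
    return False

def shadow_max_age_violations(limit=365):
    bad = []
    for entry in Path("/etc/shadow").read_text().splitlines():
        fields = entry.split(":")
        # shadow fields: name, password, lastchg, min, max, warn, inactive, expire, flag
        if len(fields) > 4 and fields[1].startswith("$") and fields[4].isdigit():
            if int(fields[4]) > limit:
                bad.append(fields[0])
    return bad

if __name__ == "__main__":
    print("PASS_MAX_DAYS ok:", pass_max_days_ok())
    print("users over limit:", shadow_max_age_violations())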
compliance: - - cis: ["5.5.1.1"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2.4"] - - tsc: ["CC6.1"] + - cis: ["5.6.1.1"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.9.4.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:/etc/login.defs -> n:^\s*PASS_MAX_DAYS\s*\t*(\d+) compare <= 365' + - 'f:/etc/login.defs -> n:^\s*\t*PASS_MAX_DAYS\s*\t*(\d+) compare <= 365' + - 'not f:/etc/shadow -> n:^\w+:\$\.*:\d+:\d+:(\d+): compare > 365' - # 5.5.1.2 Ensure minimum days between password changes is 7 or more (Scored) - - id: 6666 - title: "Ensure minimum days between password changes is 7 or more" + # 5.6.1.2 Ensure minimum days between password changes is 7 or more. (Automated) + - id: 6692 + title: "Ensure minimum days between password changes is 7 or more." description: "The PASS_MIN_DAYS parameter in /etc/login.defs allows an administrator to prevent users from changing their password until a minimum number of days have passed since the last time the user changed their password. It is recommended that PASS_MIN_DAYS parameter be set to 7 or more days." rationale: "By restricting the frequency of password changes, an administrator can prevent users from repeatedly changing their password in an attempt to circumvent password reuse controls." - remediation: "Set the PASS_MIN_DAYS parameter to 7 in /etc/login.defs: PASS_MIN_DAYS 7 and modify user parameters for all users with a password set to match: chage --mindays 7 " + remediation: "Set the PASS_MIN_DAYS parameter to 7 in /etc/login.defs : PASS_MIN_DAYS 7 Modify user parameters for all users with a password set to match: # chage --mindays 7 ." compliance: - - cis: ["5.5.1.2"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + - cis: ["5.6.1.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.9.4.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:/etc/login.defs -> n:^\s*PASS_MIN_DAYS\s*\t*(\d+) compare >= 7' + - 'f:/etc/login.defs -> n:^\s*\t*PASS_MIN_DAYS\s*\t*(\d+) compare >= 7' + - 'not f:/etc/shadow -> n:^\w+:\$\.*:\d+:(\d+): compare < 7' - # 5.5.1.3 Ensure password expiration warning days is 7 or more (Scored) - - id: 6667 - title: "Ensure minimum days between password changes is 7 or more" + # 5.6.1.3 Ensure password expiration warning days is 7 or more. (Automated) + - id: 6693 + title: "Ensure password expiration warning days is 7 or more." description: "The PASS_WARN_AGE parameter in /etc/login.defs allows an administrator to notify users that their password will expire in a defined number of days. It is recommended that the PASS_WARN_AGE parameter be set to 7 or more days." rationale: "Providing an advance warning that a password will be expiring gives users time to think of a secure password. Users caught unaware may choose a simple password or write it down where it may be discovered." 
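These aging checks read the colon-separated fields of /etc/shadow directly. For reference, a purely illustrative way to print the same fields the regexes capture:

    # /etc/shadow fields: name:hash:lastchange:MIN:MAX:WARN:INACTIVE:EXPIRE:reserved
    awk -F: '($2 ~ /^\$/) {printf "%s min=%s max=%s warn=%s\n", $1, $4, $5, $6}' /etc/shadow
    chage --list root    # the same values, reported per account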
- remediation: "Set the PASS_WARN_AGE parameter to 7 in /etc/login.defs: PASS_WARN_AGE 7 and modify user parameters for all users with a password set to match: chage --warndays 7 " + remediation: "Set the PASS_WARN_AGE parameter to 7 in /etc/login.defs : PASS_WARN_AGE 7 Modify user parameters for all users with a password set to match: # chage --warndays 7 ." compliance: - - cis: ["5.5.1.3"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + - cis: ["5.6.1.3"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.9.4.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:/etc/login.defs -> n:^\s*PASS_WARN_AGE\s*\t*(\d+) compare >= 7' + - 'f:/etc/login.defs -> n:^\s*\t*PASS_WARN_AGE\s*\t*(\d+) compare >= 7' + - 'not f:/etc/shadow -> n:^\w+:\$\.*:\d+:\d+:\d+:(\d+): compare < 7' - # 5.4.1.4 Ensure inactive password lock is 30 days or less (Scored) - - id: 6668 - title: "Ensure inactive password lock is 30 days or less" + # 5.6.1.4 Ensure inactive password lock is 30 days or less. (Automated) + - id: 6694 + title: "Ensure inactive password lock is 30 days or less." description: "User accounts that have been inactive for over a given period of time can be automatically disabled. It is recommended that accounts that are inactive for 30 days after password expiration be disabled." rationale: "Inactive accounts pose a threat to system security since the users are not logging in to notice failed login attempts or other anomalies." - remediation: "Run the following command to set the default password inactivity period to 30 days: useradd -D -f 30 and modify user parameters for all users with a password set to match: chage --inactive 30 " - compliance: - - cis: ["5.4.1.4"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] - condition: all - rules: - - 'c:useradd -D -> n:^\s*INACTIVE\s*=\s*(\d+) compare <= 30' - - # 5.5.3 Ensure default user shell timeout is 900 seconds or less (Scored) - - id: 6669 - title: "Ensure default user shell timeout is 900 seconds or less" - description: "The default TMOUT determines the shell timeout for users. The TMOUT value is measured in seconds." - rationale: "Having no timeout value associated with a shell could allow an unauthorized user access to another user's shell session (e.g. user walks away from their computer and doesn't lock the screen). Setting a timeout value at least reduces the risk of this happening." - remediation: "Edit the /etc/bashrc , /etc/profile and /etc/profile.d/*.sh files (and the appropriate files for any other shell supported on your system) and add or edit any umask parameters as follows: readonly TMOUT=900 ; export TMOUT . Note that setting the value to readonly prevents unwanted modification during runtime." 
- compliance: - - cis: ["5.5.3"] - - cis_csc: ["16.11"] - - pci_dss: ["12.3.8"] - condition: all - rules: - - 'not f:/etc/bashrc -> !r:^# && n:TMOUT\s*\t*=\s*\t*(\d+) compare > 900' - - 'not c:grep -Rh TMOUT /etc/profile /etc/profile.d/*.sh -> !r:^# && n:TMOUT\s*\t*=\s*\t*(\d+) compare > 900' - - 'f:/etc/bashrc -> !r:^# && n:readonly TMOUT\s*=\s*(\d+)\s*; compare <= 900 && r:export TMOUT\s*$' - - 'c:grep -Rh TMOUT /etc/profile /etc/profile.d/*.sh -> !r:^# && n:readonly TMOUT\s*=\s*(\d+)\s*; compare <= 900 && r:export TMOUT\s*$' - - # 5.5.4 Ensure default group for the root account is GID 0 (Scored) - - id: 6670 - title: "Ensure default group for the root account is GID 0" - description: "The usermod command can be used to specify which group the root user belongs to. This affects permissions of files that are created by the root user." + remediation: "Run the following command to set the default password inactivity period to 30 days: # useradd -D -f 30 Modify user parameters for all users with a password set to match: # chage --inactive 30 ." + compliance: + - cis: ["5.6.1.4"] + - cis_csc_v8: ["5.2"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["IA.L2-3.5.7"] + - iso_27001-2013: ["A.9.4.3"] + - pci_dss_v4.0: ["2.2.2", "8.3.5", "8.3.6", "8.6.3"] + - soc_2: ["CC6.1"] + condition: all + rules: + - "not c:useradd -D -> r:^INACTIVE=-1" + - 'c:useradd -D -> n:^INACTIVE=(\d+) compare <= 30' + - 'not f:/etc/shadow -> n:^\w+:\w+:\w+:\w+:\w+:\w+:(\d+): compare > 30' + - 'not f:/etc/shadow -> r:^\w+:\w+:\w+:\w+:\w+:\w+:-1:' + + # 5.6.1.5 Ensure all users last password change date is in the past. (Automated) - Not Implemented + # 5.6.2 Ensure system accounts are secured. (Automated) - Not Implemented + # 5.6.3 Ensure default user shell timeout is 900 seconds or less. (Automated) - Not Implemented + + # 5.6.4 Ensure default group for the root account is GID 0. (Automated) + - id: 6695 + title: "Ensure default group for the root account is GID 0." + description: "The usermod command can be used to specify which group the root account belongs to. This affects permissions of files that are created by the root account." rationale: "Using GID 0 for the root account helps prevent root -owned files from accidentally becoming accessible to non-privileged users." - remediation: "Run the following command to set the root user default group to GID 0: usermod -g 0 root" + remediation: "Run the following command to set the root account default group to GID 0 : # usermod -g 0 root." compliance: - - cis: ["5.5.4"] - - cis_csc: ["5.1"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + - cis: ["5.6.4"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:/etc/passwd -> r:^root:\w:\w:0' + - 'f:/etc/passwd -> !r:^\s*\t*# && r:root:\w+:\w+:0:' - # 5.5.5 Ensure default user umask is 027 or more restrictive (Scored) - - id: 6671 - title: "Ensure default user umask is 027 or more restrictive" - description: "The default umask determines the permissions of files created by users. The user creating the file has the discretion of making their files and directories readable by others via the chmod command. 
Users who wish to allow their files and directories to be readable by others by default may choose a different default umask by inserting the umask command into the standard shell configuration files ( .profile , .bashrc , etc.) in their home directories." - rationale: "Setting a very secure default value for umask ensures that users make a conscious choice about their file permissions. A default umask setting of 077 causes files and directories created by users to not be readable by any other user on the system. A umask of 027 would make files and directories readable by users in the same Unix group, while a umask of 022 would make files readable by every user on the system." - remediation: "Edit the /etc/bashrc , /etc/profile and /etc/profile.d/*.sh files (and the appropriate files for any other shell supported on your system) and add or edit any umask parameters as follows: umask 027" - compliance: - - cis: ["5.5.5"] - - cis_csc: ["5.1", "13"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] - condition: none - rules: - - 'f:/etc/bashrc -> !r:^\s*\t*# && r:umask \d0\d|umask \d1\d|umask \d4\d|umask \d5\d' - - 'f:/etc/bashrc -> !r:^\s*\t*# && n:umask \d\d(\d) compare != 7' - - 'f:/etc/profile -> !r:^\s*\t*# && r:umask \d0\d|umask \d1\d|umask \d4\d|umask \d5\d' - - 'f:/etc/profile -> !r:^\s*\t*# && n:umask \d\d(\d) compare != 7' - - 'd:/etc/profile.d -> .sh -> !r:^\s*\t*# && r:umask \d0\d|umask \d1\d|umask \d4\d|umask \d5\d' - - 'd:/etc/profile.d -> .sh -> !r:^\s*t*# && n:umask \d\d(\d) compare != 7' + # 5.6.5 Ensure default user umask is 027 or more restrictive. (Automated) - Not Implemented - # 5.7 Ensure access to the su command is restricted (Scored) - - id: 6672 - title: "Ensure access to the su command is restricted." - description: "The su command allows a user to run a command or shell as another user. The program has been superseded by sudo , which allows for more granular control over privileged access. Normally, the su command can be executed by any user. By uncommenting the pam_wheel.so statement in /etc/pam.d/su , the su command will only allow users in the wheel group to execute su ." - rationale: "Restricting the use of su , and using sudo in its place, provides system administrators better control of the escalation of user privileges to execute privileged commands. The sudo utility also provides a better logging and audit mechanism, as it can log each command executed via sudo , whereas su can only record that a user executed the su program." - remediation: "Add the following line to the /etc/pam.d/su file: auth required pam_wheel.so use_uid" - compliance: - - cis: ["5.7"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'f:/etc/pam.d/su -> !r:# && r:auth\s*\t*required\s*\t*pam_wheel.so\s*\t*use_uid' - - ############################################### - # 6 System Maintenance - ############################################### - ############################################### - # 6.1 System File Permissions - ############################################### - # 6.1.2 Configure /etc/passwd permissions (Scored) - - id: 6673 - title: "Ensure permissions on /etc/passwd are configured" + # 6.1.1 Audit system file permissions. (Manual) - Not Implemented + # 6.1.2 Ensure sticky bit is set on all world-writable directories. 
(Automated) - Not Implemented + + # 6.1.3 Ensure permissions on /etc/passwd are configured. (Automated) + - id: 6696 + title: "Ensure permissions on /etc/passwd are configured." description: "The /etc/passwd file contains user account information that is used by many system utilities and therefore must be readable for these utilities to operate." rationale: "It is critical to ensure that the /etc/passwd file is protected from unauthorized write access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." - remediation: "Run the following command to set permissions on /etc/passwd: # chown root:root /etc/passwd # chmod 644 /etc/passwd" + remediation: "Run the following command to set permissions on /etc/passwd: # chown root:root /etc/passwd # chmod 644 /etc/passwd." compliance: - - cis: ["6.1.2"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:stat -L /etc/passwd -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - - # 6.1.3 Configure /etc/shadow permissions (Scored) - - id: 6674 - title: "Ensure permissions on /etc/shadow are configured" + - cis: ["6.1.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - 'c:stat -Lc "%n %a %u %U %g %G" /etc/passwd -> r:0 root 0 root && r:644|640|604|600|400|500' + + # 6.1.4 Ensure permissions on /etc/shadow are configured. (Automated) + - id: 6697 + title: "Ensure permissions on /etc/shadow are configured." description: "The /etc/shadow file is used to store the information about user accounts that is critical to the security of those accounts, such as the hashed password and other security information." rationale: "If attackers can gain read access to the /etc/shadow file, they can easily run a password cracking program against the hashed password to break it. Other security information that is stored in the /etc/shadow file (such as expiration) could also be useful to subvert the user accounts." - remediation: "Run the following command to set permissions on /etc/ shadow: # chown root:root /etc/shadow # chown root:shadow /etc/shadow # chmod o-rwx,g-wx /etc/shadow" + remediation: "Run the following commands to set owner, group, and permissions on /etc/shadow: # chown root:root /etc/shadow # chmod 0000 /etc/shadow." 
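The stat -Lc "%n %a %u %U %g %G" form used by the updated rules prints the file name, octal mode, numeric and symbolic owner, and numeric and symbolic group on one line, which is what the r:0 root 0 root pattern matches. A short sketch of the audit and fix on a lab system:

    stat -Lc "%n %a %u %U %g %G" /etc/passwd /etc/shadow   # e.g. "/etc/passwd 644 0 root 0 root"
    chown root:root /etc/passwd /etc/shadow
    chmod u-x,go-wx /etc/passwd                            # 644 or more restrictive
    chmod 0000 /etc/shadow                                 # shadow readable by no one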
compliance: - - cis: ["6.1.3"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:stat -L /etc/shadow -> r:Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)|Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*\w*/\s*\t*shadow\)' - - # 6.1.4 Configure /etc/group permissions (Scored) - - id: 6675 - title: "Ensure permissions on /etc/group are configured" + - cis: ["6.1.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - 'c:stat -Lc "%n %a %u %U %g %G" /etc/shadow -> r:0 root 0 root && n:shadow (\d+) compare == 0' + + # 6.1.5 Ensure permissions on /etc/group are configured. (Automated) + - id: 6698 + title: "Ensure permissions on /etc/group are configured." description: "The /etc/group file contains a list of all the valid groups defined in the system. The command below allows read/write access for root and read access for everyone else." rationale: "The /etc/group file needs to be protected from unauthorized changes by non-privileged users, but needs to be readable as this information is used with many non-privileged programs." - remediation: "Run the following command to set permissions on /etc/group: # chown root:root /etc/group # chmod 644 /etc/group" + remediation: "Run the following commands to set owner, group, and permissions on /etc/group: # chown root:root /etc/group # chmod u-x,g-wx,o-wx /etc/group." compliance: - - cis: ["6.1.4"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:stat -L /etc/group -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - - # 6.1.5 Configure /etc/gshadow permissions (Scored) - - id: 6676 - title: "Ensure permissions on /etc/gshadow are configured" + - cis: ["6.1.5"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - 'c:stat -Lc "%n %a %u %U %g %G" /etc/group -> r:0 root 0 root && r:644|640|604|600|400|500' + + # 6.1.6 Ensure permissions on /etc/gshadow are configured. (Automated) + - id: 6699 + title: "Ensure permissions on /etc/gshadow are configured." description: "The /etc/gshadow file is used to store the information about groups that is critical to the security of those accounts, such as the hashed password and other security information." - rationale: "If attackers can gain read access to the /etc/gshadow file, they can easily run a password cracking program against the hashed password to break it. 
Other security information that is stored in the /etc/gshadow file (such as group administrators) could also be useful to subvert the group" - remediation: "Run the following command to set permissions on /etc/gshadow: # chown root:root /etc/gshadow # chown root:shadow /etc/gshadow # chmod o-rwx,g-rw /etc/gshadow" + rationale: "If attackers can gain read access to the /etc/gshadow file, they can easily run a password cracking program against the hashed password to break it. Other security information that is stored in the /etc/gshadow file (such as group administrators) could also be useful to subvert the group." + remediation: "Run the following commands to set owner, group, and permissions on /etc/gshadow # chown root:root /etc/gshadow # chmod 0000 /etc/gshadow." compliance: - - cis: ["6.1.5"] - - cis_csc: ["16.14"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:stat -L /etc/gshadow -> r:Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)|Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*\w*/\s*\t*shadow\)' - - # 6.1.6 Configure /etc/passwd- permissions (Scored) - - id: 6677 - title: "Ensure permissions on /etc/passwd- are configured" + - cis: ["6.1.6"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - 'c:stat -Lc "%n %a %u %U %g %G" /etc/gshadow -> r:0 root 0 root && n:gshadow (\d+) compare == 0' + + # 6.1.7 Ensure permissions on /etc/passwd- are configured. (Automated) + - id: 6700 + title: "Ensure permissions on /etc/passwd- are configured." description: "The /etc/passwd- file contains backup user account information." rationale: "It is critical to ensure that the /etc/passwd- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." - remediation: "Run the following command to set permissions on /etc/passwd-: # chown root:root /etc/passwd- # chmod 600 /etc/passwd-" + remediation: "Run the following commands to set owner, group, and permissions on /etc/passwd-: # chown root:root /etc/passwd- # chmod chmod u-x,go-wx /etc/passwd-." 
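The same audit pattern repeats for the backup copies of these files; a compact, illustrative way to check all of them in one pass:

    for f in /etc/passwd- /etc/group- /etc/shadow- /etc/gshadow-; do
        [ -e "$f" ] && stat -Lc "%n %a %u %U %g %G" "$f"
    done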
compliance: - - cis: ["6.1.6"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:stat -L /etc/passwd- -> r:Access:\s*\(0\d00/-\w\w-------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - - # 6.1.7 Configure /etc/shadow- permissions (Scored) - - id: 6678 - title: "Ensure permissions on /etc/shadow- are configured" + - cis: ["6.1.7"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - 'c:stat -Lc "%n %a %u %U %g %G" /etc/passwd- -> r:0 root 0 root && r:644|640|604|600|400|500' + + # 6.1.8 Ensure permissions on /etc/shadow- are configured. (Automated) + - id: 6701 + title: "Ensure permissions on /etc/shadow- are configured." description: "The /etc/shadow- file is used to store backup information about user accounts that is critical to the security of those accounts, such as the hashed password and other security information." rationale: "It is critical to ensure that the /etc/shadow- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." - remediation: "Run the following command to set permissions on /etc/shadow-: # chown root:shadow /etc/shadow- # chmod u-x,go-rwx /etc/shadow-" + remediation: "Run the following commands to set owner, group, and permissions on /etc/shadow- : # chown root:root /etc/shadow- # chmod 0000 /etc/shadow-." compliance: - - cis: ["6.1.7"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:stat -L /etc/shadow- -> r:Access:\s*\(0\d00/-\w\w-------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)|Access:\s*\(0\d00/-\w\w-------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*\w*/\s*\t*shadow\)' - - # 6.1.8 Configure /etc/group- permissions (Scored) - - id: 6679 - title: "Ensure permissions on /etc/group- are configured" + - cis: ["6.1.8"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - 'c:stat -Lc "%n %a %u %U %g %G" /etc/shadow- -> r:0 root 0 root && n:shadow- (\d+) compare == 0' + + # 6.1.9 Ensure permissions on /etc/group- are configured. (Automated) + - id: 6702 + title: "Ensure permissions on /etc/group- are configured." description: "The /etc/group- file contains a backup list of all the valid groups defined in the system." rationale: "It is critical to ensure that the /etc/group- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." 
- remediation: "Run the following command to set permissions on /etc/group-: # chown root:root /etc/group- # chmod 644 /etc/group-" + remediation: "Run the following commands to set owner, group, and permissions on /etc/group-: # chown root:root /etc/group- # chmod u-x,go-wx /etc/group-." compliance: - - cis: ["6.1.8"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:stat -L /etc/group- -> r:Access:\s*\(0\d\d\d/-\w\w-\w--\w--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - - # 6.1.9 Configure /etc/gshadow- permissions (Scored) - - id: 6680 - title: "Ensure permissions on /etc/gshadow- are configured" + - cis: ["6.1.9"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - 'c:stat -Lc "%n %a %u %U %g %G" /etc/group- -> r:0 root 0 root && r:644|640|604|600|400|500' + + # 6.1.10 Ensure permissions on /etc/gshadow- are configured. (Automated) + - id: 6703 + title: "Ensure permissions on /etc/gshadow- are configured." description: "The /etc/gshadow- file is used to store backup information about groups that is critical to the security of those accounts, such as the hashed password and other security information." rationale: "It is critical to ensure that the /etc/gshadow- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." - remediation: "Run the one of the following chown commands as appropriate and the chmod to set permissions on /etc/gshadow- : # chown root:root /etc/gshadow- # chown root:shadow /etc/gshadow- # chmod o-rwx,g-rw /etc/gshadow-" - compliance: - - cis: ["6.1.9"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:stat -L /etc/gshadow- -> r:Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)|Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*\w*/\s*\t*shadow\)' - - ############################################### - # 6.2 Review User and Group Settings - ############################################### - # 6.2.1 Check passwords fields (Scored) - - id: 6681 - title: "Ensure password fields are not empty" + remediation: "Run the following commands to set owner, group, and permissions on /etc/gshadow- : # chown root:root /etc/gshadow- # chmod 0000 /etc/gshadow-." + compliance: + - cis: ["6.1.10"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - 'c:stat -Lc "%n %a %u %U %g %G" /etc/gshadow- -> r:0 root 0 root && n:gshadow- (\d+) compare == 0' + + # 6.1.11 Ensure no world writable files exist. 
(Automated) - Not Implemented + # 6.1.12 Ensure no unowned files or directories exist. (Automated) - Not Implemented + # 6.1.13 Ensure no ungrouped files or directories exist. (Automated) - Not Implemented + # 6.1.14 Audit SUID executables. (Manual) - Not Implemented + # 6.1.15 Audit SGID executables. (Manual) - Not Implemented + + # 6.2.1 Ensure password fields are not empty. (Automated) + - id: 6704 + title: "Ensure password fields are not empty." description: "An account with an empty password field means that anybody may log in as that user without providing a password." rationale: "All accounts must have passwords or be locked to prevent the account from being used by an unauthorized user." - remediation: "If any accounts in the /etc/shadow file do not have a password, run the following command to lock the account until it can be determined why it does not have a password: passwd -l || Also, check to see if the account is logged in and investigate what it is being used for to determine if it needs to be forced off." + remediation: "If any accounts in the /etc/shadow file do not have a password, run the following command to lock the account until it can be determined why it does not have a password: # passwd -l Also, check to see if the account is logged in and investigate what it is being used for to determine if it needs to be forced off." compliance: - cis: ["6.2.1"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] - condition: none - rules: - - 'f:/etc/shadow -> !r:^# && r:^\w+::' - - # 6.2.2 Delete legacy entries in /etc/passwd (Scored) - - id: 6682 - title: 'Ensure no legacy "+" entries exist in /etc/passwd' - description: "The character + in various files used to be markers for systems to insert data from NIS maps at a certain point in a system configuration file. These entries are no longer required on most systems, but may exist in files that have been imported from other platforms." - rationale: "These entries may provide an avenue for attackers to gain privileged access on the system." - remediation: "Remove any legacy '+' entries from /etc/passwd if they exist." - compliance: - - cis: ["6.2.2"] - - cis_csc: ["16.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: none - rules: - - "f:/etc/passwd -> !r:^# && r:^+:" - - # 6.2.4 Delete legacy entries in /etc/shadow (Scored) - - id: 6683 - title: 'Ensure no legacy "+" entries exist in /etc/shadow' - description: "The character + in various files used to be markers for systems to insert data from NIS maps at a certain point in a system configuration file. These entries are no longer required on most systems, but may exist in files that have been imported from other platforms." - rationale: "These entries may provide an avenue for attackers to gain privileged access on the system." - remediation: "Remove any legacy '+' entries from /etc/shadow if they exist." 
- compliance: - - cis: ["6.2.4"] - - cis_csc: ["16.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["5.2"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["IA.L2-3.5.7"] + - iso_27001-2013: ["A.9.4.3"] + - pci_dss_v4.0: ["2.2.2", "8.3.5", "8.3.6", "8.6.3"] + - soc_2: ["CC6.1"] + condition: all rules: - - "f:/etc/shadow -> !r:^# && r:^+:" + - 'not f:/etc/shadow -> !r:^# && r:^\w+::' - # 6.2.5 Delete legacy entries in /etc/group (Scored) - - id: 6684 - title: 'Ensure no legacy "+" entries exist in /etc/group' - description: "The character + in various files used to be markers for systems to insert data from NIS maps at a certain point in a system configuration file. These entries are no longer required on most systems, but may exist in files that have been imported from other platforms." - rationale: "These entries may provide an avenue for attackers to gain privileged access on the system." - remediation: "Remove any legacy '+' entries from /etc/group if they exist." - compliance: - - cis: ["6.2.5"] - - cis_csc: ["16.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: none - rules: - - "f:/etc/group -> !r:^# && r:^+:" + # 6.2.2 Ensure all groups in /etc/passwd exist in /etc/group. (Automated) - Not Implemented + # 6.2.3 Ensure no duplicate UIDs exist. (Automated) - Not Implemented + # 6.2.4 Ensure no duplicate GIDs exist. (Automated) - Not Implemented + # 6.2.5 Ensure no duplicate user names exist. (Automated) - Not Implemented + # 6.2.7 Ensure root PATH Integrity. (Automated) - Not Implemented - # 6.2.6 Verify No UID 0 Accounts Exist Other Than root (Scored) - - id: 6685 - title: "Ensure root is the only UID 0 account" + # 6.2.8 Ensure root is the only UID 0 account. (Automated) + - id: 6705 + title: "Ensure root is the only UID 0 account." description: "Any account with UID 0 has superuser privileges on the system." - rationale: "This access must be limited to only the default root account and only from the system console. Administrative access must be through an unprivileged account using an approved mechanism as noted in Item 5.6 Ensure access to the su command is restricted." + rationale: 'This access must be limited to only the default root account and only from the system console. Administrative access must be through an unprivileged account using an approved mechanism as noted in recommendation "Ensure access to the su command is restricted".' remediation: "Remove any users other than root with UID 0 or assign them a new UID if appropriate." 
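Both audits in this group have simple command-line equivalents an administrator might run before locking or re-assigning an account (a sketch; the account name passed to passwd -l is a placeholder):

    awk -F: '($2 == "") {print $1}' /etc/shadow    # accounts with an empty password field
    awk -F: '($3 == 0) {print $1}' /etc/passwd     # UID 0 accounts; only root should be listed
    # passwd -l suspect_account                    # lock an account found by the first query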
compliance: - - cis: ["6.2.6"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: none - rules: - - 'f:/etc/passwd -> !r:^# && !r:^\s*\t*root: && r:^\w+:\w+:0:' + - cis: ["6.2.8"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all + rules: + - 'not f:/etc/passwd -> !r:^\s*\t*# && !r:^root: && r:^\w+:\w+:0:' + + # 6.2.9 Ensure all users' home directories exist. (Automated) - Not Implemented + # 6.2.10 Ensure users own their home directories. (Automated) - Not Implemented + # 6.2.11 Ensure users' home directories permissions are 750 or more restrictive. (Automated) - Not Implemented + # 6.2.12 Ensure users' dot files are not group or world writable. (Automated) - Not Implemented + # 6.2.13 Ensure users' .netrc Files are not group or world accessible. (Automated) - Not Implemented + # 6.2.14 Ensure no users have .forward files. (Automated) - Not Implemented + # 6.2.15 Ensure no users have .netrc files. (Automated) - Not Implemented + # 6.2.16 Ensure no users have .rhosts files. (Automated) - Not Implemented From 8456bb6a1ba681e89a111befd606c970ce956b0a Mon Sep 17 00:00:00 2001 From: Openime Oniagbi Date: Fri, 31 May 2024 18:40:50 +0300 Subject: [PATCH 384/419] Update cis_rhel7_linux.yml --- ruleset/sca/rhel/7/cis_rhel7_linux.yml | 5761 ++++++++++++++---------- 1 file changed, 3389 insertions(+), 2372 deletions(-) diff --git a/ruleset/sca/rhel/7/cis_rhel7_linux.yml b/ruleset/sca/rhel/7/cis_rhel7_linux.yml index 5739f888a47..bdfa8fec565 100644 --- a/ruleset/sca/rhel/7/cis_rhel7_linux.yml +++ b/ruleset/sca/rhel/7/cis_rhel7_linux.yml @@ -1,20 +1,20 @@ # Security Configuration Assessment -# CIS Checks for RHEL 7 +# CIS Checks for RHEL 7. # Copyright (C) 2015, Wazuh Inc. # # This program is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public -# License (version 2) as published by the FSF - Free Software -# Foundation +# License (version 2) as published by the FSF - Free Software Foundation +# # # Based on: -# Center for Internet Security Red Hat Enterprise Linux 7 Benchmark v3.0.1 - 09-21-2020 +# Center for Internet Security Red Hat Enterprise Linux 7 Benchmark v3.1.1 - 05-21-2021 policy: id: "cis_rhel7_linux" file: "cis_rhel7_linux.yml" - name: "CIS Red Hat Enterprise Linux 7 Benchmark v3.0.1" - description: "This document provides prescriptive guidance for establishing a secure configuration posture for Red Hat Enterprise Linux 7 systems running on x86 and x64 platforms. This document was tested against Red Hat Enterprise Linux 7.4." + name: "CIS Red Hat Enterprise Linux 7 Benchmark v3.1.1" + description: "This document provides prescriptive guidance for establishing a secure configuration posture for Red Hat Enterprise Linux 7 systems running on x86 and x64 platforms. This document was tested against Red Hat Enterprise Linux 7." 
references: - https://www.cisecurity.org/cis-benchmarks/ @@ -33,3303 +33,4271 @@ variables: $sshd_file: /etc/ssh/sshd_config checks: - # 1.1.1.1 cramfs: filesystem + # 1.1.1.1 Ensure mounting of cramfs filesystems is disabled. (Automated) - id: 4500 - title: "Ensure mounting of cramfs filesystems is disabled" + title: "Ensure mounting of cramfs filesystems is disabled." description: "The cramfs filesystem type is a compressed read-only Linux filesystem embedded in small footprint systems. A cramfs image can be used without having to first decompress the image." rationale: "Removing support for unneeded filesystem types reduces the local attack surface of the server. If this filesystem type is not needed, disable it." - remediation: "Edit or create a file in the /etc/modprobe.d/directory ending in .conf. Example: vim /etc/modprobe.d/cramfs.confand add the following line: install cramfs /bin/true. Run the following command to unload the cramfs module: rmmod cramfs" + remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: vim /etc/modprobe.d/cramfs.conf and add the following line: install cramfs /bin/true Run the following command to unload the cramfs module: # rmmod cramfs." compliance: - cis: ["1.1.1.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.5"] - - tsc: ["CC6.3"] - references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - "c:modprobe -n -v cramfs -> r:install /bin/true|Module cramfs not found" - "not c:lsmod -> r:cramfs" - # 1.1.1.2 squashfs: filesystem + # 1.1.1.2 Ensure mounting of squashfs filesystems is disabled. (Automated) - id: 4501 - title: "Ensure mounting of squashfs filesystems is disabled" - description: "The squashfs filesystem type is a compressed read-only Linux filesystem embedded in small footprint systems (similar to cramfs ). A squashfs image can be used without having to first decompress the image." + title: "Ensure mounting of squashfs filesystems is disabled." + description: "The squashfs filesystem type is a compressed read-only Linux filesystem embedded in small footprint systems (similar to cramfs). A squashfs image can be used without having to first decompress the image." rationale: "Removing support for unneeded filesystem types reduces the local attack surface of the system. If this filesystem type is not needed, disable it." - remediation: "Edit or create the file /etc/modprobe.d/CIS.conf and add the following line: install squashfs /bin/true. Run the following command to unload the squashfs module: rmmod squashfs" + impact: 'Disabling squashfs will prevent the use of snap. Snap is a package manager for Linux for installing Snap packages. "Snap" application packages of software are self-contained and work across a range of Linux distributions. This is unlike traditional Linux package management approaches, like APT or RPM, which require specifically adapted packages per Linux distribution on an application update and delay therefore application deployment from developers to their software''s end-user. 
Snaps themselves have no dependency on any external store ("App store"), can be obtained from any source and can be therefore used for upstream software deployment. When snaps are deployed on versions of Linux, the Ubuntu app store is used as default back-end, but other stores can be enabled as well.' + remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: vi /etc/modprobe.d/squashfs.conf and add the following line: install squashfs /bin/true Run the following command to unload the squashfs module: # rmmod squashfs." compliance: - cis: ["1.1.1.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.5"] - - tsc: ["CC6.3"] - references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - "c:modprobe -n -v squashfs -> r:install /bin/true|Module squashfs not found" - "not c:lsmod -> r:squashfs" - # 1.1.1.3 udf: filesystem + # 1.1.1.3 Ensure mounting of udf filesystems is disabled. (Automated) - id: 4502 - title: "Ensure mounting of udf filesystems is disabled" + title: "Ensure mounting of udf filesystems is disabled." description: "The udf filesystem type is the universal disk format used to implement ISO/IEC 13346 and ECMA-167 specifications. This is an open vendor filesystem type for data storage on a broad range of media. This filesystem type is necessary to support writing DVDs and newer optical disc formats." rationale: "Removing support for unneeded filesystem types reduces the local attack surface of the system. If this filesystem type is not needed, disable it." - remediation: "Edit or create the file /etc/modprobe.d/CIS.conf and add the following line: install udf /bin/true. Run the following command to unload the udf module: rmmod udf" + remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: vi /etc/modprobe.d/udf.conf and add the following line: install udf /bin/true Run the following command to unload the udf module: # rmmod udf." compliance: - cis: ["1.1.1.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.5"] - - tsc: ["CC6.3"] - references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - "c:modprobe -n -v udf -> r:install /bin/true|Module udf not found" - "not c:lsmod -> r:udf" - # 1.1.1.4 FAT: filesystem + # 1.1.2 Ensure /tmp is configured. (Automated) - id: 4503 - title: "Ensure mounting of FAT filesystems is disabled" - description: "The FAT filesystem format is primarily used on older windows systems and portable USB drives or flash modules. It comes in three types FAT12, FAT16, and FAT32 all of which are supported by the vfat kernel module." - rationale: "Removing support for unneeded filesystem types reduces the local attack surface of the system. 
If this filesystem type is not needed, disable it." - remediation: "If utilizing UEFI the FAT filesystem format is required. If this case, ensure that the FAT filesystem is only used where appropriate. Run the following command: grep -E -i '\\svfat\\s' /etc/fstab And review that any output is appropriate for your environment. If not utilizing UEFI: Edit or create a file in the /etc/modprobe.d/ directory ending in .conf and add the following lines: Example: vim /etc/modprobe.d/fat.conf install fat /bin/true install vfat /bin/true install msdos /bin/true Run the following commands to unload the msdos, vfat, and fatmodules: # rmmod msdos # rmmod vfat # rmmod fat " - compliance: - - cis: ["1.1.1.4"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.5"] - - tsc: ["CC6.3"] - references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ - condition: all - rules: - - "c:modprobe -n -v vfat -> r:install /bin/true|Module vfat not found" - - "not c:lsmod -> r:vfat" - - "c:modprobe -n -v fat -> r:install /bin/true|Module fat not found" - - "not c:lsmod -> r:fat" - - "c:modprobe -n -v msdos -> r:install /bin/true|Module msdos not found" - - "not c:lsmod -> r:msdos" - - # 1.1.2 /tmp: partition - - id: 4504 - title: "Ensure /tmp is configured" + title: "Ensure /tmp is configured." description: "The /tmp directory is a world-writable directory used for temporary storage by all users and some applications." - rationale: "Since the /tmp directory is intended to be world-writable, there is a risk of resource exhaustion if it is not bound to a separate partition. In addition, making /tmp its own file system allows an administrator to set the noexec option on the mount, making /tmp useless for an attacker to install executable code. It would also prevent an attacker from establishing a hardlink to a system setuid program and wait for it to be updated. Once the program was updated, the hardlink would be broken and the attacker would have his own copy of the program. If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw." - remediation: "Create or update an entry for /tmp in either /etc/fstab OR in a systemd tmp.mount file: If /etc/fstab is used: Configure /etc/fstab as appropriate. Example: tmpfs /tmp tmpfs defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /tmp: # mount -o remount,noexec,nodev,nosuid /tmp OR If systemd tmp.mount file is used: Run the following command to create the file /etc/systemd/system/tmp.mount if it doesn't exist: # [ ! -f /etc/systemd/system/tmp.mount ] && cp -v /usr/lib/systemd/system/tmp.mount /etc/systemd/system/ Edit the file /etc/systemd/system/tmp.mount: [Mount] What=tmpfs Where=/tmp Type=tmpfs Options=mode=1777,strictatime,noexec,nodev,nosuid Run the following command to reload the systemd daemon:# systemctl daemon-reload Run the following command to unmask tmp.mount: # systemctl unmask tmp.mpunt Run the following command to enable and start tmp.mount: # systemctl enable --now tmp.mount" + rationale: "Making /tmp its own file system allows an administrator to set the noexec option on the mount, making /tmp useless for an attacker to install executable code. It would also prevent an attacker from establishing a hardlink to a system setuid program and wait for it to be updated. Once the program was updated, the hardlink would be broken and the attacker would have his own copy of the program. 
If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw. This can be accomplished by either mounting tmpfs to /tmp, or creating a separate partition for /tmp." + impact: "Since the /tmp directory is intended to be world-writable, there is a risk of resource exhaustion if it is not bound to a separate partition. Running out of /tmp space is a problem regardless of what kind of filesystem lies under it, but in a default installation a disk-based /tmp will essentially have the whole disk available, as it only creates a single / partition. On the other hand, a RAM-based /tmp as with tmpfs will almost certainly be much smaller, which can lead to applications filling up the filesystem much more easily." + remediation: "Create or update an entry for /tmp in either /etc/fstab OR in a systemd tmp.mount file: If /etc/fstab is used: configure /etc/fstab as appropriate. _ Example:_ tmpfs /tmp tmpfs defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /tmp # mount -o remount,noexec,nodev,nosuid /tmp OR if systemd tmp.mount file is used: run the following command to create the file /etc/systemd/system/tmp.mount if it doesn't exist: # [ ! -f /etc/systemd/system/tmp.mount ] && cp -v /usr/lib/systemd/system/tmp.mount /etc/systemd/system/ Edit the file /etc/systemd/system/tmp.mount: [Mount] What=tmpfs Where=/tmp Type=tmpfs Options=mode=1777,strictatime,noexec,nodev,nosuid Run the following command to reload the systemd daemon: # systemctl daemon-reload Run the following command to unmask and start tmp.mount: # systemctl --now unmask tmp.mount." + references: + - "http://tldp.org/HOWTO/LVM-HOWTO/" + - "https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems/" compliance: - cis: ["1.1.2"] - - cis_csc: ["9.4", "13"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["9.4", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: any rules: - - 'c:mount -> r:\s/tmp\s' - - 'f:/etc/fstab -> r:\s/tmp\s' - - "c:systemctl is-enabled tmp.mount -> r:enabled" + - 'c:findmnt --kernel /tmp -> r:^/tmp\s' + - "c:systemctl is-enabled tmp.mount -> r:enabled|static|generated" - # 1.1.3 /tmp: noexec - - id: 4505 - title: "Ensure noexec option set on /tmp partition" + # 1.1.3 Ensure noexec option set on /tmp partition. (Automated) + - id: 4504 + title: "Ensure noexec option set on /tmp partition." description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." rationale: "Since the /tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot run executable binaries from /tmp." - remediation: "Edit the /etc/fstab file OR the /etc/systemd/system/local-fs.target.wants/tmp.mount file: IF /etc/fstab is used to mount /tmp Edit the /etc/fstabfile and add noexecto the fourth field (mounting options) for the /tmp partition. See the fstab(5) manual page for more information. 
Run the following command to remount /tmp: # mount -o remount,noexec /tmp OR IF systemd is used to mount /tmp: Edit /etc/systemd/system/local-fs.target.wants/tmp.mount to add noexec to the /tmp mount options: [Mount] Options=mode=1777,strictatime,noexec,nodev,nosuid Run the following command to restart the systemd daemon: # systemctl daemon-reload Run the following command to restart tmp.mount # systemctl restart tmp.mount" + remediation: "Edit the /etc/fstab file OR the /etc/systemd/system/local- fs.target.wants/tmp.mount file: IF /etc/fstab is used to mount /tmp Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /tmp partition. See the fstab(5) manual page for more information. Run the following command to remount /tmp: # mount -o remount,noexec /tmp OR if systemd is used to mount /tmp:_ Edit /etc/systemd/system/local-fs.target.wants/tmp.mount to add noexec to the /tmp mount options: [Mount] Options=mode=1777,strictatime,noexec,nodev,nosuid Run the following command to restart the systemd daemon: # systemctl daemon-reload Run the following command to restart tmp.mount # systemctl restart tmp.mount." compliance: - cis: ["1.1.3"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: none rules: - - 'c:mount -> r:\s/tmp\s && r:noexec' + - 'c:mount -> r:\s/tmp\s && !r:noexec' - # 1.1.4 /tmp: nodev - - id: 4506 - title: "Ensure nodev option set on /tmp partition" + # 1.1.4 Ensure nodev option set on /tmp partition. (Automated) + - id: 4505 + title: "Ensure nodev option set on /tmp partition." description: "The nodev mount option specifies that the filesystem cannot contain special devices." rationale: "Since the /tmp filesystem is not intended to support devices, set this option to ensure that users cannot attempt to create block or character special devices in /tmp." - remediation: "Edit the /etc/fstab file OR the /etc/systemd/system/local-fs.target.wants/tmp.mount file: IF /etc/fstab is used to mount /tmp Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /tmp partition. See the fstab(5) manual page for more information. Run the following command to remount /tmp: # mount -o remount,nodev /tmp OR IF systemd is used to mount /tmp: Edit /etc/systemd/system/local-fs.target.wants/tmp.mount to add nodev to the /tmp mount options: [Mount] Options=mode=1777,strictatime,noexec,nodev,nosuid Run the following command to restart the systemd daemon: # systemctl daemon-reload Run the following command to restart tmp.mount # systemctl restart tmp.mount" + remediation: "Edit the /etc/fstab file OR the /etc/systemd/system/local- fs.target.wants/tmp.mount file: IF /etc/fstab is used to mount /tmp Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /tmp partition. See the fstab(5) manual page for more information. 
Run the following command to remount /tmp: # mount -o remount,nodev /tmp OR if systemd is used to mount /tmp: Edit /etc/systemd/system/local-fs.target.wants/tmp.mount to add nodev to the /tmp mount options: [Mount] Options=mode=1777,strictatime,noexec,nodev,nosuid Run the following command to restart the systemd daemon: # systemctl daemon-reload Run the following command to restart tmp.mount # systemctl restart tmp.mount." compliance: - cis: ["1.1.4"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: none rules: - - 'c:mount -> r:\s/tmp\s && r:nodev' + - 'c:mount -> r:\s/tmp\s && !r:nodev' - # 1.1.5 /tmp: nosuid - - id: 4507 - title: "Ensure nosuid option set on /tmp partition" + # 1.1.5 Ensure nosuid option set on /tmp partition. (Automated) + - id: 4506 + title: "Ensure nosuid option set on /tmp partition." description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." rationale: "Since the /tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot create setuid files in /tmp." - remediation: "IF /etc/fstab is used to mount /tmp Edit the /etc/fstab file OR the /etc/systemd/system/local-fs.target.wants/tmp.mount file: IF /etc/fstab is used to mount /tmp Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /tmp partition. See the fstab(5) manual page for more information. Run the following command to remount /tmp: # mount -o remount,nosuid /tmp OR IF systemd is used to mount /tmp: Edit /etc/systemd/system/local-fs.target.wants/tmp.mount to add nosuid to the /tmp mount options: [Mount] Options=mode=1777,strictatime,noexec,nosuid,nosuid Run the following command to restart the systemd daemon: # systemctl daemon-reload Run the following command to restart tmp.mount # systemctl restart tmp.mount" + remediation: "IF /etc/fstab is used to mount /tmp Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /tmp partition. See the fstab(5) manual page for more information. Run the following command to remount /tmp : # mount -o remount,nosuid /tmp OR if systemd is used to mount /tmp: Edit /etc/systemd/system/local-fs.target.wants/tmp.mount to add nosuid to the /tmp mount options: [Mount] Options=mode=1777,strictatime,noexec,nodev,nosuid Run the following command to restart the systemd daemon: # systemctl daemon-reload Run the following command to restart tmp.mount: # systemctl restart tmp.mount." 
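A single fstab entry and one remount satisfy the three /tmp option checks above; a minimal sketch assuming /tmp is mounted as tmpfs through /etc/fstab rather than the systemd tmp.mount unit:

    # /etc/fstab line:
    #   tmpfs  /tmp  tmpfs  defaults,rw,nosuid,nodev,noexec,relatime  0 0
    mount -o remount,noexec,nodev,nosuid /tmp
    findmnt --kernel /tmp    # OPTIONS should now include noexec, nodev and nosuid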
compliance: - cis: ["1.1.5"] - - cis_csc: ["5.1", "13"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all + - cis_csc_v8: ["3.3", "4.1"] + - cis_csc_v7: ["5.1", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6", "CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2", "7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.3.1", "1.5.1", "2.1.1", "2.2.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1", "CC7.1", "CC8.1"] + condition: none rules: - - 'c:mount -> r:\s/tmp\s && r:nosuid' + - 'c:mount -> r:\s/tmp\s && !r:nosuid' - # 1.1.6 /dev/shm: - - id: 4508 - title: "Ensure /dev/shm is configured " + # 1.1.6 Ensure /dev/shm is configured. (Automated) + - id: 4507 + title: "Ensure /dev/shm is configured." description: "/dev/shm is a traditional shared memory concept. One program will create a memory portion, which other processes (if permitted) can access. Mounting tmpfs at /dev/shm is handled automatically by systemd." rationale: "Any user can upload and execute files inside the /dev/shm similar to the /tmp partition. Configuring /dev/shm allows an administrator to set the noexec option on the mount, making /dev/shm useless for an attacker to install executable code. It would also prevent an attacker from establishing a hardlink to a system setuid program and wait for it to be updated. Once the program was updated, the hardlink would be broken and the attacker would have his own copy of the program. If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw." - remediation: "Edit /etc/fstab and add or edit the following line: tmpfs /dev/shm tmpfs defaults,noexec,nodev,nosuid,seclabel 0 0 Run the following command to remount /dev/shm: # mount -o remount,noexec,nodev,nosuid /dev/shm" + remediation: "Edit /etc/fstab and add or edit the following line: tmpfs /dev/shm tmpfs defaults,noexec,nodev,nosuid,seclabel 0 0 Run the following command to remount /dev/shm: # mount -o remount,noexec,nodev,nosuid /dev/shm." compliance: - cis: ["1.1.6"] - - cis_csc: ["5.1", "13"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:mount -> r:\s/dev/shm\s' + - cis_csc_v8: ["3.3", "4.1"] + - cis_csc_v7: ["5.1", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6", "CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2", "7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.3.1", "1.5.1", "2.1.1", "2.2.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1", "CC7.1", "CC8.1"] + condition: all + rules: + - 'c:findmnt --kernel /dev/shm -> r:^/dev/shm\s' - 'f:/etc/fstab -> r:\s/dev/shm\s' - # 1.1.7 /dev/shm: noexec - - id: 4509 - title: "Ensure noexec option set on /dev/shm partition" + # 1.1.7 Ensure noexec option set on /dev/shm partition. (Automated) + - id: 4508 + title: "Ensure noexec option set on /dev/shm partition." 
description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." rationale: "Setting this option on a file system prevents users from executing programs from shared memory. This deters users from introducing potentially malicious software on the system." - remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /dev/shm partition. Run the following command to remount /dev/shm: # mount -o remount,noexec /dev/shm" + remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /dev/shm partition. See the fstab(5) manual page for more information. Run the following command to remount /dev/shm: # mount -o remount,noexec,nodev,nosuid /dev/shm." compliance: - cis: ["1.1.7"] - - cis_csc: ["2.6", "13"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3", "4.1"] + - cis_csc_v7: ["2.6", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - nist_sp_800-53: ["AC-5", "AC-6", "CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2", "7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.3.1", "1.5.1", "2.1.1", "2.2.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1", "CC7.1", "CC8.1"] condition: all rules: - - 'c:mount -> r:\s/dev/shm\s && r:noexec' + - 'c:findmnt --kernel /dev/shm -> r:^/dev/shm\s' + - "c:findmnt --kernel /dev/shm -> r:noexec" - # 1.1.8 /dev/shm: nodev - - id: 4510 - title: "Ensure nodev option set on /dev/shm partition" + # 1.1.8 Ensure nodev option set on /dev/shm partition. (Automated) + - id: 4509 + title: "Ensure nodev option set on /dev/shm partition." description: "The nodev mount option specifies that the filesystem cannot contain special devices." rationale: "Since the /dev/shm filesystem is not intended to support devices, set this option to ensure that users cannot attempt to create special devices in /dev/shm partitions." - remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /dev/shm partition. Run the following command to remount /dev/shm: # mount -o remount,nodev /dev/shm" + remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /dev/shm partition. See the fstab(5) manual page for more information. Run the following command to remount /dev/shm: # mount -o remount,noexec,nodev,nosuid /dev/shm." 
compliance: - cis: ["1.1.8"] - - cis_csc: ["5.1", "13"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3", "4.1"] + - cis_csc_v7: ["5.1", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6", "CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2", "7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.3.1", "1.5.1", "2.1.1", "2.2.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1", "CC7.1", "CC8.1"] condition: all rules: - - 'c:mount -> r:\s/dev/shm\s && r:nodev' + - 'c:findmnt --kernel /dev/shm -> r:^/dev/shm\s' + - "c:findmnt --kernel /dev/shm -> r:nodev" - # 1.1.9 /dev/shm: nosuid - - id: 4511 - title: "Ensure nosuid option set on /dev/shm partition" + # 1.1.9 Ensure nosuid option set on /dev/shm partition. (Automated) + - id: 4510 + title: "Ensure nosuid option set on /dev/shm partition." description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." rationale: "Setting this option on a file system prevents users from introducing privileged programs onto the system and allowing non-root users to execute them." - remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /dev/shm partition. Run the following command to remount /dev/shm: # mount -o remount,nosuid /dev/shm" + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /dev/shm partition. See the fstab(5) manual page for more information. Run the following command to remount /dev/shm: # mount -o remount,noexec,nodev,nosuid /dev/shm." compliance: - cis: ["1.1.9"] - - cis_csc: ["5.1", "13"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3", "4.1"] + - cis_csc_v7: ["5.1", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6", "CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2", "7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.3.1", "1.5.1", "2.1.1", "2.2.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1", "CC7.1", "CC8.1"] condition: all rules: - - 'c:mount -> r:\s/dev/shm\s && r:nosuid' + - 'c:findmnt --kernel /dev/shm -> r:^/dev/shm\s' + - "c:findmnt --kernel /dev/shm -> r:nosuid" - # 1.1.10 Build considerations - Partition scheme. - - id: 4512 - title: "Ensure separate partition exists for /var" + # 1.1.10 Ensure separate partition exists for /var. (Automated) + - id: 4511 + title: "Ensure separate partition exists for /var." description: "The /var directory is used by daemons and other system services to temporarily store dynamic data. Some directories created by these processes may be world-writable." rationale: "Since the /var directory may contain world-writable files and directories, there is a risk of resource exhaustion if it is not bound to a separate partition." - remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var. 
For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." + remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + references: + - "http://tldp.org/HOWTO/LVM-HOWTO/" compliance: - cis: ["1.1.10"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'c:mount -> r:\s/var\s' - # 1.1.11 bind mount /var/tmp to /tmp - - id: 4513 - title: "Ensure separate partition exists for /var/tmp" + # 1.1.11 Ensure separate partition exists for /var/tmp. (Automated) + - id: 4512 + title: "Ensure separate partition exists for /var/tmp." description: "The /var/tmp directory is a world-writable directory used for temporary storage by all users and some applications and is intended for temporary files that are preserved across reboots." - rationale: "Since the /var/tmp directory is intended to be world-writable, there is a risk of resource exhaustion if it is not bound to a separate partition. In addition, making /var/tmp its own file system allows an administrator to set the noexec option on the mount, making /var/tmp useless for an attacker to install executable code." - remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/tmp. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + rationale: "Since the /var/tmp directory is intended to be world-writable, there is a risk of resource exhaustion if it is not bound to a separate partition. In addition, making /var/tmp its own file system allows an administrator to set the noexec option on the mount, making /var/tmp useless for an attacker to install executable code. It would also prevent an attacker from establishing a hardlink to a system setuid program and wait for it to be updated. Once the program was updated, the hardlink would be broken and the attacker would have his own copy of the program. If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." 
+ remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/tmp . For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." compliance: - cis: ["1.1.11"] - - cis_csc: ["5.1", "13"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3", "4.1"] + - cis_csc_v7: ["5.1", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6", "CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2", "7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.3.1", "1.5.1", "2.1.1", "2.2.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1", "CC7.1", "CC8.1"] condition: all rules: - 'c:mount -> r:\s/var/tmp\s' - # 1.1.12 noexec set on /var/tmp - - id: 4514 - title: "Ensure noexec option set on /var/tmp partition" + # 1.1.12 Ensure /var/tmp partition includes the noexec option. (Automated) + - id: 4513 + title: "Ensure /var/tmp partition includes the noexec option." description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." rationale: "Since the /var/tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot run executable binaries from /var/tmp." - remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /var/tmp partition. See the fstab(5) manual page for more information. un the following command to remount /var/tmp: # mount -o remount,noexec /var/tmp" + remediation: "For existing /var/tmp partitions, edit the /etc/fstab file and add noexec to the fourth field (mounting options) of the /var/tmp entry. See the fstab(5) manual page for more information. Run the following command to remount /var/tmp : # mount -o remount,noexec /var/tmp." compliance: - cis: ["1.1.12"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all + - cis_csc_v8: ["3.3", "4.1"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - nist_sp_800-53: ["AC-5", "AC-6", "CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2", "7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.3.1", "1.5.1", "2.1.1", "2.2.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1", "CC7.1", "CC8.1"] + condition: none rules: - - 'c:mount -> r:\s/var/tmp\s && r:noexec' + - 'c:mount -> r:\s/var/tmp\s && !r:noexec' - # 1.1.13 nodev set on /var/tmp - - id: 4515 - title: "Ensure nodev option set on /var/tmp partition" + # 1.1.13 Ensure /var/tmp partition includes the nodev option. (Automated) + - id: 4514 + title: "Ensure /var/tmp partition includes the nodev option." description: "The nodev mount option specifies that the filesystem cannot contain special devices." - rationale: "Since the /var/tmp filesystem is not intended to support devices, set this option to ensure that users cannot attempt to create block or character special devices in /var/tmp ." 
- remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /var/tmp partition. See the fstab(5) manual page for more information. Run the following command to remount /var/tmp: # mount -o remount,nodev /var/tmp" + rationale: "Since the /var/tmp filesystem is not intended to support devices, set this option to ensure that users cannot attempt to create block or character special devices in /var/tmp." + remediation: "For existing /var/tmp partitions, edit the /etc/fstab file and add nodev to the fourth field (mounting options) of the /var/tmp entry. See the fstab(5) manual page for more information. Run the following command to remount /var/tmp: # mount -o remount,nodev /var/tmp." compliance: - cis: ["1.1.13"] - - cis_csc: ["5.1", "13"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all + - cis_csc_v8: ["3.3", "4.1"] + - cis_csc_v7: ["5.1", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6", "CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2", "7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.3.1", "1.5.1", "2.1.1", "2.2.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1", "CC7.1", "CC8.1"] + condition: none rules: - - 'c:mount -> r:\s/var/tmp\s && r:nodev' + - 'c:mount -> r:\s/var/tmp\s && !r:nodev' - # 1.1.14 nosuid set on /var/tmp - - id: 4516 - title: "Ensure nosuid option set on /var/tmp partition" + # 1.1.14 Ensure /var/tmp partition includes the nosuid option. (Automated) + - id: 4515 + title: "Ensure /var/tmp partition includes the nosuid option." description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." rationale: "Since the /var/tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot create setuid files in /var/tmp." - remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /var/tmp partition. See the fstab(5) manual page for more information. Run the following command to remount /var/tmp: # mount -o remount,nosuid /var/tmp" + remediation: "For existing /var/tmp partitions, edit the /etc/fstab file and add nosuid to the fourth field (mounting options) of the /var/tmp entry. See the fstab(5) manual page for more information. Run the following command to remount /var/tmp : # mount -o remount,nosuid /var/tmp." 
compliance: - cis: ["1.1.14"] - - cis_csc: ["5.1", "13"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all + - cis_csc_v8: ["3.3", "4.1"] + - cis_csc_v7: ["5.1", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6", "CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2", "7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.3.1", "1.5.1", "2.1.1", "2.2.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1", "CC7.1", "CC8.1"] + condition: none rules: - - 'c:mount -> r:\s/var/tmp\s && r:nosuid' + - 'c:mount -> r:\s/var/tmp\s && !r:nosuid' - # 1.1.15 /var/log: partition - - id: 4517 - title: "Ensure separate partition exists for /var/log" - description: "The /var/log directory is used by system services to store log data ." + # 1.1.15 Ensure separate partition exists for /var/log. (Automated) + - id: 4516 + title: "Ensure separate partition exists for /var/log." + description: "The /var/log directory is used by system services to store log data." rationale: "There are two important reasons to ensure that system logs are stored on a separate partition: protection against resource exhaustion (since logs can grow quite large) and protection of audit data." - remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/log. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." + remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/log . For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + references: + - "http://tldp.org/HOWTO/LVM-HOWTO/" compliance: - cis: ["1.1.15"] - - cis_csc: ["6.4"] - - pci_dss: ["2.2.4", "10.7"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - cis_csc_v8: ["4.1", "8.3"] + - cis_csc_v7: ["6.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["10.7", "11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["A1.1", "CC7.1", "CC8.1"] condition: all rules: - 'c:mount -> r:\s/var/log\s' - # 1.1.16 /var/log/audit: partition - - id: 4518 - title: "Ensure separate partition exists for /var/log/audit" - description: "The auditing daemon, auditd, stores log data in the /var/log/audit directory." - rationale: "There are two important reasons to ensure that data gathered by auditd is stored on a separate partition: protection against resource exhaustion (since the audit.log file can grow quite large) and protection of audit data." 
- remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/log/audit. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + # 1.1.16 Ensure separate partition exists for /var/log/audit. (Automated) + - id: 4517 + title: "Ensure separate partition exists for /var/log/audit." + description: "The auditing daemon, auditd , stores log data in the /var/log/audit directory." + rationale: "There are two important reasons to ensure that data gathered by auditd is stored on a separate partition: protection against resource exhaustion (since the audit.log file can grow quite large) and protection of audit data. The audit daemon calculates how much free space is left and performs actions based on the results. If other processes (such as syslog) consume space in the same partition as auditd , it may not perform as desired." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." + remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/log/audit . For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + references: + - "http://tldp.org/HOWTO/LVM-HOWTO/" compliance: - cis: ["1.1.16"] - - cis_csc: ["6.3"] - - pci_dss: ["2.2.4", "10.7"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - cis_csc_v8: ["8.3"] + - cis_csc_v7: ["6.4"] + - iso_27001-2013: ["A.12.4.1"] + - pci_dss_v3.2.1: ["10.7"] + - soc_2: ["A1.1"] condition: all rules: - 'c:mount -> r:\s/var/log/audit\s' - # 1.1.17 /home: partition - - id: 4519 - title: "Ensure separate partition exists for /home" + # 1.1.17 Ensure separate partition exists for /home. (Automated) + - id: 4518 + title: "Ensure separate partition exists for /home." description: "The /home directory is used to support disk storage needs of local users." rationale: "If the system is intended to support local users, create a separate partition for the /home directory to protect against resource exhaustion and restrict the type of files that can be stored under /home." - remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /home. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." + remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /home . For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." 
+ references: + - "http://tldp.org/HOWTO/LVM-HOWTO/" compliance: - cis: ["1.1.17"] - - cis_csc: ["5.1", "13"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'c:mount -> r:\s/home\s' - # 1.1.18 /home: nodev - - id: 4520 - title: "Ensure nodev option set on /home partition" + # 1.1.18 Ensure /home partition includes the nodev option. (Automated) + - id: 4519 + title: "Ensure /home partition includes the nodev option." description: "The nodev mount option specifies that the filesystem cannot contain special devices." rationale: "Since the user partitions are not intended to support devices, set this option to ensure that users cannot attempt to create block or character special devices." - remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /home partition. # mount -o remount,nodev /home" + remediation: "For existing /home partitions, edit the /etc/fstab file and add nodev to the fourth field (mounting options) of the /home entry. See the fstab(5) manual page for more information. Run the following command to remount /home: # mount -o remount,nodev /home." compliance: - cis: ["1.1.18"] - - cis_csc: ["5.1", "13"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all + - cis_csc_v8: ["3.3", "4.1"] + - cis_csc_v7: ["5.1", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6", "CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2", "7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.3.1", "1.5.1", "2.1.1", "2.2.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1", "CC7.1", "CC8.1"] + condition: none + rules: + - 'c:mount -> r:\s/home\s && !r:nodev' + + # 1.1.19 Ensure removable media partitions include noexec option. (Automated) + - id: 4520 + title: "Ensure removable media partitions include noexec option." + description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." + rationale: "Setting this option on a file system prevents users from executing programs from the removable media. This deters users from being able to introduce potentially malicious software on the system." + remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) of all removable media partitions. Look for entries that have mount points that contain words such as floppy or cdrom. See the fstab(5) manual page for more information." 
+ compliance: + - cis: ["1.1.19"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["2.6", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: none rules: - - 'c:mount -> r:\s/home\s && r:nodev' + - 'c:mount -> r:\s/etc/fstab\s && !r:noexec' - # 1.1.23 Disable Automounting + # 1.1.20 Ensure nodev option set on removable media partitions. (Automated) - id: 4521 - title: "Disable Automounting" + title: "Ensure nodev option set on removable media partitions." + description: "The nodev mount option specifies that the filesystem cannot contain special devices." + rationale: "Removable media containing character and block special devices could be used to circumvent security controls by allowing non-root users to access sensitive device files such as /dev/kmem or the raw disk partitions." + remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) of all removable media partitions. Look for entries that have mount points that contain words such as floppy or cdrom. See the fstab(5) manual page for more information." + compliance: + - cis: ["1.1.20"] + - cis_csc_v8: ["3.3", "4.1"] + - cis_csc_v7: ["5.1", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6", "CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2", "7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.3.1", "1.5.1", "2.1.1", "2.2.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1", "CC7.1", "CC8.1"] + condition: none + rules: + - 'c:mount -> r:\s/etc/fstab\s && !r:nodev' + + # 1.1.21 Ensure nosuid option set on removable media partitions. (Automated) + - id: 4522 + title: "Ensure nosuid option set on removable media partitions." + description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." + rationale: "Setting this option on a file system prevents users from introducing privileged programs onto the system and allowing non-root users to execute them." + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) of all removable media partitions. Look for entries that have mount points that contain words such as floppy or cdrom. See the fstab(5) manual page for more information." 
+ compliance: + - cis: ["1.1.21"] + - cis_csc_v8: ["3.3", "4.1"] + - cis_csc_v7: ["5.1", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6", "CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2", "7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.3.1", "1.5.1", "2.1.1", "2.2.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1", "CC7.1", "CC8.1"] + condition: none + rules: + - 'c:mount -> r:\s/etc/fstab\s && !r:nosuid' + + # 1.1.22 Ensure sticky bit is set on all world-writable directories. (Automated) - Not Implemented + + # 1.1.23 Disable Automounting. (Automated) + - id: 4523 + title: "Disable Automounting." description: "autofs allows automatic mounting of devices, typically including CD/DVDs and USB drives." rationale: "With automounting enabled anyone with physical access could attach a USB drive or disc and have its contents available in system even if they lacked permissions to mount it themselves." - remediation: "Run the following command to disable autofs: systemctl disable autofs" + impact: "The use of portable hard drives is very common for workstation users. If your organization allows the use of portable storage or media on workstations and physical access controls to workstations is considered adequate there is little value add in turning off automounting." + remediation: "Run the following command to mask autofs: # systemctl --now mask autofs OR run the following command to remove autofs # yum remove autofs." compliance: - cis: ["1.1.23"] - - cis_csc: ["8.4", "8.5"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["10.3"] + - cis_csc_v7: ["8.4", "8.5"] + - cmmc_v2.0: ["MP.L2-3.8.7"] + - hipaa: ["164.310(d)(1)"] + - iso_27001-2013: ["A.12.2.1"] + condition: any rules: - - "c:systemctl is-enabled autofs -> r:enabled" + - "c:systemctl is-enabled autofs -> r:Failed to get unit file state for autofs.service: No such file or directory" + - "c:systemctl is-enabled autofs -> r:disabled" + - 'not c:systemctl show "autofs.service" -> r:\s*unitfilestate=enabled' - # 1.1.24 Disable USB Storage - - id: 4522 - title: "Disable USB Storage" + # 1.1.24 Disable USB Storage. (Automated) + - id: 4524 + title: "Disable USB Storage." description: "USB storage provides a means to transfer and store files insuring persistence and availability of the files independent of network connection status. Its popularity and utility has led to USB-based malware being a simple and common means for network infiltration and a first step to establishing a persistent threat within a networked environment." rationale: "Restricting USB access on the system will decrease the physical attack surface for a device and diminish the possible vectors to introduce malware." 
- remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: vim /etc/modprobe.d/usb_storage.conf Add the following line: install usb-storage /bin/true Run the following command to unload the usb-storage module: rmmod usb-storage" + remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: vim /etc/modprobe.d/usb_storage.conf Add the following line: install usb-storage /bin/true Run the following command to unload the usb-storage module: rmmod usb-storage." compliance: - - cis: ["1.1.22"] - - cis_csc: ["8.4", "8.5"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.1.24"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["8.4", "8.5"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.12.2.1"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - "c:modprobe -n -v usb-storage -> r:install /bin/true" - - "not c:lsmod -> r:install /bin/true" + - "not c:lsmod -> r:usb-storage" ############################################### # 1.2 Configure Software Updates ############################################### - # 1.2.3 Activate gpgcheck - - id: 4523 - title: "Ensure gpgcheck is globally activated" - description: "The gpgcheck option, found in the main section of the /etc/yum.conf and individual /etc/yum/repos.d/* files determines if an RPM package's signature is checked prior to its installation." + # 1.2.1 Ensure GPG keys are configured. (Manual) - Not Implemented + # 1.2.2 Ensure package manager repositories are configured. (Manual) - Not Implemented + + # 1.2.3 Ensure gpgcheck is globally activated. (Automated) + - id: 4525 + title: "Ensure gpgcheck is globally activated." + description: "The gpgcheck option, found in the main section of the /etc/yum.conf and individual /etc/yum/repos.d/*.repo files determines if an RPM package's signature is checked prior to its installation." rationale: "It is important to ensure that an RPM's package signature is always checked prior to installation to ensure that the software is obtained from a trusted source." - remediation: "Edit /etc/yum.conf and set ' gpgcheck=1 ' in the [main] section. Edit any failing files in /etc/yum.repos.d/* and set all instances of gpgcheck to ' 1 '." + remediation: "Edit /etc/yum.conf and set 'gpgcheck=1' in the [main] section. Edit any failing files in /etc/yum.repos.d/*.repo and set all instances of gpgcheck to 1." compliance: - cis: ["1.2.3"] - - cis_csc: ["3.4"] - - pci_dss: ["6.2"] - - nist_800_53: ["SI.2", "SA.11", "SI.4"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["A1.2", "CC6.8"] + - cis_csc_v8: ["7.3"] + - cis_csc_v7: ["3.4"] + - cmmc_v2.0: ["SI.L1-3.14.1"] + - nist_sp_800-53: ["SI-2(2)"] + - pci_dss_v3.2.1: ["6.2"] + - soc_2: ["CC7.1"] condition: all rules: - "f:/etc/yum.conf -> r:gpgcheck=1" - - "not c:grep -Rh ^gpgcheck /etc/yum.repos.d/ -> r:gpgcheck=0" - - # 1.2.5 Disable the rhnsd Daemon (Not Scored) - - id: 4524 - title: "Disable the rhnsd Daemon" - description: "The rhnsd daemon polls the Red Hat Network web site for scheduled actions and, if there are, executes those actions." - rationale: "Patch management policies may require that organizations test the impact of a patch before it is deployed in a production environment. 
Having patches automatically deployed could have a negative impact on the environment. It is best to not allow an action by default but only after appropriate consideration has been made. It is recommended that the service be disabled unless the risk is understood and accepted or you are running your own satellite . This item is not scored because organizations may have addressed the risk." - remediation: "Run the following command to stop and mask the rhnsd: # systemctl --now mask rhnsd" - compliance: - - cis: ["1.2.5"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.5"] - - tsc: ["CC6.3"] - condition: none - rules: - - "c:systemctl is-enabled rhnsd -> r:^enabled" - - ############################################### - # 1.3 Configure sudo - ############################################### - - # 1.3.1 install sudo - - id: 4525 - title: "Ensure sudo is installed" - description: "sudo allows a permitted user to execute a command as the superuser or another user, as specified by the security policy. The invoking user's real (not effective) user ID is used to determine the user name with which to query the security policy." - rationale: "sudo supports a plugin architecture for security policies and input/output logging. Third parties can develop and distribute their own policy and I/O logging plugins to work seamlessly with the sudo front end. The default security policy is sudoers, which is configured via the file /etc/sudoers. The security policy determines what privileges, if any, a user has to run sudo. The policy may require that users authenticate themselves with a password or another authentication mechanism. If authentication is required, sudo will exit if the user's password is not entered within a configurable time limit. This limit is policy-specific." - remediation: "Run the following command to install sudo. # yum install sudo" - compliance: - - cis: ["1.3.1"] - - cis_csc: ["4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: "AIDE stable manual: http://aide.sourceforge.net/stable/manual.html" - condition: all - rules: - - 'c:rpm -q sudo -> r:sudo-\S*' + - 'd:/etc/yum.repos.d/ -> r:\.*.repo -> r:gpgcheck=1' - # 1.3.2 Configure sudo - - id: 4526 - title: "Ensure sudo commands use pty" - description: "sudo can be configured to run only from a pseudo-pty" - rationale: "Attackers can run a malicious program using sudo, which would again fork a background process that remains even when the main program has finished executing. This can be mitigated by configuring sudo to run other commands only from a pseudo-pty, whether I/O logging is turned on or not." - remediation: "Edit the file /etc/sudoers or a file in /etc/sudoers.d/ with visudo or visudo -f and add the following line: Defaults use_pty" - compliance: - - cis: ["1.3.2"] - - cis_csc: ["4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: "AIDE stable manual: http://aide.sourceforge.net/stable/manual.html" - condition: any - rules: - - 'f:/etc/sudoers -> r:^\s*Defaults\s*\t*use_pty' - - 'd:/etc/sudoers.d/ -> r:\. -> r:^\s*Defaults\s*\t*\s*use_pty' + # 1.2.4 Ensure Red Hat Subscription Manager connection is configured. 
(Manual) - Not Implemented - # 1.3.3 Ensure sudo log file exists - - id: 4527 - title: "Ensure sudo log file exists" - description: "sudo can use a custom log file" - rationale: "A sudo log file simplifies auditing of sudo commands" - remediation: 'Edit the file /etc/sudoers or a file in /etc/sudoers.d/ with visudo or visudo -f and add the following line: Defaults logfile='''' Example:Defaults logfile="/var/log/sudo.log"' - compliance: - - cis: ["1.3.3"] - - cis_csc: ["6.3"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'f:/etc/sudoers -> r:^\s*Defaults\s+logfile\s*=\s*"/var/log/sudo.log"' - - 'd:/etc/sudoers.d/ -> r:\. -> r:^\s*Defaults\s+logfile\s*=\s*"/var/log/sudo.log"' + # 1.2.5 Disable the rhnsd Daemon. (Manual) - Not Implemented ############################################### - # 1.4 Filesystem Integrity Checking + # 1.3 Filesystem Integrity Checking ############################################### - # 1.4.1 install AIDE - - id: 4528 - title: "Ensure AIDE is installed" - description: "AIDE takes a snapshot of filesystem state including modification times, permissions, and file hashes which can then be used to compare against the current state of the filesystem to detect modifications to the system." + # 1.3.1 Ensure AIDE is installed. (Automated) + - id: 4526 + title: "Ensure AIDE is installed." + description: "AIDE takes a snapshot of filesystem state including modification times, permissions, and file hashes which can then be used to compare against the current state of the filesystem to detect modifications to the system. Note: The prelinking feature can interfere with AIDE because it alters binaries to speed up their start up times. Run prelink -ua to restore the binaries to their prelinked state, thus avoiding false positives from AIDE." rationale: "By monitoring the filesystem state compromised files can be detected to prevent or limit the exposure of accidental or malicious misconfigurations or modified binaries." - remediation: "Run the following command to install AIDE: yum install aide // Configure AIDE as appropriate for your environment. Consult the AIDE documentation for options. Initialize AIDE: aide --init && mv /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz" + remediation: "Run the following command to install AIDE: # yum install aide Configure AIDE as appropriate for your environment. Consult the AIDE documentation for options. Initialize AIDE: Run the following commands: # aide --init # mv /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz." + references: + - "http://aide.sourceforge.net/stable/manual.html" compliance: - - cis: ["1.4.1"] - - cis_csc: ["14.9"] - - pci_dss: ["11.5"] - - tsc: ["PI1.4", "PI1.5", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - references: "AIDE stable manual: http://aide.sourceforge.net/stable/manual.html" + - cis: ["1.3.1"] + - cis_csc_v8: ["3.14"] + - cis_csc_v7: ["14.9"] + - cmmc_v2.0: ["AC.L2-3.1.7"] + - hipaa: ["164.312(b)", "164.312(c)(1)", "164.312(c)(2)"] + - iso_27001-2013: ["A.12.4.3"] + - nist_sp_800-53: ["AC-6(9)"] + - pci_dss_v3.2.1: ["10.2.1", "11.5"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1"] + - soc_2: ["CC6.1"] condition: all rules: - 'c:rpm -q aide -> r:aide-\S*' - # 1.4.2 AIDE regular checks - - id: 4529 - title: "Ensure filesystem integrity is regularly checked" + # 1.3.2 Ensure filesystem integrity is regularly checked. (Automated) + - id: 4527 + title: "Ensure filesystem integrity is regularly checked." 
description: "Periodic checking of the filesystem integrity is needed to detect changes to the filesystem." rationale: "Periodic file checking allows the system administrator to determine on a regular basis if critical files have been changed in an unauthorized fashion." - remediation: "If cron will be used to schedule and run aide check run the following command: crontab -u root -e Add the following line to the crontab: 0 5 * * * /usr/sbin/aide --check // Notes: The checking in this recommendation occurs every day at 5am. Alter the frequency and time of the checks in compliance with site policy. OR If aidecheck.service and aidecheck.timer will be used to schedule and run aide check: Create or edit the file /etc/systemd/system/aidecheck.service and add the following lines: [Unit] Description=Aide Check [Service] Type=simpleExecStart=/usr/sbin/aide --check [Install] WantedBy=multi-user.target Create or edit the file /etc/systemd/system/aidecheck.timer and add the following lines: [Unit] Description=Aide check every day at 5AM [Timer] OnCalendar=*-*-* 05:00:00 Unit=aidecheck.service [Install] WantedBy=multi-user.target Run the following commands: # chown root:root /etc/systemd/system/aidecheck.* # chmod 0644 /etc/systemd/system/aidecheck.* # systemctl daemon-reload # systemctl enable aidecheck.service # systemctl --now enable aidecheck.timer " + remediation: "If cron will be used to schedule and run aide check Run the following command: # crontab -u root -e Add the following line to the crontab: 0 5 * * * /usr/sbin/aide --check OR if aidecheck.service and aidecheck.timer will be used to schedule and run aide check: Create or edit the file /etc/systemd/system/aidecheck.service and add the following lines: [Unit] Description=Aide Check [Service] Type=simple ExecStart=/usr/sbin/aide --check [Install] WantedBy=multi-user.target Create or edit the file /etc/systemd/system/aidecheck.timer and add the following lines: [Unit] Description=Aide check every day at 5AM [Timer] OnCalendar=*-*-* 05:00:00 Unit=aidecheck.service [Install] WantedBy=multi-user.target Run the following commands: # chown root:root /etc/systemd/system/aidecheck.* # chmod 0644 /etc/systemd/system/aidecheck.* # systemctl daemon-reload # systemctl enable aidecheck.service # systemctl --now enable aidecheck.timer." 
+ references: + - "https://github.com/konstruktoid/hardening/blob/master/config/aidecheck.servic" + - "https://github.com/konstruktoid/hardening/blob/master/config/aidecheck.timer" compliance: - - cis: ["1.4.2"] - - cis_csc: ["14.9"] - - pci_dss: ["11.5"] - - tsc: ["PI1.4", "PI1.5", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: any + - cis: ["1.3.2"] + - cis_csc_v8: ["3.14"] + - cis_csc_v7: ["14.9"] + - cmmc_v2.0: ["AC.L2-3.1.7"] + - hipaa: ["164.312(b)", "164.312(c)(1)", "164.312(c)(2)"] + - iso_27001-2013: ["A.12.4.3"] + - nist_sp_800-53: ["AC-6(9)"] + - pci_dss_v3.2.1: ["10.2.1", "11.5"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1"] + - soc_2: ["CC6.1"] + condition: all rules: - - "c:crontab -u root -l -> r:aide" - - "c:grep -r aide /etc/cron.* /etc/crontab -> r:aide" - "c:systemctl is-enabled aidecheck.service -> r:enabled" - "c:systemctl is-enabled aidecheck.timer -> r:enabled" - - "c:systemctl status aidecheck.timer -> r:enabled" + - "c:systemctl status aidecheck.timer -> r:active" ############################################### - # 1.5 Secure Boot Settings + # 1.4 Secure Boot Settings ############################################### - # 1.5.1 Set Boot Loader Password (Scored) - - id: 4530 - title: "Ensure bootloader password is set" + # 1.4.1 Ensure bootloader password is set. (Automated) + - id: 4528 + title: "Ensure bootloader password is set." description: "Setting the boot loader password will require that anyone rebooting the system must enter a password before being able to set command line boot parameters." rationale: "Requiring a boot password upon execution of the boot loader will prevent an unauthorized user from entering boot parameters or changing the boot partition. This prevents users from weakening security (e.g. turning off SELinux at boot time)." - remediation: 'For newergrub2based systems (centOS/RHEL 7.2 and newer): Create an encrypted password with grub2-setpassword: # grub2-setpassword OR For older grub2based systems: create an encrypted password with grub2-mkpasswd-pbkdf2: # grub2-mkpasswd-pbkdf2 Enter password: Reenter password: Your PBKDF2 is Add the following into /etc/grub.d/01_users or a custom /etc/grub.d configuration file: cat < EOF Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg' + impact: 'If password protection is enabled, only the designated superuser can edit a Grub 2 menu item by pressing "e" or access the GRUB 2 command line by pressing "c" If GRUB 2 is set up to boot automatically to a password-protected menu entry the user has no option to back out of the password prompt to select another menu entry. Holding the SHIFT key will not display the menu in this case. The user must enter the correct username and password. If unable, the configuration files will have to be edited via the LiveCD or other means to fix the problem - You can add --unrestricted to the menu entries to allow the system to boot without entering a password. Password will still be required to edit menu items.' 
+ remediation: 'For newer grub2 based systems (Release 7.2 and newer), create an encrypted password with grub2-setpassword : # grub2-setpassword Enter password: Confirm password: OR For older grub2 based systems, create an encrypted password with grub2-mkpasswd- pbkdf2: # grub2-mkpasswd-pbkdf2 Enter password: Reenter password: Your PBKDF2 is Add the following into /etc/grub.d/01_users or a custom /etc/grub.d configuration file: cat < EOF Note: - If placing the information in a custom file, do not include the "cat << EOF" and "EOF" lines as the content is automatically added from these files - The superuser/user information and password should not be contained in the /etc/grub.d/00_header file. The information can be placed in any /etc/grub.d file as long as that file is incorporated into grub.cfg. It is preferable to enter this data into a custom file, such as /etc/grub.d/40_custom, so it is not overwritten should the Grub package be updated Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg.' compliance: - - cis: ["1.5.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.4.1"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'f:/boot/grub2/user.cfg -> r:^GRUB2_PASSWORD\s*=\.+' - - 'f:/boot/grub2/grub.cfg -> r:^set superusers\s*=\.+' - - 'f:/boot/grub2/grub.cfg -> r:^password_pbkdf2 \.+' - # 1.5.2 Configure bootloader - - id: 4531 - title: "Ensure permissions on bootloader config are configured" - description: "The grub configuration file contains information on boot settings and passwords for unlocking boot options. The grub configuration is usually located at /boot/grub2/grub.cfg and linked as /etc/grub2.cfg . On newer grub2 systems the encrypted bootloader password is contained in /boot/grub2/user.cfg" + # 1.4.2 Ensure permissions on bootloader config are configured. (Automated) + - id: 4529 + title: "Ensure permissions on bootloader config are configured." + description: "The grub configuration file contains information on boot settings and passwords for unlocking boot options. The grub2 configuration is usually grub.cfg. On newer grub2 systems the encrypted bootloader password is contained in user.cfg. If the system uses UEFI, /boot/efi is a vfat filesystem. The vfat filesystem itself doesn't have the concept of permissions but can be mounted under Linux with whatever permissions desired." rationale: "Setting the permissions to read and write for root only prevents non-root users from seeing the boot parameters or changing them. Non-root users who read the boot parameters may be able to identify weaknesses in security upon boot and be able to exploit them." 
- remediation: "Run the following commands to set permissions on your grub configuration: # chown root:root /boot/grub2/grub.cfg # chmod og-rwx /boot/grub2/grub.cfg # chown root:root /boot/grub2/user.cfg # chmod og-rwx /boot/grub2/user.cfg" + remediation: "Run the following commands to set ownership and permissions on your grub configuration file(s): # chown root:root /boot/grub2/grub.cfg # test -f /boot/grub2/user.cfg && chown root:root /boot/grub2/user.cfg # chmod og-rwx /boot/grub2/grub.cfg # test -f /boot/grub2/user.cfg && chmod og-rwx /boot/grub2/user.cfg OR If the system uses UEFI, edit /etc/fstab and add the fmask=0077 option: Example: /boot/efi vfat defaults,umask=0027,fmask=0077,uid=0,gid=0 0 0 Note: This may require a re-boot to enable the change." compliance: - - cis: ["1.5.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.4.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'c:stat -L /boot/grub2/grub.cfg -> r:Access:\s*\(0\d00/-\w\w\w------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - - 'c:stat -L /boot/grub2/user.cfg -> r:Access:\s*\(0\d00/-\w\w\w------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)|cannot stat' + - 'c:stat -L /boot/grub2/grubenv -> r:Access:\s*\(0\d00/-\w\w\w------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 1.5.3 Single user authentication - - id: 4532 - title: "Ensure authentication required for single user mode" - description: "Single user mode (rescue mode) is used for recovery when the system detects an issue during boot or by manual selection from the bootloader." + # 1.4.3 Ensure authentication required for single user mode. (Automated) + - id: 4530 + title: "Ensure authentication required for single user mode." + description: "Single user mode (rescue mode) is used for recovery when the system detects an issue during boot or by manual selection from the bootloader. Note: The systemctl option --fail is synonymous with --job-mode=fail. Using either is acceptable." rationale: "Requiring authentication in single user mode (rescue mode) prevents an unauthorized user from rebooting the system into single user to gain root privileges without credentials." - remediation: 'Edit /usr/lib/systemd/system/rescue.service and /usr/lib/systemd/system/emergency.service and set ExecStart to use /sbin/sulogin or /usr/sbin/sulogin: ExecStart=-/bin/sh -c "/sbin/sulogin; /usr/bin/systemctl --fail --no-block default" ' + remediation: 'Edit /usr/lib/systemd/system/rescue.service and /usr/lib/systemd/system/emergency.service and set ExecStart to use /sbin/sulogin or /usr/sbin/sulogin: ExecStart=-/bin/sh -c "/sbin/sulogin; /usr/bin/systemctl --fail --no-block default".' 
compliance: - - cis: ["1.5.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.4.3"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'f:/usr/lib/systemd/system/rescue.service -> r:ExecStart=-/bin/sh -c "/sbin/sulogin; /usr/bin/systemctl --fail --no-block default"|ExecStart=-/bin/sh -c "/usr/sbin/sulogin; /usr/bin/systemctl --fail --no-block default"' - 'f:/usr/lib/systemd/system/emergency.service -> r:ExecStart=-/bin/sh -c "/sbin/sulogin; /usr/bin/systemctl --fail --no-block default"|ExecStart=-/bin/sh -c "/usr/sbin/sulogin; /usr/bin/systemctl --fail --no-block default"' ############################################### - # 1.6 Additional Process Hardening + # 1.5 Additional Process Hardening ############################################### - # 1.6.1 Restrict Core Dumps (Scored) - - id: 4533 - title: "Ensure core dumps are restricted" - description: "A core dump is the memory of an executable program. It is generally used to determine why a program aborted. It can also be used to glean confidential information from a core file." - rationale: "Setting a hard limit on core dumps prevents users from overriding the soft variable. If core dumps are required, consider setting limits for user groups (see limits.conf). In addition, setting the fs.suid_dumpable variable to 0 will prevent setuid programs from dumping core." - remediation: "Add the following line to /etc/security/limits.conf or a /etc/security/limits.d/* file: * hard core 0. Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: fs.suid_dumpable = 0. Run the following command to set the active kernel parameter: # sysctl -w fs.suid_dumpable=0" + + # 1.5.1 Ensure core dumps are restricted. (Automated) + - id: 4531 + title: "Ensure core dumps are restricted." + description: "A core dump is the memory of an executable program. It is generally used to determine why a program aborted. It can also be used to glean confidential information from a core file. The system provides the ability to set a soft limit for core dumps, but this can be overridden by the user." + rationale: "Setting a hard limit on core dumps prevents users from overriding the soft variable. If core dumps are required, consider setting limits for user groups (see limits.conf(5)). In addition, setting the fs.suid_dumpable variable to 0 will prevent setuid programs from dumping core." + remediation: "Add the following line to /etc/security/limits.conf or a /etc/security/limits.d/* file: * hard core 0 Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: fs.suid_dumpable = 0 Run the following command to set the active kernel parameter: # sysctl -w fs.suid_dumpable=0 If systemd-coredump is installed: edit /etc/systemd/coredump.conf and add/modify the following lines: Storage=none ProcessSizeMax=0 Run the command: systemctl daemon-reload." 
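The active core dump settings can be confirmed directly; an illustrative sketch of the probes the rules below encode (the grep form assumes GNU grep):
    sysctl fs.suid_dumpable                               # expected output: fs.suid_dumpable = 0
    grep -RhE 'hard[[:space:]]+core[[:space:]]+0' /etc/security/limits.conf /etc/security/limits.d/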
compliance: - - cis: ["1.6.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:grep -Rh ^*[[:space:]]*hard[[:space:]][[:space:]]*core[[:space:]][[:space:]]* /etc/security/limits.conf /etc/security/limits.d -> r:hard\s*\t*core\s*\t*0$' + - cis: ["1.5.1"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all + rules: + - 'f:/etc/security/limits.conf -> r:hard\s*\t*core\s*\t*0$' + - 'd:/etc/security/limits.d/ -> r:\. -> r:hard\s*\t*core\s*\t*0$' - 'c:sysctl fs.suid_dumpable -> r:^\s*fs.suid_dumpable\s*=\s*0\s*$' - - 'c:grep -Rh fs\.suid_dumpable /etc/sysctl.conf /etc/sysctl.d -> r:^\s*fs.suid_dumpable\s*=\s*0\s*$' + - 'f:/etc/sysctl.conf -> r:^\s*fs.suid_dumpable\s*=\s*0\s*$' + - 'd:/etc/sysctl.d/ -> r:\. -> r:^\s*fs.suid_dumpable\s*=\s*0\s*$' - # 1.6.2 XD/NX enabled - - id: 4534 - title: "Ensure XD/NX support is enabled" + # 1.5.2 Ensure XD/NX support is enabled. (Automated) + - id: 4532 + title: "Ensure XD/NX support is enabled." description: "Recent processors in the x86 family support the ability to prevent code execution on a per memory page basis. Generically and on AMD processors, this ability is called No Execute (NX), while on Intel processors it is called Execute Disable (XD). This ability can help prevent exploitation of buffer overflow vulnerabilities and should be activated whenever possible. Extra steps must be taken to ensure that this protection is enabled, particularly on 32-bit x86 systems. Other processors, such as Itanium and POWER, have included such support since inception and the standard kernel for those platforms supports the feature." - rationale: "Enabling any feature that can protect against buffer overflow attacks enhances the security of the system." + rationale: "Enabling any feature that can protect against buffer overflow attacks enhances the security of the system. Note: Ensure your system supports the XD or NX bit and has PAE support before implementing this recommendation as this may prevent it from booting if these are not supported by your hardware." remediation: "On 32 bit systems install a kernel with PAE support, no installation is required on 64 bit systems: If necessary configure your bootloader to load the new kernel and reboot the system. You may need to enable NX or XD support in your bios." 
compliance: - - cis: ["1.6.2"] - - cis_csc: ["8.3"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: any + - cis: ["1.5.2"] + - cis_csc_v8: ["10.5"] + - cis_csc_v7: ["8.3"] + - nist_sp_800-53: ["SI-16"] + - pci_dss_v3.2.1: ["1.4"] + - soc_2: ["CC6.8"] + condition: all rules: - - 'c:sh -c "journalctl | grep \"protection: active\"" -> r:kernel:\s+NX \(Execute Disable\) protection: active' - - 'c:sh -c "[[ -n $(grep noexec[0-9]*=off /proc/cmdline) || -z $(grep -E -i \" (pae|nx) \" /proc/cpuinfo) || -n $(grep \"\\sNX\\s.*\\sprotection:\\s\" /var/log/dmesg | grep -v active) ]] && echo \"NX Protection is not active\"" -> r:^$' + - 'c:journalctl -> r:\s*protection:\s*\t*active' - # 1.6.3 Enable Randomized Virtual Memory Region Placement (Scored) - - id: 4535 - title: "Ensure address space layout randomization (ASLR) is enabled" + # 1.5.3 Ensure address space layout randomization (ASLR) is enabled. (Automated) + - id: 4533 + title: "Ensure address space layout randomization (ASLR) is enabled." description: "Address space layout randomization (ASLR) is an exploit mitigation technique which randomly arranges the address space of key data areas of a process." rationale: "Randomly placing virtual memory regions will make it difficult to write memory page exploits as the memory placement will be consistently shifting." - remediation: "Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: kernel.randomize_va_space = 2. Run the following command to set the active kernel parameter: # sysctl -w kernel.randomize_va_space=2" + remediation: "Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: kernel.randomize_va_space = 2 Run the following command to set the active kernel parameter: # sysctl -w kernel.randomize_va_space=2." compliance: - - cis: ["1.6.3"] - - cis_csc: ["8.3"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.5.3"] + - cis_csc_v8: ["10.5"] + - cis_csc_v7: ["8.3"] + - nist_sp_800-53: ["SI-16"] + - pci_dss_v3.2.1: ["1.4"] + - soc_2: ["CC6.8"] condition: all rules: - - 'c:grep -Rh ^kernel\.randomize_va_space /etc/sysctl.conf /etc/sysctl.d -> r:^\s*kernel.randomize_va_space\s*=\s*2$' - 'c:sysctl kernel.randomize_va_space -> r:^\s*kernel.randomize_va_space\s*=\s*2' + - 'f:/etc/sysctl.conf -> r:^\s*kernel.randomize_va_space\s*=\s*2$' + - 'd:/etc/sysctl.d -> r:\.* -> r:^\s*kernel.randomize_va_space\s*=\s*2$' - # 1.6.4 Disable prelink - - id: 4536 - title: "Ensure prelink is disabled" + # 1.5.4 Ensure prelink is not installed. (Automated) + - id: 4534 + title: "Ensure prelink is not installed." description: "prelink is a program that modifies ELF shared libraries and ELF dynamically linked binaries in such a way that the time needed for the dynamic linker to perform relocations at startup significantly decreases." rationale: "The prelinking feature can interfere with the operation of AIDE, because it changes binaries. Prelinking can also increase the vulnerability of the system if a malicious user is able to compromise a common library such as libc." - remediation: "Run the following commands to restore binaries to normal: # prelink -ua Run the following command to uninstall prelink: # yum remove prelink" - compliance: - - cis: ["1.6.4"] - - cis_csc: ["14.9"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all + remediation: "Run the following command to restore binaries to normal: # prelink -ua Run the following command to uninstall prelink: # yum remove prelink." 
+ references: + - "http://www.nsa.gov/research/selinux" + - "http://www.nsa.gov/research/selinux/list.shtml" + - "http://docs.fedoraproject.org/selinux-faq" + - "http://docs.fedoraproject.org/selinux-user-guide" + - "http://docs.fedoraproject.org/selinux-managing-" + - "http://www.selinuxproject.org" + compliance: + - cis: ["1.5.4"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["14.9"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.12.4.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: none rules: - - "c:rpm -q prelink -> r:package prelink is not installed" + - "c:rpm -qa prelink -> r:prelink" ############################################### - # 1.7 Configure SELinux + # 1.6 Configure SELinux ############################################### - # 1.7.1.1 Install SELinux - - id: 4537 - title: "Ensure SELinux is installed" - description: "SELinux provides Mandatory Access Controls." + # 1.6.1.1 Ensure SELinux is installed. (Automated) + - id: 4535 + title: "Ensure SELinux is installed." + description: "SELinux provides Mandatory Access Control." rationale: "Without a Mandatory Access Control system installed only the default Discretionary Access Control system will be available." - remediation: "Run the following command to install libselinux: # yum install libselinux" + remediation: "Run the following command to install SELinux: # yum install libselinux." compliance: - - cis: ["1.7.1.1"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.6.1.1"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:rpm -q libselinux -> r:libselinux-\S+' + - "c:rpm -q libselinux -> r:libselinux-" - # 1.7.1.2 SELinux not disabled - - id: 4538 - title: "Ensure SELinux is not disabled in bootloader configuration" - description: "Configure SELINUX to be enabled at boot time and verify that it has not been overwritten by the grub boot parameters." + # 1.6.1.2 Ensure SELinux is not disabled in bootloader configuration. (Automated) + - id: 4536 + title: "Ensure SELinux is not disabled in bootloader configuration." + description: "Configure SELINUX to be enabled at boot time and verify that it has not been overwritten by the grub boot parameters. Note: This recommendation is designed around the grub 2 bootloader, if LILO or another bootloader is in use in your environment enact equivalent settings." rationale: "SELinux must be enabled at boot time in your grub configuration to ensure that the controls it provides are not overridden." 
- remediation: 'Edit /etc/default/grub and remove all instances of selinux=0 and enforcing=0 from all CMDLINE_LINUX parameters: GRUB_CMDLINE_LINUX_DEFAULT="quiet" GRUB_CMDLINE_LINUX="" || Run the following command to update the grub2 configuration: grub2-mkconfig -o /boot/grub2/grub.cfg' - compliance: - - cis: ["1.7.1.2"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + remediation: 'Edit /etc/default/grub and remove all instances of selinux=0 and enforcing=0 from all CMDLINE_LINUX parameters: GRUB_CMDLINE_LINUX_DEFAULT="quiet" GRUB_CMDLINE_LINUX="" Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg.' + compliance: + - cis: ["1.6.1.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - - 'f:/boot/grub2/grub.cfg -> r:^\s*linux\.+selinux=0|linux\.+enforcing=0' + - 'f:/boot/grub2/grubenv -> r:kernelopts=\.*selinux=0|kernelopts=\.*enforcing=0' - # 1.7.1.3 Set selinux policy - - id: 4539 - title: "Ensure SELinux policy is configured" - description: "Configure SELinux to meet or exceed the default targeted policy, which constrains daemons and system software only." + # 1.6.1.3 Ensure SELinux policy is configured. (Automated) + - id: 4537 + title: "Ensure SELinux policy is configured." + description: "Configure SELinux to meet or exceed the default targeted policy, which constrains daemons and system software only. Note: If your organization requires stricter policies, ensure that they are set in the /etc/selinux/config file." rationale: "Security configuration requirements vary from site to site. Some sites may mandate a policy that is stricter than the default policy, which is perfectly acceptable. This item is intended to ensure that at least the default recommendations are met." - remediation: "Edit the /etc/selinux/config file to set the SELINUXTYPE parameter: SELINUXTYPE=targeted" + remediation: "Edit the /etc/selinux/config file to set the SELINUXTYPE parameter: SELINUXTYPE=targeted." 
compliance: - - cis: ["1.7.1.3"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.6.1.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:sestatus -> r:^Loaded policy name:\s*\t*targeted$|^Loaded policy name:\s*\t*mls' + - 'c:sestatus -> r:^Loaded policy name:\s+targeted$|^Loaded policy name:\s+mls$' - 'f:/etc/selinux/config -> r:^\s*SELINUXTYPE\s*=\s*targeted|^\s*SELINUXTYPE\s*=\s*mls' - # 1.7.1.4 Set selinux mode - - id: 4540 - title: "Ensure the SELinux mode is enforcing or permissive" - description: "SELinux can run in one of three modes: disabled, permissive, or enforcing: Enforcing - Is the default, and recommended, mode of operation; in enforcing mode SELinux operates normally, enforcing the loaded security policy on the entire system. Permissive - The system acts as if SELinux is enforcing the loaded security policy, including labeling objects and emitting access denial entries in the logs, but it does not actually deny any operations. While not recommended for production systems, permissive mode can be helpful for SELinux policy development. Disabled -Is strongly discouraged; not only does the system avoid enforcing the SELinux policy, it also avoids labeling any persistent objects such as files, making it difficult to enable SELinux in the future" - rationale: "Running SELinux in disabled modeis strongly discouraged; not only does the system avoid enforcing the SELinux policy, it also avoids labeling any persistent objects such as files, making it difficult to enable SELinux in the future." - remediation: "Run one of the following commands to set SELinux's running mode: To set SELinux mode to Enforcing: # setenforce 1 OR To set SELinux mode to Permissive: # setenforce 0 Edit the /etc/selinux/config file to set the SELINUX parameter: For Enforcing mode: SELINUX=enforcing OR For Permissive mode: SELINUX=permissive" + # 1.6.1.4 Ensure the SELinux mode is enforcing or permissive. (Automated) + - id: 4538 + title: "Ensure the SELinux mode is enforcing or permissive." + description: "SELinux can run in one of three modes: disabled, permissive, or enforcing: - Enforcing - Is the default, and recommended, mode of operation; in enforcing mode SELinux operates normally, enforcing the loaded security policy on the entire system. - Permissive - The system acts as if SELinux is enforcing the loaded security policy, including labeling objects and emitting access denial entries in the logs, but it does not actually deny any operations. While not recommended for production systems, permissive mode can be helpful for SELinux policy development. - Disabled - Is strongly discouraged; not only does the system avoid enforcing the SELinux policy, it also avoids labeling any persistent objects such as files, making it difficult to enable SELinux in the future Note: you can set individual domains to permissive mode while the system runs in enforcing mode. For example, to make the httpd_t domain permissive: # semanage permissive -a httpd_t." 
+    rationale: "Running SELinux in disabled mode is strongly discouraged; not only does the system avoid enforcing the SELinux policy, it also avoids labeling any persistent objects such as files, making it difficult to enable SELinux in the future."
+    remediation: "Run one of the following commands to set SELinux's running mode: To set SELinux mode to Enforcing: # setenforce 1 OR To set SELinux mode to Permissive: # setenforce 0 Edit the /etc/selinux/config file to set the SELINUX parameter: For Enforcing mode: SELINUX=enforcing OR For Permissive mode: SELINUX=permissive."
+    references:
+      - "https://access.redhat.com/documentation/en-us/SELinux-mode"
+    compliance:
+      - cis: ["1.6.1.4"]
+      - cis_csc_v8: ["3.3"]
+      - cis_csc_v7: ["14.6"]
+      - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"]
+      - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"]
+      - iso_27001-2013: ["A.9.1.1"]
+      - nist_sp_800-53: ["AC-5", "AC-6"]
+      - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"]
+      - pci_dss_v4.0: ["1.3.1", "7.1"]
+      - soc_2: ["CC5.2", "CC6.1"]
+    condition: all
+    rules:
+      - 'c:sestatus -> r:^SELinux status:\s+enabled$'
+      - 'c:sestatus -> r:^Current mode:\s+enforcing$'
+      - 'c:sestatus -> r:^Mode from config file:\s+enforcing$'
+      - 'f:/etc/selinux/config -> r:^\s*SELINUX\s*=\s*enforcing'
+      - "c:getenforce -> r:^Enforcing|^Permissive"
+
+  # 1.6.1.5 Ensure the SELinux mode is enforcing. (Automated)
+  - id: 4539
+    title: "Ensure the SELinux mode is enforcing."
+    description: "SELinux can run in one of three modes: disabled, permissive, or enforcing: - Enforcing - Is the default, and recommended, mode of operation; in enforcing mode SELinux operates normally, enforcing the loaded security policy on the entire system. - Permissive - The system acts as if SELinux is enforcing the loaded security policy, including labeling objects and emitting access denial entries in the logs, but it does not actually deny any operations. While not recommended for production systems, permissive mode can be helpful for SELinux policy development. - Disabled - Is strongly discouraged; not only does the system avoid enforcing the SELinux policy, it also avoids labeling any persistent objects such as files, making it difficult to enable SELinux in the future Note: you can set individual domains to permissive mode while the system runs in enforcing mode. For example, to make the httpd_t domain permissive: # semanage permissive -a httpd_t."
+    rationale: "Running SELinux in disabled mode, the system not only avoids enforcing the SELinux policy, it also avoids labeling any persistent objects such as files, making it difficult to enable SELinux in the future. Running SELinux in Permissive mode, though helpful for developing SELinux policy, only logs access denial entries, but does not deny any operations."
+    remediation: "Run the following command to set SELinux's running mode: # setenforce 1 Edit the /etc/selinux/config file to set the SELINUX parameter: For Enforcing mode: SELINUX=enforcing."
+ references: + - "https://access.redhat.com/documentation/en-us/selinux-mode" compliance: - - cis: ["1.7.1.4"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.6.1.5"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - "c:getenforce -> r:^Enforcing|^Permissive" - - "f:/etc/selinux/config -> r:^SELINUX=enforcing|^SELINUX=permissive" + - 'f:/etc/selinux/config -> r:^\s*SELINUX\s*=\s*enforcing' + - "c:getenforce -> r:^Enforcing" - # 1.7.1.6 Ensure no unconfined services exist - - id: 4541 - title: "Ensure no unconfined services exist" - description: "Unconfined processes run in unconfined domains" - rationale: "For unconfined processes, SELinux policy rules are applied, but policy rules exist that allow processes running in unconfined domains almost all access. Processes running in unconfined domains fall back to using DAC rules exclusively. If an unconfined process is compromised, SELinux does not prevent an attacker from gaining access to system resources and data, but of course, DAC rules are still used. SELinux is a security enhancement on top of DAC rules – it does not replace them" + # 1.6.1.6 Ensure no unconfined services exist. (Automated) + - id: 4540 + title: "Ensure no unconfined services exist." + description: "Unconfined processes run in unconfined domains Note: Occasionally certain daemons such as backup or centralized management software may require running unconfined. Any such software should be carefully analyzed and documented before such an exception is made." + rationale: "For unconfined processes, SELinux policy rules are applied, but policy rules exist that allow processes running in unconfined domains almost all access. Processes running in unconfined domains fall back to using DAC rules exclusively. If an unconfined process is compromised, SELinux does not prevent an attacker from gaining access to system resources and data, but of course, DAC rules are still used. SELinux is a security enhancement on top of DAC rules - it does not replace them." remediation: "Investigate any unconfined processes found during the audit action. They may need to have an existing security context assigned to them or a policy built for them." compliance: - - cis: ["1.7.1.6"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.6.1.6"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.13.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - "c:ps -eZ -> r:unconfined_service_t" - # 1.7.1.7 Remove SETroubleshoot - - id: 4542 - title: "Ensure SETroubleshoot is not installed" + # 1.6.1.7 Ensure SETroubleshoot is not installed. (Automated) + - id: 4541 + title: "Ensure SETroubleshoot is not installed." 
description: "The SETroubleshoot service notifies desktop users of SELinux denials through a user-friendly interface. The service provides important information around configuration errors, unauthorized intrusions, and other potential errors." rationale: "The SETroubleshoot service is an unnecessary daemon to have running on a server, especially if X Windows is disabled." - remediation: "Run the following command to uninstall setroubleshoot: # yum remove setroubleshoot" - compliance: - - cis: ["1.7.1.7"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all + remediation: "Run the following command to Uninstall setroubleshoot: # yum remove setroubleshoot." + compliance: + - cis: ["1.6.1.7"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.9.1.1"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: none rules: - - "c:rpm -q setroubleshoot -> r:package setroubleshoot is not installed" + - "c:rpm -qa setroubleshoot -> r:setroubleshoot" - # 1.7.1.8 Disable MCS Translation service mcstrans - - id: 4543 - title: "Ensure the MCS Translation Service (mcstrans) is not installed" - description: "The mcstransd daemon provides category label information to client processes requesting information. The label translations are defined in /etc/selinux/targeted/setrans.conf" + # 1.6.1.8 Ensure the MCS Translation Service (mcstrans) is not installed. (Automated) + - id: 4542 + title: "Ensure the MCS Translation Service (mcstrans) is not installed." + description: "The mcstransd daemon provides category label information to client processes requesting information. The label translations are defined in /etc/selinux/targeted/setrans.conf." rationale: "Since this service is not used very often, remove it to reduce the amount of potentially vulnerable code running on the system." - remediation: "Run the following command to uninstall mcstrans: # yum remove mcstrans" - compliance: - - cis: ["1.7.1.8"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all + remediation: "Run the following command to uninstall mcstrans: # yum remove mcstrans." + compliance: + - cis: ["1.6.1.8"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: none rules: - - "c:rpm -q mcstrans -> r:package mcstrans is not installed" + - "c:rpm -qa mcstrans -> r:mcstrans" ############################################### - # 1.8 Warning Banners + # 1.7 Warning Banners ############################################### - # 1.8.1.1 Configure message of the day (Scored) - - id: 4544 - title: "Ensure message of the day is configured properly" - description: "The contents of the /etc/motd file are displayed to users after login and function as a message of the day for authenticated users. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. 
If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version" + + # 1.7.1 Ensure message of the day is configured properly. (Automated) + - id: 4543 + title: "Ensure message of the day is configured properly." + description: "The contents of the /etc/motd file are displayed to users after login and function as a message of the day for authenticated users. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version." rationale: 'Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. Displaying OS and patch level information in login banners also has the side effect of providing detailed system information to attackers attempting to target specific exploits of a system. Authorized users can easily get this information by running the " uname -a " command once they have logged in.' - remediation: "Edit the /etc/motd file with the appropriate contents according to your site policy, remove any instances of \\m, \\r, \\s, \\v or references to the OS platform OR If the motd is not used, this file can be removed. Run the following command to remove the motd file: # rm /etc/motd" - compliance: - - cis: ["1.8.1.1"] - - cis_csc: ["5.1"] - - pci_dss: ["7.1"] - - tsc: ["CC6.4"] + remediation: "Edit the /etc/motd file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform OR If the motd is not used, this file can be removed. Run the following command to remove the motd file: # rm /etc/motd." + compliance: + - cis: ["1.7.1"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: none rules: - 'f:/etc/motd -> r:\\v|\\r|\\m|\\s' - # 1.8.1.2 Configure local login warning banner (Not Scored) - - id: 4545 - title: "Ensure local login warning banner is configured properly" - description: "The contents of the /etc/issue file are displayed to users prior to login for local terminals. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version -or the operating system's name." + # 1.7.2 Ensure local login warning banner is configured properly. (Automated) + - id: 4544 + title: "Ensure local login warning banner is configured properly." 
+ description: "The contents of the /etc/issue file are displayed to users prior to login for local terminals. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version - or the operating system's name." rationale: 'Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. Displaying OS and patch level information in login banners also has the side effect of providing detailed system information to attackers attempting to target specific exploits of a system. Authorized users can easily get this information by running the " uname -a " command once they have logged in.' - remediation: "Edit the /etc/issue file with the appropriate contents according to your site policy, remove any instances of \\m, \\r, \\s, or \\v: # echo \"Authorized uses only. All activity may be monitored and reported.\" > /etc/issue" - compliance: - - cis: ["1.8.1.2"] - - cis_csc: ["5.1"] - - pci_dss: ["7.1"] - - tsc: ["CC6.4"] + remediation: "Edit the /etc/issue file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform # echo \"Authorized uses only. All activity may be monitored and reported.\" > /etc/issue." + compliance: + - cis: ["1.7.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: none rules: - 'f:/etc/issue -> r:\\v|\\r|\\m|\\s' - # 1.8.1.3 Configure remote login warning banner (Not Scored) - - id: 4546 - title: "Ensure remote login warning banner is configured properly" - description: "The contents of the /etc/issue.net file are displayed to users prior to login for remote connections from configured services. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version" + # 1.7.3 Ensure remote login warning banner is configured properly. (Automated) + - id: 4545 + title: "Ensure remote login warning banner is configured properly." + description: "The contents of the /etc/issue.net file are displayed to users prior to login for remote connections from configured services. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. 
If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version." rationale: 'Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. Displaying OS and patch level information in login banners also has the side effect of providing detailed system information to attackers attempting to target specific exploits of a system. Authorized users can easily get this information by running the " uname -a " command once they have logged in.' - remediation: "Edit the /etc/issue.net file with the appropriate contents according to your site policy, remove any instances of \\m, \\r, \\s, or \\v: # echo \"Authorized uses only. All activity may be monitored and reported.\" > /etc/issue.net" - compliance: - - cis: ["1.8.1.3"] - - cis_csc: ["5.1"] - - pci_dss: ["7.1"] - - tsc: ["CC6.4"] + remediation: "Edit the /etc/issue.net file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform # echo \"Authorized uses only. All activity may be monitored and reported.\" > /etc/issue.net." + compliance: + - cis: ["1.7.3"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: none rules: - 'f:/etc/issue.net -> r:\\v|\\r|\\m|\\s' - # 1.8.1.4 Configure /etc/motd permissions (Not Scored) - - id: 4547 - title: "Ensure permissions on /etc/motd are configured" + # 1.7.4 Ensure permissions on /etc/motd are configured. (Automated) + - id: 4546 + title: "Ensure permissions on /etc/motd are configured." description: "The contents of the /etc/motd file are displayed to users after login and function as a message of the day for authenticated users." rationale: "If the /etc/motd file does not have the correct ownership it could be modified by unauthorized users with incorrect or misleading information." - remediation: "Run the following commands to set permissions on /etc/motd: # chown root:root /etc/motd # chmod 644 /etc/motd" + remediation: "Run the following commands to set permissions on /etc/motd : # chown root:root /etc/motd # chmod u-x,go-wx /etc/motd." 
compliance: - - cis: ["1.8.1.4"] - - cis_csc: ["14.6"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["1.7.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/motd -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 1.8.1.5 Configure /etc/issue permissions (Scored) - - id: 4548 - title: "Ensure permissions on /etc/issue are configured" + # 1.7.5 Ensure permissions on /etc/issue are configured. (Automated) + - id: 4547 + title: "Ensure permissions on /etc/issue are configured." description: "The contents of the /etc/issue file are displayed to users prior to login for local terminals." rationale: "If the /etc/issue file does not have the correct ownership it could be modified by unauthorized users with incorrect or misleading information." - remediation: "Run the following commands to set permissions on /etc/issue: # chown root:root /etc/issue # chmod 644 /etc/issue" + remediation: "Run the following commands to set permissions on /etc/issue : # chown root:root /etc/issue # chmod u-x,go-wx /etc/issue." compliance: - - cis: ["1.8.1.5"] - - cis_csc: ["14.6"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["1.7.5"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/issue -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 1.8.1.6 Configure /etc/issue.net permissions (Not Scored) - - id: 4549 - title: "Ensure permissions on /etc/issue.net are configured" + + # 1.7.6 Ensure permissions on /etc/issue.net are configured. (Automated) + - id: 4548 + title: "Ensure permissions on /etc/issue.net are configured." description: "The contents of the /etc/issue.net file are displayed to users prior to login for remote connections from configured services." rationale: "If the /etc/issue.net file does not have the correct ownership it could be modified by unauthorized users with incorrect or misleading information." - remediation: "Run the following commands to set permissions on /etc/issue.net: # chown root:root /etc/issue.net # chmod 644 /etc/issue.net" + remediation: "Run the following commands to set permissions on /etc/issue.net : # chown root:root /etc/issue.net # chmod u-x,go-wx /etc/issue.net." 
compliance: - - cis: ["1.8.1.6"] - - cis_csc: ["14.6"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["1.7.6"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/issue.net -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 1.9 Ensure updates, patches, and additional security software are installed - - id: 4550 - title: "Ensure updates, patches, and additional security software are installed" - description: "Periodically patches are released for included software either due to security flaws or to include additional functionality." - rationale: "Newer patches may contain security enhancements that would not be available through the latest full update. As a result, it is recommended that the latest software patches be used to take advantage of the latest functionality. As with any software installation, organizations need to determine if a given update meets their requirements and verify the compatibility and supportability of any additional software against the update revision that is selected." - remediation: "Use your package manager to update all packages on the system according to site policy. The following command will install all available packages # yum update --security Note: The audit and remediation here only cover security updates. Non-security updates can be audited with the following command and comparing the output against site policy: # yum check-update" + # 1.8.1 Ensure GNOME Display Manager is removed. (Manual) + - id: 4549 + title: "Ensure GNOME Display Manager is removed." + description: "The GNOME Display Manager (GDM) is a program that manages graphical display servers and handles graphical user logins." + rationale: "If a Graphical User Interface (GUI) is not required, it should be removed to reduce the attack surface of the system." + impact: "Removing the GNOME Display manager will remove the GUI from the system." + remediation: "Run the following command to remove the gdm package # yum remove gdm." + references: + - "https://wiki.gnome.org/Projects/GDM" compliance: - - cis: ["1.9"] - - cis_csc: ["3.4", "3.5"] - - pci_dss: ["5.2"] - - nist_800_53: ["AU.6", "SI.4"] - - gpg_13: ["4.2"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["A1.2"] + - cis: ["1.8.1"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:yum check-update --security -> r:No packages needed for security" + - "c:rpm -q gdm -> r:is not installed" - # 1.10 Ensure GDM login banner is configured (Scored) + # 1.8.2 Ensure GDM login banner is configured. (Automated) + - id: 4550 + title: "Ensure GDM login banner is configured." + description: "GDM is the GNOME Display Manager which handles graphical login for GNOME based systems." 
+ rationale: "Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. Note: If a graphical login is not required, it should be removed to reduce the attack surface of the system." + remediation: "Edit or create the file /etc/dconf/profile/gdm and add the following: user-db:user system-db:gdm file-db:/usr/share/gdm/greeter-dconf-defaults Edit or create the file /etc/dconf/db/gdm.d/ and add the following: (This is typically /etc/dconf/db/gdm.d/01-banner-message) [org/gnome/login-screen] banner-message-enable=true banner-message-text='' Example Banner Text: 'Authorized users only. All activity may be monitored and reported.' Run the following command to update the system databases: # dconf update." + compliance: + - cis: ["1.8.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all + rules: + - "f:/etc/dconf/profile/gdm -> r:user-db:user && r:system-db:gdm && r:file-db:/usr/share/gdm/greeter-dconf-defaults" + - "f:/etc/dconf/db/gdm.d/01-banner-message -> r:[org/gnome/login-screen]" + - "f:/etc/dconf/db/gdm.d/01-banner-message -> r:banner-message-enable=true" + - "f:/etc/dconf/db/gdm.d/01-banner-message -> r:banner-message-text=" + + # 1.8.3 Ensure last logged in user display is disabled. (Automated) - id: 4551 - title: "Ensure GDM login banner is configured" + title: "Ensure last logged in user display is disabled." description: "GDM is the GNOME Display Manager which handles graphical login for GNOME based systems." - rationale: "If a graphical login is not required, it should be removed to reduce the attack surface of the system. If a graphical login is required, last logged in user display should be disabled, and a warning banner should be configured. Displaying the last logged in user eliminates half of the Userid/Password equation that an unauthorized person would need to log on. Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place." - remediation: "Run the following command to remove gdm: # yum remove gdm OR If GDM is required: Edit or create the file /etc/dconf/profile/gdm and add the following: user-db:user system-db:gdm file-db:/usr/share/gdm/greeter-dconf-defaults Edit or create the file /etc/dconf/db/gdm.d/ and add the following: (This is typically /etc/dconf/db/gdm.d/01-banner-message) [org/gnome/login-screen] banner-message-enable=true banner-message-text='' Example Banner Text: 'Authorized uses only. All activity may be monitored and reported. 'Edit or create the file /etc/dconf/db/gdm.d/and add the following: (This is typically /etc/dconf/db/gdm.d/00-login-screen) [org/gnome/login-screen] # Do not show the user list disable-user-list=true Run the following command to update the system databases: # dconf update" + rationale: "Displaying the last logged in user eliminates half of the Userid/Password equation that an unauthorized person would need to log on. 
Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. Notes: - - If a graphical login is not required, it should be removed to reduce the attack surface of the system. If a different GUI login service is in use and required on the system, consult your documentation to disable displaying the last logged on user." + remediation: "Edit or create the file /etc/dconf/profile/gdm and add the following: user-db:user system-db:gdm file-db:/usr/share/gdm/greeter-dconf-defaults Edit or create the file /etc/dconf/db/gdm.d/ and add the following: (This is typically /etc/dconf/db/gdm.d/00-login-screen) [org/gnome/login-screen] # Do not show the user list disable-user-list=true Run the following command to update the system databases: # dconf update." compliance: - - cis: ["1.10"] - - cis_csc: ["5.1"] - - pci_dss: ["7.1"] - - tsc: ["CC6.4"] + - cis: ["1.8.3"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - "f:/etc/dconf/profile/gdm -> r:user-db:user" - - "f:/etc/dconf/profile/gdm -> r:system-db:gdm" - - "f:/etc/dconf/profile/gdm -> r:file-db:/usr/share/gdm/greeter-dconf-defaults" - - 'd:/etc/dconf/db/gdm.d/ -> r:\. -> r:banner-message-enable=true' - - 'd:/etc/dconf/db/gdm.d/ -> r:\. -> r:banner-message-text=\.+' - - 'd:/etc/dconf/db/gdm.d/ -> r:\. -> r:disable-user-list=true' - - ############################################### - # 2 OS Services - ############################################### - ############################################### - # 2.1 inetd Services - ############################################### + - "f:/etc/dconf/profile/gdm -> r:user-db:user && r:system-db:gdm && r:file-db:/usr/share/gdm/greeter-dconf-defaults" + - "f:/etc/dconf/db/gdm.d/01-banner-message -> r:[org/gnome/login-screen]" + - "f:/etc/dconf/db/gdm.d/01-banner-message -> r:disable-user-list=true" - # 2.1.2 Ensure xinetd is not installed (Automated) + # 1.8.4 Ensure XDCMP is not enabled. (Automated) - id: 4552 - title: "Ensure daytime services are not enabled" - description: "The eXtended InterNET Daemon ( xinetd ) is an open source super daemon that replaced the original inetd daemon. The xinetd daemon listens for well known services and dispatches the appropriate daemon to properly respond to service requests." - rationale: "If there are no xinetd services required, it is recommended that the package be removed to reduce the attack surface area of the system. Note: If an xinetd service or services are required, ensure that any xinetd service not required is stopped and disabled" - remediation: "Run the following command to remove xinetd: # yum remove xinetd" - compliance: - - cis: ["2.1.2"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: all + title: "Ensure XDCMP is not enabled." + description: "X Display Manager Control Protocol (XDMCP) is designed to provide authenticated access to display management services for remote displays." 
+    rationale: "XDMCP is inherently insecure. - XDMCP is not a ciphered protocol. This may allow an attacker to capture keystrokes entered by a user - XDMCP is vulnerable to man-in-the-middle attacks. This may allow an attacker to steal the credentials of legitimate users by impersonating the XDMCP server."
+    remediation: "Edit the file /etc/gdm/custom.conf and remove the line Enable=true."
+    references:
+      - "https://help.gnome.org/admin/gdm/2.32/configuration.html.en"
+    compliance:
+      - cis: ["1.8.4"]
+      - cis_csc_v8: ["4.8"]
+      - cis_csc_v7: ["9.2"]
+      - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"]
+      - iso_27001-2013: ["A.13.1.3"]
+      - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"]
+      - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"]
+      - soc_2: ["CC6.3", "CC6.6"]
+    condition: none
     rules:
-      - "c:rpm -q xinetd -> r:package xinetd is not installed"
+      - "f:/etc/gdm/custom.conf -> r:Enable=true"
+
+  # 1.9 Ensure updates, patches, and additional security software are installed. (Manual) - Not Implemented
 
   ###############################################
-  # 2.2 Remove Legacy Services
+  # 2.1 Remove Legacy Services
   ###############################################
 
-  # 2.2.1.1 Ensure time synchronization is in use (Manual)
+  # 2.1.1 Ensure xinetd is not installed. (Automated)
   - id: 4553
-    title: "Ensure time synchronization is in use"
-    description: "System time should be synchronized between all systems in an environment. This is typically done by establishing an authoritative time server or set of servers and having all systems synchronize their clocks to them."
+    title: "Ensure xinetd is not installed."
+    description: "The eXtended InterNET Daemon (xinetd) is an open source super daemon that replaced the original inetd daemon. The xinetd daemon listens for well known services and dispatches the appropriate daemon to properly respond to service requests."
+    rationale: "If there are no xinetd services required, it is recommended that the package be removed to reduce the attack surface area of the system. Note: If an xinetd service or services are required, ensure that any xinetd service not required is stopped and disabled."
+    remediation: "Run the following command to remove xinetd: # yum remove xinetd."
+    compliance:
+      - cis: ["2.1.1"]
+      - cis_csc_v8: ["4.8"]
+      - cis_csc_v7: ["2.6", "9.2"]
+      - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"]
+      - iso_27001-2013: ["A.12.5.1", "A.12.6.2", "A.13.1.3"]
+      - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"]
+      - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"]
+      - soc_2: ["CC6.3", "CC6.6"]
+    condition: none
+    rules:
+      - "c:rpm -qa xinetd -> r:xinetd"
+
+  # 2.2.1.1 Ensure time synchronization is in use. (Manual)
+  - id: 4554
+    title: "Ensure time synchronization is in use."
+    description: "System time should be synchronized between all systems in an environment. This is typically done by establishing an authoritative time server or set of servers and having all systems synchronize their clocks to them. Note: - If another method for time synchronization is being used, this section may be skipped. - Only one time synchronization package should be installed."
     rationale: "Time synchronization is important to support time sensitive security mechanisms like Kerberos and also ensures log files have consistent time records across the enterprise, which aids in forensic investigations."
- remediation: "On systems where host based time synchronization is not available, install chrony or NTP: to install chrony run the following command: # yum install chrony OR to install ntp: run the following command: # yum install ntp" + remediation: "Run One of the following commands to install chrony or NTP: To install chrony, run the following command: # yum install chrony OR To install ntp, run the following command: # yum install ntp Note: On systems where host based time synchronization is available consult your virtualization software documentation and setup host based synchronization." compliance: - cis: ["2.2.1.1"] - - cis_csc: ["6.1"] - - pci_dss: ["10.4"] - - nist_800_53: ["AU.8"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: any + - cis_csc_v8: ["8.4"] + - cis_csc_v7: ["6.1"] + - cmmc_v2.0: ["AU.L2-3.3.7"] + - iso_27001-2013: ["A.12.4.4"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.4"] + - pci_dss_v4.0: ["10.6", "10.6.1", "10.6.2", "10.6.3"] + - soc_2: ["CC4.1", "CC5.2"] + condition: all rules: - - "not c:rpm -q ntp -> r:^package ntp is not installed" - - "not c:rpm -q chrony -> r:^package chrony is not installed" + - "c:rpm -q chrony -> r:^chrony-" + - "c:rpm -q ntp -> r:ntp-" - # 2.2.1.2 Ensure chrony is configured (Automated) - - id: 4554 - title: "Ensure chrony is configured" + # 2.2.1.2 Ensure chrony is configured. (Automated) + - id: 4555 + title: "Ensure chrony is configured." description: "chrony is a daemon which implements the Network Time Protocol (NTP) and is designed to synchronize system clocks across a variety of systems and use a source that is highly accurate. More information on chrony can be found at http://chrony.tuxfamily.org/. chrony can be configured to be a client and/or a server." rationale: "If chrony is in use on the system proper configuration is vital to ensuring time synchronization is working properly. Note: This recommendation only applies if chrony is in use on the system." - remediation: '1) Add or edit server or pool lines to /etc/chrony.conf as appropriate: server . 2) Add or edit the OPTIONS in /etc/sysconfig/chronyd to include ''-u chrony'':OPTIONS="-u chrony"' + remediation: 'Add or edit server or pool lines to /etc/chrony.conf as appropriate: server Add or edit the OPTIONS in /etc/sysconfig/chronyd to include ''-u chrony'': OPTIONS="-u chrony".' compliance: - - cis: ["2.2.1.3"] - - cis_csc: ["6.1"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["2.2.1.2"] + - cis_csc_v8: ["8.4"] + - cis_csc_v7: ["6.1"] + - cmmc_v2.0: ["AU.L2-3.3.7"] + - iso_27001-2013: ["A.12.4.4"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.4"] + - pci_dss_v4.0: ["10.6", "10.6.1", "10.6.2", "10.6.3"] + - soc_2: ["CC4.1", "CC5.2"] condition: all rules: - - 'f:/etc/chrony.conf -> r:^server\.+$|^pool\.+$' - - 'f:/etc/sysconfig/chronyd -> r:^OPTIONS\s*=\s* && r:-u chrony' + - 'f:/etc/chrony.conf -> r:^server\s*\t*\.+|^pool\s*\t*\.+' + - 'not c:ps -ef -> r:\.+/chronyd\s*\t*$ && !r:^\s*\t*chrony\s*\t*' + - 'f:/etc/sysconfig/chronyd -> r:^\s*\t*OPTIONS\.*-u chrony' - # 2.2.1.3 Ensure ntp is configured (Automated) - - id: 4555 - title: "Ensure ntp is configured" - description: "ntp is a daemon which implements the Network Time Protocol (NTP). It is designed to synchronize system clocks across a variety of systems and use a source that is highly accurate. More information on NTP can be found at https://www.ntp.org. ntp can be configured to be a client and/or a server." + # 2.2.1.3 Ensure ntp is configured. 
(Automated) + - id: 4556 + title: "Ensure ntp is configured." + description: "ntp is a daemon which implements the Network Time Protocol (NTP). It is designed to synchronize system clocks across a variety of systems and use a source that is highly accurate. More information on NTP can be found at http://www.ntp.org. ntp can be configured to be a client and/or a server. Note: This recommendation only applies if ntp is in use on the system." rationale: "If ntp is in use on the system proper configuration is vital to ensuring time synchronization is working properly." - remediation: "1) Add or edit restrict lines in /etc/ntp.conf to match the following: - restrict -4 default kod nomodify notrap nopeer noquery and - restrict -4 default kod nomodify notrap nopeer noquery. 2) Add or edit server or pool lines to /etc/ntp.conf as appropriate: server . 3) Add or edit the OPTIONS in /etc/sysconfig/ntpd to include ' -u ntp:ntp ': - OPTIONS='-u ntp:ntp'" + remediation: 'Add or edit restrict lines in /etc/ntp.conf to match the following: restrict -4 default kod nomodify notrap nopeer noquery restrict -6 default kod nomodify notrap nopeer noquery Add or edit server or pool lines to /etc/ntp.conf as appropriate: server Add or edit the OPTIONS in /etc/sysconfig/ntpd to include ''-u ntp:ntp'': OPTIONS="-u ntp:ntp" Reload the systemd daemon: systemctl daemon-reload Enable and start the ntp service: systemctl --now enable ntpd.' compliance: - - cis: ["2.2.1.2"] - - cis_csc: ["6.1"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["2.2.1.3"] + - cis_csc_v8: ["8.4"] + - cis_csc_v7: ["6.1"] + - cmmc_v2.0: ["AU.L2-3.3.7"] + - iso_27001-2013: ["A.12.4.4"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.4"] + - pci_dss_v4.0: ["10.6", "10.6.1", "10.6.2", "10.6.3"] + - soc_2: ["CC4.1", "CC5.2"] condition: all rules: - - 'f:/etc/ntp.conf -> r:^restrict\s+-4\s+default|^restrict\s+default && r:\s+kod\s+ && r:\s+nomodify\s+ && r:\s+notrap\s+ && r:\s+nopeer\s+ && r:\s+noquery' - - 'f:/etc/ntp.conf -> r:^restrict\s+-6\s+default && r:\s+kod\s+ && r:\s+nomodify\s+ && r:\s+notrap\s+ && r:\s+nopeer\s+ && r:\s+noquery' - - 'f:/etc/ntp.conf -> r:^server\.+|^pool\.+' - - 'f:/etc/sysconfig/ntpd -> r:^OPTIONS\s*=\s* && r:-u ntp:ntp' - - 'f:/usr/lib/systemd/system/ntpd.service -> r:^Execstart\s*=\s*/usr/sbin/ntpd\s+-u\s+ntp:ntp' + - "f:/etc/ntp.conf" + - 'f:/etc/ntp.conf -> r:^restrict\s+-4\s+default|^restrict\s+default && r:\s+kod && r:\s+nomodify && r:\s+notrap && r:\s+nopeer && r:\s+noquery' + - 'f:/etc/ntp.conf -> r:^restrict\s+-6\s+default && r:\s+kod && r:\s+nomodify && r:\s+notrap && r:\s+nopeer && r:\s+noquery' - # 2.2.2 Remove X Windows (Scored) - - id: 4556 - title: " Ensure X11 Server components are not installed" + # 2.2.2 Ensure X11 Server components are not installed. (Automated) + - id: 4557 + title: "Ensure X11 Server components are not installed." description: "The X Window System provides a Graphical User Interface (GUI) where users can have multiple windows in which to run programs and various add on. The X Windows system is typically used on workstations where users login, but not on servers where users typically do not login." rationale: "Unless your organization specifically requires graphical login access via X Windows, remove it to reduce the potential attack surface." - remediation: "Run the following command to remove the X Windows System packages: # yum remove xorg-x11*" + impact: 'Many Linux systems run applications which require a Java runtime. 
Some Linux Java packages have a dependency on specific X Windows xorg-x11-fonts. One workaround to avoid this dependency is to use the "headless" Java packages for your specific Java runtime.' + remediation: "Run the following command to remove the X Windows Server packages: # yum remove xorg-x11-server*." compliance: - cis: ["2.2.2"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: none rules: - - "c:rpm -qa xorg-x11-server* -> r:^xorg-x11-server" + - "c:rpm -qa xorg-x11-server* -> r:xorg-x11-server*" - # 2.2.3 Ensure Avahi Server is not installed (Automated) - - id: 4557 - title: " Ensure Avahi Server is not installed" + # 2.2.3 Ensure Avahi Server is not installed. (Automated) + - id: 4558 + title: "Ensure Avahi Server is not installed." description: "Avahi is a free zeroconf implementation, including a system for multicast DNS/DNS-SD service discovery. Avahi allows programs to publish and discover services and hosts running on a local network with no specific configuration. For example, a user can plug a computer into a network and Avahi automatically finds printers to print to, files to look at and people to talk to, as well as network services running on the machine." rationale: "Automatic discovery of network services is not normally required for system functionality. It is recommended to remove this package to reduce the potential attack surface." - remediation: "Run the following commands to stop, mask and remove avahi-autoipd and avahi: # systemctl stop avahi-daemon.socket avahi-daemon.service; # yum remove avahi-autoipd avahi" + remediation: "Run the following commands to stop, mask and remove avahi-autoipd and avahi: # systemctl stop avahi-daemon.socket avahi-daemon.service # yum remove avahi-autoipd avahi." compliance: - cis: ["2.2.3"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: + - "c:rpm -q avahi -> r:^package avahi is not installed" - "c:rpm -q avahi-autoipd -> r:package avahi-autoipd is not installed" - - "c:rpm -q avahi -> r:pacakge avahi is not installed" - # 2.2.4 Ensure CUPS is not installed (Automated) - - id: 4558 - title: "Ensure CUPS is not installed" + # 2.2.4 Ensure CUPS is not installed. (Automated) + - id: 4559 + title: "Ensure CUPS is not installed." description: "The Common Unix Print System (CUPS) provides the ability to print to both local and network printers. A system running CUPS can also accept print jobs from remote systems and print them to local printers. It also provides a web based remote administration capability." - rationale: "If the system does not need to print jobs or accept print jobs from other systems, it is recommended that CUPS be removed to reduce the potential attack surface. Disabling CUPS will prevent printing from the system, a common task for workstation systems." 
- remediation: "Run the following command to remove cups: # yum remove cups" + rationale: "If the system does not need to print jobs or accept print jobs from other systems, it is recommended that CUPS be removed to reduce the potential attack surface. Note: Removing CUPS will prevent printing from the system." + impact: "Disabling CUPS will prevent printing from the system, a common task for workstation systems." + remediation: "Run the following command to remove cups: # yum remove cups." + references: + - "http://www.cups.org." compliance: - cis: ["2.2.4"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:rpm -q cups -> r:package cups is not installed" + - "c:rpm -q cups -> r:^package cups is not installed" - # 2.2.5 Ensure DHCP Server is not installed (Automated) - - id: 4559 - title: "Ensure DHCP Server is not installed" + # 2.2.5 Ensure DHCP Server is not installed. (Automated) + - id: 4560 + title: "Ensure DHCP Server is not installed." description: "The Dynamic Host Configuration Protocol (DHCP) is a service that allows machines to be dynamically assigned IP addresses." - rationale: "Unless a system is specifically set up to act as a DHCP server, it is recommended that this the dhcp package be removed to reduce the potential attack surface." - remediation: "Run the following command to remove dhcpd: # yum remove dhcp" + rationale: "Unless a system is specifically set up to act as a DHCP server, it is recommended that the dhcp package be removed to reduce the potential attack surface." + remediation: "Run the following command to remove dhcp: # yum remove dhcp." compliance: - cis: ["2.2.5"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - More detailed documentation on DHCP is available at https://www.isc.org/software/dhcp + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:rpm -q dhcp -> r:package dhcp is not installed" + - "c:rpm -q dhcp-server -> r:^package dhcp-server is not installed" - # 2.2.6 Ensure LDAP server is not installed (Automated) - - id: 4560 - title: "Ensure LDAP Server is not installed" + # 2.2.6 Ensure LDAP server is not installed. (Automated) + - id: 4561 + title: "Ensure LDAP server is not installed." description: "The Lightweight Directory Access Protocol (LDAP) was introduced as a replacement for NIS/YP. It is a service that provides a method for looking up information from a central database." rationale: "If the system will not need to act as an LDAP server, it is recommended that the software be removed to reduce the potential attack surface." - remediation: "Run the following command to remove slapd: # yum remove openldap-servers" + remediation: "Run the following command to remove openldap-servers: # yum remove openldap-servers." + references: + - "http://www.openldap.org." 
compliance: - cis: ["2.2.6"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - More detailed documentation on OpenLDAP is available at https://www.openldap.org + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:rpm -q openldap-servers -> r:package openldap-servers is not installed" + - "c:rpm -q openldap-servers -> r:^package openldap-servers is not installed" - # 2.2.7 Ensure nfs-utils is not installed or the nfs-server service is masked (Automated) - - id: 4561 - title: "Ensure nfs-utils is not installed or the nfs-server service is masked" - description: "The Network File System (NFS) is one of the first and most widely distributed file systems in the UNIX environment. It provides the ability for systems to mount file systems of other servers through the network." - rationale: "If the system does not require network shares, it is recommended that the nfs-utils package be removed to reduce the attack surface of the system." - remediation: "Run the following command to remove nfs-utils: # yum remove nfs-utils; OR if the nfs-package is required as a dependency: run the following command to stop and mask the nfs-server service: # systemctl --now mask nfs-server" + # 2.2.7 Ensure DNS Server is not installed. (Automated) + - id: 4562 + title: "Ensure DNS Server is not installed." + description: "The Domain Name System (DNS) is a hierarchical naming system that maps names to IP addresses for computers, services and other resources connected to a network." + rationale: "Unless a system is specifically designated to act as a DNS server, it is recommended that the package be removed to reduce the potential attack surface." + remediation: "Run the following command to remove bind: # yum remove bind." compliance: - cis: ["2.2.7"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: any + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "c:rpm -q nfs-utils -> r:package nfs-utils is not installed" - - "c:systemctl is-enabled nfs-server -> r:masked" + - "c:rpm -q bind -> r:^package bind is not installed" - # 2.2.8 Ensure rpcbind is not installed or the rpcbind services are masked (Automated) - - id: 4562 - title: "Ensure nfs-utils is not installed or the nfs-server service is masked" - description: "The rpcbind utility maps RPC services to the ports on which they listen. RPC processes notify rpcbind when they start, registering the ports they are listening on and the RPC program numbers they expect to serve. The client system then contacts rpcbind on the server with a particular RPC program number. The rpcbind service redirects the client to the proper port number so it can communicate with the requested service Portmapper is an RPC service, which always listens on tcp and udp 111, and is used to map other RPC services (such as nfs, nlockmgr, quotad, mountd, etc.) to their corresponding port number on the server. 
When a remote host makes an RPC call to that server, it first consults with portmap to determine where the RPC server is listening" - rationale: "A small request (~82 bytes via UDP) sent to the Portmapper generates a large response (7x to 28x amplification), which makes it a suitable tool for DDoS attacks. If rpcbind is not required, it is recommended that the rpcbind package be removed to reduce the attack surface of the system." - remediation: "Run the following command to remove nfs-utils: # yum remove nfs-utils; OR if the nfs-package is required as a dependency: run the following command to stop and mask the nfs-server service: # systemctl --now mask nfs-server" + # 2.2.8 Ensure FTP Server is not installed. (Automated) + - id: 4563 + title: "Ensure FTP Server is not installed." + description: "FTP (File Transfer Protocol) is a traditional and widely used standard tool for transferring files between a server and clients over a network, especially where no authentication is necessary (permits anonymous users to connect to a server)." + rationale: "FTP does not protect the confidentiality of data or authentication credentials. It is recommended SFTP be used if file transfer is required. Unless there is a need to run the system as a FTP server (for example, to allow anonymous downloads), it is recommended that the package be removed to reduce the potential attack surface. Note: Additional FTP servers also exist and should be removed if not required." + remediation: "Run the following command to remove vsftpd: # yum remove vsftpd." compliance: - cis: ["2.2.8"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: any + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "c:rpm -q rpcbind -> r:package rpcbind is not installed" - - "not c:systemctl is-enabled rpcbind rpcbind.socket -> r:enabled" + - "c:rpm -q vsftpd -> r:^package vsftpd is not installed" - # 2.2.9 Ensure DNS Server is not installed (Automate) - - id: 4563 - title: " Ensure DNS Server is not installed " - description: "The Domain Name System (DNS) is a hierarchical naming system that maps names to IP addresses for computers, services and other resources connected to a network." - rationale: "Unless a system is specifically designated to act as a DNS server, it is recommended that the package be removed to reduce the potential attack surface." - remediation: "Run the following command to disable named: # yum remove bind" + # 2.2.9 Ensure HTTP server is not installed. (Automated) + - id: 4564 + title: "Ensure HTTP server is not installed." + description: "HTTP or web servers provide the ability to host web site content." + rationale: "Unless there is a need to run the system as a web server, it is recommended that the package be removed to reduce the potential attack surface. Notes: - Several http servers exist. apache, apache2, lighttpd, and nginx are example packages that provide an HTTP server. - These and other packages should also be audited, and removed if not required." + remediation: "Run the following command to remove httpd: # yum remove httpd." 
compliance: - cis: ["2.2.9"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:rpm -q bind -> r:package bind is not installed" + - "c:rpm -q nginx -> r:^package nginx is not installed" + - "c:rpm -q httpd -> r:^package httpd is not installed" - # 2.2.10 Ensure FTP Server is not installed (Automated) - - id: 4564 - title: "Ensure FTP Server is not installed" - description: "FTP (File Transfer Protocol) is a traditional and widely used standard tool for transferring files between a server and clients over a network, especially where no authentication is necessary (permits anonymous users to connect to a server)" - rationale: "FTP does not protect the confidentiality of data or authentication credentials. It is recommended sftp be used if file transfer is required. Unless there is a need to run the system as a FTP server (for example, to allow anonymous downloads), it is recommended that the package be removed to reduce the potential attack surface." - remediation: "Run the following command to disable vsftpd: # yum remove vsftpd" + # 2.2.10 Ensure IMAP and POP3 server is not installed. (Automated) + - id: 4565 + title: "Ensure IMAP and POP3 server is not installed." + description: "dovecot is an open source IMAP and POP3 server for Linux based systems." + rationale: "Unless POP3 and/or IMAP servers are to be provided by this system, it is recommended that the package be removed to reduce the potential attack surface. Notes: - Several IMAP/POP3 servers exist and can use other service names. courier-imap and cyrus-imap are example services that provide a mail server. - These and other services should also be audited and the packages removed if not required." + remediation: "Run the following command to remove dovecot: # yum remove dovecot." compliance: - cis: ["2.2.10"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:rpm -q vsftpd -> r:package vsftpd is not installed" + - "c:rpm -q dovecot -> r:^package dovecot is not installed" + - "c:rpm -q cyrus-imapd -> r:^package cyrus-imapd is not installed" - # 2.2.11 Ensure HTTP server is not installed (Automated) - - id: 4565 - title: "Ensure HTTP server is not installed" - description: "HTTP or web servers provide the ability to host web site content." - rationale: "Unless there is a need to run the system as a web server, it is recommended that the package be removed to reduce the potential attack surface." - remediation: "Run the following command to disable httpd: # yum remove httpd" + # 2.2.11 Ensure Samba is not installed. (Automated) + - id: 4566 + title: "Ensure Samba is not installed." + description: "The Samba daemon allows system administrators to configure their Linux systems to share file systems and directories with Windows desktops. Samba will advertise the file systems and directories via the Server Message Block (SMB) protocol. 
Windows desktop users will be able to mount these directories and file systems as letter drives on their systems." + rationale: "If there is no need to mount directories and file systems to Windows systems, then this package can be removed to reduce the potential attack surface." + remediation: "Run the following command to remove samba: # yum remove samba." compliance: - cis: ["2.2.11"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:rpm -q httpd -> r:package httpd is not installed" + - "c:rpm -q samba -> r:^package samba is not installed" - # 2.2.12 Ensure IMAP and POP3 server is not installed (Automated) - - id: 4566 - title: "Ensure IMAP and POP3 server is not installed" - description: "dovecot is an open source IMAP and POP3 server for Linux based systems." - rationale: "Unless POP3 and/or IMAP servers are to be provided by this system, it is recommended that the package be removed to reduce the potential attack surface." - remediation: "Run the following command to disable dovecot: # yum remove dovecot" + # 2.2.12 Ensure HTTP Proxy Server is not installed. (Automated) + - id: 4567 + title: "Ensure HTTP Proxy Server is not installed." + description: "Squid is a standard proxy server used in many distributions and environments." + rationale: "Unless a system is specifically set up to act as a proxy server, it is recommended that the squid package be removed to reduce the potential attack surface. Note: Several HTTP proxy servers exist. These should be checked and removed unless required." + remediation: "Run the following command to remove the squid package: # yum remove squid." compliance: - cis: ["2.2.12"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:rpm -q dovecot -> r:package dovecot is not installed" + - "c:rpm -q squid -> r:^package squid is not installed" - # 2.2.13 Ensure Samba is not installed (Automated) - - id: 4567 - title: "Ensure Samba is not installed" - description: "The Samba daemon allows system administrators to configure their Linux systems to share file systems and directories with Windows desktops. Samba will advertise the file systems and directories via the Small Message Block (SMB) protocol. Windows desktop users will be able to mount these directories and file systems as letter drives on their systems." - rationale: "If there is no need to mount directories and file systems to Windows systems, then this package can be removed to reduce the potential attack surface." - remediation: "Run the following command to disable smb: # yum remove samba" + # 2.2.13 Ensure net-snmp is not installed. (Automated) + - id: 4568 + title: "Ensure net-snmp is not installed." + description: 'Simple Network Management Protocol (SNMP) is a widely used protocol for monitoring the health and welfare of network equipment, computer equipment and devices like UPSs. 
Net-SNMP is a suite of applications used to implement SNMPv1 (RFC 1157), SNMPv2 (RFCs 1901-1908), and SNMPv3 (RFCs 3411-3418) using both IPv4 and IPv6. Support for SNMPv2 classic (a.k.a. "SNMPv2 historic" - RFCs 1441-1452) was dropped with the 4.0 release of the UCD-snmp package. The Simple Network Management Protocol (SNMP) server is used to listen for SNMP commands from an SNMP management system, execute the commands or collect the information and then send results back to the requesting system.' + rationale: "The SNMP server can communicate using SNMPv1, which transmits data in the clear and does not require authentication to execute commands. SNMPv3 replaces the simple/clear text password sharing used in SNMPv2 with more securely encoded parameters. If the SNMP service is not required, the net-snmp package should be removed to reduce the attack surface of the system. Note: If SNMP is required: - The server should be configured for SNMP v3 only. User Authentication and Message Encryption should be configured. - If SNMP v2 is absolutely necessary, modify the community strings' values." + remediation: "Run the following command to remove net-snmp: # yum remove net-snmp." compliance: - cis: ["2.2.13"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6", "9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2", "A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:rpm -q samba -> r:package samba is not installed" + - "c:rpm -q net-snmp -> r:^package net-snmp is not installed" - # 2.2.14 Ensure HTTP Proxy Server is not installed (Automated) - - id: 4568 - title: "Ensure HTTP Proxy Server is not installed" - description: "Squid is a standard proxy server used in many distributions and environments." - rationale: "If there is no need for a proxy server, it is recommended that the squid package be removed to reduce the potential attack surface." - remediation: "Run the following command to disable squid: # yum remove squid" + # 2.2.14 Ensure NIS server is not installed. (Automated) + - id: 4569 + title: "Ensure NIS server is not installed." + description: "The ypserv package provides the Network Information Service (NIS). This service, formally known as Yellow Pages, is a client-server directory service protocol for distributing system configuration files. The NIS server is a collection of programs that allow for the distribution of configuration files." + rationale: "The NIS service is inherently an insecure system that has been vulnerable to DOS attacks, buffer overflows and has poor authentication for querying NIS maps. NIS generally has been replaced by such protocols as Lightweight Directory Access Protocol (LDAP). It is recommended that the ypserv package be removed, and if required a more secure service be used." + remediation: "Run the following command to remove ypserv: # yum remove ypserv."
compliance: - cis: ["2.2.14"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6", "9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2", "A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:rpm -q squid -> r:package squid is not installed" + - "c:rpm -q ypserv -> r:^package ypserv is not installed" - # 2.2.15 Ensure net-snmp is not installed (Automated) - - id: 4569 - title: "Ensure SNMP Server is not installed" - description: "The Simple Network Management Protocol (SNMP) server is used to listen for SNMP commands from an SNMP management system, execute the commands or collect the information and then send results back to the requesting system." - rationale: "The SNMP server can communicate using SNMPv1, which transmits data in the clear and does not require authentication to execute commands. SNMPv3replaces the simple/clear text password sharing used in SNMPv2with more securely encoded parameters. If the SNMP service is not required, the net-snmp package should be removed to reduce the attack surface of the system." - remediation: "Run the following command to disable snmpd: # # yum remove net-snmp" + # 2.2.15 Ensure telnet-server is not installed. (Automated) + - id: 4570 + title: "Ensure telnet-server is not installed." + description: "The telnet-server package contains the telnet daemon, which accepts connections from users from other systems via the telnet protocol." + rationale: "The telnet protocol is insecure and unencrypted. The use of an unencrypted transmission medium could allow a user with access to sniff network traffic the ability to steal credentials. The ssh package provides an encrypted session and stronger security." + remediation: "Run the following command to remove the telnet-server package: # yum remove telnet-server." compliance: - cis: ["2.2.15"] - - cis_csc: ["2.6", 9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6", "9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2", "A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:rpm -q net-snmp -> r:package net-snmp is not installed" + - "c:rpm -q telnet-server -> r:^package telnet-server is not installed" - # 2.2.16 Ensure mail transfer agent is configured for local-only mode (Automated) - - id: 4570 - title: "Ensure mail transfer agent is configured for local-only mode" + # 2.2.16 Ensure mail transfer agent is configured for local-only mode. (Automated) + - id: 4571 + title: "Ensure mail transfer agent is configured for local-only mode." description: "Mail Transfer Agents (MTA), such as sendmail and Postfix, are used to listen for incoming mail and transfer the messages to the appropriate user or mail server. If the system is not intended to be a mail server, it is recommended that the MTA be configured to only process local mail." - rationale: "The software for all Mail Transfer Agents is complex and most have a long history of security issues. 
While it is important to ensure that the system can process local mail messages, it is not necessary to have the MTA's daemon listening on a port unless the server is intended to be a mail server that receives and processes mail from other systems." - remediation: "Edit /etc/postfix/main.cf and add the following line to the RECEIVING MAIL section. If the line already exists, change it to look like the line below: inet_interfaces = loopback-only . Restart postfix: # systemctl restart postfix" + rationale: "The software for all Mail Transfer Agents is complex and most have a long history of security issues. While it is important to ensure that the system can process local mail messages, it is not necessary to have the MTA's daemon listening on a port unless the server is intended to be a mail server that receives and processes mail from other systems. Notes: - This recommendation is designed around the postfix mail server. - Depending on your environment you may have an alternative MTA installed such as sendmail. If this is the case consult the documentation for your installed MTA to configure the recommended state." + remediation: "Edit /etc/postfix/main.cf and add the following line to the RECEIVING MAIL section. If the line already exists, change it to look like the line below: inet_interfaces = loopback-only Run the following command to restart postfix: # systemctl restart postfix." compliance: - cis: ["2.2.16"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1", "AC.4", "SC.7"] - - tsc: ["CC5.2", "CC6.4", "CC6.6", "CC6.7"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: none rules: - - 'c:ss -lntu -> r:\.*:25\s* && !r:127.0.0.1:25\.*|::1:25\.*' + - 'c:ss -lntu -> r:\.*:25\.* && !r:\s*127.0.0.1:25\s*|\s*::1:25\s*' - # 2.2.17 Ensure rsync is not installed or the ervice is masked (Automated) - - id: 4571 - title: "Ensure rsync is not installed or the rsyncd service is masked" - description: "The rsyncd service can be used to synchronize files between systems over network links." - rationale: "Unless required, the rsync package should be removed to reduce the attack surface area of the system. The rsyncd service presents a security risk as it uses unencrypted protocols for communication." - remediation: "Run the following command to remove the rsync package: # yum remove rsync; OR run the following command to mask the rsyncd service: # systemctl --now mask rsyncd" + # 2.2.17 Ensure nfs-utils is not installed or the nfs-server service is masked. (Automated) + - id: 4572 + title: "Ensure nfs-utils is not installed or the nfs-server service is masked." + description: "The Network File System (NFS) is one of the first and most widely distributed file systems in the UNIX environment. It provides the ability for systems to mount file systems of other servers through the network." + rationale: "If the system does not require network shares, it is recommended that the nfs-utils package be removed to reduce the attack surface of the system." + remediation: "Run the following command to remove nfs-utils: # yum remove nfs-utils OR If the nfs-package is required as a dependency, run the following command to stop and mask the nfs-server service: # systemctl --now mask nfs-server." 
compliance: - cis: ["2.2.17"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1", "AC.4", "SC.7"] - - tsc: ["CC5.2", "CC6.4", "CC6.6", "CC6.7"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: any rules: - - "c:rpm -q rsync -> r:package rsync is not installed" - - "c:systemctl is-enabled rsyncd -> r:masked" + - "c:rpm -q nfs-utils -> r:^package nfs-utils is not installed" + - "c:systemctl is-enabled nfs-server -> r:masked|No such file or directory" - # 2.2.18 Ensure NIS server is not installed (Automated) - - id: 4572 - title: "Ensure NIS Server is not installed" - description: "The ypserv package provides the Network Information Service (NIS). This service, formally known as Yellow Pages, is a client-server directory service protocol for distributing system configuration files. The NIS server is a collection of programs that allow for the distribution of configuration files." - rationale: "The NIS service is inherently an insecure system that has been vulnerable to DOS attacks, buffer overflows and has poor authentication for querying NIS maps. NIS generally has been replaced by such protocols as Lightweight Directory Access Protocol (LDAP). It is recommended that the ypservpackage be removed, and if required a more secure services be used." - remediation: "Run the following command to disable ypserv: # yum remove ypserv" + # 2.2.18 Ensure rpcbind is not installed or the rpcbind services are masked. (Automated) + - id: 4573 + title: "Ensure rpcbind is not installed or the rpcbind services are masked." + description: "The rpcbind utility maps RPC services to the ports on which they listen. RPC processes notify rpcbind when they start, registering the ports they are listening on and the RPC program numbers they expect to serve. The client system then contacts rpcbind on the server with a particular RPC program number. The rpcbind service redirects the client to the proper port number so it can communicate with the requested service. Portmapper is an RPC service, which always listens on tcp and udp 111, and is used to map other RPC services (such as nfs, nlockmgr, quotad, mountd, etc.) to their corresponding port number on the server. When a remote host makes an RPC call to that server, it first consults with portmap to determine where the RPC server is listening." + rationale: "A small request (~82 bytes via UDP) sent to the Portmapper generates a large response (7x to 28x amplification), which makes it a suitable tool for DDoS attacks. If rpcbind is not required, it is recommended that the rpcbind package be removed to reduce the attack surface of the system." + remediation: "Run the following command to remove rpcbind: # yum remove rpcbind OR If the rpcbind package is required as a dependency, run the following commands to stop and mask the rpcbind and rpcbind.socket services: # systemctl --now mask rpcbind # systemctl --now mask rpcbind.socket."
compliance: - cis: ["2.2.18"] - - cis_csc: ["2.6", "9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: all + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: any rules: - - "c:rpm -q ypserv -> r:package ypserv is not installed" + - "c:rpm -q rpcbind -> r:^package rpcbind is not installed" + - "not c:systemctl status rpcbind rpcbind.socket -> r:Loaded: && !r: masked" - # 2.2.19 Ensure telnet-server is not installed (Automated) - - id: 4573 - title: "Ensure telnet server is not installed" - description: "The telnet-server package contains the telnet daemon, which accepts connections from users from other systems via the telnet protocol." - rationale: "The telnet protocol is insecure and unencrypted. The use of an unencrypted transmission medium could allow a user with access to sniff network traffic the ability to steal credentials. The ssh package provides an encrypted session and stronger security." - remediation: "Run the following command to disable telnet: # yum remove telnet-server" + # 2.2.19 Ensure rsync is not installed or the rsyncd service is masked. (Automated) + - id: 4574 + title: "Ensure rsync is not installed or the rsyncd service is masked." + description: "The rsyncd service can be used to synchronize files between systems over network links." + rationale: "Unless required, the rsync package should be removed to reduce the attack surface area of the system. The rsyncd service presents a security risk as it uses unencrypted protocols for communication. Note: If a required dependency exists for the rsync package, but the rsyncd service is not required, the service should be masked." + impact: "There are packages that are dependent on the rsync package. If the rsync package is removed, these packages will be removed as well. Before removing the rsync package, review any dependent packages to determine if they are required on the system. If a dependent package is required, mask the rsyncd service and leave the rsync package installed." + remediation: "Run the following command to remove the rsync package: # yum remove rsync OR Run the following command to mask the rsyncd service: # systemctl --now mask rsyncd." 
compliance: - cis: ["2.2.19"] - - cis_csc: ["2.6", "9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: all + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: any rules: - - "c:rpm -q telnet-server -> r:package telnet-server is not installed" + - "c:rpm -q rsync -> r:^package rsync is not installed" + - "c:systemctl is-enabled rsyncd -> r:masked|No such file or directory" ############################################### # 2.3 Service Clients ############################################### - # 2.3.1 Ensure NIS Client is not installed (Automated) - - id: 4574 - title: "Ensure NIS Client is not installed" - description: "The Network Information Service (NIS), formerly known as Yellow Pages, is a client-server directory service protocol used to distribute system configuration files. The NIS client ( ypbind ) was used to bind a machine to an NIS server and receive the distributed configuration files." + # 2.3.1 Ensure NIS Client is not installed. (Automated) + - id: 4575 + title: "Ensure NIS Client is not installed." + description: "The Network Information Service (NIS), formerly known as Yellow Pages, is a client-server directory service protocol used to distribute system configuration files. The NIS client ( ypbind) was used to bind a machine to an NIS server and receive the distributed configuration files." rationale: "The NIS service is inherently an insecure system that has been vulnerable to DOS attacks, buffer overflows and has poor authentication for querying NIS maps. NIS generally has been replaced by such protocols as Lightweight Directory Access Protocol (LDAP). It is recommended that the service be removed." - remediation: "Run the following command to uninstall ypbind: # yum remove ypbind" + impact: "Many insecure service clients are used as troubleshooting tools and in testing environments. Uninstalling them can inhibit capability to test and troubleshoot. If they are required it is advisable to remove the clients after use to prevent accidental or intentional misuse." + remediation: "Run the following command to remove the ypbind package: # yum remove ypbind." compliance: - cis: ["2.3.1"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: all + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: none rules: - - "c:rpm -q ypbind -> r:package ypbind is not installed" + - "c:rpm -qa ypbind -> r:ypbind" - # 2.3.2 Ensure rsh client is not installed (Automated) - - id: 4575 - title: "Ensure rsh client is not installed" + # 2.3.2 Ensure rsh client is not installed. (Automated) + - id: 4576 + title: "Ensure rsh client is not installed." description: "The rsh package contains the client commands for the rsh services." - rationale: "These legacy clients contain numerous security exposures and have been replaced with the more secure SSH package. 
Even if the server is removed, it is best to ensure the clients are also removed to prevent users from inadvertently attempting to use these commands and therefore exposing their credentials. Note that removing the rsh package removes the clients for rsh, rcp and rlogin ." - remediation: "Run the following command to uninstall rsh: # yum remove rsh" + rationale: "These legacy clients contain numerous security exposures and have been replaced with the more secure SSH package. Even if the server is removed, it is best to ensure the clients are also removed to prevent users from inadvertently attempting to use these commands and therefore exposing their credentials. Note that removing the rsh package removes the clients for rsh , rcp and rlogin." + impact: "Many insecure service clients are used as troubleshooting tools and in testing environments. Uninstalling them can inhibit capability to test and troubleshoot. If they are required it is advisable to remove the clients after use to prevent accidental or intentional misuse." + remediation: "Run the following command to remove the rsh package: # yum remove rsh." compliance: - cis: ["2.3.2"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: all + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: none rules: - - "c:rpm -q rsh -> r:^package rsh is not installed" + - "c:rpm -qa rsh -> r:rsh" - # 2.3.3 Ensure talk client is not installed (Automated) - - id: 4576 - title: "Ensure talk client is not installed" + # 2.3.3 Ensure talk client is not installed. (Automated) + - id: 4577 + title: "Ensure talk client is not installed." description: "The talk software makes it possible for users to send and receive messages across systems through a terminal session. The talk client, which allows initialization of talk sessions, is installed by default." rationale: "The software presents a security risk as it uses unencrypted protocols for communication." - remediation: "Run the following command to uninstall talk: # yum remove talk" + impact: "Many insecure service clients are used as troubleshooting tools and in testing environments. Uninstalling them can inhibit capability to test and troubleshoot. If they are required it is advisable to remove the clients after use to prevent accidental or intentional misuse." + remediation: "Run the following command to remove the talk package: # yum remove talk." compliance: - cis: ["2.3.3"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: all + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: none rules: - - "c:rpm -q talk -> r:^package talk is not installed" + - "c:rpm -qa talk -> r:talk" - # 2.3.4 Ensure telnet client is not installed (Automated) - - id: 4577 - title: "Ensure telnet client is not installed" + # 2.3.4 Ensure telnet client is not installed. 
(Automated) + - id: 4578 + title: "Ensure telnet client is not installed." description: "The telnet package contains the telnet client, which allows users to start connections to other systems via the telnet protocol." rationale: "The telnet protocol is insecure and unencrypted. The use of an unencrypted transmission medium could allow an unauthorized user to steal credentials. The ssh package provides an encrypted session and stronger security and is included in most Linux distributions." - remediation: "Run the following command to uninstall telnet: # yum remove telnet" + impact: "Many insecure service clients are used as troubleshooting tools and in testing environments. Uninstalling them can inhibit capability to test and troubleshoot. If they are required it is advisable to remove the clients after use to prevent accidental or intentional misuse." + remediation: "Run the following command to remove the telnet package: # yum remove telnet." compliance: - cis: ["2.3.4"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - "c:rpm -q telnet -> r:^package telnet is not installed" - # 2.3.5 Ensure LDAP client is not installed (Automated) - - id: 4578 - title: "Ensure LDAP client is not installed" + # 2.3.5 Ensure LDAP client is not installed. (Automated) + - id: 4579 + title: "Ensure LDAP client is not installed." description: "The Lightweight Directory Access Protocol (LDAP) was introduced as a replacement for NIS/YP. It is a service that provides a method for looking up information from a central database." rationale: "If the system will not need to act as an LDAP client, it is recommended that the software be removed to reduce the potential attack surface." - remediation: "Run the following command to uninstall openldap-clients: # yum remove openldap-clients" + impact: "Removing the LDAP client will prevent or inhibit using LDAP for authentication in your environment." + remediation: "Run the following command to remove the openldap-clients package: # yum remove openldap-clients." compliance: - cis: ["2.3.5"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - "c:rpm -q openldap-clients -> r:^package openldap-clients is not installed" - # 2.5 Ensure nonessential services are removed or masked (Manual) - - id: 4579 - title: "Ensure nonessential services are removed or masked" - description: "A network port is identified by its number, the associated IP address, and the type of the communication protocol such as TCP or UDP.A listening port is a network port on which an application or process listens on, acting as a communication endpoint. Each listening port can be open or closed (filtered) using a firewall. 
In general terms, an open port is a network port that accepts incoming packets from remote locations" - rationale: "Services listening on the system pose a potential risk as an attack vector. These services should be reviewed, and if not required, the service should be stopped, and the package containing the service should be removed. If required packages have a dependency, the service should be stopped and masked to reduce the attack surface of the system." - remediation: "Review the output of: # lsof -i -P -n | grep -v '(ESTABLISHED)'; to ensure that all services listed are required on the system. If a listed service is not required, remove the package containing the service. If the package containing the service is required, stop and mask the service. Run the following command to remove the package containing the service:# yum remove OR if required packages have a dependency: run the following command to stop and mask the service:# systemctl --now mask " - compliance: - - cis: ["2.5"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: none - rules: - - "c:lsof -i -P -n -> r:(ESTABLISHED)" + # 2.4 Ensure nonessential services are removed or masked. (Manual) - Not Implemented ############################################### - # 3 Network Configuration and Firewalls + # 3 Network Configuration ############################################### ############################################### - # 3.1 Disable unused network protocols and devices + # 3.1 Uncommon Network Protocols ############################################### - # 3.1.1 Disable IPv6 (Manual) + # 3.1.1 Disable IPv6. (Manual) - id: 4580 - title: "Disable IPv6" + title: "Disable IPv6." description: "Although IPv6 has many advantages over IPv4, not all organizations have IPv6 or dual stack configurations implemented." - rationale: "If IPv6 or dual stack is not to be used, it is recommended that IPv6be disabled to reduce the attack surface of the system." - remediation: 'To disable IPv6 through the GRUB2 config: edit /etc/default/gruband add ipv6.disable=1 to the GRUB_CMDLINE_LINUX parameters: GRUB_CMDLINE_LINUX="ipv6.disable=1" Run the following command to update the grub2 configuration:# grub2-mkconfig –o /boot/grub2/grub.cfg; OR to disable IPv6 through sysctl settings: set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: # net.ipv6.conf.all.disable_ipv6 = 1; # net.ipv6.conf.default.disable_ipv6 = 1; Run the following commands to set the active kernel parameters: # sysctl -w net.ipv6.conf.all.disable_ipv6=1; # sysctl -w net.ipv6.conf.default.disable_ipv6=1; # sysctl -w net.ipv6.route.flush=1' + rationale: "If IPv6 or dual stack is not to be used, it is recommended that IPv6 be disabled to reduce the attack surface of the system." + impact: "If IPv6 is disabled through sysctl config, SSH X11forwarding may no longer function as expected. We recommend that SSH X11fowarding be disabled, but if required, the following will allow for SSH X11forwarding with IPv6 disabled through sysctl config: Add the following line the /etc/ssh/sshd_config file: AddressFamily inet Run the following command to re-start the openSSH server: # systemctl restart sshd." 
+ remediation: 'Use one of the two following methods to disable IPv6 on the system: To disable IPv6 through the GRUB2 config: Edit /etc/default/grub and add ipv6.disable=1 to the GRUB_CMDLINE_LINUX parameters: GRUB_CMDLINE_LINUX="ipv6.disable=1" Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg OR To disable IPv6 through sysctl settings: Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv6.conf.all.disable_ipv6 = 1 net.ipv6.conf.default.disable_ipv6 = 1 Run the following commands to set the active kernel parameters: # sysctl -w net.ipv6.conf.all.disable_ipv6=1 # sysctl -w net.ipv6.conf.default.disable_ipv6=1 # sysctl -w net.ipv6.route.flush=1.' compliance: - cis: ["3.1.1"] - - cis_csc: ["9.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: + - 'f:/boot/grub2/grubenv -> r:^\s*kernelopts=\.+ipv6.disable=1' - 'f:/boot/grub2/grub.cfg -> r:^\s*\t*linux && !r:ipv6.disable=1' - 'c:sysctl net.ipv6.conf.all.disable_ipv6 -> r:net.ipv6.conf.all.disable_ipv6\s*=\s*0' - 'c:sysctl net.ipv6.conf.default.disable_ipv6 -> r:net.ipv6.conf.default.disable_ipv6\s*=\s*0' - # 3.1.2 Ensure wireless interfaces are disabled (Manual) - - id: 4581 - title: "Ensure wireless interfaces are disabled" - description: "Wireless networking is used when wired networks are unavailable." - rationale: "If wireless is not to be used, wireless devices should be disabled to reduce the potential attack surface" - remediation: "Run the following command to disable any wireless interfaces: # ip link set down" - compliance: - - cis: ["3.1.2"] - - cis_csc: ["15.4", "15.5"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - "c:iw list -> r:Interface" - ################################################## - # 3.2 Network Parameters (Host Only) - ################################################## + # 3.1.2 Ensure wireless interfaces are disabled. (Automated) - Not Implemented - # 3.2.1 Ensure IP forwarding is disabled (Automated) - - id: 4582 - title: "Ensure IP forwarding is disabled" - description: "The net.ipv4.ip_forward flag is used to tell the system whether it can forward packets or not." - rationale: "Setting the flag to 0 ensures that a system with multiple interfaces (for example, a hard proxy), will never be able to forward packets, and therefore, never serve as a router." - remediation: "Run the following commands to restore the default parameters and set the active kernel parameters: # grep -Els '^\\s*net\\.ipv4\\.ip_forward\\s*=\\s*1' /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf | while read filename; do sed -ri 's/^\\s*(net\\.ipv4\\.ip_forward\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/' $filename; done; sysctl -w net.ipv4.ip_forward=0; sysctl -w net.ipv4.route.flush=1" + # 3.2.1 Ensure IP forwarding is disabled. (Automated) + - id: 4581 + title: "Ensure IP forwarding is disabled." + description: "The net.ipv4.ip_forward and net.ipv6.conf.all.forwarding flags are used to tell the system whether it can forward packets or not."
+ rationale: "Setting the flags to 0 ensures that a system with multiple interfaces (for example, a hard proxy), will never be able to forward packets, and therefore, never serve as a router." + remediation: "Run the following commands to restore the default parameters and set the active kernel parameters: # grep -Els \"^\\s*net\\.ipv4\\.ip_forward\\s*=\\s*1\" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf | while read filename; do sed -ri \"s/^\\s*(net\\.ipv4\\.ip_forward\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" $filename; done; sysctl -w net.ipv4.ip_forward=0; sysctl -w net.ipv4.route.flush=1 # grep -Els \"^\\s*net\\.ipv6\\.conf\\.all\\.forwarding\\s*=\\s*1\" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf | while read filename; do sed -ri \"s/^\\s*(net\\.ipv6\\.conf\\.all\\.forwarding\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" $filename; done; sysctl -w net.ipv6.conf.all.forwarding=0; sysctl -w net.ipv6.route.flush=1." compliance: - cis: ["3.2.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: none rules: - 'c:sysctl net.ipv4.ip_forward -> r:^net.ipv4.ip_forward\s*=\s*0$' - - 'not c:grep -RhEs "^\s*net\.ipv4\.ip_forward\s*=\s*1" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:ip_forward' + - 'f:/etc/sysctl.conf -> r:^net.ipv4.ip_forward\s*=\s*1$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.ip_forward\s*=\s*1$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.ip_forward\s*=\s*1$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.ip_forward\s*=\s*1$' - 'c:sysctl net.ipv6.conf.all.forwarding -> r:^net.ipv6.conf.all.forwarding\s*=\s*0$' - - 'not c:grep -RhEs "^\s*net\.ipv6\.conf\.all\.forwarding\s*=\s*1" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:forwarding' + - 'f:/etc/sysctl.conf -> r:^net.ipv4.ip_forward\s*=\s*1$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.all.forwarding\s*=\s*1$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.all.forwarding\s*=\s*1$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.all.forwarding\s*=\s*1$' - # 3.2.2 Ensure packet redirect sending is disabled (Automated) - - id: 4583 - title: "Ensure packet redirect sending is disabled" + # 3.2.2 Ensure packet redirect sending is disabled. (Automated) + - id: 4582 + title: "Ensure packet redirect sending is disabled." description: "ICMP Redirects are used to send routing information to other hosts. As a host itself does not act as a router (in a host only configuration), there is no need to send redirects." rationale: "An attacker could use a compromised host to send invalid ICMP redirects to other router devices in an attempt to corrupt routing and have users access a system set up by the attacker as opposed to a valid system." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.send_redirects = 0; net.ipv4.conf.default.send_redirects = 0 and set the active kernel parameters. 
Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.send_redirects=0; # sysctl -w net.ipv4.conf.default.send_redirects=0; # sysctl -w net.ipv4.route.flush=1" + remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.send_redirects = 0 net.ipv4.conf.default.send_redirects = 0 Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.send_redirects=0 # sysctl -w net.ipv4.conf.default.send_redirects=0 # sysctl -w net.ipv4.route.flush=1." compliance: - cis: ["3.2.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'c:sysctl net.ipv4.conf.all.send_redirects -> r:^net.ipv4.conf.all.send_redirects\s*=\s*0$' - 'c:sysctl net.ipv4.conf.default.send_redirects -> r:^net.ipv4.conf.default.send_redirects\s*=\s*0$' - - 'c:grep -Rh net\.ipv4\.conf\.all\.send_redirects /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.conf.all.send_redirects\s*=\s*0' - - 'c:grep -Rh net\.ipv4\.conf\.default\.send_redirects /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.conf.default.send_redirects\s*=\s*0' + - 'f:/etc/sysctl.conf -> r:^net.ipv4.conf.all.send_redirects\s*=\s*0$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.send_redirects\s*=\s*0$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.send_redirects\s*=\s*0$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.send_redirects\s*=\s*0$' + - 'f:/etc/sysctl.conf -> r:^net.ipv4.conf.default.send_redirects\s*=\s*0$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.default.send_redirects\s*=\s*0$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.default.send_redirects\s*=\s*0$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.default.send_redirects\s*=\s*0$' - ################################################## + ############################################### # 3.3 Network Parameters (Host and Router) - ################################################## + ############################################### - # 3.3.1 Ensure source routed packets are not accepted (Automated) - - id: 4584 - title: "Ensure source routed packets are not accepted" + # 3.3.1 Ensure source routed packets are not accepted. (Automated) + - id: 4583 + title: "Ensure source routed packets are not accepted." description: "In networking, source routing allows a sender to partially or fully specify the route packets take through a network. In contrast, non-source routed packets travel a path determined by routers in the network. In some cases, systems may not be routable or reachable from some locations (e.g. private addresses vs. Internet routable), and so source routed packets would need to be used." - rationale: "Setting net.ipv4.conf.all.accept_source_route and net.ipv4.conf.default.accept_source_route to 0 disables the system from accepting source routed packets. Assume this system was capable of routing packets to Internet routable addresses on one interface and private addresses on another interface. 
Assume that the private addresses were not routable to the Internet routable addresses and vice versa. Under normal routing circumstances, an attacker from the Internet routable addresses could not use the system as a way to reach the private address systems. If, however, source routed packets were allowed, they could be used to gain access to the private address systems as the route could be specified, rather than rely on routing protocols that did not allow this routing." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.accept_source_route = 0; net.ipv4.conf.default.accept_source_route = 0 and set the active kernel parameters: # sysctl -w net.ipv4.conf.all.accept_source_route=0 # sysctl -w net.ipv4.conf.default.accept_source_route=0 # sysctl -w net.ipv4.route.flush=1" + rationale: "Setting net.ipv4.conf.all.accept_source_route, net.ipv4.conf.default.accept_source_route, net.ipv6.conf.all.accept_source_route and net.ipv6.conf.default.accept_source_route to 0 disables the system from accepting source routed packets. Assume this system was capable of routing packets to Internet routable addresses on one interface and private addresses on another interface. Assume that the private addresses were not routable to the Internet routable addresses and vice versa. Under normal routing circumstances, an attacker from the Internet routable addresses could not use the system as a way to reach the private address systems. If, however, source routed packets were allowed, they could be used to gain access to the private address systems as the route could be specified, rather than rely on routing protocols that did not allow this routing." + remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.accept_source_route = 0 net.ipv4.conf.default.accept_source_route = 0 Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.accept_source_route=0 # sysctl -w net.ipv4.conf.default.accept_source_route=0 # sysctl -w net.ipv4.route.flush=1 IF IPv6 is not disabled: Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv6.conf.all.accept_source_route = 0 net.ipv6.conf.default.accept_source_route = 0 Run the following commands to set the active kernel parameters: # sysctl -w net.ipv6.conf.all.accept_source_route=0 # sysctl -w net.ipv6.conf.default.accept_source_route=0 # sysctl -w net.ipv6.route.flush=1." 
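+ # Illustrative example (not part of the CIS benchmark text): the f:/d: rules used by this and the
+ # following 3.3.x checks assume the kernel parameters are persisted in /etc/sysctl.conf or in a
+ # *.conf drop-in under /etc/sysctl.d/, /usr/lib/sysctl.d/ or /run/sysctl.d/. A minimal sketch of
+ # such a drop-in (the file name below is only an example, not mandated by the benchmark):
+ #   # /etc/sysctl.d/60-accept_source_route.conf
+ #   net.ipv4.conf.all.accept_source_route = 0
+ #   net.ipv4.conf.default.accept_source_route = 0
+ # The values are then applied to the running kernel with "sysctl -w <parameter>=0" followed by
+ # "sysctl -w net.ipv4.route.flush=1", as described in the remediation above.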
compliance: - cis: ["3.3.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'c:sysctl net.ipv4.conf.all.accept_source_route -> r:^net.ipv4.conf.all.accept_source_route\s*=\s*0$' - 'c:sysctl net.ipv4.conf.default.accept_source_route -> r:^net.ipv4.conf.default.accept_source_route\s*=\s*0$' - - 'c:grep -Rh net\.ipv4\.conf\.all\.accept_source_route /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.conf.all.accept_source_route\s*=\s*0' - - 'c:grep -Rh net\.ipv4\.conf\.default\.accept_source_route /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.conf.default.accept_source_route\s*=\s*0' - - 'c:sysctl net.ipv6.conf.all.accept_source_route -> r:^net.ipv6.conf.all.accept_source_route\s*=\s*0$' - - 'c:sysctl net.ipv6.conf.default.accept_source_route -> r:^net.ipv6.conf.default.accept_source_route\s*=\s*0$' - - 'c:grep -Rh net\.ipv6\.conf\.all\.accept_source_route /etc/sysctl.conf /etc/sysctl.d/* -> r:^net.ipv6.conf.all.accept_source_route\s*=\s*0' - - 'c:grep -Rh net\.ipv6\.conf\.default\.accept_source_route /etc/sysctl.conf /etc/sysctl.d/* -> r:^net.ipv6.conf.default.accept_source_route\s*=\s*0' - - # 3.3.2 Ensure ICMP redirects are not accepted (Automated) - - id: 4585 - title: "Ensure ICMP redirects are not accepted" - description: "ICMP redirect messages are packets that convey routing information and tell your host (acting as a router) to send packets via an alternate path. It is a way of allowing an outside routing device to update your system routing tables." + - 'f:/etc/sysctl.conf -> r:^net.ipv4.conf.all.accept_source_route\s*=\s*0$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.accept_source_route\s*=\s*0$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.accept_source_route\s*=\s*0$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.accept_source_route\s*=\s*0$' + - 'f:/etc/sysctl.conf -> r:^net.ipv4.conf.default.accept_source_route\s*=\s*0$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.default.accept_source_route\s*=\s*0$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.default.accept_source_route\s*=\s*0$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.default.accept_source_route\s*=\s*0$' + + # 3.3.2 Ensure ICMP redirects are not accepted. (Automated) + - id: 4584 + title: "Ensure ICMP redirects are not accepted." + description: "ICMP redirect messages are packets that convey routing information and tell your host (acting as a router) to send packets via an alternate path. It is a way of allowing an outside routing device to update your system routing tables. By setting net.ipv4.conf.all.accept_redirects and net.ipv6.conf.all.accept_redirects to 0, the system will not accept any ICMP redirect messages, and therefore, won't allow outsiders to update the system's routing tables." rationale: "Attackers could use bogus ICMP redirect messages to maliciously alter the system routing tables and get them to send packets to incorrect networks and allow your system packets to be captured." 
- remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.accept_redirects = 0; net.ipv4.conf.default.accept_redirects = 0 and set the active kernel parameters: # sysctl -w net.ipv4.conf.all.accept_redirects=0 # sysctl -w net.ipv4.conf.default.accept_redirects=0 # sysctl -w net.ipv4.route.flush=1" + remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.accept_redirects = 0 net.ipv4.conf.default.accept_redirects = 0 Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.accept_redirects=0 # sysctl -w net.ipv4.conf.default.accept_redirects=0 # sysctl -w net.ipv4.route.flush=1 IF IPv6 is not disabled Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv6.conf.all.accept_redirects = 0 net.ipv6.conf.default.accept_redirects = 0 Run the following commands to set the active kernel parameters: # sysctl -w net.ipv6.conf.all.accept_redirects=0 # sysctl -w net.ipv6.conf.default.accept_redirects=0 # sysctl -w net.ipv6.route.flush=1." compliance: - cis: ["3.3.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'c:sysctl net.ipv4.conf.all.accept_redirects -> r:^net.ipv4.conf.all.accept_redirects\s*=\s*0$' - 'c:sysctl net.ipv4.conf.default.accept_redirects -> r:^net.ipv4.conf.default.accept_redirects\s*=\s*0$' - - 'c:grep -Rh net\.ipv4\.conf\.all\.accept_redirects /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.conf.all.accept_redirects\s*=\s*0' - - 'c:grep -Rh net\.ipv4\.conf\.default\.accept_redirects /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.conf.default.accept_redirects\s*=\s*0' - - 'c:sysctl net.ipv4.conf.all.accept_redirects -> r:^net.ipv4.conf.all.accept_redirects\s*=\s*0$' - - # 3.3.3 Ensure secure ICMP redirects are not accepted (Automated) - - id: 4586 - title: "Ensure secure ICMP redirects are not accepted" + - 'f:/etc/sysctl.conf -> r:^net.ipv4.conf.all.accept_redirects\s*=\s*0$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.accept_redirects\s*=\s*0$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.accept_redirects\s*=\s*0$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.accept_redirects\s*=\s*0$' + - 'f:/etc/sysctl.conf -> r:^net.ipv4.conf.default.accept_redirects\s*=\s*0$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.default.accept_redirects\s*=\s*0$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.default.accept_redirects\s*=\s*0$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.default.accept_redirects\s*=\s*0$' + - 'c:sysctl net.ipv6.conf.all.accept_redirects -> r:^net.ipv6.conf.all.accept_redirects\s*=\s*0$' + - 'c:sysctl net.ipv6.conf.default.accept_redirects -> r:^net.ipv6.conf.default.accept_redirects\s*=\s*0$' + - 'f:/etc/sysctl.conf -> r:^net.ipv6.conf.all.accept_redirects\s*=\s*0$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.all.accept_redirects\s*=\s*0$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.all.accept_redirects\s*=\s*0$' + - 
'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.all.accept_redirects\s*=\s*0$' + - 'f:/etc/sysctl.conf -> r:^net.ipv6.conf.default.accept_redirects\s*=\s*0$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.default.accept_redirects\s*=\s*0$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.default.accept_redirects\s*=\s*0$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.default.accept_redirects\s*=\s*0$' + + # 3.3.3 Ensure secure ICMP redirects are not accepted. (Automated) + - id: 4585 + title: "Ensure secure ICMP redirects are not accepted." description: "Secure ICMP redirects are the same as ICMP redirects, except they come from gateways listed on the default gateway list. It is assumed that these gateways are known to your system, and that they are likely to be secure." rationale: "It is still possible for even known gateways to be compromised. Setting net.ipv4.conf.all.secure_redirects to 0 protects the system from routing table updates by possibly compromised known gateways." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.secure_redirects = 0; net.ipv4.conf.default.secure_redirects = 0 and set the active kernel parameters: # sysctl -w net.ipv4.conf.all.secure_redirects=0 # sysctl -w net.ipv4.conf.default.secure_redirects=0 # sysctl -w net.ipv4.route.flush=1" + remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.secure_redirects = 0 net.ipv4.conf.default.secure_redirects = 0 Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.secure_redirects=0 # sysctl -w net.ipv4.conf.default.secure_redirects=0 # sysctl -w net.ipv4.route.flush=1." compliance: - cis: ["3.3.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'c:sysctl net.ipv4.conf.all.secure_redirects -> r:^net.ipv4.conf.all.secure_redirects\s*=\s*0$' - 'c:sysctl net.ipv4.conf.default.secure_redirects -> r:^net.ipv4.conf.default.secure_redirects\s*=\s*0$' - - 'c:grep -Rh net\.ipv4\.conf\.all\.secure_redirects /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.conf.all.secure_redirects\s*=\s*0' - - 'c:grep -Rh net\.ipv4\.conf\.default\.secure_redirects /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.conf.default.secure_redirects\s*=\s*0' - - # 3.3.4 Ensure suspicious packets are logged (Automated) - - id: 4587 - title: "Ensure suspicious packets are logged" + - 'f:/etc/sysctl.conf -> r:^net.ipv4.conf.all.secure_redirects\s*=\s*0$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.secure_redirects\s*=\s*0$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.secure_redirects\s*=\s*0$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.secure_redirects\s*=\s*0$' + - 'f:/etc/sysctl.conf -> r:^net.ipv4.conf.default.secure_redirects\s*=\s*0$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.default.secure_redirects\s*=\s*0$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.default.secure_redirects\s*=\s*0$' + - 'd:->/run/sysctl.d/ -> 
r:\.*.conf -> r:^net.ipv4.conf.default.secure_redirects\s*=\s*0$' + + # 3.3.4 Ensure suspicious packets are logged. (Automated) + - id: 4586 + title: "Ensure suspicious packets are logged." description: "When enabled, this feature logs packets with un-routable source addresses to the kernel log." rationale: "Enabling this feature and logging these packets allows an administrator to investigate the possibility that an attacker is sending spoofed packets to their system." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.log_martians = 1; net.ipv4.conf.default.log_martians = 1 and set the active kernel parameters: # sysctl -w net.ipv4.conf.all.log_martians=1 # sysctl -w net.ipv4.conf.default.log_martians=1 # sysctl -w net.ipv4.route.flush=1" + remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.log_martians = 1 net.ipv4.conf.default.log_martians = 1 Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.log_martians=1 # sysctl -w net.ipv4.conf.default.log_martians=1 # sysctl -w net.ipv4.route.flush=1." compliance: - cis: ["3.3.4"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - 'c:sysctl net.ipv4.conf.all.log_martians -> r:^net.ipv4.conf.all.log_martians\s*=\s*1$' - 'c:sysctl net.ipv4.conf.default.log_martians -> r:^net.ipv4.conf.default.log_martians\s*=\s*1$' - - 'c:grep -Rh net\.ipv4\.conf\.all\.log_martians /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.conf.all.log_martians\s*=\s*1' - - 'c:grep -Rh net\.ipv4\.conf\.default\.log_martians /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.conf.default.log_martians\s*=\s*1' - - # 3.3.5 Ensure broadcast ICMP requests are ignored (Automated) - - id: 4588 - title: "Ensure broadcast ICMP requests are ignored" + - 'f:/etc/sysctl.conf -> r:^net.ipv4.conf.all.log_martians\s*=\s*1$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.log_martians\s*=\s*1$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.log_martians\s*=\s*1$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.log_martians\s*=\s*1$' + - 'f:/etc/sysctl.conf -> r:^net.ipv4.conf.default.log_martians\s*=\s*1$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.default.log_martians\s*=\s*1$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.default.log_martians\s*=\s*1$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.default.log_martians\s*=\s*1$' + + # 3.3.5 Ensure broadcast ICMP requests are ignored. (Automated) + - id: 4587 + title: "Ensure broadcast ICMP requests are ignored." description: "Setting net.ipv4.icmp_echo_ignore_broadcasts to 1 will cause the system to ignore all ICMP echo and timestamp requests to broadcast and multicast addresses." rationale: "Accepting ICMP echo and timestamp requests with broadcast or multicast destinations for your network could be used to trick your host into starting (or participating) in a Smurf attack. A Smurf attack relies on an attacker sending large amounts of ICMP broadcast messages with a spoofed source address. 
All hosts receiving this message and responding would send echo-reply messages back to the spoofed address, which is probably not routable. If many hosts respond to the packets, the amount of traffic on the network could be significantly multiplied." - remediation: "Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.icmp_echo_ignore_broadcasts = 1 and set the active kernel parameters: # sysctl -w net.ipv4.icmp_echo_ignore_broadcasts=1 # sysctl -w net.ipv4.route.flush=1" + remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.icmp_echo_ignore_broadcasts = 1 Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.icmp_echo_ignore_broadcasts=1 # sysctl -w net.ipv4.route.flush=1." compliance: - cis: ["3.3.5"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'c:sysctl net.ipv4.icmp_echo_ignore_broadcasts -> r:^net.ipv4.icmp_echo_ignore_broadcasts\s*=\s*1$' - - 'c:grep -Rh net\.ipv4\.icmp_echo_ignore_broadcasts /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.icmp_echo_ignore_broadcasts\s*=\s*1' + - 'f:/etc/sysctl.conf -> r:^net.ipv4.icmp_echo_ignore_broadcasts\s*=\s*1$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.icmp_echo_ignore_broadcasts\s*=\s*1$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.icmp_echo_ignore_broadcasts\s*=\s*1$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.icmp_echo_ignore_broadcasts\s*=\s*1$' - # 3.3.6 Ensure bogus ICMP responses are ignored (Automated) - - id: 4589 - title: "Ensure bogus ICMP responses are ignored" + # 3.3.6 Ensure bogus ICMP responses are ignored. (Automated) + - id: 4588 + title: "Ensure bogus ICMP responses are ignored." description: "Setting icmp_ignore_bogus_error_responses to 1 prevents the kernel from logging bogus responses (RFC-1122 non-compliant) from broadcast reframes, keeping file systems from filling up with useless log messages." rationale: "Some routers (and some attackers) will send responses that violate RFC-1122 and attempt to fill up a log file system with many useless error messages." - remediation: "Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.icmp_ignore_bogus_error_responses = 1 and set the active kernel parameters: # sysctl -w net.ipv4.icmp_ignore_bogus_error_responses=1 # sysctl -w net.ipv4.route.flush=1" + remediation: "Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.icmp_ignore_bogus_error_responses = 1 Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.icmp_ignore_bogus_error_responses=1 # sysctl -w net.ipv4.route.flush=1." 
compliance: - cis: ["3.3.6"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'c:sysctl net.ipv4.icmp_ignore_bogus_error_responses -> r:^net.ipv4.icmp_ignore_bogus_error_responses\s*=\s*1$' - - 'c:grep -Rh net\.ipv4\.icmp_ignore_bogus_error_responses /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.icmp_ignore_bogus_error_responses\s*=\s*1' + - 'f:/etc/sysctl.conf -> r:^net.ipv4.icmp_ignore_bogus_error_responses\s*=\s*1$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.icmp_ignore_bogus_error_responses\s*=\s*1$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.icmp_ignore_bogus_error_responses\s*=\s*1$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.icmp_ignore_bogus_error_responses\s*=\s*1$' - # 3.3.7 Ensure Reverse Path Filtering is enabled (Automated) - - id: 4590 - title: "Ensure Reverse Path Filtering is enabled" + # 3.3.7 Ensure Reverse Path Filtering is enabled. (Automated) + - id: 4589 + title: "Ensure Reverse Path Filtering is enabled." description: "Setting net.ipv4.conf.all.rp_filter and net.ipv4.conf.default.rp_filter to 1 forces the Linux kernel to utilize reverse path filtering on a received packet to determine if the packet was valid. Essentially, with reverse path filtering, if the return packet does not go out the same interface that the corresponding source packet came from, the packet is dropped (and logged if log_martians is set)." - rationale: "Setting these flags is a good way to deter attackers from sending your server bogus packets that cannot be responded to. One instance where this feature breaks down is if asymmetrical routing is employed. This would occur when using dynamic routing protocols (bgp, ospf, etc) on your system. If you are using asymmetrical routing on your system, you will not be able to enable this feature without breaking the routing." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.rp_filter = 1; net.ipv4.conf.default.rp_filter = 1 and set the active kernel parameters: # sysctl -w net.ipv4.conf.all.rp_filter=1 # sysctl -w net.ipv4.conf.default.rp_filter=1 # sysctl -w net.ipv4.route.flush=1" + rationale: "Setting these flags is a good way to deter attackers from sending your system bogus packets that cannot be responded to. One instance where this feature breaks down is if asymmetrical routing is employed. This would occur when using dynamic routing protocols (bgp, ospf, etc) on your system. If you are using asymmetrical routing on your system, you will not be able to enable this feature without breaking the routing." + remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.rp_filter = 1 net.ipv4.conf.default.rp_filter = 1 Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.rp_filter=1 # sysctl -w net.ipv4.conf.default.rp_filter=1 # sysctl -w net.ipv4.route.flush=1." 
compliance: - cis: ["3.3.7"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'c:sysctl net.ipv4.conf.all.rp_filter -> r:^net.ipv4.conf.all.rp_filter\s*=\s*1$' - - 'c:sysctl net.ipv4.conf.default.rp_filter -> r:^net.ipv4.conf.default.rp_filter\s*=\s*1$' - - 'c:grep -Rh net\.ipv4\.conf\.all\.rp_filter /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.conf.all.rp_filter\s*=\s*1' - - 'c:grep -Rh net\.ipv4\.conf\.default\.rp_filter /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.conf.default.rp_filter\s*=\s*1' + - 'f:/etc/sysctl.conf -> r:^net.ipv4.conf.all.rp_filter\s*=\s*1$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.rp_filter\s*=\s*1$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.rp_filter\s*=\s*1$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.conf.all.rp_filter\s*=\s*1$' - # 3.3.8 Ensure TCP SYN Cookies is enabled (Automated) - - id: 4591 - title: "Ensure TCP SYN Cookies is enabled" - description: "When tcp_syncookies is set, the kernel will handle TCP SYN packets normally until the half-open connection queue is full, at which time, the SYN cookie functionality kicks in. SYN cookies work by not using the SYN queue at all. Instead, the kernel simply replies to the SYN with a SYN|ACK, but will include a specially crafted TCP sequence number that encodes the source and destination IP address and port number and the time the packet was sent." + # 3.3.8 Ensure TCP SYN Cookies is enabled. (Automated) + - id: 4590 + title: "Ensure TCP SYN Cookies is enabled." + description: "When tcp_syncookies is set, the kernel will handle TCP SYN packets normally until the half-open connection queue is full, at which time, the SYN cookie functionality kicks in. SYN cookies work by not using the SYN queue at all. Instead, the kernel simply replies to the SYN with a SYN|ACK, but will include a specially crafted TCP sequence number that encodes the source and destination IP address and port number and the time the packet was sent. A legitimate connection would send the ACK packet of the three way handshake with the specially crafted sequence number. This allows the system to verify that it has received a valid response to a SYN cookie and allow the connection, even though there is no corresponding SYN in the queue." rationale: "Attackers use SYN flood attacks to perform a denial of service attacked on a system by sending many SYN packets without completing the three way handshake. This will quickly use up slots in the kernel's half-open connection queue and prevent legitimate connections from succeeding. SYN cookies allow the system to keep accepting valid connections, even if under a denial of service attack." 
- remediation: "Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.tcp_syncookies = 1 and set the active kernel parameters: # sysctl -w net.ipv4.tcp_syncookies=1 # sysctl -w net.ipv4.route.flush=1" + remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.tcp_syncookies = 1 Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.tcp_syncookies=1 # sysctl -w net.ipv4.route.flush=1." compliance: - cis: ["3.3.8"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'c:sysctl net.ipv4.tcp_syncookies -> r:^net.ipv4.tcp_syncookies\s*=\s*1$' - - 'c:grep -Rh net\.ipv4\.tcp_syncookies /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv4.tcp_syncookies\s*=\s*1' + - 'f:/etc/sysctl.conf -> r:^net.ipv4.tcp_syncookies\s*=\s*1$' + - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.tcp_syncookies\s*=\s*1$' + - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.tcp_syncookies\s*=\s*1$' + - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv4.tcp_syncookies\s*=\s*1$' - # 3.3.9 Ensure IPv6 router advertisements are not accepted (Automated) - - id: 4592 - title: "Ensure IPv6 router advertisements are not accepted" + # 3.3.9 Ensure IPv6 router advertisements are not accepted. (Automated) + - id: 4591 + title: "Ensure IPv6 router advertisements are not accepted." description: "This setting disables the system's ability to accept IPv6 router advertisements." - rationale: "It is recommended that systems not accept router advertisements as they could be tricked into routing traffic to compromised machines. Setting hard routes within the system (usually a single default route to a trusted router) protects the system from bad routes." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv6.conf.all.accept_ra = 0 and net.ipv6.conf.default.accept_ra = 0 Then, run the following commands to set the active kernel parameters: # sysctl -w net.ipv6.conf.all.accept_ra=0 # sysctl -w net.ipv6.conf.default.accept_ra=0 # sysctl -w net.ipv6.route.flush=1" + rationale: "It is recommended that systems do not accept router advertisements as they could be tricked into routing traffic to compromised machines. Setting hard routes within the system (usually a single default route to a trusted router) protects the system from bad routes." + remediation: "IF IPv6 is enabled: Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv6.conf.all.accept_ra = 0 net.ipv6.conf.default.accept_ra = 0 Run the following commands to set the active kernel parameters: # sysctl -w net.ipv6.conf.all.accept_ra=0 # sysctl -w net.ipv6.conf.default.accept_ra=0 # sysctl -w net.ipv6.route.flush=1." 
 compliance:
 - cis: ["3.3.9"]
- - cis_csc: ["5.1"]
- - pci_dss: ["2.2.3"]
- - nist_800_53: ["CM.1"]
- - gpg_13: ["4.3"]
- - gdpr_IV: ["35.7.d"]
- - hipaa: ["164.312.b"]
- - tsc: ["CC5.2"]
- condition: all
- rules:
- - 'c:sysctl net.ipv6.conf.all.accept_ra -> r:^net.ipv6.conf.all.accept_ra\s*=\s*0$'
- - 'c:sysctl net.ipv6.conf.default.accept_ra -> r:^net.ipv6.conf.default.accept_ra\s*=\s*0$'
- - 'c:grep -Rh net\.ipv6\.conf\.all\.accept_ra /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv6.conf.all.accept_ra\s*=\s*0'
- - 'c:grep -Rh net\.ipv6\.conf\.default\.accept_ra /etc/sysctl.conf /etc/sysctl.d -> r:^net.ipv6.conf.default.accept_ra\s*=\s*0'
+ - cis_csc_v8: ["4.1"]
+ - cis_csc_v7: ["5.1"]
+ - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"]
+ - iso_27001-2013: ["A.14.2.5", "A.8.1.3"]
+ - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"]
+ - pci_dss_v3.2.1: ["11.5", "2.2"]
+ - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"]
+ - soc_2: ["CC7.1", "CC8.1"]
+ condition: all
+ rules:
+ - 'c:sysctl net.ipv6.conf.all.accept_ra -> r:^net.ipv6.conf.all.accept_ra\s*=\s*0$'
+ - 'c:sysctl net.ipv6.conf.default.accept_ra -> r:^net.ipv6.conf.default.accept_ra\s*=\s*0$'
+ - 'f:/etc/sysctl.conf -> r:^net.ipv6.conf.all.accept_ra\s*=\s*0$'
+ - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.all.accept_ra\s*=\s*0$'
+ - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.all.accept_ra\s*=\s*0$'
+ - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.all.accept_ra\s*=\s*0$'
+ - 'f:/etc/sysctl.conf -> r:^net.ipv6.conf.default.accept_ra\s*=\s*0$'
+ - 'd:->/etc/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.default.accept_ra\s*=\s*0$'
+ - 'd:->/usr/lib/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.default.accept_ra\s*=\s*0$'
+ - 'd:->/run/sysctl.d/ -> r:\.*.conf -> r:^net.ipv6.conf.default.accept_ra\s*=\s*0$'

 ###############################################
 # 3.4 Uncommon Network Protocols
 ###############################################
- # 3.4.1 Ensure DCCP is disabled (Automated)
- - id: 4593
- title: "Ensure DCCP is disabled"
+
+ # 3.4.1 Ensure DCCP is disabled. (Automated)
+ - id: 4592
+ title: "Ensure DCCP is disabled."
 description: "The Datagram Congestion Control Protocol (DCCP) is a transport layer protocol that supports streaming media and telephony. DCCP provides a way to gain access to congestion control, without having to do it at the application layer, but does not provide in-sequence delivery."
 rationale: "If the protocol is not required, it is recommended that the drivers not be installed to reduce the potential attack surface."
- remediation: "Edit or create the file /etc/modprobe.d/CIS.conf and add the following line: install dccp /bin/true"
+ remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: vim /etc/modprobe.d/dccp.conf Add the following line: install dccp /bin/true."
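+ # Illustrative example (not part of the CIS benchmark text): a minimal sketch of the modprobe
+ # drop-in referenced in the remediation above, using the example path /etc/modprobe.d/dccp.conf:
+ #   install dccp /bin/true
+ # With that line in place, "modprobe -n -v dccp" reports the install override and "lsmod" should
+ # not list dccp, which is what the two rules below verify.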
compliance: - cis: ["3.4.1"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:modprobe -n -v dccp -> r:install /bin/true" + - 'c:modprobe -n -v dccp -> r:install\s*\t*/bin/true' - "not c:lsmod -> r:dccp" - # 3.4.2 Ensure SCTP is disabled (Automated) - - id: 4594 - title: "Ensure SCTP is disabled" + # 3.4.2 Ensure SCTP is disabled. (Automated) + - id: 4593 + title: "Ensure SCTP is disabled." description: "The Stream Control Transmission Protocol (SCTP) is a transport layer protocol used to support message oriented communication, with several streams of messages in one connection. It serves a similar function as TCP and UDP, incorporating features of both. It is message-oriented like UDP, and ensures reliable in-sequence transport of messages with congestion control like TCP." rationale: "If the protocol is not being used, it is recommended that kernel module not be loaded, disabling the service to reduce the potential attack surface." - remediation: "Edit or create the file /etc/modprobe.d/CIS.conf and add the following line: install sctp /bin/true" + remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: vim /etc/modprobe.d/sctp.conf Add the following line: install sctp /bin/true." compliance: - cis: ["3.4.2"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:modprobe -n -v sctp -> r:install /bin/true" + - 'c:modprobe -n -v sctp -> r:install\s*\t*/bin/true' - "not c:lsmod -> r:sctp" ############################################### # 3.5 Firewall Configuration ############################################### ############################################### - # 3.5.1 Configure firewalld + # 3.5.1 Ensure Firewall software is installed ############################################### - # 3.5.1.1 Ensure FirewallD is installed (Automated) - # 3.5.2.1 Ensure nftables is installed (Automated) - # 3.5.3.1.1 Ensure iptables packages are installed (Automated) - # 3 rules here: - - id: 4595 - title: "Ensure FirewallD or nftables or iptables-services is installed" - description: "firewalld is a firewall management tool for Linux operating systems. It provides firewall features by acting as a front-end for the Linux kernel's net filter framework via the iptables backend or provides firewall features by acting as a front-end for the Linux kernel's netfilter framework via the nftables utility. FirewallD replaces iptables as the default firewall management tool. Use the firewalld utility to configure a firewall for less complex firewalls. The utility is easy to use and covers the typical use cases scenario. FirewallD supports both IPv4 and IPv6 networks and can administer separate firewall zones with varying degrees of trust as defined in zone profiles." 
- rationale: "A firewall utility is required to configure the Linux kernel's netfilter framework via the iptables or nftables back-end. The Linux kernel's netfilter framework host-based firewall can protect against threats originating from within a corporate network to include malicious mobile code and poorly configured software on a host." - remediation: "Run the following command to install firewalld: # yum install firewalld iptables; OR to install nftables: # yum install nftables; OR to install iptables-services: # yum install iptables-services iptables" + # 3.5.1.1 Ensure firewalld is installed. (Automated) + - id: 4594 + title: "Ensure firewalld is installed." + description: "firewalld is a firewall management tool for Linux operating systems. It provides firewall features by acting as a front-end for the Linux kernel's netfilter framework via the iptables backend or provides firewall features by acting as a front-end for the Linux kernel's netfilter framework via the nftables utility. firewalld replaces iptables as the default firewall management tool. Use the firewalld utility to configure a firewall for less complex firewalls. The utility is easy to use and covers the typical use cases scenario. FirewallD supports both IPv4 and IPv6 networks and can administer separate firewall zones with varying degrees of trust as defined in zone profiles. Note: Starting in v0.6.0, FirewallD added support for acting as a front-end for the Linux kernel's netfilter framework via the nftables userspace utility, acting as an alternative to the nft command line program." + rationale: "A firewall utility is required to configure the Linux kernel's netfilter framework via the iptables or nftables back-end. The Linux kernel's netfilter framework host-based firewall can protect against threats originating from within a corporate network to include malicious mobile code and poorly configured software on a host. Note: Only one firewall utility should be installed and configured. FirewallD is dependent on the iptables package." + impact: "Changing firewall settings while connected over the network can result in being locked out of the system." + remediation: "Run the following command to install FirewallD and iptables: # yum install firewalld iptables." compliance: - cis: ["3.5.1.1"] - - cis_csc: ["9.4"] - - pci_dss: ["1.1"] - - tsc: ["CC8.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: any rules: - - "not c:rpm -q iptables firewalld -> r:is not installed" - - "not c:rpm -q nftables -> r:is not installed" - - "not c:rpm -q iptables iptables-services -> r:is not installed" + - "c:rpm -q firewalld -> r:^firewalld-" + - "c:rpm -q iptables -> r:^iptables-" - # 3.5.1.2 Ensure iptables-services package is not installed (Automated) - # 3.5.3.1.3 Ensure firewalld is not installed or stopped and masked(Automated) - # both of the rules are here - - id: 4596 - title: "Ensure iptables-services and FirewallD are not installed at the same time" + # 3.5.1.2 Ensure iptables-services not installed with firewalld. (Automated) + - id: 4595 + title: "Ensure iptables-services not installed with firewalld." description: "The iptables-services package contains the iptables.service and ip6tables.service. 
These services allow for management of the Host Based Firewall provided by the iptables package."
 rationale: "iptables.service and ip6tables.service are still supported and can be installed with the iptables-services package. Running both firewalld and the services included in the iptables-services package may lead to conflict."
- remediation: "Run the following commands to stop the services included in the iptables-services package and remove the iptables-services package: # systemctl stop iptables; # systemctl stop ip6tables; # yum remove iptables-services. OR Run the following command to remove firewalld: # yum remove firewalld OR Run the following command to stop and mask firewalld: # systemctl --now mask firewalld"
+ impact: "Running both firewalld and iptables/ip6tables service may lead to conflict."
+ remediation: "Run the following commands to stop the services included in the iptables-services package and remove the iptables-services package # systemctl stop iptables # systemctl stop ip6tables # yum remove iptables-services."
 compliance:
 - cis: ["3.5.1.2"]
- - cis_csc: ["9.4"]
- - pci_dss: ["1.1"]
- - tsc: ["CC8.1"]
+ - cis_csc_v8: ["4.4", "4.8"]
+ - cis_csc_v7: ["9.4"]
+ - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"]
+ - iso_27001-2013: ["A.13.1.1"]
+ - nist_sp_800-53: ["SC-7(5)"]
+ - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"]
+ - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"]
+ - soc_2: ["CC6.3", "CC6.6"]
+ condition: all
+ rules:
+ - "c:rpm -q iptables-services -> r:^package iptables-services is not installed"
+
+ # 3.5.1.3 Ensure nftables either not installed or masked with firewalld. (Automated)
+ - id: 4596
+ title: "Ensure nftables either not installed or masked with firewalld."
+ description: "nftables is a subsystem of the Linux kernel providing filtering and classification of network packets/datagrams/frames and is the successor to iptables. Note: Support for using nftables as the back-end for firewalld was added in release v0.6.0. In Fedora 19 Linux derivatives, firewalld utilizes iptables as its back-end by default."
+ rationale: "Running both firewalld and nftables may lead to conflict. Note: firewalld may be configured as the front-end to nftables. If this is the case, nftables should be stopped and masked instead of removed."
+ remediation: 'Run the following command to remove nftables: # yum remove nftables OR Run the following command to stop and mask nftables: # systemctl --now mask nftables.'
+ compliance:
+ - cis: ["3.5.1.3"]
+ - cis_csc_v8: ["4.4", "4.8"]
+ - cis_csc_v7: ["9.4"]
+ - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"]
+ - iso_27001-2013: ["A.13.1.1"]
+ - nist_sp_800-53: ["SC-7(5)"]
+ - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"]
+ - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"]
+ - soc_2: ["CC6.3", "CC6.6"]
+ condition: any
 rules:
- - "c:rpm -q iptables-services firewalld -> r:is not installed"
- - "c:systemctl is-enabled firewalld -> r:masked"
+ - "c:rpm -q nftables -> r:package nftables is not installed"
+ - 'not c:systemctl status nftables -> r:active \(running\)|\(exited\)'
+ - "c:systemctl is-enabled nftables -> r:masked"

 # 3.5.1.4 Ensure firewalld service enabled and running.
(Automated) - id: 4597 - title: "Ensure nftables and FirewallD are not installed at the same time or ensure one of them is stopped and masked" - description: "nftables is a subsystem of the Linux kernel providing filtering and classification of network packets/datagrams/frames and is the successor to iptables.Note: Support for using nftables as the back-end for firewalld was added in release v0.6.0. In Fedora 19 Linux derivatives, firewalld utilizes iptables as its back-end by default." - rationale: "Running both firewalld and nftables may lead to conflict." - remediation: "Run the following command to remove nftables:# yum remove nftables; OR run the following command to stop and mask nftables: # systemctl --now mask nftables. OR Run the following command to remove firewalld: # yum remove firewalld OR Run the following command to stop and mask firewalld: # systemctl --now mask firewalld" - compliance: - - cis: ["3.5.1.3", "3.5.3.1.3"] - - cis_csc: ["9.4"] - - pci_dss: ["1.1"] - - tsc: ["CC8.1"] - condition: any + title: "Ensure firewalld service enabled and running." + description: "firewalld.service enables the enforcement of firewall rules configured through firewalld." + rationale: "Ensure that the firewalld.service is enabled and running to enforce firewall rules configured through firewalld." + impact: "Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following command to unmask firewalld # systemctl unmask firewalld Run the following command to enable and start firewalld # systemctl --now enable firewalld." + compliance: + - cis: ["3.5.1.4"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: all rules: - - "c:rpm -q iptables-services nftables -> r:is not installed" + - "p:firewalld" + - "c:firewall-cmd --state -> r:running" - # 3.5.2.3 Ensure iptables-services package is not installed (Automated) - # 3.5.3.1.2 Ensure nftables is not installed (Automated) - # Both of the rules are here + # 3.5.1.5 Ensure firewalld default zone is set. (Automated) - Not Implemented + # 3.5.1.6 Ensure network interfaces are assigned to appropriate zone. (Manual) - Not Implemented + # 3.5.1.7 Ensure firewalld drops unnecessary services and ports. (Manual) - Not Implemented + # 3.5.2.1 Ensure nftables is installed. (Automated) - id: 4598 - title: "Ensure nftables and iptables-services are not installed at the same time or ensure one of them is stopped and masked" - description: "nftables is a subsystem of the Linux kernel providing filtering and classification of network packets/datagrams/frames and is the successor to iptables.Note: Support for using nftables as the back-end for firewalld was added in release v0.6.0. In Fedora 19 Linux derivatives, firewalld utilizes iptables as its back-end by default." - rationale: "Running both nftables and nftables may lead to conflict." - remediation: "Run the following command to remove nftables:# yum remove nftables; OR run the following command to stop and mask nftables: # systemctl --now mask nftables. 
OR Run the following command to remove iptables# # systemctl stop iptables; # systemctl stop ip6tables; # yum remove iptables-services" - compliance: - - cis: ["3.5.1.3", "3.5.3.1.3"] - - cis_csc: ["9.4"] - - pci_dss: ["1.1"] - - tsc: ["CC8.1"] - condition: any + title: "Ensure nftables is installed." + description: "nftables provides a new in-kernel packet classification framework that is based on a network-specific Virtual Machine (VM) and a new nft userspace command line tool. nftables reuses the existing Netfilter subsystems such as the existing hook infrastructure, the connection tracking system, NAT, userspace queuing and logging subsystem. Note: - nftables is available in Linux kernel 3.13 and newer. - Only one firewall utility should be installed and configured." + rationale: "nftables is a subsystem of the Linux kernel that can protect against threats originating from within a corporate network to include malicious mobile code and poorly configured software on a host." + impact: "Changing firewall settings while connected over the network can result in being locked out of the system." + remediation: "Run the following command to install nftables # yum install nftables." + compliance: + - cis: ["3.5.2.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: all + rules: + - "c:rpm -q nftables -> r:^nftables-" + + # 3.5.2.2 Ensure firewalld is either not installed or masked with nftables. (Automated) + - id: 4599 + title: "Ensure firewalld is either not installed or masked with nftables." + description: 'firewalld (Dynamic Firewall Manager) provides a dynamically managed firewall with support for network/firewall "zones" to assign a level of trust to a network and its associated connections, interfaces or sources. It has support for IPv4, IPv6, Ethernet bridges and also for IPSet firewall settings. There is a separation of the runtime and permanent configuration options.' + rationale: "Running both nftables.service and firewalld.service may lead to conflict and unexpected results." + remediation: "Run the following command to remove firewalld # yum remove firewalld OR Run the following command to stop and mask firewalld # systemctl --now mask firewalld." 
+ compliance: + - cis: ["3.5.2.2"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: none rules: - - "c:rpm -q firewalld nftables -> r:is not installed" - - "c:systemctl is-enabled nftables -> r:masked" - - "c:systemctl is-enabled firewalld -> r:masked" + - "p:firewalld" + - "c:firewall-cmd --state -> r:running" - # 3.5.1.4 Ensure firewalld service is enabled and running (Automated) - - id: 4599 - title: "Ensure firewalld service is enabled and running" - description: "firewalld.serviceenables the enforcement of firewall rules configured through firewalld" - rationale: "Ensure that the firewalld.service is enabled and running to enforce firewall rules configured through firewalld" - remediation: "Run the following command to unmask firewalld: # systemctl unmask firewalld; Run the following command to enable and start firewalld: # systemctl --now enable firewalld" + # 3.5.2.3 Ensure iptables-services not installed with nftables. (Automated) + - id: 4600 + title: "Ensure iptables-services not installed with nftables." + description: "The iptables-services package contains the iptables.service and ip6tables.service. These services allow for management of the Host Based Firewall provided by the iptables package." + rationale: "iptables.service and ip6tables.service are still supported and can be installed with the iptables-services package. Running both nftables and the services included in the iptables-services package may lead to conflict." + remediation: "Run the following commands to stop the services included in the iptables-services package and remove the iptables-services package # systemctl stop iptables # systemctl stop ip6tables # yum remove iptables-services." compliance: - - cis: ["3.5.1.4"] - - cis_csc: ["9.4"] - - pci_dss: ["1.1"] - - tsc: ["CC8.1"] + - cis: ["3.5.2.3"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:systemctl is-enabled firewalld -> r:enabled" - - "c:firewall-cmd --state -> r:running" + - "c:rpm -q iptables-services -> r:^package iptables-services is not installed" - ############################################### - # 3.5.2 Configure nftables #################### - ############################################### + # 3.5.2.4 Ensure iptables are flushed with nftables. (Manual) + - id: 4601 + title: "Ensure iptables are flushed with nftables." + description: "nftables is a replacement for iptables, ip6tables, ebtables and arptables." + rationale: "It is possible to mix iptables and nftables. However, this increases complexity and also the chance to introduce errors. For simplicity flush out all iptables rules, and ensure it is not loaded." + remediation: "Run the following commands to flush iptables: For iptables: # iptables -F For ip6tables: # ip6tables -F." 
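+ # Illustrative example (not part of the CIS benchmark text): a minimal nftables bootstrap that
+ # would satisfy the table, base-chain and loopback checks in 3.5.2.5 - 3.5.2.7 below (the table
+ # name "filter" is only an example taken from the remediation text):
+ #   nft create table inet filter
+ #   nft create chain inet filter input { type filter hook input priority 0 \; }
+ #   nft create chain inet filter forward { type filter hook forward priority 0 \; }
+ #   nft create chain inet filter output { type filter hook output priority 0 \; }
+ #   nft add rule inet filter input iif lo accept
+ #   nft add rule inet filter input ip saddr 127.0.0.0/8 counter drop
+ # "nft list ruleset" should then show the input/forward/output hooks and the loopback rules that
+ # the corresponding checks look for.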
+ compliance: + - cis: ["3.5.2.4"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: none + rules: + - 'c:iptables -L -> !r:^\s*Chain|^\s*target && r:\s*\S+' + - 'c:ip6tables -L -> !r:^\s*Chain|^\s*target && r:\s*\S+' - # 3.5.2.5 Ensure a table exists (Automated) - - id: 4600 - title: "Ensure a table exists" - description: "nTables hold chains. Each table only has one address family and only applies to packets of this family. Tables can have one of five families." + # 3.5.2.5 Ensure an nftables table exists. (Automated) + - id: 4602 + title: "Ensure an nftables table exists." + description: "Tables hold chains. Each table only has one address family and only applies to packets of this family. Tables can have one of five families." rationale: "nftables doesn't have any default tables. Without a table being build, nftables will not filter network traffic." - remediation: "Run the following command to create a table in nftables: # nft create table inet
" + impact: "Adding rules to a running nftables can cause loss of connectivity to the system." + remediation: "Run the following command to create a table in nftables # nft create table inet
Example: # nft create table inet filter." compliance: - cis: ["3.5.2.5"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC8.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - - "c:nft list tables -> r:table" + - 'c:nft list tables -> r:\w+' - # 3.5.2.6 Ensure base chains exist (Automated) - - id: 4601 - title: "Ensure base chains exist" + # 3.5.2.6 Ensure nftables base chains exist. (Automated) + - id: 4603 + title: "Ensure nftables base chains exist." description: "Chains are containers for rules. They exist in two kinds, base chains and regular chains. A base chain is an entry point for packets from the networking stack, a regular chain may be used as jump target and is used for better rule organization." rationale: "If a base chain doesn't exist with a hook for input, forward, and delete, packets that would flow through those chains will not be touched by nftables." - remediation: "Run the following command to create the base chains: # nft createchain inet
<table name> <chain name> { type filter hook <(input|forward|output)> priority 0 \\; } " + impact: "If configuring nftables over ssh, creating a base chain with a policy of drop will cause loss of connectivity. Ensure that a rule allowing ssh has been added to the base chain prior to setting the base chain's policy to drop." + remediation: "Run the following command to create the base chains: # nft create chain inet <table name> <chain name>
{ type filter hook <(input|forward|output)> priority 0 \\; } Example: # nft create chain inet filter input { type filter hook input priority 0 \\; } # nft create chain inet filter forward { type filter hook forward priority 0 \\; } # nft create chain inet filter output { type filter hook output priority 0 \\; }." compliance: - cis: ["3.5.2.6"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC8.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - - "c:nft list ruleset -> r:type filter hook input priority 0" - - "c:nft list ruleset -> r:type filter forward priority 0" - - "c:nft list ruleset -> r:type filter hook output priority 0" + - "c:nft list ruleset -> r:hook input" + - "c:nft list ruleset -> r:hook forward" + - "c:nft list ruleset -> r:hook output" - # 3.5.2.7 Ensure loopback traffic is configured (Automated) - - id: 4602 - title: "Ensure loopback traffic is configured" - description: "Configure the loopback interface to accept traffic. Configure all other interfaces to deny traffic to the loopback network" + # 3.5.2.7 Ensure nftables loopback traffic is configured. (Automated) + - id: 4604 + title: "Ensure nftables loopback traffic is configured." + description: "Configure the loopback interface to accept traffic. Configure all other interfaces to deny traffic to the loopback network." rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure." - remediation: "Run the following commands to implement the loopback rules: # nft add rule inet filter input iif lo accept; # nft create rule inet filter input ip saddr 127.0.0.0/8 counter drop; IF IPv6 is enabled: run the following command to implement the IPv6 loopback rules: # nft add rule inet filter input ip6 saddr::1 counter drop" + remediation: "Run the following commands to implement the loopback rules: # nft add rule inet filter input iif lo accept # nft create rule inet filter input ip saddr 127.0.0.0/8 counter drop IF IPv6 is enabled: Run the following command to implement the IPv6 loopback rules: # nft add rule inet filter input ip6 saddr ::1 counter drop." compliance: - cis: ["3.5.2.7"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC8.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - 'c:nft list ruleset -> r:iif "lo" accept' - "c:nft list ruleset -> r:ip saddr 127.0.0.0/8 counter packets 0 bytes 0 drop" - # 3.5.2.8 Ensure outbound and established connections are configured (Manual) - - id: 4603 - title: "Ensure outbound and established connections are configured" - description: "Configure the firewall rules for new outbound and established connections." - rationale: "If rules are not in place for new outbound and established connections, all packets will be dropped by the default policy preventing network usage." 
- remediation: "Configure nftables in accordance with site policy. The following commands will implement a policy to allow all outbound connections and all established connections: # nft add rule inet filter input ip protocol tcp ct state established accept; # nft add rule inet filter input ip protocol udp ct state established accept; # nft add rule inet filter input ip protocol icmp ct state established accept; # nft add rule inet filter output ip protocol tcp ct state new,related,established accept; # nft add rule inet filter output ip protocol udp ct state new,related,established accept; # nft add rule inet filter output ip protocol icmp ct state new,related,established accept" - compliance: - - cis: ["3.5.2.8"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC8.1"] - condition: all - rules: - - "c:nft list ruleset -> r:ip protocol tcp ct state established accept" - - "c:nft list ruleset -> r:ip protocol tcp udp state established accept" - - "c:nft list ruleset -> r:ip protocol tcp icmp state established accept" - - "c:nft list ruleset -> r:ip protocol tcp ct state established,related,new accept" - - "c:nft list ruleset -> r:ip protocol tcp udp state established,related,new accept" - - "c:nft list ruleset -> r:ip protocol tcp icmp state established,related,new accept" + # 3.5.2.8 Ensure nftables outbound and established connections are configured. (Manual) - Not Implemented - # 3.5.2.9 Ensure default deny firewall policy (Automated) - - id: 4604 - title: "Ensure default deny firewall policy" + # 3.5.2.9 Ensure nftables default deny firewall policy. (Automated) + - id: 4605 + title: "Ensure nftables default deny firewall policy." description: "Base chain policy is the default verdict that will be applied to packets reaching the end of the chain." - rationale: "There are two policies: accept (Default) and drop. If the policy is set to accept, the firewall will accept any packet that is not configured to be denied and the packet will continue traversing the network stack. It is easier to white list acceptable usage than to black list unacceptable usage." - remediation: "Run the following command for the base chains with the input, forward, and output hooks to implement a default DROP policy: # nft chain
<table family> <table name> <chain name> { policy drop \\; }" + rationale: "There are two policies: accept (Default) and drop. If the policy is set to accept, the firewall will accept any packet that is not configured to be denied and the packet will continue traversing the network stack. It is easier to white list acceptable usage than to black list unacceptable usage. Note: Changing firewall settings while connected over the network can result in being locked out of the system." + impact: "If configuring nftables over ssh, creating a base chain with a policy of drop will cause loss of connectivity. Ensure that a rule allowing ssh has been added to the base chain prior to setting the base chain's policy to drop." + remediation: "Run the following command for the base chains with the input, forward, and output hooks to implement a default DROP policy: # nft chain <table family> <table name> <chain name>
{ policy drop \\; } Example: # nft chain inet filter input { policy drop \\; } # nft chain inet filter forward { policy drop \\; } # nft chain inet filter output { policy drop \\; }." compliance: - cis: ["3.5.2.9"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC8.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - - "c:nft list ruleset -> r:type filter hook input priority 0; policy drop;" - - "c:nft list ruleset -> r:type filter hook forward priority 0; policy drop;" - - "c:nft list ruleset -> r:type filter hook output priority 0; policy drop;" + - "c:nft list ruleset -> r:hook input && r:policy drop" + - "c:nft list ruleset -> r:hook forward && r:policy drop" + - "c:nft list ruleset -> r:hook output && r:policy drop" - # 3.5.2.10 Ensure nftables service is enabled (Automated) - - id: 4605 - title: "Ensure nftables service is enabled" - description: "The nftables service allows for the loading of nftables rulesets during boot, or starting on the nftables service" - rationale: "The nftables service restores the nftables rules from the rules files referenced in the /etc/sysconfig/nftables.conffile during boot or the starting of the nftables service" - remediation: "Run the following command to enable the nftables service: # systemctl enable nftables" + # 3.5.2.10 Ensure nftables service is enabled. (Automated) + - id: 4606 + title: "Ensure nftables service is enabled." + description: "The nftables service allows for the loading of nftables rulesets during boot, or starting on the nftables service." + rationale: "The nftables service restores the nftables rules from the rules files referenced in the /etc/sysconfig/nftables.conf file during boot or the starting of the nftables service." + remediation: "Run the following command to enable the nftables service: # systemctl enable nftables." compliance: - cis: ["3.5.2.10"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC8.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - "c:systemctl is-enabled nftables -> r:enabled" - - "c:nft list ruleset -> r:type filter hook forward priority 0; policy drop;" - - "c:nft list ruleset -> r:type filter hook output priority 0; policy drop;" - # 3.5.2.11 Ensure nftables rules are permanent (Automated) - - id: 4606 - title: "Ensure nftables rules are permanent" - description: "nftables is a subsystem of the Linux kernel providing filtering and classification of network packets/datagrams/frames. The nftables service reads the /etc/sysconfig/nftables.conffile for a nftables file or files to include in the nftables ruleset. A nftables ruleset containing the input, forward, and output base chains allow network traffic to be filtered." 
- remediation: "Run the following command to enable the nftables service: # systemctl enable nftables" - rationale: "Changes made to nftables ruleset only affect the live system, you will also need to configure the nftables ruleset to apply on boot" - compliance: - - cis: ["3.5.2.11"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC8.1"] - condition: all - rules: - - 'f:/etc/sysconfig/nftables.conf -> r:^include "/etc/nftables/nftables.rules"' - - 'not f:/etc/sysconfig/nftables.conf -> r:#\s*include "/etc/nftables/nftables.rules"' + # 3.5.2.11 Ensure nftables rules are permanent. (Automated) - Not Implemented ############################################### - # 3.5.3 Configure iptables #################### + # 3.5.3 Configure iptables ############################################### ############################################### - #3.5.3.2 Configure IPv4 iptabl + # 3.5.3.1 Configure IPv4 iptables ############################################### - # 3.5.3.2.1 Ensure default deny firewall policy (Automated) + # 3.5.3.1.1 Ensure iptables packages are installed. (Automated) - id: 4607 - title: "Ensure default deny firewall policy" - description: "A default deny all policy on connections ensures that any unconfigured network usage will be rejected." - rationale: "With a default accept policy the firewall will accept any packet that is not configured to be denied. It is easier to white list acceptable usage than to black list unacceptable usage." - remediation: "Run the following commands to implement a default DROP policy: # iptables -P INPUT DROP # iptables -P OUTPUT DROP # iptables -P FORWARD DROP" + title: "Ensure iptables packages are installed." + description: "iptables is a utility program that allows a system administrator to configure the tables provided by the Linux kernel firewall, implemented as different Netfilter modules, and the chains and rules it stores. Different kernel modules and programs are used for different protocols; iptables applies to IPv4, ip6tables to IPv6, arptables to ARP, and ebtables to Ethernet frames." + rationale: "A method of configuring and maintaining firewall rules is necessary to configure a Host Based Firewall." + remediation: "Run the following command to install iptables and iptables-services # yum install iptables iptables-services." + compliance: + - cis: ["3.5.3.1.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: all + rules: + - "c:rpm -q iptables -> r:^iptables-" + - "c:rpm -q iptables-services -> r:iptables-services-" + + # 3.5.3.1.2 Ensure nftables is not installed with iptables. (Automated) + - id: 4608 + title: "Ensure nftables is not installed with iptables." + description: "nftables is a subsystem of the Linux kernel providing filtering and classification of network packets/datagrams/frames and is the successor to iptables." + rationale: "Running both iptables and nftables may lead to conflict." + remediation: "Run the following command to remove nftables: # yum remove nftables." 
compliance: - - cis: ["3.5.3.2.1"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC8.1"] + - cis: ["3.5.3.1.2"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:iptables -L -> r:Chain INPUT \(policy DROP\)' - - 'c:iptables -L -> r:Chain FORWARD \(policy DROP\)' - - 'c:iptables -L -> r:Chain OUTPUT \(policy DROP\)' + - "c:rpm -q nftables -> r:^package nftables is not installed" - # 3.5.3.2.2 Ensure loopback traffic is configured (Automated) - - id: 4608 - title: "Ensure loopback traffic is configured" + # 3.5.3.1.3 Ensure firewalld is either not installed or masked with iptables. (Automated) + - id: 4609 + title: "Ensure firewalld is either not installed or masked with iptables." + description: 'firewalld (Dynamic Firewall Manager) provides a dynamically managed firewall with support for network/firewall "zones" to assign a level of trust to a network and its associated connections, interfaces or sources. It has support for IPv4, IPv6, Ethernet bridges and also for IPSet firewall settings. There is a separation of the runtime and permanent configuration options.' + rationale: "Running iptables.service and\\or ip6tables.service with firewalld.service may lead to conflict and unexpected results." + remediation: "Run the following command to remove firewalld # yum remove firewalld OR Run the following command to stop and mask firewalld # systemctl --now mask firewalld." + compliance: + - cis: ["3.5.3.1.3"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: none + rules: + - "c:rpm -q firewalld -> r:package firewalld is not installed" + - 'not c:systemctl status firewalld -> r:active \(running\)' + - "c:systemctl is-enabled firewalld -> r:masked" + + # 3.5.3.2.1 Ensure iptables loopback traffic is configured. (Automated) + - id: 4610 + title: "Ensure iptables loopback traffic is configured." description: "Configure the loopback interface to accept traffic. Configure all other interfaces to deny traffic to the loopback network (127.0.0.0/8)." - rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network (127.0.0.0/8) traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure." - remediation: "Run the following commands to implement the loopback rules: # iptables -A INPUT -i lo -j ACCEPT # iptables -A OUTPUT -o lo -j ACCEPT # iptables -A INPUT -s 127.0.0.0/8 -j DROP" + rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network (127.0.0.0/8) traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure. 
Note: Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following commands to implement the loopback rules: # iptables -A INPUT -i lo -j ACCEPT # iptables -A OUTPUT -o lo -j ACCEPT # iptables -A INPUT -s 127.0.0.0/8 -j DROP." compliance: - - cis: ["3.5.3.2.2"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC8.1"] + - cis: ["3.5.3.2.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - 'c:iptables -L INPUT -v -n -> r:\.*ACCEPT\.*all\.*lo\.**\.*0.0.0.0/0\.*0.0.0.0/0' - 'c:iptables -L INPUT -v -n -> r:\.*DROP\.*all\.**\.**\.*127.0.0.0/8\.*0.0.0.0/0' - 'c:iptables -L OUTPUT -v -n -> r:\.*ACCEPT\.*all\.**\.*lo\.*0.0.0.0/0\.*0.0.0.0/0' - # 3.5.3.2.6 Ensure iptables is enabled and running (Automated) - - id: 4609 - title: "Ensure iptables is enabled and running" + # 3.5.3.2.2 Ensure iptables outbound and established connections are configured. (Manual) - Not Implemented + # 3.5.3.2.3 Ensure iptables rules exist for all open ports. (Automated) - Not Implemented + + # 3.5.3.2.4 Ensure iptables default deny firewall policy. (Automated) + - id: 4611 + title: "Ensure iptables default deny firewall policy." + description: "A default deny all policy on connections ensures that any unconfigured network usage will be rejected." + rationale: "With a default accept policy the firewall will accept any packet that is not configured to be denied. It is easier to white list acceptable usage than to black list unacceptable usage. Note: Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following commands to implement a default DROP policy: # iptables -P INPUT DROP # iptables -P OUTPUT DROP # iptables -P FORWARD DROP." + compliance: + - cis: ["3.5.3.2.4"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: all + rules: + - 'c:iptables -L -> r:Chain INPUT \(policy DROP\)' + - 'c:iptables -L -> r:Chain FORWARD \(policy DROP\)' + - 'c:iptables -L -> r:Chain OUTPUT \(policy DROP\)' + + # 3.5.3.2.5 Ensure iptables rules are saved. (Automated) - Not Implemented + + # 3.5.3.2.6 Ensure iptables is enabled and running. (Automated) + - id: 4612 + title: "Ensure iptables is enabled and running." description: "iptables.service is a utility for configuring and maintaining iptables." - rationale: "iptables.service willload the iptables rules saved in the file /etc/sysconfig/iptablesat boot, otherwise the iptables rules will be cleared during a re-boot of the system." - remediation: "Run the following commands to implement a default DROP policy: # ip6tables -P INPUT DROP; # ip6tables -P OUTPUT DROP; # ip6tables -PFORWARD DROP" + rationale: "iptables.service will load the iptables rules saved in the file /etc/sysconfig/iptables at boot, otherwise the iptables rules will be cleared during a re-boot of the system." + remediation: "Run the following command to enable and start iptables: # systemctl --now enable iptables." 
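A small Python sketch of the service check for 3.5.3.2.6; like the rule, it accepts either active (running) or active (exited). It assumes systemctl is available and is illustrative only:

    import re
    import subprocess

    def systemctl(*args):
        return subprocess.run(["systemctl", *args], capture_output=True, text=True).stdout

    enabled = systemctl("is-enabled", "iptables").strip() == "enabled"
    # The unit counts as running if it reports active (running) or active (exited),
    # matching the regex used by the rule.
    active = re.search(r"active \((running|exited)\)", systemctl("status", "iptables")) is not None
    print("3.5.3.2.6 iptables enabled and running:", enabled and active)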
compliance: - cis: ["3.5.3.2.6"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC8.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - - 'c:systemctl is-enabled iptables -> r:\.*ACCEPT\.*all\.*lo\.**\.*0.0.0.0/0\.*0.0.0.0/0' - - 'c:systemctl status iptables -> r:Active: active (running) since \w+ \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d' + - "c:systemctl is-enabled iptables -> r:enabled" + - 'c:systemctl status iptables -> r:active \(running\)|\(exited\)' ############################################### # 3.5.3.3 Configure IPv6 ip6tables ############################################### - # 3.5.3.3.1 Ensure IPv6 default deny firewall policy (Automated) - - id: 4610 - title: "Ensure IPv6 default deny firewall policy" - description: "A default deny all policy on connections ensures that any unconfigured network usage will be rejected." - rationale: "With a default accept policy the firewall will accept any packet that is not configured to be denied. It is easier to white list acceptable usage than to black list unacceptable usage." - remediation: "Run the following commands to implement a default DROP policy: # ip6tables -P INPUT DROP; # ip6tables -P OUTPUT DROP; # ip6tables -PFORWARD DROP" + # 3.5.3.3.1 Ensure ip6tables loopback traffic is configured. (Automated) + - id: 4613 + title: "Ensure ip6tables loopback traffic is configured." + description: "Configure the loopback interface to accept traffic. Configure all other interfaces to deny traffic to the loopback network (::1)." + rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network (::1) traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure. Note: Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following commands to implement the loopback rules: # ip6tables -A INPUT -i lo -j ACCEPT # ip6tables -A OUTPUT -o lo -j ACCEPT # ip6tables -A INPUT -s ::1 -j DROP." compliance: - cis: ["3.5.3.3.1"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC8.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - - "c:ip6tables -L -> r:Chain INPUT (policy DROP)" - - "c:ip6tables -L -> r:Chain FORWARD (policy DROP)" - - "c:ip6tables -L -> r:Chain OUTPUT (policy DROP)" + - 'c:ip6tables -L INPUT -v -n -> r:\.*ACCEPT\.*all\.*lo\.**\.*::/0\.*::/0' + - 'c:ip6tables -L INPUT -v -n -> r:\.*DROP\.*all\.**\.**\.*::1\.*::/0' + - 'c:ip6tables -L OUTPUT -v -n -> r:\.*ACCEPT\.*all\.**\.*lo\.*::/0\.*::/0' - # 3.5.3.3.2 Ensure IPv6 loopback traffic is configured (Automated) - - id: 4611 - title: "Ensure IPv6 loopback traffic is configured" - description: "Configure the loopback interface to accept traffic. Configure all other interfaces to deny traffic tothe loopback network (::1)." 
- rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network (::1) traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure" - remediation: "Run the following commands to implement the loopback rules:# ip6tables -A INPUT -i lo -j ACCEPT# ip6tables -A OUTPUT -o lo -j ACCEPT# ip6tables -A INPUT -s::1 -j DROP" - compliance: - - cis: ["3.5.3.3.2"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC8.1"] - condition: all - rules: - - "c:ip6tables -L INPUT -v -n -> r:ACCEPT" - - "c:ip6tables -L INPUT -v -n -> r:DROP" - - "c:ip6tables -L OUTPUT -v -n -> r:ACCEPT" + # 3.5.3.3.2 Ensure ip6tables outbound and established connections are configured. (Manual) - Not Implemented + # 3.5.3.3.3 Ensure ip6tables firewall rules exist for all open ports. (Automated) - Not Implemented - # 3.5.3.3.3 Ensure IPv6 outbound and established connections are configured (Manual) - - id: 4612 - title: "Ensure IPv6 outbound and established connections are configured" - description: "Configure the firewall rules for new outbound, and established IPv6 connections." - rationale: "If rules are not in place for new outbound, and established connections all packets will be dropped by the default policy preventing network usage" - remediation: "Configure iptables in accordance with site policy. The following commands will implement a policy to allow all outbound connections and all established connections:# ip6tables -AOUTPUT -p tcp -m state --state NEW,ESTABLISHED -j ACCEPT# ip6tables -A OUTPUT -p udp -m state --state NEW,ESTABLISHED -j ACCEPT# ip6tables -A OUTPUT -p icmp -m state --state NEW,ESTABLISHED -j ACCEPT# ip6tables -A INPUT -p tcp -m state --state ESTABLISHED -j ACCEPT# ip6tables -A INPUT -p udp -m state --state ESTABLISHED -j ACCEPT# ip6tables -A INPUT -p icmp -m state --state ESTABLISHED -j ACCEPT" + # 3.5.3.3.4 Ensure ip6tables default deny firewall policy. (Automated) + - id: 4614 + title: "Ensure ip6tables default deny firewall policy." + description: "A default deny all policy on connections ensures that any unconfigured network usage will be rejected." + rationale: "With a default accept policy the firewall will accept any packet that is not configured to be denied. It is easier to white list acceptable usage than to black list unacceptable usage. Note: Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following commands to implement a default DROP policy: # ip6tables -P INPUT DROP # ip6tables -P OUTPUT DROP # ip6tables -P FORWARD DROP." 
compliance: - - cis: ["3.5.3.3.3"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC8.1"] + - cis: ["3.5.3.3.4"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - - "c:ip6tables -L OUTPUT -v -n -> r:state NEW,ESTABLISHED" - - "c:ip6tables -L INPUT -v -n -> r:state ESTABLISHED" + - 'c:ip6tables -L -> r:Chain INPUT \(policy DROP\)' + - 'c:ip6tables -L -> r:Chain FORWARD \(policy DROP\)' + - 'c:ip6tables -L -> r:Chain OUTPUT \(policy DROP\)' - # 3.5.3.3.6 Ensure ip6tables is enabled and running (Automated) - - id: 4613 - title: "Ensure ip6tables is enabled and running" + # 3.5.3.3.5 Ensure ip6tables rules are saved. (Automated) - Not Implemented + + # 3.5.3.3.6 Ensure ip6tables is enabled and running. (Automated) + - id: 4615 + title: "Ensure ip6tables is enabled and running." description: "ip6tables.service is a utility for configuring and maintaining ip6tables." rationale: "ip6tables.service will load the iptables rules saved in the file /etc/sysconfig/ip6tables at boot, otherwise the ip6tables rules will be cleared during a re-boot of the system." - remediation: "Run the following command to enable and start ip6tables: # systemctl --now start ip6tables" - compliance: - - cis: ["3.5.3.3.3"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC8.1"] - condition: all + remediation: "Run the following command to enable and start ip6tables: # systemctl --now start ip6tables." + compliance: + - cis: ["3.5.3.3.6"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: none rules: - - 'c:systemctl is-enabled ip6tables -> r:\.*ACCEPT\.*all\.*lo\.**\.*0.0.0.0/0\.*0.0.0.0/0' - - 'c:systemctl status ip6tables -> r:Active: active (running) since \w+ \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d' + - 'f:/boot/grub2/grubenv -> r:^\s*kernelopts=\.+ipv6.disable=1' ############################################### # 4 Logging and Auditing ############################################### ############################################### - # 4.1 Configure System Accounting (auditd) + # 4.1.1 Configure System Accounting (auditd) ############################################### - # 4.1.1.1 Ensure auditd is installed - - id: 4614 - title: "Ensure auditd is installed" + # 4.1.1.1 Ensure auditd is installed. (Automated) + - id: 4616 + title: "Ensure auditd is installed." description: "auditd is the userspace component to the Linux Auditing System. It's responsible for writing audit records to the disk." rationale: "The capturing of system events provides system administrators with information to allow them to determine if unauthorized access to their system is occurring." - remediation: "Run the following command to Install auditd # yum install audit audit-libs" + remediation: "Run the following command to Install auditd # yum install audit audit-libs." 
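For reference, a Python sketch combining the two auditd checks that follow (package present, service enabled and running). It is an approximation of the rules, not the SCA engine itself:

    import subprocess

    def out(cmd):
        return subprocess.run(cmd, capture_output=True, text=True).stdout

    installed = (out(["rpm", "-q", "audit"]).startswith("audit-")
                 and out(["rpm", "-q", "audit-libs"]).startswith("audit-libs-"))
    enabled = out(["systemctl", "is-enabled", "auditd"]).strip() == "enabled"
    running = "Active: active (running)" in out(["systemctl", "status", "auditd"])
    print("4.1.1.1 auditd installed:", installed)
    print("4.1.1.2 auditd enabled and running:", enabled and running)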
compliance: - cis: ["4.1.1.1"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.1", "10.7"] - - tsc: ["CC6.1", "CC6.2", "CC6.3", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - - 'c:rpm -q audit -> r:^audit-\S+' - - 'c:rpm -q audit-libs -> r:^audit-libs-\S+' + - "c:rpm -q audit -> r:^audit-" + - "c:rpm -q audit-libs -> r:^audit-libs-" - # 4.1.1.2 Ensure auditd service is enabled (Scored) - - id: 4615 - title: "Ensure auditd service is enabled and running" + # 4.1.1.2 Ensure auditd service is enabled and running. (Automated) + - id: 4617 + title: "Ensure auditd service is enabled and running." description: "Turn on the auditd daemon to record system events." rationale: "The capturing of system events provides system administrators with information to allow them to determine if unauthorized access to their system is occurring." - remediation: "Run the following command to enable auditd: # systemctl --now enable auditd" + remediation: "Run the following command to enable and start auditd : # systemctl --now enable auditd." compliance: - cis: ["4.1.1.2"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.1", "10.7"] - - tsc: ["CC6.1", "CC6.2", "CC6.3", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - "c:systemctl is-enabled auditd -> r:^enabled" - 'c:systemctl status auditd -> r:Active: active \(running\)' - # 4.1.1.3 Ensure auditing for processes that start prior to auditd is enabled (Scored) - - id: 4616 - title: "Ensure auditing for processes that start prior to auditd is enabled" + # 4.1.1.3 Ensure auditing for processes that start prior to auditd is enabled. (Automated) + - id: 4618 + title: "Ensure auditing for processes that start prior to auditd is enabled." description: "Configure grub so that processes that are capable of being audited can be audited even if they start up prior to auditd startup." - rationale: "Audit events need to be captured on processes that start up prior to auditd, so that potential malicious activity cannot go undetected. Note: This recommendation is designed around the grub2 bootloader, if LILO or another bootloader is in use in your environment enact equivalent settings." - remediation: 'Edit /etc/default/grub and add audit=1 to GRUB_CMDLINE_LINUX: GRUB_CMDLINE_LINUX="audit=1" . Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg' + rationale: "Audit events need to be captured on processes that start up prior to auditd , so that potential malicious activity cannot go undetected. Note: This recommendation is designed around the grub2 bootloader, if LILO or another bootloader is in use in your environment enact equivalent settings." 
+ remediation: 'Edit /etc/default/grub and add audit=1 to GRUB_CMDLINE_LINUX: GRUB_CMDLINE_LINUX="audit=1" Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg.' compliance: - cis: ["4.1.1.3"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.2.6", "10.7"] - - gpg_13: ["7.9"] - - gdpr_IV: ["35.7.d", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: none rules: - - 'f:/boot/grub2/grub.cfg -> r:^\s*\t*linux && !r:audit=1' + - "f:/boot/grub2/grubenv -> r:kernelopts= && !r:audit=1" - # 4.1.2.1 Ensure audit log storage size is configured (Not Scored) - - id: 4617 - title: "Ensure audit log storage size is configured" - description: "Configure the maximum size of the audit log file. Once the log reaches the maximum size, it will be rotated and a new log file will be started." + ############################################### + # 4.1.2 Configure Data Retention + ############################################### + + # 4.1.2.1 Ensure audit log storage size is configured. (Automated) + - id: 4619 + title: "Ensure audit log storage size is configured." + description: "Configure the maximum size of the audit log file. Once the log reaches the maximum size, it will be rotated and a new log file will be started. Notes: - The max_log_file parameter is measured in megabytes. - Other methods of log rotation may be appropriate based on site policy. One example is time-based rotation strategies which don't have native support in auditd configurations. Manual audit of custom configurations should be evaluated for effectiveness and completeness." rationale: "It is important that an appropriate size is determined for log files so that they do not impact the system and audit data is not lost." - remediation: "Set the following parameter in /etc/audit/auditd.conf in accordance with site policy: max_log_file = " + remediation: "Set the following parameter in /etc/audit/auditd.conf in accordance with site policy: max_log_file = ." compliance: - cis: ["4.1.2.1"] - - cis_csc: ["6.4"] - - pci_dss: ["10.7"] + - cis_csc_v8: ["8.3"] + - cis_csc_v7: ["6.4"] + - iso_27001-2013: ["A.12.4.1"] + - pci_dss_v3.2.1: ["10.7"] + - soc_2: ["A1.1"] condition: all rules: - - 'f:/etc/audit/auditd.conf -> r:^max_log_file = \d+' + - 'f:/etc/audit/auditd.conf -> r:^\s*max_log_file\s*=\s*\d+' - # 4.1.2.2 Ensure audit logs are not automatically deleted (Scored) - - id: 4618 - title: "Ensure audit logs are not automatically deleted" + # 4.1.2.2 Ensure audit logs are not automatically deleted. (Automated) + - id: 4620 + title: "Ensure audit logs are not automatically deleted." description: "The max_log_file_action setting determines how to handle the audit log file reaching the max file size. A value of keep_logs will rotate the logs but never delete old logs." rationale: "In high security contexts, the benefits of maintaining a long audit history exceed the cost of storing the audit history." - remediation: "Set the following parameter in /etc/audit/auditd.conf: max_log_file_action = keep_logs" + remediation: "Set the following parameter in /etc/audit/auditd.conf: max_log_file_action = keep_logs." 
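A compact Python sketch of the /etc/audit/auditd.conf settings checked in 4.1.2.1, 4.1.2.2 and 4.1.2.3 below. Key = value pairs are read directly from the file; run on the audited host, illustrative only:

    import re
    from pathlib import Path

    conf = {}
    for line in Path("/etc/audit/auditd.conf").read_text().splitlines():
        m = re.match(r"^\s*(\w+)\s*=\s*(\S+)", line)
        if m:
            conf[m.group(1)] = m.group(2)

    print("4.1.2.1 max_log_file set:", conf.get("max_log_file", "").isdigit())
    print("4.1.2.2 keep_logs:", conf.get("max_log_file_action") == "keep_logs")
    print("4.1.2.3 halt when full:",
          conf.get("space_left_action") == "email"
          and conf.get("action_mail_acct") == "root"
          and conf.get("admin_space_left_action") == "halt")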
compliance: - cis: ["4.1.2.2"] - - cis_csc: ["6.2", "6.4"] - - pci_dss: ["10.7"] + - cis_csc_v8: ["8.3"] + - cis_csc_v7: ["6.2", "6.4"] + - iso_27001-2013: ["A.12.4.1"] + - pci_dss_v3.2.1: ["10.7"] + - soc_2: ["A1.1"] condition: all rules: - - 'f:/etc/audit/auditd.conf -> r:^\s*max_log_file_action\s*=\s*keep_logs' + - 'f:/etc/audit/auditd.conf -> r:^max_log_file_action\s*=\s*keep_logs' - # 4.1.2.3 Ensure system is disabled when audit logs are full (Scored) - - id: 4619 - title: "Ensure system is disabled when audit logs are full" + # 4.1.2.3 Ensure system is disabled when audit logs are full. (Automated) + - id: 4621 + title: "Ensure system is disabled when audit logs are full." description: "The auditd daemon can be configured to halt the system when the audit logs are full." rationale: "In high security contexts, the risk of detecting unauthorized access or nonrepudiation exceeds the benefit of the system's availability." - remediation: "Set the following parameters in /etc/audit/auditd.conf: space_left_action = email action_mail_acct = root admin_space_left_action = halt" + remediation: "Set the following parameters in /etc/audit/auditd.conf: space_left_action = email action_mail_acct = root admin_space_left_action = halt." compliance: - cis: ["4.1.2.3"] - - cis_csc: ["6.2", "6.4"] - - pci_dss: ["10.7"] - condition: all - rules: - - 'f:/boot/grub2/grub.cfg -> r:^\s*space_left_action\s*=\s*email' - - 'f:/etc/audit/auditd.conf -> r:^\s*action_mail_acct\s*=\s*root' - - 'f:/etc/audit/auditd.conf -> r:^\s*admin_space_left_action\s*=\s*halt' - - # 4.1.2.4 Ensure audit_backlog_limit is sufficient - - id: 4620 - title: "Ensure audit_backlog_limit is sufficient" - description: "The backlog limit has a default setting of 64" - rationale: "During boot if audit=1, then the backlog will hold 64 records. If more than 64 records are created during boot, auditd records will be lost and potential malicious activity could go undetected" - remediation: 'Edit /etc/default/grub and add audit_backlog_limit= to GRUB_CMDLINE_LINUX: Example: GRUB_CMDLINE_LINUX="audit_backlog_limit=8192" Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg' + - cis_csc_v8: ["8.2", "8.3"] + - cis_csc_v7: ["6.2", "6.4"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3", "10.7"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + - soc_2: ["A1.1"] + condition: all + rules: + - 'f:/etc/audit/auditd.conf -> r:^space_left_action\s*=\s*email' + - 'f:/etc/audit/auditd.conf -> r:^action_mail_acct\s*=\s*root' + - 'f:/etc/audit/auditd.conf -> r:^admin_space_left_action\s*=\s*halt' + + # 4.1.2.4 Ensure audit_backlog_limit is sufficient. (Automated) + - id: 4622 + title: "Ensure audit_backlog_limit is sufficient." + description: "The backlog limit has a default setting of 64." + rationale: "During boot if audit=1, then the backlog will hold 64 records. If more than 64 records are created during boot, auditd records will be lost and potential malicious activity could go undetected." + remediation: 'Edit /etc/default/grub and add audit_backlog_limit= to GRUB_CMDLINE_LINUX: Example: GRUB_CMDLINE_LINUX="audit_backlog_limit=8192" Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg.' 
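A Python sketch of the 4.1.2.4 verification: every kernel entry reported by grubby --info=ALL should carry audit_backlog_limit=<N> with N >= 8192, a slightly stricter reading than the single numeric-compare rule below. Illustrative only:

    import re
    import subprocess

    out = subprocess.run(["grubby", "--info=ALL"], capture_output=True, text=True).stdout
    limits = []
    for line in out.splitlines():
        m = re.search(r"^args=.*\baudit_backlog_limit=(\d+)", line)
        if m:
            limits.append(int(m.group(1)))
    ok = bool(limits) and all(n >= 8192 for n in limits)
    print("4.1.2.4 audit_backlog_limit sufficient:", ok)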
compliance: - cis: ["4.1.2.4"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.7"] + - cis_csc_v8: ["8.2", "8.3"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3", "10.7"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + - soc_2: ["A1.1"] condition: all rules: - - "f:/etc/audit/auditd.conf -> r:audit_backlog_limit=" + - 'c:grubby --info=ALL -> n:^args=\.*\saudit_backlog_limit=(\d+) compare >= 8192' - # 4.1.3 Ensure events that modify date and time information are collected (Scored) - - id: 4621 - title: "Ensure events that modify date and time information are collected" - description: 'Capture events where the system date and/or time has been modified. The parameters in this section are set to determine if the adjtimex (tune kernel clock), settimeofday (Set time, using timeval and timezone structures) stime (using seconds since 1/1/1970) or clock_settime (allows for the setting of several internal clocks and timers) system calls have been executed and always write an audit record to the /var/log/audit.log file upon exit, tagging the records with the identifier "time-change".' + # 4.1.3 Ensure events that modify date and time information are collected. (Automated) + - id: 4623 + title: "Ensure events that modify date and time information are collected." + description: 'Capture events where the system date and/or time has been modified. The parameters in this section are set to determine if the adjtimex (tune kernel clock), settimeofday (Set time, using timeval and timezone structures) stime (using seconds since 1/1/1970) or clock_settime (allows for the setting of several internal clocks and timers) system calls have been executed and always write an audit record to the /var/log/audit.log file upon exit, tagging the records with the identifier "time-change" Note: Reloading the auditd config to set active settings requires the auditd service to be restarted, and may require a system reboot.' rationale: "Unexpected changes in system date and/or time could be a sign of malicious activity on the system." - remediation: "For 32 bit systems edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/time-change.rules and add the following lines: -a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change -a always,exit -F arch=b32 -S clock_settime -k time-change -w /etc/localtime -p wa -k time-change For 64 bit systems edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/time-change.rules and add the following lines: -a always,exit -F arch=b64 -S adjtimex -S settimeofday -k time-change -a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change -a always,exit -F arch=b64 -S clock_settime -k time-change -a always,exit -Farch=b32 -S clock_settime -k time-change -w /etc/localtime -p wa -k time-change. Notes: Reloading the auditd config to set active settings may require a system reboot." + remediation: 'For 32 bit systems Edit or Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor events that modify date and time information. 
64 Bit systems Example: # printf \" -a always,exit -F arch=b64 -S adjtimex,settimeofday,clock_settime -k time-change -a always,exit -F arch=b32 -S adjtimex,settimeofday,clock_settime -k time-change -w /etc/localtime -p wa -k time-change \" >> /etc/audit/rules.d/50-time-change.rules Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64. In addition, add stime to the system call audit. Example: -a always,exit -F arch=b32 -S adjtimex,settimeofday,clock_settime,stime -k time-change.' compliance: - cis: ["4.1.3"] - - cis_csc: ["5.5"] - - pci_dss: ["10.4.2", "10.2.7"] - - nist_800_53: ["AU.14", "AU.6"] - - gpg_13: ["7.9"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S adjtimex && r:-S settimeofday && r:-S stime && r:-k time-change' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S clock_settime && r:-k time-change' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /etc/localtime && r:-p wa && r:-k time-change' - - # 4.1.4 Ensure events that modify user/group information are collected (Scored) - - id: 4622 - title: "Ensure events that modify user/group information are collected" - description: 'Record events affecting the group, passwd (user IDs), shadow and gshadow (passwords) or /etc/security/opasswd (old passwords, based on remember parameter in the PAM configuration) files. The parameters in this section will watch the files to see if they have been opened for write or have had attribute changes (e.g. permissions) and tag them with the identifier "identity" in the audit log file.' + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["5.5"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.1.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - "d:/etc/audit/rules.d" + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F arch=b64|-F arch=b32 && r:-S && r:adjtimex && r:settimeofday|settimeofday -S stime && r:-k time-change|key=time-change' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F arch=b32|-F arch=b64 && r:-S && r:clock_settime && r:-k time-change|key=time-change' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-w && r:/etc/localtime && r:-p wa && r:-k time-change|key=time-change' + - "c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b64|-F arch=b32 && r:-S && r:adjtimex && r:settimeofday|settimeofday -S stime && r:-k time-change|key=time-change" + - "c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b32|-F arch=b64 && r:-S && r:clock_settime && r:-k time-change|key=time-change" + - "c:auditctl -l -> r:^-w && r:/etc/localtime && r:-p wa && r:-k time-change|key=time-change" + + # 4.1.4 Ensure events that modify user/group information are collected. (Automated) + - id: 4624 + title: "Ensure events that modify user/group information are collected." 
+ description: 'Record events affecting the group , passwd (user IDs), shadow and gshadow (passwords) or /etc/security/opasswd (old passwords, based on remember parameter in the PAM configuration) files. The parameters in this section will watch the files to see if they have been opened for write or have had attribute changes (e.g. permissions) and tag them with the identifier "identity" in the audit log file. Note: Reloading the auditd config to set active settings may require a system reboot.' rationale: "Unexpected changes to these files could be an indication that the system has been compromised and that an unauthorized user is attempting to hide their activities or compromise additional accounts." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/identity.rules and add the following lines: -w /etc/group -p wa -k identity -w /etc/passwd -p wa -k identity -w /etc/gshadow -p wa -k identity -w /etc/shadow -p wa -k identity -w /etc/security/opasswd -p wa -k identity. Notes: Reloading the auditd config to set active settings may require a system reboot." + remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-identity.rules Add the following lines: -w /etc/group -p wa -k identity -w /etc/passwd -p wa -k identity -w /etc/gshadow -p wa -k identity -w /etc/shadow -p wa -k identity -w /etc/security/opasswd -p wa -k identity." compliance: - cis: ["4.1.4"] - - cis_csc: ["4.8"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /etc/group && r:-p wa && r:-k identity' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /etc/passwd && r:-p wa && r:-k identity' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /etc/gshadow && r:-p wa && r:-k identity' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /etc/shadow && r:-p wa && r:-k identity' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /etc/security/opasswd && r:-p wa && r:-k identity' - - # 4.1.5 Ensure events that modify the system's network environment are collected (Scored) - - id: 4623 - title: "Ensure events that modify the system's network environment are collected" - description: "Record changes to network environment files or system calls. The below parameters monitor the sethostname (set the systems host name) or setdomainname (set the systems domainname) system calls, and write an audit event on system call exit. The other parameters monitor the /etc/issue and /etc/issue.net files (messages displayed pre-login), /etc/hosts (file containing host names and associated IP addresses), /etc/sysconfig/network file and /etc/sysconfig/network-scripts/ directory (containing network interface scripts and configurations)." - rationale: 'Monitoring sethostname and setdomainname will identify potential unauthorized changes to host and domainname of a system. The changing of these names could potentially break security parameters that are set based on those names. The /etc/hosts file is monitored for changes in the file that can indicate an unauthorized intruder is trying to change machine associations with IP addresses and trick users and processes into connecting to unintended machines. 
Monitoring /etc/issue and /etc/issue.net is important, as intruders could put disinformation into those files and trick users into providing information to the intruder. Monitoring /etc/sysconfig/network and /etc/sysconfig/network-scripts/ is important as it can show if network interfaces or scripts are being modified in a way that can lead to the machine becoming unavailable or compromised. All audit records will be tagged with the identifier "system-locale."' - remediation: "For 32 bit systems edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/system-locale.rules and add the following lines: -a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale -w /etc/issue -p wa -k system-locale -w /etc/issue.net -p wa -k system-locale -w /etc/hosts -p wa -k system-locale -w /etc/sysconfig/network -p wa -k system-locale For 64 bit systems edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/system-locale.rules and add the following lines: -a always,exit -F arch=b64 -S sethostname -S setdomainname -k system-locale -a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale -w /etc/issue -p wa -k system-locale -w /etc/issue.net -p wa -k system-locale -w /etc/hosts -p wa -k system-locale -w /etc/sysconfig/network -p wa -k system-locale " + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["4.8"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.4.3"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /etc/group -p wa -k identity' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /etc/passwd -p wa -k identity' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /etc/gshadow -p wa -k identity' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /etc/shadow -p wa -k identity' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /etc/security/opasswd -p wa -k identity' + - "c:auditctl -l -> r:^-w && r:/etc/group && r:-p wa && r:-k identity|key=identity" + - "c:auditctl -l -> r:^-w && r:/etc/passwd && r:-p wa && r:-k identity|key=identity" + - "c:auditctl -l -> r:^-w && r:/etc/gshadow && r:-p wa && r:-k identity|key=identity" + - "c:auditctl -l -> r:^-w && r:/etc/shadow && r:-p wa && r:-k identity|key=identity" + - "c:auditctl -l -> r:^-w && r:/etc/security/opasswd && r:-p wa && r:-k identity|key=identity" + + # 4.1.5 Ensure events that modify the system's network environment are collected. (Automated) + - id: 4625 + title: "Ensure events that modify the system's network environment are collected." + description: "Record changes to network environment files or system calls. The below parameters monitor the sethostname (set the systems host name) or setdomainname (set the systems domainname) system calls, and write an audit event on system call exit. The other parameters monitor the /etc/issue and /etc/issue.net files (messages displayed pre-login), /etc/hosts (file containing host names and associated IP addresses) and /etc/sysconfig/network (directory containing network interface scripts and configurations) files. Note: Reloading the auditd config to set active settings requires the auditd service to be restarted, and may require a system reboot." 
+ rationale: 'Monitoring sethostname and setdomainname will identify potential unauthorized changes to host and domainname of a system. The changing of these names could potentially break security parameters that are set based on those names. The /etc/hosts file is monitored for changes in the file that can indicate an unauthorized intruder is trying to change machine associations with IP addresses and trick users and processes into connecting to unintended machines. Monitoring /etc/issue and /etc/issue.net is important, as intruders could put disinformation into those files and trick users into providing information to the intruder. Monitoring /etc/sysconfig/network is important as it can show if network interfaces or scripts are being modified in a way that can lead to the machine becoming unavailable or compromised. All audit records will be tagged with the identifier "system-locale.".' + remediation: "For 32 bit systems Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-system_local.rules Add the following lines: -a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale -w /etc/issue -p wa -k system-locale -w /etc/issue.net -p wa -k system-locale -w /etc/hosts -p wa -k system-locale -w /etc/sysconfig/network -p wa -k system-locale For 64 bit systems Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-system_local.rules Add the following lines: -a always,exit -F arch=b64 -S sethostname -S setdomainname -k system-locale -a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale -w /etc/issue -p wa -k system-locale -w /etc/issue.net -p wa -k system-locale -w /etc/hosts -p wa -k system-locale -w /etc/sysconfig/network -p wa -k system-locale." compliance: - cis: ["4.1.5"] - - cis_csc: ["5.5", "6.2"] - - pci_dss: ["10.2.7"] - - nist_800_53: ["AU.14", "AU.6"] - - gpg_13: ["7.9"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S sethostname && r:-S setdomainname && r:-k system-locale' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /etc/issue && r:-p wa && r:-k system-locale' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /etc/issue.net && r:-p wa && r:-k system-locale' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /etc/hosts && r:-p wa && r:-k system-locale' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /etc/sysconfig/network && r:-p wa && r:-k system-locale' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /etc/sysconfig/network-scripts/ && r:-p wa && r:-k system-locale' - - # 4.1.6 Ensure events that modify the system's Mandatory Access Controls are collected (Scored) - - id: 4624 - title: "Ensure events that modify the system's Mandatory Access Controls are collected" - description: "Monitor SELinux mandatory access controls. The parameters below monitor any write access (potential additional, deletion or modification of files in the directory) or attribute changes to the /etc/selinux/ and /usr/share/selinux/ directories." 
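For reference, a Python sketch of the system-locale audit below. It merges the on-disk *.rules files with the auditctl -l output (the policy checks each source separately) and only looks for the watches and syscall names, so it is an approximation:

    import re
    import subprocess
    from pathlib import Path

    sources = [p.read_text() for p in Path("/etc/audit/rules.d").glob("*.rules")]
    sources.append(subprocess.run(["auditctl", "-l"], capture_output=True, text=True).stdout)
    text = "\n".join(sources)

    watches = ["/etc/issue", "/etc/issue.net", "/etc/hosts", "/etc/sysconfig/network"]
    have_watches = all(re.search(r"-w\s+%s\s+-p\s+wa\s+.*system-locale" % re.escape(w), text)
                       for w in watches)
    have_syscalls = bool(re.search(r"sethostname", text) and re.search(r"setdomainname", text))
    print("4.1.5 system-locale rules present:", have_watches and have_syscalls)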
+ - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["5.5", "6.2"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.1.2", "A.12.4.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - "d:/etc/audit/rules.d -> r:.+.rules$" + - "d:/etc/audit/rules.d -> r:.+.rules$ -> r:^-a && r:exit,always|always,exit && r:-F arch=b64|-F arch=b32 && r:-S && r:sethostname && r:setdomainname && r:-k system-locale|key=system-locale" + - "d:/etc/audit/rules.d -> r:.+.rules$ -> r:^-w && r:/etc/issue && r:-p wa && r:-k system-locale|key=system-locale" + - "d:/etc/audit/rules.d -> r:.+.rules$ -> r:^-w && r:/etc/issue.net && r:-p wa && r:-k system-locale|key=system-locale" + - "d:/etc/audit/rules.d -> r:.+.rules$ -> r:^-w && r:/etc/hosts && r:-p wa && r:-k system-locale|key=system-locale" + - "d:/etc/audit/rules.d -> r:.+.rules$ -> r:^-w && r:/etc/sysconfig/network && r:-p wa && r:-k system-locale|key=system-locale" + - "c:auditctl -l -> r:^-a && r:exit,always|always,exit && r:-F arch=b64|-F arch=b32 && r:-S && r:sethostname && r:setdomainname && r:-k system-locale|-F key=system-locale" + - "c:auditctl -l -> r:^-w && r:/etc/issue && r:-p wa && r:-k system-locale|key=system-locale" + - "c:auditctl -l -> r:^-w && r:/etc/issue.net && r:-p wa && r:-k system-locale|key=system-locale" + - "c:auditctl -l -> r:^-w && r:/etc/hosts && r:-p wa && r:-k system-locale|key=system-locale" + - "c:auditctl -l -> r:^-w && r:/etc/sysconfig/network && r:-p wa && r:-k system-locale|key=system-locale" + + # 4.1.6 Ensure events that modify the system's Mandatory Access Controls are collected. (Automated) + - id: 4626 + title: "Ensure events that modify the system's Mandatory Access Controls are collected." + description: "Monitor SELinux mandatory access controls. The parameters below monitor any write access (potential additional, deletion or modification of files in the directory) or attribute changes to the /etc/selinux/ and /usr/share/selinux/ directories. Note: - If a different Mandatory Access Control method is used, changes to the corresponding directories should be audited. - Reloading the auditd config to set active settings requires the auditd service to be restarted, and may require a system reboot." rationale: "Changes to files in the /etc/selinux/ and /usr/share/selinux/ directories could indicate that an unauthorized user is attempting to modify access controls and change security contexts, leading to a compromise of the system." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/MAC-policy.rules and add the following lines: -w /etc/selinux/ -p wa -k MAC-policy -w /usr/share/selinux/ -p wa -k MAC-policy ." + remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-MAC_policy.rules Add the following lines: -w /etc/selinux/ -p wa -k MAC-policy -w /usr/share/selinux/ -p wa -k MAC-policy." 
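[Editor's note] Laid out one watch per line, the 50-MAC_policy.rules file described in the remediation above would contain only the two SELinux watches; a minimal sketch (file name and paths taken from the remediation text):

    # /etc/audit/rules.d/50-MAC_policy.rules
    -w /etc/selinux/ -p wa -k MAC-policy
    -w /usr/share/selinux/ -p wa -k MAC-policy

After auditd is restarted, `auditctl -l` should list both watches, which is what the auditctl-based rules below look for.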
compliance: - cis: ["4.1.6"] - - cis_csc: ["5.5"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /etc/selinux/ && r:-p wa && r:-k MAC-policy' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /usr/share/selinux/ && r:-p wa && r:-k MAC-policy' - - # 4.1.7 Ensure login and logout events are collected (Scored) - - id: 4625 - title: "Ensure login and logout events are collected" - description: "Monitor login and logout events. The parameters below track changes to files associated with login/logout events. The file /var/log/faillog tracks failed events from login. The file /var/log/lastlog maintain records of the last time a user successfully logged in. The /var/run/faillock/ directory maintains records of login failures via the pam_faillock module. The file /var/log/tallylog maintains records of failures via the pam_tally2 module" + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["5.5"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.1.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /etc/selinux/ -p wa -k MAC-policy' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /usr/share/selinux/ -p wa -k MAC-policy' + - "c:auditctl -l -> r:^-w && r:/etc/selinux && r:-p wa && r:-k MAC-policy|key=MAC-policy" + - "c:auditctl -l -> r:^-w && r:/usr/share/selinux && r:-p wa && r:-k MAC-policy|key=MAC-policy" + + # 4.1.7 Ensure login and logout events are collected. (Automated) + - id: 4627 + title: "Ensure login and logout events are collected." + description: "Monitor login and logout events. The parameters below track changes to files associated with login/logout events. - The file /var/log/lastlog maintain records of the last time a user successfully logged in. - The /var/run/faillock/ directory maintains records of login failures via the pam_faillock module. Note: Reloading the auditd config to set active settings requires the auditd service to be restarted, and may require a system reboot." rationale: "Monitoring login/logout events could provide a system administrator with information associated with brute force attacks against user logins." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/logins.rules and add the following lines: -w /var/log/lastlog -p wa -k logins -w /var/run/faillog/ -p wa -k logins IF the pam_faillock.so module is used: Also include the line: -w /var/run/faillock/ -p wa -k logins OR IF the pam_tally2.so module is used: Also include the line: -w /var/log/tallylog -p wa -k logins" + remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-logins.rules Add the following lines: -w /var/log/lastlog -p wa -k logins -w /var/run/faillock/ -p wa -k logins." 
compliance: - cis: ["4.1.7"] - - cis_csc: ["4.9", "16.11", "16.13"] - - pci_dss: ["10.2.1", "10.2.4", "10.3"] - - nist_800_53: ["AC.7", "AU.14"] - - gpg_13: ["7.8"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /var/log/lastlog && r:-p wa && r:-k logins' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /var/run/faillog/ && r:-p wa && r:-k logins' - - # 4.1.8 Ensure session initiation information is collected (Scored) - - id: 4626 - title: "Ensure session initiation information is collected" - description: 'Monitor session initiation events. The parameters in this section track changes to the files associated with session events. The file /var/run/utmp file tracks all currently logged in users. All audit records will be tagged with the identifier "session." The /var/log/wtmp file tracks logins, logouts, shutdown, and reboot events. The file /var/log/btmp keeps track of failed login attempts and can be read by entering the command /usr/bin/last -f /var/log/btmp . All audit records will be tagged with the identifier "logins.".' + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["4.9", "16.11", "16.13"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.8.1.3", "A.9.4.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/log/lastlog -p wa -k logins' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/run/faillock/ -p wa -k logins' + - "c:auditctl -l -> r:^-w && r:/var/log/lastlog && r:-p wa && r:-k logins|key=logins" + - "c:auditctl -l -> r:^-w && r:/var/run/faillock && r:-p wa && r:-k logins|key=logins" + + # 4.1.8 Ensure session initiation information is collected. (Automated) + - id: 4628 + title: "Ensure session initiation information is collected." + description: 'Monitor session initiation events. The parameters in this section track changes to the files associated with session events. - /var/run/utmp - tracks all currently logged in users. - /var/log/wtmp - file tracks logins, logouts, shutdown, and reboot events. - /var/log/btmp - keeps track of failed login attempts and can be read by entering the command /usr/bin/last -f /var/log/btmp. All audit records will be tagged with the identifier "session.". - Reloading the auditd config to set active settings requires the auditd service to be restarted, and may require a system reboot.' rationale: "Monitoring these files for changes could alert a system administrator to logins occurring at unusual hours, which could indicate intruder activity (i.e. a user logging in at a time when they do not normally log in)." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/logins.rules and add the following lines: -w /var/run/utmp -p wa -k session -w /var/log/wtmp -p wa -k logins -w /var/log/btmp -p wa -k logins . Notes: The last command can be used to read /var/log/wtmp ( last with no parameters) and /var/run/utmp ( last -f /var/run/utmp ) Reloading the auditd config to set active settings may require a system reboot." 
+ remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-session.rules Add the following lines: -w /var/run/utmp -p wa -k session -w /var/log/wtmp -p wa -k logins -w /var/log/btmp -p wa -k logins." compliance: - cis: ["4.1.8"] - - cis_csc: ["4.9", "16.11", "16.13"] - - pci_dss: ["10.3"] - condition: all - rules: - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /var/run/utmp && r:-p wa && r:-k session' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /var/log/wtmp && r:-p wa && r:-k logins' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /var/log/btmp && r:-p wa && r:-k logins' - - # 4.1.9 Ensure discretionary access control permission modification events are collected (Scored) - - id: 4627 - title: "Ensure discretionary access control permission modification events are collected" - description: 'Monitor changes to file permissions, attributes, ownership and group. The parameters in this section track changes for system calls that affect file permissions and attributes. The chmod, fchmod and fchmodat system calls affect the permissions associated with a file. The chown, fchown, fchownat and lchown system calls affect owner and group attributes on a file. The setxattr, lsetxattr, fsetxattr (set extended file attributes) and removexattr, lremovexattr, fremovexattr (remove extended file attributes) control extended file attributes. In all cases, an audit record will only be written for non-system user ids (auid >= 1000) and will ignore Daemon events (auid = 4294967295). All audit records will be tagged with the identifier "perm_mod."' - rationale: "Monitoring for changes in file attributes could alert a system administrator to activity that could indicate intruder activity or policy violation." - remediation: "For 32 bit systems edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/perm_mod.rules and add the following lines: -a always,exit -F arch=b32 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b32 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b32 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod For 64 bit systems edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/perm_mod.rules and add the following lines: -a always,exit -F arch=b64 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b32 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b64 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b32 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b64 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b32 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lrem . Notes: Reloading the auditd config to set active settings may require a system reboot." 
- compliance: - - cis: ["4.1.9"] - - cis_csc: ["5.5"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S chmod && r:-S fchmod && r:-S fchmodat && r:-F auid>=1000 && r:-F auid!=4294967295 && r:-k perm_mod' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S chown && r:-S fchown && r:-S fchownat && r:-S lchown && r:-F auid>=1000 && r:-F auid!=4294967295 && r:-k perm_mod' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S setxattr && r:-S lsetxattr && r:-S fsetxattr && r:-S removexattr && r:-S lremovexattr && r:-S fremovexattr && r:-F auid>=1000 && r:-F auid!=4294967295 && r:-k perm_mod' - - # 4.1.10 Ensure unsuccessful unauthorized file access attempts are collected (Scored) - - id: 4628 - title: "Ensure unsuccessful unauthorized file access attempts are collected" - description: 'Monitor for unsuccessful attempts to access files. The parameters below are associated with system calls that control creation ( creat ), opening ( open, openat ) and truncation ( truncate, ftruncate ) of files. An audit log record will only be written if the user is a non- privileged user (auid > = 1000), is not a Daemon event (auid=4294967295) and if the system call returned EACCES (permission denied to the file) or EPERM (some other permanent error associated with the specific system call). All audit records will be tagged with the identifier "access."' + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["4.9", "16.11", "16.13"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.8.1.3", "A.9.4.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/run/utmp -p wa -k session' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/log/wtmp -p wa -k logins' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/log/btmp -p wa -k logins' + - "c:auditctl -l -> r:^-w && r:/var/run/utmp && r:-p wa && r:-k session|key=session" + - "c:auditctl -l -> r:^-w && r:/var/log/wtmp && r:-p wa && r:-k session|key=session" + - "c:auditctl -l -> r:^-w && r:/var/log/btmp && r:-p wa && r:-k session|key=session" + + # 4.1.9 Ensure discretionary access control permission modification events are collected. (Automated) - Not Implemented + + # 4.1.10 Ensure unsuccessful unauthorized file access attempts are collected. (Automated) + - id: 4629 + title: "Ensure unsuccessful unauthorized file access attempts are collected." + description: "Monitor for unsuccessful attempts to access files. The parameters below are associated with system calls that control creation (creat), opening (open , openat) and truncation ( truncate , ftruncate) of files. An audit log record will only be written if the user is a non-privileged user (auid>=1000), is not a Daemon event (auid=4294967295) and if the system call returned EACCES (permission denied to the file) or EPERM (some other permanent error associated with the specific system call). 
All audit records will be tagged with the identifier \"access.\" Note: Systems may have been customized to change the default UID_MIN. To confirm the UID_MIN for your system, run the following command: # awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs If your systems' UID_MIN is not 1000, replace audit>=1000 with audit>= in the Audit and Remediation procedures. Reloading the auditd config to set active settings may require a system reboot." rationale: "Failed attempts to open, create or truncate files could be an indication that an individual or process is trying to gain unauthorized access to the system." - remediation: "For 32 bit systems edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/access.rules and add the following lines: -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access For 64 bit systems edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/access.rules and add the following lines: -a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access -a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access . Notes: Reloading the auditd config to set active settings may require a system reboot." + remediation: "For 32 bit systems Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-access.rules Add the following lines: -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access For 64 bit systems Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-access.rules Add the following lines: -a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access -a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access." 
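[Editor's note] The 64-bit remediation above is much easier to review with the four syscall rules split out one per line; a sketch of 50-access.rules exactly as described:

    # /etc/audit/rules.d/50-access.rules
    -a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access
    -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access
    -a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access
    -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access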
compliance: - cis: ["4.1.10"] - - cis_csc: ["14.9"] - - pci_dss: ["10.2.4"] - - nist_800_53: ["AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["14.9"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.4.3"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S creat && r:-S open && r:-S openat && r:-S truncate && r:-S ftruncate && r:-F exit=-EACCES && r:-F auid>=1000 && r:-F auid!=4294967295 && r:-k access' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S creat && r:-S open && r:-S openat && r:-S truncate && r:-S ftruncate && r:-F exit=-EPERM && r:-F auid>=1000 && r:-F auid!=4294967295 && r:-k access' + - 'd:/etc/audit/rules.d -> r:.+.rules$ -> r:-a always,exit && r:-F arch=b32|-F arch=b64 && r:-S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access' + - 'd:/etc/audit/rules.d -> r:.+.rules$ -> r:-a always,exit && r:-F arch=b32|-F arch=b64 && r:-S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access' + - 'c:auditctl -l -> r:-a always,exit && r:-F arch=b32|-F arch=b64 && r:-S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access' + - 'c:auditctl -l -> r:-a always,exit && r:-F arch=b32|-F arch=b64 && r:-S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access' - # 4.1.12 Ensure successful file system mounts are collected (Scored) - - id: 4629 - title: "Ensure successful file system mounts are collected" - description: "Monitor the use of the mount system call. The mount (and umount ) system call controls the mounting and unmounting of file systems. The parameters below configure the system to create an audit record when the mount system call is used by a non-privileged user." - rationale: "It is highly unusual for a non privileged user to mount file systems to the system. While tracking mount commands gives the system administrator evidence that external media may have been mounted (based on a review of the source of the mount and confirming it's an external media type), it does not conclusively indicate that data was exported to the media. System administrators who wish to determine if data were exported, would also have to track successful open, creat and truncate system calls requiring write access to a file under the mount point of the external media file system. This could give a fair indication that a write occurred. The only way to truly prove it, would be to track successful writes to the external media. Tracking write system calls could quickly fill up the audit log and is not recommended. Recommendations on configuration options to track data export to media is beyond the scope of this document." - remediation: "For 32 bit systems edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. 
Example: vi /etc/audit/rules.d/mount.rules and add the following lines: -a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts For 64 bit systems edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/mounts.rules and add the following lines: -a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts -a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts . Notes: This tracks successful and unsuccessful mount commands. File system mounts do not have to come from external media and this action still does not verify write (e.g. CD ROMS). Reloading the auditd config to set active settings may require a system reboot." - compliance: - - cis: ["4.1.13"] - - cis_csc: ["6.2"] - - pci_dss: ["10.2.7"] - - nist_800_53: ["AU.14", "AU.6"] - - gpg_13: ["7.9"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S mount && r:-F auid>=1000 && r:-F auid!=4294967295 && r:-k mounts' + # 4.1.11 Ensure use of privileged commands is collected. (Automated) - Not Implemented - # 4.1.13 Ensure file deletion events by users are collected (Scored) + # 4.1.12 Ensure successful file system mounts are collected. (Automated) - id: 4630 - title: "Ensure file deletion events by users are collected" - description: 'Monitor the use of system calls associated with the deletion or renaming of files and file attributes. This configuration statement sets up monitoring for ollowing system calls and tags them with the identifier "delete": unlink -remove a file unlinkat - remove a file attribute), rename (rename a file and renameat - rename a file attribute.' + title: "Ensure successful file system mounts are collected." + description: "Monitor the use of the mount system call. The mount (and umount) system call controls the mounting and unmounting of file systems. The parameters below configure the system to create an audit record when the mount system call is used by a non-privileged user Note: Systems may have been customized to change the default UID_MIN. To confirm the UID_MIN for your system, run the following command: # awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs If your systems' UID_MIN is not 1000, replace audit>=1000 with audit>= in the Audit and Remediation procedures. Reloading the auditd config to set active settings may require a system reboot." + rationale: "It is highly unusual for a non privileged user to mount file systems to the system. While tracking mount commands gives the system administrator evidence that external media may have been mounted (based on a review of the source of the mount and confirming it's an external media type), it does not conclusively indicate that data was exported to the media. System administrators who wish to determine if data were exported, would also have to track successful open , creat and truncate system calls requiring write access to a file under the mount point of the external media file system. This could give a fair indication that a write occurred. The only way to truly prove it, would be to track successful writes to the external media. Tracking write system calls could quickly fill up the audit log and is not recommended. Recommendations on configuration options to track data export to media is beyond the scope of this document." 
+ remediation: "For 32 bit systems Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-mounts.rules Add the following lines: -a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts For 64 bit systems Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-mounts.rules Add the following lines: -a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts -a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts." + compliance: + - cis: ["4.1.12"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["6.2"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - "d:/etc/audit/rules.d -> r:.+.rules$ -> r:-a always,exit && r:-F arch=b32|-F arch=b64 && r:-S mount -F auid>=1000 -F auid!=4294967295 -k mounts" + - "d:/etc/audit/rules.d -> r:.+.rules$ -> r:-a always,exit && r:-F arch=b32|-F arch=b64 && r:-S mount-F auid>=1000 -F auid!=4294967295 -k mounts" + - "c:auditctl -l -> r:-a always,exit && r:-F arch=b32|-F arch=b64 && r:-S mount -F auid>=1000 -F auid!=4294967295 -k mounts" + + # 4.1.13 Ensure file deletion events by users are collected. (Automated) + - id: 4631 + title: "Ensure file deletion events by users are collected." + description: "Monitor the use of system calls associated with the deletion or renaming of files and file attributes. This configuration statement sets up monitoring for following system calls and tags them with the identifier \"delete\": - unlink - remove a file - unlinkat - remove a file attribute - rename - rename a file - renameat - rename a file attribute Note: Systems may have been customized to change the default UID_MIN. To confirm the UID_MIN for your system, run the following command: # awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs If your systems' UID_MIN is not 1000, replace audit>=1000 with audit>= in the Audit and Remediation procedures. Reloading the auditd config to set active settings may require a system reboot." rationale: "Monitoring these calls from non-privileged users could provide a system administrator with evidence that inappropriate removal of files and file attributes associated with protected files is occurring. While this audit option will look at all events, system administrators will want to look for specific privileged files that are being deleted or altered." - remediation: "For 32 bit systems edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/delete.rules and add the following lines: -a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete For 64 bit systems edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. Example: vi /etc/audit/rules.d/delete.rules and add the following lines: -a always,exit -F arch=b64 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete -a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete . Notes: At a minimum, configure the audit system to collect file deletion events for all users and root. Reloading the auditd config to set active settings may require a system reboot." 
+ remediation: "For 32 bit systems edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-deletion.rules Add the following lines: -a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete For 64 bit systems edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-deletion.rules Add the following lines: -a always,exit -F arch=b64 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete -a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete." compliance: - cis: ["4.1.13"] - - cis_csc: ["6.2", "13"] - - pci_dss: ["10.5.5"] - - tsc: ["PI1.4", "PI1.5", "CC7.1", "CC7.2", "CC7.3", "CC8.1"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["6.2", "13"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S unlink && r:-S unlinkat && r:-S rename && r:-S renameat && r:-F auid>=1000 && r:-F auid!=4294967295 && r:-k delete' + - "d:/etc/audit/rules.d -> r:.+.rules$ -> r:-a always,exit && r:-F arch=b32|-F arch=b64 && r:-S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete" + - "d:/etc/audit/rules.d -> r:.+.rules$ -> r:-a always,exit && r:-F arch=b32|-F arch=b64 && r:-S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete" + - "c:auditctl -l -> r:-a always,exit && r:-F arch=b32|-F arch=b64 && r:-S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete" - # 4.1.14 Ensure changes to system administration scope (sudoers) is collected (Scored) - - id: 4631 - title: "Ensure changes to system administration scope (sudoers) is collected" - description: "Monitor scope changes for system administrators. If the system has been properly configured to force system administrators to log in as themselves first and then use the sudo command to execute privileged commands, it is possible to monitor changes in scope. The file /etc/sudoers or a file in the /etc/sudoers.d directory will be written to when the file or its attributes have changed." + # 4.1.14 Ensure changes to system administration scope (sudoers) is collected. (Automated) + - id: 4632 + title: "Ensure changes to system administration scope (sudoers) is collected." + description: "Monitor scope changes for system administrators. If the system has been properly configured to force system administrators to log in as themselves first and then use the sudo command to execute privileged commands, it is possible to monitor changes in scope. The file /etc/sudoers or a file in the /etc/sudoers.d directory will be written to when the file or its attributes have changed. Note: Reloading the auditd config to set active settings may require a system reboot." rationale: "Changes in the /etc/sudoers file, or a file in the /etc/sudoers.d/ directory can indicate that an unauthorized change has been made to scope of system administrator activity." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. 
Example: vi /etc/audit/rules.d/scope.rules and add the following lines:: -w /etc/sudoers -p wa -k scope -w /etc/sudoers.d/ -p wa -k scope . Notes: Reloading the auditd config to set active settings may require a system reboot." + remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-scope.rules Add the following lines: -w /etc/sudoers -p wa -k scope -w /etc/sudoers.d/ -p wa -k scope." compliance: - cis: ["4.1.14"] - - cis_csc: ["4.8"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["4.8"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.4.3"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /etc/sudoers && r:-p wa && r:-k scope' - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /etc/sudoers.d/ && r:-p wa && r:-k scope' - # 4.1.15 Ensure system administrator actions (sudolog) are collected (Scored) - - id: 4632 - title: "Ensure system administrator actions (sudolog) are collected" - description: "Monitor the sudo log file. The sudo log file is configured in /etc/sudoersor a file in /etc/sudoers.d. If the system has been properly configured to disable the use of the su command and force all administrators to have to log in first and then use sudo to execute privileged commands, then all administrator commands will be logged to the sudo log file. Any time a command is executed, an audit event will be triggered as the sudo log file will be opened for write and the executed administration command will be written to the log." - rationale: "Changes in /var/log/sudo.log indicate that an administrator has executed a command or the log file itself has been tampered with. Administrators will want to correlate the events written to the audit trail with the records written to /var/log/sudo.log to verify if unauthorized commands have been executed." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules . Example: vi /etc/audit/rules.d/action.rules and add the following lines: -w /var/log/sudo.log -p wa -k actions ." + # 4.1.15 Ensure system administrator command executions (sudo) are collected. (Automated) + - id: 4633 + title: "Ensure system administrator command executions (sudo) are collected." + description: "sudo provides users with temporary elevated privileges to perform operations. Monitor the administrator with temporary elevated privileges and the operation(s) they performed." + rationale: "creating an audit log of administrators with temporary elevated privileges and the operation(s) they performed is essential to reporting. Administrators will want to correlate the events written to the audit trail with the records written to sudo logfile to verify if unauthorized commands have been executed. Note: Systems may have been customized to change the default UID_MIN. To confirm the UID_MIN for your system, run the following command: # awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs If your systems' UID_MIN is not 1000, replace audit>=1000 with audit>= in the Audit and Remediation procedures. Reloading the auditd config to set active settings may require a system reboot." 
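[Editor's note] The UID_MIN note in the rationale above recurs throughout this section; one way to confirm the value the auid filters should use (command taken from that note, the printed value is only typical):

    awk '/^\s*UID_MIN/{print $2}' /etc/login.defs
    # usually prints 1000; if it differs, adjust every "-F auid>=1000" accordingly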
+ remediation: "For 32 bit systems Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules: Example: vi /etc/audit/rules.d/50-actions.rules Add the following line: -a exit,always -F arch=b32 -C euid!=uid -F euid=0 -F auid>=1000 -F auid!=4294967295 -S execve -k actions For 64 bit systems Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules: Example: vi /etc/audit/rules.d/50-actions.rules Add the following lines: -a always,exit -F arch=b64 -C euid!=uid -F euid=0 -F auid>=1000 -F auid!=4294967295 -S execve -k actions -a always,exit -F arch=b32 -C euid!=uid -F euid=0 -F auid>=1000 -F auid!=4294967295 -S execve -k actions." compliance: - cis: ["4.1.15"] - - cis_csc: ["4.9", "5.5"] - - pci_dss: ["10.2.2"] - - nist_800_53: ["AU.14", "AC.6", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["4.9"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.9.4.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /var/log/sudo.log && r:-p wa && r:-k actions' + - 'd:/etc/audit/rules.d -> r:\.+.rules$' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:exit,always|always,exit && r:-F arch=b64|-F arch=b32 && r:-C euid!=uid|-C uid!=euid && r:-F euid=0 && r:-F auid!=4294967295 && r:-k actions|key=actions' + - "c:auditctl -l -> r:^-a && r:exit,always|always,exit && r:-F arch=b64|-F arch=b32 && r:-C euid!=uid|-C uid!=euid && r:-F euid=0 && r:-k actions|key=actions" - # 4.1.16 Ensure kernel module loading and unloading is collected (Scored) - - id: 4633 - title: "Ensure kernel module loading and unloading is collected" - description: 'Monitor the loading and unloading of kernel modules. The programs insmod (install a kernel module), rmmod (remove a kernel module), and modprobe (a more sophisticated program to load and unload modules, as well as some other features) control loading and unloading of modules. The init_module (load a module) and delete_module (delete a module) system calls control loading and unloading of modules. Any execution of the loading and unloading module programs and system calls will trigger an audit record with an identifier of "modules".' - rationale: "Monitoring the use of insmod, rmmod and modprobe could provide system administrators with evidence that an unauthorized user loaded or unloaded a kernel module, possibly compromising the security of the system. Monitoring of the init_module and delete_module system calls would reflect an unauthorized user attempting to use a different program to load and unload modules." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules. 
Example: vi /etc/audit/rules.d/modules.rules and add the following lines: -w /sbin/insmod -p x -k modules -w /sbin/rmmod -p x -k modules -w /sbin/modprobe -p x -k modules -a always,exit -F arch=b32 -S init_module -S delete_module -k modules For 64 bit systems Edit or create a file in the /etc/audit/rules.d/directory ending in .rules Example: vi /etc/audit/rules.d/modules.rules Add the following lines: -w /sbin/insmod -p x -k modules -w /sbin/rmmod -p x -k modules -w /sbin/modprobe -p x -k modules -a always,exit -F arch=b64 -S init_module -S delete_module -k modules " + # 4.1.16 Ensure kernel module loading and unloading is collected. (Automated) + - id: 4634 + title: "Ensure kernel module loading and unloading is collected." + description: 'Monitor the loading and unloading of kernel modules. The programs insmod (install a kernel module), rmmod (remove a kernel module), and modprobe (a more sophisticated program to load and unload modules, as well as some other features) control loading and unloading of modules. The init_module (load a module) and delete_module (delete a module) system calls control loading and unloading of modules. Any execution of the loading and unloading module programs and system calls will trigger an audit record with an identifier of "modules". Note: Reloading the auditd config to set active settings requires the auditd service to be restarted, and may require a system reboot.' + rationale: "Monitoring the use of insmod , rmmod and modprobe could provide system administrators with evidence that an unauthorized user loaded or unloaded a kernel module, possibly compromising the security of the system. Monitoring of the init_module and delete_module system calls would reflect an unauthorized user attempting to use a different program to load and unload modules." + remediation: "For 32 bit systems Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-modules.rules Add the following lines: -w /sbin/insmod -p x -k modules -w /sbin/rmmod -p x -k modules -w /sbin/modprobe -p x -k modules -a always,exit -F arch=b32 -S init_module -S delete_module -k modules For 64 bit systems Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules Example: vi /etc/audit/rules.d/50-modules.rules Add the following lines: -w /sbin/insmod -p x -k modules -w /sbin/rmmod -p x -k modules -w /sbin/modprobe -p x -k modules -a always,exit -F arch=b64 -S init_module -S delete_module -k modules." 
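[Editor's note] On a 64-bit system the remediation above amounts to the following 50-modules.rules; a sketch, one rule per line:

    # /etc/audit/rules.d/50-modules.rules
    -w /sbin/insmod -p x -k modules
    -w /sbin/rmmod -p x -k modules
    -w /sbin/modprobe -p x -k modules
    -a always,exit -F arch=b64 -S init_module -S delete_module -k modules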
compliance: - cis: ["4.1.16"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.7"] - - nist_800_53: ["AU.14", "AU.6"] - - gpg_13: ["7.9"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /sbin/insmod && r:-p x && r:-k modules' - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /sbin/rmmod && r:-p x && r:-k modules' - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-w /sbin/modprobe && r:-p x && r:-k modules' - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:^-a && r:always,exit|exit,always && r:-F arch=b\d\d && r:-S init_module && r:-S delete_module && r:-k modules' + - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:-a && r:always,exit|exit,always && r:-F arch=b\d\d && r:-S init_module && r:-S delete_module && r:-k modules' - # 4.1.17 Ensure the audit configuration is immutable (Scored) - - id: 4634 - title: "Ensure the audit configuration is immutable" - description: 'Set system audit so that audit rules cannot be modified with auditctl . Setting the flag "-e 2" forces audit to be put in immutable mode. Audit changes can only be made on system reboot.' + # 4.1.17 Ensure the audit configuration is immutable. (Automated) + - id: 4635 + title: "Ensure the audit configuration is immutable." + description: 'Set system audit so that audit rules cannot be modified with auditctl. Setting the flag "-e 2" forces audit to be put in immutable mode. Audit changes can only be made on system reboot. Note: This setting will require the system to be rebooted to update the active auditd configuration settings.' rationale: "In immutable mode, unauthorized users cannot execute changes to the audit system to potentially hide malicious activity and then put the audit rules back. Users would most likely notice a system reboot and that could alert administrators of an attempt to make unauthorized audit changes." - remediation: "Edit or create the file /etc/audit/rules.d/99-finalize.rulesand add the following line at the end of the file: -e 2" + remediation: "Edit or create the file /etc/audit/rules.d/99-finalize.rules and add the following line at the end of the file: -e 2." 
compliance: - cis: ["4.1.17"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.5"] - - tsc: ["CC6.1", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'd:/etc/audit/rules.d/ -> r:\.*.rules -> r:^-e 2' + - 'not d:/etc/audit/rules.d -> r:\.+.rules$ -> !r:\s*\t*-e 2$' ############################################### # 4.2 Configure Logging ############################################### + # 4.2.1 Configure rsyslog + ############################################### - # 4.2.1.1 Ensure rsyslog is installed (Scored) - - id: 4635 - title: "Ensure rsyslog is installed" - description: "The rsyslog software is a recommended replacement to the original syslogd daemon. rsyslog provides improvements over syslogd, including: - connection-oriented (i.e. TCP) transmission of logs - The option to log to database formats - Encryption of log data en route to a central logging server" - rationale: "The security enhancements of rsyslog and syslog-ng such as connection-oriented (i.e. TCP) transmission of logs, the option to log to database formats, and the encryption of log data en route to a central logging server) justify installing and configuring the package." - remediation: "Run the following command to install rsyslog: # yum install rsyslog" - compliance: - - cis: ["4.2.1.1"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.5"] - - tsc: ["CC6.1", "CC7.2", "CC7.3", "CC7.4"] - condition: any - rules: - - 'c:rpm -q rsyslog -> r:^rsyslog-\S+' - - # 4.2.1.2 Ensure rsyslog Service is enabled (Scored) + # 4.2.1.1 Ensure rsyslog is installed. (Automated) - id: 4636 - title: "Ensure rsyslog Service is enabled and running" - description: "rsyslogneeds to be enabled and running to perform logging" - rationale: "If the rsyslog service is not activated the system may default to the syslogd service or lackblogging instead." - remediation: "Run the following command to enable rsyslog: # systemctl --now enable rsyslog" + title: "Ensure rsyslog is installed." + description: "The rsyslog software is a recommended replacement to the original syslogd daemon. rsyslog provides improvements over syslogd, including: connection-oriented (i.e. TCP) transmission of logs - - The option to log to database formats - Encryption of log data en route to a central logging server." + rationale: "The security enhancements of rsyslog such as connection-oriented (i.e. TCP) transmission of logs, the option to log to database formats, and the encryption of log data en route to a central logging server) justify installing and configuring the package." + remediation: "Run the following command to install rsyslog: # yum install rsyslog." 
compliance: - cis: ["4.2.1.1"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.5"] - - tsc: ["CC6.1", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - - "c:systemctl is-enabled rsyslog -> enabled" - - 'c:systemctl status rsyslog -> r:active \(running\)' + - "c:rpm -q rsyslog -> r:^rsyslog-" - # 4.2.1.3 Ensure rsyslog default file permissions configured (Scored) + # 4.2.1.2 Ensure rsyslog Service is enabled and running. (Automated) - id: 4637 - title: "Ensure rsyslog default file permissions configured" - description: "rsyslog will create logfiles that do not already exist on the system. This setting controls what permissions will be applied to these newly created files." + title: "Ensure rsyslog Service is enabled and running." + description: "rsyslog needs to be enabled and running to perform logging." + rationale: "If the rsyslog service is not activated the system may default to the syslogd service or lack logging instead." + remediation: "Run the following command to enable and start rsyslog: # systemctl --now enable rsyslog." + compliance: + - cis: ["4.2.1.2"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all + rules: + - "c:systemctl is-enabled rsyslog -> r:enabled" + - 'c:systemctl status rsyslog -> r:active \(running\)' + + # 4.2.1.3 Ensure rsyslog default file permissions configured. (Automated) + - id: 4638 + title: "Ensure rsyslog default file permissions configured." + description: "rsyslog will create logfiles that do not already exist on the system. This setting controls what permissions will be applied to these newly created files. The $FileCreateMode parameter specifies the file creation mode with which rsyslogd creates new files. If not specified, the value 0644 is used. Notes: - The value given must always be a 4-digit octal number, with the initial digit being zero. - This setting can be overridden by a less restrictive setting in any file ending in .conf in the /etc/rsyslog.d/ directory." rationale: "It is important to ensure that log files have the correct permissions to ensure that sensitive data is archived and protected." - remediation: "Edit the /etc/rsyslog.conf and /etc/rsyslog.d/*.conf files and set $FileCreateMode to 0640 or more restrictive: $FileCreateMode 0640" + remediation: "Edit the /etc/rsyslog.conf and /etc/rsyslog.d/*.conf files and set $FileCreateMode to 0640 or more restrictive: $FileCreateMode 0640." 
compliance: - cis: ["4.2.1.3"] - - cis_csc: ["5.1"] - - pci_dss: ["10.5"] - - tsc: ["CC6.1", "CC7.2", "CC7.3", "CC7.4"] - condition: all + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: any rules: - 'f:/etc/rsyslog.conf -> r:^\$FileCreateMode 06\d0|^\$FileCreateMode 04\d0|^\$FileCreateMode 02\d0|^\$FileCreateMode 00\d0 && r:^\$FileCreateMode 0\d40|^\$FileCreateMode 0\d00' - 'd:/etc/rsyslog.d/ -> r:\.*.conf -> r:^\$FileCreateMode 06\d0|^\$FileCreateMode 04\d0|^\$FileCreateMode 02\d0|^\$FileCreateMode 00\d0 && r:^\$FileCreateMode 0\d40|^\$FileCreateMode 0\d00' - # 4.2.1.5 Ensure rsyslog is configured to send logs to a remote log host (Scored) - - id: 4638 - title: "Ensure rsyslog is configured to send logs to a remote log host" - description: "The rsyslog utility supports the ability to send logs it gathers to a remote log host running syslogd(8) or to receive messages from remote hosts, reducing administrative overhead." + # 4.2.1.4 Ensure logging is configured. (Manual) - Not Implemented + + # 4.2.1.5 Ensure rsyslog is configured to send logs to a remote log host. (Automated) + - id: 4639 + title: "Ensure rsyslog is configured to send logs to a remote log host." + description: "The rsyslog utility supports the ability to send logs it gathers to a remote log host running syslogd(8) or to receive messages from remote hosts, reducing administrative overhead. Note: Ensure that the selection of logfiles being sent follows local site policy." rationale: "Storing log data on a remote host protects log integrity from local attacks. If an attacker gains root access on the local system, they could tamper with or remove log data that is stored on the local system." - remediation: "Edit the /etc/rsyslog.conf and /etc/rsyslog.d/*.conf files and add the following line (where loghost.example.com is the name of your central log host). *.* @@loghost.example.com Run the following command to reload the rsyslogd configuration: # systemctl restart rsyslog" + remediation: 'Edit the /etc/rsyslog.conf and /etc/rsyslog.d/*.conf files and add one of the following lines: Newer syntax: action(type="omfwd" target="" port="" protocol="tcp" action.resumeRetryCount="" queue.type="LinkedList" queue.size=") Example: *.* action(type="omfwd" target="192.168.2.100" port="514" protocol="tcp" action.resumeRetryCount="100" queue.type="LinkedList" queue.size="1000") Older syntax: *.* @@ Example: *.* @@192.168.2.100 Run the following command to reload the rsyslog configuration: # systemctl restart rsyslog.' 
compliance: - cis: ["4.2.1.5"] - - cis_csc: ["6.6", "6.8"] - - pci_dss: ["10.5"] - - tsc: ["CC6.1", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.9"] + - cis_csc_v7: ["6.6", "6.8"] + - nist_sp_800-53: ["AU-6(3)"] + - pci_dss_v3.2.1: ["10.5.3", "10.5.4"] + - pci_dss_v4.0: ["10.3.3"] + - soc_2: ["PL1.4"] condition: all rules: - - 'c:grep *.*[^I][^I]*@ /etc/rsyslog.conf /etc/rsyslog.d/*.conf -> !r:# && r:*.* @@\.+' + - 'f:/etc/rsyslog.conf -> !r:# && r:^*.* @@\.+' + - 'f:/etc/rsyslog.conf -> !r:# && r:^*.* action && r:target="' + - 'd:/etc/rsyslog.d/ -> r:*.conf -> !r:# && r:^*.* @@\.+' + - 'd:/etc/rsyslog.d/ -> r:*.conf -> !r:# && r:^*.* action && r:target="' + + # 4.2.1.6 Ensure remote rsyslog messages are only accepted on designated log hosts. (Manual) + - id: 4640 + title: "Ensure remote rsyslog messages are only accepted on designated log hosts." + description: "By default, rsyslog does not listen for log messages coming in from remote systems. The ModLoad tells rsyslog to load the imtcp.so module so it can listen over a network via TCP. The InputTCPServerRun option instructs rsyslogd to listen on the specified TCP port. Note: The $ModLoad imtcp line can have the .so extension added to the end of the module, or use the full path to the module." + rationale: "The guidance in the section ensures that remote log hosts are configured to only accept rsyslog data from hosts within the specified domain and that those systems that are not designed to be log hosts do not accept any remote rsyslog messages. This provides protection from spoofed log data and ensures that system administrators are reviewing reasonably complete syslog data in a central location." + remediation: "For hosts that are designated as log hosts, edit the /etc/rsyslog.conf file and uncomment or add the following lines: $ModLoad imtcp $InputTCPServerRun 514 For hosts that are not designated as log hosts, edit the /etc/rsyslog.conf file and comment or remove the following lines: # $ModLoad imtcp # $InputTCPServerRun 514 Run the following command to reload the rsyslogd configuration: # systemctl restart rsyslog." + compliance: + - cis: ["4.2.1.6"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.13.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - 'not d:/etc/rsyslog.d -> r:\.+.conf$ -> r:^\s*\t*\$ModLoad imtcp|\s*\t*^\$InputTCPServerRun|^\s*\t*module load="imtcp"|^\s*\t*input type="imtcp" port="514"' + - 'not f:/etc/rsyslog.conf -> r:^\s*\t*\$ModLoad imtcp|^\s*\t*\$InputTCPServerRun|^\s*\t*module load="imtcp"|^\s*\t*input type="imtcp" port="514"' ############################################### - # 4.2 Configure journald + # 4.2.2.1 Configure systemd journal remote ############################################### - # 4.2.2.1 Ensure journald is configured to send logs to rsyslog - - id: 4639 - title: "Ensure journald is configured to send logs to rsyslog " - description: "Data from journald may be stored in volatile memory or persisted locally on the server. Utilities exist to accept remote export of journald logs, however, use of the rsyslog service provides a consistent means of log collection and export." + # 4.2.2.1 Ensure journald is configured to send logs to rsyslog. 
(Automated) + - id: 4641 + title: "Ensure journald is configured to send logs to rsyslog." + description: 'Data from journald may be stored in volatile memory or persisted locally on the server. Utilities exist to accept remote export of journald logs, however, use of the rsyslog service provides a consistent means of log collection and export. Notes: - This recommendation assumes that recommendation 4.2.1.5, "Ensure rsyslog is configured to send logs to a remote log host" has been implemented. - The main configuration file /etc/systemd/journald.conf is read before any of the custom *.conf files. If there are custom configs present, they override the main configuration parameters - As noted in the journald man pages: journald logs may be exported to rsyslog either through the process mentioned here, or through a facility like systemd-journald.service. There are trade-offs involved in each implementation, where ForwardToSyslog will immediately capture all events (and forward to an external log server, if properly configured), but may not capture all boot-up activities. Mechanisms such as systemd-journald.service, on the other hand, will record bootup events, but may delay sending the information to rsyslog, leading to the potential for log manipulation prior to export. Be aware of the limitations of all tools employed to secure a system.' rationale: "Storing log data on a remote host protects log integrity from local attacks. If an attacker gains root access on the local system, they could tamper with or remove log data that is stored on the local system." - remediation: "Edit the /etc/systemd/journald.conf file and add the following line: ForwardToSyslog=yes" + remediation: "Edit the /etc/systemd/journald.conf file and add the following line: ForwardToSyslog=yes." + references: + - "https://github.com/konstruktoid/hardening/blob/master/systemd.adoc#etcsyste" compliance: - cis: ["4.2.2.1"] - - cis_csc: ["6.5"] - - pci_dss: ["10.5"] - - tsc: ["CC6.1", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.9"] + - cis_csc_v7: ["6.5"] + - nist_sp_800-53: ["AU-6(3)"] + - pci_dss_v3.2.1: ["10.5.3", "10.5.4"] + - pci_dss_v4.0: ["10.3.3"] + - soc_2: ["PL1.4"] condition: all rules: - - 'f:/etc/systemd/journald.conf -> r:^\s*ForwardToSyslog\s*=\s*yes' + - 'f:/etc/systemd/journald.conf -> r:^\s*\t*ForwardToSyslog\s*=\s*yes' - # 4.2.2.2 Ensure journald is configured to compress large log files - - id: 4640 - title: "Ensure journald is configured to compress large log files" - description: "The journald system includes the capability of compressing overly large files to avoid filling up the system with logs or making the logs unmanageably large." + # 4.2.2.2 Ensure journald is configured to compress large log files. (Automated) + - id: 4642 + title: "Ensure journald is configured to compress large log files." + description: "The journald system includes the capability of compressing overly large files to avoid filling up the system with logs or making the logs unmanageably large. Note: The main configuration file /etc/systemd/journald.conf is read before any of the custom *.conf files. If there are custom configs present, they override the main configuration parameters." rationale: "Uncompressed large files may unexpectedly fill a filesystem leading to resource unavailability. Compressing logs prior to write can prevent sudden, unexpected filesystem impacts." 
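Stepping outside the diff for a moment: checks 4.2.2.1 through 4.2.2.3 all reduce to reading key=value pairs from /etc/systemd/journald.conf (ForwardToSyslog=yes, Compress=yes, Storage=persistent). A minimal sketch under that assumption; like the policy rules themselves, it only inspects the main file and ignores drop-in overrides.

# Illustrative sketch only: reads the journald settings asserted by 4.2.2.1-4.2.2.3.
import re

EXPECTED = {"ForwardToSyslog": "yes", "Compress": "yes", "Storage": "persistent"}

def journald_settings(path="/etc/systemd/journald.conf"):
    found = {}
    try:
        with open(path) as fh:
            for line in fh:
                if line.lstrip().startswith("#"):
                    continue  # skip commented defaults
                m = re.match(r'^\s*(\w+)\s*=\s*(\S+)', line)
                if m:
                    found[m.group(1)] = m.group(2)
    except OSError:
        pass  # a missing file simply means nothing is set explicitly
    return found

if __name__ == "__main__":
    current = journald_settings()
    for key, want in EXPECTED.items():
        print(f"{key}: expected {want}, found {current.get(key, '<unset>')}")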
- remediation: "Edit the /etc/systemd/journald.conf file and add the following line: Compress=yes" + remediation: "Edit the /etc/systemd/journald.conf file and add the following line: Compress=yes." + references: + - "https://github.com/konstruktoid/hardening/blob/master/systemd.adoc#etcsyste" compliance: - cis: ["4.2.2.2"] - - cis_csc: ["6.4"] - - pci_dss: ["10.5"] - - tsc: ["CC6.1", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.3"] + - cis_csc_v7: ["6.4"] + - iso_27001-2013: ["A.12.4.1"] + - pci_dss_v3.2.1: ["10.7"] + - soc_2: ["A1.1"] condition: all rules: - - 'f:/etc/systemd/journald.conf -> r:^\s*Compress\s*=\s*yes' + - 'f:/etc/systemd/journald.conf -> r:^\s*\t*Compress\s*=\s*yes' - # 4.2.2.3 Ensure journald is configured to write logfiles to persistent disk - - id: 4641 - title: "Ensure journald is configured to compress large log files" - description: "Data from journald may be stored in volatile memory or persisted locally on the server. Logs in memory will be lost upon a system reboot. By persisting logs to local disk on the server they are protected from loss." + # 4.2.2.3 Ensure journald is configured to write logfiles to persistent disk. (Automated) + - id: 4643 + title: "Ensure journald is configured to write logfiles to persistent disk." + description: "Data from journald may be stored in volatile memory or persisted locally on the server. Logs in memory will be lost upon a system reboot. By persisting logs to local disk on the server they are protected from loss. Note: The main configuration file /etc/systemd/journald.conf is read before any of the custom *.conf files. If there are custom configs present, they override the main configuration parameters." rationale: "Writing log data to disk will provide the ability to forensically reconstruct events which may have impacted the operations or security of a system even after a system crash or reboot." - remediation: "Edit the /etc/systemd/journald.conf file and add the following line: Storage=persistent" + remediation: "Edit the /etc/systemd/journald.conf file and add the following line: Storage=persistent." + references: + - "https://github.com/konstruktoid/hardening/blob/master/systemd.adoc#etcsyste" compliance: - cis: ["4.2.2.3"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.5"] - - tsc: ["CC6.1", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - - 'f:/etc/systemd/journald.conf -> r:^\s*Storage\s*=\s*persistent' + - 'f:/etc/systemd/journald.conf -> r:^\s*\t*Storage\s*=\s*persistent' - # 4.2.3 Ensure permissions on all logfiles are configured (Scored) - - id: 4642 - title: "Ensure permissions on all logfiles are configured" + # 4.2.3 Ensure permissions on all logfiles are configured. (Manual) + - id: 4644 + title: "Ensure permissions on all logfiles are configured." description: "Log files stored in /var/log/ contain logged information from many services on the system, or on log hosts others as well." - rationale: "It is important to ensure that log files have the correct permissions to ensure that sensitive data is archived and protected. Other/world should not have the ability to view this information. Group should not have the ability to modify this information." 
- remediation: 'Run the following command to set permissions on all existing log files: # find /var/log -type f -exec chmod g-wx,o-rwx "{}" + -o -type d -exec chmod g-wx,o-rwx "{}" +' + rationale: "It is important to ensure that log files have the correct permissions to ensure that sensitive data is archived and protected. Other/world should not have the ability to view this information. Group should not have the ability to modify this information." + remediation: 'Run the following commands to set permissions on all existing log files: # find /var/log -type f -exec chmod g-wx,o-rwx "{}" + Note: The configuration for your logging software or services may need to also be modified for any logs that had incorrect permissions, otherwise, the permissions may be reverted to the incorrect permissions.' compliance: - cis: ["4.2.3"] - - cis_csc: ["5.1"] - - pci_dss: ["10.5"] - - tsc: ["CC6.1", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - 'c:find /var/log -type f -ls -> r:-\w\w\w\ww\w\w\w\w|-\w\w\w\w\wx\w\w\w|-\w\w\w\w\w\w\ww\w|-\w\w\w\w\w\wr\w\w|-\w\w\w\w\w\w\w\wx' - #################################################### + # 4.2.4 Ensure logrotate is configured. (Manual) - Not Implemented + + ############################################### # 5 Access, Authentication and Authorization - #################################################### - #################################################### - # 5.1 Configure time-based job schedulers - #################################################### - # 5.1.1 Ensure cron daemon is enabled and running (Automated) - - id: 4643 - title: "Ensure cron daemon is enabled" + ############################################### + ############################################### + # 5.1 Configure cron + ############################################### + + # 5.1.1 Ensure cron daemon is enabled and running. (Automated) + - id: 4645 + title: "Ensure cron daemon is enabled and running." description: "The cron daemon is used to execute batch jobs on the system." - rationale: "While there may not be user jobs that need to be run on the system, the system does have maintenance jobs that may include security monitoring that have to run, and cron is used to execute them." - remediation: "Run the following command to enable cron : # systemctl enable crond; OR run the following command to remove cron: # yum remove cronie" + rationale: "While there may not be user jobs that need to be run on the system, the system does have maintenance jobs that may include security monitoring that have to run. If another method for scheduling tasks is not being used, cron is used to execute them, and needs to be enabled and running." + remediation: "Run the following command to enable and start cron: # systemctl --now enable crond OR Run the following command to remove cron: # yum remove cronie." 
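As a side note, not part of the policy: check 5.1.1 simply asks systemd whether crond is enabled and running. A minimal sketch of the same query; it uses "systemctl is-active" as a simplification of the rule's "systemctl status" pattern match, and assumes a systemd host.

# Illustrative sketch only: mirrors check 5.1.1 (crond enabled and active).
import subprocess

def crond_state():
    enabled = subprocess.run(["systemctl", "is-enabled", "crond"],
                             capture_output=True, text=True).stdout.strip()
    active = subprocess.run(["systemctl", "is-active", "crond"],
                            capture_output=True, text=True).stdout.strip()
    return enabled, active

if __name__ == "__main__":
    enabled, active = crond_state()
    print(f"crond enabled={enabled!r} active={active!r}")
    print("compliant" if (enabled == "enabled" and active == "active") else "NOT compliant")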
compliance: - cis: ["5.1.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - "c:systemctl is-enabled crond -> enabled" + - "c:systemctl is-enabled crond -> r:enabled" - 'c:systemctl status crond -> r:Active: active \(running\) since \w+ \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d' - # 5.1.2 Ensure permissions on /etc/crontab are configured (Automated) - - id: 4644 - title: "Ensure permissions on /etc/crontab are configured" + # 5.1.2 Ensure permissions on /etc/crontab are configured. (Automated) + - id: 4646 + title: "Ensure permissions on /etc/crontab are configured." description: "The /etc/crontab file is used by cron to control its own jobs. The commands in this item make sure that root is the user and group owner of the file and that only the owner can access the file." rationale: "This file contains information on what system jobs are run by cron. Write access to these files could provide unprivileged users with the ability to elevate their privileges. Read access to these files could provide users with the ability to gain insight on system jobs that run on the system and could provide them a way to gain unauthorized privileged access." - remediation: "Run the following commands to set ownership and permissions on /etc/crontab : # chown root:root /etc/crontab; # chmod og-rwx /etc/crontab; OR run the following command to remove cron: # yum remove cronie" + remediation: "Run the following commands to set ownership and permissions on /etc/crontab: # chown root:root /etc/crontab # chmod u-x,og-rwx /etc/crontab OR Run the following command to remove cron: # yum remove cronie." compliance: - cis: ["5.1.2"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/crontab -> r:^Access: \(0\w00/-\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + - 'c:stat /etc/crontab -> r:^Access: \(0600/-rw-------\)\s*Uid: \(\s*0/\s*root\)\s*Gid: \(\s*0/\s*root\)$' - # 5.1.3 Ensure permissions on /etc/cron.hourly are configured (Automated) - - id: 4645 - title: "Ensure permissions on /etc/cron.hourly are configured" + # 5.1.3 Ensure permissions on /etc/cron.hourly are configured. (Automated) + - id: 4647 + title: "Ensure permissions on /etc/cron.hourly are configured." description: "This directory contains system cron jobs that need to run on an hourly basis. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." 
rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." - remediation: "Run the following commands to set ownership and permissions on /etc/cron.hourly : chown root:root /etc/cron.hourly; # chmod og-rwx /etc/cron.hourly; OR run the following command to remove cron: # yum remove cronie" + remediation: "Run the following commands to set ownership and permissions on the /etc/cron.hourly/ directory: # chown root:root /etc/cron.hourly/ # chmod og-rwx /etc/cron.hourly/ OR Run the following command to remove cron # yum remove cronie." compliance: - cis: ["5.1.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/cron.hourly -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.1.4 Ensure permissions on /etc/cron.daily are configured (Automated) - - id: 4646 - title: "Ensure permissions on /etc/cron.daily are configured" + # 5.1.4 Ensure permissions on /etc/cron.daily are configured. (Automated) + - id: 4648 + title: "Ensure permissions on /etc/cron.daily are configured." description: "The /etc/cron.daily directory contains system cron jobs that need to run on a daily basis. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." - remediation: "Run the following commands to set ownership and permissions on /etc/cron.daily : chown root:root /etc/cron.daily; # chmod og-rwx /etc/cron.daily; OR run the following command to remove cron: # yum remove cronie" + remediation: "Run the following commands to set ownership and permissions on /etc/cron.daily directory: # chown root:root /etc/cron.daily # chmod og-rwx /etc/cron.daily OR Run the following command to remove cron: # yum remove cronie." 
compliance: - cis: ["5.1.4"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/cron.daily -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.1.5 Ensure permissions on /etc/cron.weekly are configured (Automated) - - id: 4647 - title: "Ensure permissions on /etc/cron.weekly are configured" + # 5.1.5 Ensure permissions on /etc/cron.weekly are configured. (Automated) + - id: 4649 + title: "Ensure permissions on /etc/cron.weekly are configured." description: "The /etc/cron.weekly directory contains system cron jobs that need to run on a weekly basis. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." - remediation: "Run the following commands to set ownership and permissions on /etc/cron.weekly : chown root:root /etc/cron.weekly; # chmod og-rwx /etc/cron.weekly; OR run the following command to remove cron: # yum remove cronie" + remediation: "Run the following commands to set ownership and permissions on /etc/cron.weekly/ directory: # chown root:root /etc/cron.weekly/ # chmod og-rwx /etc/cron.weekly/ OR Run the following command to remove cron: # yum remove cronie." compliance: - cis: ["5.1.5"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/cron.weekly -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.1.6 Ensure permissions on /etc/cron.monthly are configured (Automated) - - id: 4648 - title: "Ensure permissions on /etc/cron.monthly are configured" + # 5.1.6 Ensure permissions on /etc/cron.monthly are configured. (Automated) + - id: 4650 + title: "Ensure permissions on /etc/cron.monthly are configured." description: "The /etc/cron.monthly directory contains system cron jobs that need to run on a monthly basis. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." 
rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." - remediation: "Run the following commands to set ownership and permissions on /etc/cron.monthly : chown root:root /etc/cron.monthly; # chmod og-rwx /etc/cron.monthly; OR run the following command to remove cron: # yum remove cronie" + remediation: "Run the following commands to set ownership and permissions on /etc/cron.monthly directory: # chown root:root /etc/cron.monthly # chmod og-rwx /etc/cron.monthly OR Run the following command to remove cron: # yum remove cronie." compliance: - cis: ["5.1.6"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/cron.monthly -> r:^Access: \(0\w00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + - 'c:stat -L /etc/cron.monthly -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.1.7 Ensure permissions on /etc/cron.d are configured (Automated) - - id: 4649 - title: "Ensure permissions on /etc/cron.d are configured" - description: "The /etc/cron.d/directory contains system cronjobs that need to run in a similar manner to the hourly, daily weekly and monthly jobs from /etc/crontab, but require more granular control as to when they run. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." + # 5.1.7 Ensure permissions on /etc/cron.d are configured. (Automated) + - id: 4651 + title: "Ensure permissions on /etc/cron.d are configured." + description: "The /etc/cron.d/ directory contains system cron jobs that need to run in a similar manner to the hourly, daily weekly and monthly jobs from /etc/crontab , but require more granular control as to when they run. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." 
- remediation: "Run the following commands to set ownership and permissions on /etc/cron.d : # chown root:root /etc/cron.d; # chmod og-rwx /etc/cron.d; OR run the following command to remove cron: # yum remove cronie" + remediation: "Run the following commands to set ownership and permissions on /etc/cron.d directory: # chown root:root /etc/cron.d # chmod og-rwx /etc/cron.d OR Run the following command to remove cron: # yum remove cronie." compliance: - cis: ["5.1.7"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/cron.d -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.1.8 Ensure cron is restricted to authorized users (Scored) - - id: 4650 - title: "Ensure cron is restricted to authorized users" - description: "If cronis installed in the system, configure /etc/cron.allowto allow specific users to use these services. If /etc/cron.allowdoes not exist, then /etc/cron.denyis checked. Any user not specifically defined in those files is allowed to use cron. By removing the file, only users in /etc/cron.alloware allowed to use cron." - rationale: "On many systems, only the system administrator is authorized to schedule cronjobs. Using the cron.allowfile to control who can run cronjobs enforces this policy. It is easier to manage an allow list than a deny list. In a deny list, you could potentially add a user ID to the system and forget to add it to the deny files." - remediation: "Run the following commands to remove /etc/cron.deny and /etc/at.deny and create and set permissions and ownership for /etc/cron.allow and /etc/at.allow: rm /etc/cron.deny;rm /etc/at.deny;touch /etc/cron.allow; touch /etc/at.allow; chmod og-rwx /etc/cron.allow; chmod og-rwx /etc/at.allow; chown root:root /etc/cron.allow and chown root:root /etc/at.allow" + # 5.1.8 Ensure cron is restricted to authorized users. (Automated) + - id: 4652 + title: "Ensure cron is restricted to authorized users." + description: "If cron is installed in the system, configure /etc/cron.allow to allow specific users to use these services. If /etc/cron.allow does not exist, then /etc/cron.deny is checked. Any user not specifically defined in those files is allowed to use cron. By removing the file, only users in /etc/cron.allow are allowed to use cron. Note: Even though a given user is not listed in cron.allow, cron jobs can still be run as that user. The cron.allow file only controls administrative access to the crontab command for scheduling and modifying cron jobs." + rationale: "On many systems, only the system administrator is authorized to schedule cron jobs. Using the cron.allow file to control who can run cron jobs enforces this policy. It is easier to manage an allow list than a deny list. In a deny list, you could potentially add a user ID to the system and forget to add it to the deny files." 
+ remediation: "Run the following command to remove /etc/cron.deny: # rm /etc/cron.deny Run the following command to create /etc/cron.allow # touch /etc/cron.allow Run the following commands to set the owner and permissions on /etc/cron.allow: # chown root:root /etc/cron.allow # chmod u-x,og-rwx /etc/cron.allow OR Run the following command to remove cron # yum remove cronie." compliance: - cis: ["5.1.8"] - - cis_csc: ["16"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - "c:stat -L /etc/cron.deny -> r:No such file or directory$" - - 'c:stat -L /etc/cron.allow -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - - 'c:stat -L /etc/at.allow -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + - "c:stat /etc/cron.deny -> r:No such file or directory$" + - 'c:stat /etc/cron.allow -> r:^Access: \(0600/-rw-------\)\s*Uid: \(\s*0/\s*root\)\s*Gid: \(\s*0/\s*root\)$' - # 5.1.9 Ensure at is restricted to authorized users (Automated) - - id: 4651 - title: "Ensure at is restricted to authorized users" - description: "If atis installed in the system, configure /etc/at.allowto allow specific users to use these services. If /etc/at.allowdoes not exist, then /etc/at.denyis checked. Any user not specifically defined in those files is allowed to use at. By removing the file, only users in /etc/at.alloware allowed to use at." - rationale: "On many systems, only the system administrator is authorized to schedule atjobs. Using the at.allowfile to control who can run atjobs enforces this policy. It is easier to manage an allow list than a deny list. In a deny list, you could potentially add a user ID to the system and forget to add it to the deny files." - remediation: "Run the following commands to remove /etc/cron.deny and /etc/at.deny and create and set permissions and ownership for /etc/cron.allow and /etc/at.allow: rm /etc/cron.deny;rm /etc/at.deny;touch /etc/cron.allow; touch /etc/at.allow; chmod og-rwx /etc/cron.allow; chmod og-rwx /etc/at.allow; chown root:root /etc/cron.allow and chown root:root /etc/at.allow" + # 5.1.9 Ensure at is restricted to authorized users. (Automated) + - id: 4653 + title: "Ensure at is restricted to authorized users." + description: "If at is installed in the system, configure /etc/at.allow to allow specific users to use these services. If /etc/at.allow does not exist, then /etc/at.deny is checked. Any user not specifically defined in those files is allowed to use at. By removing the file, only users in /etc/at.allow are allowed to use at. Note: Even though a given user is not listed in at.allow, at jobs can still be run as that user. The at.allow file only controls administrative access to the at command for scheduling and modifying at jobs." + rationale: "On many systems, only the system administrator is authorized to schedule at jobs. Using the at.allow file to control who can run at jobs enforces this policy. It is easier to manage an allow list than a deny list. In a deny list, you could potentially add a user ID to the system and forget to add it to the deny files." 
+ remediation: "Run the following command to remove /etc/at.deny: # rm /etc/at.deny Run the following command to create /etc/at.allow # touch /etc/at.allow Run the following commands to set the owner and permissions on /etc/at.allow: # chown root:root /etc/at.allow # chmod u-x,og-rwx /etc/at.allow OR Run the following command to remove at: # yum remove at." compliance: - cis: ["5.1.9"] - - cis_csc: ["16"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - "c:stat -L /etc/at.deny -> r:No such file or directory$" - 'c:stat -L /etc/at.allow -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' ############################################### - # 5.2 Configure SSH Server + # 5.2 Configure sudo ############################################### - # 5.2.1 Ensure permissions on /etc/ssh/sshd_config are configured (Automated) - - id: 4652 - title: "Ensure permissions on /etc/ssh/sshd_config are configured" - description: "The /etc/ssh/sshd_config file contains configuration specifications for sshd. The command below sets the owner and group of the file to root." - rationale: "The /etc/ssh/sshd_config file needs to be protected from unauthorized changes by non-privileged users." - remediation: "Run the following commands to set ownership and permissions on /etc/ssh/sshd_config: chown root:root /etc/ssh/sshd_config and chmod og-rwx /etc/ssh/sshd_config" - compliance: - - cis: ["5.2.1"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:stat -L /etc/ssh/sshd_config -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - - # 5.2.2 Ensure permissions on SSH private host key files are configured (Automated) - - id: 4653 - title: "Ensure permissions on SSH private host key files are configured" - description: "An SSH private key is one of two files used in SSH public key authentication. In this authentication method, The possession of the private key is proof of identity. Only a private key that corresponds to a public key will be able to authenticate successfully. The private keys need to be stored and handled carefully, and no copies of the private key should be distributed." 
- rationale: "If an unauthorized user obtains the private SSH host key file, the host could be impersonated" - remediation: "Run the following commands to set permissions, ownership, and group on the private SSH host key files: # find /etc/ssh -xdev -type f -name 'ssh_host_*_key' -exec chmod u-x,g-wx,o-rwx {} \\; # find /etc/ssh -xdev -type f -name 'ssh_host_*_key' -exec chown root:ssh_keys {} \\;" - compliance: - - cis: ["5.2.2"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:find /etc/ssh -xdev -type f -name "ssh_host_rsa_key" -exec stat -L {} \; -> r:^Access: \(0\d40/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - - 'c:find /etc/ssh -xdev -type f -name "ssh_host_ecdsa_key" -exec stat -L {} \; -> r:^Access: \(0\d40/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - - 'c:find /etc/ssh -xdev -type f -name "ssh_host_ed25519_key" -exec stat -L {} \; -> r:^Access: \(0\d400/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.2.3 Ensure permissions on SSH public host key files are configured (Automated) + # 5.2.1 Ensure sudo is installed. (Automated) - id: 4654 - title: "Ensure permissions on SSH private host key files are configured" - description: "An SSH public key is one of two files used in SSH public key authentication. In this authentication method, a public key is a key that can be used for verifying digital signatures generated using a corresponding private key. Only a public key that corresponds to a private key will be able to authenticate successfully." - rationale: "If a public host key file is modified by an unauthorized user, the SSH service may be compromised." - remediation: "Run the following commands to set permissions and ownership on the SSH host public key: # find /etc/ssh -xdev -type f -name 'ssh_host_*_key.pub' -exec chmod u-x,go-wx {} \\; #find /etc/ssh -xdev -type f -name 'ssh_host_*_key.pub' -exec chown root:root {} \\;" + title: "Ensure sudo is installed." + description: "sudo allows a permitted user to execute a command as the superuser or another user, as specified by the security policy. The invoking user's real (not effective) user ID is used to determine the user name with which to query the security policy." + rationale: "sudo supports a plugin architecture for security policies and input/output logging. Third parties can develop and distribute their own policy and I/O logging plugins to work seamlessly with the sudo front end. The default security policy is sudoers, which is configured via the file /etc/sudoers. The security policy determines what privileges, if any, a user has to run sudo. The policy may require that users authenticate themselves with a password or another authentication mechanism. If authentication is required, sudo will exit if the user's password is not entered within a configurable time limit. This limit is policy-specific." + remediation: "Run the following command to install sudo. # yum install sudo." 
compliance: - - cis: ["5.2.3"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["5.2.1"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:find /etc/ssh -xdev -type f -name "ssh_host_rsa_key.pub" -exec stat -L {} \; -> r:^Access: \(0\d444/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - - 'c:find /etc/ssh -xdev -type f -name "ssh_host_ecdsa_key.pub" -exec stat -L {} \; -> r:^Access: \(0\d44/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - - 'c:find /etc/ssh -xdev -type f -name "ssh_host_ed25519_key.pub" -exec stat -L {} \; -> r:^Access: \(0\d44/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + - "c:rpm -q sudo -> r:sudo-" - # 5.2.4 Ensure SSH access is limited (Scored) + # 5.2.2 Ensure sudo commands use pty. (Automated) - id: 4655 - title: "Ensure SSH access is limited" - description: "There are several options available to limit which users and group can access the system via SSH. It is recommended that at least one of the following options be leveraged: AllowUsers The AllowUsers variable gives the system administrator the option of allowing specific users to ssh into the system. The list consists of space separated user names. Numeric user IDs are not recognized with this variable. If a system administrator wants to restrict user access further by only allowing the allowed users to log in from a particular host, the entry can be specified in the form of user@host. AllowGroups The AllowGroups variable gives the system administrator the option of allowing specific groups of users to ssh into the system. The list consists of space separated group names. Numeric group IDs are not recognized with this variable. DenyUsers The DenyUsers variable gives the system administrator the option of denying specific users to ssh into the system. The list consists of space separated user names. Numeric user IDs are not recognized with this variable. If a system administrator wants to restrict user access further by specifically denying a user's access from a particular host, the entry can be specified in the form of user@host. DenyGroups The DenyGroups variable gives the system administrator the option of denying specific groups of users to ssh into the system. The list consists of space separated group names. Numeric group IDs are not recognized with this variable." - rationale: "Restricting which users can remotely access the system via SSH will help ensure that only authorized users access the system." - remediation: "Edit the /etc/ssh/sshd_config file to set one or more of the parameter as follows: # AllowUsers ; # AllowGroups ; # DenyUsers ; # DenyGroups " + title: "Ensure sudo commands use pty." + description: "sudo can be configured to run only from a pseudo-pty Note: visudo edits the sudoers file in a safe fashion, analogous to vipw(8). visudo locks the sudoers file against multiple simultaneous edits, provides basic sanity checks, and checks for parse errors. If the sudoers file is currently being edited you will receive a message to try again later. The -f option allows you to tell visudo which file to edit." 
+ rationale: "Attackers can run a malicious program using sudo, which would again fork a background process that remains even when the main program has finished executing. This can be mitigated by configuring sudo to run other commands only from a pseudo-pty, whether I/O logging is turned on or not." + remediation: "Edit the file /etc/sudoers or a file in /etc/sudoers.d/ with visudo or visudo -f and add the following line: Defaults use_pty." compliance: - - cis: ["5.2.4"] - - cis_csc: ["4.3"] - - pci_dss: ["8.1"] - - tsc: ["CC6.1"] + - cis: ["5.2.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: any rules: - - 'f:$sshd_file -> r:^\s*AllowUsers' - - 'f:$sshd_file -> r:^\s*AllowGroups' - - 'f:$sshd_file -> r:^\s*DenyUsers' - - 'f:$sshd_file -> r:^\s*DenyGroups' + - 'f:/etc/sudoers -> r:^\s*Defaults\s+use_pty' + - 'd:/etc/sudoers.d -> r:\. -> r:^\s*Defaults\s+use_pty' - # 5.2.5 Ensure SSH LogLevel is appropriate (Automated) + # 5.2.3 Ensure sudo log file exists. (Automated) - id: 4656 - title: "Ensure SSH LogLevel is appropriate" - description: "INFO level is the basic level that only records login activity of SSH users. In many situations, such as Incident Response, it is important to determine when a particular user was active on a system. The logout record can eliminate those users who disconnected, which helps narrow the field.VERBOSE level specifies that login and logout activity as well as the key fingerprint for any SSH key used for login will be logged. This information is important for SSH key management, especially in legacy environments." - rationale: "SSH provides several logging levels with varying amounts of verbosity. DEBUG is specifically not recommended other than strictly for debugging SSH communications since it provides so much data that it is difficult to identify important security information." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: LogLevel INFO" + title: "Ensure sudo log file exists." + description: "sudo can use a custom log file Note: visudo edits the sudoers file in a safe fashion, analogous to vipw(8). visudo locks the sudoers file against multiple simultaneous edits, provides basic sanity checks, and checks for parse errors. If the sudoers file is currently being edited you will receive a message to try again later. The -f option allows you to tell visudo which file to edit." + rationale: "A sudo log file simplifies auditing of sudo commands." + impact: "Editing the sudo configuration incorrectly can cause sudo to stop functioning." + remediation: 'edit the file /etc/sudoers or a file in /etc/sudoers.d/ with visudo or visudo -f and add the following line: Defaults logfile="" Example: Defaults logfile="/var/log/sudo.log".' 
compliance: - cis: ["5.2.3"] - - cis_csc: ["16"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: any rules: - - 'f:$sshd_file -> r:^# && r:LogLevel\s*\t*INFO' - - 'f:$sshd_file -> r:^# && r:LogLevel\s*\t*VERBOSE' + - 'f:/etc/sudoers -> r:^Defaults logfile="' + - 'd:/etc/sudoers.d -> r:\. -> r:^Defaults\s+logfile="' + + ############################################### + # 5.2 Configure SSH + ############################################### - # 5.2.6 Ensure SSH X11 forwarding is disabled (Automated) + # 5.3.1 Ensure permissions on /etc/ssh/sshd_config are configured. (Automated) - id: 4657 - title: "Ensure SSH X11 forwarding is disabled" - description: "The X11Forwarding parameter provides the ability to tunnel X11 traffic through the connection to enable remote graphic connections." - rationale: "Disable X11 forwarding unless there is an operational requirement to use X11 applications directly. There is a small risk that the remote X11 servers of users who are logged in via SSH with X11 forwarding could be compromised by other users on the X11 server. Note that even if X11 forwarding is disabled, users can always install their own forwarders." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: X11Forwarding no" + title: "Ensure permissions on /etc/ssh/sshd_config are configured." + description: "The /etc/ssh/sshd_config file contains configuration specifications for sshd. The command below sets the owner and group of the file to root." + rationale: "The /etc/ssh/sshd_config file needs to be protected from unauthorized changes by non-privileged users." + remediation: "Run the following commands to set ownership and permissions on /etc/ssh/sshd_config: # chown root:root /etc/ssh/sshd_config # chmod og-rwx /etc/ssh/sshd_config." compliance: - - cis: ["5.2.6"] - - pci_dss: ["9.2"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis: ["5.3.1"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'f:/etc/ssh/sshd_config -> r:^\s*X11Forwarding\s*\t*no' + - 'c:stat -L /etc/ssh/sshd_config -> r:^Access: \(0600/-rw-------\)\s*Uid: \(\s*0/\s*root\)\s*Gid: \(\s*0/\s*root\)$' + + # 5.3.2 Ensure permissions on SSH private host key files are configured. (Automated) - Not Implemented + # 5.3.3 Ensure permissions on SSH public host key files are configured. (Automated) - Not Implemented - # 5.2.7 Ensure SSH MaxAuthTries is set to 4 or less (Automated) + # 5.3.4 Ensure SSH access is limited. (Automated) - id: 4658 - title: "Ensure SSH MaxAuthTries is set to 4 or less" - description: "The MaxAuthTries parameter specifies the maximum number of authentication attempts permitted per connection. 
When the login failure count reaches half the number, error messages will be written to the syslog file detailing the login failure." - rationale: "Setting the MaxAuthTries parameter to a low number will minimize the risk of successful brute force attacks to the SSH server. While the recommended setting is 4, set the number based on site policy." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: MaxAuthTries 4" - compliance: - - cis: ["5.2.7"] - - cis_csc: ["16.13"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + title: "Ensure SSH access is limited." + description: "There are several options available to limit which users and group can access the system via SSH. It is recommended that at least one of the following options be leveraged: - AllowUsers: o The AllowUsers variable gives the system administrator the option of allowing specific users to ssh into the system. The list consists of space separated user names. Numeric user IDs are not recognized with this variable. If a system administrator wants to restrict user access further by only allowing the allowed users to log in from a particular host, the entry can be specified in the form of user@host. - AllowGroups: o The AllowGroups variable gives the system administrator the option of allowing specific groups of users to ssh into the system. The list consists of space separated group names. Numeric group IDs are not recognized with this variable. - DenyUsers: o The DenyUsers variable gives the system administrator the option of denying specific users to ssh into the system. The list consists of space separated user names. Numeric user IDs are not recognized with this variable. If a system administrator wants to restrict user access further by specifically denying a user's access from a particular host, the entry can be specified in the form of user@host. - DenyGroups: o The DenyGroups variable gives the system administrator the option of denying specific groups of users to ssh into the system. The list consists of space separated group names. Numeric group IDs are not recognized with this variable." + rationale: "Restricting which users can remotely access the system via SSH will help ensure that only authorized users access the system." + remediation: "Edit the /etc/ssh/sshd_config file to set one or more of the parameter as follows: AllowUsers OR AllowGroups OR DenyUsers OR DenyGroups ." + compliance: + - cis: ["5.3.4"] + - cis_csc_v8: ["3.3", "5.4"] + - cis_csc_v7: ["4.3"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "AC.L2-3.1.6", "AC.L2-3.1.7", "MP.L2-3.8.2", "SC.L2-3.13.3"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.2.3"] + - nist_sp_800-53: ["AC-5", "AC-6", "AC-6(2)", "AC-6(5)"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1", "CC6.3"] condition: any rules: - - 'f:$sshd_file -> !r:^# && n:^MaxAuthTries\s*\t*(\d+) compare <= 4' + - 'c:sshd -T -> r:^\s*AllowUsers\s+\w*|^\s*AllowGroups\s+\w*|^\s*DenyUsers\s+\w*|^\s*DenyGroups\s+\w*' + - 'f:/etc/ssh/sshd_config -> r:^\s*AllowUsers\s+\w*|^\s*AllowGroups\s+\w*|^\s*DenyUsers\s+\w*|^\s*DenyGroups\s+\w*' - # 5.2.8 Ensure SSH IgnoreRhosts is enabled (Automated) + # 5.3.5 Ensure SSH LogLevel is appropriate. 
(Automated) - id: 4659 - title: "Ensure SSH IgnoreRhosts is enabled" - description: "The IgnoreRhosts parameter specifies that .rhosts and .shosts files will not be used in RhostsRSAAuthentication or HostbasedAuthentication." - rationale: "Setting this parameter forces users to enter a password when authenticating with ssh." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: IgnoreRhosts yes" + title: "Ensure SSH LogLevel is appropriate." + description: "INFO level is the basic level that only records login activity of SSH users. In many situations, such as Incident Response, it is important to determine when a particular user was active on a system. The logout record can eliminate those users who disconnected, which helps narrow the field. VERBOSE level specifies that login and logout activity as well as the key fingerprint for any SSH key used for login will be logged. This information is important for SSH key management, especially in legacy environments." + rationale: "SSH provides several logging levels with varying amounts of verbosity. DEBUG is specifically not recommended other than strictly for debugging SSH communications since it provides so much data that it is difficult to identify important security information." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: LogLevel VERBOSE OR LogLevel INFO." + references: + - "https://www.ssh.com/ssh/sshd_config/" compliance: - - cis: ["5.2.8"] - - cis_csc: ["9.2"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis: ["5.3.5"] + - cis_csc_v8: ["8.2", "8.5"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - - 'f:$sshd_file -> !r:^# && r:IgnoreRhosts\s*\t*yes' + - 'c:sshd -T -> r:^\s*LogLevel\s+VERBOSE|^\s*LogLevel\s+INFO' + - 'f:/etc/ssh/sshd_config ->!r:^# && r:loglevel\s*(VERBOSE|INFO)' - # 5.2.9 Ensure SSH HostbasedAuthentication is disabled (Automated) + # 5.3.6 Ensure SSH X11 forwarding is disabled. (Automated) - id: 4660 - title: "Ensure SSH HostbasedAuthentication is disabled" - description: "The HostbasedAuthentication parameter specifies if authentication is allowed through trusted hosts via the user of .rhosts, or /etc/hosts.equiv, along with successful public key client host authentication. This option only applies to SSH Protocol Version 2." - rationale: "Even though the .rhosts files are ineffective if support is disabled in /etc/pam.conf, disabling the ability to use .rhosts files in SSH provides an additional layer of protection." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: HostbasedAuthentication no" + title: "Ensure SSH X11 forwarding is disabled." + description: "The X11Forwarding parameter provides the ability to tunnel X11 traffic through an existing SSH shell session to enable remote graphic connections." + rationale: "Disable X11 forwarding unless there is an operational requirement to use X11 applications directly. 
There is a small risk that the remote X11 servers of users who are logged in via SSH with X11 forwarding could be compromised by other users on the X11 server. Note that even if X11 forwarding is disabled, users can always install their own forwarders." + impact: "X11 programs on the server will not be able to be forwarded to a ssh-client display." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: X11Forwarding no." compliance: - - cis: ["5.2.9"] - - cis_csc: ["16.3"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis: ["5.3.6"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'f:$sshd_file -> !r:^# && r:HostbasedAuthentication\s*\t*no' + - "c:sshd -T -> r:^x11forwarding no" + - "not f:/etc/ssh/sshd_config -> r:^\\s*\\t*x11forwarding\\s*\\t*yes" - # 5.2.10 Ensure SSH root login is disabled (Automated) + # 5.3.7 Ensure SSH MaxAuthTries is set to 4 or less. (Automated) - id: 4661 - title: "Ensure SSH root login is disabled" - description: "The PermitRootLogin parameter specifies if the root user can log in using ssh. The default is no." - rationale: "Disallowing root logins over SSH requires system admins to authenticate using their own individual account, then escalating to root via sudo or su . This in turn limits opportunity for non-repudiation and provides a clear audit trail in the event of a security incident." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitRootLogin no" + title: "Ensure SSH MaxAuthTries is set to 4 or less." + description: "The MaxAuthTries parameter specifies the maximum number of authentication attempts permitted per connection. When the login failure count reaches half the number, error messages will be written to the syslog file detailing the login failure." + rationale: "Setting the MaxAuthTries parameter to a low number will minimize the risk of successful brute force attacks to the SSH server. While the recommended setting is 4, set the number based on site policy." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: MaxAuthTries 4." compliance: - - cis: ["5.2.10"] - - cis_csc: ["4.3"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis: ["5.3.7"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["16.13"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - - 'f:$sshd_file -> !r:^# && r:PermitRootLogin\s*\t*no' + - 'c:sshd -T -> n:^\s*MaxAuthTries\s*\t*(\d+) compare <= 4' + - 'f:/etc/ssh/sshd_config -> n:^\s*MaxAuthTries\s*\t*(\d+) compare <= 4' - # 5.2.11 Ensure SSH PermitEmptyPasswords is disabled (Automated) + # 5.3.8 Ensure SSH IgnoreRhosts is enabled. (Automated) - id: 4662 - title: "Ensure SSH PermitEmptyPasswords is disabled" - description: "The PermitEmptyPasswords parameter specifies if the SSH server allows login to accounts with empty password strings." 
- rationale: "Disallowing remote shell access to accounts that have an empty password reduces the probability of unauthorized access to the system." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitEmptyPasswords no" + title: "Ensure SSH IgnoreRhosts is enabled." + description: "The IgnoreRhosts parameter specifies that .rhosts and .shosts files will not be used in RhostsRSAAuthentication or HostbasedAuthentication." + rationale: "Setting this parameter forces users to enter a password when authenticating with ssh." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: IgnoreRhosts yes." compliance: - - cis: ["5.2.11"] - - cis_csc: ["16"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis: ["5.3.8"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.13.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:$sshd_file -> !r:^# && r:PermitEmptyPasswords\s*\t*no' + - 'c:sshd -T -> r:\s*ignorerhosts\s*yes' + - 'not f:/etc/ssh/sshd_config -> r:^\s*ignorerhosts\s+no' - # 5.2.12 Ensure SSH PermitUserEnvironment is disabled (Automated) + # 5.3.9 Ensure SSH HostbasedAuthentication is disabled. (Automated) - id: 4663 - title: "Ensure SSH PermitUserEnvironment is disabled" - description: "The PermitUserEnvironment option allows users to present environment options to the ssh daemon." - rationale: "Permitting users the ability to set environment variables through the SSH daemon could potentially allow users to bypass security controls (e.g. setting an execution path that has ssh executing trojan'd programs)" - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitUserEnvironment no" + title: "Ensure SSH HostbasedAuthentication is disabled." + description: "The HostbasedAuthentication parameter specifies if authentication is allowed through trusted hosts via the user of .rhosts, or /etc/hosts.equiv, along with successful public key client host authentication. This option only applies to SSH Protocol Version 2." + rationale: "Even though the .rhosts files are ineffective if support is disabled in /etc/pam.conf, disabling the ability to use .rhosts files in SSH provides an additional layer of protection." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: HostbasedAuthentication no." 
compliance: - - cis: ["5.2.12"] - - cis_csc: ["5.1"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis: ["5.3.9"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["16.3"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:$sshd_file -> r:^\s*PermitUserEnvironment\s*\t*no' + - 'c:sshd -T -> r:^\s*HostbasedAuthentication\s*\t*no' + - 'not f:/etc/ssh/sshd_config -> r:^\sHostbasedAuthentication\s+yes' - # 5.2.13 Ensure only strong Ciphers are used (Automated) + # 5.3.10 Ensure SSH root login is disabled. (Automated) - id: 4664 - title: "Ensure SSH Idle Timeout Interval is configured" - description: "This variable limits the ciphers that SSH can use during communication." - rationale: "Weak ciphers that are used for authentication to the cryptographic module cannot be relied upon to provide confidentiality or integrity, and system data may be compromised.: The DES, Triple DES, and Blowfish ciphers, as used in SSH, have a birthday bound of approximately four billion blocks, which makes it easier for remote attackers to obtain cleartext data via a birthday attack against a long-duration encrypted session, aka a 'Sweet32' attack; The RC4 algorithm, as used in the TLS protocol and SSL protocol, does not properly combine state data with key data during the initialization phase, which makes it easier for remote attackers to conduct plaintext-recovery attacks against the initial bytes of a stream by sniffing network traffic that occasionally relies on keys affected by the Invariance Weakness, and then using a brute-force approach involvingLSB values, aka the 'Bar Mitzvah' issue; The passwords used during an SSH session encrypted with RC4 can be recovered by an attacker who is able to capture and replay the session; Error handling in the SSH protocol; Client and Server, when using a block cipher algorithm in Cipher Block Chaining (CBC) mode, makes it easier for remote attackers to recover certain plaintext data from an arbitrary block of ciphertext in an SSH session via unknown vectors; The mm_newkeys_from_blob function in monitor_wrap.c, when an AES-GCM cipher is used, does not properly initialize memory for a MAC context data structure, which allows remote authenticated users to bypass intended ForceCommand and login-shell restrictions via packet data that provides a crafted callback address" - remediation: "Edit the /etc/ssh/sshd_configfile add/modify the Ciphersline to contain a comma separated list of the site approved ciphersExample:Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr" - compliance: - - cis: ["5.2.13"] - - cis_csc: ["14.4"] - - pci_dss: ["12.3.8"] - condition: none + title: "Ensure SSH root login is disabled." + description: "The PermitRootLogin parameter specifies if the root user can log in using ssh. The default is no." + rationale: "Disallowing root logins over SSH requires system admins to authenticate using their own individual account, then escalating to root via sudo. This in turn limits opportunity for non-repudiation and provides a clear audit trail in the event of a security incident." 
+ remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitRootLogin no." + compliance: + - cis: ["5.3.10"] + - cis_csc_v8: ["5.4"] + - cis_csc_v7: ["4.3"] + - cmmc_v2.0: ["AC.L2-3.1.5", "AC.L2-3.1.6", "AC.L2-3.1.7", "SC.L2-3.13.3"] + - iso_27001-2013: ["A.9.2.3"] + - nist_sp_800-53: ["AC-6(2)", "AC-6(5)"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - soc_2: ["CC6.1", "CC6.3"] + condition: all rules: - - "c:sshd -T -> r:ciphers && r:3des-cbc|aes192-cbc|aes256-cbc|arcfour|blowfish-cbc|cast128-cbc|rijndael-cbc@lysator.liu.se" + - 'c:sshd -T -> !r:^# && r:PermitRootLogin\s*\t*no' + - 'not f:/etc/ssh/sshd_config -> r:^\sPermitRootLogin\s+yes' - # 5.2.14 Ensure only strong MAC algorithms are used (Automated) + # 5.3.11 Ensure SSH PermitEmptyPasswords is disabled. (Automated) - id: 4665 - title: "Ensure only strong MAC algorithms are used" - description: "This variable limits the types of MAC algorithms that SSH can use during communication." - rationale: "MD5 and 96-bit MAC algorithms are considered weak and have been shown to increase exploitability in SSH downgrade attacks. Weak algorithms continue to have a great deal of attention as a weak spot that can be exploited with expanded computing power. An attacker that breaks the algorithm could take advantage of a MiTM position to decrypt the SSH tunnel and capture credentials and information" - remediation: "Edit the /etc/ssh/sshd_config file and add/modify the MACs line to contain a comma separated list of the site approved MACs Example:MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512,hmac-sha2-256" - compliance: - - cis: ["5.2.14"] - - cis_csc: ["14.4", "16.5"] - - pci_dss: ["12.3.8"] - condition: none + title: "Ensure SSH PermitEmptyPasswords is disabled." + description: "The PermitEmptyPasswords parameter specifies if the SSH server allows login to accounts with empty password strings." + rationale: "Disallowing remote shell access to accounts that have an empty password reduces the probability of unauthorized access to the system." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitEmptyPasswords no." + compliance: + - cis: ["5.3.11"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["16.3"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all rules: - - "c:sshd -T -> r:macs && r:hmac-md5|hmac-md5-96|hmac-ripemd160|hmac-sha1|hmac-sha1-96|umac-64@openssh.com|umac-128@openssh.com|hmac-md5-etm@openssh.com|hmac-md5-96-etm@openssh.com|hmac-ripemd160-etm@openssh.com|hmac-sha1-etm@openssh.com|hmac-sha1-96-etm@openssh.com|umac-64-etm@openssh.com|umac-128-etm@openssh.com" + - 'c:sshd -T -> !r:^# && r:PermitEmptyPasswords\s*\t*no' + - 'not f:/etc/ssh/sshd_config -> r:^\sPermitEmptyPasswords\s+yes' - # 5.2.15 Ensure only strong Key Exchange algorithms are used (Automated) + # 5.3.12 Ensure SSH PermitUserEnvironment is disabled. (Automated) - id: 4666 - title: "Ensure only strong Key Exchange algorithms are used" - description: "Key exchange is any method in cryptography by which cryptographic keys are exchanged between two parties, allowing use of a cryptographic algorithm. 
If the sender and receiver wish to exchange encrypted messages, each must be equipped to encrypt messages to be sent and decrypt messages received" - rationale: "Key exchange methods that are considered weak should be removed. A key exchange method may be weak because too few bits are used, or the hashing algorithm is considered too weak. Using weak algorithms could expose connections to man-in-the-middle attacks" - remediation: "Edit the /etc/ssh/sshd_config file add/modify the KexAlgorithms line to contain a comma separated list of the site approved key exchange algorithms.Example:'KexAlgorithms curve25519-sha256,curve25519-sha256@libssh.org,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256'" - compliance: - - cis: ["5.2.15"] - - cis_csc: ["14.4"] - - pci_dss: ["12.3.8"] - condition: none - rules: - - "c:sshd -T -> r:kexalgorithms && r:diffie-hellman-group1-sha1|diffie-hellman-group14-sha1|diffie-hellman-group-exchange-sha1" - - # 5.2.16 Ensure SSH Idle Timeout Interval is configured (Automated) - - id: 4667 - title: "Ensure SSH Idle Timeout Interval is configured" - description: "The two options ClientAliveInterval and ClientAliveCountMax control the timeout of ssh sessions. When the ClientAliveInterval variable is set, ssh sessions that have no activity for the specified length of time are terminated. When the ClientAliveCountMax variable is set, sshd will send client alive messages at every ClientAliveInterval interval. When the number of consecutive client alive messages are sent with no response from the client, the ssh session is terminated. For example, if the ClientAliveInterval is set to 15 seconds and the ClientAliveCountMax is set to 3, the client ssh session will be terminated after 45 seconds of idle time." - rationale: "Having no timeout value associated with a connection could allow an unauthorized user access to another user's ssh session (e.g. user walks away from their computer and doesn't lock the screen). Setting a timeout value at least reduces the risk of this happening. While the recommended setting is 300 seconds (5 minutes), set this timeout value based on site policy. The recommended setting for ClientAliveCountMax is 0. In this case, the client session will be terminated after 5 minutes of idle time and no keepalive messages will be sent." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameters according to site policy: ClientAliveInterval 300 and ClientAliveCountMax 0" + title: "Ensure SSH PermitUserEnvironment is disabled." + description: "The PermitUserEnvironment option allows users to present environment options to the ssh daemon." + rationale: "Permitting users the ability to set environment variables through the SSH daemon could potentially allow users to bypass security controls (e.g. setting an execution path that has ssh executing a Trojan's programs)." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitUserEnvironment no." 
compliance: - - cis: ["5.2.16"] - - cis_csc: ["16.11"] - - pci_dss: ["12.3.8"] + - cis: ["5.3.12"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:$sshd_file -> n:^\s*ClientAliveInterval\s*\t*(\d+) compare <= 300' - - 'f:$sshd_file -> n:^\s*ClientAliveCountMax\s*\t*(\d+) compare <= 3' - - 'f:$sshd_file -> n:^\s*ClientAliveInterval\s*\t*(\d+) compare >= 1' - - 'f:$sshd_file -> n:^\s*ClientAliveCountMax\s*\t*(\d+) compare >= 1' + - 'c:sshd -T -> r:^\s*PermitUserEnvironment\s*\t*no' + - 'not f:/etc/ssh/sshd_config -> r:^\sPermitUserEnvironment\s+yes' - # 5.2.17 Ensure SSH LoginGraceTime is set to one minute or less (Automated) + # 5.3.13 Ensure only strong Ciphers are used. (Automated) + - id: 4667 + title: "Ensure only strong Ciphers are used." + description: "This variable limits the ciphers that SSH can use during communication. Note: Some organizations may have stricter requirements for approved ciphers. Ensure that ciphers used are in compliance with site policy." + rationale: 'Weak ciphers that are used for authentication to the cryptographic module cannot be relied upon to provide confidentiality or integrity, and system data may be compromised. - The DES, Triple DES, and Blowfish ciphers, as used in SSH, have a birthday bound of approximately four billion blocks, which makes it easier for remote attackers to obtain cleartext data via a birthday attack against a long-duration encrypted session, aka a "Sweet32" attack - The RC4 algorithm, as used in the TLS protocol and SSL protocol, does not properly combine state data with key data during the initialization phase, which makes it easier for remote attackers to conduct plaintext-recovery attacks against the initial bytes of a stream by sniffing network traffic that occasionally relies on keys affected by the Invariance Weakness, and then using a brute-force approach involving LSB values, aka the "Bar Mitzvah" issue - The passwords used during an SSH session encrypted with RC4 can be recovered by an attacker who is able to capture and replay the session - Error handling in the SSH protocol; Client and Server, when using a block cipher algorithm in Cipher Block Chaining (CBC) mode, makes it easier for remote attackers to recover certain plaintext data from an arbitrary block of ciphertext in an SSH session via unknown vectors.' + remediation: "Edit the /etc/ssh/sshd_config file add/modify the Ciphers line to contain a comma separated list of the site approved ciphers Example: Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128- gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr." 
+ references: + - "https://nvd.nist.gov/vuln/detail/CVE-2016-2183" + - "https://nvd.nist.gov/vuln/detail/CVE-2015-2808" + - "https://www.kb.cert.org/vuls/id/565052" + - "https://www.openssh.com/txt/cbc.adv" + - "https://nvd.nist.gov/vuln/detail/CVE-2008-5161" + - "https://nvd.nist.gov/vuln/detail/CVE-2013-4548" + compliance: + - cis: ["5.3.13"] + - cis_csc_v8: ["3.10"] + - cis_csc_v7: ["14.4"] + - cmmc_v2.0: ["AC.L2-3.1.13", "AC.L2-3.1.17", "IA.L2-3.5.10", "SC.L2-3.13.11", "SC.L2-3.13.15", "SC.L2-3.13.8"] + - hipaa: ["164.312(a)(2)(iv)", "164.312(e)(1)", "164.312(e)(2)(i)", "164.312(e)(2)(ii)"] + - iso_27001-2013: ["A.10.1.1", "A.13.1.1"] + - nist_sp_800-53: ["AC-17(2)", "SC-8", "SC-8(1)"] + - pci_dss_v3.2.1: ["2.1.1", "4.1", "4.1.1", "8.2.1"] + - pci_dss_v4.0: ["2.2.7", "4.1.1", "4.2.1", "4.2.1.2", "4.2.2", "8.3.2"] + condition: all + rules: + - "not c:sshd -T -> r:3des-cbc|aes128-cbc|aes192-cbc|aes256-cbc|arcfour|arcfour128|arcfour256|blowfish-cbc|cast128-cbc|rijndael-cbc@lysator.liu.se" + - "not f:/etc/ssh/sshd_config -> r:3des-cbc|aes128-cbc|aes192-cbc|aes256-cbc|arcfour|arcfour128|arcfour256|blowfish-cbc|cast128-cbc|rijndael-cbc@lysator.liu.se" + + # 5.3.14 Ensure only strong MAC algorithms are used. (Automated) - id: 4668 - title: "Ensure SSH LoginGraceTime is set to one minute or less" - description: "The LoginGraceTime parameter specifies the time allowed for successful authentication to the SSH server. The longer the Grace period is the more open unauthenticated connections can exist. Like other session controls in this session the Grace Period should be limited to appropriate organizational limits to ensure the service is available for needed access." - rationale: "Setting the LoginGraceTime parameter to a low number will minimize the risk of successful brute force attacks to the SSH server. It will also limit the number of concurrent unauthenticated connections While the recommended setting is 60 seconds (1 Minute), set the number based on site policy." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: LoginGraceTime 60" + title: "Ensure only strong MAC algorithms are used." + description: "This variable Specifies the available MAC (message authentication code) algorithms. The MAC algorithm is used in protocol version 2 for data integrity protection. Multiple algorithms must be comma-separated. Note: Some organizations may have stricter requirements for approved MACs. Ensure that MACs used are in compliance with site policy." + rationale: "MD5 and 96-bit MAC algorithms are considered weak and have been shown to increase exploitability in SSH downgrade attacks. Weak algorithms continue to have a great deal of attention as a weak spot that can be exploited with expanded computing power. An attacker that breaks the algorithm could take advantage of a MiTM position to decrypt the SSH tunnel and capture credentials and information." + remediation: "Edit the /etc/ssh/sshd_config file and add/modify the MACs line to contain a comma separated list of the site approved MACs Example: MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512,hmac-sha2-256."
+ references: + - "http://www.mitls.org/pages/attacks/SLOTH" compliance: - - cis: ["5.2.17"] - - cis_csc: ["5.1"] - - pci_dss: ["4.1"] - - tsc: ["CC6.1"] + - cis: ["5.3.14"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["14.4", "16.5"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.10.1.1", "A.13.1.1"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:/etc/ssh/sshd_config -> n:^\s*LoginGraceTime\s*\t*(\d+) compare <= 60' - - 'f:/etc/ssh/sshd_config -> n:^\s*LoginGraceTime\s*\t*(\d+) compare >= 1' + - "not c:sshd -T -> r:hmac-md5|hmac-md5-96|hmac-ripemd160|hmac-sha1|hmac-sha1-96|umac-64@openssh.com|hmac-md5-etm@openssh.com|hmac-md5-96-etm@openssh.com|hmac-ripemd160-etm@openssh.com|hmac-sha1-etm@openssh.com|hmac-sha1-96-etm@openssh.com|umac-64-etm@openssh.com|umac-128-etm@openssh.com" + - "not f:/etc/ssh/sshd_config -> r:hmac-md5|hmac-md5-96|hmac-ripemd160|hmac-sha1|hmac-sha1-96|umac-64@openssh.com|hmac-md5-etm@openssh.com|hmac-md5-96-etm@openssh.com|hmac-ripemd160-etm@openssh.com|hmac-sha1-etm@openssh.com|hmac-sha1-96-etm@openssh.com|umac-64-etm@openssh.com|umac-128-etm@openssh.com" - # 5.2.18 Ensure SSH warning banner is configured (Automated) + # 5.3.15 Ensure only strong Key Exchange algorithms are used. (Automated) - id: 4669 - title: "Ensure SSH warning banner is configured" - description: "The Banner parameter specifies a file whose contents must be sent to the remote user before authentication is permitted. By default, no banner is displayed." - rationale: "Banners are used to warn connecting users of the particular site's policy regarding connection. Presenting a warning message prior to the normal user login may assist the prosecution of trespassers on the computer system." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: Banner /etc/issue.net" + title: "Ensure only strong Key Exchange algorithms are used." + description: "Key exchange is any method in cryptography by which cryptographic keys are exchanged between two parties, allowing use of a cryptographic algorithm. If the sender and receiver wish to exchange encrypted messages, each must be equipped to encrypt messages to be sent and decrypt messages received Note: Some organizations may have stricter requirements for approved Key Exchange algorithms. Ensure that Key Exchange algorithms used are in compliance with site policy." + rationale: "Key exchange methods that are considered weak should be removed. A key exchange method may be weak because too few bits are used or the hashing algorithm is considered too weak. Using weak algorithms could expose connections to man-in-the-middle attacks." + remediation: "Edit the /etc/ssh/sshd_config file add/modify the KexAlgorithms line to contain a comma separated list of the site approved key exchange algorithms Example: KexAlgorithms curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256."
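+    # Illustrative verification (editor's example, not CIS text; assumes the sample lists from the
+    # 5.3.13-5.3.15 remediations match site policy): once the Ciphers, MACs and KexAlgorithms lines
+    # are set in /etc/ssh/sshd_config, the effective values can be re-checked with:
+    #   sshd -T | grep -i -E '^ciphers|^macs|^kexalgorithms'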
+ compliance: + - cis: ["5.3.15"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["14.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.10.1.1", "A.13.1.1"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all + rules: + - "not c:sshd -T -C user=root -> r:diffie-hellman-group1-sha1|diffie-hellman-group14-sha1|diffie-hellman-group-exchange-sha1" + - "not f:/etc/ssh/sshd_config -> r:diffie-hellman-group1-sha1|diffie-hellman-group14-sha1|diffie-hellman-group-exchange-sha1" + + # 5.3.16 Ensure SSH Idle Timeout Interval is configured. (Automated) - id: 4670 - title: "Ensure SSH PAM is enabled" - description: "UsePAM Enables the Pluggable Authentication Module interface. If set to “yes” this will enable PAM authentication using ChallengeResponseAuthenticationand PasswordAuthentication in addition to PAM account and session module processing for all authentication types" - rationale: "When usePAM is set to yes, PAM runs through account and session types properly. This is important if you want to restrict access to services based off of IP, time or other factors of the account. Additionally, you can make sure users inherit certain environment variables on login or disallow access to the server" - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: UsePAM yes" + title: "Ensure SSH Idle Timeout Interval is configured." + description: "The two options ClientAliveInterval and ClientAliveCountMax control the timeout of ssh sessions. - ClientAliveInterval sets a timeout interval in seconds after which if no data has been received from the client, sshd will send a message through the encrypted channel to request a response from the client. The default is 0, indicating that these messages will not be sent to the client. - ClientAliveCountMax sets the number of client alive messages which may be sent without sshd receiving any messages back from the client. If this threshold is reached while client alive messages are being sent, sshd will disconnect the client, terminating the session. The default value is 3. - The client alive messages are sent through the encrypted channel. - Setting ClientAliveCountMax to 0 disables connection termination. Example: The default value is 3. If ClientAliveInterval is set to 15, and ClientAliveCountMax is left at the default, unresponsive SSH clients will be disconnected after approximately 45 seconds." + rationale: "Having no timeout value associated with a connection could allow an unauthorized user access to another user's ssh session (e.g. user walks away from their computer and doesn't lock the screen). Setting a timeout value reduces this risk. - The recommended ClientAliveInterval setting is no greater than 900 seconds (15 minutes) - The recommended ClientAliveCountMax setting is 0 - At the 15 minute interval, if the ssh session is inactive, the session will be terminated." + impact: "In some cases this setting may cause termination of long-running scripts over SSH or remote automation tools which rely on SSH. In developing the local site policy, the requirements of such scripts should be considered and appropriate ServerAliveInterval and ClientAliveInterval settings should be calculated to insure operational continuity."
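+    # Worked example (editor's restatement of the arithmetic in the description above): with
+    # ClientAliveInterval 15 and the default ClientAliveCountMax 3, an unresponsive client is
+    # dropped after roughly 15 s x 3 = 45 s. The remediation that follows uses the recommended
+    # values instead:
+    #   ClientAliveInterval 900
+    #   ClientAliveCountMax 0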
+ remediation: "Edit the /etc/ssh/sshd_config file to set the parameters according to site policy. This should include ClientAliveInterval between 1 and 900 and ClientAliveCountMax of 0: ClientAliveInterval 900 ClientAliveCountMax 0." + references: + - "https://man.openbsd.org/sshd_config" compliance: - - cis: ["5.2.19"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["5.3.16"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["16.11"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:$sshd_file -> r:^\s*UsePAM\s*yes' + - 'c:sshd -T -> n:^\s*clientaliveinterval\s*\t*(\d+) compare >= 1 && n:^\s*clientaliveinterval\s*\t*(\d+) compare <= 900' + - 'c:sshd -T -> n:^\s*clientalivecountmax\s*\t*(\d+) compare == 0' + - 'f:/etc/ssh/sshd_config -> n:^\s*clientaliveinterval\s*\t*(\d+) compare >= 1 && n:^\s*clientaliveinterval\s*\t*(\d+) compare <= 900' + - 'f:/etc/ssh/sshd_config -> n:^\s*clientalivecountmax\s*\t*(\d+) compare == 0' - # 5.2.20 Ensure SSH AllowTcpForwarding is disabled (Automated) + # 5.3.17 Ensure SSH LoginGraceTime is set to one minute or less. (Automated) - id: 4671 - title: "Ensure SSH AllowTcpForwarding is disabled" - description: "SSH port forwarding is a mechanism in SSH for tunneling application ports from the client to the server, or servers to clients. It can be used for adding encryption to legacy applications, going through firewalls, and some system administrators and IT professionals use it for opening backdoors into the internal network from their home machines" - rationale: "Leaving port forwarding enabled can expose the organization to security risks and back-doors.SSH connections are protected with strong encryption. This makes their contents invisible to most deployed network monitoring and traffic filtering solutions. This invisibility carries considerable risk potential if it is used for malicious purposes such as data exfiltration. Cybercriminals or malware could exploit SSH to hide their unauthorized communications, or to exfiltrate stolen data from the target network" - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: AllowTcpForwarding no" + title: "Ensure SSH LoginGraceTime is set to one minute or less." + description: "The LoginGraceTime parameter specifies the time allowed for successful authentication to the SSH server. The longer the Grace period is the more open unauthenticated connections can exist. Like other session controls in this session the Grace Period should be limited to appropriate organizational limits to ensure the service is available for needed access." + rationale: "Setting the LoginGraceTime parameter to a low number will minimize the risk of successful brute force attacks to the SSH server. It will also limit the number of concurrent unauthenticated connections While the recommended setting is 60 seconds (1 Minute), set the number based on site policy." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: LoginGraceTime 60."
compliance: - - cis: ["5.2.20"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["5.3.17"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:$sshd_file -> r:^\s*AllowTcpForwarding\s*no' + - 'c:sshd -T -> n:^\s*logingracetime\s*\t*(\d+) compare >= 1 && n:^\s*logingracetime\s*\t*(\d+) compare <= 60' + - 'f:/etc/ssh/sshd_config -> n:^\s*logingracetime\s*\t*(\d+) compare >= 1 && n:^\s*logingracetime\s*\t*(\d+) compare <= 60' - # 5.2.21 Ensure SSH MaxStartups is configured (Automated) + # 5.3.18 Ensure SSH warning banner is configured. (Automated) - id: 4672 - title: "Ensure SSH MaxStartups is configured" - description: "The MaxStartupsparameter specifies the maximum number of concurrent unauthenticated connections to the SSH daemon" - rationale: "To protect a system from denial of service due to a large number of pending authentication connection attempts, use the rate limiting function of MaxStartups to protect availability of sshd logins and prevent overwhelming the daemon" - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: maxstartups 10:30:60" + title: "Ensure SSH warning banner is configured." + description: "The Banner parameter specifies a file whose contents must be sent to the remote user before authentication is permitted. By default, no banner is displayed." + rationale: "Banners are used to warn connecting users of the particular site's policy regarding connection. Presenting a warning message prior to the normal user login may assist the prosecution of trespassers on the computer system." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: Banner /etc/issue.net." compliance: - - cis: ["5.2.21"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["5.3.18"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:$sshd_file -> r:^\s*maxstartups\s*10:30:60' + - 'c:sshd -T -> r:^\s*Banner\s*\t*/etc/issue.net' - # 5.2.22 Ensure SSH MaxSessions is limited (Automated) + # 5.3.19 Ensure SSH PAM is enabled. (Automated) - id: 4673 - title: "Ensure SSH MaxSessions is limited" - description: "The MaxSessionsparameter specifies the maximum number of open sessions permitted from a given connection." - rationale: "To protect a system from denial of service due to a large number of pending authentication connection attempts, use the rate limiting function of MaxStartups to protect availability of sshd logins and prevent overwhelming the daemon" - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: MaxSessions 10" + title: "Ensure SSH PAM is enabled." + description: 'UsePAM Enables the Pluggable Authentication Module interface.
If set to "yes" this will enable PAM authentication using ChallengeResponseAuthentication and PasswordAuthentication in addition to PAM account and session module processing for all authentication types.' + rationale: "When usePAM is set to yes, PAM runs through account and session types properly. This is important if you want to restrict access to services based off of IP, time or other factors of the account. Additionally, you can make sure users inherit certain environment variables on login or disallow access to the server." + impact: "If UsePAM is enabled, you will not be able to run sshd(5) as a non-root user." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: UsePAM yes." + compliance: + - cis: ["5.3.19"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all + rules: + - 'c:sshd -T -> r:^\s*usepam\s*yes' + - 'not f:/etc/ssh/sshd_config -> r:^\s*UsePAM\s+no' + + # 5.3.20 Ensure SSH AllowTcpForwarding is disabled. (Automated) + - id: 4674 + title: "Ensure SSH AllowTcpForwarding is disabled." + description: "SSH port forwarding is a mechanism in SSH for tunneling application ports from the client to the server, or servers to clients. It can be used for adding encryption to legacy applications, going through firewalls, and some system administrators and IT professionals use it for opening backdoors into the internal network from their home machines." + rationale: "Leaving port forwarding enabled can expose the organization to security risks and back-doors. SSH connections are protected with strong encryption. This makes their contents invisible to most deployed network monitoring and traffic filtering solutions. This invisibility carries considerable risk potential if it is used for malicious purposes such as data exfiltration. Cybercriminals or malware could exploit SSH to hide their unauthorized communications, or to exfiltrate stolen data from the target network." + impact: "SSH tunnels are widely used in many corporate environments that employ mainframe systems as their application backends. In those environments the applications themselves may have very limited native support for security. By utilizing tunneling, compliance with SOX, HIPAA, PCI-DSS, and other standards can be achieved without having to modify the applications." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: AllowTcpForwarding no." 
+ references: + - "https://www.ssh.com/ssh/tunneling/example" compliance: - - cis: ["5.2.22"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["5.3.20"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["9.2", "13.5"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.13.1.1", "A.13.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:$sshd_file -> r:^\s*MaxSessions\s*10' + - 'c:sshd -T -> r:^\s*allowtcpforwarding\s*no' + - 'not f:/etc/ssh/sshd_config -> r:^\s*AllowTcpForwarding\s+yes' + + # 5.3.21 Ensure SSH MaxStartups is configured. (Automated) + - id: 4675 + title: "Ensure SSH MaxStartups is configured." + description: "The MaxStartups parameter specifies the maximum number of concurrent unauthenticated connections to the SSH daemon." + rationale: "To protect a system from denial of service due to a large number of pending authentication connection attempts, use the rate limiting function of MaxStartups to protect availability of sshd logins and prevent overwhelming the daemon." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: maxstartups 10:30:60." + compliance: + - cis: ["5.3.21"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all + rules: + - 'c:sshd -T -> n:^\s*maxstartups\s+(\d+):\d+:\d+ compare <= 10' + - 'c:sshd -T -> n:^\s*maxstartups\s+\d+:(\d+):\d+ compare <= 30' + - 'c:sshd -T -> n:^\s*maxstartups\s+\d+:\d+:(\d+) compare <= 60' + - 'f:/etc/ssh/sshd_config -> n:^\s*maxstartups\s+(\d+):\d+:\d+ compare <= 10' + - 'f:/etc/ssh/sshd_config -> n:^\s*maxstartups\s+\d+:(\d+):\d+ compare <= 30' + - 'f:/etc/ssh/sshd_config -> n:^\s*maxstartups\s+\d+:\d+:(\d+) compare <= 60' + + # 5.3.22 Ensure SSH MaxSessions is limited. (Automated) + - id: 4676 + title: "Ensure SSH MaxSessions is limited." + description: "The MaxSessions parameter Specifies the maximum number of open sessions permitted per network connection." + rationale: "To protect a system from denial of service due to a large number of concurrent sessions, use the rate limiting function of MaxSessions to protect availability of sshd logins and prevent overwhelming the daemon." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: MaxSessions 10." 
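+    # Note (editor's illustration, not CIS text): MaxStartups takes sshd's "start:rate:full" form.
+    # With the recommended 10:30:60, sshd refuses roughly 30% of new unauthenticated connections
+    # once 10 are pending and refuses all of them at 60, which is why check 5.3.21 above bounds each
+    # of the three fields separately; MaxSessions, checked here, instead caps the number of sessions
+    # multiplexed over a single connection.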
+ compliance: + - cis: ["5.3.22"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: any + rules: + - 'c:sshd -T -> n:^\s*MaxSessions\s+(\d+) compare <= 10' + - 'not f:/etc/ssh/sshd_config -> n:^\s*MaxSessions\s+(\d+) compare > 10' ############################################### - # 5.3 Configure PAM + # 5.4 Configure PAM ############################################### - # 5.3.1 Ensure password creation requirements are configured (Automated) - - id: 4674 - title: "Ensure password creation requirements are configured" - description: "The pam_pwquality.so module checks the strength of passwords. It performs checks such as making sure a password is not a dictionary word, it is a certain length, contains a mix of characters (e.g. alphabet, numeric, other) and more" - rationale: "Strong passwords protect systems from being hacked through brute force methods." - remediation: "Edit the file /etc/security/pwquality.conf and add or modify the following line for password length to conform to site policy: minlen = 14 Edit the file /etc/security/pwquality.conf and add or modify the following line for password complexity to conform to site policy: minclass = 4 OR dcredit = -1 ucredit = -1 ocredit = -1 lcredit = -1 Edit the /etc/pam.d/password-auth and /etc/pam.d/system-auth files to include the appropriate options for pam_pwquality.so and to conform to site policy:password requisite pam_pwquality.so try_first_pass retry=3" + + # 5.4.1 Ensure password creation requirements are configured. (Automated) + - id: 4677 + title: "Ensure password creation requirements are configured." + description: "The pam_pwquality.so module checks the strength of passwords. It performs checks such as making sure a password is not a dictionary word, it is a certain length, contains a mix of characters (e.g. alphabet, numeric, other) and more. The following are definitions of the pam_pwquality.so options. The following options are set in the /etc/security/pwquality.conf file: Password Length: - minlen = 14 - password must be 14 characters or more Password complexity: - minclass = 4 - The minimum number of required classes of characters for the new password (digits, uppercase, lowercase, others) OR - dcredit = -1 - provide at least one digit - ucredit = -1 - provide at least one uppercase character - ocredit = -1 - provide at least one special character - lcredit = -1 - provide at least one lowercase character The following is set in the /etc/pam.d/password-auth and /etc/pam.d/system-auth files - try_first_pass - retrieve the password from a previous stacked PAM module. If not available, then prompt the user for a password. - retry=3 - Allow 3 tries before sending back a failure. The settings shown above are one possible policy. Alter these values to conform to your own organization's password policies. Notes: - Settings in /etc/security/pwquality.conf must use spaces around the = symbol. - Additional modules options may be set in the /etc/pam.d/password-auth and /etc/pam.d/system-auth files." + rationale: "Strong passwords and limited attempts before locking an account protect systems from being hacked through brute force methods." 
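+    # Illustrative pre-check (editor's example, not CIS text): the values currently enforced can be
+    # listed before applying the remediation below, e.g.:
+    #   grep -E '^(minlen|minclass|dcredit|ucredit|ocredit|lcredit)' /etc/security/pwquality.conf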
+ remediation: "Edit the file /etc/security/pwquality.conf and add or modify the following line for password length to conform to site policy minlen = 14 Edit the file /etc/security/pwquality.conf and add or modify the following line for password complexity to conform to site policy minclass = 4 OR dcredit = -1 ucredit = -1 ocredit = -1 lcredit = -1 Edit the /etc/pam.d/password-auth and /etc/pam.d/system-auth files to include the appropriate options for pam_pwquality.so and to conform to site policy: password requisite pam_pwquality.so try_first_pass retry=3." + compliance: + - cis: ["5.4.1"] + - cis_csc_v8: ["5.2"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["IA.L2-3.5.7"] + - iso_27001-2013: ["A.9.4.3"] + - pci_dss_v4.0: ["2.2.2", "8.3.5", "8.3.6", "8.6.3"] + - soc_2: ["CC6.1"] + condition: all + rules: + - "f:/etc/pam.d/password-auth -> r:pam_pwquality.so && r:try_first_pass" + - "f:/etc/pam.d/system-auth -> r:pam_pwquality.so && r:try_first_pass" + - 'f:/etc/security/pwquality.conf -> n:^\s*minlen\s+\t*=\s+\t*(\d+) compare >= 14' + + # 5.4.2 Ensure lockout for failed password attempts is configured. (Automated) + - id: 4678 + title: "Ensure lockout for failed password attempts is configured." + description: 'Lock out users after n unsuccessful consecutive login attempts. These settings are commonly configured with the pam_faillock.so module. Some environments may continue using the pam_tally2.so module, where this older method may simplify automation in mixed environments. Set the lockout number in deny= to the policy in effect at your site. unlock_time=_n_ is the number of seconds the account remains locked after the number of attempts configured in deny=_n_ has been met. Notes: - Additional module options may be set, recommendation only covers those listed here. - When modifying authentication configuration using the authconfig utility, the system-auth and password-auth files are overwritten with the settings from the authconfig utility. This can be avoided by creating symbolic links in place of the configuration files, which authconfig recognizes and does not overwrite. These symbolic links are the default for Fedora 19 derived distributions. - Use of the "audit" keyword may log credentials in the case of user error during - authentication. This risk should be evaluated in the context of the site policies of your organization. If a user has been locked out because they have reached the maximum consecutive failure count defined by deny= in the pam_faillock.so or the pam_tally2.so module, the user can be unlocked by issuing following commands. This command sets the failed count to 0, effectively unlocking the user. If pam_faillock.so is used: o o # faillock --user --reset o o # pam_tally2 -u --reset If pam_tally2.so is used:.' + rationale: "Locking out user IDs after n unsuccessful consecutive login attempts mitigates brute force password attacks against your systems." + remediation: 'Edit the files /etc/pam.d/system-auth and /etc/pam.d/password-auth and add the following lines: Modify the deny= and unlock_time= parameters to conform to local site policy, Not to be greater than deny=5 To use pam_faillock.so module, add the following lines to the auth section: auth required pam_faillock.so preauth silent audit deny=5 unlock_time=900 auth [default=die] pam_faillock.so authfail audit deny=5 unlock_time=900 The auth sections should look similar to the following example: Note: The ordering on the lines in the auth section is important. 
The preauth line needs to be below the line auth required pam_env.so and above all password validation lines. The authfail line needs to be after all password validation lines such as pam_sss.so. Incorrect order can cause you to be locked out of the system. Example: auth required pam_env.so auth required pam_faillock.so preauth silent audit deny=5 unlock_time=900 # <- Under "auth required pam_env.so" auth sufficient pam_unix.so nullok try_first_pass auth [default=die] pam_faillock.so authfail audit deny=5 unlock_time=900 # <- Last auth line before "auth requisite pam_succeed_if.so" auth requisite pam_succeed_if.so uid >= 1000 quiet_success auth required pam_deny.so Add the following line to the account section: account required pam_faillock.so Example: account required pam_faillock.so account required pam_unix.so account sufficient pam_localuser.so account sufficient pam_succeed_if.so uid < 1000 quiet account required pam_permit.so OR To use the pam_tally2.so module, add the following line to the auth section: auth required pam_tally2.so deny=5 onerr=fail unlock_time=900 The auth sections should look similar to the following example: Note: The ordering on the lines in the auth section is important. the additional line needs to be below the line auth required pam_env.so and above all password validation lines. Example: auth required pam_env.so auth required pam_tally2.so deny=5 onerr=fail unlock_time=900 # <- Under "auth required pam_env.so" auth sufficient pam_unix.so nullok try_first_pass auth requisite pam_succeed_if.so uid >= 1000 quiet_success auth required pam_deny.so Add the following line to the account section: account required pam_tally2.so Example: account required pam_tally2.so account required pam_unix.so account sufficient pam_localuser.so account sufficient pam_succeed_if.so uid < 1000 quiet account required pam_permit.so.' + references: + - "https://access.redhat.com/documentation/en-" compliance: - - cis: ["5.4.2"] - - cis_csc_v8: ["4.1"] - - cis_csc_v7: ["16.7"] - - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] - - iso_27001-2013: ["A.9.2.6"] - - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] - - pci_dss_v3.2.1: ["11.5", "2.2"] - - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] - - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:/etc/pam.d/password-auth -> r:^\s*auth\.+required\.+pam_faillock.so\.+ && n:deny=(\d+) compare <= 5 && n:unlock_time=(\d+) compare >= 900' - - 'f:/etc/pam.d/system-auth -> r:^\s*auth\.+required\.+pam_faillock.so\.+ && n:deny=(\d+) compare <= 5 && n:unlock_time=(\d+) compare >= 900' - # 5.3.3 Ensure password hashing algorithm is SHA-512 (Automated) + # 5.4.3 Ensure password hashing algorithm is SHA-512. (Automated) - id: 4675 - title: "Ensure password hashing algorithm is SHA-512" - description: "The commands below change password encryption from md5 to sha512 (a much stronger hashing algorithm). All existing accounts will need to perform a password change to upgrade the stored hashes to the new algorithm."
- rationale: "The SHA-512 algorithm provides much stronger hashing than MD5, thus providing additional protection to the system by increasing the level of effort for an attacker to successfully determine passwords. Note that these changes only apply to accounts configured on the local system." - remediation: "Edit the /etc/pam.d/password-auth and /etc/pam.d/system-auth files to include the sha512 option for pam_unix.so as shown: password sufficient pam_unix.so sha512" + # 5.4.3 Ensure password hashing algorithm is SHA-512. (Automated) + - id: 4679 + title: "Ensure password hashing algorithm is SHA-512." + description: "The commands below change password encryption from md5 to sha512 (a much stronger hashing algorithm). All existing accounts will need to perform a password change to upgrade the stored hashes to the new algorithm. Note: - These changes only apply to accounts configured on the local system. - Additional module options may be set, recommendation only covers those listed here." + rationale: "The SHA-512 algorithm provides much stronger hashing than MD5, thus providing additional protection to the system by increasing the level of effort for an attacker to successfully determine passwords." + remediation: "Edit the /etc/pam.d/password-auth and /etc/pam.d/system-auth files to include sha512 option and remove the md5 option for pam_unix.so: password sufficient pam_unix.so sha512 Note: - Any system accounts that need to be expired should be carefully done separately by the - system administrator to prevent any potential problems. If it is determined that the password algorithm being used is not SHA-512, once it is changed, it is recommended that all user ID's be immediately expired and forced to change their passwords on next login, In accordance with local site policies. - To accomplish this, the following command can be used. o This command intentionally does not affect the root account. The root account's password will also need to be changed. # awk -F: '( $3<'\"$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs)\"' && $1 !~ /^(nfs)?nobody$/ && $1 != \"root\" ) { print $1 }' /etc/passwd | xargs -n 1 chage -d 0." compliance: - - cis: ["5.3.3"] - - cis_csc: ["16.4"] - - pci_dss: ["3.6.1", "8.2.1"] - - tsc: ["CC6.1", "CC6.7"] + - cis: ["5.4.3"] + - cis_csc_v8: ["3.11"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L2-3.1.19", "IA.L2-3.5.10", "MP.L2-3.8.1", "SC.L2-3.13.11", "SC.L2-3.13.16"] + - hipaa: ["164.312(a)(2)(iv)", "164.312(e)(2)(ii)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["SC-28", "SC-28(1)"] + - pci_dss_v3.2.1: ["3.4", "3.4.1", "8.2.1"] + - pci_dss_v4.0: ["3.1.1", "3.3.2", "3.3.3", "3.5.1", "3.5.1.2", "3.5.1.3", "8.3.2"] + - soc_2: ["CC6.1"] condition: all rules: - - 'f:/etc/pam.d/password-auth -> r:^password\s*sufficient\s*pam_unix.so\s*sha512' - - 'f:/etc/pam.d/system-auth -> r:^password\s*sufficient\s*pam_unix.so\s*sha512' + - 'f:/etc/pam.d/password-auth -> r:^\s*password\.+sufficient\.+pam_unix\.so && r:sha512' + - 'f:/etc/pam.d/system-auth -> r:^\s*password\.+sufficient\.+pam_unix\.so && r:sha512' - # 5.3.4 Ensure password reuse is limited (Automated) - - id: 4676 - title: "Ensure password reuse is limited" - description: "The /etc/security/opasswd file stores the users' old passwords and can be checked to ensure that users are not recycling recent passwords." - rationale: "Forcing users not to reuse their past 5 passwords make it less likely that an attacker will be able to guess the password. 
Note that these changes only apply to accounts configured on the local system." - remediation: "Edit the /etc/pam.d/password-auth and /etc/pam.d/system-auth files to include the remember option and conform to site policy as shown: password sufficient pam_unix.so remember=5 or password required pam_pwhistory.so remember=5" + # 5.4.4 Ensure password reuse is limited. (Automated) + - id: 4680 + title: "Ensure password reuse is limited." + description: "The /etc/security/opasswd file stores the users' old passwords and can be checked to ensure that users are not recycling recent passwords. Note: Additional module options may be set, recommendation only covers those listed here." + rationale: "Forcing users not to reuse their past 5 passwords make it less likely that an attacker will be able to guess the password." + remediation: "Edit both the /etc/pam.d/password-auth and /etc/pam.d/system-auth files to include the remember option and conform to site policy as shown: Note: Add or modify the line containing the pam_pwhistory.so after the first occurrence of password requisite: password required pam_pwhistory.so remember=5 Example: (Second line is modified) password requisite pam_pwquality.so try_first_pass local_users_only authtok_type= password required pam_pwhistory.so use_authtok remember=5 retry=3 password sufficient pam_unix.so sha512 shadow nullok try_first_pass use_authtok password required pam_deny.so." compliance: - - cis: ["5.3.3"] - - cis_csc: ["16"] - - pci_dss: ["8.2.5"] - - tsc: ["CC6.1"] + - cis: ["5.4.4"] + - cis_csc_v8: ["5.2"] + - cis_csc_v7: ["16"] + - cmmc_v2.0: ["IA.L2-3.5.7"] + - pci_dss_v4.0: ["2.2.2", "8.3.5", "8.3.6", "8.6.3"] + - soc_2: ["CC6.1"] condition: all rules: - - 'f:/etc/pam.d/password-auth -> n:^password\s+sufficient\s+pam_unix.so\.+remember=(\d+)|^password\s+required\s+pam_pwhistory.so\.+remember=(\d+) compare >= 5' - - 'f:/etc/pam.d/system-auth -> n:^password\s+sufficient\s+pam_unix.so\.+remember=(\d+)|^password\s+required\s+pam_pwhistory.so\.+remember=(\d+) compare >= 5' + - 'f:/etc/pam.d/system-auth -> r:^\s*password\.+requisite\.+pam_pwquality\.so\.+ && n:remember=(\d+) compare >= 5' + - 'f:/etc/pam.d/system-auth -> r:^\s*password\.+sufficient\.+pam_unix\.so\.+ && n:remember=(\d+) compare >= 5' ############################################### - # 5.4 User Accounts and Environment + # 5.5 User Accounts and Environment ############################################### ############################################### - # 5.4.1 Set Shadow Password Suite Parameters + # 5.5.1 Set Shadow Password Suite Parameters ############################################### - # 5.4.1.1 Ensure password expiration is 365 days or less (Automated) - - id: 4677 - title: "Ensure password expiration is 365 days or less" - description: "The PASS_MAX_DAYS parameter in /etc/login.defs allows an administrator to force passwords to expire once they reach a defined age. It is recommended that the PASS_MAX_DAYS parameter be set to less than or equal to 365 days." - rationale: "The window of opportunity for an attacker to leverage compromised credentials or successfully compromise credentials via an online brute force attack is limited by the age of the password. Therefore, reducing the maximum age of a password also reduces an attacker's window of opportunity." 
- remediation: "Set the PASS_MAX_DAYS parameter to conform to site policy in /etc/login.defs : PASS_MAX_DAYS 90 and modify user parameters for all users with a password set to match: chage --maxdays 90 " - compliance: - - cis: ["5.4.1.1"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2.4"] - - tsc: ["CC6.1"] + + # 5.5.1.1 Ensure password expiration is 365 days or less. (Automated) + - id: 4681 + title: "Ensure password expiration is 365 days or less." + description: "The PASS_MAX_DAYS parameter in /etc/login.defs allows an administrator to force passwords to expire once they reach a defined age. It is recommended that the PASS_MAX_DAYS parameter be set to less than or equal to 365 days. Notes: - A value of -1 will disable password expiration. - The password expiration must be greater than the minimum days between password changes or users will be unable to change their password." + rationale: "The window of opportunity for an attacker to leverage compromised credentials via a brute force attack, using already compromised credentials, or gaining the credentials by other means, can be limited by the age of the password. Therefore, reducing the maximum age of a password can also reduce an attacker's window of opportunity. Requiring passwords to be changed helps to mitigate the risk posed by the poor security practice of passwords being used for multiple accounts, and poorly implemented off-boarding and change of responsibility policies. This should not be considered a replacement for proper implementation of these policies and practices. Note: If it is believed that a user's password may have been compromised, the user's account should be locked immediately. Local policy should be followed to ensure the secure update of their password." + remediation: "Set the PASS_MAX_DAYS parameter to conform to site policy in /etc/login.defs : PASS_MAX_DAYS 365 Modify user parameters for all users with a password set to match: # chage --maxdays 365 ." + compliance: + - cis: ["5.5.1.1"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.9.4.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'f:/etc/login.defs -> n:^\s*PASS_MAX_DAYS\s*\t*(\d+) compare <= 365' - # 5.4.1.2 Ensure minimum days between password changes is configured (Automated) - - id: 4678 - title: "Ensure minimum days between password changes is configured" + # 5.5.1.2 Ensure minimum days between password changes is configured. (Automated) + - id: 4682 + title: "Ensure minimum days between password changes is configured." description: "The PASS_MIN_DAYS parameter in /etc/login.defs allows an administrator to prevent users from changing their password until a minimum number of days have passed since the last time the user changed their password. It is recommended that PASS_MIN_DAYS parameter be set to 1 or more days." rationale: "By restricting the frequency of password changes, an administrator can prevent users from repeatedly changing their password in an attempt to circumvent password reuse controls." 
- remediation: "Set the PASS_MIN_DAYS parameter to 1 in /etc/login.defs: PASS_MIN_DAYS 1 and modify user parameters for all users with a password set to match: chage --mindays 1 " + remediation: "Set the PASS_MIN_DAYS parameter to 1 in /etc/login.defs : PASS_MIN_DAYS 1 Modify user parameters for all users with a password set to match: # chage --mindays 1 ." compliance: - - cis: ["5.4.1.2"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + - cis: ["5.5.1.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.9.4.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'f:/etc/login.defs -> n:^\s*PASS_MIN_DAYS\s*\t*(\d+) compare >= 1' - # 5.4.1.3 Ensure password expiration warning days is 7 or more (Automated) - - id: 4679 - title: "Ensure minimum days between password changes is 7 or more" + # 5.5.1.3 Ensure password expiration warning days is 7 or more. (Automated) + - id: 4683 + title: "Ensure password expiration warning days is 7 or more." description: "The PASS_WARN_AGE parameter in /etc/login.defs allows an administrator to notify users that their password will expire in a defined number of days. It is recommended that the PASS_WARN_AGE parameter be set to 7 or more days." rationale: "Providing an advance warning that a password will be expiring gives users time to think of a secure password. Users caught unaware may choose a simple password or write it down where it may be discovered." - remediation: "Set the PASS_WARN_AGE parameter to 7 in /etc/login.defs: PASS_WARN_AGE 7 and modify user parameters for all users with a password set to match: chage --warndays 7 " + remediation: "Set the PASS_WARN_AGE parameter to 7 in /etc/login.defs : PASS_WARN_AGE 7 Modify user parameters for all users with a password set to match: # chage --warndays 7 ." compliance: - - cis: ["5.4.1.3"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + - cis: ["5.5.1.3"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.9.4.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'f:/etc/login.defs -> n:^\s*PASS_WARN_AGE\s*\t*(\d+) compare >= 7' - # 5.4.1.4 Ensure inactive password lock is 30 days or less (Automated) - - id: 4680 - title: "Ensure inactive password lock is 30 days or less" - description: "User accounts that have been inactive for over a given period of time can be automatically disabled. It is recommended that accounts that are inactive for 30 days after password expiration be disabled." + # 5.5.1.4 Ensure inactive password lock is 30 days or less. (Automated) + - id: 4684 + title: "Ensure inactive password lock is 30 days or less." + description: "User accounts that have been inactive for over a given period of time can be automatically disabled. It is recommended that accounts that are inactive for 30 days after password expiration be disabled. Note: A value of -1 would disable this setting." 
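+    # Illustrative verification (editor's example, not CIS text; <user> is a placeholder): the
+    # current default can be shown with "useradd -D | grep INACTIVE" and a single account's
+    # setting with "chage -l <user>" before applying the remediation that follows.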
rationale: "Inactive accounts pose a threat to system security since the users are not logging in to notice failed login attempts or other anomalies." - remediation: "Run the following command to set the default password inactivity period to 30 days: useradd -D -f 30 and modify user parameters for all users with a password set to match: chage --inactive 30 " + remediation: "Run the following command to set the default password inactivity period to 30 days: # useradd -D -f 30 Modify user parameters for all users with a password set to match: # chage --inactive 30 ." compliance: - - cis: ["5.4.1.4"] - - cis_csc: ["16"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + - cis: ["5.5.1.4"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["16.9"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'c:useradd -D -> n:^\s*INACTIVE\s*=\s*(\d+) compare <= 30' - # 5.4.3 Ensure default group for the root account is GID 0 (Automated) - - id: 4681 - title: "Ensure default group for the root account is GID 0" + # 5.5.1.5 Ensure all users last password change date is in the past. (Automated) - Not Implemented + # 5.5.2 Ensure system accounts are secured. (Automated) - Not Implemented + + # 5.5.3 Ensure default group for the root account is GID 0. (Automated) + - id: 4685 + title: "Ensure default group for the root account is GID 0." description: "The usermod command can be used to specify which group the root user belongs to. This affects permissions of files that are created by the root user." rationale: "Using GID 0 for the root account helps prevent root -owned files from accidentally becoming accessible to non-privileged users." - remediation: "Run the following command to set the root user default group to GID 0: usermod -g 0 root" + remediation: "Run the following command to set the root user default group to GID 0 : # usermod -g 0 root." compliance: - - cis: ["5.4.3"] - - cis_csc: ["5.1"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + - cis: ["5.5.3"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'f:/etc/passwd -> r:^root:\w:\w:0' - # 5.4.4 Ensure default user shell timeout is configured (Automated) - - id: 4682 - title: " Ensure default user shell timeout is configured" - description: "TMOUT is an environmental setting that determines the timeout of a shell in seconds." + # 5.5.4 Ensure default user shell timeout is configured. (Automated) + - id: 4686 + title: "Ensure default user shell timeout is configured." + description: "TMOUT is an environmental setting that determines the timeout of a shell in seconds. - TMOUT=n - Sets the shell timeout to n seconds. A setting of TMOUT=0 disables - timeout. readonly TMOUT-Sets the TMOUT environmental variable as readonly, preventing unwanted modification during run-time. -export TMOUT -exports the TMOUT variable System Wide Shell Configuration Files: -/etc/profile -used to set system wide environmental variables on users shells. 
The variables are sometimes the same ones that are in the .bash_profile, however this file is used to set an initial PATH or PS1 for all shell users of the system. is only executed for interactive login shells, or shells executed with the --login parameter. -/etc/profile.d -/etc/profile will execute the scripts within /etc/profile.d/*.sh. It is recommended to place your configuration in a shell script within /etc/profile.d to set your own system wide environmental variables. -/etc/bashrc -System wide version of .bashrc. In Fedora derived distributions, etc/bashrc also invokes /etc/profile.d/*.sh if non-login shell, but redirects output to /dev/null if non-interactive. Is only executed for interactive shells or if BASH_ENV is set to /etc/bashrc." rationale: "Setting a timeout value reduces the window of opportunity for unauthorized user access to another user's shell session that has been left unattended. It also ends the inactive session and releases the resources associated with that session." - remediation: "Edit the /etc/bashrc and /etc/profile files (and the appropriate files for any other shell supported on your system) and add or edit any umask parameters as follows: TMOUT=600" + remediation: "Review /etc/bashrc, /etc/profile, and all files ending in *.sh in the /etc/profile.d/ directory and remove or edit all TMOUT=_n_ entries to follow local site policy. TMOUT should not exceed 900 or be equal to 0. Configure TMOUT in one of the following files: - A file in the /etc/profile.d/ directory ending in .sh - /etc/profile - /etc/bashrc TMOUT configuration examples: - As multiple lines: TMOUT=900 readonly TMOUT export TMOUT - As a single line: readonly TMOUT=900 ; export TMOUT." compliance: - - cis: ["5.4.4"] - - cis_csc: ["16.11"] - - pci_dss: ["12.3.8"] + - cis: ["5.5.4"] + - cis_csc_v8: ["4.3"] + - cis_csc_v7: ["16.11"] + - cmmc_v2.0: ["AC.L2-3.1.10", "AC.L2-3.1.11"] + - hipaa: ["164.312(a)(2)(iii)"] + - iso_27001-2013: ["A.8.1.3"] + - nist_sp_800-53: ["AC-11", "AC-11(1)", "AC-12", "AC-2(5)"] + - pci_dss_v3.2.1: ["8.1.8"] + - pci_dss_v4.0: ["8.2.8"] condition: all rules: - - 'not f:/etc/bashrc -> n:^\s*\t*TMOUT\s*\t*=\s*\t*(\d+) compare > 900' - - 'not f:/etc/profile -> n:^\s*\t*TMOUT\s*\t*=\s*\t*(\d+) compare > 900' - - 'f:/etc/bashrc -> n:^\s*\t*TMOUT\s*\t*=\s*\t*(\d+) compare <= 900' - - 'f:/etc/profile -> n:^\s*\t*TMOUT\s*\t*=\s*\t*(\d+) compare <= 900' + - 'not f:/etc/bashrc -> !r:^# && n:TMOUT\s*\t*=\s*\t*(\d+) compare > 900' + - 'not c:grep -Rh TMOUT /etc/profile /etc/profile.d/*.sh -> !r:^# && n:TMOUT\s*\t*=\s*\t*(\d+) compare > 900' + - 'f:/etc/bashrc -> !r:^# && n:readonly TMOUT\s*=\s*(\d+)\s*; compare <= 900 && r:export TMOUT\s*$' + - 'c:grep -Rh TMOUT /etc/profile /etc/profile.d/*.sh -> !r:^# && n:readonly TMOUT\s*=\s*(\d+)\s*; compare <= 900 && r:export TMOUT\s*$' - # 5.4.5 Ensure default user umask is configured (Automated) - - id: 4683 - title: "Ensure default user umask is configured" - description: "The user file-creation mode mask (umask) is use to determine the file permission for newly created directories and files. In Linux, the default permissions for any newly created directory is 0777 (rwxrwxrwx), and for any newly created file it is 0666 (rw-rw-rw-). The umask modifies the default Linux permissions by restricting (masking) these permissions. The umask is not simply subtracted, but is processed bitwise. Bits set in the umaskare cleared in the resulting file mode." 
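  # Illustrative shell sketch for 5.5.1.4, 5.5.3 and 5.5.4 (ids 4684, 4685, 4686) above, not part of the
  # benchmark text; the file name tmout.sh is an arbitrary example and <username> is a placeholder:
  #   useradd -D -f 30 && useradd -D | grep INACTIVE        # default inactivity period (expected INACTIVE=30)
  #   chage --inactive 30 <username>                        # apply to existing accounts with a password set
  #   usermod -g 0 root && grep '^root:' /etc/passwd        # root's primary group should be GID 0
  #   printf 'readonly TMOUT=900 ; export TMOUT\n' > /etc/profile.d/tmout.sh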
- rationale: "Setting a very secure default value for umask ensures that users make a conscious choice about their file permissions. A default umask setting of 077 causes files and directories created by users to not be readable by any other user on the system. A umask of 027 would make files and directories readable by users in the same Unix group, while a umask of 022 would make files readable by every user on the system." - remediation: "Edit the /etc/bashrc, /etc/profile and /etc/profile.d/*.sh files (and the appropriate files for any other shell supported on your system) and add or edit any umask parameters as follows: umask 027" - compliance: - - cis: ["5.4.4"] - - cis_csc: ["13"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + # 5.5.5 Ensure default user umask is configured. (Automated) + - id: 4687 + title: "Ensure default user umask is configured." + description: "The user file-creation mode mask (umask) is use to determine the file permission for newly created directories and files. In Linux, the default permissions for any newly created directory is 0777 (rwxrwxrwx), and for any newly created file it is 0666 (rw-rw-rw-). The umask modifies the default Linux permissions by restricting (masking) these permissions. The umask is not simply subtracted, but is processed bitwise. Bits set in the umask are cleared in the resulting file mode. umask can be set with either octal or Symbolic values: - Octal (Numeric) Value - Represented by either three or four digits. ie umask 0027 or umask 027. If a four digit umask is used, the first digit is ignored. The remaining three digits effect the resulting permissions for user, group, and world/other respectively. - Symbolic Value - Represented by a comma separated list for User u, group g, and world/other o. The permissions listed are not masked by umask. ie a umask set by umask u=rwx,g=rx,o= is the Symbolic equivalent of the Octal umask 027. This umask would set a newly created directory with file mode drwxr-x--- and a newly created file with file mode rw-r-----. The default umask can be set to use the pam_umask module or in a System Wide Shell Configuration File. The user creating the directories or files has the discretion of changing the permissions via the chmod command, or choosing a different default umask by adding the umask command into a User Shell Configuration File, (.bash_profile or .bashrc), in their home directory. Setting the default umask: - pam_umask module: o will set the umask according to the system default in /etc/login.defs and user settings, solving the problem of different umask settings with different shells, display managers, remote sessions etc. o umask= value in the /etc/login.defs file is interpreted as Octal o Setting USERGROUPS_ENAB to yes in /etc/login.defs (default): will enable setting of the umask group bits to be the same as owner bits. (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is the same as gid, and username is the same as the userdel will remove the user's group if it contains no more members, and useradd will create by default a group with the name of the user - System Wide Shell Configuration File: o /etc/profile - used to set system wide environmental variables on users shells. The variables are sometimes the same ones that are in the .bash_profile, however this file is used to set an initial PATH or PS1 for all shell users of the system. is only executed for interactive login shells, or shells executed with the --login parameter. 
o /etc/profile.d - /etc/profile will execute the scripts within /etc/profile.d/*.sh. It is recommended to place your configuration in a shell script within /etc/profile.d to set your own system wide environmental variables. o /etc/bashrc - System wide version of .bashrc. In Fedora derived distributions, etc/bashrc also invokes /etc/profile.d/*.sh if non-login shell, but redirects output to /dev/null if non-interactive. Is only executed for interactive shells or if BASH_ENV is set to /etc/bashrc. User Shell Configuration Files: - ~/.bash_profile - Is executed to configure your shell before the initial command prompt. Is only read by login shells. - ~/.bashrc - Is executed for interactive shells. only read by a shell that's both interactive and non-login." + rationale: "Setting a secure default value for umask ensures that users make a conscious choice about their file permissions. A permissive umask value could result in directories or files with excessive permissions that can be read and/or written to by unauthorized users." + remediation: "Review /etc/bashrc, /etc/profile, and all files ending in *.sh in the /etc/profile.d/ directory and remove or edit all umask entries to follow local site policy. Any remaining entries should be: umask 027, umask u=rwx,g=rx,o= or more restrictive. Configure umask in one of the following files: - A file in the /etc/profile.d/ directory ending in .sh - /etc/profile - /etc/bashrc Example: # vi /etc/profile.d/set_umask.sh umask 027 Run the following command and remove or modify the umask of any returned files: # grep -RPi '(^|^[^#]*)\\s*umask\\s+([0-7][0-7][01][0-7]\\b|[0-7][0-7][0-7][0- 6]\\b|[0-7][01][0-7]\\b|[0-7][0-7][0- 6]\\b|(u=[rwx]{0,3},)?(g=[rwx]{0,3},)?o=[rwx]+\\b|(u=[rwx]{1,3},)?g=[^rx]{1,3}( ,o=[rwx]{0,3})?\\b)' /etc/login.defs /etc/profile* /etc/bashrc* Follow one of the following methods to set the default user umask: Edit /etc/login.defs and edit the UMASK and USERGROUPS_ENAB lines as follows: UMASK 027 USERGROUPS_ENAB no Edit the files /etc/pam.d/password-auth and /etc/pam.d/system-auth and add or edit the following: session optional pam_umask.so OR Configure umask in one of the following files: - A file in the /etc/profile.d/ directory ending in .sh - /etc/profile - /etc/bashrc Example: /etc/profile.d/set_umask.sh umask 027 Note: this method only applies to bash and shell. If other shells are supported on the system, it is recommended that their configuration files also are checked." 
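  # Illustrative shell sketch for 5.5.5 (id 4687) above, not part of the benchmark text; set_umask.sh is the
  # example file name used in the remediation, and a fresh login shell is assumed for verification:
  #   printf 'umask 027\n' > /etc/profile.d/set_umask.sh
  #   grep -Ei '^\s*UMASK|^\s*USERGROUPS_ENAB' /etc/login.defs   # expected: UMASK 027, USERGROUPS_ENAB no
  #   su - <testuser> -c umask                                   # a login shell should now report 0027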
+ compliance: + - cis: ["5.5.5"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1", "13"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - - 'f:/etc/bash.bashrc -> !r:^\s*\t*# && r:umask \d0\d|umask \d1\d|umask \d4\d|umask \d5\d' - - 'f:/etc/bash.bashrc -> !r:^\s*\t*# && n:umask \d\d(\d) compare != 7' + - 'f:/etc/bashrc -> !r:^\s*\t*# && r:umask \d0\d|umask \d1\d|umask \d4\d|umask \d5\d' + - 'f:/etc/bashrc -> !r:^\s*\t*# && n:umask \d\d(\d) compare != 7' - 'f:/etc/profile -> !r:^\s*\t*# && r:umask \d0\d|umask \d1\d|umask \d4\d|umask \d5\d' - 'f:/etc/profile -> !r:^\s*\t*# && n:umask \d\d(\d) compare != 7' - 'd:/etc/profile.d -> .sh -> !r:^\s*\t*# && r:umask \d0\d|umask \d1\d|umask \d4\d|umask \d5\d' - 'd:/etc/profile.d -> .sh -> !r:^\s*t*# && n:umask \d\d(\d) compare != 7' - # 5.6 Ensure access to the su command is restricted (Automated) - - id: 4684 + # 5.6 Ensure root login is restricted to system console. (Manual) - Not Implemented + + # 5.7 Ensure access to the su command is restricted. (Automated) + - id: 4688 title: "Ensure access to the su command is restricted." - description: "The su command allows a user to run a command or shell as another user. The program has been superseded by sudo, which allows for more granular control over privileged access. Normally, the su command can be executed by any user. By uncommenting the pam_wheel.so statement in /etc/pam.d/su, the su command will only allow users in the wheel group to execute su ." - rationale: "Restricting the use of su, and using sudo in its place, provides system administrators better control of the escalation of user privileges to execute privileged commands. The sudo utility also provides a better logging and audit mechanism, as it can log each command executed via sudo, whereas su can only record that a user executed the su program." - remediation: "Add the following line to the /etc/pam.d/su file: auth required pam_wheel.so use_uid" + description: "The su command allows a user to run a command or shell as another user. The program has been superseded by sudo, which allows for more granular control over privileged access. Normally, the su command can be executed by any user. By uncommenting the pam_wheel.so statement in /etc/pam.d/su, the su command will only allow users in a specific groups to execute su. This group should be empty to reinforce the use of sudo for privileged access." + rationale: "Restricting the use of su , and using sudo in its place, provides system administrators better control of the escalation of user privileges to execute privileged commands. The sudo utility also provides a better logging and audit mechanism, as it can log each command executed via sudo , whereas su can only record that a user executed the su program." + remediation: "Create an empty group that will be specified for use of the su command. The group should be named according to site policy. Example: # groupadd sugroup Add the following line to the /etc/pam.d/su file, specifying the empty group: auth required pam_wheel.so use_uid group=sugroup." 
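  # Illustrative shell sketch for 5.7 (id 4688) above, not part of the benchmark text; sugroup is the example
  # group name from the remediation, and placement of the pam_wheel line within the PAM stack should follow
  # local policy:
  #   groupadd sugroup
  #   # add to /etc/pam.d/su:  auth required pam_wheel.so use_uid group=sugroup
  #   grep pam_wheel.so /etc/pam.d/su    # verify the line is present and uncommented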
compliance: - - cis: ["5.6"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["5.7"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'f:/etc/pam.d/su -> r:^auth\s*\t*required\s*\t*pam_wheel.so\s*\t*use_uid' + - 'f:/etc/pam.d/su -> !r:^\s*\t*# && r:auth\s*\t*required\s*\t*pam_wheel.so && r:use_uid && r:group=\w+' + + # 6.1.1 Audit system file permissions. (Manual) - Not Implemented ############################################### # 6 System Maintenance @@ -3338,201 +4306,250 @@ checks: # 6.1 System File Permissions ############################################### - # 6.1.2 Configure /etc/passwd permissions (Automated) - - id: 4685 - title: "Ensure permissions on /etc/passwd are configured" + # 6.1.2 Ensure permissions on /etc/passwd are configured. (Automated) + - id: 4689 + title: "Ensure permissions on /etc/passwd are configured." description: "The /etc/passwd file contains user account information that is used by many system utilities and therefore must be readable for these utilities to operate." rationale: "It is critical to ensure that the /etc/passwd file is protected from unauthorized write access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." - remediation: "Run the following command to set permissions on /etc/passwd: # chown root:root /etc/passwd # chmod u-x,g-wx,o-wx /etc/passwd" + remediation: "Run the following commands to set owner, group, and permissions on /etc/passwd : # chown root:root /etc/passwd # chmod u-x,g-wx,o-wx /etc/passwd." compliance: - cis: ["6.1.2"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/passwd -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 6.1.3 Ensure permissions on /etc/shadow are configured (Automated) - - id: 4686 - title: "Ensure permissions on /etc/shadow are configured" - description: "The /etc/shadow file is used to store the information about user accounts that is critical to the security of those accounts, such as the hashed password and other security information." - rationale: "If attackers can gain read access to the /etc/shadow file, they can easily run a password cracking program against the hashed password to break it. Other security information that is stored in the /etc/shadow file (such as expiration) could also be useful to subvert the user accounts." 
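  # Illustrative shell sketch for 6.1.2 (id 4689) above, not part of the benchmark text:
  #   chown root:root /etc/passwd
  #   chmod u-x,g-wx,o-wx /etc/passwd
  #   stat -L -c '%a %U %G' /etc/passwd    # expected: 644 root root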
- remediation: "Run the following command to set permissions on /etc/shadow: # chown root:root /etc/shadow # chmod 000 /etc/shadow" + # 6.1.3 Ensure permissions on /etc/passwd- are configured. (Automated) + - id: 4690 + title: "Ensure permissions on /etc/passwd- are configured." + description: "The /etc/passwd- file contains backup user account information." + rationale: "It is critical to ensure that the /etc/passwd- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." + remediation: "Run the following commands to set owner, group, and permissions on /etc/passwd- : # chown root:root /etc/passwd- # chmod u-x,go-wx /etc/passwd-." compliance: - cis: ["6.1.3"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/shadow -> r:Access:\s*\(0000/----------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' + - 'c:stat -L /etc/passwd- -> r:Access:\s*\(0\d00/-\w\w-------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 6.1.4 Ensure permissions on /etc/group are configured (Automated) - - id: 4687 - title: "Ensure permissions on /etc/group are configured" - description: "The /etc/group file contains a list of all the valid groups defined in the system. The command below allows read/write access for root and read access for everyone else." - rationale: "The /etc/group file needs to be protected from unauthorized changes by non-privileged users, but needs to be readable as this information is used with many non-privileged programs." - remediation: "Run the following command to set permissions on /etc/group: # chown root:root /etc/group # chmod u-x,g-wx,o-wx /etc/group" + # 6.1.4 Ensure permissions on /etc/shadow are configured. (Automated) + - id: 4691 + title: "Ensure permissions on /etc/shadow are configured." + description: "The /etc/shadow file is used to store the information about user accounts that is critical to the security of those accounts, such as the hashed password and other security information." + rationale: "If attackers can gain read access to the /etc/shadow file, they can easily run a password cracking program against the hashed password to break it. Other security information that is stored in the /etc/shadow file (such as expiration) could also be useful to subvert the user accounts." + remediation: "Run the following commands to set owner, group, and permissions on /etc/shadow : # chown root:root /etc/shadow # chmod 0000 /etc/shadow." 
compliance: - cis: ["6.1.4"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/group -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' + - 'c:stat -L /etc/shadow -> r:Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)|Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*\w*/\s*\t*shadow\)' - # 6.1.5 Ensure permissions on /etc/gshadow are configured (Automated) - - id: 4688 - title: "Ensure permissions on /etc/gshadow are configured" - description: "The /etc/gshadow file is used to store the information about groups that is critical to the security of those accounts, such as the hashed password and other security information." - rationale: "If attackers can gain read access to the /etc/gshadow file, they can easily run a password cracking program against the hashed password to break it. Other security information that is stored in the /etc/gshadow file (such as group administrators) could also be useful to subvert the group" - remediation: "Run the following command to set permissions on /etc/gshadow: # chown root:root /etc/gshadow # chmod 000 /etc/gshadow" + # 6.1.5 Ensure permissions on /etc/shadow- are configured. (Automated) + - id: 4692 + title: "Ensure permissions on /etc/shadow- are configured." + description: "The /etc/shadow- file is used to store backup information about user accounts that is critical to the security of those accounts, such as the hashed password and other security information." + rationale: "It is critical to ensure that the /etc/shadow- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." + remediation: "Run the following commands to set owner, group, and permissions on /etc/shadow- : # chown root:root /etc/shadow- # chmod 0000 /etc/shadow-." 
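  # Illustrative shell sketch for 6.1.4 and 6.1.5 (ids 4691, 4692) above, not part of the benchmark text;
  # the checks also accept group ownership by the shadow group:
  #   chown root:root /etc/shadow /etc/shadow-
  #   chmod 0000 /etc/shadow /etc/shadow-
  #   stat -L -c '%n %a %U %G' /etc/shadow /etc/shadow-    # expected: 0 root root for both files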
compliance: - cis: ["6.1.5"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/gshadow -> r:Access:\s*\(0000/----------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' + - 'c:stat -L /etc/shadow- -> r:Access:\s*\(0\d00/-\w\w-------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)|Access:\s*\(0\d00/-\w\w-------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*\w*/\s*\t*shadow\)' - # 6.1.6 Ensure permissions on /etc/passwd-are configured (Automated) - - id: 4689 - title: "Ensure permissions on /etc/passwd- are configured" - description: "The /etc/passwd- file contains backup user account information." - rationale: "It is critical to ensure that the /etc/passwd- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." - remediation: "Run the following command to set permissions on /etc/passwd-: # chown root:root /etc/passwd- # chmod 644 /etc/passwd-" + # 6.1.6 Ensure permissions on /etc/gshadow- are configured. (Automated) + - id: 4693 + title: "Ensure permissions on /etc/gshadow- are configured." + description: "The /etc/gshadow- file is used to store backup information about groups that is critical to the security of those accounts, such as the hashed password and other security information." + rationale: "It is critical to ensure that the /etc/gshadow- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." + remediation: "Run the following commands to set owner, group, and permissions on /etc/gshadow- : # chown root:root /etc/gshadow- # chmod 0000 /etc/gshadow-." compliance: - cis: ["6.1.6"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/passwd- -> r:Access:\s*\(0\d\d\d/-\w\w-\w--\w--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' + - 'c:stat -L /etc/gshadow- -> r:Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)|Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*\w*/\s*\t*shadow\)' - # 6.1.7 Ensure permissions on /etc/shadow-are configured (Automated) - - id: 4690 - title: "Ensure permissions on /etc/shadow- are configured" - description: "The /etc/shadow- file is used to store backup information about user accounts that is critical to the security of those accounts, such as the hashed password and other security information." 
- rationale: "It is critical to ensure that the /etc/shadow- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." - remediation: "Run the following command to set permissions on /etc/shadow-: # chown root:root /etc/shadow- # chmod 000 /etc/shadow-" + # 6.1.7 Ensure permissions on /etc/gshadow are configured. (Automated) + - id: 4694 + title: "Ensure permissions on /etc/gshadow are configured." + description: "The /etc/gshadow file is used to store the information about groups that is critical to the security of those accounts, such as the hashed password and other security information." + rationale: "If attackers can gain read access to the /etc/gshadow file, they can easily run a password cracking program against the hashed password to break it. Other security information that is stored in the /etc/gshadow file (such as group administrators) could also be useful to subvert the group." + remediation: "Run the following commands to set owner, group, and permissions on /etc/gshadow : # chown root:root /etc/gshadow # chmod 0000 /etc/gshadow." compliance: - cis: ["6.1.7"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/shadow- -> r:Access:\s*\(0000/----------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' + - 'c:stat -L /etc/gshadow -> r:Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)|Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*\w*/\s*\t*shadow\)' - # 6.1.8 Ensure permissions on /etc/group-are configured (Automated) - - id: 4691 - title: "Ensure permissions on /etc/group- are configured" - description: "The /etc/group- file contains a backup list of all the valid groups defined in the system." - rationale: "It is critical to ensure that the /etc/group- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." - remediation: "Run the following command to set permissions on /etc/group-: # chown root:root /etc/group- # chmod 644 /etc/group-" + # 6.1.8 Ensure permissions on /etc/group are configured. (Automated) + - id: 4695 + title: "Ensure permissions on /etc/group are configured." + description: "The /etc/group file contains a list of all the valid groups defined in the system. The command below allows read/write access for root and read access for everyone else." + rationale: "The /etc/group file needs to be protected from unauthorized changes by non-privileged users, but needs to be readable as this information is used with many non-privileged programs." + remediation: "Run the following commands to set owner, group, and permissions on /etc/group : # chown root:root /etc/group # chmod u-x,g-wx,o-wx /etc/group." 
compliance: - cis: ["6.1.8"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/group- -> r:Access:\s*\(0\d\d\d/-\w\w-\w--\w--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' + - 'c:stat -L /etc/group -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 6.1.9 Ensure permissions on /etc/gshadow-are configured (Automated) - - id: 4692 - title: "Ensure permissions on /etc/gshadow- are configured" - description: "The /etc/gshadow- file is used to store backup information about groups that is critical to the security of those accounts, such as the hashed password and other security information." - rationale: "It is critical to ensure that the /etc/gshadow- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." - remediation: "Run the following command to set permissions on /etc/gshadow-: # chown root:root /etc/gshadow- # chmod 000 /etc/gshadow-" + # 6.1.9 Ensure permissions on /etc/group- are configured. (Automated) + - id: 4696 + title: "Ensure permissions on /etc/group- are configured." + description: "The /etc/group- file contains a backup list of all the valid groups defined in the system." + rationale: "It is critical to ensure that the /etc/group- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." + remediation: "Run the following commands to set owner, group, and permissions on /etc/group-: # chown root:root /etc/group- # chmod u-x,go-wx /etc/group-." compliance: - cis: ["6.1.9"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:stat -L /etc/gshadow- -> r:Access:\s*\(0000/----------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' + - 'c:stat -L /etc/group- -> r:Access:\s*\(0\d\d\d/-\w\w-\w--\w--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - ############################################### - # 6.2 Review User and Group Settings - ############################################### + # 6.1.10 Ensure no world writable files exist. (Automated) - Not Implemented + # 6.1.11 Ensure no unowned files or directories exist. (Automated) - Not Implemented + # 6.1.12 Ensure no ungrouped files or directories exist. (Automated) - Not Implemented + # 6.1.13 Audit SUID executables. (Manual) - Not Implemented + # 6.1.14 Audit SGID executables. 
(Manual) - Not Implemented - # 6.2.1 Ensure accounts in /etc/passwd use shadowed passwords (Automated) - - id: 4693 - title: "Ensure accounts in /etc/passwd use shadowed passwords" - description: "Local accounts can uses shadowed passwords. With shadowed passwords, The passwords are saved in shadow password file, /etc/shadow, encrypted by a salted one-way hash. Accounts with a shadowed password have an xin the second field in /etc/passwd." - rationale: "The /etc/passwd file also contains information like user ID's and group ID's that are used by many system programs. Therefore, the /etc/passwd file must remain world readable. In spite of encoding the password with a randomly-generated one-way hash function, an attacker could still break the system if they got access to the /etc/passwd file. This can be mitigated by using shadowed passwords, thus moving the passwords in the /etc/passwd file to /etc/shadow. The /etc/shadow file is set so only root will be able to read and write. This helps mitigate the risk of an attacker gaining access to the encoded passwords with which to perform a dictionary attack." - remediation: "f any accounts in the /etc/passwdfile do not have a single x in the password field, run the following command to set these accounts to use shadowed passwords:# sed -e 's/^\\([a-zA-Z0-9_]*\\):[^:]*:/\\1:x:/' -i /etc/passwdInvestigate to determine if the account is logged in and what it is being used for, to determine if it needs to be forced off." + # 6.2.1 Ensure accounts in /etc/passwd use shadowed passwords. (Automated) + - id: 4697 + title: "Ensure accounts in /etc/passwd use shadowed passwords." + description: "Local accounts can uses shadowed passwords. With shadowed passwords, The passwords are saved in shadow password file, /etc/shadow, encrypted by a salted one-way hash. Accounts with a shadowed password have an x in the second field in /etc/passwd." + rationale: "The /etc/passwd file also contains information like user ID's and group ID's that are used by many system programs. Therefore, the /etc/passwd file must remain world readable. In spite of encoding the password with a randomly-generated one-way hash function, an attacker could still break the system if they got access to the /etc/passwd file. This can be mitigated by using shadowed passwords, thus moving the passwords in the /etc/passwd file to /etc/shadow. The /etc/shadow file is set so only root will be able to read and write. This helps mitigate the risk of an attacker gaining access to the encoded passwords with which to perform a dictionary attack. Notes: - All accounts must have passwords or be locked to prevent the account from being used by an unauthorized user. - A user account with an empty second field in /etc/passwd allows the account to be logged into by providing only the username." + remediation: "If any accounts in the /etc/passwd file do not have a single x in the password field, run the following command to set these accounts to use shadowed passwords: # sed -e 's/^\\([a-zA-Z0-9_]*\\):[^:]*:/\\1:x:/' -i /etc/passwd Investigate to determine if the account is logged in and what it is being used for, to determine if it needs to be forced off." 
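  # Illustrative shell sketch for 6.2.1 (id 4697) above, not part of the benchmark text; it mirrors the logic
  # of the rule below by listing accounts whose password field is not the single character 'x':
  #   awk -F: '($2 != "x") {print $1}' /etc/passwd
  #   # any account reported should be investigated, then remediated with the sed command from the remediation text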
compliance: - cis: ["6.2.1"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] - condition: all + - cis_csc_v8: ["3.11"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["AC.L2-3.1.19", "IA.L2-3.5.10", "MP.L2-3.8.1", "SC.L2-3.13.11", "SC.L2-3.13.16"] + - hipaa: ["164.312(a)(2)(iv)", "164.312(e)(2)(ii)"] + - iso_27001-2013: ["A.9.4.3"] + - nist_sp_800-53: ["SC-28", "SC-28(1)"] + - pci_dss_v3.2.1: ["3.4", "3.4.1", "8.2.1"] + - pci_dss_v4.0: ["3.1.1", "3.3.2", "3.3.3", "3.5.1", "3.5.1.2", "3.5.1.3", "8.3.2"] + - soc_2: ["CC6.1"] + condition: none rules: - - 'not f:/etc/shadow -> !r:^# && r:\w\w:' + - 'f:/etc/passwd -> !r:^\w+:x:' - # 6.2.2 Ensure /etc/shadow password fields are not empty (Automated) - - id: 4694 - title: "Ensure password fields are not empty" + # 6.2.2 Ensure /etc/shadow password fields are not empty. (Automated) + - id: 4698 + title: "Ensure /etc/shadow password fields are not empty." description: "An account with an empty password field means that anybody may log in as that user without providing a password." rationale: "All accounts must have passwords or be locked to prevent the account from being used by an unauthorized user." - remediation: "If any accounts in the /etc/shadow file do not have a password, run the following command to lock the account until it can be determined why it does not have a password: passwd -l || Also, check to see if the account is logged in and investigate what it is being used for to determine if it needs to be forced off." + remediation: "If any accounts in the /etc/shadow file do not have a password, run the following command to lock the account until it can be determined why it does not have a password: # passwd -l Also, check to see if the account is logged in and investigate what it is being used for to determine if it needs to be forced off." compliance: - cis: ["6.2.2"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + - cis_csc_v8: ["5.2"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["IA.L2-3.5.7"] + - iso_27001-2013: ["A.9.4.3"] + - pci_dss_v4.0: ["2.2.2", "8.3.5", "8.3.6", "8.6.3"] + - soc_2: ["CC6.1"] condition: none rules: - - 'f:/etc/shadow -> !r:^# && r:^\w+::' - - # 6.2.3 Ensure root is the only UID 0 account (Automated) - - id: 4695 - title: "Ensure root is the only UID 0 account" + - 'f:/etc/shadow -> r:^\w+::' + + # 6.2.3 Ensure all groups in /etc/passwd exist in /etc/group. (Automated) - Not Implemented + # 6.2.4 Ensure shadow group is empty. (Automated) - Not Implemented + # 6.2.5 Ensure no duplicate user names exist. (Automated) - Not Implemented + # 6.2.6 Ensure no duplicate group names exist. (Automated) - Not Implemented + # 6.2.7 Ensure no duplicate UIDs exist. (Automated) - Not Implemented + # 6.2.8 Ensure no duplicate GIDs exist. (Automated) - Not Implemented + # 6.2.9 Ensure root is the only UID 0 account. (Automated) + - id: 4699 + title: "Ensure root is the only UID 0 account." description: "Any account with UID 0 has superuser privileges on the system." rationale: "This access must be limited to only the default root account and only from the system console. Administrative access must be through an unprivileged account using an approved mechanism as noted in Item 5.6 Ensure access to the su command is restricted." remediation: "Remove any users other than root with UID 0 or assign them a new UID if appropriate." 
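  # Illustrative shell sketch for 6.2.2 and 6.2.9 (ids 4698, 4699) above, not part of the benchmark text;
  # <username> is a placeholder:
  #   awk -F: '($2 == "") {print $1}' /etc/shadow    # accounts with an empty password field
  #   passwd -l <username>                           # lock such accounts pending investigation
  #   awk -F: '($3 == 0) {print $1}' /etc/passwd     # should return only "root"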
compliance: - - cis: ["6.2.3"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["6.2.9"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: none rules: - 'f:/etc/passwd -> !r:^# && !r:^\s*\t*root: && r:^\w+:\w+:0:' - # 6.2.18 Ensure shadow group is empty (Automated) - - id: 4696 - title: "Ensure shadow group is empty" - description: "The shadow group allows system programs which require access the ability to read the /etc/shadow file. No users should be assigned to the shadow group" - rationale: "Any users assigned to the shadow group would be granted read access to the /etc/shadow file. If attackers can gain read access to the /etc/shadow file, they can easily runa password cracking program against the hashed passwords to break them. Other security information that is stored in the /etc/shadow file (such as expiration) could also be useful to subvert additional user accounts." - remediation: "Remove any legacy '+' entries from /etc/shadow if they exist." - compliance: - - cis: ["6.2.18"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: none - rules: - - 'not f:/etc/shadow -> !r:^# && r:^shadow:\.+:\.+:\.+:\.+' + # 6.2.10 Ensure root PATH Integrity. (Automated) - Not Implemented + # 6.2.11 Ensure all users' home directories exist. (Automated) - Not Implemented + # 6.2.12 Ensure users own their home directories. (Automated) - Not Implemented + # 6.2.13 Ensure users' home directories permissions are 750 or more restrictive. (Automated) - Not Implemented + # 6.2.14 Ensure users' dot files are not group or world writable. (Automated) - Not Implemented + # 6.2.15 Ensure no users have .forward files. (Automated) - Not Implemented + # 6.2.16 Ensure no users have .netrc files. (Automated) - Not Implemented + # 6.2.17 Ensure no users have .rhosts files. (Automated) - Not Implemented From 14ad3055c2458c3d69eb366cd90fade75974b397 Mon Sep 17 00:00:00 2001 From: Openime Oniagbi Date: Fri, 31 May 2024 19:00:04 +0300 Subject: [PATCH 385/419] Update cis_rhel8_linux.yml --- ruleset/sca/rhel/8/cis_rhel8_linux.yml | 5938 +++++++++++++++--------- 1 file changed, 3619 insertions(+), 2319 deletions(-) diff --git a/ruleset/sca/rhel/8/cis_rhel8_linux.yml b/ruleset/sca/rhel/8/cis_rhel8_linux.yml index 8b7de222c08..a288426c9e2 100644 --- a/ruleset/sca/rhel/8/cis_rhel8_linux.yml +++ b/ruleset/sca/rhel/8/cis_rhel8_linux.yml @@ -1,20 +1,20 @@ # Security Configuration Assessment -# CIS Checks for RHEL 8 +# CIS Checks for RHEL 8. # Copyright (C) 2015, Wazuh Inc. 
# # This program is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public -# License (version 2) as published by the FSF - Free Software -# Foundation +# License (version 2) as published by the FSF - Free Software Foundation +# # # Based on: -# Center for Internet Security Red Hat Enterprise Linux 8 Benchmark v1.0.1 - 05-19-2021 +# Center for Internet Security Red Hat Enterprise Linux 8 Benchmark v2.0.0 - 02-23-2022 policy: id: "cis_rhel8_linux" file: "cis_rhel8_linux.yml" - name: "CIS Red Hat Enterprise Linux 8 Benchmark v1.0.1" - description: "This document provides prescriptive guidance for establishing a secure configuration posture for Red Hat Enterprise Linux 8 systems running on x86 and x64 platforms. This document was tested against Red Hat Enterprise Linux 8" + name: "CIS Red Hat Enterprise Linux 8 Benchmark v2.0.0" + description: "This document provides prescriptive guidance for establishing a secure configuration posture for Red Hat Enterprise Linux 8 systems running on x86 and x64 platforms. This document was tested against Red Hat Enterprise Linux 8." references: - https://www.cisecurity.org/cis-benchmarks/ @@ -33,666 +33,1016 @@ variables: $sshd_file: /etc/ssh/sshd_config checks: - ############################################### - # 1 Initial setup - ############################################### - ############################################### - # 1.1 Filesystem Configuration - ############################################### - # 1.1.1.1 cramfs: filesystem + # 1.1.1.1 Ensure mounting of cramfs filesystems is disabled. (Automated) - id: 5000 - title: "Ensure mounting of cramfs filesystems is disabled" + title: "Ensure mounting of cramfs filesystems is disabled." description: "The cramfs filesystem type is a compressed read-only Linux filesystem embedded in small footprint systems. A cramfs image can be used without having to first decompress the image." - rationale: "Removing support for unneeded filesystem types reduces the local attack surface of the server. If this filesystem type is not needed, disable it." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf .Example: vim /etc/modprobe.d/cramfs.conf: and add the following line: install cramfs /bin/true. Run the following command to unload the cramfs module: # rmmod cramfs" + rationale: "Removing support for unneeded filesystem types reduces the local attack surface of the system. If this filesystem type is not needed, disable it." + remediation: 'Edit or create a file in the /etc/modprobe.d/ directory ending in .conf with a line that reads install cramfs /bin/false and a line the reads blacklist cramfs. Example: # printf "install cramfs /bin/false blacklist cramfs " >> /etc/modprobe.d/cramfs.conf Run the following command to unload the cramfs module: # modprobe -r cramfs.' 
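  # Illustrative shell sketch for 1.1.1.1 (id 5000) above, not part of the benchmark text; the same pattern
  # applies to the other filesystem modules in this section:
  #   printf 'install cramfs /bin/false\nblacklist cramfs\n' >> /etc/modprobe.d/cramfs.conf
  #   modprobe -r cramfs
  #   modprobe -n -v cramfs        # expected: install /bin/false (or module not found)
  #   lsmod | grep cramfs          # expected: no output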
compliance: - cis: ["1.1.1.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.5"] - - tsc: ["CC6.3"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:modprobe -n -v cramfs -> r:install /bin/true|Module cramfs not found" + - "c:modprobe -n -v cramfs -> r:install /bin/false|Module cramfs not found" - "not c:lsmod -> r:cramfs" + - 'd:/etc/modprobe.d -> r:\.*.conf -> r:blacklist\t*\s*cramfs' - # 1.1.1.2 vFAT: filesystem + # 1.1.1.2 Ensure mounting of squashfs filesystems is disabled. (Automated) - id: 5001 - title: "Ensure mounting of FAT filesystems is limited" - description: "The VFAT filesystem format is primarily used on older windows systems and portable USB drives or flash modules. It comes in three types FAT12 , FAT16 , and FAT32 all of which are supported by the vfat kernel module." + title: "Ensure mounting of squashfs filesystems is disabled." + description: "The squashfs filesystem type is a compressed read-only Linux filesystem embedded in small footprint systems. A squashfs image can be used without having to first decompress the image." rationale: "Removing support for unneeded filesystem types reduces the local attack surface of the system. If this filesystem type is not needed, disable it." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf . Example: vim /etc/modprobe.d/vfat.conf: install vfat /bin/true. Run the following command to unload the vfat module: # rmmod vfat" + impact: 'As Snap packages utilizes squashfs as a compressed filesystem, disabling squashfs will cause Snap packages to fail. Snap application packages of software are self-contained and work across a range of Linux distributions. This is unlike traditional Linux package management approaches, like APT or RPM, which require specifically adapted packages per Linux distribution on an application update and delay therefore application deployment from developers to their software''s end-user. Snaps themselves have no dependency on any external store ("App store"), can be obtained from any source and can be therefore used for upstream software deployment.' + remediation: 'Edit or create a file in the /etc/modprobe.d/ directory ending in .conf with the lines that reads install squashfs /bin/false and blacklist squashfs. Example: # printf "install squashfs /bin/false blacklist squashfs " >> /etc/modprobe.d/squashfs.conf Run the following command to unload the squashfs module: # modprobe -r squashfs.' compliance: - cis: ["1.1.1.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.5"] - - tsc: ["CC6.3"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:modprobe -n -v vfat -> r:install /bin/true|Module vfat not found" - - "not c:lsmod -> r:vfat" + - "c:modprobe -n -v squashfs -> r:install /bin/false|Module squashfs not found" + - "not c:lsmod -> r:squashfs" + - 'd:/etc/modprobe.d -> r:\.*.conf -> r:blacklist\t*\s*squashfs' - # 1.1.1.3 squashfs: filesystem + # 1.1.1.3 Ensure mounting of udf filesystems is disabled. 
(Automated) - id: 5002 - title: "Ensure mounting of squashfs filesystems is disabled" - description: "The squashfs filesystem type is a compressed read-only Linux filesystem embedded in small footprint systems (similar to cramfs ). A squashfs image can be used without having to first decompress the image." + title: "Ensure mounting of udf filesystems is disabled." + description: "The udf filesystem type is the universal disk format used to implement ISO/IEC 13346 and ECMA-167 specifications. This is an open vendor filesystem type for data storage on a broad range of media. This filesystem type is necessary to support writing DVDs and newer optical disc formats." rationale: "Removing support for unneeded filesystem types reduces the local attack surface of the system. If this filesystem type is not needed, disable it." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf . Example: vim /etc/modprobe.d/squashfs.conf and add the following line: install squashfs /bin/true. Run the following command to unload the squashfs module: rmmod squashfs" + impact: "Microsoft Azure requires the usage of udf. udf should not be disabled on systems run on Microsoft Azure." + remediation: 'Edit or create a file in the /etc/modprobe.d/ directory ending in .conf with a line that reads install udf /bin/false. Example: # printf "install udf /bin/false blacklist udf " >> /etc/modprobe.d/udf.conf Run the following command to unload the udf module: # modprobe -r udf.' compliance: - cis: ["1.1.1.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.5"] - - tsc: ["CC6.3"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:modprobe -n -v squashfs -> r:install /bin/true|Module squashfs not found" - - "not c:lsmod -> r:squashfs" + - "c:modprobe -n -v udf -> r:install /bin/false|Module udf not found" + - "not c:lsmod -> r:udf" + - 'd:/etc/modprobe.d -> r:\.*.conf -> r:blacklist\t*\s*udf' - # 1.1.1.4 udfs: filesystem + # 1.1.2.1 Ensure /tmp is a separate partition. (Automated) - id: 5003 - title: "Ensure mounting of udf filesystems is disabled" - description: "The udf filesystem type is the universal disk format used to implement ISO/IEC 13346 and ECMA-167 specifications. This is an open vendor filesystem type for data storage on a broad range of media. This filesystem type is necessary to support writing DVDs and newer optical disc formats." - rationale: "Removing support for unneeded filesystem types reduces the local attack surface of the system. If this filesystem type is not needed, disable it." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf. Example: vim /etc/modprobe.d/udf.conf and add the following line: install udf /bin/true. Run the following command to unload the udf module: # rmmod udf" - compliance: - - cis: ["1.1.1.4"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.5"] - - tsc: ["CC6.3"] + title: "Ensure /tmp is a separate partition." + description: "The /tmp directory is a world-writable directory used for temporary storage by all users and some applications." + rationale: "Making /tmp its own file system allows an administrator to set additional mount options such as the noexec option on the mount, making /tmp useless for an attacker to install executable code. 
It would also prevent an attacker from establishing a hard link to a system setuid program and wait for it to be updated. Once the program was updated, the hard link would be broken and the attacker would have his own copy of the program. If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw. This can be accomplished by either mounting tmpfs to /tmp, or creating a separate partition for /tmp." + impact: "Since the /tmp directory is intended to be world-writable, there is a risk of resource exhaustion if it is not bound to a separate partition. Running out of /tmp space is a problem regardless of what kind of filesystem lies under it, but in a configuration where /tmp is not a separate file system it will essentially have the whole disk available, as the default installation only creates a single / partition. On the other hand, a RAM-based /tmp (as with tmpfs) will almost certainly be much smaller, which can lead to applications filling up the filesystem much more easily. Another alternative is to create a dedicated partition for /tmp from a separate volume or disk. One of the downsides of a disk-based dedicated partition is that it will be slower than tmpfs which is RAM-based. /tmp utilizing tmpfs can be resized using the size={size} parameter in the relevant entry in /etc/fstab." + remediation: "First ensure that systemd is correctly configured to ensure that /tmp will be mounted at boot time. # systemctl unmask tmp.mount For specific configuration requirements of the /tmp mount for your environment, modify /etc/fstab. Example of using tmpfs with specific mount options: tmpfs /tmp 0 tmpfs defaults,rw,nosuid,nodev,noexec,relatime,size=2G 0 Example of using a volume or disk with specific mount options. The source location of the volume or disk will vary depending on your environment. /tmp defaults,nodev,nosuid,noexec 0 0." references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - "https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems/" + - "https://www.freedesktop.org/software/systemd/man/systemd-fstab" + compliance: + - cis: ["1.1.2.1"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - "c:modprobe -n -v udf -> r:install /bin/true|Module udf not found" - - "not c:lsmod -> r:udf" - # 1.1.2 /tmp: partition + - 'c:findmnt --kernel /tmp -> r:^/tmp\s' + - "c:systemctl is-enabled tmp.mount -> r:enabled|static|generated" + + # 1.1.2.2 Ensure nodev option set on /tmp partition. (Automated) - id: 5004 - title: "Ensure /tmp is configured" - description: "The /tmp directory is a world-writable directory used for temporary storage by all users and some applications." - rationale: "Making /tmp its own file system allows an administrator to set the noexec option on the mount, making /tmp useless for an attacker to install executable code. It would also prevent an attacker from establishing a hardlink to a system setuid program and wait for it to be updated. Once the program was updated, the hardlink would be broken and the attacker would have his own copy of the program. If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw. 
This can be accomplished by either mounting tmpfs to /tmp, or creating a separate partition for /tmp." - remediation: 'Configure /etc/fstab as appropriate. example: "tmpfs /tmp tmpfs defaults,rw,nosuid,nodev,noexec,relatime 0 0" OR Run the following commands to enable systemd /tmp mounting: # systemctl unmask tmp.mount # systemctl enable tmp.mount Edit /etc/systemd/system/local-fs.target.wants/tmp.mount to configure the /tmp mount: [Mount] What=tmpfs Where=/tmp Type=tmpfs Options=mode=1777,strictatime,noexec,nodev,nosuid' - compliance: - - cis: ["1.1.2"] - - cis_csc: ["9.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + title: "Ensure nodev option set on /tmp partition." + description: "The nodev mount option specifies that the filesystem cannot contain special devices." + rationale: "Since the /tmp filesystem is not intended to support devices, set this option to ensure that users cannot create a block or character special devices in /tmp." + remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /tmp partition. Example: /tmp defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /tmp with the configured options: # mount -o remount /tmp." + compliance: + - cis: ["1.1.2.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:mount -> r:\s/tmp\s' + - "c:findmnt --kernel /tmp -> r:nodev" - # 1.1.3 /tmp: nodev + # 1.1.2.3 Ensure noexec option set on /tmp partition. (Automated) - id: 5005 - title: "Ensure nodev option set on /tmp partition" - description: "The nodev mount option specifies that the filesystem cannot contain special devices." - rationale: "Since the /tmp filesystem is not intended to support devices, set this option to ensure that users cannot attempt to create block or character special devices in /tmp." - remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /tmp partition. See the fstab(5) manual page for more information. Run the following command to remount /tmp : # mount -o remount,nodev /tmp OR Edit /etc/systemd/system/local-fs.target.wants/tmp.mount to add nodev to the /tmp mount options: [Mount] Options=mode=1777,strictatime,noexec,nodev,nosuid Run the following command to remount /tmp : # mount -o remount,nodev /tmp" - compliance: - - cis: ["1.1.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + title: "Ensure noexec option set on /tmp partition." + description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." + rationale: "Since the /tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot run executable binaries from /tmp." + remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /tmp partition. Example: /tmp defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /tmp with the configured options: # mount -o remount /tmp." 
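# Illustrative sketch, not quoted from the CIS text above: a complete six-field /etc/fstab
# entry for a tmpfs-backed /tmp carrying the options required by the 1.1.2 checks (the
# size= value is an assumption to tune per host), followed by the remount that applies it:
#   tmpfs   /tmp   tmpfs   defaults,rw,nosuid,nodev,noexec,relatime,size=2G   0 0
#   # mount -o remount /tmp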
+ compliance: + - cis: ["1.1.2.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - 'c:mount -> r:\s/tmp\s && !r:nodev' + - "c:findmnt --kernel /tmp -> r:noexec" - # 1.1.4 /tmp: nosuid + # 1.1.2.4 Ensure nosuid option set on /tmp partition. (Automated) - id: 5006 - title: "Ensure nosuid option set on /tmp partition" + title: "Ensure nosuid option set on /tmp partition." description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." rationale: "Since the /tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot create setuid files in /tmp." - remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /tmp partition. See the fstab(5) manual page for more information. Run the following command to remount /tmp : # mount -o remount,nosuid /tmp OR Edit /etc/systemd/system/local-fs.target.wants/tmp.mount to add nosuid to the /tmp mount options: [Mount] Options=mode=1777,strictatime,noexec,nodev,nosuid Run the following command to remount /tmp : # mount -o remount,nosuid /tmp" + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /tmp partition. Example: /tmp defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /tmp with the configured options: # mount -o remount /tmp." compliance: - - cis: ["1.1.4"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.1.2.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - 'c:mount -> r:\s/tmp\s && !r:nosuid' + - "c:findmnt --kernel /tmp -> r:nosuid" - # 1.1.5 /tmp: noexec + # 1.1.3.1 Ensure separate partition exists for /var. (Automated) - id: 5007 - title: "Ensure noexec option set on /tmp partition" - description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." - rationale: "Since the /tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot run executable binaries from /tmp." - remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /tmp partition. See the fstab(5) manual page for more information. Run the following command to remount /tmp : # mount -o remount,noexec /tmp OR Edit /etc/systemd/system/local-fs.target.wants/tmp.mount to add noexec to the /tmp mount options: [Mount] Options=mode=1777,strictatime,noexec,nodev,nosuid Run the following command to remount /tmp : # mount -o remount,noexec /tmp" + title: "Ensure separate partition exists for /var." + description: "The /var directory is used by daemons and other system services to temporarily store dynamic data. 
Some directories created by these processes may be world-writable." + rationale: "The reasoning for mounting /var on a separate partition is as follow. Protection from resource exhaustion The default installation only creates a single / partition. Since the /var directory may contain world-writable files and directories, there is a risk of resource exhaustion. It will essentially have the whole disk available to fill up and impact the system as a whole. In addition, other operations on the system could fill up the disk unrelated to /var and cause unintended behavior across the system as the disk is full. See man auditd.conf for details. Fine grained control over the mount Configuring /var as its own file system allows an administrator to set additional mount options such as noexec/nosuid/nodev. These options limit an attacker's ability to create exploits on the system. Other options allow for specific behavior. See man mount for exact details regarding filesystem-independent and filesystem-specific options. Protection from exploitation An example of exploiting /var may be an attacker establishing a hard-link to a system setuid program and wait for it to be updated. Once the program was updated, the hard-link would be broken and the attacker would have his own copy of the program. If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." + remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + references: + - "http://tldp.org/HOWTO/LVM-HOWTO/" compliance: - - cis: ["1.1.5"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.1.3.1"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - 'c:mount -> r:\s/tmp\s && !r:noexec' + - 'c:findmnt --kernel /var -> r:^/var\s' - # 1.1.6 Build considerations - Partition scheme. + # 1.1.3.2 Ensure nodev option set on /var partition. (Automated) - id: 5008 - title: "Ensure separate partition exists for /var" - description: "The /var directory is used by daemons and other system services to temporarily store dynamic data. Some directories created by these processes may be world-writable." - rationale: "Since the /var directory may contain world-writable files and directories, there is a risk of resource exhaustion if it is not bound to a separate partition." - remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + title: "Ensure nodev option set on /var partition." 
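# Illustrative sketch of the single line of output that the c:findmnt checks parse; the
# SOURCE and FSTYPE values are assumptions, only TARGET and OPTIONS matter to the regexes:
#   # findmnt --kernel /var
#   TARGET SOURCE              FSTYPE OPTIONS
#   /var   /dev/mapper/vg-var  xfs    rw,nosuid,nodev,noexec,relatime
# The partition check above matches r:^/var\s against that line, and the option checks
# that follow (r:nodev, r:noexec, r:nosuid) match anywhere in the same output.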
+ description: "The nodev mount option specifies that the filesystem cannot contain special devices." + rationale: "Since the /var filesystem is not intended to support devices, set this option to ensure that users cannot create a block or character special devices in /var." + remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /var partition. Example: /var defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var with the configured options: # mount -o remount /var." compliance: - - cis: ["1.1.6"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - cis: ["1.1.3.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:mount -> r:\s/var\s' + - "c:findmnt --kernel /var -> r:nodev" - # 1.1.7 bind mount /var/tmp to /tmp + # 1.1.3.3 Ensure noexec option set on /var partition. (Automated) - id: 5009 - title: "Ensure separate partition exists for /var/tmp" - description: "The /var/tmp directory is a world-writable directory used for temporary storage by all users and some applications." - rationale: "Since the /var/tmp directory is intended to be world-writable, there is a risk of resource exhaustion if it is not bound to a separate partition. In addition, making /var/tmp its own file system allows an administrator to set the noexec option on the mount, making /var/tmp useless for an attacker to install executable code." - remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/tmp. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + title: "Ensure noexec option set on /var partition." + description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." + rationale: "Since the /var filesystem is only intended for variable files such as logs, set this option to ensure that users cannot run executable binaries from /var." + remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /var partition. Example: /var defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var with the configured options: # mount -o remount /var." compliance: - - cis: ["1.1.7"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.1.3.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:mount -> r:\s/var/tmp\s' + - "c:findmnt --kernel /var -> r:noexec" - # 1.1.8 nodev set on /var/tmp + # 1.1.3.4 Ensure nosuid option set on /var partition. 
(Automated) - id: 5010 - title: "Ensure nodev option set on /var/tmp partition" - description: "The nodev mount option specifies that the filesystem cannot contain special devices." - rationale: "Since the /var/tmp filesystem is not intended to support devices, set this option to ensure that users cannot attempt to create block or character special devices in /var/tmp ." - remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /var/tmp partition. See the fstab(5) manual page for more information." - compliance: - - cis: ["1.1.8"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + title: "Ensure nosuid option set on /var partition." + description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." + rationale: "Since the /var filesystem is only intended for variable files such as logs, set this option to ensure that users cannot create setuid files in /var." + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /var partition. Example: /var defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var with the configured options: # mount -o remount /var." + compliance: + - cis: ["1.1.3.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - 'c:mount -> r:\s/var/tmp\s && !r:nodev' + - "c:findmnt --kernel /var -> r:nosuid" - # 1.1.9 nosuid set on /var/tmp + # 1.1.4.1 Ensure separate partition exists for /var/tmp. (Automated) - id: 5011 - title: "Ensure nosuid option set on /var/tmp partition" - description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." - rationale: "Since the /var/tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot create setuid files in /var/tmp." - remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /var/tmp partition. See the fstab(5) manual page for more information." + title: "Ensure separate partition exists for /var/tmp." + description: "The /var/tmp directory is a world-writable directory used for temporary storage by all users and some applications. Temporary file residing in /var/tmp is to be preserved between reboots." + rationale: "The reasoning for mounting /var/tmp on a separate partition is as follow. Protection from resource exhaustion The default installation only creates a single / partition. Since the /var directory may contain world-writable files and directories, there is a risk of resource exhaustion. It will essentially have the whole disk available to fill up and impact the system as a whole. In addition, other operations on the system could fill up the disk unrelated to /var and cause unintended behavior across the system as the disk is full. See man auditd.conf for details. Fine grained control over the mount Configuring /var as its own file system allows an administrator to set additional mount options such as noexec/nosuid/nodev. These options limit an attacker's ability to create exploits on the system. 
Other options allow for specific behavior. See man mount for exact details regarding filesystem-independent and filesystem-specific options. Protection from exploitation An example of exploiting /var may be an attacker establishing a hard-link to a system setuid program and wait for it to be updated. Once the program was updated, the hard-link would be broken and the attacker would have his own copy of the program. If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." + remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/tmp. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + references: + - "http://tldp.org/HOWTO/LVM-HOWTO/" compliance: - - cis: ["1.1.9"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.1.4.1"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - 'c:mount -> r:\s/var/tmp\s && !r:nosuid' + - 'c:findmnt --kernel /var/tmp -> r:^/var/tmp\s' - # 1.1.10 noexec set on /var/tmp + # 1.1.4.2 Ensure noexec option set on /var/tmp partition. (Automated) - id: 5012 - title: "Ensure noexec option set on /var/tmp partition" + title: "Ensure noexec option set on /var/tmp partition." description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." rationale: "Since the /var/tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot run executable binaries from /var/tmp." - remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /var/tmp partition. See the fstab(5) manual page for more information." + remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /var/tmp partition. Example: /var/tmp defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/tmp with the configured options: # mount -o remount /var/tmp." 
compliance: - - cis: ["1.1.10"] - - cis_csc: ["5.1"] - - cis_csc: ["2"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.1.4.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - 'c:mount -> r:\s/var/tmp\s && !r:noexec' + - "c:findmnt --kernel /var/tmp -> r:noexec" - # 1.1.11 /var/log: partition + # 1.1.4.3 Ensure nosuid option set on /var/tmp partition. (Automated) - id: 5013 - title: "Ensure separate partition exists for /var/log" - description: "The /var/log directory is used by system services to store log data ." - rationale: "There are two important reasons to ensure that system logs are stored on a separate partition: protection against resource exhaustion (since logs can grow quite large) and protection of audit data." - remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/log. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." - compliance: - - cis: ["1.1.11"] - - cis_csc: ["6.4"] - - pci_dss: ["2.2.4", "10.7"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + title: "Ensure nosuid option set on /var/tmp partition." + description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." + rationale: "Since the /var/tmp filesystem is only intended for temporary file storage, set this option to ensure that users cannot create setuid files in /var/tmp." + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /var/tmp partition. Example: /var/tmp defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/tmp with the configured options: # mount -o remount /var/tmp." + compliance: + - cis: ["1.1.4.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:mount -> r:\s/var/log\s' + - "c:findmnt --kernel /var/tmp -> r:nosuid" - # 1.1.12 /var/log/audit: partition + # 1.1.4.4 Ensure nodev option set on /var/tmp partition. (Automated) - id: 5014 - title: "Ensure separate partition exists for /var/log/audit" - description: "The auditing daemon, auditd , stores log data in the /var/log/audit directory." - rationale: "There are two important reasons to ensure that data gathered by auditd is stored on a separate partition: protection against resource exhaustion (since the audit.log file can grow quite large) and protection of audit data." - remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/log/audit. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." 
+ title: "Ensure nodev option set on /var/tmp partition." + description: "The nodev mount option specifies that the filesystem cannot contain special devices." + rationale: "Since the /var/tmp filesystem is not intended to support devices, set this option to ensure that users cannot create a block or character special devices in /var/tmp." + remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /var/tmp partition. Example: /var/tmp defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/tmp with the configured options: # mount -o remount /var/tmp." compliance: - - cis: ["1.1.12"] - - cis_csc: ["6.4"] - - pci_dss: ["2.2.4", "10.7"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - cis: ["1.1.4.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:mount -> r:\s/var/log/audit\s' + - "c:findmnt --kernel /var/tmp -> r:nodev" - # 1.1.13 /home: partition + # 1.1.5.1 Ensure separate partition exists for /var/log. (Automated) - id: 5015 - title: "Ensure separate partition exists for /home" - description: "The /home directory is used to support disk storage needs of local users." - rationale: "If the system is intended to support local users, create a separate partition for the /home directory to protect against resource exhaustion and restrict the type of files that can be stored under /home." - remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /home. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." - compliance: - - cis: ["1.1.13"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + title: "Ensure separate partition exists for /var/log." + description: "The /var/log directory is used by system services to store log data." + rationale: "The reasoning for mounting /var/log on a separate partition is as follow. Protection from resource exhaustion The default installation only creates a single / partition. Since the /var directory may contain world-writable files and directories, there is a risk of resource exhaustion. It will essentially have the whole disk available to fill up and impact the system as a whole. In addition, other operations on the system could fill up the disk unrelated to /var and cause unintended behavior across the system as the disk is full. See man auditd.conf for details. Fine grained control over the mount Configuring /var as its own file system allows an administrator to set additional mount options such as noexec/nosuid/nodev. These options limit an attacker's ability to create exploits on the system. Other options allow for specific behavior. See man mount for exact details regarding filesystem-independent and filesystem-specific options. Protection from exploitation An example of exploiting /var may be an attacker establishing a hard-link to a system setuid program and wait for it to be updated. 
Once the program was updated, the hard-link would be broken and the attacker would have his own copy of the program. If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." + remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/log . For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." references: - - AJ Lewis, "LVM HOWTO", https://tldp.org/HOWTO/LVM-HOWTO/ + - "http://tldp.org/HOWTO/LVM-HOWTO/" + compliance: + - cis: ["1.1.5.1"] + - cis_csc_v8: ["8.3"] + - cis_csc_v7: ["6.4"] + - iso_27001-2013: ["A.12.4.1"] + - pci_dss_v3.2.1: ["10.7"] + - soc_2: ["A1.1"] condition: all rules: - - 'c:mount -> r:\s/home\s' + - 'c:findmnt --kernel /var/log -> r:^/var/log\s' - # 1.1.14 /home: nodev + # 1.1.5.2 Ensure nodev option set on /var/log partition. (Automated) - id: 5016 - title: "Ensure nodev option set on /home partition" + title: "Ensure nodev option set on /var/log partition." description: "The nodev mount option specifies that the filesystem cannot contain special devices." - rationale: "Since the user partitions are not intended to support devices, set this option to ensure that users cannot attempt to create block or character special devices." - remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /home partition. # mount -o remount,nodev /home" - compliance: - - cis: ["1.1.14"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + rationale: "Since the /var/log filesystem is not intended to support devices, set this option to ensure that users cannot create a block or character special devices in /var/log." + remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /var/log partition. Example: /var/log defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/log with the configured options: # mount -o remount /var/log." + compliance: + - cis: ["1.1.5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - 'c:mount -> r:\s/home\s && !r:nodev' + - "c:findmnt --kernel /var/log -> r:nodev" - # 1.1.15 /dev/shm: nodev + # 1.1.5.3 Ensure noexec option set on /var/log partition. (Automated) - id: 5017 - title: "Ensure nodev option set on /dev/shm partition" - description: "The nodev mount option specifies that the filesystem cannot contain special devices." - rationale: "Since the /dev/shm filesystem is not intended to support devices, set this option to ensure that users cannot attempt to create special devices in /dev/shm partitions." 
- remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /dev/shm partition. Run the following command to remount /dev/shm: # mount -o remount,nodev /dev/shm" + title: "Ensure noexec option set on /var/log partition." + description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." + rationale: "Since the /var/log filesystem is only intended for log files, set this option to ensure that users cannot run executable binaries from /var/log." + remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /var/log partition. Example: /var/log defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/log with the configured options: # mount -o remount /var/log." compliance: - - cis: ["1.1.15"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.1.5.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - 'c:mount -> r:\s/dev/shm\s && !r:nodev' + - "c:findmnt --kernel /var/log -> r:noexec" - # 1.1.16 /dev/shm: nosuid + # 1.1.5.4 Ensure nosuid option set on /var/log partition. (Automated) - id: 5018 - title: "Ensure nosuid option set on /dev/shm partition" + title: "Ensure nosuid option set on /var/log partition." description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." - rationale: "Setting this option on a file system prevents users from introducing privileged programs onto the system and allowing non-root users to execute them." - remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /dev/shm partition. Run the following command to remount /dev/shm: # mount -o remount,nosuid /dev/shm" + rationale: "Since the /var/log filesystem is only intended for log files, set this option to ensure that users cannot create setuid files in /var/log." + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /var/log partition. Example: /var/log defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/log with the configured options: # mount -o remount /var/log." compliance: - - cis: ["1.1.16"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.1.5.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - 'c:mount -> r:\s/dev/shm\s && !r:nosuid' + - "c:findmnt --kernel /var/log -> r:nosuid" - # 1.1.17 /dev/shm: noexec + # 1.1.6.1 Ensure separate partition exists for /var/log/audit. 
(Automated) - id: 5019 - title: "Ensure noexec option set on /dev/shm partition" - description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." - rationale: "Setting this option on a file system prevents users from executing programs from shared memory. This deters users from introducing potentially malicious software on the system." - remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /dev/shm partition. Run the following command to remount /dev/shm: # mount -o remount,noexec /dev/shm" + title: "Ensure separate partition exists for /var/log/audit." + description: "The auditing daemon, auditd, stores log data in the /var/log/audit directory." + rationale: "The reasoning for mounting /var/log/audit on a separate partition is as follow. Protection from resource exhaustion The default installation only creates a single / partition. Since the /var directory may contain world-writable files and directories, there is a risk of resource exhaustion. It will essentially have the whole disk available to fill up and impact the system as a whole. In addition, other operations on the system could fill up the disk unrelated to /var and cause unintended behavior across the system as the disk is full. See man auditd.conf for details. Fine grained control over the mount Configuring /var as its own file system allows an administrator to set additional mount options such as noexec/nosuid/nodev. These options limit an attacker's ability to create exploits on the system. Other options allow for specific behavior. See man mount for exact details regarding filesystem-independent and filesystem-specific options. Protection from exploitation An example of exploiting /var may be an attacker establishing a hard-link to a system setuid program and wait for it to be updated. Once the program was updated, the hard-link would be broken and the attacker would have his own copy of the program. If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." + remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /var/log/audit. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + references: + - "http://tldp.org/HOWTO/LVM-HOWTO/" compliance: - - cis: ["1.1.17"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.1.6.1"] + - cis_csc_v8: ["8.3"] + - cis_csc_v7: ["6.4"] + - iso_27001-2013: ["A.12.4.1"] + - pci_dss_v3.2.1: ["10.7"] + - soc_2: ["A1.1"] + condition: all rules: - - 'c:mount -> r:\s/dev/shm\s && !r:noexec' + - 'c:findmnt --kernel /var/log/audit -> r:^/var/log/audit\s' - # 1.1.22 Disable Automounting + # 1.1.6.2 Ensure noexec option set on /var/log/audit partition. (Automated) - id: 5020 - title: "Disable Automounting" - description: "autofs allows automatic mounting of devices, typically including CD/DVDs and USB drives." 
- rationale: "With automounting enabled anyone with physical access could attach a USB drive or disc and have its contents available in system even if they lacked permissions to mount it themselves." - remediation: "Run the following command to disable autofs : systemctl disable autofs" + title: "Ensure noexec option set on /var/log/audit partition." + description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." + rationale: "Since the /var/log/audit filesystem is only intended for audit logs, set this option to ensure that users cannot run executable binaries from /var/log/audit." + remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /var partition. Example: /var/log/audit defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/log/audit with the configured options: # mount -o remount /var/log/audit." compliance: - - cis: ["1.1.22"] - - cis_csc: ["8.4", "8.5"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis: ["1.1.6.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - "p:autofs" + - "c:findmnt --kernel /var/log/audit -> r:noexec" - # 1.1.23 Disable USB Storage (Scored) + # 1.1.6.3 Ensure nodev option set on /var/log/audit partition. (Automated) - id: 5021 - title: "Disable USB Storage" - description: "USB storage provides a means to transfer and store files insuring persistence and availability of the files independent of network connection status. Its popularity and utility has led to USB-based malware being a simple and common means for network infiltration and a first step to establishing a persistent threat within a networked environment." - rationale: "Restricting USB access on the system will decrease the physical attack surface for a device and diminish the possible vectors to introduce malware." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: vim /etc/modprobe.d/usb-storage.conf and add the following line: install usb-storage /bin/true Run the following command to unload the usb-storage module: # rmmod usb-storage" + title: "Ensure nodev option set on /var/log/audit partition." + description: "The nodev mount option specifies that the filesystem cannot contain special devices." + rationale: "Since the /var/log/audit filesystem is not intended to support devices, set this option to ensure that users cannot create a block or character special devices in /var/log/audit." + remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /var/log/audit partition. Example: /var/log/audit defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/log/audit with the configured options: # mount -o remount /var/log/audit." 
compliance: - - cis: ["1.1.23"] - - cis_csc: ["8.4", "8.5"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.1.6.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - "c:modprobe -n -v usb-storage -> r:install /bin/true" - - "not c:lsmod -> r:usb-storage" - - ############################################### - # 1.2 Configure Software Updates - ############################################### + - "c:findmnt --kernel /var/log/audit -> r:nodev" - # 1.2.2 Disable the rhnsd Daemon (Not Scored) + # 1.1.6.4 Ensure nosuid option set on /var/log/audit partition. (Automated) - id: 5022 - title: "Disable the rhnsd Daemon" - description: "The rhnsd daemon polls the Red Hat Network web site for scheduled actions and, if there are, executes those actions." - rationale: "Patch management policies may require that organizations test the impact of a patch before it is deployed in a production environment. Having patches automatically deployed could have a negative impact on the environment. It is best to not allow an action by default but only after appropriate consideration has been made. It is recommended that the service be disabled unless the risk is understood and accepted or you are running your own satellite . This item is not scored because organizations may have addressed the risk." - remediation: "Run the following command to disable rhnsd : # systemctl --now disable rhnsd" + title: "Ensure nosuid option set on /var/log/audit partition." + description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." + rationale: "Since the /var/log/audit filesystem is only intended for variable files such as logs, set this option to ensure that users cannot create setuid files in /var/log/audit." + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /var/log/audit partition. Example: /var/log/audit defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /var/log/audit with the configured options: # mount -o remount /var/log/audit." compliance: - - cis: ["1.2.2"] - - pci_dss: ["2.2.5"] - - tsc: ["CC6.3"] + - cis: ["1.1.6.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - "p:rhnsd" + - "c:findmnt --kernel /var/log/audit -> r:nosuid" - # 1.2.4 Activate gpgcheck + # 1.1.7.1 Ensure separate partition exists for /home. (Automated) - id: 5023 - title: "Ensure gpgcheck is globally activated" - description: "The gpgcheck option, found in the main section of the /etc/yum.conf and individual /etc/yum/repos.d/* files determines if an RPM package's signature is checked prior to its installation." 
- rationale: "It is important to ensure that an RPM's package signature is always checked prior to installation to ensure that the software is obtained from a trusted source." - remediation: "Edit /etc/yum.conf and set ' gpgcheck=1 ' in the [main] section. Edit any failing files in /etc/yum.repos.d/* and set all instances of gpgcheck to ' 1 '." + title: "Ensure separate partition exists for /home." + description: "The /home directory is used to support disk storage needs of local users." + rationale: "The reasoning for mounting /home on a separate partition is as follow. Protection from resource exhaustion The default installation only creates a single / partition. Since the /var directory may contain world-writable files and directories, there is a risk of resource exhaustion. It will essentially have the whole disk available to fill up and impact the system as a whole. In addition, other operations on the system could fill up the disk unrelated to /var and cause unintended behavior across the system as the disk is full. See man auditd.conf for details. Fine grained control over the mount Configuring /var as its own file system allows an administrator to set additional mount options such as noexec/nosuid/nodev. These options limit an attacker's ability to create exploits on the system. Other options allow for specific behavior. See man mount for exact details regarding filesystem-independent and filesystem-specific options. Protection from exploitation An example of exploiting /var may be an attacker establishing a hard-link to a system setuid program and wait for it to be updated. Once the program was updated, the hard-link would be broken and the attacker would have his own copy of the program. If the program happened to have a security vulnerability, the attacker could continue to exploit the known flaw." + impact: "Resizing filesystems is a common activity in cloud-hosted servers. Separate filesystem partitions may prevent successful resizing, or may require the installation of additional tools solely for the purpose of resizing operations. The use of these additional tools may introduce their own security considerations." + remediation: "For new installations, during installation create a custom partition setup and specify a separate partition for /home. For systems that were previously installed, create a new partition and configure /etc/fstab as appropriate." + references: + - "http://tldp.org/HOWTO/LVM-HOWTO/" compliance: - - cis: ["1.2.4"] - - cis_csc: ["3.4"] - - pci_dss: ["6.2"] - - nist_800_53: ["SI.2", "SA.11", "SI.4"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["A1.2", "CC6.8"] + - cis: ["1.1.7.1"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - "f:/etc/yum.conf -> r:gpgcheck=1" - - "not c:grep -Rh ^gpgcheck /etc/yum.repos.d/ -> r:gpgcheck=0" + - 'c:findmnt --kernel /home -> r:^/home\s' - ############################################### - # 1.3 Configure sudo - ############################################### - - # 1.3.1 install sudo + # 1.1.7.2 Ensure nodev option set on /home partition. 
(Automated) - id: 5024 - title: "Ensure sudo is installed" - description: "sudo allows a permitted user to execute a command as the superuser or another user, as specified by the security policy. The invoking user's real (not effective) user ID is used to determine the user name with which to query the security policy." - rationale: "sudo supports a plugin architecture for security policies and input/output logging. Third parties can develop and distribute their own policy and I/O logging plugins to work seamlessly with the sudo front end. The default security policy is sudoers, which is configured via the file /etc/sudoers. The security policy determines what privileges, if any, a user has to run sudo. The policy may require that users authenticate themselves with a password or another authentication mechanism. If authentication is required, sudo will exit if the user's password is not entered within a configurable time limit. This limit is policy-specific." - remediation: "Run the following command to install sudo: # dnf install sudo" + title: "Ensure nodev option set on /home partition." + description: "The nodev mount option specifies that the filesystem cannot contain special devices." + rationale: "Since the /home filesystem is not intended to support devices, set this option to ensure that users cannot create block or character special devices in /home." + remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /home partition. Example: /home defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /home with the configured options: # mount -o remount /home." compliance: - - cis: ["1.3.1"] - - cis_csc: ["4.3"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.1.7.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - 'c:rpm -q sudo -> r:sudo-\S*' + - "c:findmnt --kernel /home -> r:nodev" - # 1.3.2 Ensure sudo commands use pty (Scored) + # 1.1.7.3 Ensure nosuid option set on /home partition. (Automated) - id: 5025 - title: "Ensure sudo commands use pty" - description: "sudo can be configured to run only from a pseudo-pty" - rationale: "Attackers can run a malicious program using sudo which would fork a background process that remains even when the main program has finished executing." - remediation: "edit the file /etc/sudoers or a file in /etc/sudoers.d/ with visudo -f and add the following line: Defaults use_pty" + title: "Ensure nosuid option set on /home partition." + description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." + rationale: "Since the /home filesystem is only intended for user file storage, set this option to ensure that users cannot create setuid files in /home." + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /home partition. Example: /home defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /home with the configured options: # mount -o remount /home."
compliance: - - cis: ["1.3.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: "AIDE stable manual: http://aide.sourceforge.net/stable/manual.html" - condition: any + - cis: ["1.1.7.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all rules: - - 'f:/etc/sudoers -> r:^\s*Defaults\s+use_pty' - - 'c:grep -r Default /etc/sudoers.d/ -> !r:# && r:\s*Defaults\s+use_pty' + - "c:findmnt --kernel /home -> r:nosuid" - # 1.3.3 Ensure sudo log file exists (Scored) + # 1.1.7.4 Ensure usrquota option set on /home partition. (Automated) - id: 5026 - title: "Ensure sudo log file exists" - description: "sudo can use a custom log file" - rationale: "A sudo log file simplifies auditing of sudo commands" - remediation: 'edit the file /etc/sudoers or a file in /etc/sudoers.d/ with visudo -f and add the following line: Defaults logfile=""' - compliance: - - cis: ["1.3.3"] - - cis_csc: ["6.3"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: "AIDE stable manual: http://aide.sourceforge.net/stable/manual.html" + title: "Ensure usrquota option set on /home partition." + description: "The usrquota mount option allows for the filesystem to have disk quotas configured." + rationale: "To ensure the availability of disk space on /home, it is important to limit the impact a single user or group can cause for other users (or the wider system) by accidentally filling up the partition. Quotas can also be applied to inodes for filesystems where inode exhaustion is a concern." + remediation: "Edit the /etc/fstab file and add usrquota to the fourth field (mounting options) for the /home partition. Example: /home defaults,rw,usrquota,grpquota,nodev,relatime 0 0 Run the following command to remount /home with the configured options: # mount -o remount /home Create the quota database. This example will ignore any existing quota files. # quotacheck -cugv /home quotacheck: Your kernel probably supports journaled quota but you are not using it. Consider switching to journaled quota to avoid running quotacheck after an unclean shutdown. quotacheck: Scanning /dev/sdb [/home] done quotacheck: Cannot stat old user quota file /home/aquota.user: No such file or directory. Usage will not be subtracted. quotacheck: Cannot stat old group quota file /home/aquota.group: No such file or directory. Usage will not be subtracted. quotacheck: Cannot stat old user quota file /home/aquota.user: No such file or directory. Usage will not be subtracted. quotacheck: Cannot stat old group quota file /home/aquota.group: No such file or directory. Usage will not be subtracted. quotacheck: Checked 8 directories and 0 files quotacheck: Old file not found. quotacheck: Old file not found. Restore SELinux context on the quota database files. Order of operations is important as quotaon will set the immutable attribute on the files and thus restorecon will fail. # restorecon /home/aquota.user Enable quotas on the partition: # quotaon -vug /home /dev/sdb [/home]: group quotas turned on /dev/sdb [/home]: user quotas turned on." 
+ compliance: + - cis: ["1.1.7.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - "c:findmnt --kernel /home -> r:usrquota" + + # 1.1.7.5 Ensure grpquota option set on /home partition. (Automated) + - id: 5027 + title: "Ensure grpquota option set on /home partition." + description: "The grpquota mount option allows for the filesystem to have disk quotas configured." + rationale: "To ensure the availability of disk space on /home, it is important to limit the impact a single user or group can cause for other users (or the wider system) by accidentally filling up the partition. Quotas can also be applied to inodes for filesystems where inode exhaustion is a concern." + remediation: "Edit the /etc/fstab file and add grpquota to the fourth field (mounting options) for the /home partition. Example: /home defaults,rw,usrquota,grpquota,nodev,relatime 0 0 Run the following command to remount /home with the configured options: # mount -o remount /home Create the quota database. This example will ignore any existing quota files. # quotacheck -cugv /home quotacheck: Your kernel probably supports journaled quota but you are not using it. Consider switching to journaled quota to avoid running quotacheck after an unclean shutdown. quotacheck: Scanning /dev/sdb [/home] done quotacheck: Cannot stat old user quota file /home/aquota.user: No such file or directory. Usage will not be subtracted. quotacheck: Cannot stat old group quota file /home/aquota.group: No such file or directory. Usage will not be subtracted. quotacheck: Cannot stat old user quota file /home/aquota.user: No such file or directory. Usage will not be subtracted. quotacheck: Cannot stat old group quota file /home/aquota.group: No such file or directory. Usage will not be subtracted. quotacheck: Checked 8 directories and 0 files quotacheck: Old file not found. quotacheck: Old file not found. Restore SELinux context on the quota database files. Order of operations is important as quotaon will set the immutable attribute on the files and thus restorecon will fail. # restorecon /home/aquota.group Enable quotas on the partition: # quotaon -vug /home /dev/sdb [/home]: group quotas turned on /dev/sdb [/home]: user quotas turned on." + compliance: + - cis: ["1.1.7.5"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - "c:findmnt --kernel /home -> r:grpquota" + + # 1.1.8.1 Ensure nodev option set on /dev/shm partition. (Automated) + - id: 5028 + title: "Ensure nodev option set on /dev/shm partition." + description: "The nodev mount option specifies that the filesystem cannot contain special devices." + rationale: "Since the /dev/shm filesystem is not intended to support devices, set this option to ensure that users cannot attempt to create special devices in /dev/shm partitions." 
+ remediation: "Edit the /etc/fstab file and add nodev to the fourth field (mounting options) for the /dev/shm partition. See the fstab(5) manual page for more information. Run the following command to remount /dev/shm using the updated options from /etc/fstab: # mount -o remount /dev/shm." + compliance: + - cis: ["1.1.8.1"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - "c:findmnt --kernel /dev/shm -> r:nodev" + + # 1.1.8.2 Ensure noexec option set on /dev/shm partition. (Automated) + - id: 5029 + title: "Ensure noexec option set on /dev/shm partition." + description: "The noexec mount option specifies that the filesystem cannot contain executable binaries." + rationale: "Setting this option on a file system prevents users from executing programs from shared memory. This deters users from introducing potentially malicious software on the system." + remediation: "Edit the /etc/fstab file and add noexec to the fourth field (mounting options) for the /dev/shm partition. Example: /dev/shm defaults,rw,nosuid,nodev,noexec,relatime 0 0 Run the following command to remount /dev/shm with the configured options: # mount -o remount /dev/shm NOTE It is recommended to use tmpfs as the device/filesystem type as /dev/shm is used as shared memory space by applications." + compliance: + - cis: ["1.1.8.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - "c:findmnt --kernel /dev/shm -> r:noexec" + + # 1.1.8.3 Ensure nosuid option set on /dev/shm partition. (Automated) + - id: 5030 + title: "Ensure nosuid option set on /dev/shm partition." + description: "The nosuid mount option specifies that the filesystem cannot contain setuid files." + rationale: "Setting this option on a file system prevents users from introducing privileged programs onto the system and allowing non-root users to execute them." + remediation: "Edit the /etc/fstab file and add nosuid to the fourth field (mounting options) for the /dev/shm partition. See the fstab(5) manual page for more information. Run the following command to remount /dev/shm using the updated options from /etc/fstab: # mount -o remount /dev/shm." + compliance: + - cis: ["1.1.8.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - "c:findmnt --kernel /dev/shm -> r:nosuid" + + # 1.1.9 Disable Automounting. (Automated) + - id: 5031 + title: "Disable Automounting." 
+ description: "autofs allows automatic mounting of devices, typically including CD/DVDs and USB drives." + rationale: "With automounting enabled anyone with physical access could attach a USB drive or disc and have its contents available in system even if they lacked permissions to mount it themselves." + impact: "The use of portable hard drives is very common for workstation users. If your organization allows the use of portable storage or media on workstations and physical access controls to workstations is considered adequate there is little value add in turning off automounting." + remediation: "If there are no other packages that depends on autofs, remove the package with: # dnf remove autofs Run the following command to disable autofs if it is required: # systemctl --now disable autofs." + compliance: + - cis: ["1.1.9"] + - cis_csc_v8: ["10.3"] + - cis_csc_v7: ["8.5"] + - cmmc_v2.0: ["MP.L2-3.8.7"] + - hipaa: ["164.310(d)(1)"] + - iso_27001-2013: ["A.12.2.1"] condition: any rules: - - 'f:/etc/sudoers -> r:^\s*Defaults\s+logfile=' - - 'c:grep -r Default /etc/sudoers.d/ -> !r:# && r:\s*Defaults\s+logfile' + - "c:systemctl is-enabled autofs -> r:Failed to get unit file state for autofs.service: No such file or directory" + - "c:systemctl is-enabled autofs -> r:disabled" + + # 1.1.10 Disable USB Storage. (Automated) + - id: 5032 + title: "Disable USB Storage." + description: "USB storage provides a means to transfer and store files insuring persistence and availability of the files independent of network connection status. Its popularity and utility has led to USB-based malware being a simple and common means for network infiltration and a first step to establishing a persistent threat within a networked environment." + rationale: "Restricting USB access on the system will decrease the physical attack surface for a device and diminish the possible vectors to introduce malware." + remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: vim /etc/modprobe.d/usb_storage.conf and add the following line: install usb-storage /bin/true Run the following command to unload the usb-storage module: rmmod usb-storage." + compliance: + - cis: ["1.1.10"] + - cis_csc_v8: ["10.3"] + - cis_csc_v7: ["13.7"] + - cmmc_v2.0: ["MP.L2-3.8.7"] + - hipaa: ["164.310(d)(1)"] + - iso_27001-2013: ["A.8.3.1"] + condition: all + rules: + - "c:modprobe -n -v usb-storage -> r:install /bin/true" + - "not c:lsmod -> r:usb-storage" ############################################### - # 1.4 Filesystem Integrity Checking + # 1.2 Configure Software Updates ############################################### - # 1.4.1 install AIDE - - id: 5027 - title: "Ensure AIDE is installed" - description: "AIDE takes a snapshot of filesystem state including modification times, permissions, and file hashes which can then be used to compare against the current state of the filesystem to detect modifications to the system." + # 1.2.1 Ensure Red Hat Subscription Manager connection is configured. (Manual) - Not Implemented + + # 1.2.2 Ensure GPG keys are configured. (Manual) - Not Implemented + + # 1.2.3 Ensure gpgcheck is globally activated. (Automated) + - id: 5033 + title: "Ensure gpgcheck is globally activated." + description: "The gpgcheck option, found in the main section of the /etc/dnf/dnf.conf and individual /etc/yum.repos.d/* files, determines if an RPM package's signature is checked prior to its installation." 
+ rationale: "It is important to ensure that an RPM's package signature is always checked prior to installation to ensure that the software is obtained from a trusted source." + remediation: "Edit /etc/dnf/dnf.conf and set gpgcheck=1 in the [main] section. Example: # sed -i 's/^gpgcheck\\s*=\\s*.*/gpgcheck=1/' /etc/dnf/dnf.conf Edit any failing files in /etc/yum.repos.d/* and set all instances starting with gpgcheck to 1. Example: # find /etc/yum.repos.d/ -name \"*.repo\" -exec echo \"Checking:\" {} \\; -exec sed -i 's/^gpgcheck\\s*=\\s*.*/gpgcheck=1/' {} \\;." + compliance: + - cis: ["1.2.3"] + - cis_csc_v8: ["7.3"] + - cis_csc_v7: ["3.4"] + - cmmc_v2.0: ["SI.L1-3.14.1"] + - nist_sp_800-53: ["SI-2(2)"] + - pci_dss_v3.2.1: ["6.2"] + - soc_2: ["CC7.1"] + condition: all + rules: + - "f:/etc/dnf/dnf.conf -> r:gpgcheck=1" + - 'd:/etc/yum.repos.d/ -> r:\. -> r:gpgcheck=1' + + # 1.2.4 Ensure package manager repositories are configured. (Manual) - Not Implemented + + ############################################### + # 1.3 Filesystem Integrity Checking + ############################################### + + # 1.3.1 Ensure AIDE is installed. (Automated) + - id: 5034 + title: "Ensure AIDE is installed." + description: "Advanced Intrusion Detection Environment (AIDE) is a intrusion detection tool that uses predefined rules to check the integrity of files and directories in the Linux operating system. AIDE has its own database to check the integrity of files and directories. AIDE takes a snapshot of files and directories including modification times, permissions, and file hashes which can then be used to compare against the current state of the filesystem to detect modifications to the system." rationale: "By monitoring the filesystem state compromised files can be detected to prevent or limit the exposure of accidental or malicious misconfigurations or modified binaries." - remediation: "Run the following command to install aide: # dnf install aide || Configure AIDE as appropriate for your environment. Consult the AIDE documentation for options. Initialize AIDE: #aide --init && mv /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz" + remediation: "Run the following command to install AIDE: # dnf install aide Configure AIDE as appropriate for your environment. Consult the AIDE documentation for options. Initialize AIDE: Run the following commands: # aide --init # mv /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz." + references: + - "http://aide.sourceforge.net/stable/manual.html" compliance: - cis: ["1.3.1"] - - cis_csc: ["14.9"] - - pci_dss: ["11.5"] - - tsc: ["PI1.4", "PI1.5", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - references: - - "AIDE stable manual: http://aide.sourceforge.net/stable/manual.html" + - cis_csc_v8: ["3.14"] + - cis_csc_v7: ["14.9"] + - cmmc_v2.0: ["AC.L2-3.1.7"] + - hipaa: ["164.312(b)", "164.312(c)(1)", "164.312(c)(2)"] + - iso_27001-2013: ["A.12.4.3"] + - nist_sp_800-53: ["AC-6(9)"] + - pci_dss_v3.2.1: ["10.2.1", "11.5"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1"] + - soc_2: ["CC6.1"] condition: all rules: - 'c:rpm -q aide -> r:aide-\S*' - # 1.4.2 AIDE regular checks - - id: 5028 - title: "Ensure filesystem integrity is regularly checked" + # 1.3.2 Ensure filesystem integrity is regularly checked. (Automated) + - id: 5035 + title: "Ensure filesystem integrity is regularly checked." description: "Periodic checking of the filesystem integrity is needed to detect changes to the filesystem." 
rationale: "Periodic file checking allows the system administrator to determine on a regular basis if critical files have been changed in an unauthorized fashion." - remediation: " Run the following commands: # cp ./config/aidecheck.service /etc/systemd/system/aidecheck.service # cp ./config/aidecheck.timer /etc/systemd/system/aidecheck.timer # chmod 0644 /etc/systemd/system/aidecheck.* # systemctl reenable aidecheck.timer # systemctl restart aidecheck.timer # systemctl daemon-reload. OR Run the following command: crontab -u root -e // Add the following line to the crontab: 0 5 * * * /usr/sbin/aide --check // Notes: The checking in this recommendation occurs every day at 5am. Alter the frequency and time of the checks in compliance with site policy. " - compliance: - - cis: ["1.3.2"] - - cis_csc: ["3.5"] - - pci_dss: ["11.5"] - - tsc: ["PI1.4", "PI1.5", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + remediation: "If cron will be used to schedule and run aide check Run the following command: # crontab -u root -e Add the following line to the crontab: 0 5 * * * /usr/sbin/aide --check OR if aidecheck.service and aidecheck.timer will be used to schedule and run aide check: Create or edit the file /etc/systemd/system/aidecheck.service and add the following lines: [Unit] Description=Aide Check [Service] Type=simple ExecStart=/usr/sbin/aide --check [Install] WantedBy=multi-user.target Create or edit the file /etc/systemd/system/aidecheck.timer and add the following lines: [Unit] Description=Aide check every day at 5AM [Timer] OnCalendar=*-*-* 05:00:00 Unit=aidecheck.service [Install] WantedBy=multi-user.target Run the following commands: # chown root:root /etc/systemd/system/aidecheck.* # chmod 0644 /etc/systemd/system/aidecheck.* # systemctl daemon-reload # systemctl enable aidecheck.service # systemctl --now enable aidecheck.timer." references: - - "https://github.com/konstruktoid/hardening/blob/master/config/aidecheck.service" + - "https://github.com/konstruktoid/hardening/blob/master/config/aidecheck.servic" - "https://github.com/konstruktoid/hardening/blob/master/config/aidecheck.timer" - condition: any + compliance: + - cis: ["1.3.2"] + - cis_csc_v8: ["3.14"] + - cis_csc_v7: ["14.9"] + - cmmc_v2.0: ["AC.L2-3.1.7"] + - hipaa: ["164.312(b)", "164.312(c)(1)", "164.312(c)(2)"] + - iso_27001-2013: ["A.12.4.3"] + - nist_sp_800-53: ["AC-6(9)"] + - pci_dss_v3.2.1: ["10.2.1", "11.5"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1"] + - soc_2: ["CC6.1"] + condition: all rules: - - "c:crontab -u root -l -> r:aide" - - "c:grep -r aide /etc/cron.* /etc/crontab -> r:aide" + - "c:systemctl is-enabled aidecheck.service -> r:enabled" + - "c:systemctl is-enabled aidecheck.timer -> r:enabled" + - "c:systemctl status aidecheck.timer -> r:active" ############################################### - # 1.5 Secure Boot Settings + # 1.4 Secure Boot Settings ############################################### - # 1.5.1 Configure bootloader - - id: 5029 - title: "Ensure permissions on bootloader config are configured" - description: "The grub configuration file contains information on boot settings and passwords for unlocking boot options. The grub configuration is usually grub.cfg and grubenv stored in /boot/grub2/" - rationale: "Setting the permissions to read and write for root only prevents non-root users from seeing the boot parameters or changing them. Non-root users who read the boot parameters may be able to identify weaknesses in security upon boot and be able to exploit them." 
- remediation: "Run the following commands to set permissions on your grub configuration: # chown root:root /boot/grub2/grub.cfg # chmod og-rwx /boot/grub2/grub.cfg # chown root:root /boot/grub2/grubenv # chmod og-rwx /boot/grub2/grubenv" - compliance: - - cis: ["1.5.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:stat -L /boot/grub2/grub.cfg -> r:Access:\s*\(0\d00/-\w\w\w------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - - 'c:stat -L /boot/grub2/grubenv -> r:Access:\s*\(0\d00/-\w\w\w------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 1.5.2 Set Boot Loader Password (Scored) - - id: 5030 - title: "Ensure bootloader password is set" + # 1.4.1 Ensure bootloader password is set. (Automated) + - id: 5036 + title: "Ensure bootloader password is set." description: "Setting the boot loader password will require that anyone rebooting the system must enter a password before being able to set command line boot parameters." rationale: "Requiring a boot password upon execution of the boot loader will prevent an unauthorized user from entering boot parameters or changing the boot partition. This prevents users from weakening security (e.g. turning off SELinux at boot time)." - remediation: "Create an encrypted password with grub2-setpassword: # grub2-setpassword || Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg" + impact: 'If password protection is enabled, only the designated superuser can edit a Grub 2 menu item by pressing "e" or access the GRUB 2 command line by pressing "c" If GRUB 2 is set up to boot automatically to a password-protected menu entry the user has no option to back out of the password prompt to select another menu entry. Holding the SHIFT key will not display the menu in this case. The user must enter the correct username and password. If unable, the configuration files will have to be edited via the LiveCD or other means to fix the problem You can add --unrestricted to the menu entries to allow the system to boot without entering a password. Password will still be required to edit menu items.' + remediation: "Create an encrypted password with grub2-setpassword: # grub2-setpassword Enter password: Confirm password: Run the following command to update the grub2 configuration: # grub2-mkconfig -o \"$(dirname \"$(find /boot -type f \\( -name 'grubenv' -o - name 'grub.conf' -o -name 'grub.cfg' \\) -exec grep -Pl '^\\h*(kernelopts=|linux|kernel)' {} \\;)\")/grub.cfg\"." compliance: - - cis: ["1.5.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.4.1"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'f:/boot/grub2/user.cfg -> r:^GRUB2_PASSWORD\s*=\.+' - # 1.5.3 Single user authentication - - id: 5031 - title: "Ensure authentication required for single user mode" - description: "Single user mode (rescue mode) is used for recovery when the system detects an issue during boot or by manual selection from the bootloader." 
- rationale: "Requiring authentication in single user mode (rescue mode) prevents an unauthorized user from rebooting the system into single user to gain root privileges without credentials." - remediation: "Edit /usr/lib/systemd/system/rescue.service and add/modify the following line: ExecStart=-/usr/lib/systemd/systemd-sulogin-shell rescue Edit /usr/lib/systemd/system/emergency.service and add/modify the following line: ExecStart=-/usr/lib/systemd/systemd-sulogin-shell emergency" + # 1.4.2 Ensure permissions on bootloader config are configured. (Automated) + - id: 5037 + title: "Ensure permissions on bootloader config are configured." + description: "The grub files contain information on boot settings and passwords for unlocking boot options. The grub2 configuration is usually grub.cfg. On newer grub2 systems the encrypted bootloader password is contained in user.cfg. If the system uses UEFI, /boot/efi is a vfat filesystem. The vfat filesystem itself doesn't have the concept of permissions but can be mounted under Linux with whatever permissions desired." + rationale: "Setting the permissions to read and write for root only prevents non-root users from seeing the boot parameters or changing them. Non-root users who read the boot parameters may be able to identify weaknesses in security upon boot and be able to exploit them." + remediation: "Run the following commands to set ownership and permissions on your grub configuration file(s): # [ -f /boot/grub2/grub.cfg ] && chown root:root /boot/grub2/grub.cfg # [ -f /boot/grub2/grub.cfg ] && chmod og-rwx /boot/grub2/grub.cfg # [ -f /boot/grub2/grubenv ] && chown root:root /boot/grub2/grubenv # [ -f /boot/grub2/grubenv ] && chmod og-rwx /boot/grub2/grubenv # [ -f /boot/grub2/user.cfg ] && chown root:root /boot/grub2/user.cfg # [ -f /boot/grub2/user.cfg ] && chmod og-rwx /boot/grub2/user.cfg OR If the system uses UEFI, edit /etc/fstab and add the fmask=0077, uid=0, and gid=0 options: Example: /boot/efi vfat defaults,umask=0027,fmask=0077,uid=0,gid=0 0 0 Note: This may require a re-boot to enable the change." compliance: - - cis: ["1.5.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.4.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - - "f:/usr/lib/systemd/system/rescue.service -> r:ExecStart=-/usr/lib/systemd/systemd-sulogin-shell rescue" - - "f:/usr/lib/systemd/system/emergency.service -> r:ExecStart=-/usr/lib/systemd/systemd-sulogin-shell emergency" + - 'c:stat -L /boot/grub2/grub.cfg -> r:Access:\s*\(0\d00/-\w\w\w------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' + - 'c:stat -L /boot/grub2/grubenv -> r:Access:\s*\(0\d00/-\w\w\w------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' + + # 1.4.3 Ensure authentication is required when booting into rescue mode. (Automated) + - id: 5038 + title: "Ensure authentication is required when booting into rescue mode." + description: "Rescue mode (former single user mode) is used for recovery when the system detects an issue during boot or by manual selection from the bootloader." 
+ rationale: "Requiring authentication in rescue mode (former single user mode) prevents an unauthorized user from rebooting the system into rescue mode to gain root privileges without credentials." + remediation: "The systemd drop-in files must be created if it is necessary to change the default settings: Create the file /etc/systemd/system/rescue.service.d/00-require-auth.conf which contains only the configuration to be overridden: [Service] ExecStart=-/usr/lib/systemd/systemd-sulogin-shell rescue." + compliance: + - cis: ["1.4.3"] + - cis_csc_v8: ["5.2"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["IA.L2-3.5.7"] + - iso_27001-2013: ["A.9.4.3"] + - pci_dss_v4.0: ["2.2.2", "8.3.5", "8.3.6", "8.6.3"] + - soc_2: ["CC6.1"] + condition: any + rules: + - 'f:/systemd-sulogin-shell -> r:ExecStart=-/usr/lib/systemd/systemd-sulogin-shell\s*rescue' + - 'f:/usr/lib/systemd/system/rescue.service -> r:ExecStart=-/usr/lib/systemd/systemd-sulogin-shell\s*rescue' + - 'f:/etc/systemd/system/rescue.service.d -> r:ExecStart=-/usr/lib/systemd/systemd-sulogin-shell\s*rescue' ############################################### - # 1.6 Additional Process Hardening + # 1.5 Additional Process Hardening ############################################### - # 1.6.1 Restrict Core Dumps (Scored) - - id: 5032 - title: "Ensure core dumps are restricted" - description: "A core dump is the memory of an executable program. It is generally used to determine why a program aborted. It can also be used to glean confidential information from a core file.The system provides the ability to set a soft limit for core dumps, but this can be overridden by the user." - rationale: "Setting a hard limit on core dumps prevents users from overriding the soft variable. If core dumps are required, consider setting limits for user groups (see limits.conf(5)). In addition, setting the fs.suid_dumpable variable to 0 will prevent setuid programs from dumping core." - remediation: "Add the following line to /etc/security/limits.conf or a /etc/security/limits.d/* file: * hard core 0. Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: fs.suid_dumpable = 0. Run the following command to set the active kernel parameter: # sysctl -w fs.suid_dumpable=0. If systemd-coredump is installed: edit /etc/systemd/coredump.conf and add/modify the following lines: Storage=none ProcessSizeMax=0 Run the command: # systemctl daemon-reload" + + # 1.5.1 Ensure core dump storage is disabled. (Automated) + - id: 5039 + title: "Ensure core dump storage is disabled." + description: "A core dump is the memory of an executable program. It is generally used to determine why a program aborted. It can also be used to glean confidential information from a core file." + rationale: "A core dump includes a memory image taken at the time the operating system terminates an application. The memory image could contain sensitive data and is generally useful only for developers trying to debug problems." + remediation: "Edit /etc/systemd/coredump.conf and edit or add the following line: Storage=none." 
+ references: + - "https://www.freedesktop.org/software/systemd/man/coredump.conf.html" compliance: - - cis: ["1.6.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.5.1"] condition: all rules: - - 'c:grep -E "^\s*\*\s+hard\s+core" /etc/security/limits.conf /etc/security/limits.d -> r:\s*\t*0$' - - 'c:sysctl fs.suid_dumpable -> r:^fs.suid_dumpable\s*=\s*0\s*$' - - 'c:grep "fs\.suid_dumpable" /etc/sysctl.conf /etc/sysctl.d -> r:^\s*fs.suid_dumpable\s*=\s*0\s*$' + - 'f:/etc/systemd/coredump.conf -> r:^\s*Storage\s*=\s*none' - # 1.6.2 Ensure address space layout randomization (ASLR) is enabled (Scored) - - id: 5033 - title: "Ensure address space layout randomization (ASLR) is enabled" + # 1.5.2 Ensure core dump backtraces are disabled. (Automated) + - id: 5040 + title: "Ensure core dump backtraces are disabled." + description: "A core dump is the memory of an executable program. It is generally used to determine why a program aborted. It can also be used to glean confidential information from a core file." + rationale: "A core dump includes a memory image taken at the time the operating system terminates an application. The memory image could contain sensitive data and is generally useful only for developers trying to debug problems, increasing the risk to the system." + remediation: "Edit or add the following line in /etc/systemd/coredump.conf: ProcessSizeMax=0." + references: + - "https://www.freedesktop.org/software/systemd/man/coredump.conf.html" + compliance: + - cis: ["1.5.2"] + condition: all + rules: + - 'f:/etc/systemd/coredump.conf -> r:^\s*ProcessSizeMax\s*=\s*0' + + # 1.5.3 Ensure address space layout randomization (ASLR) is enabled. (Automated) + - id: 5041 + title: "Ensure address space layout randomization (ASLR) is enabled." description: "Address space layout randomization (ASLR) is an exploit mitigation technique which randomly arranges the address space of key data areas of a process." rationale: "Randomly placing virtual memory regions will make it difficult to write memory page exploits as the memory placement will be consistently shifting." - remediation: "Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: kernel.randomize_va_space = 2 Run the following command to set the active kernel parameter: # sysctl -w kernel.randomize_va_space=2" + remediation: 'Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: Example: # printf " kernel.randomize_va_space = 2 " >> /etc/sysctl.d/60-kernel_sysctl.conf Run the following command to set the active kernel parameter: # sysctl -w kernel.randomize_va_space=2.' compliance: - - cis: ["1.6.2"] - - cis_csc: ["8.3"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.5.3"] + - cis_csc_v8: ["10.5"] + - cis_csc_v7: ["8.3"] + - nist_sp_800-53: ["SI-16"] + - pci_dss_v3.2.1: ["1.4"] + - soc_2: ["CC6.8"] condition: all rules: - 'c:grep -Rh ^kernel\.randomize_va_space /etc/sysctl.conf /etc/sysctl.d -> r:^\s*kernel.randomize_va_space\s*=\s*2$' - 'c:sysctl kernel.randomize_va_space -> r:^\s*kernel.randomize_va_space\s*=\s*2' ############################################### - # 1.7 Configure SELinux + # 1.6 Configure SELinux ############################################### - # 1.7.1.1 Ensure SELinux is installed(Scored) - - id: 5034 - title: "Ensure SELinux is installed" + + # 1.6.1.1 Ensure SELinux is installed. (Automated) + - id: 5042 + title: "Ensure SELinux is installed." 
description: "SELinux provides Mandatory Access Control." rationale: "Without a Mandatory Access Control system installed only the default Discretionary Access Control system will be available." - remediation: "Run the following command to install SELinux : # dnf install libselinux" + remediation: "Run the following command to install SELinux: # dnf install libselinux." compliance: - - cis: ["1.7.1.1"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.6.1.1"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - "c:rpm -q libselinux -> r:libselinux-" - # 1.7.1.2 SELinux not disabled - - id: 5035 - title: "Ensure SELinux is not disabled in bootloader configuration" + # 1.6.1.2 Ensure SELinux is not disabled in bootloader configuration. (Automated) + - id: 5043 + title: "Ensure SELinux is not disabled in bootloader configuration." description: "Configure SELINUX to be enabled at boot time and verify that it has not been overwritten by the grub boot parameters." rationale: "SELinux must be enabled at boot time in your grub configuration to ensure that the controls it provides are not overridden." - remediation: 'Edit /etc/default/grub and remove all instances of selinux=0 and enforcing=0 from all CMDLINE_LINUX parameters: GRUB_CMDLINE_LINUX_DEFAULT="quiet" GRUB_CMDLINE_LINUX="" || Run the following command to update the grub2 configuration: grub2-mkconfig -o /boot/grub2/grub.cfg' - compliance: - - cis: ["1.7.1.2"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + impact: "Files created while SELinux is disabled are not labeled at all. This behavior causes problems when changing to enforcing mode because files are labeled incorrectly or are not labeled at all. To prevent incorrectly labeled and unlabeled files from causing problems, file systems are automatically relabeled when changing from the disabled state to permissive or enforcing mode. This can be a long running process that should be accounted for as it may extend downtime during initial re-boot." + remediation: "Run the following command to remove all instances of selinux=0 and enforcing=0 from all CMDLINE_LINUX parameters: grubby --update-kernel ALL --remove-args 'selinux=0 enforcing=0'." + compliance: + - cis: ["1.6.1.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - 'f:/boot/grub2/grubenv -> r:kernelopts=\.*selinux=0|kernelopts=\.*enforcing=0' - # 1.7.1.3 Set selinux policy - - id: 5036 - title: "Ensure SELinux policy is configured" + # 1.6.1.3 Ensure SELinux policy is configured. (Automated) + - id: 5044 + title: "Ensure SELinux policy is configured." description: "Configure SELinux to meet or exceed the default targeted policy, which constrains daemons and system software only." 
rationale: "Security configuration requirements vary from site to site. Some sites may mandate a policy that is stricter than the default policy, which is perfectly acceptable. This item is intended to ensure that at least the default recommendations are met." - remediation: "Edit the /etc/selinux/config file to set the SELINUXTYPE parameter: SELINUXTYPE=targeted" + remediation: "Edit the /etc/selinux/config file to set the SELINUXTYPE parameter: SELINUXTYPE=targeted." compliance: - - cis: ["1.7.1.3"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.6.1.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:sestatus -> r:^Loaded policy name:\s+targeted$|^Loaded policy name:\s+mls$' - 'f:/etc/selinux/config -> r:^\s*SELINUXTYPE\s*=\s*targeted|^\s*SELINUXTYPE\s*=\s*mls' - # 1.7.1.4 Set selinux state - - id: 5037 - title: "Ensure the SELinux state is enforcing" - description: "Set SELinux to enable when the system is booted." - rationale: "SELinux must be enabled at boot time in to ensure that the controls it provides are in effect at all times." - remediation: "Edit the /etc/selinux/config file to set the SELINUX parameter: SELINUX=enforcing" + # 1.6.1.4 Ensure the SELinux mode is not disabled. (Automated) + - id: 5045 + title: "Ensure the SELinux mode is not disabled." + description: "SELinux can run in one of three modes: disabled, permissive, or enforcing: - Enforcing - Is the default, and recommended, mode of operation; in enforcing mode SELinux operates normally, enforcing the loaded security policy on the entire system. - Permissive - The system acts as if SELinux is enforcing the loaded security policy, including labeling objects and emitting access denial entries in the logs, but it does not actually deny any operations. While not recommended for production systems, permissive mode can be helpful for SELinux policy development. - Disabled - Is strongly discouraged; not only does the system avoid enforcing the SELinux policy, it also avoids labeling any persistent objects such as files, making it difficult to enable SELinux in the future Note: you can set individual domains to permissive mode while the system runs in enforcing mode. For example, to make the httpd_t domain permissive: # semanage permissive -a httpd_t." + rationale: "Running SELinux in disabled mode is strongly discouraged; not only does the system avoid enforcing the SELinux policy, it also avoids labeling any persistent objects such as files, making it difficult to enable SELinux in the future." + remediation: "Run one of the following commands to set SELinux's running mode: To set SELinux mode to Enforcing: # setenforce 1 OR To set SELinux mode to Permissive: # setenforce 0 Edit the /etc/selinux/config file to set the SELINUX parameter: For Enforcing mode: SELINUX=enforcing OR For Permissive mode: SELINUX=permissive." 
+ references: + - "https://access.redhat.com/documentation/en-us/selinux" + compliance: + - cis: ["1.6.1.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - "c:getenforce -> r:^Enforcing$|^Permissive$" + - 'f:/etc/selinux/config -> r:^\s*SELINUX\s*=\s*enforcing|\s*SELINUX\s*=\s*permissive' + + # 1.6.1.5 Ensure the SELinux mode is enforcing. (Automated) + - id: 5046 + title: "Ensure the SELinux mode is enforcing." + description: "SELinux can run in one of three modes: disabled, permissive, or enforcing: - Enforcing - Is the default, and recommended, mode of operation; in enforcing mode SELinux operates normally, enforcing the loaded security policy on the entire system. - Permissive - The system acts as if SELinux is enforcing the loaded security policy, including labeling objects and emitting access denial entries in the logs, but it does not actually deny any operations. While not recommended for production systems, permissive mode can be helpful for SELinux policy development. - Disabled - Is strongly discouraged; not only does the system avoid enforcing the SELinux policy, it also avoids labeling any persistent objects such as files, making it difficult to enable SELinux in the future Note: you can set individual domains to permissive mode while the system runs in enforcing mode. For example, to make the httpd_t domain permissive: # semanage permissive -a httpd_t." + rationale: "Running SELinux in disabled mode the system not only avoids enforcing the SELinux policy, it also avoids labeling any persistent objects such as files, making it difficult to enable SELinux in the future. Running SELinux in Permissive mode, though helpful for developing SELinux policy, only logs access denial entries, but does not deny any operations." + remediation: "Run the following command to set SELinux's running mode: # setenforce 1 Edit the /etc/selinux/config file to set the SELINUX parameter: For Enforcing mode: SELINUX=enforcing." + references: + - "https://access.redhat.com/documentation/en-us/selinux" compliance: - - cis: ["1.7.1.4"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["1.6.1.5"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:sestatus -> r:^SELinux status:\s+enabled$' @@ -700,943 +1050,988 @@ checks: - 'c:sestatus -> r:^Mode from config file:\s+enforcing$' - 'f:/etc/selinux/config -> r:^\s*SELINUX\s*=\s*enforcing' - # 1.7.1.5 Ensure no unconfined services exist (Scored) - - id: 5038 - title: "Ensure no unconfined services exist" - description: "Unconfined processes run in unconfined domains" - rationale: "For unconfined processes, SELinux policy rules are applied, but policy rules exist that allow processes running in unconfined domains almost all access. 
Processes running in unconfined domains fall back to using DAC rules exclusively. If an unconfined process is compromised, SELinux does not prevent an attacker from gaining access to system resources and data, but of course, DAC rules are still used. SELinux is a security enhancement on top of DAC rules – it does not replace them" - remediation: "Investigate any unconfined processes found during the audit action. They may need to have an existing security context assigned to them or a policy built for them. Notes: Occasionally certain daemons such as backup or centralized management software may require running unconfined. Any such software should be carefully analyzed and documented before such an exception is made." - compliance: - - cis: ["1.7.1.5"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + # 1.6.1.6 Ensure no unconfined services exist. (Automated) + - id: 5047 + title: "Ensure no unconfined services exist." + description: "Unconfined processes run in unconfined domains." + rationale: "For unconfined processes, SELinux policy rules are applied, but policy rules exist that allow processes running in unconfined domains almost all access. Processes running in unconfined domains fall back to using DAC rules exclusively. If an unconfined process is compromised, SELinux does not prevent an attacker from gaining access to system resources and data, but of course, DAC rules are still used. SELinux is a security enhancement on top of DAC rules - it does not replace them." + remediation: "Investigate any unconfined processes found during the audit action. They may need to have an existing security context assigned to them or a policy built for them." + compliance: + - cis: ["1.6.1.6"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.13.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - "c:ps -eZ -> r:unconfined_service_t" - # 1.7.1.6 Remove SETroubleshoot - - id: 5039 - title: "Ensure SETroubleshoot is not installed" + # 1.6.1.7 Ensure SETroubleshoot is not installed. (Automated) + - id: 5048 + title: "Ensure SETroubleshoot is not installed." description: "The SETroubleshoot service notifies desktop users of SELinux denials through a user-friendly interface. The service provides important information around configuration errors, unauthorized intrusions, and other potential errors." rationale: "The SETroubleshoot service is an unnecessary daemon to have running on a server, especially if X Windows is disabled." - remediation: "Run the following command to uninstall setroubleshoot: # dnf remove setroubleshoot" - compliance: - - cis: ["1.7.1.6"] - - cis_csc: ["14.6"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + remediation: "Run the following command to uninstall setroubleshoot: # dnf remove setroubleshoot." 
+ compliance: + - cis: ["1.6.1.7"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.9.1.1"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: none rules: - - "c:rpm -qa setroubleshoot -> r:setroubleshoot" + - "c:rpm -q setroubleshoot -> r:setroubleshoot" - # 1.7.1.7 Disable MCS Translation service mcstrans - - id: 5040 - title: "Ensure the MCS Translation Service (mcstrans) is not installed" - description: "The mcstransd daemon provides category label information to client processes requesting information. The label translations are defined in /etc/selinux/targeted/setrans.conf" + # 1.6.1.8 Ensure the MCS Translation Service (mcstrans) is not installed. (Automated) + - id: 5049 + title: "Ensure the MCS Translation Service (mcstrans) is not installed." + description: "The mcstransd daemon provides category label information to client processes requesting information. The label translations are defined in /etc/selinux/targeted/setrans.conf." rationale: "Since this service is not used very often, remove it to reduce the amount of potentially vulnerable code running on the system." - remediation: "Run the following command to uninstall mcstrans: # dnf remove mcstrans" - compliance: - - cis: ["1.7.1.5"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + remediation: "Run the following command to uninstall mcstrans: # dnf remove mcstrans." + compliance: + - cis: ["1.6.1.8"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: none rules: - - "c:rpm -qa mcstrans -> r:mcstrans" + - "c:rpm -q mcstrans -> r:mcstrans" ############################################### - # 1.8 Warning Banners + # 1.7 Warning Banners ############################################### - # 1.8.1.1 Configure message of the day (Scored) - - id: 5041 - title: "Ensure message of the day is configured properly" - description: "The contents of the /etc/motd file are displayed to users after login and function as a message of the day for authenticated users. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version" + + # 1.7.1 Ensure message of the day is configured properly. (Automated) + - id: 5050 + title: "Ensure message of the day is configured properly." + description: "The contents of the /etc/motd file are displayed to users after login and function as a message of the day for authenticated users. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. 
If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version." rationale: 'Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. Displaying OS and patch level information in login banners also has the side effect of providing detailed system information to attackers attempting to target specific exploits of a system. Authorized users can easily get this information by running the " uname -a " command once they have logged in.' - remediation: "Edit the /etc/motd file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform OR If the motd is not used, this file can be removed. Run the following command to remove the motd file: # rm /etc/motd" - compliance: - - cis: ["1.8.1.1"] - - cis_csc: ["5.1"] - - pci_dss: ["7.1"] - - tsc: ["CC6.4"] + remediation: "Edit the /etc/motd file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform OR If the motd is not used, this file can be removed. Run the following command to remove the motd file: # rm /etc/motd." + compliance: + - cis: ["1.7.1"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: none rules: - 'f:/etc/motd -> r:\\v|\\r|\\m|\\s' - # 1.8.1.2 Configure local login warning banner (Scored) - - id: 5042 - title: "Ensure local login warning banner is configured properly" - description: "The contents of the /etc/issue file are displayed to users prior to login for local terminals. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version" + # 1.7.2 Ensure local login warning banner is configured properly. (Automated) + - id: 5051 + title: "Ensure local login warning banner is configured properly." + description: "The contents of the /etc/issue file are displayed to users prior to login for local terminals. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version - or the operating system's name." 
rationale: 'Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. Displaying OS and patch level information in login banners also has the side effect of providing detailed system information to attackers attempting to target specific exploits of a system. Authorized users can easily get this information by running the " uname -a " command once they have logged in.' - remediation: "Edit the /etc/issue file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform: # echo \"Authorized uses only. All activity may be monitored and reported.\" > /etc/issue" - compliance: - - cis: ["1.8.1.2"] - - cis_csc: ["5.1"] - - pci_dss: ["7.1"] - - tsc: ["CC6.4"] + remediation: "Edit the /etc/issue file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform # echo \"Authorized uses only. All activity may be monitored and reported.\" > /etc/issue." + compliance: + - cis: ["1.7.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: none rules: - 'f:/etc/issue -> r:\\v|\\r|\\m|\\s' - # 1.8.1.3 Configure remote login warning banner (Scored) - - id: 5043 - title: "Ensure remote login warning banner is configured properly" - description: "The contents of the /etc/issue.net file are displayed to users prior to login for remote connections from configured services. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version" + # 1.7.3 Ensure remote login warning banner is configured properly. (Automated) + - id: 5052 + title: "Ensure remote login warning banner is configured properly." + description: "The contents of the /etc/issue.net file are displayed to users prior to login for remote connections from configured services. Unix-based systems have typically displayed information about the OS release and patch level upon logging in to the system. This information can be useful to developers who are developing software for a particular OS platform. If mingetty(8) supports the following options, they display operating system information: \\m - machine architecture \\r - operating system release \\s - operating system name \\v - operating system version." rationale: 'Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. 
Displaying OS and patch level information in login banners also has the side effect of providing detailed system information to attackers attempting to target specific exploits of a system. Authorized users can easily get this information by running the " uname -a " command once they have logged in.' - remediation: "Edit the /etc/issue.net file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform: # echo \"Authorized uses only. All activity may be monitored and reported.\" > /etc/issue.net" - compliance: - - cis: ["1.8.1.3"] - - cis_csc: ["5.1"] - - pci_dss: ["7.1"] - - tsc: ["CC6.4"] + remediation: "Edit the /etc/issue.net file with the appropriate contents according to your site policy, remove any instances of \\m , \\r , \\s , \\v or references to the OS platform # echo \"Authorized uses only. All activity may be monitored and reported.\" > /etc/issue.net." + compliance: + - cis: ["1.7.3"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: none rules: - 'f:/etc/issue.net -> r:\\v|\\r|\\m|\\s' - # 1.8.1.4 Configure /etc/motd permissions (Scored) - - id: 5044 - title: "Ensure permissions on /etc/motd are configured" + # 1.7.4 Ensure permissions on /etc/motd are configured. (Automated) + - id: 5053 + title: "Ensure permissions on /etc/motd are configured." description: "The contents of the /etc/motd file are displayed to users after login and function as a message of the day for authenticated users." rationale: "If the /etc/motd file does not have the correct ownership it could be modified by unauthorized users with incorrect or misleading information." - remediation: "Run the following commands to set permissions on /etc/motd: # chown root:root /etc/motd # chmod 644 /etc/motd" + remediation: "Run the following commands to set permissions on /etc/motd : # chown root:root /etc/motd # chmod u-x,go-wx /etc/motd." compliance: - - cis: ["1.8.1.4"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["1.7.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/motd -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 1.8.1.5 Configure /etc/issue permissions (Scored) - - id: 5045 - title: "Ensure permissions on /etc/issue are configured" + # 1.7.5 Ensure permissions on /etc/issue are configured. (Automated) + - id: 5054 + title: "Ensure permissions on /etc/issue are configured." description: "The contents of the /etc/issue file are displayed to users prior to login for local terminals." 
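The /etc/motd permission rule above, and the matching /etc/issue and /etc/issue.net rules that follow, parse stat -L output for an exact 0644 mode and root:root ownership. The same facts can be asserted directly with os.stat, as in this illustrative sketch (the helper name is an assumption):

import os
import stat

def owned_by_root_0644(path: str) -> bool:
    # Assert the same facts the 'stat -L' rules encode: mode 0644, uid 0, gid 0.
    st = os.stat(path)  # os.stat follows symlinks, like stat -L
    return (stat.S_IMODE(st.st_mode) == 0o644
            and st.st_uid == 0
            and st.st_gid == 0)

if __name__ == "__main__":
    for f in ("/etc/motd", "/etc/issue", "/etc/issue.net"):
        try:
            print(f, "ok" if owned_by_root_0644(f) else "FAIL")
        except FileNotFoundError:
            print(f, "missing")

Note that the rules require exactly 0644; a more restrictive mode set by the chmod u-x,go-wx remediation would need the comparison relaxed accordingly.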
rationale: "If the /etc/issue file does not have the correct ownership it could be modified by unauthorized users with incorrect or misleading information." - remediation: "Run the following commands to set permissions on /etc/issue: # chown root:root /etc/issue # chmod 644 /etc/issue" + remediation: "Run the following commands to set permissions on /etc/issue : # chown root:root /etc/issue # chmod u-x,go-wx /etc/issue." compliance: - - cis: ["1.8.1.5"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["1.7.5"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/issue -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 1.8.1.6 Configure /etc/issue.net permissions (Scored) - - id: 5046 - title: "Ensure permissions on /etc/issue.net are configured" + # 1.7.6 Ensure permissions on /etc/issue.net are configured. (Automated) + - id: 5055 + title: "Ensure permissions on /etc/issue.net are configured." description: "The contents of the /etc/issue.net file are displayed to users prior to login for remote connections from configured services." rationale: "If the /etc/issue.net file does not have the correct ownership it could be modified by unauthorized users with incorrect or misleading information." - remediation: "Run the following commands to set permissions on /etc/issue.net: # chown root:root /etc/issue.net # chmod 644 /etc/issue.net" + remediation: "Run the following commands to set permissions on /etc/issue.net : # chown root:root /etc/issue.net # chmod u-x,go-wx /etc/issue.net." compliance: - - cis: ["1.8.1.6"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["1.7.6"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/issue.net -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 1.8.2 Ensure GDM login banner is configured (Scored) - - id: 5047 - title: "Ensure GDM login banner is configured" + # 1.8.1 Ensure GNOME Display Manager is removed. (Manual) + - id: 5056 + title: "Ensure GNOME Display Manager is removed." + description: "The GNOME Display Manager (GDM) is a program that manages graphical display servers and handles graphical user logins." + rationale: "If a Graphical User Interface (GUI) is not required, it should be removed to reduce the attack surface of the system." + impact: "Removing the GNOME Display manager will remove the GUI from the system." 
+ remediation: "Run the following command to remove the gdm package # dnf remove gdm." + references: + - "https://wiki.gnome.org/Projects/GDM" + compliance: + - cis: ["1.8.1"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - "c:rpm -q gdm -> r:is not installed" + + # 1.8.2 Ensure GDM login banner is configured. (Automated) + - id: 5057 + title: "Ensure GDM login banner is configured." description: "GDM is the GNOME Display Manager which handles graphical login for GNOME based systems." - rationale: "Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place." - remediation: "Edit or create the file /etc/gdm3/greeter.dconf-defaults and add the following in 3 lines: (1) [org/gnome/login-screen] (2) banner-message-enable=true (3) banner-message-text='Authorized uses only. All activity may be monitored and reported.'" + rationale: "Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. Note: If a graphical login is not required, it should be removed to reduce the attack surface of the system." + remediation: "Edit or create the file /etc/dconf/profile/gdm and add the following: user-db:user system-db:gdm file-db:/usr/share/gdm/greeter-dconf-defaults Edit or create the file /etc/dconf/db/gdm.d/ and add the following: (This is typically /etc/dconf/db/gdm.d/01-banner-message) [org/gnome/login-screen] banner-message-enable=true banner-message-text='' Example Banner Text: 'Authorized users only. All activity may be monitored and reported.' Run the following command to update the system databases: # dconf update." compliance: - cis: ["1.8.2"] - - cis_csc: ["5.1"] - - pci_dss: ["7.1"] - - tsc: ["CC6.4"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all + rules: + - "f:/etc/dconf/profile/gdm -> r:user-db:user && r:system-db:gdm && r:file-db:/usr/share/gdm/greeter-dconf-defaults" + - "f:/etc/dconf/db/gdm.d/01-banner-message -> r:[org/gnome/login-screen]" + - "f:/etc/dconf/db/gdm.d/01-banner-message -> r:banner-message-enable=true" + - "f:/etc/dconf/db/gdm.d/01-banner-message -> r:banner-message-text=" + + # 1.8.3 Ensure last logged in user display is disabled. (Automated) + - id: 5058 + title: "Ensure last logged in user display is disabled." + description: "GDM is the GNOME Display Manager which handles graphical login for GNOME based systems." + rationale: "Displaying the last logged in user eliminates half of the Userid/Password equation that an unauthorized person would need to log on. 
Warning messages inform users who are attempting to login to the system of their legal status regarding the system and must include the name of the organization that owns the system and any monitoring policies that are in place. Notes: - - If a graphical login is not required, it should be removed to reduce the attack surface of the system. If a different GUI login service is in use and required on the system, consult your documentation to disable displaying the last logged on user." + remediation: "Edit or create the file /etc/dconf/profile/gdm and add the following: user-db:user system-db:gdm file-db:/usr/share/gdm/greeter-dconf-defaults Edit or create the file /etc/dconf/db/gdm.d/ and add the following: (This is typically /etc/dconf/db/gdm.d/00-login-screen) [org/gnome/login-screen] # Do not show the user list disable-user-list=true Run the following command to update the system databases: # dconf update." + compliance: + - cis: ["1.8.3"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - "f:/etc/gdm3/greeter.dconf-defaults -> r:[org/gnome/login-screen]" - - "f:/etc/gdm3/greeter.dconf-defaults -> r:banner-message-enable=true" - - "f:/etc/gdm3/greeter.dconf-defaults -> r:banner-message-text=" + - "f:/etc/dconf/profile/gdm -> r:user-db:user && r:system-db:gdm && r:file-db:/usr/share/gdm/greeter-dconf-defaults" + - "f:/etc/dconf/db/gdm.d/01-banner-message -> r:[org/gnome/login-screen]" + - "f:/etc/dconf/db/gdm.d/01-banner-message -> r:disable-user-list=true" - # 1.9 Ensure updates, patches, and additional security software are installed(Scored) - - id: 5048 - title: "Ensure updates, patches, and additional security software are installed" + # 1.8.4 Ensure XDMCP is not enabled. (Automated) + - id: 5059 + title: "Ensure XDMCP is not enabled." + description: "X Display Manager Control Protocol (XDMCP) is designed to provide authenticated access to display management services for remote displays." + rationale: "XDMCP is inherently insecure. - XDMCP is not a ciphered protocol. This may allow an attacker to capture keystrokes entered by a user - XDMCP is vulnerable to man-in-the-middle attacks. This may allow an attacker to steal the credentials of legitimate users by impersonating the XDMCP server." + remediation: "Edit the file /etc/gdm/custom.conf and remove the line Enable=true." + references: + - "https://help.gnome.org/admin/gdm/2.32/configuration.html.en" + compliance: + - cis: ["1.8.4"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: none + rules: + - 'f:/etc/gdm/custom.conf -> r:^\s*Enable\s*=\s*true' + + # 1.8.5 Ensure automatic mounting of removable media is disabled. (Automated) + - id: 5060 + title: "Ensure automatic mounting of removable media is disabled." + description: "By default GNOME automatically mounts removable media when inserted as a convenience to the user." 
+ rationale: "With automounting enabled anyone with physical access could attach a USB drive or disc and have its contents available in system even if they lacked permissions to mount it themselves." + impact: "The use of portable hard drives is very common for workstation users. If your organization allows the use of portable storage or media on workstations and physical access controls to workstations is considered adequate there is little value add in turning off automounting." + remediation: "Ensure that automatic mounting of media is disabled for all GNOME users: # cat << EOF >> /etc/dconf/db/local.d/00-media-automount [org/gnome/desktop/media-handling] automount=false automount-open=false EOF Apply the changes with: # dconf update." + references: + - "https://access.redhat.com/solutions/20107" + compliance: + - cis: ["1.8.5"] + - cis_csc_v8: ["10.3"] + - cis_csc_v7: ["8.5"] + - cmmc_v2.0: ["MP.L2-3.8.7"] + - hipaa: ["164.310(d)(1)"] + - iso_27001-2013: ["A.12.2.1"] + condition: all + rules: + - "c:gsettings get org.gnome.desktop.media-handling automount -> r:false" + + # 1.9 Ensure updates, patches, and additional security software are installed. (Manual) + - id: 5061 + title: "Ensure updates, patches, and additional security software are installed." description: "Periodically patches are released for included software either due to security flaws or to include additional functionality." rationale: "Newer patches may contain security enhancements that would not be available through the latest full update. As a result, it is recommended that the latest software patches be used to take advantage of the latest functionality. As with any software installation, organizations need to determine if a given update meets their requirements and verify the compatibility and supportability of any additional software against the update revision that is selected." - remediation: "Use your package manager to update all packages on the system according to site policy. The following command will install all available security updates: # dnf update --security . Site policy may mandate a testing period before install onto production systems for available updates. The audit and remediation here only cover security updates. Non-security updates can be audited with and comparing against site policy: # dnf check-update" + remediation: "Use your package manager to update all packages on the system according to site policy. The following command will install all available updates: # dnf update." compliance: - cis: ["1.9"] - - cis_csc: ["3.4"] - - pci_dss: ["5.2"] - - nist_800_53: ["AU.6", "SI.4"] - - gpg_13: ["4.2"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["A1.2"] + - cis_csc_v8: ["7.3", "7.4"] + - cis_csc_v7: ["3.4"] + - cmmc_v2.0: ["SI.L1-3.14.1"] + - nist_sp_800-53: ["SI-2(2)"] + - pci_dss_v3.2.1: ["6.2"] + - soc_2: ["CC7.1"] condition: all rules: - - "c:yum check-update --security -> r:No packages needed for security" + - 'not c:sh -c "dnf check-update | egrep -v \"Updating|Last metadata|^$\"" -> r:^\w' - # 1.10 Ensure system-wide crypto policy is not legacy (Scored) - - id: 5049 - title: "Ensure system-wide crypto policy is not legacy" + # 1.10 Ensure system-wide crypto policy is not legacy. (Automated) + - id: 5062 + title: "Ensure system-wide crypto policy is not legacy." description: "The system-wide crypto-policies followed by the crypto core components allow consistently deprecating and disabling algorithms system-wide. 
The individual policy levels (DEFAULT, LEGACY, FUTURE, and FIPS) are included in the crypto-policies(7) package." - rationale: "If the Legacy system-wide crypto policy is selected, it includes support for TLS 1.0, TLS 1.1, and SSH2 protocols or later. The algorithms DSA, 3DES, and RC4 are allowed, while RSA and Diffie-Hellman parameters are accepted if larger than 1023-bits. These legacy protocols and algorithms can make the system vulnerable to attacks, including those listed in RFC 7457" - remediation: "Run the following command to change the system-wide crypto policy # update-crypto-policies --set Example: # update-crypto-policies --set DEFAULT Run the following to make the updated system-wide crypto policy active # update-crypto-policies" - compliance: - - cis: ["1.10"] - - cis_csc: ["14.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + rationale: "If the Legacy system-wide crypto policy is selected, it includes support for TLS 1.0, TLS 1.1, and SSH2 protocols or later. The algorithms DSA, 3DES, and RC4 are allowed, while RSA and Diffie-Hellman parameters are accepted if larger than 1023-bits. These legacy protocols and algorithms can make the system vulnerable to attacks, including those listed in RFC 7457." + impact: "Environments that require compatibility with older insecure protocols may require the use of the less secure LEGACY policy level." + remediation: "Run the following command to change the system-wide crypto policy # update-crypto-policies --set Example: # update-crypto-policies --set DEFAULT Run the following to make the updated system-wide crypto policy active # update-crypto-policies." references: - "https://access.redhat.com/articles/3642912#what-polices-are-provided-1" + compliance: + - cis: ["1.10"] + - cis_csc_v8: ["3.10"] + - cis_csc_v7: ["14.4"] + - cmmc_v2.0: ["AC.L2-3.1.13", "AC.L2-3.1.17", "IA.L2-3.5.10", "SC.L2-3.13.11", "SC.L2-3.13.15", "SC.L2-3.13.8"] + - hipaa: ["164.312(a)(2)(iv)", "164.312(e)(1)", "164.312(e)(2)(i)", "164.312(e)(2)(ii)"] + - iso_27001-2013: ["A.10.1.1", "A.13.1.1"] + - nist_sp_800-53: ["AC-17(2)", "SC-8", "SC-8(1)"] + - pci_dss_v3.2.1: ["2.1.1", "4.1", "4.1.1", "8.2.1"] + - pci_dss_v4.0: ["2.2.7", "4.1.1", "4.2.1", "4.2.1.2", "4.2.2", "8.3.2"] condition: none rules: - 'f:/etc/crypto-policies/config -> r:^\s*LEGACY' - # 1.11 Ensure system-wide crypto policy is FUTURE or FIPS (Scored) - - id: 5050 - title: "Ensure system-wide crypto policy is FUTURE or FIPS" - description: "The system-wide crypto-policies followed by the crypto core components allow consistently deprecating and disabling algorithms system-wide. The individual policy levels (DEFAULT, LEGACY, FUTURE, and FIPS) are included in the crypto-policies(7) package." - rationale: "If the Legacy system-wide crypto policy is selected, it includes support for TLS 1.0, TLS 1.1, and SSH2 protocols or later. The algorithms DSA, 3DES, and RC4 are allowed, while RSA and Diffie-Hellman parameters are accepted if larger than 1023-bits. These legacy protocols and algorithms can make the system vulnerable to attacks, including those listed in RFC 7457 FUTURE: Is a conservative security level that is believed to withstand any near-term future attacks. This level does not allow the use of SHA-1 in signature algorithms. The RSA and Diffie-Hellman parameters are accepted if larger than 3071 bits. The level provides at least 128-bit security FIPS: Conforms to the FIPS 140-2 requirements. 
This policy is used internally by the fips-mode-setup(8) tool which can switch the system into the FIPS 140-2 compliance mode. The level provides at least 112-bit security" - remediation: "Run the following command to change the system-wide crypto policy # update-crypto-policies --set FUTURE OR To switch the system to FIPS mode, run the following command: # fips-mode-setup --enable" - compliance: - - cis: ["1.11"] - - cis_csc: ["14.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'f:/etc/crypto-policies/config -> r:^\s*FUTURE|^\s*FIPS' - ############################################### - # 2 OS Services + # 2.1 Remove Legacy Services ############################################### - ############################################### - # 2.1 inetd Services - ############################################### - # 2.1.1 Ensure xinetd is not installed (Scored) - - id: 5051 - title: "Ensure xinetd is not installed" - description: "The eXtended InterNET Daemon ( xinetd ) is an open source super daemon that replaced the original inetd daemon. The xinetd daemon listens for well known services and dispatches the appropriate daemon to properly respond to service requests." - rationale: "If there are no xinetd services required, it is recommended that the daemon be disabled." - remediation: "Run the following command to remove xinetd: # dnf remove xinetd" + + # 2.1.1 Ensure time synchronization is in use. (Automated) + - id: 5063 + title: "Ensure time synchronization is in use." + description: "System time should be synchronized between all systems in an environment. This is typically done by establishing an authoritative time server or set of servers and having all systems synchronize their clocks to them. Note: If another method for time synchronization is being used, this section may be skipped." + rationale: "Time synchronization is important to support time sensitive security mechanisms like Kerberos and also ensures log files have consistent time records across the enterprise, which aids in forensic investigations." + remediation: "Run the following command to install chrony: # dnf install chrony." compliance: - cis: ["2.1.1"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["8.4"] + - cis_csc_v7: ["6.1"] + - cmmc_v2.0: ["AU.L2-3.3.7"] + - iso_27001-2013: ["A.12.4.4"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.4"] + - pci_dss_v4.0: ["10.6", "10.6.1", "10.6.2", "10.6.3"] + - soc_2: ["CC4.1", "CC5.2"] condition: all rules: - - "c:rpm -q xinetd -> r:^package xinetd is not installed" - - ############################################### - # 2.2 Remove Legacy Services - ############################################### + - "c:rpm -q chrony -> r:^chrony-" - # 2.2.1.1 Ensure time synchronization is in use (Not Scored) - - id: 5052 - title: "Ensure time synchronization is in use" - description: "System time should be synchronized between all systems in an environment. This is typically done by establishing an authoritative time server or set of servers and having all systems synchronize their clocks to them." - rationale: "Time synchronization is important to support time sensitive security mechanisms like Kerberos and also ensures log files have consistent time records across the enterprise, which aids in forensic investigations." 
- remediation: "On physical systems or virtual systems where host based time synchronization is not available install chrony: # dnf install chrony On virtual systems where host based time synchronization is available consult your virtualization software documentation and verify that host based synchronization is in use." + # 2.1.2 Ensure chrony is configured. (Automated) + - id: 5064 + title: "Ensure chrony is configured." + description: "chrony is a daemon which implements the Network Time Protocol (NTP) and is designed to synchronize system clocks across a variety of systems and use a source that is highly accurate. More information on chrony can be found at http://chrony.tuxfamily.org/. chrony can be configured to be a client and/or a server." + rationale: "If chrony is in use on the system proper configuration is vital to ensuring time synchronization is working properly." + remediation: 'Add or edit server or pool lines to /etc/chrony.conf as appropriate: server Add or edit the OPTIONS in /etc/sysconfig/chronyd to include ''-u chrony'': OPTIONS="-u chrony".' compliance: - - cis: ["2.2.2.1"] - - cis_csc: ["6.1"] - - pci_dss: ["10.4"] - - nist_800_53: ["AU.8"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["2.1.2"] + - cis_csc_v8: ["8.4"] + - cis_csc_v7: ["6.1"] + - cmmc_v2.0: ["AU.L2-3.3.7"] + - iso_27001-2013: ["A.12.4.4"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.4"] + - pci_dss_v4.0: ["10.6", "10.6.1", "10.6.2", "10.6.3"] + - soc_2: ["CC4.1", "CC5.2"] condition: all rules: - - "c:rpm -q chrony -> r:^chrony-" + - "f:/etc/chrony.conf" + - 'f:/etc/chrony.conf -> r:^\s*\t*server|^\s*\t*pool' + - 'f:/etc/sysconfig/chronyd -> r:^\s*\t*OPTIONS\.*-u chrony' - # 2.2.1.2 Configure Network Time Protocol (Chrony) (Scored) - - id: 5053 - title: "Ensure chrony is configured" - description: "chrony is a daemon which implements the Network Time Protocol (NTP). It is designed to synchronize system clocks across a variety of systems and use a source that is highly accurate. More information on NTP can be found at https://www.ntp.org. ntp can be configured to be a client and/or a server." - rationale: "If chrony is in use on the system proper configuration is vital to ensuring time synchronization is working properly." - remediation: "Add or edit server or pool lines to /etc/chrony.conf as appropriate: server Configure chrony to run as the chrony user" + # 2.2.1 Ensure xinetd is not installed. (Automated) + - id: 5065 + title: "Ensure xinetd is not installed." + description: "The eXtended InterNET Daemon (xinetd) is an open source super daemon that replaced the original inetd daemon. The xinetd daemon listens for well known services and dispatches the appropriate daemon to properly respond to service requests." + rationale: "If there are no xinetd services required, it is recommended that the package be removed to reduce the attack surface are of the system. Note: If an xinetd service or services are required, ensure that any xinetd service not required is stopped and disabled." + remediation: "Run the following command to remove xinetd: # dnf remove xinetd." 
compliance: - - cis: ["2.2.1.2"] - - cis_csc: ["6.1"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - "http://chrony.tuxfamily.org/" + - cis: ["2.2.1"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6", "9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2", "A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'f:/etc/chrony.conf -> r:^server\s*\t*\.+|^pool\s*\t*\.+' - - 'not c:ps -ef -> r:\.+/chronyd\s*\t*$ && !r:^\s*\t*chrony\s*\t*' + - "c:rpm -q xinetd -> r:^package xinetd is not installed" - # 2.2.2 Ensure X Window System is not installed (Scored) - - id: 5054 - title: "Ensure X Window System is not installed" + # 2.2.2 Ensure xorg-x11-server-common is not installed. (Automated) + - id: 5066 + title: "Ensure xorg-x11-server-common is not installed." description: "The X Window System provides a Graphical User Interface (GUI) where users can have multiple windows in which to run programs and various add on. The X Windows system is typically used on workstations where users login, but not on servers where users typically do not login." rationale: "Unless your organization specifically requires graphical login access via X Windows, remove it to reduce the potential attack surface." - remediation: "Run the following command to remove the X Windows System packages: # dnf remove xorg-x11*" + impact: 'Many Linux systems run applications which require a Java runtime. Some Linux Java packages have a dependency on specific X Windows xorg-x11-fonts. One workaround to avoid this dependency is to use the "headless" Java packages for your specific Java runtime.' + remediation: "Run the following command to remove the X Windows Server packages: # dnf remove xorg-x11-server-common." compliance: - cis: ["2.2.2"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "c:rpm -qa xorg-x11* -> r:^xorg-x11" + - "c:rpm -q xorg-x11-server-common -> r:^package xorg-x11-server-common is not installed" - # 2.2.3 Remove rsync service (Scored) - - id: 5055 - title: "Ensure rsync service is not enabled" - description: "The rsyncd service can be used to synchronize files between systems over network links." - rationale: "The rsyncd service presents a security risk as it uses unencrypted protocols for communication." - remediation: "Run the following command to disable rsync: # systemctl --now disable rsyncd" + # 2.2.3 Ensure Avahi Server is not installed. (Automated) + - id: 5067 + title: "Ensure Avahi Server is not installed." + description: "Avahi is a free zeroconf implementation, including a system for multicast DNS/DNS-SD service discovery. Avahi allows programs to publish and discover services and hosts running on a local network with no specific configuration. For example, a user can plug a computer into a network and Avahi automatically finds printers to print to, files to look at and people to talk to, as well as network services running on the machine." 
+ rationale: "Automatic discovery of network services is not normally required for system functionality. It is recommended to remove this package to reduce the potential attack surface." + remediation: "Run the following commands to stop, mask and remove avahi-autoipd and avahi: # systemctl stop avahi-daemon.socket avahi-daemon.service # dnf remove avahi-autoipd avahi." compliance: - cis: ["2.2.3"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:rsyncd" + - "c:rpm -q avahi -> r:^package avahi is not installed" - # 2.2.4 Disable Avahi Server (Scored) - - id: 5056 - title: "Ensure Avahi Server is not enabled" - description: "Avahi is a free zeroconf implementation, including a system for multicast DNS/DNS-SD service discovery. Avahi allows programs to publish and discover services and hosts running on a local network with no specific configuration. For example, a user can plug a computer into a network and Avahi automatically finds printers to print to, files to look at and people to talk to, as well as network services running on the machine." - rationale: "Automatic discovery of network services is not normally required for system functionality. It is recommended to disable the service to reduce the potential attack surface." - remediation: "Run the following command to disable avahi-daemon: # systemctl --now disable avahi-daemon" + # 2.2.4 Ensure CUPS is not installed. (Automated) + - id: 5068 + title: "Ensure CUPS is not installed." + description: "The Common Unix Print System (CUPS) provides the ability to print to both local and network printers. A system running CUPS can also accept print jobs from remote systems and print them to local printers. It also provides a web based remote administration capability." + rationale: "If the system does not need to print jobs or accept print jobs from other systems, it is recommended that CUPS be removed to reduce the potential attack surface. Note: Removing CUPS will prevent printing from the system." + impact: "Disabling CUPS will prevent printing from the system, a common task for workstation systems." + remediation: "Run the following command to remove cups: # dnf remove cups." + references: + - "http://www.cups.org." compliance: - cis: ["2.2.4"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:avahi-daemon" + - "c:rpm -q cups -> r:^package cups is not installed" - # 2.2.5 Remove SNMP Server (Scored) - - id: 5057 - title: "Ensure SNMP Server is not enabled" - description: "The Simple Network Management Protocol (SNMP) server is used to listen for SNMP commands from an SNMP management system, execute the commands or collect the information and then send results back to the requesting system." 
- rationale: "The SNMP server can communicate using SNMP v1, which transmits data in the clear and does not require authentication to execute commands. Unless absolutely necessary, it is recommended that the SNMP service not be used. If SNMP is required the server should be configured to disallow SNMP v1." - remediation: "Run the following command to disable snmpd: # systemctl --now disable snmpd" + # 2.2.5 Ensure DHCP Server is not installed. (Automated) + - id: 5069 + title: "Ensure DHCP Server is not installed." + description: "The Dynamic Host Configuration Protocol (DHCP) is a service that allows machines to be dynamically assigned IP addresses." + rationale: "Unless a system is specifically set up to act as a DHCP server, it is recommended that the rpm -q dhcp-server package be removed to reduce the potential attack surface." + remediation: "Run the following command to remove dhcp: # dnf remove dhcp-server." compliance: - cis: ["2.2.5"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:snmpd" + - "c:rpm -q dhcp-server -> r:^package dhcp-server is not installed" - # 2.2.6 Remove HTTP Proxy Server (Scored) - - id: 5058 - title: "Ensure HTTP Proxy Server is not enabled" - description: "Squid is a standard proxy server used in many distributions and environments." - rationale: "If there is no need for a proxy server, it is recommended that the squid proxy be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable squid: # systemctl --now disable squid" + # 2.2.6 Ensure DNS Server is not installed. (Automated) + - id: 5070 + title: "Ensure DNS Server is not installed." + description: "The Domain Name System (DNS) is a hierarchical naming system that maps names to IP addresses for computers, services and other resources connected to a network." + rationale: "Unless a system is specifically designated to act as a DNS server, it is recommended that the package be removed to reduce the potential attack surface." + remediation: "Run the following command to remove bind: # dnf remove bind." compliance: - cis: ["2.2.6"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:squid" + - "c:rpm -q bind -> r:^package bind is not installed" - # 2.2.7 Remove Samba (Scored) - - id: 5059 - title: "Ensure Samba is not enabled" - description: "The Samba daemon allows system administrators to configure their Linux systems to share file systems and directories with Windows desktops. Samba will advertise the file systems and directories via the Small Message Block (SMB) protocol. Windows desktop users will be able to mount these directories and file systems as letter drives on their systems." 
- rationale: "If there is no need to mount directories and file systems to Windows systems, then this service can be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable smb: # systemctl --now disable smb" + # 2.2.7 Ensure FTP Server is not installed. (Automated) + - id: 5071 + title: "Ensure FTP Server is not installed." + description: "FTP (File Transfer Protocol) is a traditional and widely used standard tool for transferring files between a server and clients over a network, especially where no authentication is necessary (permits anonymous users to connect to a server)." + rationale: "FTP does not protect the confidentiality of data or authentication credentials. It is recommended SFTP be used if file transfer is required. Unless there is a need to run the system as a FTP server (for example, to allow anonymous downloads), it is recommended that the package be removed to reduce the potential attack surface." + remediation: "Run the following command to remove ftp: # dnf remove ftp." compliance: - cis: ["2.2.7"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:smb" + - "c:rpm -q ftpd -> r:^package ftpd is not installed" - # 2.2.8 Remove Dovecot (IMAP and POP3 services) (Scored) - - id: 5060 - title: "Ensure IMAP and POP3 server is not enabled" - description: "dovecot is an open source IMAP and POP3 server for Linux based systems." - rationale: "Unless POP3 and/or IMAP servers are to be provided by this system, it is recommended that the service be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable dovecot: # systemctl --now disable dovecot" + # 2.2.8 Ensure VSFTP Server is not installed. (Automated) + - id: 5072 + title: "Ensure VSFTP Server is not installed." + description: "FTP (File Transfer Protocol) is a traditional and widely used standard tool for transferring files between a server and clients over a network, especially where no authentication is necessary (permits anonymous users to connect to a server)." + rationale: "Unless there is a need to run the system as an FTP server, it is recommended that the package be removed to reduce the potential attack surface." + remediation: "Run the following command to remove vsftpd: # dnf remove vsftpd." compliance: - cis: ["2.2.8"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:dovecot" + - "c:rpm -q vsftpd -> r:^package vsftpd is not installed" - # 2.2.9 Remove HTTP Server (Scored) - - id: 5061 - title: "Ensure HTTP server is not enabled" - description: "HTTP or web servers provide the ability to host web site content." - rationale: "Unless there is a need to run the system as a web server, it is recommended that the service be disabled to reduce the potential attack surface." 
- remediation: "Run the following command to disable httpd: # systemctl --now disable httpd" + # 2.2.9 Ensure TFTP Server is not installed. (Automated) + - id: 5073 + title: "Ensure TFTP Server is not installed." + description: "Trivial File Transfer Protocol (TFTP) is a simple protocol for exchanging files between two TCP/IP machines. TFTP servers allow connections from a TFTP Client for sending and receiving files." + rationale: "TFTP does not have built-in encryption, access control or authentication. This makes it very easy for an attacker to exploit TFTP to gain access to files." + remediation: "Run the following command to remove tftp-server: # dnf remove tftp-server." compliance: - cis: ["2.2.9"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:httpd" + - "c:rpm -q tftp-server -> r:^package tftp-server is not installed" - # 2.2.10 Remove FTP Server (Scored) - - id: 5062 - title: "Ensure FTP Server is not enabled" - description: "The File Transfer Protocol (FTP) provides networked computers with the ability to transfer files." - rationale: "FTP does not protect the confidentiality of data or authentication credentials. It is recommended sftp be used if file transfer is required. Unless there is a need to run the system as a FTP server (for example, to allow anonymous downloads), it is recommended that the service be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable vsftpd: # systemctl --now disable vsftpd" + # 2.2.10 Ensure a web server is not installed. (Automated) + - id: 5074 + title: "Ensure a web server is not installed." + description: "Web servers provide the ability to host web site content." + rationale: "Unless there is a need to run the system as a web server, it is recommended that the packages be removed to reduce the potential attack surface. Note: Several http servers exist. They should also be audited, and removed, if not required." + remediation: "Run the following command to remove httpd and nginx: # dnf remove httpd nginx." compliance: - cis: ["2.2.10"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:vsftpd" + - "c:rpm -q nginx -> r:^package nginx is not installed" + - "c:rpm -q httpd -> r:^package httpd is not installed" - # 2.2.11 Ensure DNS Server is not enabled (Scored) - - id: 5063 - title: "Ensure DNS Server is not enabled" - description: "The Domain Name System (DNS) is a hierarchical naming system that maps names to IP addresses for computers, services and other resources connected to a network." - rationale: "Unless a system is specifically designated to act as a DNS server, it is recommended that the service be disabled to reduce the potential attack surface." 
- remediation: "Run the following command to disable named : # systemctl --now disable named" + # 2.2.11 Ensure IMAP and POP3 server is not installed. (Automated) + - id: 5075 + title: "Ensure IMAP and POP3 server is not installed." + description: "dovecot is an open source IMAP and POP3 server for Linux based systems." + rationale: "Unless POP3 and/or IMAP servers are to be provided by this system, it is recommended that the package be removed to reduce the potential attack surface. Note: Several IMAP/POP3 servers exist and can use other service names. These should also be audited and the packages removed if not required." + remediation: "Run the following command to remove dovecot and cyrus-imapd: # dnf remove dovecot cyrus-imapd." compliance: - cis: ["2.2.11"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:named" + - "c:rpm -q dovecot -> r:^package dovecot is not installed" + - "c:rpm -q cyrus-imapd -> r:^package cyrus-imapd is not installed" - # 2.2.12 Ensure NFS is not enabled (Scored) - - id: 5064 - title: "Ensure NFS is not enabled" - description: "The Network File System (NFS) is one of the first and most widely distributed file systems in the UNIX environment. It provides the ability for systems to mount file systems of other servers through the network." - rationale: "If the system does not export NFS shares, it is recommended that the NFS be disabled to reduce remote attack surface." - remediation: "Run the following commands to disable nfs: # systemctl --now disable nfs" + # 2.2.12 Ensure Samba is not installed. (Automated) + - id: 5076 + title: "Ensure Samba is not installed." + description: "The Samba daemon allows system administrators to configure their Linux systems to share file systems and directories with Windows desktops. Samba will advertise the file systems and directories via the Server Message Block (SMB) protocol. Windows desktop users will be able to mount these directories and file systems as letter drives on their systems." + rationale: "If there is no need to mount directories and file systems to Windows systems, then this package can be removed to reduce the potential attack surface." + remediation: "Run the following command to remove samba: # dnf remove samba." compliance: - cis: ["2.2.12"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:nfs" + - "c:rpm -q samba -> r:^package samba is not installed" - # 2.2.13 Ensure RPC is not enabled (Scored) - - id: 5065 - title: "Ensure RPC is not enabled" - description: "The rpcbind service maps Remote Procedure Call (RPC) services to the ports on which they listen. RPC processes notify rpcbind when they start, registering the ports they are listening on and the RPC program numbers they expect to serve. The client system then contacts rpcbind on the server with a particular RPC program number. 
The rpcbind service redirects the client to the proper port number so it can communicate with the requested service." - rationale: "If the system does not require rpc based services, it is recommended that rpcbind be disabled to reduce the remote attack surface." - remediation: "Run the following commands to disable nfs: # systemctl --now disable rpcbind" + # 2.2.13 Ensure HTTP Proxy Server is not installed. (Automated) + - id: 5077 + title: "Ensure HTTP Proxy Server is not installed." + description: "Squid is a standard proxy server used in many distributions and environments." + rationale: "Unless a system is specifically set up to act as a proxy server, it is recommended that the squid package be removed to reduce the potential attack surface. Note: Several HTTP proxy servers exist. These should be checked and removed unless required." + remediation: "Run the following command to remove the squid package: # dnf remove squid." compliance: - cis: ["2.2.13"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:rpcbind" + - "c:rpm -q squid -> r:^package squid is not installed" - # 2.2.14 Remove LDAP Server (Scored) - - id: 5066 - title: "Ensure LDAP Server is not enabled" - description: "The Lightweight Directory Access Protocol (LDAP) was introduced as a replacement for NIS/YP. It is a service that provides a method for looking up information from a central database." - rationale: "If the system will not need to act as an LDAP server, it is recommended that the software be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable slapd: # systemctl --now disable slapd" + # 2.2.14 Ensure net-snmp is not installed. (Automated) + - id: 5078 + title: "Ensure net-snmp is not installed." + description: 'Simple Network Management Protocol (SNMP) is a widely used protocol for monitoring the health and welfare of network equipment, computer equipment and devices like UPSs. Net-SNMP is a suite of applications used to implement SNMPv1 (RFC 1157), SNMPv2 (RFCs 1901-1908), and SNMPv3 (RFCs 3411-3418) using both IPv4 and IPv6. Support for SNMPv2 classic (a.k.a. "SNMPv2 historic" - RFCs 1441-1452) was dropped with the 4.0 release of the UCD-snmp package. The Simple Network Management Protocol (SNMP) server is used to listen for SNMP commands from an SNMP management system, execute the commands or collect the information and then send results back to the requesting system.' + rationale: "The SNMP server can communicate using SNMPv1, which transmits data in the clear and does not require authentication to execute commands. SNMPv3 replaces the simple/clear text password sharing used in SNMPv2 with more securely encoded parameters. If the the SNMP service is not required, the net-snmp package should be removed to reduce the attack surface of the system. Note: If SNMP is required: - The server should be configured for SNMP v3 only. User Authentication and Message Encryption should be configured. If SNMP v2 is absolutely necessary, modify the community strings' values. -." + remediation: "Run the following command to remove net-snmpd: # dnf remove net-snmp." 
compliance: - cis: ["2.2.14"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - More detailed documentation on OpenLDAP is available at https://www.openldap.org - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6", "9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2", "A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:slapd" + - "c:rpm -q net-snmp -> r:^package net-snmp is not installed" - # 2.2.15 Remove DHCP Server (Scored) - - id: 5067 - title: "Ensure DHCP Server is not enabled" - description: "The Dynamic Host Configuration Protocol (DHCP) is a service that allows machines to be dynamically assigned IP addresses." - rationale: "Unless a system is specifically set up to act as a DHCP server, it is recommended that this service be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable dhcpd: # systemctl --now disable dhcpd" + # 2.2.15 Ensure NIS server is not installed. (Automated) + - id: 5079 + title: "Ensure NIS server is not installed." + description: "The ypserv package provides the Network Information Service (NIS). This service, formally known as Yellow Pages, is a client-server directory service protocol for distributing system configuration files. The NIS server is a collection of programs that allow for the distribution of configuration files." + rationale: "The NIS service is inherently an insecure system that has been vulnerable to DOS attacks, buffer overflows and has poor authentication for querying NIS maps. NIS generally has been replaced by such protocols as Lightweight Directory Access Protocol (LDAP). It is recommended that the ypserv package be removed, and if required a more secure services be used." + remediation: "Run the following command to remove ypserv: # dnf remove ypserv." compliance: - cis: ["2.2.15"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - More detailed documentation on DHCP is available at https://www.isc.org/software/dhcp - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6", "9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2", "A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:dhcpd" + - "c:rpm -q ypserv -> r:^package ypserv is not installed" - # 2.2.16 Ensure CUPS is not enabled (Scored) - - id: 5068 - title: "Ensure CUPS is not enabled" - description: "The Common Unix Print System (CUPS) provides the ability to print to both local and network printers. A system running CUPS can also accept print jobs from remote systems and print them to local printers. It also provides a web based remote administration capability." - rationale: "If the system does not need to print jobs or accept print jobs from other systems, it is recommended that CUPS be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable cups : # systemctl --now disable cups" + # 2.2.16 Ensure telnet-server is not installed. (Automated) + - id: 5080 + title: "Ensure telnet-server is not installed." 
+ description: "The telnet-server package contains the telnet daemon, which accepts connections from users from other systems via the telnet protocol." + rationale: "The telnet protocol is insecure and unencrypted. The use of an unencrypted transmission medium could allow a user with access to sniff network traffic the ability to steal credentials. The ssh package provides an encrypted session and stronger security." + remediation: "Run the following command to remove the telnet-server package: # dnf remove telnet-server." compliance: - cis: ["2.2.16"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - references: - - "More detailed documentation on CUPS is available at the project homepage at http://www.cups.org." - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6", "9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2", "A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:cups" + - "c:rpm -q telnet-server -> r:^package telnet-server is not installed" - # 2.2.17 Remove NIS Server (Scored) - - id: 5069 - title: "Ensure NIS Server is not enabled" - description: "The Network Information Service (NIS) (formally known as Yellow Pages) is a client-server directory service protocol for distributing system configuration files. The NIS server is a collection of programs that allow for the distribution of configuration files." - rationale: "The NIS service is inherently an insecure system that has been vulnerable to DOS attacks, buffer overflows and has poor authentication for querying NIS maps. NIS generally been replaced by such protocols as Lightweight Directory Access Protocol (LDAP). It is recommended that the service be disabled and other, more secure services be used" - remediation: "Run the following command to disable ypserv: # systemctl --now disable ypserv" + # 2.2.17 Ensure mail transfer agent is configured for local-only mode. (Automated) + - id: 5081 + title: "Ensure mail transfer agent is configured for local-only mode." + description: "Mail Transfer Agents (MTA), such as sendmail and Postfix, are used to listen for incoming mail and transfer the messages to the appropriate user or mail server. If the system is not intended to be a mail server, it is recommended that the MTA be configured to only process local mail." + rationale: "The software for all Mail Transfer Agents is complex and most have a long history of security issues. While it is important to ensure that the system can process local mail messages, it is not necessary to have the MTA's daemon listening on a port unless the server is intended to be a mail server that receives and processes mail from other systems. Notes: - This recommendation is designed around the postfix mail server. - Depending on your environment you may have an alternative MTA installed such as sendmail. If this is the case consult the documentation for your installed MTA to configure the recommended state." + remediation: "Edit /etc/postfix/main.cf and add the following line to the RECEIVING MAIL section. If the line already exists, change it to look like the line below: inet_interfaces = loopback-only Run the following command to restart postfix: # systemctl restart postfix." 
compliance: - cis: ["2.2.17"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "p:ypserv" + - 'c:ss -lntu -> r:\.*:25\.* && !r:\s*127.0.0.1:25\s*|\s*::1:25\s*' - # 2.2.18 Ensure mail transfer agent is configured for local-only mode (Scored) - - id: 5070 - title: "Ensure mail transfer agent is configured for local-only mode" - description: "Mail Transfer Agents (MTA), such as sendmail and Postfix, are used to listen for incoming mail and transfer the messages to the appropriate user or mail server. If the system is not intended to be a mail server, it is recommended that the MTA be configured to only process local mail." - rationale: "Mail Transfer Agents (MTA), such as sendmail and Postfix, are used to listen for incoming mail and transfer the messages to the appropriate user or mail server. If the system is not intended to be a mail server, it is recommended that the MTA be configured to only process local mail." - remediation: "Edit /etc/postfix/main.cf and add the following line to the RECEIVING MAIL section. If the line already exists, change it to look like the line below: inet_interfaces = loopback-only . Restart postfix: # systemctl restart postfix" + # 2.2.18 Ensure nfs-utils is not installed or the nfs-server service is masked. (Automated) + - id: 5082 + title: "Ensure nfs-utils is not installed or the nfs-server service is masked." + description: "The Network File System (NFS) is one of the first and most widely distributed file systems in the UNIX environment. It provides the ability for systems to mount file systems of other servers through the network." + rationale: "If the system does not require network shares, it is recommended that the nfs-utils package be removed to reduce the attack surface of the system." + impact: "Many of the libvirt packages used by Enterprise Linux virtualization are dependent on the nfs-utils package. If the nfs-package is required as a dependency, the nfs-server should be disabled and masked to reduce the attack surface of the system." + remediation: "Run the following command to remove nfs-utils: # dnf remove nfs-utils OR If the nfs-package is required as a dependency, run the following command to stop and mask the nfs-server service: # systemctl --now mask nfs-server." compliance: - cis: ["2.2.18"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.2"] - - nist_800_53: ["CM.1", "AC.4", "SC.7"] - - tsc: ["CC5.2", "CC6.4", "CC6.6", "CC6.7"] - condition: none + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: any rules: - - 'c:ss -lntu -> r:\.*:25\.* && !r:\s*127.0.0.1:25\s*|\s*::1:25\s*' + - "c:rpm -q nfs-utils -> r:package nfs-utils is not installed" + - "c:systemctl is-enabled nfs-server -> r:masked|No such file or directory" + + # 2.2.19 Ensure rpcbind is not installed or the rpcbind services are masked. 
(Automated) + - id: 5083 + title: "Ensure rpcbind is not installed or the rpcbind services are masked." + description: "The rpcbind utility maps RPC services to the ports on which they listen. RPC processes notify rpcbind when they start, registering the ports they are listening on and the RPC program numbers they expect to serve. The client system then contacts rpcbind on the server with a particular RPC program number. The rpcbind service redirects the client to the proper port number so it can communicate with the requested service. Portmapper is an RPC service, which always listens on tcp and udp 111, and is used to map other RPC services (such as nfs, nlockmgr, quotad, mountd, etc.) to their corresponding port number on the server. When a remote host makes an RPC call to that server, it first consults with portmap to determine where the RPC server is listening." + rationale: "A small request (~82 bytes via UDP) sent to the Portmapper generates a large response (7x to 28x amplification), which makes it a suitable tool for DDoS attacks. If rpcbind is not required, it is recommended that the rpcbind package be removed to reduce the attack surface of the system." + impact: "Many of the libvirt packages used by Enterprise Linux virtualization, and the nfs-utils package used for the Network File System (NFS), are dependent on the rpcbind package. If the rpcbind package is required as a dependency, the services rpcbind.service and rpcbind.socket should be stopped and masked to reduce the attack surface of the system." + remediation: "Run the following command to remove rpcbind: # dnf remove rpcbind OR If the rpcbind package is required as a dependency, run the following commands to stop and mask the rpcbind and rpcbind.socket services: # systemctl --now mask rpcbind # systemctl --now mask rpcbind.socket." + compliance: + - cis: ["2.2.19"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: any + rules: + - "c:rpm -q rpcbind -> r:^package rpcbind is not installed" + - "c:systemctl is-enabled rpcbind rpcbind.socket -> r:masked" + + # 2.2.20 Ensure rsync is not installed or the rsyncd service is masked. (Automated) + - id: 5084 + title: "Ensure rsync is not installed or the rsyncd service is masked." + description: "The rsyncd service can be used to synchronize files between systems over network links." + rationale: "Unless required, the rsync package should be removed to reduce the attack surface area of the system. The rsyncd service presents a security risk as it uses unencrypted protocols for communication. Note: If a required dependency exists for the rsync package, but the rsyncd service is not required, the service should be masked." + impact: "There are packages that are dependent on the rsync package. If the rsync package is removed, these packages will be removed as well. Before removing the rsync package, review any dependent packages to determine if they are required on the system. If a dependent package is required, mask the rsyncd service and leave the rsync package installed." + remediation: "Run the following command to remove the rsync package: # dnf remove rsync OR Run the following command to mask the rsyncd service: # systemctl --now mask rsyncd."
+ compliance: + - cis: ["2.2.20"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: any + rules: + - "c:rpm -q rsync -> r:package rsync is not installed" + - "c:systemctl is-enabled rsyncd -> r:masked" ############################################### # 2.3 Service Clients ############################################### - # 2.3.1 Remove NIS Client (Scored) - - id: 5071 - title: "Ensure NIS Client is not installed" - description: "The Network Information Service (NIS), formerly known as Yellow Pages, is a client-server directory service protocol used to distribute system configuration files. The NIS client ( ypbind ) was used to bind a machine to an NIS server and receive the distributed configuration files." + # 2.3.1 Ensure NIS Client is not installed. (Automated) + - id: 5085 + title: "Ensure NIS Client is not installed." + description: "The Network Information Service (NIS), formerly known as Yellow Pages, is a client-server directory service protocol used to distribute system configuration files. The NIS client ( ypbind) was used to bind a machine to an NIS server and receive the distributed configuration files." rationale: "The NIS service is inherently an insecure system that has been vulnerable to DOS attacks, buffer overflows and has poor authentication for querying NIS maps. NIS generally has been replaced by such protocols as Lightweight Directory Access Protocol (LDAP). It is recommended that the service be removed." - remediation: "Run the following command to uninstall ypbind: # dnf remove ypbind" + impact: "Many insecure service clients are used as troubleshooting tools and in testing environments. Uninstalling them can inhibit capability to test and troubleshoot. If they are required it is advisable to remove the clients after use to prevent accidental or intentional misuse." + remediation: "Run the following command to remove the ypbind package: # dnf remove ypbind." compliance: - cis: ["2.3.1"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: none rules: - - "c:rpm -qa ypbind -> r:ypbind" + - "c:rpm -q ypbind -> r:^package ypbind is not installed" - # 2.3.2 Ensure telnet client is not installed (Scored) - - id: 5072 - title: "Ensure telnet client is not installed" - description: "The telnet package contains the telnet client, which allows users to start connections to other systems via the telnet protocol." - rationale: "The telnet protocol is insecure and unencrypted. The use of an unencrypted transmission medium could allow an unauthorized user to steal credentials. The ssh package provides an encrypted session and stronger security and is included in most Linux distributions." - remediation: "Run the following command to uninstall telnet : # dnf remove telnet" + # 2.3.2 Ensure rsh client is not installed. (Automated) + - id: 5086 + title: "Ensure rsh client is not installed." 
+ description: "The rsh package contains the client commands for the rsh services." + rationale: "These legacy clients contain numerous security exposures and have been replaced with the more secure SSH package. Even if the server is removed, it is best to ensure the clients are also removed to prevent users from inadvertently attempting to use these commands and therefore exposing their credentials. Note that removing the rsh package removes the clients for rsh, rcp and rlogin." + impact: "Many insecure service clients are used as troubleshooting tools and in testing environments. Uninstalling them can inhibit capability to test and troubleshoot. If they are required it is advisable to remove the clients after use to prevent accidental or intentional misuse." + remediation: "Run the following command to remove the rsh package: # dnf remove rsh." compliance: - cis: ["2.3.2"] - - cis_csc: ["4.5"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: all + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all rules: - - "c:rpm -q telnet -> r:^package telnet is not installed" + - "c:rpm -q rsh -> r:^package rsh is not installed" - # 2.3.3 Ensure LDAP client is not installed (Scored) - - id: 5073 - title: "Ensure LDAP client is not installed" - description: "The Lightweight Directory Access Protocol (LDAP) was introduced as a replacement for NIS/YP. It is a service that provides a method for looking up information from a central database." - rationale: "If the system will not need to act as an LDAP client, it is recommended that the software be removed to reduce the potential attack surface." - remediation: "Run the following command to uninstall openldap-clients : # dnf remove openldap-clients" + # 2.3.3 Ensure talk client is not installed. (Automated) + - id: 5087 + title: "Ensure talk client is not installed." + description: "The talk software makes it possible for users to send and receive messages across systems through a terminal session. The talk client, which allows initialization of talk sessions, is installed by default." + rationale: "The software presents a security risk as it uses unencrypted protocols for communication." + impact: "Many insecure service clients are used as troubleshooting tools and in testing environments. Uninstalling them can inhibit capability to test and troubleshoot. If they are required it is advisable to remove the clients after use to prevent accidental or intentional misuse." + remediation: "Run the following command to remove the talk package: # dnf remove talk."
compliance: - cis: ["2.3.3"] - - cis_csc: ["2.6"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: all - rules: - - "c:rpm -q openldap-clients -> r:^package openldap-clients is not installed" - - ############################################### - # 3 Network Configuration - ############################################### - ############################################### - # 3.1 Network Parameters (Host Only) - ############################################### - # 3.1.1 Ensure IP forwarding is disabled (Scored) - - id: 5074 - title: "Ensure IP forwarding is disabled" - description: "The net.ipv4.ip_forward and net.ipv6.conf.all.forwarding flags are used to tell the system whether it can forward packets or not." - rationale: "Setting the flags to 0 ensures that a system with multiple interfaces (for example, a hard proxy), will never be able to forward packets, and therefore, never serve as a router." - remediation: "Run the following commands to restore the default parameters and set the active kernel parameters: # grep -Els \"^\\s*net\\.ipv4\\.ip_forward\\s*=\\s*1\" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf | while read filename; do sed -ri \"s/^\\s*(net\\.ipv4\\.ip_forward\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" \"s/^\\s*(net\\.ipv4\\.ip_forward\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" $filename; done; sysctl -w net.ipv4.ip_forward=0; sysctl -w net.ipv4.route.flush=1 && # grep -Els \"^\\s*net\\.ipv6\\.conf\\.all\\.forwarding\\s*=\\s*1\" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf | while read filename; do sed -ri \"s/^\\s*(net\\.ipv6\\.conf\\.all\\.forwarding\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" $filename; done; sysctl -w net.ipv6.conf.all.forwarding=0; sysctl -w net.ipv6.route.flush=1" - compliance: - - cis: ["3.1.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: none rules: - - 'c:grep -Rh -E -s ^\s*net.ipv4.ip_forward /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:^\s*net.ipv4.ip_forward\s*=\s*1' - - 'c:grep -Rh -E -s ^\s*net.ipv6.conf.all.forwarding /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:^\s*net.ipv6.conf.all.forwarding\s*=\s*1' - - 'c:sysctl net.ipv4.ip_forward -> r:^\s*net.ipv4.ip_forward\s*=\s*1' - - 'c:sysctl net.ipv6.conf.all.forwarding -> r:^\s*net.ipv6.conf.all.forwarding\s*=\s*1' - - # 3.1.2 Ensure packet redirect sending is disabled (Scored) - - id: 5075 - title: "Ensure packet redirect sending is disabled" - description: "ICMP Redirects are used to send routing information to other hosts. As a host itself does not act as a router (in a host only configuration), there is no need to send redirects." - rationale: "An attacker could use a compromised host to send invalid ICMP redirects to other router devices in an attempt to corrupt routing and have users access a system set up by the attacker as opposed to a valid system." 
- remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.send_redirects = 0 net.ipv4.conf.default.send_redirects = 0 .Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.send_redirects=0; # sysctl -w net.ipv4.conf.default.send_redirects=0; # sysctl -w net.ipv4.route.flush=1" - compliance: - - cis: ["3.1.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:grep -Rh net.ipv4.conf.all.send_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.all.send_redirects\s*=\s*0' - - 'c:grep -Rh net.ipv4.conf.default.send_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.default.send_redirects\s*=\s*0' - - 'c:sysctl net.ipv4.conf.all.send_redirects -> r:^\s*net.ipv4.conf.all.send_redirects\s*=\s*0' - - 'c:sysctl net.ipv4.conf.default.send_redirects -> r:^\s*net.ipv4.conf.default.send_redirects\s*=\s*0' + - "c:rpm -q talk -> r:^package talk is not installed" - ############################################### - # 3.2 Network Parameters (Host and Router) - ############################################### - # 3.2.1 Ensure source routed packets are not accepted (Scored) - - id: 5076 - title: "Ensure source routed packets are not accepted" - description: "In networking, source routing allows a sender to partially or fully specify the route packets take through a network. In contrast, non-source routed packets travel a path determined by routers in the network. In some cases, systems may not be routable or reachable from some locations (e.g. private addresses vs. Internet routable), and so source routed packets would need to be used." - rationale: "Setting net.ipv4.conf.all.accept_source_route, net.ipv4.conf.default.accept_source_route, net.ipv6.conf.all.accept_source_route and net.ipv6.conf.default.accept_source_route to 0 disables the system from accepting source routed packets. Assume this system was capable of routing packets to Internet routable addresses on one interface and private addresses on another interface. Assume that the private addresses were not routable to the Internet routable addresses and vice versa. Under normal routing circumstances, an attacker from the Internet routable addresses could not use the system as a way to reach the private address systems. If, however, source routed packets were allowed, they could be used to gain access to the private address systems as the route could be specified, rather than rely on routing protocols that did not allow this routing." 
- remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.accept_source_route = 0 net.ipv4.conf.default.accept_source_route = 0 net.ipv6.conf.all.accept_source_route = 0 net.ipv6.conf.default.accept_source_route = 0 and Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.accept_source_route=0; # sysctl -w net.ipv4.conf.default.accept_source_route=0; # sysctl -w net.ipv6.conf.all.accept_source_route=0; # sysctl -w net.ipv6.conf.default.accept_source_route=0; # sysctl -w net.ipv4.route.flush=1; # sysctl -w net.ipv6.route.flush=1" - compliance: - - cis: ["3.2.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:sysctl net.ipv4.conf.all.accept_source_route -> r:^\s*net.ipv4.conf.all.accept_source_route\s*=\s*0' - - 'c:sysctl net.ipv4.conf.default.accept_source_route -> r:^\s*net.ipv4.conf.default.accept_source_route\s*=\s*0' - - 'c:grep -Rh net.ipv4.conf.all.accept_source_route /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.all.accept_source_route\s*=\s*0' - - 'c:grep -Rh net.ipv4.conf.default.accept_source_route /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.default.accept_source_route\s*=\s*0' - - 'c:sysctl net.ipv6.conf.all.accept_source_route -> r:^\s*net.ipv6.conf.all.accept_source_route\s*=\s*0|No such file or directory' - - 'c:sysctl net.ipv6.conf.default.accept_source_route -> r:^\s*net.ipv6.conf.default.accept_source_route\s*=\s*0|No such file or directory' - - 'c:grep -Rh net.ipv6.conf.all.accept_source_route /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv6.conf.all.accept_source_route\s*=\s*0' - - 'c:grep -Rh net.ipv6.conf.default.accept_source_route /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv6.conf.default.accept_source_route\s*=\s*0' - - # 3.2.2 Ensure ICMP redirects are not accepted (Scored) - - id: 5077 - title: "Ensure ICMP redirects are not accepted" - description: "ICMP redirect messages are packets that convey routing information and tell your host (acting as a router) to send packets via an alternate path. It is a way of allowing an outside routing device to update your system routing tables. By setting net.ipv4.conf.all.accept_redirects and net.ipv6.conf.all.accept_redirects to 0, the system will not accept any ICMP redirect messages, and therefore, won't allow outsiders to update the system's routing tables." - rationale: "Attackers could use bogus ICMP redirect messages to maliciously alter the system routing tables and get them to send packets to incorrect networks and allow your system packets to be captured." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.accept_redirects = 0 net.ipv4.conf.default.accept_redirects = 0 net.ipv6.conf.all.accept_redirects = 0 net.ipv6.conf.default.accept_redirects = 0. 
Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.accept_redirects=0; # sysctl -w net.ipv4.conf.default.accept_redirects=0; # sysctl -w net.ipv6.conf.all.accept_redirects=0; # sysctl -w net.ipv6.conf.default.accept_redirects=0; # sysctl -w net.ipv4.route.flush=1 and # sysctl -w net.ipv6.route.flush=1" - compliance: - - cis: ["3.2.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:sysctl net.ipv4.conf.all.accept_redirects -> r:^\s*net.ipv4.conf.all.accept_redirects\s*=\s*0' - - 'c:sysctl net.ipv4.conf.default.accept_redirects -> r:^\s*net.ipv4.conf.default.accept_redirects\s*=\s*0' - - 'c:grep -Rh net.ipv4.conf.all.accept_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.all.accept_redirects\s*=\s*0' - - 'c:grep -Rh net.ipv4.conf.default.accept_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.default.accept_redirects\s*=\s*0' - - 'c:sysctl net.ipv6.conf.all.accept_redirects -> r:^\s*net.ipv6.conf.all.accept_redirects\s*=\s*0' - - 'c:sysctl net.ipv6.conf.default.accept_redirects -> r:^\s*net.ipv6.conf.default.accept_redirects\s*=\s*0' - - 'c:grep -Rh net.ipv6.conf.all.accept_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv6.conf.all.accept_redirects\s*=\s*0' - - 'c:grep -Rh net.ipv6.conf.default.accept_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv6.conf.default.accept_redirects\s*=\s*0' - - # 3.2.3 Ensure secure ICMP redirects are not accepted (Scored) - - id: 5078 - title: "Ensure secure ICMP redirects are not accepted" - description: "Secure ICMP redirects are the same as ICMP redirects, except they come from gateways listed on the default gateway list. It is assumed that these gateways are known to your system, and that they are likely to be secure." - rationale: "It is still possible for even known gateways to be compromised. Setting net.ipv4.conf.all.secure_redirects to 0 protects the system from routing table updates by possibly compromised known gateways." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.secure_redirects = 0 and net.ipv4.conf.default.secure_redirects = 0. Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.secure_redirects=0; # sysctl -w net.ipv4.conf.default.secure_redirects=0 and # sysctl -w net.ipv4.route.flush=1" + # 2.3.4 Ensure telnet client is not installed. (Automated) + - id: 5088 + title: "Ensure telnet client is not installed." + description: "The telnet package contains the telnet client, which allows users to start connections to other systems via the telnet protocol." + rationale: "The telnet protocol is insecure and unencrypted. The use of an unencrypted transmission medium could allow an unauthorized user to steal credentials. The ssh package provides an encrypted session and stronger security and is included in most Linux distributions." + impact: "Many insecure service clients are used as troubleshooting tools and in testing environments. Uninstalling them can inhibit capability to test and troubleshoot. If they are required it is advisable to remove the clients after use to prevent accidental or intentional misuse." + remediation: "Run the following command to remove the telnet package: # dnf remove telnet." 
compliance: - - cis: ["3.2.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["2.3.4"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:sysctl net.ipv4.conf.all.secure_redirects -> r:^\s*net.ipv4.conf.all.secure_redirects\s*=\s*0' - - 'c:sysctl net.ipv4.conf.default.secure_redirects -> r:^\s*net.ipv4.conf.default.secure_redirects\s*=\s*0' - - 'c:grep -Rh net.ipv4.conf.all.secure_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.all.secure_redirects\s*=\s*0' - - 'c:grep -Rh net.ipv4.conf.default.secure_redirects /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.default.secure_redirects\s*=\s*0' + - "c:rpm -q telnet -> r:^package telnet is not installed" - # 3.2.4 Ensure suspicious packets are logged (Scored) - - id: 5079 - title: "Ensure suspicious packets are logged" - description: "When enabled, this feature logs packets with un-routable source addresses to the kernel log." - rationale: "Enabling this feature and logging these packets allows an administrator to investigate the possibility that an attacker is sending spoofed packets to their system." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.all.log_martians = 1 and net.ipv4.conf.default.log_martians = 1. Run the following commands to set the active kernel parameters: # sysctl -w net.ipv4.conf.all.log_martians=1; # sysctl -w net.ipv4.conf.default.log_martians=1 and # sysctl -w net.ipv4.route.flush=1" + # 2.3.5 Ensure LDAP client is not installed. (Automated) + - id: 5089 + title: "Ensure LDAP client is not installed." + description: "The Lightweight Directory Access Protocol (LDAP) was introduced as a replacement for NIS/YP. It is a service that provides a method for looking up information from a central database." + rationale: "If the system will not need to act as an LDAP client, it is recommended that the software be removed to reduce the potential attack surface." + impact: "Removing the LDAP client will prevent or inhibit using LDAP for authentication in your environment." + remediation: "Run the following command to remove the openldap-clients package: # dnf remove openldap-clients." 
compliance: - - cis: ["3.2.4"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["2.3.5"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["2.6"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.12.5.1", "A.12.6.2"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:sysctl net.ipv4.conf.all.log_martians -> r:^\s*net.ipv4.conf.all.log_martians\s*=\s*1' - - 'c:sysctl net.ipv4.conf.default.log_martians -> r:^\s*net.ipv4.conf.default.log_martians\s*=\s*1' - - 'c:grep -Rh net.ipv4.conf.all.log_martians /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.all.log_martians\s*=\s*1' - - 'c:grep -Rh net.ipv4.conf.default.log_martians /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv4.conf.default.log_martians\s*=\s*1' + - "c:rpm -q openldap-clients -> r:^package openldap-clients is not installed" - # 3.2.5 Ensure broadcast ICMP requests are ignored (Scored) - - id: 5080 - title: "Ensure broadcast ICMP requests are ignored" - description: "Setting net.ipv4.icmp_echo_ignore_broadcasts to 1 will cause the system to ignore all ICMP echo and timestamp requests to broadcast and multicast addresses." - rationale: "Accepting ICMP echo and timestamp requests with broadcast or multicast destinations for your network could be used to trick your host into starting (or participating) in a Smurf attack. A Smurf attack relies on an attacker sending large amounts of ICMP broadcast messages with a spoofed source address. All hosts receiving this message and responding would send echo-reply messages back to the spoofed address, which is probably not routable. If many hosts respond to the packets, the amount of traffic on the network could be significantly multiplied." - remediation: "Run the following command to restore the default parameters and set the active kernel parameters: # grep -Els \"^\\s*net\\.ipv4\\.icmp_echo_ignore_broadcasts\\s*=\\s*0\" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf \"s/^\\s*(net\\.ipv4\\.icmp_echo_ignore_broadcasts\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" $filename; done; sysctl -w net.icmp_echo_ignore_broadcasts=1; sysctl -w net.ipv4.route.flush=1" + # 2.3.6 Ensure TFTP client is not installed. (Automated) + - id: 5090 + title: "Ensure TFTP client is not installed." + description: "Trivial File Transfer Protocol (TFTP) is a simple protocol for exchanging files between two TCP/IP machines. TFTP servers allow connections from a TFTP Client for sending and receiving files." + rationale: "TFTP does not have built-in encryption, access control or authentication. This makes it very easy for an attacker to exploit TFTP to gain access to files." + remediation: "Run the following command to remove tftp: # dnf remove tftp." 
compliance: - - cis: ["3.2.5"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["2.3.6"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:sysctl net.ipv4.icmp_echo_ignore_broadcasts -> r:^\s*net.ipv4.icmp_echo_ignore_broadcasts\s*=\s*1' - - 'not c:grep -E -s -Rh ^\s*net.ipv4.icmp_echo_ignore_broadcasts /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:^\s*net.ipv4.icmp_echo_ignore_broadcasts\s*=\s*0' + - "c:rpm -q tftp -> r:^package tftp is not installed" - # 3.2.6 Ensure bogus ICMP responses are ignored (Scored) - - id: 5081 - title: "Ensure bogus ICMP responses are ignored" - description: "Setting icmp_ignore_bogus_error_responses to 1 prevents the kernel from logging bogus responses (RFC-1122 non-compliant) from broadcast reframes, keeping file systems from filling up with useless log messages." - rationale: "Some routers (and some attackers) will send responses that violate RFC-1122 and attempt to fill up a log file system with many useless error messages." - remediation: "Run the following commands to restore the default parameters and set the active kernel parameters: # grep -Els \"^\\s*net\\.ipv4\\.icmp_ignore_bogus_error_responses\\s*=\\s*0 /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf | while read filename; do sed -ri \"s/^\\s*(net\\.ipv4\\.icmp_ignore_bogus_error_responses\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" $filename; done; sysctl -w net.ipv4.icmp_ignore_bogus_error_responses=1; sysctl -w net.ipv4.route.flush=1\"" - compliance: - - cis: ["3.2.6"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:sysctl net.ipv4.icmp_ignore_bogus_error_responses -> r:^\s*net.ipv4.icmp_ignore_bogus_error_responses\s*=\s*1' - - 'not c:grep -E -s -Rh ^\s*net.ipv4.icmp_ignore_bogus_error_responses /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:^\s*net.ipv4.icmp_ignore_bogus_error_responses\s*=\s*0' + # 2.4 Ensure nonessential services are removed or masked. (Manual) - Not Implemented - # 3.2.7 Ensure Reverse Path Filtering is enabled (Scored) - - id: 5082 - title: "Ensure Reverse Path Filtering is enabled" - description: "Setting net.ipv4.conf.all.rp_filter and net.ipv4.conf.default.rp_filter to 1 forces the Linux kernel to utilize reverse path filtering on a received packet to determine if the packet was valid. Essentially, with reverse path filtering, if the return packet does not go out the same interface that the corresponding source packet came from, the packet is dropped (and logged if log_martians is set)." - rationale: "Setting these flags is a good way to deter attackers from sending your system bogus packets that cannot be responded to. One instance where this feature breaks down is if asymmetrical routing is employed. This would occur when using dynamic routing protocols (bgp, ospf, etc) on your system. If you are using asymmetrical routing on your system, you will not be able to enable this feature without breaking the routing." 
- remediation: "Run the following command to restore the default net.ipv4.conf.all.rp_filter = 1 parameter and set the active kernel parameter: # grep -Els \"^\\s*net\\.ipv4\\.conf\\.all\\.rp_filter\\s*=\\s*0\" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf | while read filename; do sed -ri \"s/^\\s*(net\\.ipv4\\.net.ipv4.conf\\.all\\.rp_filter\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" $filename; done; sysctl -w net.ipv4.conf.all.rp_filter=1; sysctl -w net.ipv4.route.flush=1 .Set the following parameter in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv4.conf.default.rp_filter=1 and Run the following commands to set the active kernel parameter: # sysctl -w net.ipv4.conf.default.rp_filter=1 and # sysctl -w net.ipv4.route.flush=1" - compliance: - - cis: ["3.2.7"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:sysctl net.ipv4.conf.all.rp_filter -> r:^\s*net.ipv4.conf.all.rp_filter\s*=\s*1' - - 'c:sysctl net.ipv4.conf.default.rp_filter -> r:^\s*net.ipv4.conf.default.rp_filter\s*=\s*1' - - 'not c:grep -E -s -Rh ^\s*net.ipv4.conf.all.rp_filter /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:^\s*net.ipv4.conf.all.rp_filter\s*=\s*0' - - 'c:grep -E -s -Rh ^\s*net.ipv4.conf.default.rp_filter /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:^\s*net.ipv4.conf.default.rp_filter\s*=\s*1' + ############################################### + # 3 Network Configuration + ############################################### + ############################################### + # 3.1 Uncommon Network Protocols + ############################################### - # 3.2.8 Ensure TCP SYN Cookies is enabled (Scored) - - id: 5083 - title: "Ensure TCP SYN Cookies is enabled" - description: "When tcp_syncookies is set, the kernel will handle TCP SYN packets normally until the half-open connection queue is full, at which time, the SYN cookie functionality kicks in. SYN cookies work by not using the SYN queue at all. Instead, the kernel simply replies to the SYN with a SYN|ACK, but will include a specially crafted TCP sequence number that encodes the source and destination IP address and port number and the time the packet was sent. A legitimate connection would send the ACK packet of the three way handshake with the specially crafted sequence number. This allows the system to verify that it has received a valid response to a SYN cookie and allow the connection, even though there is no corresponding SYN in the queue." - rationale: "Attackers use SYN flood attacks to perform a denial of service attacked on a system by sending many SYN packets without completing the three way handshake. This will quickly use up slots in the kernel's half-open connection queue and prevent legitimate connections from succeeding. SYN cookies allow the system to keep accepting valid connections, even if under a denial of service attack." - remediation: "Run the following command to restore the default parameter and set the active kernel parameters: grep -Els \"^\\s*net\\.ipv4\\.tcp_syncookies\\s*=\\s*[02]*\" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf | while read filename; do sed -ri \"s/^\\s*(net\\.ipv4\\.tcp_syncookies\\s*)(=)(\\s*\\S+\\b).*$/# *REMOVED* \\1/\" $filename; done; sysctl -w net.ipv4.tcp_syncookies=1; sysctl -w net.ipv4.route.flush=1" + # 3.1.1 Verify if IPv6 is enabled on the system. 
(Manual) + - id: 5091 + title: "Verify if IPv6 is enabled on the system." + description: "Internet Protocol Version 6 (IPv6) is the most recent version of Internet Protocol (IP). It's designed to supply IP addressing and additional security to support the predicted growth of connected devices." + rationale: "It is recommended that either IPv6 settings are configured OR IPv6 be disabled to reduce the attack surface of the system." + impact: "IETF RFC 4038 recommends that applications are built with an assumption of dual stack. If IPv6 is disabled through sysctl config, SSH X11forwarding may no longer function as expected. We recommend that SSH X11fowarding be disabled, but if required, the following will allow for SSH X11forwarding with IPv6 disabled through sysctl config: Add the following line the /etc/ssh/sshd_config file: AddressFamily inet Run the following command to re-start the openSSH server: # systemctl restart sshd." + remediation: 'If IPv6 is to be disabled, use one of the two following methods to disable IPv6 on the system: To disable IPv6 through the GRUB2 config, run the following command to add ipv6.disable=1 to the GRUB_CMDLINE_LINUX parameters: grubby --update-kernel ALL --args ''ipv6.disable=1'' OR To disable IPv6 through sysctl settings, set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: Example: # printf " net.ipv6.conf.all.disable_ipv6 = 1 net.ipv6.conf.default.disable_ipv6 = 1 " >> /etc/sysctl.d/60-disable_ipv6.conf Run the following command to set the active kernel parameters: # { sysctl -w net.ipv6.conf.all.disable_ipv6=1 sysctl -w net.ipv6.conf.default.disable_ipv6=1 sysctl -w net.ipv6.route.flush=1 }.' compliance: - - cis: ["3.2.8"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["3.1.1"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:sysctl net.ipv4.tcp_syncookies -> r:^\s*net.ipv4.tcp_syncookies\s*=\s*1' - - 'not c:grep -E -r -Rh ^\s*net.ipv4.tcp_syncookies /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf -> r:^\s*net.ipv4.tcp_syncookies\s*=\s*[02]' + - 'f:/boot/grub2/grubenv -> r:^\s*kernelopts=\.+ipv6.disable=1' - # 3.2.9 Ensure IPv6 router advertisements are not accepted (Scored) - - id: 5084 - title: "Ensure IPv6 router advertisements are not accepted" - description: "This setting disables the system's ability to accept IPv6 router advertisements." - rationale: "It is recommended that systems do not accept router advertisements as they could be tricked into routing traffic to compromised machines. Setting hard routes within the system (usually a single default route to a trusted router) protects the system from bad routes." - remediation: "Set the following parameters in /etc/sysctl.conf or a /etc/sysctl.d/* file: net.ipv6.conf.all.accept_ra = 0 and net.ipv6.conf.default.accept_ra = 0 . Run the following commands to set the active kernel parameters: # sysctl -w net.ipv6.conf.all.accept_ra=0; # sysctl -w net.ipv6.conf.default.accept_ra=0 and # sysctl -w net.ipv6.route.flush=1;" + # 3.1.2 Ensure SCTP is disabled. (Automated) + - id: 5092 + title: "Ensure SCTP is disabled." 
+ description: "The Stream Control Transmission Protocol (SCTP) is a transport layer protocol used to support message-oriented communication, with several streams of messages in one connection. It serves a similar function as TCP and UDP, incorporating features of both. It is message-oriented like UDP, and ensures reliable in-sequence transport of messages with congestion control like TCP." + rationale: "If the protocol is not being used, it is recommended that kernel module not be loaded, disabling the service to reduce the potential attack surface." + remediation: 'Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: printf " install sctp /bin/true " >> /etc/modprobe.d/sctp.conf.' compliance: - - cis: ["3.2.9"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis: ["3.1.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:sysctl net.ipv6.conf.all.accept_ra -> r:^\s*net.ipv6.conf.all.accept_ra\s*=\s*0' - - 'c:sysctl net.ipv6.conf.default.accept_ra -> r:^\s*net.ipv6.conf.default.accept_ra\s*=\s*0' - - 'c:grep -Rh net.ipv6.conf.all.accept_ra /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv6.conf.all.accept_ra\s*=\s*0' - - 'c:grep -Rh net.ipv6.conf.default.accept_ra /etc/sysctl.conf /etc/sysctl.d/* -> r:^\s*net.ipv6.conf.default.accept_ra\s*=\s*0' + - 'c:modprobe -n -v sctp -> r:^\s*install\s*/bin/true|Module sctp not found' + - "not c:lsmod -> r:sctp" - ############################################### - # 3.3 Uncommon Network Protocols - ############################################### - # 3.3.1 Ensure DCCP is disabled (Scored) - - id: 5085 - title: "Ensure DCCP is disabled" - description: "The Datagram Congestion Control Protocol (DCCP) is a transport layer protocol that supports streaming media and telephony. DCCP provides a way to gain access to congestion control, without having to do it at the application layer, but does not provide in-sequence delivery" + # 3.1.3 Ensure DCCP is disabled. (Automated) + - id: 5093 + title: "Ensure DCCP is disabled." + description: "The Datagram Congestion Control Protocol (DCCP) is a transport layer protocol that supports streaming media and telephony. DCCP provides a way to gain access to congestion control, without having to do it at the application layer, but does not provide in-sequence delivery." rationale: "If the protocol is not required, it is recommended that the drivers not be installed to reduce the potential attack surface." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf. Example: vim /etc/modprobe.d/dccp.conf and add the following line: install dccp /bin/true" + remediation: 'Edit or create a file in the /etc/modprobe.d/ directory ending in .conf Example: printf " install dccp /bin/true " >> /etc/modprobe.d/dccp.conf.' 
compliance: - - cis: ["3.3.1"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis: ["3.1.3"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - 'c:modprobe -n -v dccp -> r:^\s*install\s*/bin/true|Module dccp not found' - "not c:lsmod -> r:dccp" - # 3.3.2 Ensure SCTP is disabled (Scored) - - id: 5086 - title: "Ensure SCTP is disabled" - description: "The Stream Control Transmission Protocol (SCTP) is a transport layer protocol used to support message oriented communication, with several streams of messages in one connection. It serves a similar function as TCP and UDP, incorporating features of both. It is message-oriented like UDP, and ensures reliable in-sequence transport of messages with congestion control like TCP." - rationale: "If the protocol is not being used, it is recommended that kernel module not be loaded, disabling the service to reduce the potential attack surface." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf .Example: vim /etc/modprobe.d/sctp.conf and add the following line: install sctp /bin/true" - compliance: - - cis: ["3.3.2"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:modprobe -n -v sctp -> r:^\s*install\s*/bin/true|Module sctp not found' - - "not c:lsmod -> r:sctp" + # 3.1.4 Ensure wireless interfaces are disabled. (Automated) - Not Implemented + # 3.2.1 Ensure IP forwarding is disabled. (Automated) - Not Implemented + # 3.2.2 Ensure packet redirect sending is disabled. (Automated) - Not Implemented - # 3.3.3 Ensure RDS is disabled (Scored) - - id: 5087 - title: "Ensure RDS is disabled" - description: "The Reliable Datagram Sockets (RDS) protocol is a transport layer protocol designed to provide low-latency, high-bandwidth communications between cluster nodes. It was developed by the Oracle Corporation." - rationale: "If the protocol is not being used, it is recommended that kernel module not be loaded, disabling the service to reduce the potential attack surface." - remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf .Example: vim /etc/modprobe.d/rds.conf and add the following line: install rds /bin/true" - compliance: - - cis: ["3.3.3"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:modprobe -n -v rds -> r:^\s*install\s*/bin/true|Module rds not found' - - "not c:lsmod -> r:rds" + ############################################### + # 3.3 Network Parameters (Host and Router) + ############################################### - # 3.3.4 Ensure TIPC is disabled (Scored) - - id: 5088 - title: "Ensure TIPC is disabled" - description: "The Transparent Inter-Process Communication (TIPC) protocol is designed to provide communication between cluster nodes." - rationale: "If the protocol is not being used, it is recommended that kernel module not be loaded, disabling the service to reduce the potential attack surface." 
- remediation: "Edit or create a file in the /etc/modprobe.d/ directory ending in .conf .Example: vim /etc/modprobe.d/tipc.conf and add the following line: install tipc /bin/true" - compliance: - - cis: ["3.3.4"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: all - rules: - - 'c:modprobe -n -v tipc -> r:^\s*install\s*/bin/true|Module tipc not found' - - "not c:lsmod -> r:tipc" + # 3.3.1 Ensure source routed packets are not accepted. (Automated) - Not Implemented + # 3.3.2 Ensure ICMP redirects are not accepted. (Automated) - Not Implemented + # 3.3.3 Ensure secure ICMP redirects are not accepted. (Automated) - Not Implemented + # 3.3.4 Ensure suspicious packets are logged. (Automated) - Not Implemented + # 3.3.5 Ensure broadcast ICMP requests are ignored. (Automated) - Not Implemented + # 3.3.6 Ensure bogus ICMP responses are ignored. (Automated) - Not Implemented + # 3.3.7 Ensure Reverse Path Filtering is enabled. (Automated) - Not Implemented + # 3.3.8 Ensure TCP SYN Cookies is enabled. (Automated) - Not Implemented + # 3.3.9 Ensure IPv6 router advertisements are not accepted. (Automated) - Not Implemented ############################################### # 3.4 Firewall Configuration @@ -1644,272 +2039,500 @@ checks: ############################################### # 3.4.1 Ensure Firewall software is installed ############################################### - # 3.4.1.1 Ensure a Firewall package is installed (Scored) - - id: 5089 - title: "Ensure a Firewall package is installed" - description: "A Firewall package should be selected. Most firewall configuration utilities operate as a front end to nftables or iptables." - rationale: "A Firewall package is required for firewall management and configuration." - remediation: "Run one of the following commands to install a Firewall package. For firewalld: dnf install firewalld .For nftables: # dnf install nftables. For iptables: # dnf install iptables" + + # 3.4.1.1 Ensure firewalld is installed. (Automated) + - id: 5094 + title: "Ensure firewalld is installed." + description: "firewalld is a firewall management tool for Linux operating systems. It provides firewall features by acting as a front-end for the Linux kernel's netfilter framework via the iptables backend or provides firewall features by acting as a front-end for the Linux kernel's netfilter framework via the nftables utility. firewalld replaces iptables as the default firewall management tool. Use the firewalld utility to configure a firewall for less complex firewalls. The utility is easy to use and covers the typical use cases scenario. FirewallD supports both IPv4 and IPv6 networks and can administer separate firewall zones with varying degrees of trust as defined in zone profiles. Note: Starting in v0.6.0, FirewallD added support for acting as a front-end for the Linux kernel's netfilter framework via the nftables userspace utility, acting as an alternative to the nft command line program." + rationale: "A firewall utility is required to configure the Linux kernel's netfilter framework via the iptables or nftables back-end. The Linux kernel's netfilter framework host-based firewall can protect against threats originating from within a corporate network to include malicious mobile code and poorly configured software on a host. Note: Only one firewall utility should be installed and configured. FirewallD is dependent on the iptables package." 
+ impact: "Changing firewall settings while connected over the network can result in being locked out of the system." + remediation: "Run the following command to install FirewallD and iptables: # dnf install firewalld iptables." compliance: - cis: ["3.4.1.1"] - - cis_csc: ["9.4"] - - pci_dss: ["1.1"] - - tsc: ["CC6.6"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: any rules: - "c:rpm -q firewalld -> r:^firewalld-" - - "c:rpm -q nftables -> r:^nftables-" - "c:rpm -q iptables -> r:^iptables-" - ############################################### - # 3.4.2 Configure firewalld - ############################################### + # 3.4.1.2 Ensure iptables-services not installed with firewalld. (Automated) + - id: 5095 + title: "Ensure iptables-services not installed with firewalld." + description: "The iptables-services package contains the iptables.service and ip6tables.service. These services allow for management of the Host Based Firewall provided by the iptables package." + rationale: "iptables.service and ip6tables.service are still supported and can be installed with the iptables-services package. Running both firewalld and the services included in the iptables-services package may lead to conflict." + impact: "Running both firewalld and iptables/ip6tables service may lead to conflict." + remediation: "Run the following commands to stop the services included in the iptables-services package and remove the iptables-services package # systemctl stop iptables # systemctl stop ip6tables # dnf remove iptables-services." + compliance: + - cis: ["3.4.1.2"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - "c:rpm -q iptables-services -> r:^package iptables-services is not installed" + + # 3.4.1.3 Ensure nftables either not installed or masked with firewalld. (Automated) + - id: 5096 + title: "Ensure nftables either not installed or masked with firewalld." + description: "nftables is a subsystem of the Linux kernel providing filtering and classification of network packets/datagrams/frames and is the successor to iptables. Note: Support for using nftables as the back-end for firewalld was added in release v0.6.0. In Fedora 19 Linux derivatives, firewalld utilizes iptables as its back-end by default." + rationale: "Running both firewalld and nftables may lead to conflict. Note: firewalld may be configured as the front-end to nftables. If this is the case, nftables should be stopped and masked instead of removed." + remediation: 'Run the following command to remove nftables: # dnf remove nftables OR Run the following command to stop and mask nftables: # systemctl --now mask nftables.'
+ compliance: + - cis: ["3.4.1.3"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - 'c:systemctl status nftables -> r:Loaded:\s*disabled|Loaded:\s*masked|could not be found' + - 'c:systemctl status nftables -> r:Active:\s*inactive\s*\(dead\)|could not be found' + - "not p:nftables" - # 3.4.2.1 Ensure firewalld service is enabled and running (Scored) - - id: 5090 - title: "Ensure firewalld service is enabled and running" - description: "Ensure that the firewalld service is enabled to protect your system" - rationale: "firewalld (Dynamic Firewall Manager) tool provides a dynamically managed firewall. The tool enables network/firewall zones to define the trust level of network connections and/or interfaces. It has support both for IPv4 and IPv6 firewall settings. Also, it supports Ethernet bridges and allow you to separate between runtime and permanent configuration options. Finally, it supports an interface for services or applications to add firewall rules directly" - remediation: "Run the following command to enable and start firewalld: # systemctl --now enable firewalld" - compliance: - - cis: ["3.4.2.1"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2"] - - tsc: ["CC6.6"] + # 3.4.1.4 Ensure firewalld service enabled and running. (Automated) + - id: 5097 + title: "Ensure firewalld service enabled and running." + description: "firewalld.service enables the enforcement of firewall rules configured through firewalld." + rationale: "Ensure that the firewalld.service is enabled and running to enforce firewall rules configured through firewalld." + impact: "Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following command to unmask firewalld # systemctl unmask firewalld Run the following command to enable and start firewalld # systemctl --now enable firewalld." + compliance: + - cis: ["3.4.1.4"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - "p:firewalld" - "c:firewall-cmd --state -> r:running" - # 3.4.2.2 Ensure iptables is not enabled (Scored) - - id: 5091 - title: "Ensure iptables is not enabled" - description: "IPtables is an application that allows a system administrator to configure the IPv4 and IPv6 tables, chains and rules provided by the Linux kernel firewall. IPtables is installed as a dependency with firewalld." - rationale: "Running firewalld and IPtables concurrently may lead to conflict, therefore IPtables should be stopped and masked when using firewalld." - remediation: "Run the following command to stop and mask iptables: systemctl --now mask iptables" + # 3.4.1.5 Ensure firewalld default zone is set. (Automated) Not Implemented + # 3.4.1.6 Ensure network interfaces are assigned to appropriate zone. (Manual) Not Implemented + # 3.4.1.7 Ensure firewalld drops unnecessary services and ports. (Manual) - Not Implemented + + # 3.4.2.1 Ensure nftables is installed. 
(Automated) + - id: 5098 + title: "Ensure nftables is installed." + description: "nftables provides a new in-kernel packet classification framework that is based on a network-specific Virtual Machine (VM) and a new nft userspace command line tool. nftables reuses the existing Netfilter subsystems such as the existing hook infrastructure, the connection tracking system, NAT, userspace queuing and logging subsystem. Note: - nftables is available in Linux kernel 3.13 and newer. - Only one firewall utility should be installed and configured." + rationale: "nftables is a subsystem of the Linux kernel that can protect against threats originating from within a corporate network to include malicious mobile code and poorly configured software on a host." + impact: "Changing firewall settings while connected over the network can result in being locked out of the system." + remediation: "Run the following command to install nftables # dnf install nftables." compliance: - - cis: ["3.4.2.2"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2"] - - tsc: ["CC6.6"] + - cis: ["3.4.2.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - - 'c:systemctl status iptables -> r:Loaded:\s*disabled|Loaded:\s*masked|could not be found' - - 'c:systemctl status iptables -> r:Active:\s*inactive\s*\(dead\)|could not be found' - - "not p:iptables" + - "c:rpm -q nftables -> r:^nftables-" - # 3.4.2.3 Ensure nftables is not enabled (Scored) - - id: 5092 - title: "Ensure nftables is not enabled" - description: "nftables is a subsystem of the Linux kernel providing filtering and classification of network packets/datagrams/frames and is the successor to iptables. nftables are installed as a dependency with firewalld." - rationale: "Running firewalld and nftables concurrently may lead to conflict, therefore nftables should be stopped and masked when using firewalld." - remediation: "Run the following command to mask and stop nftables: systemctl --now mask nftables" + # 3.4.2.2 Ensure firewalld is either not installed or masked with nftables. (Automated) + - id: 5099 + title: "Ensure firewalld is either not installed or masked with nftables." + description: "Firewalld (Dynamic Firewall Manager) provides a dynamically managed firewall with support for network-firewall zones to assign a level of trust to a network and its associated connections interfaces or sources. It has support for IPv4, IPv6, Ethernet bridges and also for IPSet firewall settings. There is a separation of the runtime and permanent configuration options." + rationale: "Running both nftables.service and firewalld.service may lead to conflict and unexpected results." + remediation: "Run the following command to remove firewalld # dnf remove firewalld OR Run the following command to stop and mask firewalld # systemctl --now mask firewalld." 
+ compliance: + - cis: ["3.4.2.2"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: none + rules: + - "p:firewalld" + - "c:firewall-cmd --state -> r:running" + + # 3.4.2.3 Ensure iptables-services not installed with nftables. (Automated) + - id: 5100 + title: "Ensure iptables-services not installed with nftables." + description: "The iptables-services package contains the iptables.service and ip6tables.service. These services allow for management of the Host Based Firewall provided by the iptables package." + rationale: "iptables.service and ip6tables.service are still supported and can be installed with the iptables-services package. Running both nftables and the services included in the iptables-services package may lead to conflict." + remediation: "Run the following commands to stop the services included in the iptables-services package and remove the iptables-services package # systemctl stop iptables # systemctl stop ip6tables # dnf remove iptables-services." compliance: - cis: ["3.4.2.3"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2"] - - tsc: ["CC6.6"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:systemctl status nftables -> r:Loaded:\s*disabled|Loaded:\s*masked|could not be found' - - 'c:systemctl status nftables -> r:Active:\s*inactive\s*\(dead\)|could not be found' - - "not p:nftables" + - "c:rpm -q iptables-services -> r:^package iptables-services is not installed" - ############################################### - # 3.4.3 Configure nftables - ############################################### - - # 3.4.3.1 Ensure iptables are flushed (Not Scored) - - id: 5093 - title: "Ensure iptables are flushed" - description: "nftables is a replacement for iptables, ip6tables, ebtables and arptables" + # 3.4.2.4 Ensure iptables are flushed with nftables. (Manual) + - id: 5101 + title: "Ensure iptables are flushed with nftables." + description: "nftables is a replacement for iptables, ip6tables, ebtables and arptables." rationale: "It is possible to mix iptables and nftables. However, this increases complexity and also the chance to introduce errors. For simplicity flush out all iptables rules, and ensure it is not loaded." - remediation: "Run the following commands to flush iptables: For iptables: # iptables -F and For ip6tables: # ip6tables -F" - compliance: - - cis: ["3.4.3.1"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2"] - - tsc: ["CC6.6"] + remediation: "Run the following commands to flush iptables: For iptables: # iptables -F For ip6tables: # ip6tables -F." 
+ compliance: + - cis: ["3.4.2.4"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: none rules: - 'c:iptables -L -> !r:^\s*Chain|^\s*target && r:\s*\S+' - 'c:ip6tables -L -> !r:^\s*Chain|^\s*target && r:\s*\S+' - # 3.4.3.2 Ensure a table exists (Scored) - - id: 5094 - title: "Ensure a table exists" + # 3.4.2.5 Ensure an nftables table exists. (Automated) + - id: 5102 + title: "Ensure an nftables table exists." description: "Tables hold chains. Each table only has one address family and only applies to packets of this family. Tables can have one of five families." rationale: "nftables doesn't have any default tables. Without a table being build, nftables will not filter network traffic." - remediation: "Run the following command to create a table in nftables: # nft create table inet
.Example: # nft create table inet filter" + impact: "Adding rules to a running nftables can cause loss of connectivity to the system." + remediation: "Run the following command to create a table in nftables # nft create table inet
Example: # nft create table inet filter." compliance: - - cis: ["3.4.3.2"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2"] - - tsc: ["CC6.6"] + - cis: ["3.4.2.5"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - 'c:nft list tables -> r:\w+' - # 3.4.3.3 Ensure base chains exist (Scored) - - id: 5095 - title: "Ensure base chains exist" + # 3.4.2.6 Ensure nftables base chains exist. (Automated) + - id: 5103 + title: "Ensure nftables base chains exist." description: "Chains are containers for rules. They exist in two kinds, base chains and regular chains. A base chain is an entry point for packets from the networking stack, a regular chain may be used as jump target and is used for better rule organization." rationale: "If a base chain doesn't exist with a hook for input, forward, and delete, packets that would flow through those chains will not be touched by nftables." - remediation: "Run the following command to create the base chains: # nft create chain inet
{ type filter hook <(input|forward|output)> priority 0 \\; } . Example: # nft create chain inet filter input { type filter hook input priority 0 \\; } # nft create chain inet filter forward { type filter hook forward priority 0\\; } # nft create chain inet filter output { type filter hook output priority 0 \\; }" + impact: "If configuring nftables over ssh, creating a base chain with a policy of drop will cause loss of connectivity. Ensure that a rule allowing ssh has been added to the base chain prior to setting the base chain's policy to drop." + remediation: "Run the following command to create the base chains: # nft create chain inet
{ type filter hook <(input|forward|output)> priority 0 \\; } Example: # nft create chain inet filter input { type filter hook input priority 0 \\; } # nft create chain inet filter forward { type filter hook forward priority 0 \\; } # nft create chain inet filter output { type filter hook output priority 0 \\; }." compliance: - - cis: ["3.4.3.3"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2"] - - tsc: ["CC6.6"] + - cis: ["3.4.2.6"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - "c:nft list ruleset -> r:hook input" - "c:nft list ruleset -> r:hook forward" - "c:nft list ruleset -> r:hook output" - # 3.4.3.6 Ensure default deny firewall policy (Scored) - - id: 5096 - title: "Ensure default deny firewall policy" + # 3.4.2.7 Ensure nftables loopback traffic is configured. (Automated) + - id: 5104 + title: "Ensure nftables loopback traffic is configured." + description: "Configure the loopback interface to accept traffic. Configure all other interfaces to deny traffic to the loopback network." + rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure." + remediation: "Run the following commands to implement the loopback rules: # nft add rule inet filter input iif lo accept # nft create rule inet filter input ip saddr 127.0.0.0/8 counter drop. IF IPv6 is enabled on the system: Run the following command to implement the IPv6 loopback rule: # nft add rule inet filter input ip6 saddr ::1 counter drop." + compliance: + - cis: ["3.4.2.7"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: all + rules: + - 'c:sh -c "nft list ruleset | awk ''/hook input/,/}/''" -> r:iif "lo" accept' + - 'c:sh -c "nft list ruleset | awk ''/hook input/,/}/''" -> r:ip saddr 127.0.0.0/8' + - 'c:sh -c "nft list ruleset | awk ''/hook input/,/}/''" -> r:ip6 saddr ::1' + + # 3.4.2.8 Ensure nftables outbound and established connections are configured. (Manual) - Not Implemented + + # 3.4.2.9 Ensure nftables default deny firewall policy. (Automated) + - id: 5105 + title: "Ensure nftables default deny firewall policy." description: "Base chain policy is the default verdict that will be applied to packets reaching the end of the chain." - rationale: "There are two policies: accept (Default) and drop. If the policy is set to accept , the firewall will accept any packet that is not configured to be denied and the packet will continue transversing the network stack. It is easier to white list acceptable usage than to black list unacceptable usage." - remediation: "Run the following command for the base chains with the input, forward, and output hooks to implement a default DROP policy: # nft chain
{ policy drop \\; } . Example: # nft chain inet filter input { policy drop \\; } ; # nft chain inet filter forward { policy drop \\; } and # nft chain inet filter output { policy drop \\; }" + rationale: "There are two policies: accept (Default) and drop. If the policy is set to accept, the firewall will accept any packet that is not configured to be denied and the packet will continue traversing the network stack. It is easier to white list acceptable usage than to black list unacceptable usage. Note: Changing firewall settings while connected over the network can result in being locked out of the system." + impact: "If configuring nftables over ssh, creating a base chain with a policy of drop will cause loss of connectivity. Ensure that a rule allowing ssh has been added to the base chain prior to setting the base chain's policy to drop." + remediation: "Run the following command for the base chains with the input, forward, and output hooks to implement a default DROP policy: # nft chain
{ policy drop \\; } Example: # nft chain inet filter input { policy drop \\; } # nft chain inet filter forward { policy drop \\; } # nft chain inet filter output { policy drop \\; }." compliance: - - cis: ["3.4.3.6"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC6.6"] + - cis: ["3.4.2.9"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - "c:nft list ruleset -> r:hook input && r:policy drop" - "c:nft list ruleset -> r:hook forward && r:policy drop" - "c:nft list ruleset -> r:hook output && r:policy drop" - # 3.4.3.7 Ensure nftables service is enabled (Scored) - - id: 5097 - title: "Ensure nftables service is enabled" - description: "The nftables service allows for the loading of nftables rulesets during boot, or starting of the nftables service." + # 3.4.2.10 Ensure nftables service is enabled. (Automated) + - id: 5106 + title: "Ensure nftables service is enabled." + description: "The nftables service allows for the loading of nftables rulesets during boot, or starting on the nftables service." rationale: "The nftables service restores the nftables rules from the rules files referenced in the /etc/sysconfig/nftables.conf file during boot or the starting of the nftables service." - remediation: "Run the following command to enable the nftables service: # systemctl --now enable nftables" + remediation: "Run the following command to enable the nftables service: # systemctl enable nftables." compliance: - - cis: ["3.4.3.7"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2"] - - tsc: ["CC6.6"] + - cis: ["3.4.2.10"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - "p:nftables" + # 3.4.2.11 Ensure nftables rules are permanent. (Automated) - Not Implemented + ############################################### - # 3.4.4 Configure iptables + # 3.4.3 Configure iptables ############################################### ############################################### - # 3.4.4.1 Configure IPv4 iptables + # 3.4.3.1 Configure IPv4 iptables ############################################### - # 3.4.4.1.1 Ensure default deny firewall policy (Scored) - - id: 5098 - title: "Ensure iptables default deny firewall policy" - description: "A default deny all policy on connections ensures that any unconfigured network usage will be rejected." - rationale: "With a default accept policy the firewall will accept any packet that is not configured to be denied. It is easier to white list acceptable usage than to black list unacceptable usage." - remediation: "Run the following commands to implement a default DROP policy: # iptables -P INPUT DROP; # iptables -P OUTPUT DROP; # iptables -P FORWARD DROP" + + # 3.4.3.1.1 Ensure iptables packages are installed. (Automated) + - id: 5107 + title: "Ensure iptables packages are installed." + description: "iptables is a utility program that allows a system administrator to configure the tables provided by the Linux kernel firewall, implemented as different Netfilter modules, and the chains and rules it stores. 
Different kernel modules and programs are used for different protocols; iptables applies to IPv4, ip6tables to IPv6, arptables to ARP, and ebtables to Ethernet frames." + rationale: "A method of configuring and maintaining firewall rules is necessary to configure a Host Based Firewall." + remediation: "Run the following command to install iptables and iptables-services # dnf install iptables iptables-services." + compliance: + - cis: ["3.4.3.1.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: all + rules: + - "c:rpm -q iptables -> r:^iptables-" + + # 3.4.3.1.2 Ensure nftables is not installed with iptables. (Automated) + - id: 5108 + title: "Ensure nftables is not installed with iptables." + description: "nftables is a subsystem of the Linux kernel providing filtering and classification of network packets/datagrams/frames and is the successor to iptables." + rationale: "Running both iptables and nftables may lead to conflict." + remediation: "Run the following command to remove nftables: # dnf remove nftables." compliance: - - cis: ["3.4.4.1.1"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC6.6"] + - cis: ["3.4.3.1.2"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:iptables -L -> r:Chain INPUT \(policy DROP\)' - - 'c:iptables -L -> r:Chain FORWARD \(policy DROP\)' - - 'c:iptables -L -> r:Chain OUTPUT \(policy DROP\)' + - "c:rpm -q nftables -> r:^package nftables is not installed" - # 3.4.4.1.2 Ensure loopback traffic is configured (Scored) - - id: 5099 - title: "Ensure loopback traffic is configured" + # 3.4.3.1.3 Ensure firewalld is either not installed or masked with iptables. (Automated) + - id: 5109 + title: "Ensure firewalld is either not installed or masked with iptables." + description: "firewalld (Dynamic Firewall Manager) provides a dynamically managed firewall with support for network/firewall zones to assign a level of trust to a network and its associated connections, interfaces or sources. It has support for IPv4, IPv6, Ethernet bridges and also for IPSet firewall settings. There is a separation of the runtime and permanent configuration options." + rationale: "Running iptables.service and\\or ip6tables.service with firewalld.service may lead to conflict and unexpected results." + remediation: "Run the following command to remove firewalld # yum remove firewalld OR Run the following command to stop and mask firewalld # systemctl --now mask firewalld." 
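+    # Illustrative rpm output conventions relied on by the package checks in this subsection
+    # (the version string is an example only):
+    #   # rpm -q iptables     ->  iptables-1.8.4-...x86_64            (installed)
+    #   # rpm -q nftables     ->  package nftables is not installed   (absent)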
+ compliance: + - cis: ["3.4.3.1.3"] + - cis_csc_v8: ["4.4", "4.8"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.1.6", "1.2.1", "1.3.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.1", "1.2.5", "1.4.1", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] + condition: none + rules: + - "p:firewalld" + - "c:firewall-cmd --state -> r:running" + + # 3.4.3.2.1 Ensure iptables loopback traffic is configured. (Automated) + - id: 5110 + title: "Ensure iptables loopback traffic is configured." description: "Configure the loopback interface to accept traffic. Configure all other interfaces to deny traffic to the loopback network (127.0.0.0/8)." - rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network (127.0.0.0/8) traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure." - remediation: "Run the following commands to implement the loopback rules: # iptables -A INPUT -i lo -j ACCEPT # iptables -A OUTPUT -o lo -j ACCEPT # iptables -A INPUT -s 127.0.0.0/8 -j DROP" + rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network (127.0.0.0/8) traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure. Note: Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following commands to implement the loopback rules: # iptables -A INPUT -i lo -j ACCEPT # iptables -A OUTPUT -o lo -j ACCEPT # iptables -A INPUT -s 127.0.0.0/8 -j DROP." compliance: - - cis: ["3.4.4.1.2"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC6.6"] + - cis: ["3.4.3.2.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - 'c:iptables -L INPUT -v -n -> r:\.*ACCEPT\.*all\.*lo\.**\.*0.0.0.0/0\.*0.0.0.0/0' - 'c:iptables -L INPUT -v -n -> r:\.*DROP\.*all\.**\.**\.*127.0.0.0/8\.*0.0.0.0/0' - 'c:iptables -L OUTPUT -v -n -> r:\.*ACCEPT\.*all\.**\.*lo\.*0.0.0.0/0\.*0.0.0.0/0' - ############################################### - # 3.4.4.2 Configure IPv6 ip6tables - ############################################### - # 3.4.4.2.1 Ensure IPv6 default deny firewall policy (Scored) - - id: 5100 - title: "Ensure IPv6 default deny firewall policy" + # 3.4.3.2.2 Ensure iptables outbound and established connections are configured. (Manual) - Not Implemented + # 3.4.3.2.3 Ensure iptables rules exist for all open ports. (Automated) - Not Implemented + + # 3.4.3.2.4 Ensure iptables default deny firewall policy. (Automated) + - id: 5111 + title: "Ensure iptables default deny firewall policy." description: "A default deny all policy on connections ensures that any unconfigured network usage will be rejected." - rationale: "With a default accept policy the firewall will accept any packet that is not configured to be denied. It is easier to white list acceptable usage than to black list unacceptable usage." 
- remediation: "Run the following commands to implement a default DROP policy: # ip6tables -P INPUT DROP; # ip6tables -P OUTPUT DROP; # ip6tables -P FORWARD DROP" + rationale: "With a default accept policy the firewall will accept any packet that is not configured to be denied. It is easier to white list acceptable usage than to black list unacceptable usage. Note: Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following commands to implement a default DROP policy: # iptables -P INPUT DROP # iptables -P OUTPUT DROP # iptables -P FORWARD DROP." compliance: - - cis: ["3.4.4.2.1"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC6.6"] + - cis: ["3.4.3.2.4"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - - 'c:ip6tables -L -> r:Chain INPUT \(policy DROP\)' - - 'c:ip6tables -L -> r:Chain FORWARD \(policy DROP\)' - - 'c:ip6tables -L -> r:Chain OUTPUT \(policy DROP\)' + - 'c:iptables -L -> r:Chain INPUT \(policy DROP\)' + - 'c:iptables -L -> r:Chain FORWARD \(policy DROP\)' + - 'c:iptables -L -> r:Chain OUTPUT \(policy DROP\)' - # 3.4.4.2.2 Ensure IPv6 loopback traffic is configured (Scored) - - id: 5101 - title: "Ensure loopback traffic is configured" + # 3.4.3.2.5 Ensure iptables rules are saved. (Automated) - Not Implemented + # 3.4.3.2.6 Ensure iptables is enabled and active. (Automated) + - id: 5112 + title: "Ensure iptables is enabled and active." + description: "iptables.service is a utility for configuring and maintaining iptables." + rationale: "iptables.service will load the iptables rules saved in the file /etc/sysconfig/iptables at boot, otherwise the iptables rules will be cleared during a re-boot of the system." + remediation: "Run the following command to enable and start iptables: systemctl --now enable iptables." + compliance: + - cis: ["3.4.3.2.6"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: all + rules: + - "c:systemctl is-enabled iptables -> r:^enabled" + - "c:systemctl is-active iptables -> r:^active" + + ############################################### + # 3.4.3.3 Configure IPv6 ip6tables + ############################################### + + # 3.4.3.3.1 Ensure ip6tables loopback traffic is configured. (Automated) + - id: 5113 + title: "Ensure ip6tables loopback traffic is configured." description: "Configure the loopback interface to accept traffic. Configure all other interfaces to deny traffic to the loopback network (::1)." - rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network (::1) traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure." 
- remediation: "Run the following commands to implement the loopback rules: # ip6tables -A INPUT -i lo -j ACCEPT # ip6tables -A OUTPUT -o lo -j ACCEPT # ip6tables -A INPUT -s ::1 -j DROP" + rationale: "Loopback traffic is generated between processes on machine and is typically critical to operation of the system. The loopback interface is the only place that loopback network (::1) traffic should be seen, all other interfaces should ignore traffic on this network as an anti-spoofing measure. Note: Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following commands to implement the loopback rules: # ip6tables -A INPUT -i lo -j ACCEPT # ip6tables -A OUTPUT -o lo -j ACCEPT # ip6tables -A INPUT -s ::1 -j DROP." compliance: - - cis: ["3.4.4.2.2"] - - cis_csc: ["9.4"] - - pci_dss: ["1.2.1"] - - tsc: ["CC6.6"] + - cis: ["3.4.3.3.1"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - 'c:ip6tables -L INPUT -v -n -> r:\.*ACCEPT\.*all\.*lo\.**\.*::/0\.*::/0' - 'c:ip6tables -L INPUT -v -n -> r:\.*DROP\.*all\.**\.**\.*::1\.*::/0' - 'c:ip6tables -L OUTPUT -v -n -> r:\.*ACCEPT\.*all\.**\.*lo\.*::/0\.*::/0' - # 3.5 Ensure wireless interfaces are disabled (Scored) - - id: 5102 - title: "Ensure wireless interfaces are disabled" - description: "Wireless networking is used when wired networks are unavailable. Red Hat Enterprise Linux contains a wireless tool kit to allow system administrators to configure and use wireless networks." - rationale: "If wireless is not to be used, wireless devices can be disabled to reduce the potential attack surface." - remediation: "Run the following command to disable any wireless interfaces: # nmcli radio all off . Disable any wireless interfaces in your network configuration." - compliance: - - cis: ["3.5"] - - cis_csc: ["15.4", "15.5"] - - pci_dss: ["1.2.3"] - - tsc: ["CC6.6"] - references: - - nmcli(1) - Linux man page + # 3.4.3.3.2 Ensure ip6tables outbound and established connections are configured. (Manual) - Not Implemented + # 3.4.3.3.3 Ensure ip6tables firewall rules exist for all open ports. (Automated) - Not Implemented + # 3.4.3.3.4 Ensure ip6tables default deny firewall policy. (Automated) + - id: 5114 + title: "Ensure ip6tables default deny firewall policy." + description: "A default deny all policy on connections ensures that any unconfigured network usage will be rejected." + rationale: "With a default accept policy the firewall will accept any packet that is not configured to be denied. It is easier to white list acceptable usage than to black list unacceptable usage. Note: Changing firewall settings while connected over network can result in being locked out of the system." + remediation: "Run the following commands to implement a default DROP policy: # ip6tables -P INPUT DROP # ip6tables -P OUTPUT DROP # ip6tables -P FORWARD DROP." 
+ compliance: + - cis: ["3.4.3.3.4"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] condition: all rules: - - "c:nmcli radio wifi -> r:^disabled" - - "c:nmcli radio wwan -> r:^disabled" + - 'c:ip6tables -L -> r:Chain INPUT \(policy DROP\)' + - 'c:ip6tables -L -> r:Chain FORWARD \(policy DROP\)' + - 'c:ip6tables -L -> r:Chain OUTPUT \(policy DROP\)' - # 3.6 Disable IPv6 (Not Scored) - - id: 5103 - title: "Disable IPv6" - description: "Although IPv6 has many advantages over IPv4, not all organizations have IPv6 or dual stack configurations implemented." - rationale: "If IPv6 or dual stack is not to be used, it is recommended that IPv6 be disabled to reduce the attack surface of the system." - remediation: 'Edit /etc/default/grub and add ipv6.disable=1 to the GRUB_CMDLINE_LINUX parameters: GRUB_CMDLINE_LINUX="ipv6.disable=1" .Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg' - compliance: - - cis: ["3.6"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.6", "CC5.2"] - condition: all + # 3.4.3.3.5 Ensure ip6tables rules are saved. (Automated) - Not Implemented + + # 3.4.3.3.6 Ensure ip6tables is enabled and active. (Automated) + - id: 5115 + title: "Ensure ip6tables is enabled and active." + description: "ip6tables.service is a utility for configuring and maintaining ip6tables." + rationale: "ip6tables.service will load the iptables rules saved in the file /etc/sysconfig/ip6tables at boot, otherwise the ip6tables rules will be cleared during a re-boot of the system." + remediation: "Run the following command to enable and start ip6tables: # systemctl --now start ip6tables." + compliance: + - cis: ["3.4.3.3.6"] + - cis_csc_v8: ["4.4"] + - cis_csc_v7: ["9.4"] + - cmmc_v2.0: ["AC.L1-3.1.20", "CM.L2-3.4.7", "SC.L1-3.13.1", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.1"] + - nist_sp_800-53: ["SC-7(5)"] + - pci_dss_v3.2.1: ["1.1.4", "1.3.1"] + - pci_dss_v4.0: ["1.2.1", "1.4.1"] + - soc_2: ["CC6.6"] + condition: none rules: - 'f:/boot/grub2/grubenv -> r:^\s*kernelopts=\.+ipv6.disable=1' @@ -1917,203 +2540,206 @@ checks: # 4 Logging and Auditing ############################################### ############################################### - # 4.1 Configure System Accounting (auditd) + # 4.1.1 Configure System Accounting (auditd) ############################################### - # 4.1.1.1 Ensure auditd is installed (Scored) - - id: 5105 - title: "Ensure auditd is installed" + # 4.1.1.1 Ensure auditd is installed. (Automated) + - id: 5116 + title: "Ensure auditd is installed." description: "auditd is the userspace component to the Linux Auditing System. It's responsible for writing audit records to the disk." rationale: "The capturing of system events provides system administrators with information to allow them to determine if unauthorized access to their system is occurring." - remediation: "Run the following command to Install auditd # dnf install audit audit-libs" + remediation: "Run the following command to Install auditd # dnf install audit." 
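+    # Illustrative only: with the packages present, `rpm -q audit audit-libs` returns one line per
+    # package, each starting with "audit-" (e.g. audit-3.0.7-...el8.x86_64), which the rule below expects.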
compliance: - cis: ["4.1.1.1"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.1"] - - nist_800_53: ["AU.2"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.2", "CC6.3", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.2", "8.5"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - "c:rpm -q audit audit-libs -> r:^audit-" - # 4.1.1.2 Ensure auditd service is enabled (Scored) - - id: 5106 - title: "Ensure auditd service is enabled" + # 4.1.1.2 Ensure auditd service is enabled. (Automated) + - id: 5117 + title: "Ensure auditd service is enabled." description: "Turn on the auditd daemon to record system events." rationale: "The capturing of system events provides system administrators with information to allow them to determine if unauthorized access to their system is occurring." - remediation: "Run the following command to enable auditd : # systemctl --now enable auditd" + remediation: "Run the following command to enable auditd: # systemctl --now enable auditd." compliance: - - cis: ["4.1.2"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.1", "10.7"] - - nist_800_53: ["AU.2"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.2", "CC6.3", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["4.1.1.2"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - "p:auditd" - # 4.1.1.3 Ensure auditing for processes that start prior to auditd is enabled (Scored) - - id: 5107 - title: "Ensure auditing for processes that start prior to auditd is enabled" + # 4.1.1.3 Ensure auditing for processes that start prior to auditd is enabled. (Automated) + - id: 5118 + title: "Ensure auditing for processes that start prior to auditd is enabled." description: "Configure grub2 so that processes that are capable of being audited can be audited even if they start up prior to auditd startup." - rationale: "The capturing of system events provides system administrators with information to allow them to determine if unauthorized access to their system is occurring." - remediation: 'Edit /etc/default/grub and add audit=1 to GRUB_CMDLINE_LINUX : GRUB_CMDLINE_LINUX="audit=1" . Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg' + rationale: "Audit events need to be captured on processes that start up prior to auditd, so that potential malicious activity cannot go undetected." + remediation: "Run the following command to add audit=1 to GRUB_CMDLINE_LINUX: # grubby --update-kernel ALL --args 'audit=1'." 
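+    # Note (editorial, not CIS text): the "condition: none" rule below matches a kernelopts= entry in
+    # /boot/grub2/grubenv that lacks audit=1, so the check passes only when every kernelopts entry
+    # already carries audit=1, e.g. kernelopts=root=/dev/mapper/... audit=1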
compliance: - cis: ["4.1.1.3"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.2.6", "10.7"] - - nist_800_53: ["AU.2"] - - gpg_13: ["7.9"] - - gdpr_IV: ["35.7.d", "32.2"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: none rules: - "f:/boot/grub2/grubenv -> r:kernelopts= && !r:audit=1" - # 4.1.1.4 Ensure auditing for processes that start prior to auditd is enabled (Scored) - - id: 5108 - title: "Ensure audit_backlog_limit is sufficient" + # 4.1.1.4 Ensure audit_backlog_limit is sufficient. (Automated) + - id: 5119 + title: "Ensure audit_backlog_limit is sufficient." description: "The backlog limit has a default setting of 64." rationale: "During boot if audit=1, then the backlog will hold 64 records. If more than 64 records are created during boot, auditd records will be lost and potential malicious activity could go undetected." - remediation: "Edit /etc/default/grub and add audit_backlog_limit= to GRUB_CMDLINE_LINUX: Run the following command to update the grub2 configuration: # grub2-mkconfig -o /boot/grub2/grub.cfg" + remediation: "Run the following command to add audit_backlog_limit= to GRUB_CMDLINE_LINUX: # grubby --update-kernel ALL --args 'audit_backlog_limit=' Example: # grubby --update-kernel ALL --args 'audit_backlog_limit=8192'." compliance: - cis: ["4.1.1.4"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.7"] - - nist_800_53: ["AU.4"] - - hipaa: ["164.312.b"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - - 'f:/boot/grub2/grubenv -> r:kernelopts= && n:audit_backlog_limit=(\d+) compare >= 8192' + - 'c:grubby --info=ALL -> n:^args=\.*\saudit_backlog_limit=(\d+) compare >= 8192' ############################################### # 4.1.2 Configure Data Retention ############################################### - # 4.1.2.1 Ensure audit log storage size is configured (Not Scored) - - id: 5109 - title: "Ensure audit log storage size is configured" + # 4.1.2.1 Ensure audit log storage size is configured. (Automated) + - id: 5120 + title: "Ensure audit log storage size is configured." description: "Configure the maximum size of the audit log file. Once the log reaches the maximum size, it will be rotated and a new log file will be started." rationale: "It is important that an appropriate size is determined for log files so that they do not impact the system and audit data is not lost." - remediation: "Set the following parameter in /etc/audit/auditd.conf in accordance with site policy: max_log_file = " + remediation: "Set the following parameter in /etc/audit/auditd.conf in accordance with site policy: max_log_file = ." 
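+    # Illustrative /etc/audit/auditd.conf entry (8 MB is an example value; set it per site policy):
+    #   max_log_file = 8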
compliance: - cis: ["4.1.2.1"] - - cis_csc: ["6.4"] - - pci_dss: ["10.7"] - - nist_800_53: ["AU.4"] - - hipaa: ["164.312.b"] + - cis_csc_v8: ["8.3"] + - cis_csc_v7: ["6.4"] + - iso_27001-2013: ["A.12.4.1"] + - pci_dss_v3.2.1: ["10.7"] + - soc_2: ["A1.1"] condition: all rules: - 'f:/etc/audit/auditd.conf -> r:^max_log_file\s*=\s*\d+' - # 4.1.2.2 Ensure audit logs are not automatically deleted (Scored) - - id: 5110 - title: "Ensure audit logs are not automatically deleted" + # 4.1.2.2 Ensure audit logs are not automatically deleted. (Automated) + - id: 5121 + title: "Ensure audit logs are not automatically deleted." description: "The max_log_file_action setting determines how to handle the audit log file reaching the max file size. A value of keep_logs will rotate the logs but never delete old logs." rationale: "In high security contexts, the benefits of maintaining a long audit history exceed the cost of storing the audit history." - remediation: "Set the following parameter in /etc/audit/auditd.conf: max_log_file_action = keep_logs" + remediation: "Set the following parameter in /etc/audit/auditd.conf: max_log_file_action = keep_logs." compliance: - cis: ["4.1.2.2"] - - cis_csc: ["6.4"] - - pci_dss: ["10.7"] - - nist_800_53: ["AU.9"] - - hipaa: ["164.312.b"] + - cis_csc_v8: ["8.3"] + - cis_csc_v7: ["6.4"] + - iso_27001-2013: ["A.12.4.1"] + - pci_dss_v3.2.1: ["10.7"] + - soc_2: ["A1.1"] condition: all rules: - 'f:/etc/audit/auditd.conf -> r:^max_log_file_action\s*=\s*keep_logs' - # 4.1.2.3 Ensure system is disabled when audit logs are full (Scored) - - id: 5111 - title: "Ensure system is disabled when audit logs are full" - description: "The auditd daemon can be configured to halt the system when the audit logs are full." + # 4.1.2.3 Ensure system is disabled when audit logs are full. (Automated) + - id: 5122 + title: "Ensure system is disabled when audit logs are full." + description: "The auditd daemon can be configured to halt the system when the audit logs are full. The admin_space_left_action parameter tells the system what action to take when the system has detected that it is low on disk space. Valid values are ignore, syslog, suspend, single, and halt. - ignore, the audit daemon does nothing - Syslog, the audit daemon will issue a warning to syslog - Suspend, the audit daemon will stop writing records to the disk - single, the audit daemon will put the computer system in single user mode - halt, the audit daemon will shutdown the system." rationale: "In high security contexts, the risk of detecting unauthorized access or nonrepudiation exceeds the benefit of the system's availability." - remediation: "Set the following parameters in /etc/audit/auditd.conf: space_left_action = email action_mail_acct = root admin_space_left_action = halt" + impact: "If the admin_space_left_action parameter is set to halt the audit daemon will shutdown the system when the disk partition containing the audit logs becomes full." + remediation: "Set the following parameters in /etc/audit/auditd.conf: space_left_action = email action_mail_acct = root set admin_space_left_action to either halt or single in /etc/audit/auditd.conf. Example: admin_space_left_action = halt." 
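+    # Illustrative /etc/audit/auditd.conf excerpt matching the rules below:
+    #   space_left_action = email
+    #   action_mail_acct = root
+    #   admin_space_left_action = halt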
compliance: - - cis: ["4.1.1.2"] - - cis_csc: ["6.3"] - - pci_dss: ["10.7"] + - cis: ["4.1.2.3"] + - cis_csc_v8: ["8.2", "8.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3", "10.7"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + - soc_2: ["A1.1"] condition: all rules: - 'f:/etc/audit/auditd.conf -> r:^space_left_action\s*=\s*email' - 'f:/etc/audit/auditd.conf -> r:^action_mail_acct\s*=\s*root' - 'f:/etc/audit/auditd.conf -> r:^admin_space_left_action\s*=\s*halt' - ## 4.1.3 Ensure changes to system administration scope (sudoers) is collected (Scored) - - id: 5112 - title: "Ensure changes to system administration scope (sudoers) is collected" - description: 'Monitor scope changes for system administrations. If the system has been properly configured to force system administrators to log in as themselves first and then use the sudo command to execute privileged commands, it is possible to monitor changes in scope. The file /etc/sudoers will be written to when the file or its attributes have changed. The audit records will be tagged with the identifier "scope."' - rationale: "Changes in the /etc/sudoers file can indicate that an unauthorized change has been made to scope of system administrator activity." - remediation: "Add the following line to the /etc/audit/audit.rules file: -w /etc/sudoers -p wa -k scope -w /etc/sudoers.d/ -p wa -k scope" + # 4.1.3.1 Ensure changes to system administration scope (sudoers) is collected. (Automated) + - id: 5123 + title: "Ensure changes to system administration scope (sudoers) is collected." + description: 'Monitor scope changes for system administrators. If the system has been properly configured to force system administrators to log in as themselves first and then use the sudo command to execute privileged commands, it is possible to monitor changes in scope. The file /etc/sudoers, or files in /etc/sudoers.d, will be written to when the file(s) or related attributes have changed. The audit records will be tagged with the identifier "scope".' + rationale: "Changes in the /etc/sudoers and /etc/sudoers.d files can indicate that an unauthorized change has been made to the scope of system administrator activity." + remediation: "Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor scope changes for system administrators. Example: # printf \" -w /etc/sudoers -p wa -k scope -w /etc/sudoers.d -p wa -k scope \" >> /etc/audit/rules.d/50-scope.rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi." 
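+    # Illustrative /etc/audit/rules.d/50-scope.rules content (file name taken from the remediation example):
+    #   -w /etc/sudoers -p wa -k scope
+    #   -w /etc/sudoers.d -p wa -k scope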
compliance: - - cis: ["4.1.15"] - - cis_csc: ["4.8"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["4.1.3.1"] + - cis_csc_v7: ["4.8"] + - iso_27001-2013: ["A.12.4.3"] condition: all rules: - "d:/etc/audit/rules.d -> .rules -> r:-w /etc/sudoers && r:-p wa && r:-k scope" - "d:/etc/audit/rules.d -> .rules -> r:-w /etc/sudoers.d/ && r:-p wa && r:-k scope" - # 4.1.4 Ensure login and logout events are collected (Scored) - - id: 5113 - title: "Ensure login and logout events are collected" - description: "Monitor login and logout events. The parameters below track changes to files associated with login/logout events. The file /var/log/faillog tracks failed events from login. The file /var/log/lastlog maintain records of the last time a user successfully logged in." - rationale: "Monitoring login/logout events could provide a system administrator with information associated with brute force attacks against user logins." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -w /var/log/lastlog -p wa -k logins -w /var/run/faillock/ -p wa -k logins" - compliance: - - cis: ["4.1.4"] - - cis_csc: ["4.9", "16.13"] - - pci_dss: ["10.2.1", "10.2.4", "10.3"] - - nist_800_53: ["AC.7", "AU.14"] - - gpg_13: ["7.8"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/log/lastlog -p wa -k logins' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/run/faillock/ -p wa -k logins' - - # 4.1.5 Ensure session initiation information is collected (Scored) - - id: 5114 - title: "Ensure session initiation information is collected" - description: 'Monitor session initiation events. The parameters in this section track changes to the files associated with session events. The file /var/run/utmp file tracks all currently logged in users. All audit records will be tagged with the identifier "session." The /var/log/wtmp file tracks logins, logouts, shutdown, and reboot events. The file /var/log/btmp keeps track of failed login attempts and can be read by entering the command /usr/bin/last -f /var/log/btmp . All audit records will be tagged with the identifier "logins.".' - rationale: "Monitoring these files for changes could alert a system administrator to logins occurring at unusual hours, which could indicate intruder activity (i.e. a user logging in at a time when they do not normally log in)." 
- remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -w /var/run/utmp -p wa -k session -w /var/log/wtmp -p wa -k logins -w /var/log/btmp -p wa -k logins" - compliance: - - cis: ["4.1.5"] - - cis_csc: ["4.9", "16.13"] - - pci_dss: ["10.3"] - - nist_800_53: ["AC.7", "AU.14"] - - hipaa: ["164.312.b"] - condition: all - rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/run/utmp -p wa -k session' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/log/wtmp -p wa -k logins' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/log/btmp -p wa -k logins' - - # 4.1.6 Ensure events that modify date and time information are collected (Scored) - - id: 5115 - title: "Ensure events that modify date and time information are collected" - description: 'Capture events where the system date and/or time has been modified. The parameters in this section are set to determine if the adjtimex (tune kernel clock), settimeofday (Set time, using timeval and timezone structures) stime (using seconds since 1/1/1970) or clock_settime (allows for the setting of several internal clocks and timers) system calls have been executed and always write an audit record to the /var/log/audit.log file upon exit, tagging the records with the identifier "time-change".' + # 4.1.3.2 Ensure actions as another user are always logged. (Automated) + - id: 5124 + title: "Ensure actions as another user are always logged." + description: "sudo provides users with temporary elevated privileges to perform operations, either as the superuser or another user." + rationale: "Creating an audit log of users with temporary elevated privileges and the operation(s) they performed is essential to reporting. Administrators will want to correlate the events written to the audit trail with the records written to sudo's logfile to verify if unauthorized commands have been executed." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor elevated privileges. 64 Bit systems Example: # printf \" -a always,exit -F arch=b64 -C euid!=uid -F auid!=unset -S execve -k user_emulation -a always,exit -F arch=b32 -C euid!=uid -F auid!=unset -S execve -k user_emulation \" >> /etc/audit/rules.d/50-user_emulation.rules Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." 
+ compliance: + - cis: ["4.1.3.2"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["4.9"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.9.4.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:exit,always|always,exit && r:-F arch=b64 && r:-C euid!=uid|-C uid!=euid && r:-F auid!=unset|-F auid!=1|-F auid!=4294967295 && r:-S execve && r:-k user_emulation|key=user_emulation' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:exit,always|always,exit && r:-F arch=b32 && r:-C euid!=uid|-C uid!=euid && r:-F auid!=unset|-F auid!=1|-F auid!=4294967295 && r:-S execve && r:-k user_emulation|key=user_emulation' + - "c:auditctl -l -> r:^-a && r:exit,always|always,exit && r:-F arch=b64 && r:-C euid!=uid|-C uid!=euid && r:-F auid!=unset|-F auid!=1|-F auid!=4294967295 && r:-S execve && r:-k user_emulation|key=user_emulation" + - "c:auditctl -l -> r:^-a && r:exit,always|always,exit && r:-F arch=b32 && r:-C euid!=uid|-C uid!=euid && r:-F auid!=unset|-F auid!=1|-F auid!=4294967295 && r:-S execve && r:-k user_emulation|key=user_emulation" + + # 4.1.3.3 Ensure events that modify the sudo log file are collected. (Automated) - Not Implemented + + # 4.1.3.4 Ensure events that modify date and time information are collected. (Automated) + - id: 5125 + title: "Ensure events that modify date and time information are collected." + description: 'Capture events where the system date and/or time has been modified. The parameters in this section are set to determine if the; - adjtimex - tune kernel clock - settimeofday - set time using timeval and timezone structures - stime - using seconds since 1/1/1970 - clock_settime - allows for the setting of several internal clocks and timers system calls have been executed. Further, ensure to write an audit record to the configured audit log file upon exit, tagging the records with a unique identifier such as "time-change".' rationale: "Unexpected changes in system date and/or time could be a sign of malicious activity on the system." - remediation: "For 32 bit systems add the following lines to the /etc/audit/audit.rules file: -a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change -a always,exit -F arch=b32 -S clock_settime -k time-change -w /etc/localtime -p wa -k time-change For 64 bit systems add the following lines to the /etc/audit/audit.rules file: -a always,exit -F arch=b64 -S adjtimex -S settimeofday -k time-change -a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change -a always,exit -F arch=b64 -S clock_settime -k time-change -a always,exit -Farch=b32 -S clock_settime -k time-change -w /etc/localtime -p wa -k time-change" + remediation: "Create audit rules Edit or create fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64. In addition, add stime to the system call audit. Example: -a always,exit -F arch=b32 -S adjtimex settimeofday clock_settime stime -k time-change." 
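+    # Illustrative rules file for 64-bit systems (file name and comma syntax are examples, not CIS text),
+    # e.g. /etc/audit/rules.d/50-time-change.rules:
+    #   -a always,exit -F arch=b64 -S adjtimex,settimeofday,clock_settime -k time-change
+    #   -a always,exit -F arch=b32 -S adjtimex,settimeofday,clock_settime,stime -k time-change
+    #   -w /etc/localtime -p wa -k time-change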
compliance: - - cis: ["4.1.6"] - - cis_csc: ["5.5"] - - pci_dss: ["10.4.2", "10.2.7"] - - nist_800_53: ["AU.14", "AU.6"] - - gpg_13: ["7.9"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["4.1.3.4"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["5.5"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.1.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change' @@ -2121,42 +2747,20 @@ checks: - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-a always,exit -F arch=b64 -S adjtimex -S settimeofday -S stime -k time-change' - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-a always,exit -F arch=b64 -S clock_settime -k time-change' - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /etc/localtime -p wa -k time-change' + - "c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b64 && r:-S && r:adjtimex && r:settimeofday && r:clock_settime && r:-k time-change|key=time-change" + - "c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S && r:adjtimex && r:settimeofday && r:clock_settime && r:-k time-change|key=time-change" + - "c:auditctl -l -> r:^-w && r:/etc/localtime && r:-p wa && r:-k time-change|key=time-change" - # 4.1.7 Ensure events that modify the system's Mandatory Access Controls are collected (Scored) - - id: 5116 - title: "Ensure events that modify the system's Mandatory Access Controls are collected" - description: "Monitor SELinux mandatory access controls. The parameters below monitor any write access (potential additional, deletion or modification of files in the directory) or attribute changes to the /etc/selinux or /etc/apparmor and /etc/apparmor.d directories." - rationale: "Changes to files in these directories could indicate that an unauthorized user is attempting to modify access controls and change security contexts, leading to a compromise of the system." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -w /etc/selinux/ -p wa -k MAC-policy -w /usr/share/selinux/ -p wa -k MAC-policy" - compliance: - - cis: ["4.1.7"] - - cis_csc: ["5.5"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /etc/selinux/ -p wa -k MAC-policy' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /usr/share/selinux/ -p wa -k MAC-policy' - - # 4.1.8 Ensure events that modify the system's network environment are collected (Scored) - - id: 5117 - title: "Ensure events that modify the system's network environment are collected" - description: "Record changes to network environment files or system calls. The below parameters monitor the sethostname (set the systems host name) or setdomainname (set the systems domainname) system calls, and write an audit event on system call exit. 
The other parameters monitor the /etc/issue and /etc/issue.net files (messages displayed pre-login), /etc/hosts (file containing host names and associated IP addresses), /etc/sysconfig/network file and /etc/sysconfig/network-scripts/ directory (containing network interface scripts and configurations)." - rationale: 'Monitoring sethostname and setdomainname will identify potential unauthorized changes to host and domainname of a system. The changing of these names could potentially break security parameters that are set based on those names. The /etc/hosts file is monitored for changes in the file that can indicate an unauthorized intruder is trying to change machine associations with IP addresses and trick users and processes into connecting to unintended machines. Monitoring /etc/issue and /etc/issue.net is important, as intruders could put disinformation into those files and trick users into providing information to the intruder. Monitoring /etc/sysconfig/network and /etc/sysconfig/network-scripts/ is important as it can show if network interfaces or scripts are being modified in a way that can lead to the machine becoming unavailable or compromised. All audit records will be tagged with the identifier "system-locale."' - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -a always,exit -F arch=b64 -S sethostname -S setdomainname -k system-locale -a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale -w /etc/issue -p wa -k system-locale -w /etc/issue.net -p wa -k system-locale -w /etc/hosts -p wa -k system-locale -w /etc/sysconfig/network -p wa -k system-locale -w /etc/sysconfig/network-scripts/ -p wa -k system-locale" + # 4.1.3.5 Ensure events that modify the system's network environment are collected. (Automated) + - id: 5126 + title: "Ensure events that modify the system's network environment are collected." + description: "Record changes to network environment files or system calls. The below parameters monitor the following system calls, and write an audit event on system call exit: - sethostname - set the systems host name - setdomainname - set the systems domain name The files being monitored are: - /etc/issue and /etc/issue.net - messages displayed pre-login - /etc/hosts - file containing host names and associated IP addresses - /etc/sysconfig/network - additional information that is valid to all network interfaces - /etc/sysconfig/network-scripts/ - directory containing network interface scripts and configurations files." + rationale: "Monitoring sethostname and setdomainname will identify potential unauthorized changes to host and domainname of a system. The changing of these names could potentially break security parameters that are set based on those names. The /etc/hosts file is monitored for changes that can indicate an unauthorized intruder is trying to change machine associations with IP addresses and trick users and processes into connecting to unintended machines. Monitoring /etc/issue and /etc/issue.net is important, as intruders could put disinformation into those files and trick users into providing information to the intruder. Monitoring /etc/sysconfig/network is important as it can show if network interfaces or scripts are being modified in a way that can lead to the machine becoming unavailable or compromised. All audit records should have a relevant tag associated with them." 
+ remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor events that modify the system's network environment. 64 Bit systems Example: # printf \" -a always,exit -F arch=b64 -S sethostname,setdomainname -k system-locale -a always,exit -F arch=b32 -S sethostname,setdomainname -k system-locale -w /etc/issue -p wa -k system-locale -w /etc/issue.net -p wa -k system-locale -w /etc/hosts -p wa -k system-locale -w /etc/sysconfig/network -p wa -k system-locale -w /etc/sysconfig/network-scripts/ -p wa -k system-locale \" >> /etc/audit/rules.d/50-system_local.rules Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." compliance: - - cis: ["4.1.8"] - - cis_csc: ["5.5"] - - pci_dss: ["10.2.7"] - - nist_800_53: ["AU.14", "AU.6"] - - gpg_13: ["7.9"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["4.1.3.5"] + - cis_csc_v7: ["5.5"] + - iso_27001-2013: ["A.12.1.2"] condition: all rules: - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale' @@ -2166,68 +2770,47 @@ checks: - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /etc/hosts -p wa -k system-locale' - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /etc/sysconfig/network -p wa -k system-locale' - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /etc/sysconfig/network-scripts/ -p wa -k system-locale' + - "c:auditctl -l -> r:^-a && r:exit,always|always,exit && r:-F arch=b64 && r:-S && r:sethostname && r:setdomainname && r:-k system-locale|-F key=system-locale" + - "c:auditctl -l -> r:^-a && r:exit,always|always,exit && r:-F arch=b32 && r:-S && r:sethostname && r:setdomainname && r:-k system-locale|-F key=system-locale" + - "c:auditctl -l -> r:^-w && r:/etc/issue && r:-p wa && r:-k system-locale|key=system-locale" + - "c:auditctl -l -> r:^-w && r:/etc/issue.net && r:-p wa && r:-k system-locale|key=system-locale" + - "c:auditctl -l -> r:^-w && r:/etc/hosts && r:-p wa && r:-k system-locale|key=system-locale" + - "c:auditctl -l -> r:^-w && r:/etc/sysconfig/network && r:-p wa && r:-k system-locale|key=system-locale" + - "c:auditctl -l -> r:^-w && r:/etc/sysconfig/network-scripts && r:-p wa && r:-k system-locale|key=system-locale" - # 4.1.9 Ensure discretionary access control permission modification events are collected (Scored) - - id: 5118 - title: "Ensure discretionary access control permission modification events are collected" - description: 'Monitor changes to file permissions, attributes, ownership and group. The parameters in this section track changes for system calls that affect file permissions and attributes. The chmod , fchmod and fchmodat system calls affect the permissions associated with a file. The chown , fchown , fchownat and lchown system calls affect owner and group attributes on a file. The setxattr , lsetxattr , fsetxattr (set extended file attributes) and removexattr , lremovexattr , fremovexattr (remove extended file attributes) control extended file attributes. In all cases, an audit record will only be written for non-system user ids (auid >= 1000) and will ignore Daemon events (auid = 4294967295). 
All audit records will be tagged with the identifier "perm_mod."' - rationale: "Monitoring for changes in file attributes could alert a system administrator to activity that could indicate intruder activity or policy violation." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -a always,exit -F arch=b64 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b32 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b64 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b32 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b64 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod -a always,exit -F arch=b32 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lrem" - compliance: - - cis: ["4.1.9"] - - cis_csc: ["5.5"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b32 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b32 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b32 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b64 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b64 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b64 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod' - - # 4.1.10 Ensure unsuccessful unauthorized file access attempts are collected (Scored) - - id: 5119 - title: "Ensure unsuccessful unauthorized file access attempts are collected" - description: 'Monitor for unsuccessful attempts to access files. The parameters below are associated with system calls that control creation ( creat ), opening ( open , openat ) and truncation ( truncate , ftruncate ) of files. An audit log record will only be written if the user is a non- privileged user (auid > = 1000), is not a Daemon event (auid=4294967295) and if the system call returned EACCES (permission denied to the file) or EPERM (some other permanent error associated with the specific system call). All audit records will be tagged with the identifier "access."' + # 4.1.3.6 Ensure use of privileged commands are collected. (Automated) - Not Implemented + + # 4.1.3.7 Ensure unsuccessful file access attempts are collected. (Automated) + - id: 5127 + title: "Ensure unsuccessful file access attempts are collected." + description: "Monitor for unsuccessful attempts to access files. 
The following parameters are associated with system calls that control files: - creation - creat - opening - open, openat - truncation - truncate, ftruncate An audit log record will only be written if all of the following criteria are met for the user when trying to access a file: - a non-privileged user (auid>=UID_MIN) - is not a Daemon event (auid=4294967295/unset/-1) - the system call returned EACCES (permission denied) or EPERM (some other permanent error associated with the specific system call)."
    rationale: "Failed attempts to open, create or truncate files could be an indication that an individual or process is trying to gain unauthorized access to the system."
    remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor unsuccessful file access attempts. 64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F arch=b64 -S creat,open,openat,truncate,ftruncate -F exit=-EACCES -F auid>=${UID_MIN} -F auid!=unset -k access -a always,exit -F arch=b64 -S creat,open,openat,truncate,ftruncate -F exit=-EPERM -F auid>=${UID_MIN} -F auid!=unset -k access -a always,exit -F arch=b32 -S creat,open,openat,truncate,ftruncate -F exit=-EACCES -F auid>=${UID_MIN} -F auid!=unset -k access -a always,exit -F arch=b32 -S creat,open,openat,truncate,ftruncate -F exit=-EPERM -F auid>=${UID_MIN} -F auid!=unset -k access \" >> /etc/audit/rules.d/50-access.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64."
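    # For reference (illustrative, assuming the common default UID_MIN of 1000 from /etc/login.defs):
    # the first rule written by the printf above expands to the loaded rule that the
    # auditctl -l checks below look for, e.g.
    #   -a always,exit -F arch=b64 -S creat,open,openat,truncate,ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=unset -k access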
compliance: - - cis: ["4.1.10"] - - cis_csc: ["14.9"] - - pci_dss: ["10.2.4"] - - nist_800_53: ["AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["4.1.3.7"] + - cis_csc_v7: ["14.9"] + - iso_27001-2013: ["A.12.4.3"] condition: all rules: - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access' - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access' - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access' - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S && r:creat && r:open && r:openat && r:truncate && r:ftruncate && r:-F exit=-EACCES && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k access|key=access' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b64 && r:-S && r:creat && r:open && r:openat && r:truncate && r:ftruncate && r:-F exit=-EACCES && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k access|key=access' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S && r:creat && r:open && r:openat && r:truncate && r:ftruncate && r:-F exit=-EPERM && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k access|key=access' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b64 && r:-S && r:creat && r:open && r:openat && r:truncate && r:ftruncate && r:-F exit=-EPERM && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k access|key=access' - # 4.1.11 Ensure events that modify user/group information are collected (Scored) - - id: 5120 - title: "Ensure events that modify user/group information are collected" - description: 'Record events affecting the group , passwd (user IDs), shadow and gshadow (passwords) or /etc/security/opasswd (old passwords, based on remember parameter in the PAM configuration) files. The parameters in this section will watch the files to see if they have been opened for write or have had attribute changes (e.g. permissions) and tag them with the identifier "identity" in the audit log file.' + # 4.1.3.8 Ensure events that modify user/group information are collected. (Automated) + - id: 5128 + title: "Ensure events that modify user/group information are collected." + description: 'Record events affecting the modification of user or group information, including that of passwords and old passwords if in use. - /etc/group - system groups - /etc/passwd - system users - /etc/gshadow - encrypted password for each group - /etc/shadow - system user passwords - /etc/security/opasswd - storage of old passwords if the relevant PAM module is in use The parameters in this section will watch the files to see if they have been opened for write or have had attribute changes (e.g. permissions) and tag them with the identifier "identity" in the audit log file.' 
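    # Illustrative only: the remediation below appends these watch rules to
    # /etc/audit/rules.d/50-identity.rules (file name taken from the remediation example):
    #   -w /etc/group -p wa -k identity
    #   -w /etc/passwd -p wa -k identity
    #   -w /etc/gshadow -p wa -k identity
    #   -w /etc/shadow -p wa -k identity
    #   -w /etc/security/opasswd -p wa -k identity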
rationale: "Unexpected changes to these files could be an indication that the system has been compromised and that an unauthorized user is attempting to hide their activities or compromise additional accounts." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -w /etc/group -p wa -k identity -w /etc/passwd -p wa -k identity -w /etc/gshadow -p wa -k identity -w /etc/shadow -p wa -k identity -w /etc/security/opasswd -p wa -k identity" + remediation: "Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor events that modify user/group information. Example: # printf \" -w /etc/group -p wa -k identity -w /etc/passwd -p wa -k identity -w /etc/gshadow -p wa -k identity -w /etc/shadow -p wa -k identity -w /etc/security/opasswd -p wa -k identity \" >> /etc/audit/rules.d/50-identity.rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi." compliance: - - cis: ["4.1.11"] - - cis_csc: ["4.8"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["4.1.3.8"] + - cis_csc_v7: ["4.8"] + - iso_27001-2013: ["A.12.4.3"] condition: all rules: - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /etc/group -p wa -k identity' @@ -2235,101 +2818,263 @@ checks: - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /etc/gshadow -p wa -k identity' - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /etc/shadow -p wa -k identity' - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /etc/security/opasswd -p wa -k identity' + - "c:auditctl -l -> r:^-w && r:/etc/group && r:-p wa && r:-k identity|key=identity" + - "c:auditctl -l -> r:^-w && r:/etc/passwd && r:-p wa && r:-k identity|key=identity" + - "c:auditctl -l -> r:^-w && r:/etc/gshadow && r:-p wa && r:-k identity|key=identity" + - "c:auditctl -l -> r:^-w && r:/etc/shadow && r:-p wa && r:-k identity|key=identity" + - "c:auditctl -l -> r:^-w && r:/etc/security/opasswd && r:-p wa && r:-k identity|key=identity" - # 4.1.12 Ensure successful file system mounts are collected (Scored) - - id: 5121 - title: "Ensure successful file system mounts are collected" - description: "Monitor the use of the mount system call. The mount (and umount ) system call controls the mounting and unmounting of file systems. The parameters below configure the system to create an audit record when the mount system call is used by a non-privileged user." - rationale: "It is highly unusual for a non privileged user to mount file systems to the system. While tracking mount commands gives the system administrator evidence that external media may have been mounted (based on a review of the source of the mount and confirming it's an external media type), it does not conclusively indicate that data was exported to the media. System administrators who wish to determine if data were exported, would also have to track successful open , creat and truncate system calls requiring write access to a file under the mount point of the external media file system. This could give a fair indication that a write occurred. The only way to truly prove it, would be to track successful writes to the external media. Tracking write system calls could quickly fill up the audit log and is not recommended. 
Recommendations on configuration options to track data export to media is beyond the scope of this document." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts -a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts" + # 4.1.3.9 Ensure discretionary access control permission modification events are collected. (Automated) - Not Implemented + + # 4.1.3.10 Ensure successful file system mounts are collected. (Automated) + - id: 5129 + title: "Ensure successful file system mounts are collected." + description: "Monitor the use of the mount system call. The mount (and umount) system call controls the mounting and unmounting of file systems. The parameters below configure the system to create an audit record when the mount system call is used by a non-privileged user." + rationale: "It is highly unusual for a non-privileged user to mount file systems to the system. While tracking mount commands gives the system administrator evidence that external media may have been mounted (based on a review of the source of the mount and confirming it's an external media type), it does not conclusively indicate that data was exported to the media. System administrators who wish to determine if data were exported, would also have to track successful open, creat and truncate system calls requiring write access to a file under the mount point of the external media file system. This could give a fair indication that a write occurred. The only way to truly prove it, would be to track successful writes to the external media. Tracking write system calls could quickly fill up the audit log and is not recommended. Recommendations on configuration options to track data export to media is beyond the scope of this document." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor successful file system mounts. 64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=unset -k mounts -a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=unset -k mounts \" >> /etc/audit/rules.d/50-perm_mod.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." 
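    # Quick manual verification (illustrative; output format varies slightly between auditd versions,
    # which is why the auditctl -l checks below accept -1, 4294967295 or unset for the auid filter):
    #   # auditctl -l | grep mounts
    #   -a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=-1 -F key=mounts
    #   -a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=-1 -F key=mounts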
compliance: - - cis: ["4.1.12"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.7"] - - nist_800_53: ["AU.14", "AU.6"] - - gpg_13: ["7.9"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["4.1.3.10"] + - cis_csc_v7: ["5.1"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] condition: all rules: - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts' - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b64 && r:-S mount && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k mounts|key=mounts' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S mount && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k mounts|key=mounts' - # 4.1.14 Ensure file deletion events by users are collected (Scored) - - id: 5122 - title: "Ensure file deletion events by users are collected" - description: 'Monitor the use of system calls associated with the deletion or renaming of files and file attributes. This configuration statement sets up monitoring for the unlink (remove a file), unlinkat (remove a file attribute), rename (rename a file) and renameat (rename a file attribute) system calls and tags them with the identifier "delete".' + # 4.1.3.11 Ensure session initiation information is collected. (Automated) + - id: 5130 + title: "Ensure session initiation information is collected." + description: 'Monitor session initiation events. The parameters in this section track changes to the files associated with session events. - /var/run/utmp - tracks all currently logged in users. - /var/log/wtmp - file tracks logins, logouts, shutdown, and reboot events. - /var/log/btmp - keeps track of failed login attempts and can be read by entering the command /usr/bin/last -f /var/log/btmp. All audit records will be tagged with the identifier "session.".' + rationale: "Monitoring these files for changes could alert a system administrator to logins occurring at unusual hours, which could indicate intruder activity (i.e. a user logging in at a time when they do not normally log in)." + remediation: "Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor session initiation information. Example: # printf \" -w /var/run/utmp -p wa -k session -w /var/log/wtmp -p wa -k session -w /var/log/btmp -p wa -k session \" >> /etc/audit/rules.d/50-session.rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi." 
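    # Illustrative only: the remediation above writes these watch rules to
    # /etc/audit/rules.d/50-session.rules (file name taken from the remediation example):
    #   -w /var/run/utmp -p wa -k session
    #   -w /var/log/wtmp -p wa -k session
    #   -w /var/log/btmp -p wa -k session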
+ compliance: + - cis: ["4.1.3.11"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["4.9", "16.13"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.9.4.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/run/utmp -p wa -k session' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/log/wtmp -p wa -k logins' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/log/btmp -p wa -k logins' + - "c:auditctl -l -> r:^-w && r:/var/run/utmp && r:-p wa && r:-k session|key=session" + - "c:auditctl -l -> r:^-w && r:/var/log/wtmp && r:-p wa && r:-k session|key=session" + - "c:auditctl -l -> r:^-w && r:/var/log/btmp && r:-p wa && r:-k session|key=session" + + # 4.1.3.12 Ensure login and logout events are collected. (Automated) + - id: 5131 + title: "Ensure login and logout events are collected." + description: "Monitor login and logout events. The parameters below track changes to files associated with login/logout events. - /var/log/lastlog - maintain records of the last time a user successfully logged in. - /var/run/faillock - directory maintains records of login failures via the pam_faillock module." + rationale: "Monitoring login/logout events could provide a system administrator with information associated with brute force attacks against user logins." + remediation: "Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor login and logout events. Example: # printf \" -w /var/log/lastlog -p wa -k logins -w /var/run/faillock -p wa -k logins \" >> /etc/audit/rules.d/50-login.rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi." + compliance: + - cis: ["4.1.3.12"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["4.9", "16.11", "16.13"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.8.1.3", "A.9.4.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/log/lastlog -p wa -k logins' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /var/run/faillock/ -p wa -k logins' + - "c:auditctl -l -> r:^-w && r:/var/log/lastlog && r:-p wa && r:-k logins|key=logins" + - "c:auditctl -l -> r:^-w && r:/var/run/faillock && r:-p wa && r:-k logins|key=logins" + + # 4.1.3.13 Ensure file deletion events by users are collected. (Automated) + - id: 5132 + title: "Ensure file deletion events by users are collected." + description: 'Monitor the use of system calls associated with the deletion or renaming of files and file attributes. This configuration statement sets up monitoring for: - unlink - remove a file - unlinkat - remove a file attribute - rename - rename a file - renameat rename a file attribute system calls and tags them with the identifier "delete".' rationale: "Monitoring these calls from non-privileged users could provide a system administrator with evidence that inappropriate removal of files and file attributes associated with protected files is occurring. 
While this audit option will look at all events, system administrators will want to look for specific privileged files that are being deleted or altered." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -a always,exit -F arch=b64 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete -a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete" + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor file deletion events by users. 64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F arch=b64 -S rename,unlink,unlinkat,renameat -F auid>=${UID_MIN} -F auid!=unset -F key=delete -a always,exit -F arch=b32 -S rename,unlink,unlinkat,renameat -F auid>=${UID_MIN} -F auid!=unset -F key=delete \" >> /etc/audit/rules.d/50-delete.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." compliance: - - cis: ["4.1.14"] - - cis_csc: ["13"] - - pci_dss: ["10.5.5"] - - nist_800_53: ["AU.14"] - - hipaa: ["164.312.b"] - - tsc: ["PI1.4", "PI1.5", "CC7.1", "CC7.2", "CC7.3", "CC8.1"] + - cis: ["4.1.3.13"] + - cis_csc_v7: ["13"] condition: all rules: - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete' - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -a always,exit -F arch=b64 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b64 && r:-S && r:unlink && r:unlinkat && r:rename && r:renameat && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k delete|key=delete' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b32 && r:-S && r:unlink && r:unlinkat && r:rename && r:renameat && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k delete|key=delete' - # 4.1.15 Ensure kernel module loading and unloading is collected (Scored) - - id: 5123 - title: "Ensure kernel module loading and unloading is collected" - description: 'Monitor the loading and unloading of kernel modules. The programs insmod (install a kernel module), rmmod (remove a kernel module), and modprobe (a more sophisticated program to load and unload modules, as well as some other features) control loading and unloading of modules. The init_module (load a module) and delete_module (delete a module) system calls control loading and unloading of modules. Any execution of the loading and unloading module programs and system calls will trigger an audit record with an identifier of "modules".' - rationale: "Monitoring the use of insmod , rmmod and modprobe could provide system administrators with evidence that an unauthorized user loaded or unloaded a kernel module, possibly compromising the security of the system. 
Monitoring of the init_module and delete_module system calls would reflect an unauthorized user attempting to use a different program to load and unload modules." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -w /sbin/insmod -p x -k modules -w /sbin/rmmod -p x -k modules -w /sbin/modprobe -p x -k modules -a always,exit -F arch=b32 -S init_module -S delete_module -k modules -a always,exit -F arch=b64 -S init_module -S delete_module -k modules" - compliance: - - cis: ["4.1.15"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.7"] - - nist_800_53: ["AU.14", "AU.6"] - - gpg_13: ["7.9"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /sbin/insmod -p x -k modules' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /sbin/rmmod -p x -k modules' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /sbin/modprobe -p x -k modules' - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-a always,exit -F arch=b64 -S init_module -S delete_module -k modules' - - # 4.1.16 Ensure system administrator actions (sudolog) are collected (Scored) - - id: 5124 - title: "Ensure system administrator actions (sudolog) are collected" - description: "Monitor the sudo log file. If the system has been properly configured to disable the use of the su command and force all administrators to have to log in first and then use sudo to execute privileged commands, then all administrator commands will be logged to /var/log/sudo.log . Any time a command is executed, an audit event will be triggered as the /var/log/sudo.log file will be opened for write and the executed administration command will be written to the log." - rationale: "Changes in /var/log/sudo.log indicate that an administrator has executed a command or the log file itself has been tampered with. Administrators will want to correlate the events written to the audit trail with the records written to /var/log/sudo.log to verify if unauthorized commands have been executed." - remediation: "Edit or create a file in the /etc/audit/rules.d/ directory ending in .rules and add the following lines: -w /var/log/sudo.log -p wa -k actions" - compliance: - - cis: ["4.1.16"] - - cis_csc: ["4.9"] - - pci_dss: ["10.2.2"] - - nist_800_53: ["AU.14", "AC.6", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["32.2", "35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + # 4.1.3.14 Ensure events that modify the system's Mandatory Access Controls are collected. (Automated) + - id: 5133 + title: "Ensure events that modify the system's Mandatory Access Controls are collected." + description: "Monitor SELinux, an implementation of mandatory access controls. The parameters below monitor any write access (potential additional, deletion or modification of files in the directory) or attribute changes to the /etc/selinux/ and /usr/share/selinux/ directories. Note: If a different Mandatory Access Control method is used, changes to the corresponding directories should be audited." + rationale: "Changes to files in the /etc/selinux/ and /usr/share/selinux/ directories could indicate that an unauthorized user is attempting to modify access controls and change security contexts, leading to a compromise of the system." 
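    # Illustrative only: the remediation below appends these two watch rules to
    # /etc/audit/rules.d/50-MAC-policy.rules (file name taken from the remediation example):
    #   -w /etc/selinux -p wa -k MAC-policy
    #   -w /usr/share/selinux -p wa -k MAC-policy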
+ remediation: "Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor events that modify the system's Mandatory Access Controls. Example: # printf \" -w /etc/selinux -p wa -k MAC-policy -w /usr/share/selinux -p wa -k MAC-policy \" >> /etc/audit/rules.d/50-MAC-policy.rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi." + compliance: + - cis: ["4.1.3.14"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["5.5"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.1.2"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> -w /var/log/sudo.log -p wa -k actions' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /etc/selinux/ -p wa -k MAC-policy' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:-w /usr/share/selinux/ -p wa -k MAC-policy' + - "c:auditctl -l -> r:^-w && r:/etc/selinux && r:-p wa && r:-k MAC-policy|key=MAC-policy" + - "c:auditctl -l -> r:^-w && r:/usr/share/selinux && r:-p wa && r:-k MAC-policy|key=MAC-policy" - # 4.1.17 Ensure the audit configuration is immutable (Scored) - - id: 5125 - title: "Ensure the audit configuration is immutable" - description: 'Set system audit so that audit rules cannot be modified with auditctl . Setting the flag "-e 2" forces audit to be put in immutable mode. Audit changes can only be made on system reboot.' + # 4.1.3.15 Ensure successful and unsuccessful attempts to use the chcon command are recorded. (Automated) + - id: 5134 + title: "Ensure successful and unsuccessful attempts to use the chcon command are recorded." + description: "The operating system must generate audit records for successful/unsuccessful uses of the chcon command." + rationale: "Without generating audit records that are specific to the security and mission needs of the organization, it would be difficult to establish, correlate, and investigate the events relating to an incident or identify those responsible for one. Audit records can be generated from various components within the information system (e.g., module or policy filter)." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor successful and unsuccessful attempts to use the chcon command. 64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F path=/usr/bin/chcon -F perm=x -F auid>=${UID_MIN} -F auid!=unset -k perm_chng \" >> /etc/audit/rules.d/50-perm_chng.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." 
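    # Illustrative expansion of the rule above, assuming UID_MIN resolves to 1000:
    #   -a always,exit -F path=/usr/bin/chcon -F perm=x -F auid>=1000 -F auid!=unset -k perm_chng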
+ compliance: + - cis: ["4.1.3.15"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/chcon && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k perm_chng|key=perm_chng' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/chcon && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k perm_chng|key=perm_chng' + + # 4.1.3.16 Ensure successful and unsuccessful attempts to use the setfacl command are recorded. (Automated) + - id: 5135 + title: "Ensure successful and unsuccessful attempts to use the setfacl command are recorded." + description: "The operating system must generate audit records for successful/unsuccessful uses of the setfacl command." + rationale: "Without generating audit records that are specific to the security and mission needs of the organization, it would be difficult to establish, correlate, and investigate the events relating to an incident or identify those responsible for one. Audit records can be generated from various components within the information system (e.g., module or policy filter)." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor successful and unsuccessful attempts to use the setfacl command. 64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F path=/usr/bin/setfacl -F perm=x -F auid>=${UID_MIN} -F auid!=unset -k perm_chng \" >> /etc/audit/rules.d/50-priv_cmd.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." + compliance: + - cis: ["4.1.3.16"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/setfacl && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k perm_chng|-F key=perm_chng' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/setfacl && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k perm_chng|-F key=perm_chng' + + # 4.1.3.17 Ensure successful and unsuccessful attempts to use the chacl command are recorded. (Automated) + - id: 5136 + title: "Ensure successful and unsuccessful attempts to use the chacl command are recorded." 
+ description: "The operating system must generate audit records for successful/unsuccessful uses of the chacl command." + rationale: "Without generating audit records that are specific to the security and mission needs of the organization, it would be difficult to establish, correlate, and investigate the events relating to an incident or identify those responsible for one. Audit records can be generated from various components within the information system (e.g., module or policy filter)." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor successful and unsuccessful attempts to use the chacl command. 64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F path=/usr/bin/chacl -F perm=x -F auid>=${UID_MIN} -F auid!=unset -k perm_chng \" >> /etc/audit/rules.d/50-perm_chng.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." + compliance: + - cis: ["4.1.3.17"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/chacl && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k priv_cmd|-F key=priv_cmd' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/chacl && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k priv_cmd|-F key=priv_cmd' + + # 4.1.3.18 Ensure successful and unsuccessful attempts to use the usermod command are recorded. (Automated) + - id: 5137 + title: "Ensure successful and unsuccessful attempts to use the usermod command are recorded." + description: "The operating system must generate audit records for successful/unsuccessful uses of the usermod command." + rationale: "Without generating audit records that are specific to the security and mission needs of the organization, it would be difficult to establish, correlate, and investigate the events relating to an incident or identify those responsible for one. Audit records can be generated from various components within the information system (e.g., module or policy filter)." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor successful and unsuccessful attempts to use the usermod command. 
64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F path=/usr/sbin/usermod -F perm=x -F auid>=${UID_MIN} -F auid!=unset -k usermod \" >> /etc/audit/rules.d/50-usermod.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi 32 Bit systems Follow the same procedures as for 64 bit systems and ignore any entries with b64." + compliance: + - cis: ["4.1.3.18"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/sbin/usermod && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k usermod|-F key=usermod' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/sbin/usermod && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k usermod|-F key=usermod' + + # 4.1.3.19 Ensure kernel module loading unloading and modification is collected. (Automated) + - id: 5138 + title: "Ensure kernel module loading unloading and modification is collected." + description: "Monitor the loading and unloading of kernel modules. All the loading / listing / dependency checking of modules is done by kmod via symbolic links. The following system calls control loading and unloading of modules: - init_module - load a module - finit_module - load a module (used when the overhead of using cryptographically signed modules to determine the authenticity of a module can be avoided) - delete_module - delete a module - create_module - create a loadable module entry - query_module - query the kernel for various bits pertaining to modules Any execution of the loading and unloading module programs and system calls will trigger an audit record with an identifier of modules." + rationale: "Monitoring the use of all the various ways to manipulate kernel modules could provide system administrators with evidence that an unauthorized change was made to a kernel module, possibly compromising the security of the system." + remediation: "Create audit rules Edit or create a file in the /etc/audit/rules.d/ directory, ending in .rules extension, with the relevant rules to monitor kernel module modification. 64 Bit systems Example: # UID_MIN=$(awk '/^\\s*UID_MIN/{print $2}' /etc/login.defs) # [ -n \"${UID_MIN}\" ] && printf \" -a always,exit -F arch=b64 -S init_module,finit_module,delete_module,create_module,query_module -F auid>=${UID_MIN} -F auid!=unset -k kernel_modules -a always,exit -F path=/usr/bin/kmod -F perm=x -F auid>=${UID_MIN} -F auid!=unset -k kernel_modules \" >> /etc/audit/rules.d/50-kernel_modules.rules \\ || printf \"ERROR: Variable 'UID_MIN' is unset.\\n\" Load audit rules Merge and load the rules into active configuration: # augenrules --load Check if reboot is required. # if [[ $(auditctl -s | grep \"enabled\") =~ \"2\" ]]; then printf \"Reboot required to load rules\\n\"; fi." 
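    # Illustrative spot-check (the ls -l rules below expect each legacy module utility,
    # e.g. lsmod, rmmod, insmod, modinfo, modprobe and depmod, to resolve to kmod);
    # the exact symlink layout depends on the distribution:
    #   # ls -l /usr/sbin/modprobe   # expected to be a symlink ending in /bin/kmod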
+ compliance: + - cis: ["4.1.3.19"] + - cis_csc_v7: ["5.1"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + condition: all + rules: + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F arch=b64|-F arch=b32 && r:-S && r:init_module && r:finit_module && r:delete_module && r:create_module && r:query_module && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k kernel_modules|-F key=kernel_modules' + - 'd:/etc/audit/rules.d -> r:\.+.rules$ -> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/kmod && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k kernel_modules|-F key=kernel_modules' + - 'c:auditctl -l -> r:^-a && r:always,exit|exit,always && r:-F arch=b64|-F arch=b32 && r:-S && r:init_module && r:finit_module && r:delete_module && r:create_module && r:query_module && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k kernel_modules|-F key=kernel_modules' + - 'c:auditctl -l-> r:^-a && r:always,exit|exit,always && r:-F path=/usr/bin/kmod && r:-F perm=x && r:-F auid>=\d+ && r:-F auid!=-1|-F auid!=4294967295|-F auid!=unset && r:-k kernel_modules|-F key=kernel_modules' + - "c:ls -l /usr/sbin/lsmod -> r:/bin/kmod" + - "c:ls -l /usr/sbin/rmmod -> r:/bin/kmod" + - "c:ls -l /usr/sbin/insmod -> r:/bin/kmod" + - "c:ls -l /usr/sbin/modinfo -> r:/bin/kmod" + - "c:ls -l /usr/sbin/modprobe -> r:/bin/kmod" + - "c:ls -l /usr/sbin/depmod -> r:/bin/kmod" + + # 4.1.3.20 Ensure the audit configuration is immutable. (Automated) + - id: 5139 + title: "Ensure the audit configuration is immutable." + description: "Set system audit so that audit rules cannot be modified with auditctl. Setting the flag -e 2 forces audit to be put in immutable mode. Audit changes can only be made on system reboot. Note: This setting will require the system to be rebooted to update the active auditd configuration settings." rationale: "In immutable mode, unauthorized users cannot execute changes to the audit system to potentially hide malicious activity and then put the audit rules back. Users would most likely notice a system reboot and that could alert administrators of an attempt to make unauthorized audit changes." - remediation: "Edit or create the file /etc/audit/rules.d/99-finalize.rules and add the line: -e 2 at the end of the file" + remediation: "Edit or create the file /etc/audit/rules.d/99-finalize.rules and add the following line at the end of the file: -e 2." + compliance: + - cis: ["4.1.3.20"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - 'not d:/etc/audit/rules.d -> r:\.+.rules$ -> !r:\s*\t*-e 2$' + + # 4.1.3.21 Ensure the running and on disk configuration is the same. (Manual) + - id: 5140 + title: "Ensure the running and on disk configuration is the same." + description: "The Audit system have both on disk and running configuration. It is possible for these configuration settings to differ. 
Note: Due to the limitations of augenrules and auditctl, it is not absolutely guaranteed that loading the rule sets via augenrules --load will result in all rules being loaded or even that the user will be informed if there was a problem loading the rules." + rationale: "Configuration differences between what is currently running and what is on disk could cause unexpected problems or may give a false impression of compliance requirements." + remediation: 'If the rules are not aligned across all three () areas, run the following command to merge and load all rules: # augenrules --load Check if reboot is required. if [[ $(auditctl -s | grep "enabled") =~ "2" ]]; then echo "Reboot required to load rules"; fi.' compliance: - - cis: ["4.1.17"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.5"] - - nist_800_53: ["AU.9"] - - hipaa: ["164.312.b"] + - cis: ["4.1.3.21"] + - cis_csc_v8: ["8.5"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - - "c:tail -1 /etc/audit/rules.d/99-finalize.rules -> -e 2" + - "c:augenrules --check -> r:/usr/sbin/augenrules && r:No change" ############################################### # 4.2 Configure Logging @@ -2337,785 +3082,1334 @@ checks: # 4.2.1 Configure rsyslog ############################################### - # 4.2.1.1 Ensure rsyslog or syslog-ng is installed (Scored) - - id: 5104 - title: "Ensure rsyslog is installed" - description: "The rsyslog software is a recommended replacement to the original syslogd daemon which provide improvements over syslogd , such as connection-oriented (i.e. TCP) transmission of logs, the option to log to database formats, and the encryption of log data en route to a central logging server." + # 4.2.1.1 Ensure rsyslog is installed. (Automated) + - id: 5141 + title: "Ensure rsyslog is installed." + description: "The rsyslog software is recommended in environments where journald does not meet operation requirements." rationale: "The security enhancements of rsyslog such as connection-oriented (i.e. TCP) transmission of logs, the option to log to database formats, and the encryption of log data en route to a central logging server) justify installing and configuring the package." - remediation: "Run the following command to install rsyslog: # dnf install rsyslog" + remediation: "Run the following command to install rsyslog: # dnf install rsyslog." compliance: - cis: ["4.2.1.1"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.1"] - - nist_800_53: ["CM.1"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.2", "CC6.3", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - "c:rpm -q rsyslog -> r:^rsyslog-" - # 4.2.1.2 Ensure rsyslog Service is enabled (Scored) - - id: 5126 - title: "Ensure rsyslog Service is enabled" - description: "Once the rsyslog package is installed it needs to be activated." - rationale: "If the rsyslog service is not activated the system may default to the syslogd service or lack logging instead." 
- remediation: "Run the following command to enable rsyslog : # systemctl --now enable rsyslog" + # 4.2.1.2 Ensure rsyslog service is enabled. (Automated) + - id: 5142 + title: "Ensure rsyslog service is enabled." + description: "Once the rsyslog package is installed, ensure that the service is enabled." + rationale: "If the rsyslog service is not enabled to start on boot, the system will not capture logging events." + remediation: "Run the following command to enable rsyslog: # systemctl --now enable rsyslog." compliance: - cis: ["4.2.1.2"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.1"] - - nist_800_53: ["CM.1"] - - hipaa: ["164.312.b"] - - tsc: ["CC6.1", "CC6.2", "CC6.3", "CC7.2", "CC7.3", "CC7.4"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - "p:rsyslog" - # 4.2.1.3 Ensure rsyslog default file permissions configured (Scored) - - id: 5127 - title: "Ensure rsyslog default file permissions configured" - description: "rsyslog will create logfiles that do not already exist on the system. This setting controls what permissions will be applied to these newly created files." - rationale: "It is important to ensure that log files have the correct permissions to ensure that sensitive data is archived and protected." - remediation: "Edit the /etc/rsyslog.conf and /etc/rsyslog.d/*.conf files and set $FileCreateMode to 0640 or more restrictive: $FileCreateMode 0640" + # 4.2.1.3 Ensure journald is configured to send logs to rsyslog. (Manual) + - id: 5143 + title: "Ensure journald is configured to send logs to rsyslog." + description: "Data from journald may be stored in volatile memory or persisted locally on the server. Utilities exist to accept remote export of journald logs, however, use of the RSyslog service provides a consistent means of log collection and export." + rationale: "IF RSyslog is the preferred method for capturing logs, all logs of the system should be sent to it for further processing." + remediation: "Edit the /etc/systemd/journald.conf file and add the following line: ForwardToSyslog=yes Restart the service: # systemctl restart rsyslog." compliance: - cis: ["4.2.1.3"] - - cis_csc: ["5.1"] - - pci_dss: ["10.5.1", "10.5.2"] - - nist_800_53: ["CM.1", "AU.9"] - - tsc: ["CC5.2", "CC7.2"] + - cis_csc_v8: ["8.2", "8.9"] + - cis_csc_v7: ["6.2", "6.3", "6.5"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-6(3)", "AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3", "10.5.3", "10.5.4"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "10.3.3", "5.3.4", "6.4.1", "6.4.2"] + - soc_2: ["PL1.4"] + condition: all + rules: + - 'f:/etc/systemd/journald.conf -> r:^\s*\t*ForwardToSyslog\s*=\s*yes' + + # 4.2.1.4 Ensure rsyslog default file permissions are configured. (Automated) + - id: 5144 + title: "Ensure rsyslog default file permissions are configured." + description: "RSyslog will create logfiles that do not already exist on the system. This setting controls what permissions will be applied to these newly created files." 
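    # Illustrative only: a drop-in such as a hypothetical /etc/rsyslog.d/99-filecreatemode.conf
    # satisfying the check below would contain a single directive, 0640 or stricter:
    #   $FileCreateMode 0640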
+ rationale: "It is important to ensure that log files have the correct permissions to ensure that sensitive data is archived and protected." + impact: "The systems global umask could override, but only making the file permissions stricter, what is configured in RSyslog with the FileCreateMode directive. RSyslog also has it's own $umask directive that can alter the intended file creation mode. In addition, consideration should be given to how FileCreateMode is used. Thus it is critical to ensure that the intended file creation mode is not overridden with less restrictive settings in /etc/rsyslog.conf, /etc/rsyslog.d/*conf files and that FileCreateMode is set before any file is created." + remediation: "Edit either /etc/rsyslog.conf or a dedicated .conf file in /etc/rsyslog.d/ and set $FileCreateMode to 0640 or more restrictive: $FileCreateMode 0640 Restart the service: # systemctl restart rsyslog." + compliance: + - cis: ["4.2.1.4"] + - cis_csc_v8: ["3.3", "8.2"] + - cis_csc_v7: ["5.1", "6.2", "6.3"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "AU.L2-3.3.1", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)", "164.312(b)"] + - iso_27001-2013: ["A.12.4.1", "A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6", "AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3", "7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: any rules: - 'f:/etc/rsyslog.conf -> r:^\$FileCreateMode 06\d0|^\$FileCreateMode 04\d0|^\$FileCreateMode 02\d0|^\$FileCreateMode 00\d0' - 'f:/etc/rsyslog.conf -> r:^\$FileCreateMode 0\d40|^\$FileCreateMode 0\d20|^\$FileCreateMode 0\d00' - # 4.2.1.5 Ensure rsyslog is configured to send logs to a remote log host (Scored) - - id: 5128 - title: "Ensure rsyslog is configured to send logs to a remote log host" - description: "The rsyslog utility supports the ability to send logs it gathers to a remote log host running syslogd(8) or to receive messages from remote hosts, reducing administrative overhead." + # 4.2.1.5 Ensure logging is configured. (Manual) - Not Implemented + + # 4.2.1.6 Ensure rsyslog is configured to send logs to a remote log host. (Manual) + - id: 5145 + title: "Ensure rsyslog is configured to send logs to a remote log host." + description: "RSyslog supports the ability to send log events it gathers to a remote log host or to receive messages from remote hosts, thus enabling centralised log management." rationale: "Storing log data on a remote host protects log integrity from local attacks. If an attacker gains root access on the local system, they could tamper with or remove log data that is stored on the local system." - remediation: "Edit the /etc/rsyslog.conf and /etc/rsyslog.d/*.conf files and add the following line (where loghost.example.com is the name of your central log host). *.* @@loghost.example.com Run the following command to reload the rsyslogd configuration: # pkill -HUP rsyslogd" + remediation: 'Edit the /etc/rsyslog.conf and /etc/rsyslog.d/*.conf files and add the following line (where loghost.example.com is the name of your central log host). The target directive may either be a fully qualified domain name or an IP address. 
*.* action(type="omfwd" target="192.168.2.100" port="514" protocol="tcp" action.resumeRetryCount="100" queue.type="LinkedList" queue.size="1000") Run the following command to reload the rsyslogd configuration: # systemctl restart rsyslog.' compliance: - - cis: ["4.2.1.5"] - - cis_csc: ["6.6", "6.8"] - - pci_dss: ["10.5.3"] - - nist_800_53: ["CM.1", "AU.4"] - - tsc: ["CC5.2"] + - cis: ["4.2.1.6"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - - 'c:grep *.*[^I][^I]*@ /etc/rsyslog.conf /etc/rsyslog.d/*.conf -> !r:# && r:*.* @@\.+' + - 'f:/etc/rsyslog.conf -> !r:# && r:^*.* @@\.+' + - 'f:/etc/rsyslog.conf -> !r:# && r:^*.* action && r:target="' + - 'd:/etc/rsyslog.d/ -> r:*.conf -> !r:# && r:^*.* @@\.+' + - 'd:/etc/rsyslog.d/ -> r:*.conf -> !r:# && r:^*.* action && r:target="' + + # 4.2.1.7 Ensure rsyslog is not configured to recieve logs from a remote client. (Automated) + - id: 5146 + title: "Ensure rsyslog is not configured to recieve logs from a remote client." + description: "RSyslog supports the ability to receive messages from remote hosts, thus acting as a log server. Clients should not receive data from other hosts." + rationale: "If a client is configured to also receive data, thus turning it into a server, the client system is acting outside it's operational boundary." + remediation: 'Should there be any active log server configuration found in the auditing section, modify those files and remove the specific lines highlighted by the audit. Ensure none of the following entries are present in any of /etc/rsyslog.conf or /etc/rsyslog.d/*.conf. Old format $ModLoad imtcp $InputTCPServerRun New format module(load="imtcp") input(type="imtcp" port="514") Restart the service: # systemctl restart rsyslog.' + compliance: + - cis: ["4.2.1.7"] + - cis_csc_v8: ["4.8", "8.2"] + - cis_csc_v7: ["6.2", "6.3", "9.2"] + - cmmc_v2.0: ["AU.L2-3.3.1", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1", "A.13.1.3"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "10.2", "10.3", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "2.2.4", "5.3.4", "6.4.1", "6.4.2"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - 'not d:/etc/rsyslog.d -> r:\.+.conf$ -> r:^\s*\t*\$ModLoad imtcp|\s*\t*^\$InputTCPServerRun|^\s*\t*module load="imtcp"|^\s*\t*input type="imtcp" port="514"' + - 'not f:/etc/rsyslog.conf -> r:^\s*\t*\$ModLoad imtcp|^\s*\t*\$InputTCPServerRun|^\s*\t*module load="imtcp"|^\s*\t*input type="imtcp" port="514"' ############################################### - # 4.2.2 Configure journald + # 4.2.2.1 Configure systemd journal remote ############################################### - # 4.2.2.1 Ensure journald is configured to send logs to rsyslog (Scored) - - id: 5129 - title: "Ensure journald is configured to send logs to rsyslog" - description: "Data from journald may be stored in volatile memory or persisted locally on the server. Utilities exist to accept remote export of journald logs, however, use of the rsyslog service provides a consistent means of log collection and export." 
+  # 4.2.2.1.1 Ensure systemd-journal-remote is installed. (Manual) + - id: 5147 + title: "Ensure systemd-journal-remote is installed." + description: "Journald (via systemd-journal-remote) supports the ability to send log events it gathers to a remote log host or to receive messages from remote hosts, thus enabling centralised log management." rationale: "Storing log data on a remote host protects log integrity from local attacks. If an attacker gains root access on the local system, they could tamper with or remove log data that is stored on the local system." - remediation: "Edit the /etc/systemd/journald.conf file and add the following line: ForwardToSyslog=yes" + remediation: "Run the following command to install systemd-journal-remote: # dnf install systemd-journal-remote." compliance: - - cis: ["4.2.2.1"] - - cis_csc: ["6.5"] - - pci_dss: ["10.5.3"] - - nist_800_53: ["CM.1", "AU.9", "AU.4"] - - tsc: ["CC5.2", "CC7.2"] - references: - - "https://github.com/konstruktoid/hardening/blob/master/systemd.adoc#etcsystemdjournaldconf" + - cis: ["4.2.2.1.1"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - - 'f:/etc/systemd/journald.conf -> r:^\s*\t*ForwardToSyslog\s*=\s*yes' + - "c:rpm -q systemd-journal-remote -> r:^systemd-journal-remote-" - # 4.2.2.2 Ensure journald is configured to compress large log files (Scored) - - id: 5130 - title: "Ensure journald is configured to compress large log files" + # 4.2.2.1.2 Ensure systemd-journal-remote is configured. (Manual) - Not Implemented + + # 4.2.2.1.3 Ensure systemd-journal-remote is enabled. (Manual) + - id: 5148 + title: "Ensure systemd-journal-remote is enabled." + description: "Journald (via systemd-journal-remote) supports the ability to send log events it gathers to a remote log host or to receive messages from remote hosts, thus enabling centralised log management." + rationale: "Storing log data on a remote host protects log integrity from local attacks. If an attacker gains root access on the local system, they could tamper with or remove log data that is stored on the local system." + remediation: "Run the following command to enable systemd-journal-remote: # systemctl --now enable systemd-journal-upload.service." + compliance: + - cis: ["4.2.2.1.3"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all + rules: + - "c:systemctl is-enabled systemd-journal-upload.service -> r:^enabled" + + ############################################### + # 4.2.2.1 Configure journald + ############################################### + + # 4.2.2.1.4 Ensure journald is not configured to receive logs from a remote client. (Automated) + - id: 5149 + title: "Ensure journald is not configured to receive logs from a remote client." + description: "Journald supports the ability to receive messages from remote hosts, thus acting as a log server. Clients should not receive data from other hosts.
NOTE: - The same package, systemd-journal-remote, is used for both sending logs to remote hosts and receiving incoming logs. - With regards to receiving logs, there are two services: systemd-journal-remote.socket and systemd-journal-remote.service." + rationale: "If a client is configured to also receive data, thus turning it into a server, the client system is acting outside its operational boundary." + remediation: "Run the following command to disable systemd-journal-remote.socket: # systemctl --now mask systemd-journal-remote.socket." + compliance: + - cis: ["4.2.2.1.4"] + - cis_csc_v8: ["4.8", "8.2"] + - cis_csc_v7: ["6.2", "6.3", "9.2"] + - cmmc_v2.0: ["AU.L2-3.3.1", "CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1", "A.13.1.3"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "10.2", "10.3", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "2.2.4", "5.3.4", "6.4.1", "6.4.2"] + - soc_2: ["CC6.3", "CC6.6"] + condition: all + rules: + - "c:systemctl is-enabled systemd-journal-remote.socket -> r:^masked" + + # 4.2.2.2 Ensure journald service is enabled. (Automated) + - id: 5150 + title: "Ensure journald service is enabled." + description: "Ensure that the systemd-journald service is enabled to allow capturing of logging events." + rationale: "If the systemd-journald service is not enabled to start on boot, the system will not capture logging events." + remediation: "By default the systemd-journald service does not have an [Install] section and thus cannot be enabled / disabled. It is meant to be referenced as Requires or Wants by other unit files. As such, if the status of systemd-journald is not static, investigate why." + compliance: + - cis: ["4.2.2.2"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + condition: all + rules: + - "c:systemctl is-enabled systemd-journald.service -> r:^static" + + # 4.2.2.3 Ensure journald is configured to compress large log files. (Automated) + - id: 5151 + title: "Ensure journald is configured to compress large log files." description: "The journald system includes the capability of compressing overly large files to avoid filling up the system with logs or making the logs unmanageably large." rationale: "Uncompressed large files may unexpectedly fill a filesystem leading to resource unavailability. Compressing logs prior to write can prevent sudden, unexpected filesystem impacts." - remediation: "Edit the /etc/systemd/journald.conf file and add the following line: Compress=yes" + remediation: "Edit the /etc/systemd/journald.conf file and add the following line: Compress=yes Restart the service: # systemctl restart systemd-journal-upload."
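  # Illustrative example for 4.2.2.3 above and 4.2.2.4 below (a sketch, not part of the benchmark text):
  # both settings belong in the [Journal] section of /etc/systemd/journald.conf.
  #   [Journal]
  #   Compress=yes
  #   Storage=persistent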
compliance: - - cis: ["4.2.2.2"] - - cis_csc: ["6.4"] - - pci_dss: ["10.7"] - - nist_800_53: ["CM.1", "AU.4"] - - tsc: ["CC5.2"] + - cis: ["4.2.2.3"] + - cis_csc_v8: ["8.2", "8.3"] + - cis_csc_v7: ["6.2", "6.3", "6.4"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3", "10.7"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] + - soc_2: ["A1.1"] condition: all rules: - 'f:/etc/systemd/journald.conf -> r:^\s*\t*Compress\s*=\s*yes' - # 4.2.2.3 Ensure journald is configured to write logfiles to persistent disk (Scored) - - id: 5131 - title: "Ensure journald is configured to write logfiles to persistent disk" - description: "Data from journald may be stored in volatile memory or persisted locally on the server. Logs in memory will be lost upon a system reboot. By persisting logs to local disk on the server they are protected from loss." + # 4.2.2.4 Ensure journald is configured to write logfiles to persistent disk. (Automated) + - id: 5152 + title: "Ensure journald is configured to write logfiles to persistent disk." + description: "Data from journald may be stored in volatile memory or persisted locally on the server. Logs in memory will be lost upon a system reboot. By persisting logs to local disk on the server they are protected from loss due to a reboot." rationale: "Writing log data to disk will provide the ability to forensically reconstruct events which may have impacted the operations or security of a system even after a system crash or reboot." - remediation: "Edit the /etc/systemd/journald.conf file and add the following line: Compress=yes" + remediation: "Edit the /etc/systemd/journald.conf file and add the following line: Storage=persistent Restart the service: # systemctl restart systemd-journal-upload." compliance: - - cis: ["4.2.2.3"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["10.7"] - - nist_800_53: ["CM.1", "AU.4"] - - tsc: ["CC5.2"] + - cis: ["4.2.2.4"] + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - 'f:/etc/systemd/journald.conf -> r:^\s*\t*Storage\s*=\s*persistent' - # 4.2.3 Ensure permissions on all logfiles are configured (Scored) - - id: 5132 - title: "Ensure permissions on all logfiles are configured" - description: "Log files stored in /var/log/ contain logged information from many services on the system, or on log hosts others as well." - rationale: "It is important to ensure that log files have the correct permissions to ensure that sensitive data is archived and protected." - remediation: "Run the following command to set permissions on all existing log files: # find /var/log -type f -exec chmod g-wx,o-rwx {} +" + # 4.2.2.5 Ensure journald is not configured to send logs to rsyslog. (Manual) + - id: 5153 + title: "Ensure journald is not configured to send logs to rsyslog." + description: "Data from journald should be kept in the confines of the service and not forwarded on to other services." 
+    rationale: "If journald is the method for capturing logs, all logs of the system should be handled by journald and not forwarded to other logging mechanisms." +    remediation: "Edit the /etc/systemd/journald.conf file and ensure that ForwardToSyslog=yes is removed. Restart the service: # systemctl restart systemd-journal-upload." +    compliance: +      - cis: ["4.2.2.5"] +      - cis_csc_v8: ["8.2", "8.9"] +      - cis_csc_v7: ["6.2", "6.3", "6.5"] +      - cmmc_v2.0: ["AU.L2-3.3.1"] +      - hipaa: ["164.312(b)"] +      - iso_27001-2013: ["A.12.4.1"] +      - nist_sp_800-53: ["AU-6(3)", "AU-7"] +      - pci_dss_v3.2.1: ["10.2", "10.3", "10.5.3", "10.5.4"] +      - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "10.3.3", "5.3.4", "6.4.1", "6.4.2"] +      - soc_2: ["PL1.4"] +    condition: none +    rules: +      - 'f:/etc/systemd/journald.conf -> r:^\s*\t*ForwardToSyslog\s*=\s*yes' + +  # 4.2.2.6 Ensure journald log rotation is configured per site policy. (Manual) - Not Implemented +  # 4.2.2.7 Ensure journald default file permissions configured. (Manual) - Not Implemented + +  # 4.2.3 Ensure permissions on all logfiles are configured. (Automated) +  - id: 5154 +    title: "Ensure permissions on all logfiles are configured." +    description: "Log files contain information from many services on the local system, or in the event of a centralized log server, other systems' logs as well. In general log files are found in /var/log/, although applications can be configured to store logs elsewhere. Should your application store logs in another location, ensure that the same test is run on that location." +    rationale: "It is important that log files have the correct permissions to ensure that sensitive data is protected and that only the appropriate users / groups have access to them." +    remediation: 'Run the following command to set permissions on all existing log files in /var/log. Although the command is not destructive, ensure that the output of the audit procedure is captured in the event that the remediation causes issues. # find /var/log/ -type f -perm /g+wx,o+rwx -exec chmod --changes g-wx,o-rwx "{}" + If there are services that log to other locations, ensure that those log files have the appropriate permissions.' compliance: - cis: ["4.2.3"] - - cis_csc: ["5.1"] - - pci_dss: ["10.5.1", "10.5.2"] - - nist_800_53: ["CM.1", "AU.9"] - - tsc: ["CC5.2", "CC7.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - 'c:find /var/log -type f -ls -> r:-\w\w\w\ww\w\w\w\w|-\w\w\w\w\wx\w\w\w|-\w\w\w\w\w\w\ww\w|-\w\w\w\w\w\wr\w\w|-\w\w\w\w\w\w\w\wx' + # 4.3 Ensure logrotate is configured. (Manual) - Not Implemented + ############################################### # 5 Access, Authentication and Authorization ############################################### ############################################### # 5.1 Configure cron ############################################### - # 5.1.1 Ensure cron daemon is enabled (Scored) - - id: 5133 - title: "Ensure cron daemon is enabled" + + # 5.1.1 Ensure cron daemon is enabled. (Automated) + - id: 5155 + title: "Ensure cron daemon is enabled."
description: "The cron daemon is used to execute batch jobs on the system." rationale: "While there may not be user jobs that need to be run on the system, the system does have maintenance jobs that may include security monitoring that have to run, and cron is used to execute them." - remediation: "Run the following command to enable cron : # systemctl --now enable crond" + remediation: "Run the following command to enable cron: # systemctl --now enable crond." compliance: - cis: ["5.1.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - "p:crond" - # 5.1.2 Ensure permissions on /etc/crontab are configured (Scored) - - id: 5134 - title: "Ensure permissions on /etc/crontab are configured" + # 5.1.2 Ensure permissions on /etc/crontab are configured. (Automated) + - id: 5156 + title: "Ensure permissions on /etc/crontab are configured." description: "The /etc/crontab file is used by cron to control its own jobs. The commands in this item make sure that root is the user and group owner of the file and that only the owner can access the file." rationale: "This file contains information on what system jobs are run by cron. Write access to these files could provide unprivileged users with the ability to elevate their privileges. Read access to these files could provide users with the ability to gain insight on system jobs that run on the system and could provide them a way to gain unauthorized privileged access." - remediation: "Run the following commands to set ownership and permissions on /etc/crontab : # chown root:root /etc/crontab # chmod og-rwx /etc/crontab" + remediation: "Run the following commands to set ownership and permissions on /etc/crontab : # chown root:root /etc/crontab # chmod og-rwx /etc/crontab." compliance: - cis: ["5.1.2"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/crontab -> r:^Access: \(0\d00/-\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.1.3 Ensure permissions on /etc/cron.hourly are configured (Scored) - - id: 5135 - title: "Ensure permissions on /etc/cron.hourly are configured" + # 5.1.3 Ensure permissions on /etc/cron.hourly are configured. (Automated) + - id: 5157 + title: "Ensure permissions on /etc/cron.hourly are configured." description: "This directory contains system cron jobs that need to run on an hourly basis. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. 
The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." - remediation: "Run the following commands to set ownership and permissions on /etc/cron.hourly : # chown root:root /etc/cron.hourly # chmod og-rwx /etc/cron.hourly" + remediation: "Run the following commands to set ownership and permissions on /etc/cron.hourly : # chown root:root /etc/cron.hourly # chmod og-rwx /etc/cron.hourly." compliance: - cis: ["5.1.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/cron.hourly -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.1.4 Ensure permissions on /etc/cron.daily are configured (Scored) - - id: 5136 - title: "Ensure permissions on /etc/cron.daily are configured" + # 5.1.4 Ensure permissions on /etc/cron.daily are configured. (Automated) + - id: 5158 + title: "Ensure permissions on /etc/cron.daily are configured." description: "The /etc/cron.daily directory contains system cron jobs that need to run on a daily basis. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." - remediation: "Run the following commands to set ownership and permissions on /etc/cron.daily : # chown root:root /etc/cron.daily # chmod og-rwx /etc/cron.daily" + remediation: "Run the following commands to set ownership and permissions on /etc/cron.daily : # chown root:root /etc/cron.daily # chmod og-rwx /etc/cron.daily." 
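  # Illustrative example (a sketch, not part of the benchmark text): after the remediation above, a
  # compliant cron directory is reported by stat with root ownership and no group/other access, along
  # the lines of:
  #   # stat -L /etc/cron.daily
  #   Access: (0700/drwx------) Uid: ( 0/ root) Gid: ( 0/ root)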
compliance: - cis: ["5.1.4"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/cron.daily -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.1.5 Ensure permissions on /etc/cron.weekly are configured (Scored) - - id: 5137 - title: "Ensure permissions on /etc/cron.weekly are configured" + # 5.1.5 Ensure permissions on /etc/cron.weekly are configured. (Automated) + - id: 5159 + title: "Ensure permissions on /etc/cron.weekly are configured." description: "The /etc/cron.weekly directory contains system cron jobs that need to run on a weekly basis. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." - remediation: "Run the following commands to set ownership and permissions on /etc/cron.weekly : # chown root:root /etc/cron.weekly # chmod og-rwx /etc/cron.weekly" + remediation: "Run the following commands to set ownership and permissions on /etc/cron.weekly : # chown root:root /etc/cron.weekly # chmod og-rwx /etc/cron.weekly." compliance: - cis: ["5.1.5"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/cron.weekly -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.1.6 Ensure permissions on /etc/cron.monthly are configured (Scored) - - id: 5138 - title: "Ensure permissions on /etc/cron.monthly are configured" + # 5.1.6 Ensure permissions on /etc/cron.monthly are configured. (Automated) + - id: 5160 + title: "Ensure permissions on /etc/cron.monthly are configured." description: "The /etc/cron.monthly directory contains system cron jobs that need to run on a monthly basis. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. 
Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." - remediation: "Run the following commands to set ownership and permissions on /etc/cron.monthly : # chown root:root /etc/cron.monthly # chmod og-rwx /etc/cron.monthly" + remediation: "Run the following commands to set ownership and permissions on /etc/cron.monthly : # chown root:root /etc/cron.monthly # chmod og-rwx /etc/cron.monthly." compliance: - cis: ["5.1.6"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/cron.monthly -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.1.7 Ensure permissions on /etc/cron.d are configured (Scored) - - id: 5139 - title: "Ensure permissions on /etc/cron.d are configured" - description: "Configure /etc/cron.allow and /etc/at.allow to allow specific users to use these services. If /etc/cron.allow or /etc/at.allow do not exist, then /etc/at.deny and /etc/cron.deny are checked. Any user not specifically defined in those files is allowed to use at and cron. By removing the files, only users in /etc/cron.allow and /etc/at.allow are allowed to use at and cron. Note that even though a given user is not listed in cron.allow , cron jobs can still be run as that user. The cron.allow file only controls administrative access to the crontab command for scheduling and modifying cron jobs." - rationale: "On many systems, only the system administrator is authorized to schedule cron jobs. Using the cron.allow file to control who can run cron jobs enforces this policy. It is easier to manage an allow list than a deny list. In a deny list, you could potentially add a user ID to the system and forget to add it to the deny files." - remediation: "Run the following commands to set ownership and permissions on /etc/cron.d : # chown root:root /etc/cron.d # chmod og-rwx /etc/cron.d" + # 5.1.7 Ensure permissions on /etc/cron.d are configured. (Automated) + - id: 5161 + title: "Ensure permissions on /etc/cron.d are configured." + description: "The /etc/cron.d directory contains system cron jobs that need to run in a similar manner to the hourly, daily weekly and monthly jobs from /etc/crontab , but require more granular control as to when they run. The files in this directory cannot be manipulated by the crontab command, but are instead edited by system administrators using a text editor. The commands below restrict read/write and search access to user and group root, preventing regular users from accessing this directory." + rationale: "Granting write access to this directory for non-privileged users could provide them the means for gaining unauthorized elevated privileges. Granting read access to this directory could give an unprivileged user insight in how to gain elevated privileges or circumvent auditing controls." + remediation: "Run the following commands to set ownership and permissions on /etc/cron.d : # chown root:root /etc/cron.d # chmod og-rwx /etc/cron.d." 
compliance: - cis: ["5.1.7"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/cron.d -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.1.8 Ensure at/cron is restricted to authorized users (Scored) - - id: 5140 - title: "Ensure at/cron is restricted to authorized users" - description: "Configure /etc/cron.allow and /etc/at.allow to allow specific users to use these services. If /etc/cron.allow or /etc/at.allow do not exist, then /etc/at.deny and /etc/cron.deny are checked. Any user not specifically defined in those files is allowed to use at and cron. By removing the files, only users in /etc/cron.allow and /etc/at.allow are allowed to use at and cron. Note that even though a given user is not listed in cron.allow , cron jobs can still be run as that user. The cron.allow file only controls administrative access to the crontab command for scheduling and modifying cron jobs." + # 5.1.8 Ensure cron is restricted to authorized users. (Automated) + - id: 5162 + title: "Ensure cron is restricted to authorized users." + description: "If cron is installed in the system, configure /etc/cron.allow to allow specific users to use these services. If /etc/cron.allow does not exist, then /etc/cron.deny is checked. Any user not specifically defined in those files is allowed to use cron. By removing the file, only users in /etc/cron.allow are allowed to use cron. Note: Even though a given user is not listed in cron.allow, cron jobs can still be run as that user. The cron.allow file only controls administrative access to the crontab command for scheduling and modifying cron jobs." rationale: "On many systems, only the system administrator is authorized to schedule cron jobs. Using the cron.allow file to control who can run cron jobs enforces this policy. It is easier to manage an allow list than a deny list. In a deny list, you could potentially add a user ID to the system and forget to add it to the deny files." - remediation: "Run the following commands to remove /etc/cron.deny and /etc/at.deny and create and set permissions and ownership for /etc/cron.allow and /etc/at.allow : # rm /etc/cron.deny # rm /etc/at.deny # touch /etc/cron.allow # touch /etc/at.allow # chmod og-rwx /etc/cron.allow # chmod og-rwx /etc/at.allow # chown root:root /etc/cron.allow" + remediation: 'Run the following script to remove /etc/cron.deny, create /etc/cron.allow, and set the file mode on /etc/cron.allow: #!/usr/bin/env bash cron_fix() { if rpm -q cronie >/dev/null; then [ -e /etc/cron.deny ] && rm -f /etc/cron.deny [ ! -e /etc/cron.allow ] && touch /etc/cron.allow chown root:root /etc/cron.allow chmod u-x,go-rwx /etc/cron.allow else echo "cron is not installed on the system" fi } cron_fix OR Run the following command to remove cron: # dnf remove cronie.'
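  # The remediation script for 5.1.8 above, reflowed here for readability (content unchanged):
  #   #!/usr/bin/env bash
  #   cron_fix() {
  #     if rpm -q cronie >/dev/null; then
  #       [ -e /etc/cron.deny ] && rm -f /etc/cron.deny
  #       [ ! -e /etc/cron.allow ] && touch /etc/cron.allow
  #       chown root:root /etc/cron.allow
  #       chmod u-x,go-rwx /etc/cron.allow
  #     else
  #       echo "cron is not installed on the system"
  #     fi
  #   }
  #   cron_fix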
compliance: - cis: ["5.1.8"] - - cis_csc: ["16"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - "c:stat -L /etc/cron.deny -> r:No such file or directory$" - - "c:stat -L /etc/at.deny -> r:No such file or directory$" - 'c:stat -L /etc/cron.allow -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' + + # 5.1.9 Ensure at is restricted to authorized users. (Automated) + - id: 5163 + title: "Ensure at is restricted to authorized users." + description: "If at is installed in the system, configure /etc/at.allow to allow specific users to use these services. If /etc/at.allow does not exist, then /etc/at.deny is checked. Any user not specifically defined in those files is allowed to use at. By removing the file, only users in /etc/at.allow are allowed to use at. Note: Even though a given user is not listed in at.allow, at jobs can still be run as that user. The at.allow file only controls administrative access to the at command for scheduling and modifying at jobs." + rationale: "On many systems, only the system administrator is authorized to schedule at jobs. Using the at.allow file to control who can run at jobs enforces this policy. It is easier to manage an allow list than a deny list. In a deny list, you could potentially add a user ID to the system and forget to add it to the deny files." + remediation: 'Run the following script to remove /etc/at.deny, create /etc/at.allow, and set the file mode for /etc/at.allow: #!/usr/bin/env bash at_fix() { if rpm -q at >/dev/null; then [ -e /etc/at.deny ] && rm -f /etc/at.deny [ ! -e /etc/at.allow ] && touch /etc/at.allow chown root:root /etc/at.allow chmod u-x,go-rwx /etc/at.allow else echo "at is not installed on the system" fi } at_fix OR Run the following command to remove at: # dnf remove at.' + compliance: + - cis: ["5.1.9"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - "c:stat -L /etc/at.deny -> r:No such file or directory$" - 'c:stat -L /etc/at.allow -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' ############################################### # 5.2 Configure SSH ############################################### - # 5.2.1 Ensure permissions on /etc/ssh/sshd_config are configured (Scored) - - id: 5141 - title: "Ensure permissions on /etc/ssh/sshd_config are configured" + + # 5.2.1 Ensure permissions on /etc/ssh/sshd_config are configured. (Automated) + - id: 5164 + title: "Ensure permissions on /etc/ssh/sshd_config are configured." description: "The /etc/ssh/sshd_config file contains configuration specifications for sshd. The command below sets the owner and group of the file to root." rationale: "The /etc/ssh/sshd_config file needs to be protected from unauthorized changes by non-privileged users." 
- remediation: "Run the following commands to set ownership and permissions on /etc/ssh/sshd_config : # chown root:root /etc/ssh/sshd_config # chmod og-rwx /etc/ssh/sshd_config" + remediation: "Run the following commands to set ownership and permissions on /etc/ssh/sshd_config: # chown root:root /etc/ssh/sshd_config # chmod og-rwx /etc/ssh/sshd_config." compliance: - cis: ["5.2.1"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/ssh/sshd_config -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.2.2 Ensure SSH access is limited (Scored) - - id: 5142 - title: "Ensure SSH access is limited" - description: "There are several options available to limit which users and group can access the system via SSH. It is recommended that at least one of the following options be leveraged: AllowUsers The AllowUsers variable gives the system administrator the option of allowing specific users to ssh into the system. The list consists of space separated user names. Numeric user IDs are not recognized with this variable. If a system administrator wants to restrict user access further by only allowing the allowed users to log in from a particular host, the entry can be specified in the form of user@host. AllowGroups The AllowGroups variable gives the system administrator the option of allowing specific groups of users to ssh into the system. The list consists of space separated group names. Numeric group IDs are not recognized with this variable. DenyUsers The DenyUsers variable gives the system administrator the option of denying specific users to ssh into the system. The list consists of space separated user names. Numeric user IDs are not recognized with this variable. If a system administrator wants to restrict user access further by specifically denying a user's access from a particular host, the entry can be specified in the form of user@host. DenyGroups The DenyGroups variable gives the system administrator the option of denying specific groups of users to ssh into the system. The list consists of space separated group names. Numeric group IDs are not recognized with this variable." - rationale: "Restricting which users can remotely access the system via SSH will help ensure that only authorized users access the system." - remediation: "Edit the /etc/ssh/sshd_config file to set one or more of the parameter as follows: AllowUsers ; AllowGroups ; DenyUsers and DenyGroups " + # 5.2.2 Ensure permissions on SSH private host key files are configured. (Automated) + - id: 5165 + title: "Ensure permissions on SSH private host key files are configured." + description: "An SSH private key is one of two files used in SSH public key authentication. In this authentication method, the possession of the private key is proof of identity. Only a private key that corresponds to a public key will be able to authenticate successfully. The private keys need to be stored and handled carefully, and no copies of the private key should be distributed." 
+    rationale: "If an unauthorized user obtains the private SSH host key file, the host could be impersonated." +    remediation: "Run the following commands to set permissions, ownership, and group on the private SSH host key files: # find /etc/ssh -xdev -type f -name 'ssh_host_*_key' -exec chmod u-x,g-wx,o-rwx {} \\; # find /etc/ssh -xdev -type f -name 'ssh_host_*_key' -exec chown root:ssh_keys {} \\;." compliance: - cis: ["5.2.2"] - - cis_csc: ["4.3"] - - pci_dss: ["8.1"] - - tsc: ["CC6.1"] - condition: any - rules: - - 'f:/etc/ssh/sshd_config -> r:^\s*AllowUsers' - - 'f:/etc/ssh/sshd_config -> r:^\s*AllowGroups' - - 'f:/etc/ssh/sshd_config -> r:^\s*DenyUsers' - - 'f:/etc/ssh/sshd_config -> r:^\s*DenyGroups' - - # 5.2.3 Ensure permissions on SSH private host key files are configured (Scored) - - id: 5143 - title: "Ensure permissions on SSH private host key files are configured" - description: "An SSH private key is one of two files used in SSH public key authentication. In this authentication method, The possession of the private key is proof of identity. Only a private key that corresponds to a public key will be able to authenticate successfully. The private keys need to be stored and handled carefully, and no copies of the private key should be distributed." - rationale: "If an unauthorized user obtains the private SSH host key file, the host could be impersonated" - remediation: "Run the following commands to set ownership and permissions on the private SSH host key files: # find /etc/ssh -xdev -type f -name 'ssh_host_*_key' -exec chown root:root {} \\; # find /etc/ssh -xdev -type f -name 'ssh_host_*_key' -exec chmod 0600 {} \\;" - compliance: - - cis: ["5.2.3"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/ssh/ssh_host_rsa_key -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - 'c:stat -L /etc/ssh/ssh_host_ecdsa_key -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - 'c:stat -L /etc/ssh/ssh_host_ed25519_key -> r:^Access: \(0\d00/\w\w\w\w------\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.2.4 Ensure permissions on SSH public host key files are configured (Scored) - - id: 5144 - title: "Ensure permissions on SSH public host key files are configured" + # 5.2.3 Ensure permissions on SSH public host key files are configured. (Automated) + - id: 5166 + title: "Ensure permissions on SSH public host key files are configured." description: "An SSH public key is one of two files used in SSH public key authentication. In this authentication method, a public key is a key that can be used for verifying digital signatures generated using a corresponding private key. Only a public key that corresponds to a private key will be able to authenticate successfully." rationale: "If a public host key file is modified by an unauthorized user, the SSH service may be compromised."
-    remediation: "Run the following commands to set permissions and ownership on the SSH host public key files: # find /etc/ssh -xdev -type f -name 'ssh_host_*_key.pub' -exec chmod 0644 {} \\; #find /etc/ssh -xdev -type f -name 'ssh_host_*_key.pub' -exec chown root:root {} \\;" +    remediation: "Run the following commands to set permissions and ownership on the SSH host public key files: # find /etc/ssh -xdev -type f -name 'ssh_host_*_key.pub' -exec chmod u-x,go-wx {} \\; # find /etc/ssh -xdev -type f -name 'ssh_host_*_key.pub' -exec chown root:root {} \\;." compliance: - - cis: ["5.2.4"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["5.2.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/ssh/ssh_host_rsa_key.pub -> r:^Access: \(0\d\d\d/\w\w\w\w\w\w-\w\w-\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - 'c:stat -L /etc/ssh/ssh_host_ecdsa_key.pub -> r:^Access: \(0\d\d\d/\w\w\w\w\w\w-\w\w-\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - 'c:stat -L /etc/ssh/ssh_host_ed25519_key.pub -> r:^Access: \(0\d\d\d/\w\w\w\w\w\w-\w\w-\) Uid: \( 0/ root\) Gid: \( 0/ root\)$' - # 5.2.5 Ensure SSH LogLevel is appropriate (Scored) - - id: 5145 - title: "Ensure SSH LogLevel is appropriate" + # 5.2.4 Ensure SSH access is limited. (Automated) + - id: 5167 + title: "Ensure SSH access is limited." + description: "There are several options available to limit which users and groups can access the system via SSH. It is recommended that at least one of the following options be leveraged: - AllowUsers: o The AllowUsers variable gives the system administrator the option of allowing specific users to ssh into the system. The list consists of space separated user names. Numeric user IDs are not recognized with this variable. If a system administrator wants to restrict user access further by only allowing the allowed users to log in from a particular host, the entry can be specified in the form of user@host. - AllowGroups: o The AllowGroups variable gives the system administrator the option of allowing specific groups of users to ssh into the system. The list consists of space separated group names. Numeric group IDs are not recognized with this variable. - DenyUsers: o The DenyUsers variable gives the system administrator the option of denying specific users to ssh into the system. The list consists of space separated user names. Numeric user IDs are not recognized with this variable. If a system administrator wants to restrict user access further by specifically denying a user's access from a particular host, the entry can be specified in the form of user@host. - DenyGroups: o The DenyGroups variable gives the system administrator the option of denying specific groups of users to ssh into the system. The list consists of space separated group names. Numeric group IDs are not recognized with this variable." + rationale: "Restricting which users can remotely access the system via SSH will help ensure that only authorized users access the system."
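  # Illustrative example for 5.2.4 (a sketch, not part of the benchmark text): sshd_config entries that
  # limit SSH access; the user, host, and group names below are placeholders only.
  #   AllowUsers alice bob@10.0.0.5
  #   AllowGroups sshusers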
+ remediation: "Edit the /etc/ssh/sshd_config file to set one or more of the parameter as follows: AllowUsers OR AllowGroups OR DenyUsers OR DenyGroups ." + compliance: + - cis: ["5.2.4"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["4.3"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.2.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: any + rules: + - 'f:/etc/ssh/sshd_config -> r:^\s*AllowUsers' + - 'f:/etc/ssh/sshd_config -> r:^\s*AllowGroups' + - 'f:/etc/ssh/sshd_config -> r:^\s*DenyUsers' + - 'f:/etc/ssh/sshd_config -> r:^\s*DenyGroups' + + # 5.2.5 Ensure SSH LogLevel is appropriate. (Automated) + - id: 5168 + title: "Ensure SSH LogLevel is appropriate." description: "INFO level is the basic level that only records login activity of SSH users. In many situations, such as Incident Response, it is important to determine when a particular user was active on a system. The logout record can eliminate those users who disconnected, which helps narrow the field. VERBOSE level specifies that login and logout activity as well as the key fingerprint for any SSH key used for login will be logged. This information is important for SSH key management, especially in legacy environments." rationale: "SSH provides several logging levels with varying amounts of verbosity. DEBUG is specifically not recommended other than strictly for debugging SSH communications since it provides so much data that it is difficult to identify important security information." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: LogLevel VERBOSE or LogLevel INFO" + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: LogLevel VERBOSE OR LogLevel INFO." + references: + - "https://www.ssh.com/ssh/sshd_config/" compliance: - cis: ["5.2.5"] - - cis_csc: ["6.2", "6.3"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] - references: - - https://www.ssh.com/ssh/sshd_config/ + - cis_csc_v8: ["8.2"] + - cis_csc_v7: ["6.2", "6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - hipaa: ["164.312(b)"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-7"] + - pci_dss_v3.2.1: ["10.2", "10.3"] + - pci_dss_v4.0: ["10.2.1", "10.2.1.1", "10.2.1.2", "10.2.1.3", "10.2.1.4", "10.2.1.5", "10.2.1.6", "10.2.1.7", "10.2.2", "5.3.4", "6.4.1", "6.4.2"] condition: all rules: - 'c:sshd -T -C user=root -> r:^\s*LogLevel\s+VERBOSE|^\s*loglevel\s+INFO' + - "f:/etc/ssh/sshd_config -> r:loglevel" - # 5.2.6 Ensure SSH X11 forwarding is disabled (Scored) - - id: 5146 - title: "Ensure SSH X11 forwarding is disabled" - description: "The X11Forwarding parameter provides the ability to tunnel X11 traffic through the connection to enable remote graphic connections." - rationale: "Disable X11 forwarding unless there is an operational requirement to use X11 applications directly. There is a small risk that the remote X11 servers of users who are logged in via SSH with X11 forwarding could be compromised by other users on the X11 server. Note that even if X11 forwarding is disabled, users can always install their own forwarders." 
- remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: X11Forwarding no" + # 5.2.6 Ensure SSH PAM is enabled. (Automated) + - id: 5169 + title: "Ensure SSH PAM is enabled." + description: "UsePAM Enables the Pluggable Authentication Module interface. If set to yes this will enable PAM authentication using ChallengeResponseAuthentication and PasswordAuthentication in addition to PAM account and session module processing for all authentication types." + rationale: "When usePAM is set to yes, PAM runs through account and session types properly. This is important if you want to restrict access to services based off of IP, time or other factors of the account. Additionally, you can make sure users inherit certain environment variables on login or disallow access to the server." + impact: "If UsePAM is enabled, you will not be able to run sshd(8) as a non-root user." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: UsePAM yes." compliance: - cis: ["5.2.6"] - - cis_csc: ["9.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -C user=root -> r:^\s*X11Forwarding\s*\t*no' + - 'c:sshd -T -C user=root -> r:^\s*usepam\s+yes' + - 'not f:/etc/ssh/sshd_config -> r:^\sUsePAM\s+no' - # 5.2.7 Ensure SSH MaxAuthTries is set to 4 or less (Scored) - - id: 5147 - title: "Ensure SSH MaxAuthTries is set to 4 or less" - description: "The MaxAuthTries parameter specifies the maximum number of authentication attempts permitted per connection. When the login failure count reaches half the number, error messages will be written to the syslog file detailing the login failure." - rationale: "Setting the MaxAuthTries parameter to a low number will minimize the risk of successful brute force attacks to the SSH server. While the recommended setting is 4, set the number based on site policy." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: MaxAuthTries 4" + # 5.2.7 Ensure SSH root login is disabled. (Automated) + - id: 5170 + title: "Ensure SSH root login is disabled." + description: "The PermitRootLogin parameter specifies if the root user can log in using ssh. The default is no." + rationale: "Disallowing root logins over SSH requires system admins to authenticate using their own individual account, then escalating to root via sudo or su. This in turn limits opportunity for non-repudiation and provides a clear audit trail in the event of a security incident." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitRootLogin no." 
compliance: - cis: ["5.2.7"] - - cis_csc: ["16.13"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["5.4"] + - cis_csc_v7: ["4.3"] + - cmmc_v2.0: ["AC.L2-3.1.5", "AC.L2-3.1.6", "AC.L2-3.1.7", "SC.L2-3.13.3"] + - iso_27001-2013: ["A.9.2.3"] + - nist_sp_800-53: ["AC-6(2)", "AC-6(5)"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - soc_2: ["CC6.1", "CC6.3"] condition: all rules: - - 'c:sshd -T -C user=root -> !r:^# && n:^MaxAuthTries\s*\t*(\d+) compare <= 4' + - 'c:sshd -T -C user=root -> !r:^# && r:PermitRootLogin\s*\t*no' + - 'not f:/etc/ssh/sshd_config -> r:^\sPermitRootLogin\s+yes' - # 5.2.8 Ensure SSH IgnoreRhosts is enabled (Scored) - - id: 5148 - title: "Ensure SSH IgnoreRhosts is enabled" - description: "The IgnoreRhosts parameter specifies that .rhosts and .shosts files will not be used in RhostsRSAAuthentication or HostbasedAuthentication." - rationale: "Setting this parameter forces users to enter a password when authenticating with ssh." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: Ignorerhosts yes" + # 5.2.8 Ensure SSH HostbasedAuthentication is disabled. (Automated) + - id: 5171 + title: "Ensure SSH HostbasedAuthentication is disabled." + description: "The HostbasedAuthentication parameter specifies if authentication is allowed through trusted hosts via the user of .rhosts, or /etc/hosts.equiv, along with successful public key client host authentication. This option only applies to SSH Protocol Version 2." + rationale: "Even though the .rhosts files are ineffective if support is disabled in /etc/pam.conf, disabling the ability to use .rhosts files in SSH provides an additional layer of protection." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: HostbasedAuthentication no." compliance: - cis: ["5.2.8"] - - cis_csc: ["9.2"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["16.3"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -C user=root -> !r:^# && r:ignorerhosts\s*\t*yes' + - 'c:sshd -T -C user=root -> !r:^# && r:HostbasedAuthentication\s*\t*no' + - 'not f:/etc/ssh/sshd_config -> r:^\sHostbasedAuthentication\s+yes' - # 5.2.9 Ensure SSH HostbasedAuthentication is disabled (Scored) - - id: 5149 - title: "Ensure SSH HostbasedAuthentication is disabled" - description: "The HostbasedAuthentication parameter specifies if authentication is allowed through trusted hosts via the user of .rhosts , or /etc/hosts.equiv, along with successful public key client host authentication. This option only applies to SSH Protocol Version 2." - rationale: "Even though the .rhosts files are ineffective if support is disabled in /etc/pam.conf, disabling the ability to use .rhosts files in SSH provides an additional layer of protection." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: HostbasedAuthentication no" + # 5.2.9 Ensure SSH PermitEmptyPasswords is disabled. (Automated) + - id: 5172 + title: "Ensure SSH PermitEmptyPasswords is disabled." 
+ description: "The PermitEmptyPasswords parameter specifies if the SSH server allows login to accounts with empty password strings." + rationale: "Disallowing remote shell access to accounts that have an empty password reduces the probability of unauthorized access to the system." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitEmptyPasswords no." compliance: - cis: ["5.2.9"] - - cis_csc: ["16.3"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["16.3"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -C user=root -> !r:^# && r:HostbasedAuthentication\s*\t*no' + - 'c:sshd -T -C user=root -> !r:^# && r:PermitEmptyPasswords\s*\t*no' + - 'not f:/etc/ssh/sshd_config -> r:^\sPermitEmptyPasswords\s+yes' - # 5.2.10 Ensure SSH root login is disabled (Scored) - - id: 5150 - title: "Ensure SSH root login is disabled" - description: "The PermitRootLogin parameter specifies if the root user can log in using ssh. The default is no." - rationale: "Disallowing root logins over SSH requires system admins to authenticate using their own individual account, then escalating to root via sudo or su . This in turn limits opportunity for non-repudiation and provides a clear audit trail in the event of a security incident." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitRootLogin no" + # 5.2.10 Ensure SSH PermitUserEnvironment is disabled. (Automated) + - id: 5173 + title: "Ensure SSH PermitUserEnvironment is disabled." + description: "The PermitUserEnvironment option allows users to present environment options to the ssh daemon." + rationale: "Permitting users the ability to set environment variables through the SSH daemon could potentially allow users to bypass security controls (e.g. setting an execution path that has ssh executing trojan'd programs)." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitUserEnvironment no." compliance: - cis: ["5.2.10"] - - cis_csc: ["4.3"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -C user=root -> !r:^# && r:PermitRootLogin\s*\t*no' + - 'c:sshd -T -C user=root -> r:^\s*PermitUserEnvironment\s*\t*no' + - 'not f:/etc/ssh/sshd_config -> r:^\sPermitUserEnvironment\s+yes' - # 5.2.11 Ensure SSH PermitEmptyPasswords is disabled (Scored) - - id: 5151 - title: "Ensure SSH PermitEmptyPasswords is disabled" - description: "The PermitEmptyPasswords parameter specifies if the SSH server allows login to accounts with empty password strings." 
- rationale: "Disallowing remote shell access to accounts that have an empty password reduces the probability of unauthorized access to the system." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitEmptyPasswords no" + # 5.2.11 Ensure SSH IgnoreRhosts is enabled. (Automated) + - id: 5174 + title: "Ensure SSH IgnoreRhosts is enabled." + description: "The IgnoreRhosts parameter specifies that .rhosts and .shosts files will not be used in RhostsRSAAuthentication or HostbasedAuthentication." + rationale: "Setting this parameter forces users to enter a password when authenticating with ssh." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: IgnoreRhosts yes." compliance: - cis: ["5.2.11"] - - cis_csc: ["16.3"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.13.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -C user=root -> !r:^# && r:PermitEmptyPasswords\s*\t*no' + - 'c:sshd -T -C user=root -> !r:^# && r:ignorerhosts\s*\t*yes' + - 'not f:/etc/ssh/sshd_config -> r:^\s*ignorerhosts\s+no' - # 5.2.12 Ensure SSH PermitUserEnvironment is disabled (Scored) - - id: 5152 - title: "Ensure SSH PermitUserEnvironment is disabled" - description: "The PermitUserEnvironment option allows users to present environment options to the ssh daemon." - rationale: "Permitting users the ability to set environment variables through the SSH daemon could potentially allow users to bypass security controls (e.g. setting an execution path that has ssh executing trojan'd programs)" - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: PermitUserEnvironment no" + # 5.2.12 Ensure SSH X11 forwarding is disabled. (Automated) + - id: 5175 + title: "Ensure SSH X11 forwarding is disabled." + description: "The X11Forwarding parameter provides the ability to tunnel X11 traffic through the connection to enable remote graphic connections." + rationale: "Disable X11 forwarding unless there is an operational requirement to use X11 applications directly. There is a small risk that the remote X11 servers of users who are logged in via SSH with X11 forwarding could be compromised by other users on the X11 server. Note that even if X11 forwarding is disabled, users can always install their own forwarders." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: X11Forwarding no." 
compliance: - cis: ["5.2.12"] - - cis_csc: ["5.1"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis_csc_v8: ["4.8"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["CM.L2-3.4.7", "CM.L2-3.4.8", "SC.L2-3.13.6"] + - iso_27001-2013: ["A.13.1.3"] + - pci_dss_v3.2.1: ["1.1.6", "1.2.1", "2.2.2", "2.2.5"] + - pci_dss_v4.0: ["1.2.5", "2.2.4", "6.4.1"] + - soc_2: ["CC6.3", "CC6.6"] condition: all rules: - - 'c:sshd -T -C user=root -> r:^\s*PermitUserEnvironment\s*\t*no' + - 'c:sshd -T -C user=root -> r:^\s*X11Forwarding\s*\t*no' + - 'not f:/etc/ssh/sshd_config -> r:^\s*x11forwarding\s+yes' - # 5.2.13 Ensure SSH Idle Timeout Interval is configured (Scored) - - id: 5153 - title: "Ensure SSH Idle Timeout Interval is configured" - description: "The two options ClientAliveInterval and ClientAliveCountMax control the timeout of ssh sessions. When the ClientAliveInterval variable is set, ssh sessions that have no activity for the specified length of time are terminated. When the ClientAliveCountMax variable is set, sshd will send client alive messages at every ClientAliveInterval interval. When the number of consecutive client alive messages are sent with no response from the client, the ssh session is terminated. For example, if the ClientAliveInterval is set to 15 seconds and the ClientAliveCountMax is set to 3, the client ssh session will be terminated after 45 seconds of idle time." - rationale: "Having no timeout value associated with a connection could allow an unauthorized user access to another user's ssh session (e.g. user walks away from their computer and doesn't lock the screen). Setting a timeout value at least reduces the risk of this happening. While the recommended setting is 300 seconds (5 minutes), set this timeout value based on site policy. The recommended setting for ClientAliveCountMax is 0. In this case, the client session will be terminated after 5 minutes of idle time and no keepalive messages will be sent." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameters according to site policy: ClientAliveInterval 300 and ClientAliveCountMax 0" + # 5.2.13 Ensure SSH AllowTcpForwarding is disabled. (Automated) + - id: 5176 + title: "Ensure SSH AllowTcpForwarding is disabled." + description: "SSH port forwarding is a mechanism in SSH for tunneling application ports from the client to the server, or servers to clients. It can be used for adding encryption to legacy applications, going through firewalls, and some system administrators and IT professionals use it for opening backdoors into the internal network from their home machines." + rationale: "Leaving port forwarding enabled can expose the organization to security risks and back-doors. SSH connections are protected with strong encryption. This makes their contents invisible to most deployed network monitoring and traffic filtering solutions. This invisibility carries considerable risk potential if it is used for malicious purposes such as data exfiltration. Cybercriminals or malware could exploit SSH to hide their unauthorized communications, or to exfiltrate stolen data from the target network." + impact: "SSH tunnels are widely used in many corporate environments that employ mainframe systems as their application backends. In those environments the applications themselves may have very limited native support for security. 
By utilizing tunneling, compliance with SOX, HIPAA, PCI-DSS, and other standards can be achieved without having to modify the applications." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: AllowTcpForwarding no." + references: + - "https://www.ssh.com/ssh/tunneling/example" compliance: - cis: ["5.2.13"] - - cis_csc: ["16.11"] - - pci_dss: ["12.3.8"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["9.2"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.13.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -C user=root -> n:^\s*ClientAliveInterval\s*\t*(\d+) compare <= 900' - - 'c:sshd -T -C user=root -> n:^\s*ClientAliveCountMax\s*\t*(\d+) compare <= 3' + - 'c:sshd -T -C user=root -> r:^\s*AllowTcpForwarding\s+no' + - 'not f:/etc/ssh/sshd_config -> r:^\s*AllowTcpForwarding\s+yes' - # 5.2.14 Ensure SSH LoginGraceTime is set to one minute or less (Scored) - - id: 5154 - title: "Ensure SSH LoginGraceTime is set to one minute or less" - description: "The LoginGraceTime parameter specifies the time allowed for successful authentication to the SSH server. The longer the Grace period is the more open unauthenticated connections can exist. Like other session controls in this session the Grace Period should be limited to appropriate organizational limits to ensure the service is available for needed access." - rationale: "Setting the LoginGraceTime parameter to a low number will minimize the risk of successful brute force attacks to the SSH server. It will also limit the number of concurrent unauthenticated connections While the recommended setting is 60 seconds (1 Minute), set the number based on site policy." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: LoginGraceTime 60" + # 5.2.14 Ensure system-wide crypto policy is not over-ridden. (Automated) + - id: 5177 + title: "Ensure system-wide crypto policy is not over-ridden." + description: "System-wide Crypto policy can be over-ridden or opted out of for openSSH." + rationale: "Over-riding or opting out of the system-wide crypto policy could allow for the use of less secure Ciphers, MACs, KexAlgorithms and GSSAPIKexAlgorithm." + remediation: "Run the following commands: # sed -ri \"s/^\\s*(CRYPTO_POLICY\\s*=.*)$/# \\1/\" /etc/sysconfig/sshd # systemctl reload sshd." compliance: - cis: ["5.2.14"] - - cis_csc: ["5.1"] - - pci_dss: ["8.1"] - - tsc: ["CC6.1"] + - cis_csc_v8: ["3.10"] + - cis_csc_v7: ["14.4"] + - cmmc_v2.0: ["AC.L2-3.1.13", "AC.L2-3.1.17", "IA.L2-3.5.10", "SC.L2-3.13.11", "SC.L2-3.13.15", "SC.L2-3.13.8"] + - hipaa: ["164.312(a)(2)(iv)", "164.312(e)(1)", "164.312(e)(2)(i)", "164.312(e)(2)(ii)"] + - iso_27001-2013: ["A.10.1.1", "A.13.1.1"] + - nist_sp_800-53: ["AC-17(2)", "SC-8", "SC-8(1)"] + - pci_dss_v3.2.1: ["2.1.1", "4.1", "4.1.1", "8.2.1"] + - pci_dss_v4.0: ["2.2.7", "4.1.1", "4.2.1", "4.2.1.2", "4.2.2", "8.3.2"] condition: all rules: - - 'c:sshd -T -C user=root -> n:^\s*LoginGraceTime\s*\t*(\d+) compare <= 60' + - 'f:/etc/sysconfig/sshd -> !r:^\s*CRYPTO_POLICY=' - # 5.2.15 Ensure SSH warning banner is configured (Scored) - - id: 5155 - title: "Ensure SSH warning banner is configured" + # 5.2.15 Ensure SSH warning banner is configured. 
(Automated) + - id: 5178 + title: "Ensure SSH warning banner is configured." description: "The Banner parameter specifies a file whose contents must be sent to the remote user before authentication is permitted. By default, no banner is displayed." rationale: "Banners are used to warn connecting users of the particular site's policy regarding connection. Presenting a warning message prior to the normal user login may assist the prosecution of trespassers on the computer system." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: Banner /etc/issue.net" + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: Banner /etc/issue.net." compliance: - cis: ["5.2.15"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'c:sshd -T -C user=root -> r:^\s*Banner\s*\t*/etc/issue.net' - # 5.2.16 Ensure SSH PAM is enabled (Scored) - - id: 5156 - title: "Ensure SSH PAM is enabled" - description: "UsePAM Enables the Pluggable Authentication Module interface. If set to “yes” this will enable PAM authentication using ChallengeResponseAuthentication and PasswordAuthentication in addition to PAM account and session module processing for all authentication types." - rationale: "When usePAM is set to yes, PAM runs through account and session types properly. This is important if you want to restrict access to services based off of IP, time or other factors of the account. Additionally, you can make sure users inherit certain environment variables on login or disallow access to the server." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: UsePAM yes" + # 5.2.16 Ensure SSH MaxAuthTries is set to 4 or less. (Automated) + - id: 5179 + title: "Ensure SSH MaxAuthTries is set to 4 or less." + description: "The MaxAuthTries parameter specifies the maximum number of authentication attempts permitted per connection. When the login failure count reaches half the number, error messages will be written to the syslog file detailing the login failure." + rationale: "Setting the MaxAuthTries parameter to a low number will minimize the risk of successful brute force attacks to the SSH server. While the recommended setting is 4, set the number based on site policy." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: MaxAuthTries 4." 
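+    # The rules above audit the effective daemon configuration rather than only the file
+    # on disk; an illustrative manual spot-check (run as root) would be:
+    #   sshd -T -C user=root | grep -Ei 'banner|maxauthtries'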
compliance: - cis: ["5.2.16"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["16.13"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] condition: all rules: - - 'c:sshd -T -C user=root -> r:^\s*usepam\s+yes' + - 'c:sshd -T -C user=root -> !r:^# && n:^MaxAuthTries\s*\t*(\d+) compare <= 4' + - 'f:/etc/ssh/sshd_config -> n:^\s*MaxAuthTries\s*\t*(\d+) compare <= 4' - # 5.2.17 Ensure SSH AllowTcpForwarding is disabled (Scored) - - id: 5157 - title: "Ensure SSH AllowTcpForwarding is disabled" - description: "SSH port forwarding is a mechanism in SSH for tunneling application ports from the client to the server, or servers to clients. It can be used for adding encryption to legacy applications, going through firewalls, and some system administrators and IT professionals use it for opening backdoors into the internal network from their home machines." - rationale: "Leaving port forwarding enabled can expose the organization to security risks and back-doors. SSH connections are protected with strong encryption. This makes their contents invisible to most deployed network monitoring and traffic filtering solutions. This invisibility carries considerable risk potential if it is used for malicious purposes such as data exfiltration. Cybercriminals or malware could exploit SSH to hide their unauthorized communications, or to exfiltrate stolen data from the target network." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: AllowTcpForwarding no" + # 5.2.17 Ensure SSH MaxStartups is configured. (Automated) + - id: 5180 + title: "Ensure SSH MaxStartups is configured." + description: "The MaxStartups parameter specifies the maximum number of concurrent unauthenticated connections to the SSH daemon." + rationale: "To protect a system from denial of service due to a large number of pending authentication connection attempts, use the rate limiting function of MaxStartups to protect availability of sshd logins and prevent overwhelming the daemon." + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: maxstartups 10:30:60." 
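+    # MaxStartups takes the form start:rate:full. With the recommended 10:30:60, sshd
+    # begins refusing roughly 30% of new unauthenticated connections once 10 are pending
+    # and refuses all of them once 60 are pending (illustrative reading of the
+    # recommended value; see sshd_config(5)):
+    #   MaxStartups 10:30:60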
compliance: - cis: ["5.2.17"] - - cis_csc: ["9.2"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] - references: - - https://www.ssh.com/ssh/tunneling/example - condition: all - rules: - - 'c:sshd -T -C user=root -> r:^\s*AllowTcpForwarding\s+no' - - # 5.2.19 Ensure SSH MaxSessions is set to 4 or less (Scored) - - id: 5158 - title: "Ensure SSH MaxSessions is set to 4 or less" + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: all + rules: + - 'c:sshd -T -C user=root -> n:^\s*maxstartups\s+(\d+):\d+:\d+ compare <= 10' + - 'c:sshd -T -C user=root -> n:^\s*maxstartups\s+\d+:(\d+):\d+ compare <= 30' + - 'c:sshd -T -C user=root -> n:^\s*maxstartups\s+\d+:\d+:(\d+) compare <= 60' + - 'f:/etc/ssh/sshd_config -> n:^\s*maxstartups\s+(\d+):\d+:\d+ compare <= 10' + - 'f:/etc/ssh/sshd_config -> n:^\s*maxstartups\s+\d+:(\d+):\d+ compare <= 30' + - 'f:/etc/ssh/sshd_config -> n:^\s*maxstartups\s+\d+:\d+:(\d+) compare <= 60' + + # 5.2.18 Ensure SSH MaxSessions is set to 10 or less. (Automated) + - id: 5181 + title: "Ensure SSH MaxSessions is set to 10 or less." description: "The MaxSessions parameter specifies the maximum number of open sessions permitted from a given connection." rationale: "To protect a system from denial of service due to a large number of concurrent sessions, use the rate limiting function of MaxSessions to protect availability of sshd logins and prevent overwhelming the daemon." - remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: MaxSessions 4" + remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: MaxSessions 10." + compliance: + - cis: ["5.2.18"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] + condition: any + rules: + - 'c:sshd -T -C user=root -> n:^\s*MaxSessions\s+(\d+) compare <= 10' + - 'not f:/etc/ssh/sshd_config -> n:^\s*MaxSessions\s+(\d+) compare > 10' + + # 5.2.19 Ensure SSH LoginGraceTime is set to one minute or less. (Automated) + - id: 5182 + title: "Ensure SSH LoginGraceTime is set to one minute or less." + description: "The LoginGraceTime parameter specifies the time allowed for successful authentication to the SSH server. The longer the Grace period is the more open unauthenticated connections can exist. Like other session controls in this session the Grace Period should be limited to appropriate organizational limits to ensure the service is available for needed access." + rationale: "Setting the LoginGraceTime parameter to a low number will minimize the risk of successful brute force attacks to the SSH server. It will also limit the number of concurrent unauthenticated connections While the recommended setting is 60 seconds (1 Minute), set the number based on site policy." 
+ remediation: "Edit the /etc/ssh/sshd_config file to set the parameter as follows: LoginGraceTime 60." compliance: - cis: ["5.2.19"] - - cis_csc: ["5.1"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'c:sshd -T -C user=root -> n:^\s*MaxSessions\s+(\d+) compare <= 4' + - 'c:sshd -T -C user=root -> n:^\s*LoginGraceTime\s*\t*(\d+) compare <= 60 && n:^\s*LoginGraceTime\s*\t*(\d+) compare > 0' + - 'f:/etc/ssh/sshd_config -> n:^\s*LoginGraceTime\s*\t*(\d+) compare <= 60 && n:^\s*LoginGraceTime\s*\t*(\d+) compare > 0' - # 5.2.20 Ensure system-wide crypto policy is not over-ridden (Scored) - - id: 5159 - title: "Ensure system-wide crypto policy is not over-ridden" - description: "System-wide Crypto policy can be over-ridden or opted out of for openSSH" - rationale: "Over-riding or opting out of the system-wide crypto policy could allow for the use of less secure Ciphers, MACs, KexAlgoritms and GSSAPIKexAlgorithsm" - remediation: "Run the following commands: # sed -ri \"s/^\\s*(CRYPTO_POLICY\\s*=.*)$/# \\1/\" /etc/sysconfig/sshd; # systemctl reload sshd" + # 5.2.20 Ensure SSH Idle Timeout Interval is configured. (Automated) + - id: 5183 + title: "Ensure SSH Idle Timeout Interval is configured." + description: "The two options ClientAliveInterval and ClientAliveCountMax control the timeout of ssh sessions. - ClientAliveInterval sets a timeout interval in seconds after which if no data has been received from the client, sshd will send a message through the encrypted channel to request a response from the client. The default is 0, indicating that these messages will not be sent to the client. - ClientAliveCountMax sets the number of client alive messages which may be sent without sshd receiving any messages back from the client. If this threshold is reached while client alive messages are being sent, sshd will disconnect the client, terminating the session. The default value is 3. o The client alive messages are sent through the encrypted channel o Setting ClientAliveCountMax to 0 disables connection termination Example: The default value is 3. If ClientAliveInterval is set to 15, and ClientAliveCountMax is left at the default, unresponsive SSH clients will be disconnected after approximately 45 seconds." + rationale: "Having no timeout value associated with a connection could allow an unauthorized user access to another user's ssh session (e.g. user walks away from their computer and doesn't lock the screen). Setting a timeout value reduces this risk. - The recommended ClientAliveInterval setting is no greater than 900 seconds (15 minutes) - The recommended ClientAliveCountMax setting is 0 - At the 15 minute interval, if the ssh session is inactive, the session will be terminated." + impact: "In some cases this setting may cause termination of long-running scripts over SSH or remote automation tools which rely on SSH. In developing the local site policy, the requirements of such scripts should be considered and appropriate ServerAliveInterval and ClientAliveInterval settings should be calculated to insure operational continuity." 
+ remediation: "Edit the /etc/ssh/sshd_config file to set the parameters according to site policy. This should include ClientAliveInterval between 1 and 900 and ClientAliveCountMax of 0: ClientAliveInterval 900 ClientAliveCountMax 0." + references: + - "https://man.openbsd.org/sshd_config" compliance: - cis: ["5.2.20"] - - cis_csc: ["14.4"] - - pci_dss: ["4.1"] - - hipaa: ["164.312.a.2.IV", "164.312.e.1", "164.312.e.2.I", "164.312.e.2.II"] - - nist_800_53: ["SC.8"] - - tsc: ["CC6.1", "CC6.7", "CC7.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["16.11"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - - 'f:/etc/sysconfig/sshd -> !r:^\s*CRYPTO_POLICY=' + - 'c:sshd -T -C user=root -> n:^\s*ClientAliveInterval\s*\t*(\d+) compare > 0 && n:^\s*ClientAliveInterval\s*\t*(\d+) compare <= 900' + - 'c:sshd -T -C user=root -> n:^\s*ClientAliveCountMax\s*\t*(\d+) compare == 0' ############################################### - # 5.3 Configure authselect + # 5.3 Configure sudo ############################################### - # 5.3.1 Create custom authselect profile (Scored) - - id: 5160 - title: "Create custom authselect profile" - description: "A custom profile can be created by copying and customizing one of the default profiles. The default profiles include: sssd, winbind, or the nis." - rationale: "A custom profile is required to customize many of the pam options" - remediation: "Run the following command to create a custom authselect profile: # authselect create-profile -b .Example: # authselect create-profile custom-profile -b sssd --symlink-meta" + + # 5.3.1 Ensure sudo is installed. (Automated) + - id: 5184 + title: "Ensure sudo is installed." + description: "sudo allows a permitted user to execute a command as the superuser or another user, as specified by the security policy. The invoking user's real (not effective) user ID is used to determine the user name with which to query the security policy." + rationale: "sudo supports a plug-in architecture for security policies and input/output logging. Third parties can develop and distribute their own policy and I/O logging plug-ins to work seamlessly with the sudo front end. The default security policy is sudoers, which is configured via the file /etc/sudoers and any entries in /etc/sudoers.d. The security policy determines what privileges, if any, a user has to run sudo. The policy may require that users authenticate themselves with a password or another authentication mechanism. If authentication is required, sudo will exit if the user's password is not entered within a configurable time limit. This limit is policy-specific." + remediation: "Run the following command to install sudo # dnf install sudo." compliance: - cis: ["5.3.1"] - - pci_dss: ["8.1"] - - tsc: ["CC6.1"] + - cis_csc_v8: ["5.4"] + - cis_csc_v7: ["4.3"] + - cmmc_v2.0: ["AC.L2-3.1.5", "AC.L2-3.1.6", "AC.L2-3.1.7", "SC.L2-3.13.3"] + - iso_27001-2013: ["A.9.2.3"] + - nist_sp_800-53: ["AC-6(2)", "AC-6(5)"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - soc_2: ["CC6.1", "CC6.3"] + condition: all + rules: + - "c:rpm -q sudo -> r:sudo-" + + # 5.3.2 Ensure sudo commands use pty. (Automated) + - id: 5185 + title: "Ensure sudo commands use pty." 
+ description: "sudo can be configured to run only from a pseudo terminal (pseudo-pty)." + rationale: "Attackers can run a malicious program using sudo which would fork a background process that remains even when the main program has finished executing." + impact: "WARNING: Editing the sudo configuration incorrectly can cause sudo to stop functioning. Always use visudo to modify sudo configuration files." + remediation: "Edit the file /etc/sudoers with visudo or a file in /etc/sudoers.d/ with visudo -f and add the following line: Defaults use_pty." + compliance: + - cis: ["5.3.2"] + - cis_csc_v8: ["5.4"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L2-3.1.5", "AC.L2-3.1.6", "AC.L2-3.1.7", "SC.L2-3.13.3"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-6(2)", "AC-6(5)"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - soc_2: ["CC6.1", "CC6.3"] + condition: any + rules: + - 'f:/etc/sudoers -> r:^\s*Defaults\s+use_pty' + - 'd:/etc/sudoers.d -> r:\. -> r:^\s*Defaults\s+use_pty' + + # 5.3.3 Ensure sudo log file exists. (Automated) + - id: 5186 + title: "Ensure sudo log file exists." + description: "sudo can use a custom log file." + rationale: "A sudo log file simplifies auditing of sudo commands." + impact: "WARNING: Editing the sudo configuration incorrectly can cause sudo to stop functioning. Always use visudo to modify sudo configuration files." + remediation: 'Edit the file /etc/sudoers or a file in /etc/sudoers.d/ with visudo or visudo -f and add the following line: Defaults logfile="" Example Defaults logfile="/var/log/sudo.log".' + compliance: + - cis: ["5.3.3"] + - cis_csc_v8: ["8.5"] + - cis_csc_v7: ["6.3"] + - cmmc_v2.0: ["AU.L2-3.3.1"] + - iso_27001-2013: ["A.12.4.1"] + - nist_sp_800-53: ["AU-3(1)", "AU-7"] + - pci_dss_v3.2.1: ["10.1", "10.2.2", "10.2.4", "10.2.5", "10.3"] + - pci_dss_v4.0: ["10.2", "10.2.1", "10.2.1.2", "10.2.1.5", "9.4.5"] + - soc_2: ["CC5.2", "CC7.2"] + condition: any + rules: + - 'f:/etc/sudoers -> r:^Defaults logfile="' + - 'd:/etc/sudoers.d -> r:\. -> r:^Defaults\s+logfile="' + + # 5.3.4 Ensure users must provide password for escalation. (Automated) + - id: 5187 + title: "Ensure users must provide password for escalation." + description: "The operating system must be configured so that users must provide a password for privilege escalation." + rationale: "Without re-authentication, users may access resources or perform tasks for which they do not have authorization. When operating systems provide the capability to escalate a functional capability, it is critical the user re-authenticate." + impact: "This will prevent automated processes from being able to elevate privileges. To include Ansible and AWS builds." + remediation: "Based on the outcome of the audit procedure, use visudo -f to edit the relevant sudoers file. Remove any line with occurrences of NOPASSWD tags in the file." + compliance: + - cis: ["5.3.4"] + - cis_csc_v8: ["5.4"] + - cis_csc_v7: ["4.3"] + - cmmc_v2.0: ["AC.L2-3.1.5", "AC.L2-3.1.6", "AC.L2-3.1.7", "SC.L2-3.13.3"] + - iso_27001-2013: ["A.9.2.3"] + - nist_sp_800-53: ["AC-6(2)", "AC-6(5)"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - soc_2: ["CC6.1", "CC6.3"] + condition: none + rules: + - 'f:/etc/sudoers -> !r:^\s*# && r:NOPASSWD' + - 'd:/etc/sudoers.d -> r:\. -> !r:^\s*# && r:NOPASSWD' + + # 5.3.5 Ensure re-authentication for privilege escalation is not disabled globally. (Automated) + - id: 5188 + title: "Ensure re-authentication for privilege escalation is not disabled globally." 
+ description: "The operating system must be configured so that users must re-authenticate for privilege escalation." + rationale: "Without re-authentication, users may access resources or perform tasks for which they do not have authorization. When operating systems provide the capability to escalate a functional capability, it is critical the user re-authenticate." + remediation: "Configure the operating system to require users to reauthenticate for privilege escalation. Based on the outcome of the audit procedure, use visudo -f to edit the relevant sudoers file. Remove any occurrences of !authenticate tags in the file(s)." + compliance: + - cis: ["5.3.5"] + - cis_csc_v8: ["5.4"] + - cis_csc_v7: ["4.3"] + - cmmc_v2.0: ["AC.L2-3.1.5", "AC.L2-3.1.6", "AC.L2-3.1.7", "SC.L2-3.13.3"] + - iso_27001-2013: ["A.9.2.3"] + - nist_sp_800-53: ["AC-6(2)", "AC-6(5)"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - soc_2: ["CC6.1", "CC6.3"] + condition: none + rules: + - 'f:/etc/sudoers -> !r:^\s*# && r:!authenticate' + - 'd:/etc/sudoers.d -> r:\. -> !r:^\s*# && r:!authenticate' + + # 5.3.6 Ensure sudo authentication timeout is configured correctly. (Automated) + - id: 5189 + title: "Ensure sudo authentication timeout is configured correctly." + description: "sudo caches used credentials for a default of 5 minutes. This is for ease of use when there are multiple administrative tasks to perform. The timeout can be modified to suit local security policies." + rationale: "Setting a timeout value reduces the window of opportunity for unauthorized privileged access to another user." + remediation: "If the currently configured timeout is larger than 15 minutes, edit the file listed in the audit section with visudo -f and modify the entry timestamp_timeout= to 15 minutes or less as per your site policy. The value is in minutes. This particular entry may appear on it's own, or on the same line as env_reset. See the following two examples: Defaults env_reset, timestamp_timeout=15 Defaults timestamp_timeout=15 Defaults env_reset." + references: + - "https://www.sudo.ws/man/1.9.0/sudoers.man.html" + compliance: + - cis: ["5.3.6"] + - cis_csc_v8: ["5.4"] + - cis_csc_v7: ["4.3"] + - cmmc_v2.0: ["AC.L2-3.1.5", "AC.L2-3.1.6", "AC.L2-3.1.7", "SC.L2-3.13.3"] + - iso_27001-2013: ["A.9.2.3"] + - nist_sp_800-53: ["AC-6(2)", "AC-6(5)"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - soc_2: ["CC6.1", "CC6.3"] + condition: all + rules: + - 'not f:/etc/sudoers -> !r:^\s*\t*# && r:timestamp_timeout\s*\t*=\s*\t*-1' + - 'not d:/etc/sudoers.d -> r:\.+ -> !r:^\s*\t*# && r:timestamp_timeout\s*\t*=\s*\t*-1' + - 'not f:/etc/sudoers -> !r:^\s*\t*# && n:timestamp_timeout\s*\t*=\s*\t*(\d+) compare > 15' + - 'not d:/etc/sudoers.d -> r:\.+ -> !r:^\s*\t*# && n:timestamp_timeout\s*\t*=\s*\t*(\d+) compare > 15' + + # 5.3.7 Ensure access to the su command is restricted. (Automated) + - id: 5190 + title: "Ensure access to the su command is restricted." + description: "The su command allows a user to run a command or shell as another user. The program has been superseded by sudo, which allows for more granular control over privileged access. Normally, the su command can be executed by any user. By uncommenting the pam_wheel.so statement in /etc/pam.d/su, the su command will only allow users in a specific groups to execute su. This group should be empty to reinforce the use of sudo for privileged access." 
+ rationale: "Restricting the use of su , and using sudo in its place, provides system administrators better control of the escalation of user privileges to execute privileged commands. The sudo utility also provides a better logging and audit mechanism, as it can log each command executed via sudo , whereas su can only record that a user executed the su program." + remediation: "Create an empty group that will be specified for use of the su command. The group should be named according to site policy. Example: # groupadd sugroup Add the following line to the /etc/pam.d/su file, specifying the empty group: auth required pam_wheel.so use_uid group=sugroup." + compliance: + - cis: ["5.3.7"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] + condition: all + rules: + - 'f:/etc/pam.d/su -> !r:# && r:auth\s*\t*required\s*\t*pam_wheel.so\s*\t*use_uid' + + ############################################### + # 5.4 Configure authselect + ############################################### + + # 5.4.1 Ensure custom authselect profile is used. (Manual) + - id: 5191 + title: "Ensure custom authselect profile is used." + description: "A custom profile can be created by copying and customizing one of the default profiles. The default profiles include: sssd, winbind, or the nis. This profile can then be customized to follow site specific requirements. You can select a profile for the authselect utility for a specific host. The profile will be applied to every user logging into the host." + rationale: "A custom profile is required to customize many of the pam options. When you deploy a profile, the profile is applied to every user logging into the given host." + remediation: "Run the following command to create a custom authselect profile: # authselect create-profile Example: # authselect create-profile custom-profile -b sssd --symlink-meta Run the following command to select a custom authselect profile: # authselect select custom/ {with-} Example: # authselect select custom/custom-profile with-sudo with-faillock without-nullok." + compliance: + - cis: ["5.4.1"] + - cis_csc_v8: ["16.2"] + - cis_csc_v7: ["16.7"] + - cmmc_v2.0: ["SI.L1-3.14.1"] + - iso_27001-2013: ["A.9.2.6"] + - pci_dss_v3.2.1: ["6.3.2"] + - pci_dss_v4.0: ["6.3.1"] condition: all rules: - "c:authselect current -> r:^Profile ID: custom/" - # 5.3.3 Ensure authselect includes with-faillock (Scored) - - id: 5161 - title: "Ensure authselect includes with-faillock" + # 5.4.2 Ensure authselect includes with-faillock. (Automated) + - id: 5192 + title: "Ensure authselect includes with-faillock." description: "The pam_faillock.so module maintains a list of failed authentication attempts per user during a specified interval and locks the account in case there were more than deny consecutive failed authentications. It stores the failure records into per-user files in the tally directory." rationale: "Locking out user IDs after n unsuccessful consecutive login attempts mitigates brute force password attacks against your systems." 
- remediation: "Run the following command to include the with-faillock option: # authselect select with-faillock Example: # authselect select custom/custom-profile with-sudo with-faillock without-nullok" + remediation: "Run the following commands to include the with-faillock option to the current authselect profile: # authselect enable-feature with-faillock # authselect apply-changes." compliance: - - cis: ["5.3.3"] - - pci_dss: ["8.1"] - - tsc: ["CC6.1"] + - cis: ["5.4.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["16.7"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.9.2.6"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - "c:authselect current -> r:with-faillock" - "f:/etc/authselect/authselect.conf -> r:with-faillock" ############################################### - # 5.4 Configure PAM + # 5.5 Configure PAM ############################################### - # 5.4.1 Ensure password creation requirements are configured (Scored) - - id: 5162 - title: "Ensure password creation requirements are configured" - description: "The pam_pwquality.so module checks the strength of passwords. It performs checks such as making sure a password is not a dictionary word, it is a certain length, contains a mix of characters (e.g. alphabet, numeric, other) and more. The following are definitions of the pam_pwquality.so options. try_first_pass - retrieve the password from a previous stacked PAM module. If not available, then prompt the user for a password. retry=3 - Allow 3 tries before sending back a failure. minlen=14 - password must be 14 characters or more Either of the following can be used to enforce complex passwords: minclass=4 - provide at least four classes of characters for the new password OR dcredit=-1 - provide at least one digit ucredit=-1 - provide at least one uppercase character ocredit=-1 - provide at least one special character lcredit=-1 - provide at least one lowercase character The settings shown above are one possible policy. Alter these values to conform to your own organization's password policies" + + # 5.5.1 Ensure password creation requirements are configured. (Automated) + - id: 5193 + title: "Ensure password creation requirements are configured." + description: "The pam_pwquality.so module checks the strength of passwords. It performs checks such as making sure a password is not a dictionary word, it is a certain length, contains a mix of characters (e.g. alphabet, numeric, other) and more. The following are definitions of the pam_pwquality.so options. - try_first_pass - retrieve the password from a previous stacked PAM module. If not available, then prompt the user for a password. - retry=3 - Allow 3 tries before sending back a failure. - minlen=14 - password must be 14 characters or more ** Either of the following can be used to enforce complex passwords:** - minclass=4 - provide at least four classes of characters for the new password OR - dcredit=-1 - provide at least one digit - ucredit=-1 - provide at least one uppercase character - ocredit=-1 - provide at least one special character - lcredit=-1 - provide at least one lowercase character The settings shown above are one possible policy. Alter these values to conform to your own organization's password policies." 
rationale: "Strong passwords protect systems from being hacked through brute force methods." - remediation: "Edit the file /etc/security/pwquality.conf and add or modify the following line for password length to conform to site policy: minlen = 14 Edit the file /etc/security/pwquality.conf and add or modify the following line for password complexity to conform to site policy: minclass = 4 OR dcredit = -1 ucredit = -1 ocredit = -1 -1 = -1 Run the following to update the system-auth and password-auth files: CP=$(authselect current | awk 'NR == 1 {print $3}' | grep custom/) for FN in system-auth password-auth; do [[ -n $CP ]] && PTF=/etc/authselect/$CP/$FN || PTF=/etc/authselect/$FN [[ -z $(grep -E '^\\s*password\\s+requisite\\s+pam_pwquality.so\\s+.*enforce-for-root\\s*.*$' $PTF) ]] && sed -ri 's/^\\s*(password\\s+requisite\\s+pam_pwquality.so\\s+)(.*)$/\\1\\2 enforce-for-root/' $PTF [[ -n $(grep -E '^\\s*password\\s+requisite\\s+pam_pwquality.so\\s+.*\\s+retry=\\S+\\s*.*$' $PTF) ]] && sed -ri '/pwquality/s/retry=\\S+/retry=3/' $PTF || sed -ri 's/^\\s*(password\\s+requisite\\s+pam_pwquality.so\\s+)(.*)$/\\1\\2 retry=3/' $PTF done authselect apply-changes" + remediation: "Edit the file /etc/security/pwquality.conf and add or modify the following line for password length to conform to site policy minlen = 14 Edit the file /etc/security/pwquality.conf and add or modify the following line for password complexity to conform to site policy minclass = 4 OR dcredit = -1 ucredit = -1 ocredit = -1 lcredit = -1 Run the following script to update the system-auth and password-auth files #!/usr/bin/env bash for fn in system-auth password-auth; do file=\"/etc/authselect/$(head -1 /etc/authselect/authselect.conf | grep 'custom/')/$fn\" if ! grep -Pq -- '^\\h*password\\h+requisite\\h+pam_pwquality.so(\\h+[^#\\n\\r]+)?\\h+.*enforce_for_r oot\\b.*$' \"$file\"; then sed -ri 's/^\\s*(password\\s+requisite\\s+pam_pwquality.so\\s+)(.*)$/\\1\\2 enforce_for_root/' \"$file\" fi if grep -Pq -- '^\\h*password\\h+requisite\\h+pam_pwquality.so(\\h+[^#\\n\\r]+)?\\h+retry=([4- 9]|[1-9][0-9]+)\\b.*$' \"$file\"; then sed -ri '/pwquality/s/retry=\\S+/retry=3/' \"$file\" elif ! grep -Pq -- '^\\h*password\\h+requisite\\h+pam_pwquality.so(\\h+[^#\\n\\r]+)?\\h+retry=\\d+\\b.*$' \"$file\"; then sed -ri 's/^\\s*(password\\s+requisite\\s+pam_pwquality.so\\s+)(.*)$/\\1\\2 retry=3/' \"$file\" fi done authselect apply-changes." compliance: - - cis: ["5.4.1"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2.3"] - - tsc: ["CC6.1"] + - cis: ["5.5.1"] + - cis_csc_v8: ["5.2"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["IA.L2-3.5.7"] + - iso_27001-2013: ["A.9.4.3"] + - pci_dss_v4.0: ["2.2.2", "8.3.5", "8.3.6", "8.6.3"] + - soc_2: ["CC6.1"] condition: all rules: - "f:/etc/pam.d/password-auth -> r:pam_pwquality.so && r:try_first_pass" - "f:/etc/pam.d/system-auth -> r:pam_pwquality.so && r:try_first_pass" - 'f:/etc/security/pwquality.conf -> n:^\s*minlen\s+\t*=\s+\t*(\d+) compare >= 14' - # 5.4.2 Ensure lockout for failed password attempts is configured (Scored) - - id: 5163 - title: "Ensure lockout for failed password attempts is configured" - description: "Lock out users after n unsuccessful consecutive login attempts. deny= - Number of attempts before the account is locked. unlock_time= - Time in seconds before the account is unlocked. Set the lockout number and unlock time to follow local site policy." + # 5.5.2 Ensure lockout for failed password attempts is configured. 
(Automated) + - id: 5194 + title: "Ensure lockout for failed password attempts is configured." + description: "Lock out users after n unsuccessful consecutive login attempts. - deny= - Number of attempts before the account is locked - unlock_time= - Time in seconds before the account is unlocked Note: The maximum configurable value for unlock_time is 604800." rationale: "Locking out user IDs after n unsuccessful consecutive login attempts mitigates brute force password attacks against your systems." - remediation: "Set password lockouts and unlock times to conform to site policy. Run the following to update the system-auth and password-auth files. This script will update/add the deny=5 and unlock_time=900 options. This script should be modified as needed to follow local site policy.CP=$(authselect current | awk \"NR == 1 {print $3}\" | grep custom/) for FN in system-auth password-auth; do [[ -n $CP ]] && PTF=/etc/authselect/$CP/$FN || PTF=/etc/authselect/$FN [[ -n $(grep -E \"^\\s*auth\\s+required\\s+pam_faillock.so\\s+.*deny=\\S+\\s*.*$\" $PTF) ]] && sed -ri \"/pam_faillock.so/s/deny=\\S+/deny=5/g\" $PTF || sed -ri \"s/^\\^\\s*(auth\\s+required\\s+pam_faillock\\.so\\s+)(.*[^{}])(\\{.*\\}|)$/\\1\\2 deny=5 \\3/\" $PTF [[ -n $(grep -E \"^\\s*auth\\s+required\\s+pam_faillock.so\\s+.*unlock_time=\\S+\\s*.*$\" $PTF) ]] && sed -ri \"/pam_faillock.so/s/unlock_time=\\S+/unlock_time=900/g\" $PTF || sed -ri \"s/^\\s*(auth\\s+required\\s+pam_faillock\\.so\\s+)(.*[^{}])(\\{.*\\}|)$/\\1\\2 unlock_time=900 \\3/\" $PTF done authselect apply-changes" + remediation: "Set password lockouts and unlock times to conform to site policy. deny should be not greater than 5 and unlock_time should be 0 (never), or 900 seconds or greater. Depending on the version you are running, follow one of the two methods bellow. Versions 8.2 and later: Edit /etc/security/faillock.conf and update or add the following lines: deny = 5 unlock_time = 900 Versions 8.0 and 8.1: Run the following script to update the system-auth and password-auth files. This script will update/add the deny=5 and unlock_time=900 options. This script should be modified as needed to follow local site policy. #!/usr/bin/env bash for fn in system-auth password-auth; do file=\"/etc/authselect/$(head -1 /etc/authselect/authselect.conf | grep 'custom/')/$fn\" if grep -Pq -- '^\\h*auth\\h+required\\h+pam_faillock\\.so(\\h+[^#\\n\\r]+)?\\h+deny=(0|[6-9]|[1- 9][0-9]+)\\b.*$' \"$file\"; then sed -ri '/pam_faillock.so/s/deny=\\S+/deny=5/g' \"$file\" elif ! grep -Pq -- '^\\h*auth\\h+required\\h+pam_faillock\\.so(\\h+[^#\\n\\r]+)?\\h+deny=\\d*\\b.*$' \"$file\"; then sed -r 's/^\\s*(auth\\s+required\\s+pam_faillock\\.so\\s+)([^{}#\\n\\r]+)?\\s*(\\{.*\\})?(.*)$ /\\1\\2\\3 deny=5 \\4/' $file fi if grep -P -- '^\\h*(auth\\h+required\\h+pam_faillock\\.so\\h+)([^#\\n\\r]+)?\\h+unlock_time=([1- 9]|[1-9][0-9]|[1-8][0-9][0-9])\\b.*$' \"$file\"; then sed -ri '/pam_faillock.so/s/unlock_time=\\S+/unlock_time=900/g' \"$file\" elif ! grep -Pq -- '^\\h*auth\\h+required\\h+pam_faillock\\.so(\\h+[^#\\n\\r]+)?\\h+unlock_time=\\d*\\b.*$ ' \"$file\"; then sed -ri 's/^\\s*(auth\\s+required\\s+pam_faillock\\.so\\s+)([^{}#\\n\\r]+)?\\s*(\\{.*\\})?(.*)$ /\\1\\2\\3 unlock_time=900 \\4/' \"$file\" fi done authselect apply-changes." 
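+    # On release 8.2 and later, the same settings from the remediation above can simply
+    # be kept in /etc/security/faillock.conf (illustrative, per site policy):
+    #   deny = 5
+    #   unlock_time = 900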
compliance: - - cis: ["5.4.2"] - - cis_csc: ["16.7"] - - pci_dss: ["8.2.5"] - - tsc: ["CC6.1"] + - cis: ["5.5.2"] + - cis_csc_v8: ["6.2"] + - cis_csc_v7: ["16.7"] + - cmmc_v2.0: ["AC.L1-3.1.1"] + - hipaa: ["164.308(a)(3)(ii)(C)"] + - iso_27001-2013: ["A.9.2.6"] + - nist_sp_800-53: ["AC-2(1)"] + - pci_dss_v3.2.1: ["8.1.3"] + - pci_dss_v4.0: ["8.2.4", "8.2.5"] + - soc_2: ["CC6.2", "CC6.3"] condition: all rules: - 'f:/etc/pam.d/password-auth -> r:^\s*auth\.+required\.+pam_faillock.so\.+ && n:deny=(\d+) compare <= 5 && n:unlock_time=(\d+) compare >= 900' - 'f:/etc/pam.d/system-auth -> r:^\s*auth\.+required\.+pam_faillock.so\.+ && n:deny=(\d+) compare <= 5 && n:unlock_time=(\d+) compare >= 900' - # 5.4.3 Ensure password reuse is limited (Scored) - - id: 5164 - title: "Ensure password reuse is limited" - description: 'The /etc/security/opasswd file stores the users" old passwords and can be checked to ensure that users are not recycling recent passwords. remember=<5> - Number of old passwords to remember' - rationale: "Forcing users not to reuse their past 5 passwords make it less likely that an attacker will be able to guess the password. Note that these change only apply to accounts configured on the local system." - remediation: "Set remembered password history to conform to site policy. Run the following script to add or modify the pam_pwhistory.so and pam_unix.so lines to include the remember option: CP=$(authselect current | awk \"NR == 1 {print $3}\" | grep custom/) [[ -n $CP ]] && PTF=/etc/authselect/$CP/system-auth || PTF=/etc/authselect/system-auth [[ -n $(grep -E \"^\\s*password\\s+(sufficient\\s+pam_unix|requi(red|site)\\s+pam_pwhistory).so\\s+ ([^#]+\\s+)*remember=\\S+\\s*.*$\" $PTF) ]] && sed -ri \"s/^\\s*(password\\s+(requisite|sufficient)\\s+(pam_pwquality\\.so|pam_unix\\.so)\\s+)(.*)(remember=\\S+\\s*)(.*)$/\\1\\4 remember=5 \\6/\" $PTF || sed -ri \"s/^\\s*(password\\s+(requisite|sufficient)\\s+(pam_pwquality\\.so|pam_unix\\.so)\\s+)(.*)$/\\1\\4 remember=5/\" $PTF authselect apply-changes" + # 5.5.3 Ensure password reuse is limited. (Automated) + - id: 5195 + title: "Ensure password reuse is limited." + description: "The /etc/security/opasswd file stores the users' old passwords and can be checked to ensure that users are not recycling recent passwords. - remember=<5> - Number of old passwords to remember." + rationale: "Forcing users not to reuse their past 5 passwords make it less likely that an attacker will be able to guess the password. Note: These change only apply to accounts configured on the local system." + remediation: "Set remembered password history to conform to site policy. Run the following script to add or modify the pam_pwhistory.so and pam_unix.so lines to include the remember option: #!/usr/bin/env bash { file=\"/etc/authselect/$(head -1 /etc/authselect/authselect.conf | grep 'custom/')/system-auth\" if ! 
grep -Pq -- '^\\h*password\\h+(requisite|required|sufficient)\\h+pam_pwhistory\\.so\\h+([^#\\n\\ r]+\\h+)?remember=([5-9]|[1-9][0-9]+)\\b.*$' \"$file\"; then if grep -Pq -- '^\\h*password\\h+(requisite|required|sufficient)\\h+pam_pwhistory\\.so\\h+([^#\\n\\ r]+\\h+)?remember=\\d+\\b.*$' \"$file\"; then sed -ri 's/^\\s*(password\\s+(requisite|required|sufficient)\\s+pam_pwhistory\\.so\\s+([^# \\n\\r]+\\s+)?)(remember=\\S+\\s*)(\\s+.*)?$/\\1 remember=5 \\5/' $file elif grep -Pq -- '^\\h*password\\h+(requisite|required|sufficient)\\h+pam_pwhistory\\.so\\h+([^#\\n\\ r]+\\h+)?.*$' \"$file\"; then sed -ri '/^\\s*password\\s+(requisite|required|sufficient)\\s+pam_pwhistory\\.so/ s/$/ remember=5/' $file else sed -ri '/^\\s*password\\s+(requisite|required|sufficient)\\s+pam_unix\\.so/i password required pam_pwhistory.so remember=5 use_authtok' $file fi fi if ! grep -Pq -- '^\\h*password\\h+(requisite|required|sufficient)\\h+pam_unix\\.so\\h+([^#\\n\\r]+\\h +)?remember=([5-9]|[1-9][0-9]+)\\b.*$' \"$file\"; then if grep -Pq -- '^\\h*password\\h+(requisite|required|sufficient)\\h+pam_unix\\.so\\h+([^#\\n\\r]+\\h +)?remember=\\d+\\b.*$' \"$file\"; then sed -ri 's/^\\s*(password\\s+(requisite|required|sufficient)\\s+pam_unix\\.so\\s+([^#\\n\\r] +\\s+)?)(remember=\\S+\\s*)(\\s+.*)?$/\\1 remember=5 \\5/' $file else sed -ri '/^\\s*password\\s+(requisite|required|sufficient)\\s+pam_unix\\.so/ s/$/ remember=5/' $file fi fi authselect apply-changes }." compliance: - - cis: ["5.4.3"] - - cis_csc: ["16"] - - pci_dss: ["8.2.5"] - - tsc: ["CC6.1"] + - cis: ["5.5.3"] + - cis_csc_v8: ["5.2"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["IA.L2-3.5.7"] + - iso_27001-2013: ["A.9.4.3"] + - pci_dss_v4.0: ["2.2.2", "8.3.5", "8.3.6", "8.6.3"] + - soc_2: ["CC6.1"] condition: all rules: - 'f:/etc/pam.d/system-auth -> r:^\s*password\.+requisite\.+pam_pwquality\.so\.+ && n:remember=(\d+) compare >= 5' - 'f:/etc/pam.d/system-auth -> r:^\s*password\.+sufficient\.+pam_unix\.so\.+ && n:remember=(\d+) compare >= 5' - # 5.4.4 Ensure password hashing algorithm is SHA-512 (Scored) - - id: 5165 - title: "Ensure password hashing algorithm is SHA-512" - description: "The commands below change password encryption from md5 to sha512 (a much stronger hashing algorithm). All existing accounts will need to perform a password change to upgrade the stored hashes to the new algorithm." - rationale: "The SHA-512 algorithm provides much stronger hashing than MD5, thus providing additional protection to the system by increasing the level of effort for an attacker to successfully determine passwords. Note that these change only apply to accounts configured on the local system." - remediation: "Set password hashing algorithm to sha512. Run the following script to dd or modify the pam_unix.so lines in the password-auth and system-auth files to include the sha512 option: CP=$(authselect current | awk 'NR == 1 {print $3}' | grep custom/) for FN in system-auth password-auth; do [[ -z $(grep -E '^\\s*password\\s+sufficient\\s+pam_unix.so\\s+.*sha512\\s*.*$' $PTF) ]] && sed -ri 's/^\\s*(password\\s+sufficient\\s+pam_unix.so\\s+)(.*)$/\\1\\2 sha512/' $PTF done authselect apply-changes" + # 5.5.4 Ensure password hashing algorithm is SHA-512. (Automated) + - id: 5196 + title: "Ensure password hashing algorithm is SHA-512." + description: "A cryptographic hash function converts an arbitrary-length input into a fixed length output. 
Password hashing performs a one-way transformation of a password, turning the password into another string, called the hashed password." + rationale: "The SHA-512 algorithm provides stronger hashing than other hashing algorithms used for password hashing with Linux, providing additional protection to the system by increasing the level of effort for an attacker to successfully determine passwords. Note: These changes only apply to accounts configured on the local system." + remediation: "Set password hashing algorithm to sha512. Edit /etc/libuser.conf and edit of add the following line: crypt_style = sha512 Edit /etc/login.defs and edit or add the following line: ENCRYPT_METHOD SHA512 Run the following script to configure pam_unix.so to use the sha512 hashing algorithm: #!/usr/bin/env bash for fn in system-auth password-auth; do file=\"/etc/authselect/$(head -1 /etc/authselect/authselect.conf | grep 'custom/')/$fn\" if ! grep -Pq -- '^\\h*password\\h+(requisite|required|sufficient)\\h+pam_unix\\.so(\\h+[^#\\n\\r]+)? \\h+sha512\\b.*$' \"$file\"; then if grep -Pq -- '^\\h*password\\h+(requisite|required|sufficient)\\h+pam_unix\\.so(\\h+[^#\\n\\r]+)? \\h+(md5|blowfish|bigcrypt|sha256)\\b.*$' \"$file\"; then sed -ri 's/(md5|blowfish|bigcrypt|sha256)/sha512/' \"$file\" else sed -ri 's/(^\\s*password\\s+(requisite|required|sufficient)\\s+pam_unix.so\\s+)(.*)$/\\1s ha512 \\3/' $file fi fi done authselect apply-changes Note: This only effects local users and passwords created after updating the files to use sha512. If it is determined that the password algorithm being used is not SHA-512, once it is changed, it is recommended that all user ID's be immediately expired and forced to change their passwords on next login." compliance: - - cis: ["5.4.4"] - - cis_csc: ["16.4"] - - pci_dss: ["3.6.1", "8.2.1"] - - tsc: ["CC6.1", "CC6.7"] + - cis: ["5.5.4"] + - cis_csc_v8: ["3.11"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L2-3.1.19", "IA.L2-3.5.10", "MP.L2-3.8.1", "SC.L2-3.13.11", "SC.L2-3.13.16"] + - hipaa: ["164.312(a)(2)(iv)", "164.312(e)(2)(ii)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["SC-28", "SC-28(1)"] + - pci_dss_v3.2.1: ["3.4", "3.4.1", "8.2.1"] + - pci_dss_v4.0: ["3.1.1", "3.3.2", "3.3.3", "3.5.1", "3.5.1.2", "3.5.1.3", "8.3.2"] + - soc_2: ["CC6.1"] condition: all rules: - 'f:/etc/pam.d/password-auth -> r:^\s*password\.+sufficient\.+pam_unix\.so && r:sha512' - 'f:/etc/pam.d/system-auth -> r:^\s*password\.+sufficient\.+pam_unix\.so && r:sha512' ############################################### - # 5.5 User Accounts and Environment + # 5.6 User Accounts and Environment ############################################### ############################################### - # 5.5.1 Set Shadow Password Suite Parameters + # 5.6.1 Set Shadow Password Suite Parameters ############################################### - # 5.5.1.1 Ensure password expiration is 365 days or less (Scored) - - id: 5166 - title: "Ensure password expiration is 365 days or less" + + # 5.6.1.1 Ensure password expiration is 365 days or less. (Automated) + - id: 5197 + title: "Ensure password expiration is 365 days or less." description: "The PASS_MAX_DAYS parameter in /etc/login.defs allows an administrator to force passwords to expire once they reach a defined age. It is recommended that the PASS_MAX_DAYS parameter be set to less than or equal to 365 days." 
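+    # Illustrative /etc/login.defs entries for the hashing algorithm (5.5.4) and maximum
+    # password age (5.6.1.1) remediations above; existing accounts also need chage:
+    #   ENCRYPT_METHOD SHA512
+    #   PASS_MAX_DAYS 365
+    # e.g. chage --maxdays 365 <user>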
rationale: "The window of opportunity for an attacker to leverage compromised credentials or successfully compromise credentials via an online brute force attack is limited by the age of the password. Therefore, reducing the maximum age of a password also reduces an attacker's window of opportunity." - remediation: "Set the PASS_MAX_DAYS parameter to conform to site policy in /etc/login.defs : PASS_MAX_DAYS 90 and modify user parameters for all users with a password set to match: chage --maxdays 90 " + remediation: "Set the PASS_MAX_DAYS parameter to conform to site policy in /etc/login.defs : PASS_MAX_DAYS 365 Modify user parameters for all users with a password set to match: # chage --maxdays 365 ." compliance: - - cis: ["5.5.1.1"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2.4"] - - tsc: ["CC6.1"] + - cis: ["5.6.1.1"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.9.4.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'f:/etc/login.defs -> n:^\s*PASS_MAX_DAYS\s*\t*(\d+) compare <= 365' - # 5.5.1.2 Ensure minimum days between password changes is 7 or more (Scored) - - id: 5167 - title: "Ensure minimum days between password changes is 7 or more" + # 5.6.1.2 Ensure minimum days between password changes is 7 or more. (Automated) + - id: 5198 + title: "Ensure minimum days between password changes is 7 or more." description: "The PASS_MIN_DAYS parameter in /etc/login.defs allows an administrator to prevent users from changing their password until a minimum number of days have passed since the last time the user changed their password. It is recommended that PASS_MIN_DAYS parameter be set to 7 or more days." rationale: "By restricting the frequency of password changes, an administrator can prevent users from repeatedly changing their password in an attempt to circumvent password reuse controls." - remediation: "Set the PASS_MIN_DAYS parameter to 7 in /etc/login.defs: PASS_MIN_DAYS 7 and modify user parameters for all users with a password set to match: chage --mindays 7 " + remediation: "Set the PASS_MIN_DAYS parameter to 7 in /etc/login.defs : PASS_MIN_DAYS 7 Modify user parameters for all users with a password set to match: # chage --mindays 7 ." compliance: - - cis: ["5.5.1.2"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + - cis: ["5.6.1.2"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.9.4.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'f:/etc/login.defs -> n:^\s*PASS_MIN_DAYS\s*\t*(\d+) compare >= 7' - # 5.5.1.3 Ensure password expiration warning days is 7 or more (Scored) - - id: 5168 - title: "Ensure minimum days between password changes is 7 or more" + # 5.6.1.3 Ensure password expiration warning days is 7 or more. (Automated) + - id: 5199 + title: "Ensure password expiration warning days is 7 or more." description: "The PASS_WARN_AGE parameter in /etc/login.defs allows an administrator to notify users that their password will expire in a defined number of days. 
It is recommended that the PASS_WARN_AGE parameter be set to 7 or more days." rationale: "Providing an advance warning that a password will be expiring gives users time to think of a secure password. Users caught unaware may choose a simple password or write it down where it may be discovered." - remediation: "Set the PASS_WARN_AGE parameter to 7 in /etc/login.defs: PASS_WARN_AGE 7 and modify user parameters for all users with a password set to match: chage --warndays 7 " + remediation: "Set the PASS_WARN_AGE parameter to 7 in /etc/login.defs : PASS_WARN_AGE 7 Modify user parameters for all users with a password set to match: # chage --warndays 7 ." compliance: - - cis: ["5.5.1.3"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + - cis: ["5.6.1.3"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.9.4.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'f:/etc/login.defs -> n:^\s*PASS_WARN_AGE\s*\t*(\d+) compare >= 7' - # 5.4.1.4 Ensure inactive password lock is 30 days or less (Scored) - - id: 5169 - title: "Ensure inactive password lock is 30 days or less" + # 5.6.1.4 Ensure inactive password lock is 30 days or less. (Automated) + - id: 5200 + title: "Ensure inactive password lock is 30 days or less." description: "User accounts that have been inactive for over a given period of time can be automatically disabled. It is recommended that accounts that are inactive for 30 days after password expiration be disabled." rationale: "Inactive accounts pose a threat to system security since the users are not logging in to notice failed login attempts or other anomalies." - remediation: "Run the following command to set the default password inactivity period to 30 days: useradd -D -f 30 and modify user parameters for all users with a password set to match: chage --inactive 30 " + remediation: "Run the following command to set the default password inactivity period to 30 days: # useradd -D -f 30 Modify user parameters for all users with a password set to match: # chage --inactive 30 ." compliance: - - cis: ["5.4.1.4"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + - cis: ["5.6.1.4"] + - cis_csc_v8: ["5.2"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["IA.L2-3.5.7"] + - iso_27001-2013: ["A.9.4.3"] + - pci_dss_v4.0: ["2.2.2", "8.3.5", "8.3.6", "8.6.3"] + - soc_2: ["CC6.1"] condition: all rules: - 'c:useradd -D -> n:^\s*INACTIVE\s*=\s*(\d+) compare <= 30' - # 5.5.3 Ensure default user shell timeout is 900 seconds or less (Scored) - - id: 5170 - title: "Ensure default user shell timeout is 900 seconds or less" - description: "The default TMOUT determines the shell timeout for users. The TMOUT value is measured in seconds." - rationale: "Having no timeout value associated with a shell could allow an unauthorized user access to another user's shell session (e.g. user walks away from their computer and doesn't lock the screen). Setting a timeout value at least reduces the risk of this happening." - remediation: "Edit the /etc/bashrc , /etc/profile and /etc/profile.d/*.sh files (and the appropriate files for any other shell supported on your system) and add or edit any umask parameters as follows: readonly TMOUT=900 ; export TMOUT . 
Note that setting the value to readonly prevents unwanted modification during runtime." + # 5.6.1.5 Ensure all users last password change date is in the past. (Automated) - Not Implemented + # 5.6.2 Ensure system accounts are secured. (Automated) - Not Implemented + + # 5.6.3 Ensure default user shell timeout is 900 seconds or less. (Automated) + - id: 5201 + title: "Ensure default user shell timeout is 900 seconds or less." + description: "TMOUT is an environmental setting that determines the timeout of a shell in seconds. - TMOUT=n - Sets the shell timeout to n seconds. A setting of TMOUT=0 disables - timeout. readonly TMOUT- Sets the TMOUT environmental variable as readonly, preventing unwanted modification during run-time. - export TMOUT - exports the TMOUT variable System Wide Shell Configuration Files: - /etc/profile - used to set system wide environmental variables on users shells. The variables are sometimes the same ones that are in the .bash_profile, however this file is used to set an initial PATH or PS1 for all shell users of the system. is only executed for interactive login shells, or shells executed with the --login parameter. - /etc/profile.d - /etc/profile will execute the scripts within /etc/profile.d/*.sh. It is recommended to place your configuration in a shell script within /etc/profile.d to set your own system wide environmental variables. - /etc/bashrc - System wide version of .bashrc. In Fedora derived distributions, etc/bashrc also invokes /etc/profile.d/*.sh if non-login shell, but redirects output to /dev/null if non-interactive. Is only executed for interactive shells or if BASH_ENV is set to /etc/bashrc." + rationale: "Setting a timeout value reduces the window of opportunity for unauthorized user access to another user's shell session that has been left unattended. It also ends the inactive session and releases the resources associated with that session." + remediation: "Review /etc/bashrc, /etc/profile, and all files ending in *.sh in the /etc/profile.d/ directory and remove or edit all TMOUT=_n_ entries to follow local site policy. TMOUT should not exceed 900 or be equal to 0. Configure TMOUT in one of the following files: - A file in the /etc/profile.d/ directory ending in .sh - /etc/profile - /etc/bashrc TMOUT configuration examples: - As multiple lines: TMOUT=900 readonly TMOUT export TMOUT - As a single line: readonly TMOUT=900 ; export TMOUT." compliance: - - cis: ["5.5.3"] - - cis_csc: ["16.11"] - - pci_dss: ["12.3.8"] + - cis: ["5.6.3"] + - cis_csc_v8: ["4.3"] + - cis_csc_v7: ["16.11"] + - cmmc_v2.0: ["AC.L2-3.1.10", "AC.L2-3.1.11"] + - hipaa: ["164.312(a)(2)(iii)"] + - iso_27001-2013: ["A.8.1.3"] + - nist_sp_800-53: ["AC-11", "AC-11(1)", "AC-12", "AC-2(5)"] + - pci_dss_v3.2.1: ["8.1.8"] + - pci_dss_v4.0: ["8.2.8"] condition: all rules: - 'not f:/etc/bashrc -> !r:^# && n:TMOUT\s*\t*=\s*\t*(\d+) compare > 900' @@ -3123,32 +4417,43 @@ checks: - 'f:/etc/bashrc -> !r:^# && n:readonly TMOUT\s*=\s*(\d+)\s*; compare <= 900 && r:export TMOUT\s*$' - 'c:grep -Rh TMOUT /etc/profile /etc/profile.d/*.sh -> !r:^# && n:readonly TMOUT\s*=\s*(\d+)\s*; compare <= 900 && r:export TMOUT\s*$' - # 5.5.4 Ensure default group for the root account is GID 0 (Scored) - - id: 5171 - title: "Ensure default group for the root account is GID 0" - description: "The usermod command can be used to specify which group the root user belongs to. This affects permissions of files that are created by the root user." + # 5.6.4 Ensure default group for the root account is GID 0. 
(Automated) + - id: 5202 + title: "Ensure default group for the root account is GID 0." + description: "The usermod command can be used to specify which group the root account belongs to. This affects permissions of files that are created by the root account." rationale: "Using GID 0 for the root account helps prevent root -owned files from accidentally becoming accessible to non-privileged users." - remediation: "Run the following command to set the root user default group to GID 0: usermod -g 0 root" + remediation: "Run the following command to set the root account default group to GID 0 : # usermod -g 0 root." compliance: - - cis: ["5.5.4"] - - cis_csc: ["5.1"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + - cis: ["5.6.4"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: all rules: - 'f:/etc/passwd -> r:^root:\w:\w:0' - # 5.5.5 Ensure default user umask is 027 or more restrictive (Scored) - - id: 5172 - title: "Ensure default user umask is 027 or more restrictive" - description: "The default umask determines the permissions of files created by users. The user creating the file has the discretion of making their files and directories readable by others via the chmod command. Users who wish to allow their files and directories to be readable by others by default may choose a different default umask by inserting the umask command into the standard shell configuration files ( .profile , .bashrc , etc.) in their home directories." - rationale: "Setting a very secure default value for umask ensures that users make a conscious choice about their file permissions. A default umask setting of 077 causes files and directories created by users to not be readable by any other user on the system. A umask of 027 would make files and directories readable by users in the same Unix group, while a umask of 022 would make files readable by every user on the system." - remediation: "Edit the /etc/bashrc , /etc/profile and /etc/profile.d/*.sh files (and the appropriate files for any other shell supported on your system) and add or edit any umask parameters as follows: umask 027" - compliance: - - cis: ["5.5.5"] - - cis_csc: ["5.1", "13"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + # 5.6.5 Ensure default user umask is 027 or more restrictive. (Automated) + - id: 5203 + title: "Ensure default user umask is 027 or more restrictive." + description: "The user file-creation mode mask (umask) is use to determine the file permission for newly created directories and files. In Linux, the default permissions for any newly created directory is 0777 (rwxrwxrwx), and for any newly created file it is 0666 (rw-rw-rw-). The umask modifies the default Linux permissions by restricting (masking) these permissions. The umask is not simply subtracted, but is processed bitwise. Bits set in the umask are cleared in the resulting file mode. umask can be set with either octal or Symbolic values: - Octal (Numeric) Value - Represented by either three or four digits. ie umask 0027 or umask 027. If a four digit umask is used, the first digit is ignored. The remaining three digits effect the resulting permissions for user, group, and world/other respectively. 
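As an illustrative aside (not part of the benchmark text): the octal masking just described is a bitwise operation, and with the 0666 and 0777 defaults quoted above a umask of 027 can be verified in a couple of lines of Python; the symbolic form described next is simply another spelling of the same mask.

    # Bits set in the umask are cleared from the default creation mode;
    # the mask is not arithmetically subtracted.
    umask = 0o027
    print(oct(0o666 & ~umask))  # files: 0o640 -> rw-r-----
    print(oct(0o777 & ~umask))  # directories: 0o750 -> rwxr-x---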
- Symbolic Value - Represented by a comma separated list for User u, group g, and world/other o. The permissions listed are not masked by umask. ie a umask set by umask u=rwx,g=rx,o= is the Symbolic equivalent of the Octal umask 027. This umask would set a newly created directory with file mode drwxr-x--- and a newly created file with file mode rw-r-----. The default umask can be set to use the pam_umask module or in a System Wide Shell Configuration File. The user creating the directories or files has the discretion of changing the permissions via the chmod command, or choosing a different default umask by adding the umask command into a User Shell Configuration File, (.bash_profile or .bashrc), in their home directory. Setting the default umask: - pam_umask module: o will set the umask according to the system default in /etc/login.defs and user settings, solving the problem of different umask settings with different shells, display managers, remote sessions etc. o umask= value in the /etc/login.defs file is interpreted as Octal o Setting USERGROUPS_ENAB to yes in /etc/login.defs (default): will enable setting of the umask group bits to be the same as owner bits. (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is the same as gid, and username is the same as the userdel will remove the user's group if it contains no more members, and useradd will create by default a group with the name of the user - System Wide Shell Configuration File: o /etc/profile - used to set system wide environmental variables on users shells. The variables are sometimes the same ones that are in the .bash_profile, however this file is used to set an initial PATH or PS1 for all shell users of the system. is only executed for interactive login shells, or shells executed with the --login parameter. o /etc/profile.d - /etc/profile will execute the scripts within /etc/profile.d/*.sh. It is recommended to place your configuration in a shell script within /etc/profile.d to set your own system wide environmental variables. o /etc/bashrc - System wide version of .bashrc. In Fedora derived distributions, etc/bashrc also invokes /etc/profile.d/*.sh if non-login shell, but redirects output to /dev/null if non-interactive. Is only executed for interactive shells or if BASH_ENV is set to /etc/bashrc. User Shell Configuration Files: - ~/.bash_profile - Is executed to configure your shell before the initial command prompt. Is only read by login shells. - ~/.bashrc - Is executed for interactive shells. only read by a shell that's both interactive and non-login." + rationale: "Setting a secure default value for umask ensures that users make a conscious choice about their file permissions. A permissive umask value could result in directories or files with excessive permissions that can be read and/or written to by unauthorized users." + remediation: "Review /etc/bashrc, /etc/profile, and all files ending in *.sh in the /etc/profile.d/ directory and remove or edit all umask entries to follow local site policy. Any remaining entries should be: umask 027, umask u=rwx,g=rx,o= or more restrictive. 
Configure umask in one of the following files: - A file in the /etc/profile.d/ directory ending in .sh - /etc/profile - /etc/bashrc Example: # vi /etc/profile.d/set_umask.sh umask 027 Run the following command and remove or modify the umask of any returned files: # grep -RPi '(^|^[^#]*)\\s*umask\\s+([0-7][0-7][01][0-7]\\b|[0-7][0-7][0-7][0- 6]\\b|[0-7][01][0-7]\\b|[0-7][0-7][0- 6]\\b|(u=[rwx]{0,3},)?(g=[rwx]{0,3},)?o=[rwx]+\\b|(u=[rwx]{1,3},)?g=[^rx]{1,3}( ,o=[rwx]{0,3})?\\b)' /etc/login.defs /etc/profile* /etc/bashrc* Follow one of the following methods to set the default user umask: Edit /etc/login.defs and edit the UMASK and USERGROUPS_ENAB lines as follows: UMASK 027 USERGROUPS_ENAB no Edit the files /etc/pam.d/password-auth and /etc/pam.d/system-auth and add or edit the following: session optional pam_umask.so OR Configure umask in one of the following files: - A file in the /etc/profile.d/ directory ending in .sh - /etc/profile - /etc/bashrc Example: /etc/profile.d/set_umask.sh umask 027 Note: this method only applies to bash and shell. If other shells are supported on the system, it is recommended that their configuration files also are checked." + compliance: + - cis: ["5.6.5"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["14.6"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.9.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: none rules: - 'f:/etc/bashrc -> !r:^\s*\t*# && r:umask \d0\d|umask \d1\d|umask \d4\d|umask \d5\d' @@ -3158,24 +4463,8 @@ checks: - 'd:/etc/profile.d -> .sh -> !r:^\s*\t*# && r:umask \d0\d|umask \d1\d|umask \d4\d|umask \d5\d' - 'd:/etc/profile.d -> .sh -> !r:^\s*t*# && n:umask \d\d(\d) compare != 7' - # 5.7 Ensure access to the su command is restricted (Scored) - - id: 5173 - title: "Ensure access to the su command is restricted." - description: "The su command allows a user to run a command or shell as another user. The program has been superseded by sudo , which allows for more granular control over privileged access. Normally, the su command can be executed by any user. By uncommenting the pam_wheel.so statement in /etc/pam.d/su , the su command will only allow users in the wheel group to execute su ." - rationale: "Restricting the use of su , and using sudo in its place, provides system administrators better control of the escalation of user privileges to execute privileged commands. The sudo utility also provides a better logging and audit mechanism, as it can log each command executed via sudo , whereas su can only record that a user executed the su program." - remediation: "Add the following line to the /etc/pam.d/su file: auth required pam_wheel.so use_uid" - compliance: - - cis: ["5.7"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] - condition: all - rules: - - 'f:/etc/pam.d/su -> !r:# && r:auth\s*\t*required\s*\t*pam_wheel.so\s*\t*use_uid' + # 6.1.1 Audit system file permissions. (Manual) - Not Implemented + # 6.1.2 Ensure sticky bit is set on all world-writable directories. 
(Automated) - Not Implemented ############################################### # 6 System Maintenance @@ -3183,224 +4472,235 @@ checks: ############################################### # 6.1 System File Permissions ############################################### - # 6.1.2 Configure /etc/passwd permissions (Scored) - - id: 5174 - title: "Ensure permissions on /etc/passwd are configured" + + # 6.1.3 Ensure permissions on /etc/passwd are configured. (Automated) + - id: 5204 + title: "Ensure permissions on /etc/passwd are configured." description: "The /etc/passwd file contains user account information that is used by many system utilities and therefore must be readable for these utilities to operate." rationale: "It is critical to ensure that the /etc/passwd file is protected from unauthorized write access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." - remediation: "Run the following command to set permissions on /etc/passwd: # chown root:root /etc/passwd # chmod 644 /etc/passwd" + remediation: "Run the following command to set permissions on /etc/passwd: # chown root:root /etc/passwd # chmod 644 /etc/passwd." compliance: - - cis: ["6.1.2"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["6.1.3"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/passwd -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 6.1.4 Configure /etc/shadow permissions (Scored) - - id: 5175 - title: "Ensure permissions on /etc/shadow are configured" + # 6.1.4 Ensure permissions on /etc/shadow are configured. (Automated) + - id: 5205 + title: "Ensure permissions on /etc/shadow are configured." description: "The /etc/shadow file is used to store the information about user accounts that is critical to the security of those accounts, such as the hashed password and other security information." rationale: "If attackers can gain read access to the /etc/shadow file, they can easily run a password cracking program against the hashed password to break it. Other security information that is stored in the /etc/shadow file (such as expiration) could also be useful to subvert the user accounts." - remediation: "Run the following command to set permissions on /etc/ shadow: # chown root:root /etc/shadow # chown root:shadow /etc/shadow # chmod o-rwx,g-wx /etc/shadow" + remediation: "Run the following commands to set owner, group, and permissions on /etc/shadow: # chown root:root /etc/shadow # chmod 0000 /etc/shadow." 
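As a hedged illustration only, not part of the policy file: the stat-based rules in these file-permission checks assert an owner, group, and mode condition. For the /etc/passwd check above, a rough standalone equivalent in Python, mirroring the chown root:root and chmod 644 remediation, is:

    import os
    import stat

    st = os.stat('/etc/passwd')

    # Expected by the rule: mode 0644, owned by user root and group root.
    assert stat.S_IMODE(st.st_mode) == 0o644, oct(stat.S_IMODE(st.st_mode))
    assert st.st_uid == 0 and st.st_gid == 0, (st.st_uid, st.st_gid)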
compliance: - cis: ["6.1.4"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/shadow -> r:Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)|Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*\w*/\s*\t*shadow\)' - # 6.1.8 Configure /etc/group permissions (Scored) - - id: 5176 - title: "Ensure permissions on /etc/group are configured" + # 6.1.5 Ensure permissions on /etc/group are configured. (Automated) + - id: 5206 + title: "Ensure permissions on /etc/group are configured." description: "The /etc/group file contains a list of all the valid groups defined in the system. The command below allows read/write access for root and read access for everyone else." rationale: "The /etc/group file needs to be protected from unauthorized changes by non-privileged users, but needs to be readable as this information is used with many non-privileged programs." - remediation: "Run the following command to set permissions on /etc/group: # chown root:root /etc/group # chmod 644 /etc/group" + remediation: "Run the following commands to set owner, group, and permissions on /etc/group: # chown root:root /etc/group # chmod u-x,g-wx,o-wx /etc/group." compliance: - - cis: ["6.1.8"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["6.1.5"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/group -> r:Access:\s*\(0644/-rw-r--r--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 6.1.6 Configure /etc/gshadow permissions (Scored) - - id: 5177 - title: "Ensure permissions on /etc/gshadow are configured" + # 6.1.6 Ensure permissions on /etc/gshadow are configured. (Automated) + - id: 5207 + title: "Ensure permissions on /etc/gshadow are configured." description: "The /etc/gshadow file is used to store the information about groups that is critical to the security of those accounts, such as the hashed password and other security information." - rationale: "If attackers can gain read access to the /etc/gshadow file, they can easily run a password cracking program against the hashed password to break it. Other security information that is stored in the /etc/gshadow file (such as group administrators) could also be useful to subvert the group" - remediation: "Run the following command to set permissions on /etc/gshadow: # chown root:root /etc/gshadow # chown root:shadow /etc/gshadow # chmod o-rwx,g-rw /etc/gshadow" + rationale: "If attackers can gain read access to the /etc/gshadow file, they can easily run a password cracking program against the hashed password to break it. 
Other security information that is stored in the /etc/gshadow file (such as group administrators) could also be useful to subvert the group." + remediation: "Run the following commands to set owner, group, and permissions on /etc/gshadow # chown root:root /etc/gshadow # chmod 0000 /etc/gshadow." compliance: - cis: ["6.1.6"] - - cis_csc: ["16.14"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/gshadow -> r:Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)|Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*\w*/\s*\t*shadow\)' - # 6.1.3 Configure /etc/passwd- permissions (Scored) - - id: 5178 - title: "Ensure permissions on /etc/passwd- are configured" + # 6.1.7 Ensure permissions on /etc/passwd- are configured. (Automated) + - id: 5208 + title: "Ensure permissions on /etc/passwd- are configured." description: "The /etc/passwd- file contains backup user account information." rationale: "It is critical to ensure that the /etc/passwd- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." - remediation: "Run the following command to set permissions on /etc/passwd-: # chown root:root /etc/passwd- # chmod 600 /etc/passwd-" + remediation: "Run the following commands to set owner, group, and permissions on /etc/passwd-: # chown root:root /etc/passwd- # chmod chmod u-x,go-wx /etc/passwd-." compliance: - - cis: ["6.1.3"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["6.1.7"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/passwd- -> r:Access:\s*\(0\d00/-\w\w-------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 6.1.5 Configure /etc/shadow- permissions (Scored) - - id: 5179 - title: "Ensure permissions on /etc/shadow- are configured" + # 6.1.8 Ensure permissions on /etc/shadow- are configured. (Automated) + - id: 5209 + title: "Ensure permissions on /etc/shadow- are configured." description: "The /etc/shadow- file is used to store backup information about user accounts that is critical to the security of those accounts, such as the hashed password and other security information." rationale: "It is critical to ensure that the /etc/shadow- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." 
- remediation: "Run the following command to set permissions on /etc/shadow-: # chown root:shadow /etc/shadow- # chmod u-x,go-rwx /etc/shadow-" + remediation: "Run the following commands to set owner, group, and permissions on /etc/shadow- : # chown root:root /etc/shadow- # chmod 0000 /etc/shadow-." compliance: - - cis: ["6.1.5"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["6.1.8"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/shadow- -> r:Access:\s*\(0\d00/-\w\w-------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)|Access:\s*\(0\d00/-\w\w-------\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*\w*/\s*\t*shadow\)' - # 6.1.9 Configure /etc/group- permissions (Scored) - - id: 5180 - title: "Ensure permissions on /etc/group- are configured" + # 6.1.9 Ensure permissions on /etc/group- are configured. (Automated) + - id: 5210 + title: "Ensure permissions on /etc/group- are configured." description: "The /etc/group- file contains a backup list of all the valid groups defined in the system." rationale: "It is critical to ensure that the /etc/group- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." - remediation: "Run the following command to set permissions on /etc/group-: # chown root:root /etc/group- # chmod 644 /etc/group-" + remediation: "Run the following commands to set owner, group, and permissions on /etc/group-: # chown root:root /etc/group- # chmod u-x,go-wx /etc/group-." compliance: - cis: ["6.1.9"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/group- -> r:Access:\s*\(0\d\d\d/-\w\w-\w--\w--\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)' - # 6.1.7 Configure /etc/gshadow- permissions (Scored) - - id: 5181 - title: "Ensure permissions on /etc/gshadow- are configured" + # 6.1.10 Ensure permissions on /etc/gshadow- are configured. (Automated) + - id: 5211 + title: "Ensure permissions on /etc/gshadow- are configured." description: "The /etc/gshadow- file is used to store backup information about groups that is critical to the security of those accounts, such as the hashed password and other security information." rationale: "It is critical to ensure that the /etc/gshadow- file is protected from unauthorized access. Although it is protected by default, the file permissions could be changed either inadvertently or through malicious actions." 
- remediation: "Run the one of the following chown commands as appropriate and the chmod to set permissions on /etc/gshadow- : # chown root:root /etc/gshadow- # chown root:shadow /etc/gshadow- # chmod o-rwx,g-rw /etc/gshadow-" + remediation: "Run the following commands to set owner, group, and permissions on /etc/gshadow- : # chown root:root /etc/gshadow- # chmod 0000 /etc/gshadow-." compliance: - - cis: ["6.1.7"] - - cis_csc: ["16.4"] - - pci_dss: ["2.2.4"] - - nist_800_53: ["CM.1"] - - tsc: ["CC5.2"] + - cis: ["6.1.10"] + - cis_csc_v8: ["3.3"] + - cis_csc_v7: ["16.4"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "AC.L2-3.1.3", "AC.L2-3.1.5", "MP.L2-3.8.2"] + - hipaa: ["164.308(a)(3)(i)", "164.308(a)(3)(ii)(A)", "164.312(a)(1)"] + - iso_27001-2013: ["A.10.1.1"] + - nist_sp_800-53: ["AC-5", "AC-6"] + - pci_dss_v3.2.1: ["7.1", "7.1.1", "7.1.2", "7.1.3"] + - pci_dss_v4.0: ["1.3.1", "7.1"] + - soc_2: ["CC5.2", "CC6.1"] condition: all rules: - 'c:stat -L /etc/gshadow- -> r:Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*0/\s*\t*root\)|Access:\s*\(0\d\d0/-\w\w-\w-----\)\s*Uid:\s*\(\s*\t*0/\s*\t*root\)\s*\t*Gid:\s*\(\s*\t*\w*/\s*\t*shadow\)' + # 6.1.11 Ensure no world writable files exist. (Automated) - Not Implemented + # 6.1.12 Ensure no unowned files or directories exist. (Automated) - Not Implemented + # 6.1.13 Ensure no ungrouped files or directories exist. (Automated) - Not Implemented + # 6.1.14 Audit SUID executables. (Manual) - Not Implemented + # 6.1.15 Audit SGID executables. (Manual) - Not Implemented + ############################################### # 6.2 Review User and Group Settings ############################################### - # 6.2.1 Check passwords fields (Scored) - - id: 5182 - title: "Ensure password fields are not empty" + + # 6.2.1 Ensure password fields are not empty. (Automated) + - id: 5212 + title: "Ensure password fields are not empty." description: "An account with an empty password field means that anybody may log in as that user without providing a password." rationale: "All accounts must have passwords or be locked to prevent the account from being used by an unauthorized user." - remediation: "If any accounts in the /etc/shadow file do not have a password, run the following command to lock the account until it can be determined why it does not have a password: passwd -l || Also, check to see if the account is logged in and investigate what it is being used for to determine if it needs to be forced off." + remediation: "If any accounts in the /etc/shadow file do not have a password, run the following command to lock the account until it can be determined why it does not have a password: # passwd -l Also, check to see if the account is logged in and investigate what it is being used for to determine if it needs to be forced off." compliance: - cis: ["6.2.1"] - - cis_csc: ["4.4"] - - pci_dss: ["8.2"] - - tsc: ["CC6.1"] + - cis_csc_v8: ["5.2"] + - cis_csc_v7: ["4.4"] + - cmmc_v2.0: ["IA.L2-3.5.7"] + - iso_27001-2013: ["A.9.4.3"] + - pci_dss_v4.0: ["2.2.2", "8.3.5", "8.3.6", "8.6.3"] + - soc_2: ["CC6.1"] condition: none rules: - 'f:/etc/shadow -> !r:^# && r:^\w+::' - # 6.2.2 Delete legacy entries in /etc/passwd (Scored) - - id: 5183 - title: 'Ensure no legacy "+" entries exist in /etc/passwd' - description: "The character + in various files used to be markers for systems to insert data from NIS maps at a certain point in a system configuration file. 
These entries are no longer required on most systems, but may exist in files that have been imported from other platforms." - rationale: "These entries may provide an avenue for attackers to gain privileged access on the system." - remediation: "Remove any legacy '+' entries from /etc/passwd if they exist." - compliance: - - cis: ["6.2.2"] - - cis_csc: ["16.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: none - rules: - - "f:/etc/passwd -> !r:^# && r:^+:" - - # 6.2.4 Delete legacy entries in /etc/shadow (Scored) - - id: 5184 - title: 'Ensure no legacy "+" entries exist in /etc/shadow' - description: "The character + in various files used to be markers for systems to insert data from NIS maps at a certain point in a system configuration file. These entries are no longer required on most systems, but may exist in files that have been imported from other platforms." - rationale: "These entries may provide an avenue for attackers to gain privileged access on the system." - remediation: "Remove any legacy '+' entries from /etc/shadow if they exist." - compliance: - - cis: ["6.2.4"] - - cis_csc: ["16.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: none - rules: - - "f:/etc/shadow -> !r:^# && r:^+:" - - # 6.2.5 Delete legacy entries in /etc/group (Scored) - - id: 5185 - title: 'Ensure no legacy "+" entries exist in /etc/group' - description: "The character + in various files used to be markers for systems to insert data from NIS maps at a certain point in a system configuration file. These entries are no longer required on most systems, but may exist in files that have been imported from other platforms." - rationale: "These entries may provide an avenue for attackers to gain privileged access on the system." - remediation: "Remove any legacy '+' entries from /etc/group if they exist." - compliance: - - cis: ["6.2.5"] - - cis_csc: ["16.2"] - - pci_dss: ["2.2.3"] - - nist_800_53: ["CM.1"] - - gpg_13: ["4.3"] - - gdpr_IV: ["35.7.d"] - - hipaa: ["164.312.b"] - - tsc: ["CC5.2"] - condition: none - rules: - - "f:/etc/group -> !r:^# && r:^+:" + # 6.2.2 Ensure all groups in /etc/passwd exist in /etc/group. (Automated) - Not Implemented + # 6.2.3 Ensure no duplicate UIDs exist. (Automated) - Not Implemented + # 6.2.4 Ensure no duplicate GIDs exist. (Automated) - Not Implemented + # 6.2.5 Ensure no duplicate user names exist. (Automated) - Not Implemented + # 6.2.6 Ensure no duplicate group names exist. (Automated) - Not Implemented + # 6.2.7 Ensure root PATH Integrity. (Automated) - Not Implemented - # 6.2.6 Verify No UID 0 Accounts Exist Other Than root (Scored) - - id: 5186 - title: "Ensure root is the only UID 0 account" + # 6.2.8 Ensure root is the only UID 0 account. (Automated) + - id: 5213 + title: "Ensure root is the only UID 0 account." description: "Any account with UID 0 has superuser privileges on the system." - rationale: "This access must be limited to only the default root account and only from the system console. Administrative access must be through an unprivileged account using an approved mechanism as noted in Item 5.6 Ensure access to the su command is restricted." + rationale: 'This access must be limited to only the default root account and only from the system console. 
Administrative access must be through an unprivileged account using an approved mechanism as noted in recommendation "Ensure access to the su command is restricted".' remediation: "Remove any users other than root with UID 0 or assign them a new UID if appropriate." compliance: - - cis: ["6.2.6"] - - cis_csc: ["5.1"] - - pci_dss: ["10.2.5"] - - hipaa: ["164.312.b"] - - nist_800_53: ["AU.14", "AC.7"] - - gpg_13: ["7.8"] - - gdpr_IV: ["35.7", "32.2"] - - tsc: ["CC6.1", "CC6.8", "CC7.2", "CC7.3", "CC7.4"] + - cis: ["6.2.8"] + - cis_csc_v8: ["4.1"] + - cis_csc_v7: ["5.1"] + - cmmc_v2.0: ["AC.L1-3.1.1", "AC.L1-3.1.2", "CM.L2-3.4.1", "CM.L2-3.4.2", "CM.L2-3.4.6", "CM.L2-3.4.7"] + - iso_27001-2013: ["A.14.2.5", "A.8.1.3"] + - nist_sp_800-53: ["CM-7(1)", "CM-9", "SA-10"] + - pci_dss_v3.2.1: ["11.5", "2.2"] + - pci_dss_v4.0: ["1.1.1", "1.2.1", "1.2.6", "1.2.7", "1.5.1", "2.1.1", "2.2.1"] + - soc_2: ["CC7.1", "CC8.1"] condition: none rules: - 'f:/etc/passwd -> !r:^# && !r:^\s*\t*root: && r:^\w+:\w+:0:' + + # 6.2.9 Ensure all users' home directories exist. (Automated) - Not Implemented + # 6.2.10 Ensure users own their home directories. (Automated) - Not Implemented + # 6.2.11 Ensure users' home directories permissions are 750 or more restrictive. (Automated) - Not Implemented + # 6.2.12 Ensure users' dot files are not group or world writable. (Automated) - Not Implemented + # 6.2.13 Ensure users' .netrc Files are not group or world accessible. (Automated) - Not Implemented + # 6.2.14 Ensure no users have .forward files. (Automated) - Not Implemented + # 6.2.15 Ensure no users have .netrc files. (Automated) - Not Implemented + # 6.2.16 Ensure no users have .rhosts files. (Automated) - Not Implemented From c084c7c63c53f382953e648712a2bef6ffbb8b0b Mon Sep 17 00:00:00 2001 From: Tomas Turina Date: Fri, 31 May 2024 16:02:28 +0000 Subject: [PATCH 386/419] Fix wazuh DB IT test_wazuh_db_messages_agent after latest syscollector changes --- .../test_databases/data/test_cases/cases_agent_messages.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_wazuh_db/test_databases/data/test_cases/cases_agent_messages.yaml b/tests/integration/test_wazuh_db/test_databases/data/test_cases/cases_agent_messages.yaml index cacd835d120..f634cb9fdc1 100644 --- a/tests/integration/test_wazuh_db/test_databases/data/test_cases/cases_agent_messages.yaml +++ b/tests/integration/test_wazuh_db/test_databases/data/test_cases/cases_agent_messages.yaml @@ -803,7 +803,7 @@ output: 'ok []' stage: "agent sync_info set synced" - input: 'agent 003 package get' - output: ['due {"name":"test_deb_pkg","version":"1.0.0","architecture":"amd64","vendor":"Wazuh wazuh@wazuh.com","format":"deb","item_id":"1"}', 'due {"name":"test_rpm_pkg","version":"1.0.0","architecture":"amd64","vendor":"Wazuh wazuh@wazuh.com","format":"rpm","item_id":"1"}', 'ok {"status":"SUCCESS"}'] + output: ['due {"name":"test_deb_pkg","version":"1.0.0","architecture":"amd64","vendor":"Wazuh wazuh@wazuh.com","format":"deb","item_id":"1","description":"Test package","location":"","size":7490}', 'due {"name":"test_rpm_pkg","version":"1.0.0","architecture":"amd64","vendor":"Wazuh wazuh@wazuh.com","format":"rpm","item_id":"1","description":"Test package","location":"","size":7490}', 'ok {"status":"SUCCESS"}'] stage: "agent sys_programs getting not all packages" - name: Synchronization value From 8d719ff47e8f41db133ad9208a1773e821795923 Mon Sep 17 00:00:00 2001 From: Santiago Vendramini Date: Tue, 16 Apr 2024 11:36:59 -0300 Subject: [PATCH 
387/419] Test for process_priority added. Fixed the way files are deleted after tests. Move some names to framework as constatns. --- tests/integration/test_fim/conftest.py | 30 +++- .../test_files/test_max_eps/test_max_eps.py | 28 +++- .../test_process_priority/__init__.py | 10 ++ .../configuration_process_priority.yaml | 11 ++ .../test_cases/cases_process_priority.yaml | 143 ++++++++++++++++ .../test_process_priority.py | 156 ++++++++++++++++++ 6 files changed, 360 insertions(+), 18 deletions(-) create mode 100644 tests/integration/test_fim/test_files/test_process_priority/__init__.py create mode 100644 tests/integration/test_fim/test_files/test_process_priority/data/configuration_templates/configuration_process_priority.yaml create mode 100644 tests/integration/test_fim/test_files/test_process_priority/data/test_cases/cases_process_priority.yaml create mode 100644 tests/integration/test_fim/test_files/test_process_priority/test_process_priority.py diff --git a/tests/integration/test_fim/conftest.py b/tests/integration/test_fim/conftest.py index fb4dd4b8b5e..5a1d9dbf954 100644 --- a/tests/integration/test_fim/conftest.py +++ b/tests/integration/test_fim/conftest.py @@ -15,7 +15,7 @@ from pathlib import Path from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH -from wazuh_testing.constants.platforms import WINDOWS +from wazuh_testing.constants.platforms import WINDOWS, MACOS, CENTOS, UBUNTU, DEBIAN from wazuh_testing.modules.fim.patterns import MONITORING_PATH from wazuh_testing.tools.monitors.file_monitor import FileMonitor from wazuh_testing.tools.simulators.authd_simulator import AuthdSimulator @@ -59,17 +59,31 @@ def fill_folder_to_monitor(test_metadata: dict) -> None: path = test_metadata.get('folder_to_monitor') amount = test_metadata.get('files_amount') amount = 2 if not amount else amount + max_retries = 3 + retry_delay = 1 if not file.exists(path): file.recursive_directory_creation(path) [file.write_file(Path(path, f'test{i}.log'), 'content') for i in range(amount)] - # file.write_file(Path(path, f'test{i}.log'), 'content') yield - [file.remove_file(Path(path, f'test{i}.log')) for i in range(amount)] - + for i in range(amount): + retry_count = 0 + while retry_count < max_retries: + try: + file.remove_file(Path(path, f'test{i}.log')) + break + except Exception as e: + print(f"Error deleting file {i}: {e}") + retry_count += 1 + if retry_count == max_retries: + print(f"Failed to delete file {i} after {max_retries} attempts.") + break + else: + print(f"Retrying in {retry_delay} seconds...") + sleep(retry_delay) @pytest.fixture() def start_monitoring() -> None: @@ -97,19 +111,17 @@ def set_agent_config(request: pytest.FixtureRequest): @pytest.fixture(scope='session', autouse=True) def install_audit(): """Automatically install auditd before test session on linux distros.""" - if sys.platform == WINDOWS: + if sys.platform == WINDOWS or sys.platform == MACOS: return # Check distro linux_distro = distro.id() - if re.match(linux_distro, "darwin"): - return - elif re.match(linux_distro, "centos"): + if re.match(linux_distro, CENTOS): package_management = "yum" audit = "audit" option = "--assumeyes" - elif re.match(linux_distro, "ubuntu") or re.match(linux_distro, "debian"): + elif re.match(linux_distro, UBUNTU) or re.match(linux_distro, DEBIAN): package_management = "apt-get" audit = "auditd" option = "--yes" diff --git a/tests/integration/test_fim/test_files/test_max_eps/test_max_eps.py b/tests/integration/test_fim/test_files/test_max_eps/test_max_eps.py index 
c62b1250f6a..cfe47cb9d54 100644 --- a/tests/integration/test_fim/test_files/test_max_eps/test_max_eps.py +++ b/tests/integration/test_fim/test_files/test_max_eps/test_max_eps.py @@ -112,24 +112,34 @@ def test_max_eps(test_configuration, test_metadata, configure_local_internal_opt tier: 1 parameters: - - get_configuration: + - test_configuration: + type: dict + brief: Configuration values for ossec.conf. + - test_metadata: + type: dict + brief: Test case data. + - configure_local_internal_options: type: fixture - brief: Get configurations from the module. - - configure_environment: + brief: Set local_internal_options.conf file. + - truncate_monitored_files: type: fixture - brief: Configure a custom environment for testing. - - restart_syscheckd: + brief: Truncate all the log files and json alerts files before and after the test execution. + - set_wazuh_configuration: type: fixture - brief: Clear the 'ossec.log' file and start a new monitor. - - wait_for_fim_start: + brief: Set ossec.conf configuration. + - folder_to_monitor: + type: str + brief: Folder created for monitoring. + - daemons_handler: type: fixture - brief: Wait for realtime start, whodata start, or end of initial FIM scan. + brief: Handler of Wazuh daemons. + assertions: - Verify that FIM events are generated for each testing file created. - Verify that the eps limit set in the 'max_eps' tag has not been exceeded at generating FIM events. - input_description: A test case (max_eps) is contained in external YAML file (wazuh_conf.yaml) which + input_description: A test case (max_eps) is contained in external YAML file (cases_max_eps.yaml) which includes configuration settings for the 'wazuh-syscheckd' daemon and, these are combined with the testing directory to be monitored defined in the module. diff --git a/tests/integration/test_fim/test_files/test_process_priority/__init__.py b/tests/integration/test_fim/test_files/test_process_priority/__init__.py new file mode 100644 index 00000000000..be20efa8ec4 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_process_priority/__init__.py @@ -0,0 +1,10 @@ +# Copyright (C) 2015-2024, Wazuh Inc. +# Created by Wazuh, Inc. . 
+# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +from pathlib import Path + + +# Constants & base paths +DATA_PATH = Path(Path(__file__).parent, 'data') +CONFIGS_PATH = Path(DATA_PATH, 'configuration_templates') +TEST_CASES_PATH = Path(DATA_PATH, 'test_cases') diff --git a/tests/integration/test_fim/test_files/test_process_priority/data/configuration_templates/configuration_process_priority.yaml b/tests/integration/test_fim/test_files/test_process_priority/data/configuration_templates/configuration_process_priority.yaml new file mode 100644 index 00000000000..59623386322 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_process_priority/data/configuration_templates/configuration_process_priority.yaml @@ -0,0 +1,11 @@ +- sections: + - section: syscheck + elements: + - disabled: + value: 'no' + - directories: + value: TEST_DIR + attributes: + - FIM_MODE + - process_priority: + value: PROCESS_PRIORITY diff --git a/tests/integration/test_fim/test_files/test_process_priority/data/test_cases/cases_process_priority.yaml b/tests/integration/test_fim/test_files/test_process_priority/data/test_cases/cases_process_priority.yaml new file mode 100644 index 00000000000..4047875b2c3 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_process_priority/data/test_cases/cases_process_priority.yaml @@ -0,0 +1,143 @@ +- name: process_priority_0_scheduled + description: Check if configured process priority is correct + configuration_parameters: + TEST_DIR: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + FIM_MODE: + realtime: 'no' + whodata: 'no' + MAX_EPS: 10 + PROCESS_PRIORITY: 0 + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + priority: 0 + fim_mode: 'scheduled' + +- name: process_priority_0_realtime + description: Check if configured process priority is correct + configuration_parameters: + TEST_DIR: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + FIM_MODE: + realtime: 'yes' + whodata: 'no' + MAX_EPS: 10 + PROCESS_PRIORITY: 0 + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + priority: 0 + fim_mode: 'realtime' + +- name: process_priority_0_whodata + description: Check if configured process priority is correct + configuration_parameters: + TEST_DIR: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + FIM_MODE: + realtime: 'no' + whodata: 'yes' + MAX_EPS: 10 + PROCESS_PRIORITY: 0 + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + priority: 0 + fim_mode: 'whodata' + +- name: process_priority_4_scheduled + description: Check if configured process priority is correct + configuration_parameters: + TEST_DIR: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + FIM_MODE: + realtime: 'no' + whodata: 'no' + MAX_EPS: 10 + PROCESS_PRIORITY: 4 + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + priority: 4 + fim_mode: 'scheduled' + +- name: process_priority_4_realtime + description: Check if configured process priority is correct + configuration_parameters: + TEST_DIR: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + FIM_MODE: + realtime: 'yes'
whodata: 'no' + MAX_EPS: 10 + PROCESS_PRIORITY: 4 + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + priority: 4 + fim_mode: 'realtime' + +- name: process_priority_4_whodata + description: Check if configured process priority is correct + configuration_parameters: + TEST_DIR: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + FIM_MODE: + realtime: 'no' + whodata: 'yes' + MAX_EPS: 10 + PROCESS_PRIORITY: 4 + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + priority: 4 + fim_mode: 'whodata' + +- name: process_priority_-5_scheduled + description: Check if configured process priority is correct + configuration_parameters: + TEST_DIR: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + FIM_MODE: + realtime: 'no' + whodata: 'no' + MAX_EPS: 10 + PROCESS_PRIORITY: -5 + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + priority: -5 + fim_mode: 'scheduled' + +- name: process_priority_-5_realtime + description: Check if configured process priority is correct + configuration_parameters: + TEST_DIR: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + FIM_MODE: + realtime: 'yes' + whodata: 'no' + MAX_EPS: 10 + PROCESS_PRIORITY: -5 + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + priority: -5 + fim_mode: 'realtime' + +- name: process_priority_-5_whodata + description: Check if configured process priority is correct + configuration_parameters: + TEST_DIR: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + FIM_MODE: + realtime: 'no' + whodata: 'yes' + MAX_EPS: 10 + PROCESS_PRIORITY: -5 + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + priority: -5 + fim_mode: 'whodata' \ No newline at end of file diff --git a/tests/integration/test_fim/test_files/test_process_priority/test_process_priority.py b/tests/integration/test_fim/test_files/test_process_priority/test_process_priority.py new file mode 100644 index 00000000000..94f83389917 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_process_priority/test_process_priority.py @@ -0,0 +1,156 @@ +''' +copyright: Copyright (C) 2015-2024, Wazuh Inc. + + Created by Wazuh, Inc. . + + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggers alerts when + these files are modified. Specifically, these tests will check if the process priority of + the 'wazuh-syscheckd' daemon set in the 'process_priority' tag is applied successfully. + The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks + configured files for changes to the checksums, permissions, and ownership.
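A side note on the cases_process_priority.yaml file added above: TEST_DIR and folder_to_monitor use PyYAML's python/object/apply tags, so each path is produced by calling os.path.join and os.getcwd while the YAML is loaded. The framework helper get_test_cases_data presumably enables such tags; the UnsafeLoader in this standalone sketch is an assumption for illustration only.

    import yaml

    case_yaml = "TEST_DIR: !!python/object/apply:os.path.join\n  args: [/tmp, testdir1]\n"

    # FullLoader rejects python/object/apply tags; an unsafe loader resolves
    # them by importing os.path.join and calling it with the listed args.
    print(yaml.load(case_yaml, Loader=yaml.UnsafeLoader))  # {'TEST_DIR': '/tmp/testdir1'}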
+ +components: + - fim + +suite: files_process_priority + +targets: + - agent + +daemons: + - wazuh-syscheckd + +os_platform: + - linux + - macos + +os_version: + - Arch Linux + - Amazon Linux 2 + - Amazon Linux 1 + - CentOS 8 + - CentOS 7 + - Debian Buster + - Red Hat 8 + - macOS Catalina + - macOS Server + - Ubuntu Focal + - Ubuntu Bionic + - Windows 10 + - Windows Server 2019 + - Windows Server 2016 + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#process-priority + +pytest_args: + - fim_mode: + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. + whodata: Implies real-time monitoring but adding the 'who-data' information. + - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. + 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. + +tags: + - fim_process_priority +''' + +from pathlib import Path + +import sys +import os + +import pytest +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template +from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG +from wazuh_testing.modules.agentd.configuration import AGENTD_DEBUG +from wazuh_testing.constants.platforms import MACOS +from wazuh_testing.utils.services import search_process_by_command +from wazuh_testing.constants.daemons import SYSCHECK_DAEMON + +from . import TEST_CASES_PATH, CONFIGS_PATH + +# Marks + +# Pytest marks to run on any service type on linux or windows. +pytestmark = [pytest.mark.agent, pytest.mark.linux, pytest.mark.darwin, pytest.mark.tier(level=1)] + +# Test metadata, configuration and ids. +cases_path = Path(TEST_CASES_PATH, 'cases_process_priority.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_process_priority.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + +# Set configurations required by the fixtures. +daemons_handler_configuration = {'all_daemons': True} +local_internal_options = {SYSCHECK_DEBUG: 2, AGENTD_DEBUG: 2 } + +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_process_priority(test_configuration, test_metadata, configure_local_internal_options, + truncate_monitored_files, set_wazuh_configuration, folder_to_monitor, daemons_handler): + ''' + description: Check if the process priority of the 'wazuh-syscheckd' daemon set in the 'process_priority' tag + is updated correctly. For this purpose, the test will monitor a testing folder and, once FIM starts, + it will get the priority value from the 'process_priority' tag and the system information of + the 'wazuh-syscheckd' process. Finally, the test will compare the current process priority + with the target priority to verify that they match. + + wazuh_min_version: 4.2.0 + + tier: 1 + + parameters: + - test_configuration: + type: dict + brief: Configuration values for ossec.conf. + - test_metadata: + type: dict + brief: Test case data. + - configure_local_internal_options: + type: fixture + brief: Set local_internal_options.conf file. 
+ - truncate_monitored_files: + type: fixture + brief: Truncate all the log files and json alerts files before and after the test execution. + - set_wazuh_configuration: + type: fixture + brief: Set ossec.conf configuration. + - folder_to_monitor: + type: str + brief: Folder created for monitoring. + - daemons_handler: + type: fixture + brief: Handler of Wazuh daemons. + + assertions: + - Verify that the 'wazuh-syscheckd' daemon is running. + - Verify that the process priority of the 'wazuh-syscheckd' daemon matches the 'process_priority' tag. + + input_description: A test case (ossec_conf) is contained in external YAML file (cases_process_priority.yaml) + which includes configuration settings for the 'wazuh-syscheckd' daemon and, + these are combined with the testing directory to be monitored defined in the module. + + expected_output: + - r'.*Sending FIM event: (.+)$' ('added' events) + - r'.*Ignoring .* due to' + + tags: + - realtime + - scheduled + ''' + + if sys.platform == MACOS and not test_metadata['fim_mode'] == 'scheduled': + pytest.skip(reason="Realtime and whodata are not supported on macos") + + priority = int(test_metadata['priority']) + syscheckd_process = search_process_by_command(SYSCHECK_DAEMON) + + assert syscheckd_process is not None, f'Process {SYSCHECK_DAEMON} not found' + assert (os.getpriority(os.PRIO_PROCESS, syscheckd_process.pid)) == priority, \ + f'Process {SYSCHECK_DAEMON} has not updated its priority.' From b55625168ddce77816921fd7d807b5c73397ca48 Mon Sep 17 00:00:00 2001 From: Santiago Vendramini Date: Fri, 19 Apr 2024 08:52:40 -0300 Subject: [PATCH 388/419] Missing new line added. --- .../data/test_cases/cases_process_priority.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_fim/test_files/test_process_priority/data/test_cases/cases_process_priority.yaml b/tests/integration/test_fim/test_files/test_process_priority/data/test_cases/cases_process_priority.yaml index 4047875b2c3..68c06659135 100644 --- a/tests/integration/test_fim/test_files/test_process_priority/data/test_cases/cases_process_priority.yaml +++ b/tests/integration/test_fim/test_files/test_process_priority/data/test_cases/cases_process_priority.yaml @@ -140,4 +140,4 @@ folder_to_monitor: !!python/object/apply:os.path.join args: [!!python/object/apply:os.getcwd [], testdir1] priority: -5 - fim_mode: 'whodata' \ No newline at end of file + fim_mode: 'whodata' From b17ec5f121653f2940dd11bee0ed95dedfcbd0da Mon Sep 17 00:00:00 2001 From: Santiago Vendramini Date: Fri, 19 Apr 2024 14:11:38 -0300 Subject: [PATCH 389/419] Macos pytest mark deleted --- .../test_files/test_process_priority/test_process_priority.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_fim/test_files/test_process_priority/test_process_priority.py b/tests/integration/test_fim/test_files/test_process_priority/test_process_priority.py index 94f83389917..455a44acf3a 100644 --- a/tests/integration/test_fim/test_files/test_process_priority/test_process_priority.py +++ b/tests/integration/test_fim/test_files/test_process_priority/test_process_priority.py @@ -79,7 +79,7 @@ # Marks # Pytest marks to run on any service type on linux or windows. -pytestmark = [pytest.mark.agent, pytest.mark.linux, pytest.mark.darwin, pytest.mark.tier(level=1)] +pytestmark = [pytest.mark.agent, pytest.mark.linux, pytest.mark.tier(level=1)] # Test metadata, configuration and ids. 
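A side note on the mechanism behind the priority assertion in test_process_priority: os.getpriority reads the nice value of a running process, which the test compares with the process_priority value taken from the case metadata. A minimal, Unix-only sketch, unrelated to the Wazuh helpers, is:

    import os

    # A process usually starts at nice value 0; os.nice() adds the given
    # increment and returns the new value. A "who" of 0 means the caller.
    print(os.getpriority(os.PRIO_PROCESS, os.getpid()))  # typically 0
    print(os.nice(4))                                     # 4 if it started at 0
    print(os.getpriority(os.PRIO_PROCESS, 0))             # same value again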
cases_path = Path(TEST_CASES_PATH, 'cases_process_priority.yaml') From cac7ce7f86992ce6980abd34d96f871db2ddcde1 Mon Sep 17 00:00:00 2001 From: Santiago Vendramini Date: Fri, 19 Apr 2024 14:13:16 -0300 Subject: [PATCH 390/419] Delete macos check in test --- .../test_files/test_process_priority/test_process_priority.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/integration/test_fim/test_files/test_process_priority/test_process_priority.py b/tests/integration/test_fim/test_files/test_process_priority/test_process_priority.py index 455a44acf3a..285f251d987 100644 --- a/tests/integration/test_fim/test_files/test_process_priority/test_process_priority.py +++ b/tests/integration/test_fim/test_files/test_process_priority/test_process_priority.py @@ -145,9 +145,6 @@ def test_process_priority(test_configuration, test_metadata, configure_local_int - scheduled ''' - if sys.platform == MACOS and not test_metadata['fim_mode'] == 'scheduled': - pytest.skip(reason="Realtime and whodata are not supported on macos") - priority = int(test_metadata['priority']) syscheckd_process = search_process_by_command(SYSCHECK_DAEMON) From 3f949efdbc5d97fc5e1c34ce82ae62f8a4b74b11 Mon Sep 17 00:00:00 2001 From: Santiago Vendramini Date: Fri, 19 Apr 2024 16:11:22 -0300 Subject: [PATCH 391/419] Fix name on macOS workflow. Replace OS names in uninstall_audit function. --- .github/workflows/integration-tests-fim-tier-0-1-macos.yml | 2 +- tests/integration/test_fim/test_files/test_audit/conftest.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/integration-tests-fim-tier-0-1-macos.yml b/.github/workflows/integration-tests-fim-tier-0-1-macos.yml index 9bcce9ee77c..d0c343effb9 100644 --- a/.github/workflows/integration-tests-fim-tier-0-1-macos.yml +++ b/.github/workflows/integration-tests-fim-tier-0-1-macos.yml @@ -1,4 +1,4 @@ -name: Integration tests for fim on MacOS - Tier 0 and 1 +name: Integration tests for FIM on MacOS - Tier 0 and 1 on: workflow_dispatch: diff --git a/tests/integration/test_fim/test_files/test_audit/conftest.py b/tests/integration/test_fim/test_files/test_audit/conftest.py index b27c331fda9..8fd62223c93 100644 --- a/tests/integration/test_fim/test_files/test_audit/conftest.py +++ b/tests/integration/test_fim/test_files/test_audit/conftest.py @@ -6,6 +6,7 @@ import pytest import subprocess +from wazuh_testing.constants.platforms import CENTOS, UBUNTU, DEBIAN @pytest.fixture(scope='module') def uninstall_audit(): @@ -14,11 +15,11 @@ def uninstall_audit(): # Check distro linux_distro = distro.id() - if re.match(linux_distro, "centos"): + if re.match(linux_distro, CENTOS): package_management = "yum" audit = "audit" option = "--assumeyes" - elif re.match(linux_distro, "ubuntu") or re.match(linux_distro, "debian"): + elif re.match(linux_distro, UBUNTU) or re.match(linux_distro, DEBIAN): package_management = "apt-get" audit = "auditd" option = "--yes" From d7d70d14de610f92cd8dc30250cb9315548806d7 Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Wed, 8 May 2024 21:45:57 -0300 Subject: [PATCH 392/419] IT fim: Added test_moving_files test. 
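
The new suite moves a file between a directory monitored with whodata and one
monitored with realtime (and vice versa) and checks that the 'mode' field of the
resulting FIM events matches the monitoring mode of each folder. A condensed
sketch of that check, using the same framework helpers the module below imports
(paths are illustrative; the real ones are built from os.getcwd() in the case
files):

    import os

    from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH
    from wazuh_testing.modules.fim.patterns import EVENT_TYPE_DELETED, EVENT_TYPE_ADDED
    from wazuh_testing.modules.fim.utils import get_fim_event_data
    from wazuh_testing.tools.monitors.file_monitor import FileMonitor
    from wazuh_testing.utils.callbacks import generate_callback

    # Move the file from the whodata-monitored folder to the realtime-monitored one.
    os.rename('/testdir1/testfile1.txt', '/testdir2/testfile1.txt')

    monitor = FileMonitor(WAZUH_LOG_PATH)

    # The 'deleted' event must report the mode of the source folder.
    monitor.start(generate_callback(EVENT_TYPE_DELETED), timeout=60)
    assert get_fim_event_data(monitor.callback_result)['mode'] == 'whodata'

    # The 'added' event must report the mode of the destination folder.
    monitor.start(generate_callback(EVENT_TYPE_ADDED), timeout=60)
    assert get_fim_event_data(monitor.callback_result)['mode'] == 'realtime'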
--- .../test_files/test_moving_files/__init__.py | 10 + .../test_files/test_moving_files/conftest.py | 39 ++++ .../configuration_moving_files.yaml | 35 ++++ .../data/test_cases/cases_moving_files.yaml | 33 ++++ .../test_moving_files/test_moving_files.py | 175 ++++++++++++++++++ 5 files changed, 292 insertions(+) create mode 100644 tests/integration/test_fim/test_files/test_moving_files/__init__.py create mode 100644 tests/integration/test_fim/test_files/test_moving_files/conftest.py create mode 100644 tests/integration/test_fim/test_files/test_moving_files/data/configuration_templates/configuration_moving_files.yaml create mode 100644 tests/integration/test_fim/test_files/test_moving_files/data/test_cases/cases_moving_files.yaml create mode 100644 tests/integration/test_fim/test_files/test_moving_files/test_moving_files.py diff --git a/tests/integration/test_fim/test_files/test_moving_files/__init__.py b/tests/integration/test_fim/test_files/test_moving_files/__init__.py new file mode 100644 index 00000000000..be20efa8ec4 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_moving_files/__init__.py @@ -0,0 +1,10 @@ +# Copyright (C) 2015-2024, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +from pathlib import Path + + +# Constants & base paths +DATA_PATH = Path(Path(__file__).parent, 'data') +CONFIGS_PATH = Path(DATA_PATH, 'configuration_templates') +TEST_CASES_PATH = Path(DATA_PATH, 'test_cases') diff --git a/tests/integration/test_fim/test_files/test_moving_files/conftest.py b/tests/integration/test_fim/test_files/test_moving_files/conftest.py new file mode 100644 index 00000000000..3d2db0e788f --- /dev/null +++ b/tests/integration/test_fim/test_files/test_moving_files/conftest.py @@ -0,0 +1,39 @@ +# Copyright (C) 2015-2024, Wazuh Inc. +# Created by Wazuh, Inc. . 
+# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +from pathlib import Path + +import pytest + +from wazuh_testing.utils import file + + +@pytest.fixture() +def create_paths_files(test_metadata: dict) -> str: + to_edit = test_metadata.get('path_or_files_to_create') + + if not isinstance(to_edit, list): + raise TypeError(f"`files` should be a 'list', not a '{type(to_edit)}'") + + created_files = [] + for item in to_edit: + item_path = Path(item) + if item_path.exists(): + raise FileExistsError(f"`{item_path}` already exists.") + + # If file does not have suffixes, consider it a directory + if item_path.suffixes == []: + # Add a dummy file to the target directory to create the directory + created_files.extend(file.create_parent_directories( + Path(item_path).joinpath('dummy.file'))) + else: + created_files.extend(file.create_parent_directories(item_path)) + + file.write_file(file_path=item_path, data='') + created_files.append(item_path) + + yield to_edit + + for item in to_edit: + item_path = Path(item) + file.delete_path_recursively(item_path) diff --git a/tests/integration/test_fim/test_files/test_moving_files/data/configuration_templates/configuration_moving_files.yaml b/tests/integration/test_fim/test_files/test_moving_files/data/configuration_templates/configuration_moving_files.yaml new file mode 100644 index 00000000000..6f859e38f2f --- /dev/null +++ b/tests/integration/test_fim/test_files/test_moving_files/data/configuration_templates/configuration_moving_files.yaml @@ -0,0 +1,35 @@ +- sections: + - section: syscheck + elements: + - disabled: + value: 'no' + - directories: + value: TEST_DIR_1 + attributes: + - whodata: 'yes' + - directories: + value: TEST_DIR_2 + attributes: + - realtime: 'yes' + + - section: sca + elements: + - enabled: + value: 'no' + + - section: rootcheck + elements: + - disabled: + value: 'yes' + + - section: wodle + attributes: + - name: syscollector + elements: + - disabled: + value: 'yes' + + - section: active-response + elements: + - disabled: + value: 'yes' diff --git a/tests/integration/test_fim/test_files/test_moving_files/data/test_cases/cases_moving_files.yaml b/tests/integration/test_fim/test_files/test_moving_files/data/test_cases/cases_moving_files.yaml new file mode 100644 index 00000000000..3728c1ee6f2 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_moving_files/data/test_cases/cases_moving_files.yaml @@ -0,0 +1,33 @@ +- name: moving_files + description: Checks if the event is generated correctly when a file is moved from a whodata directory to realtime. 
+ configuration_parameters: + TEST_DIR_1: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + TEST_DIR_2: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir2] + metadata: + folder_src: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + folder_dst: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir2] + path_or_files_to_create: [!!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir1], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir2], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir1, testfile1.txt]] + filename: 'testfile1.txt' + mod_del_event: 'whodata' + mod_add_event: 'realtime' + +- name: moving_files + description: Checks if the event is generated correctly when a file is moved from a realtime directory to whodata. + configuration_parameters: + TEST_DIR_1: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + TEST_DIR_2: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir2] + metadata: + folder_src: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir2] + folder_dst: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir1] + path_or_files_to_create: [!!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir1], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir2], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir2, testfile1.txt]] + filename: 'testfile1.txt' + mod_del_event: 'realtime' + mod_add_event: 'whodata' diff --git a/tests/integration/test_fim/test_files/test_moving_files/test_moving_files.py b/tests/integration/test_fim/test_files/test_moving_files/test_moving_files.py new file mode 100644 index 00000000000..4ba793c04e7 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_moving_files/test_moving_files.py @@ -0,0 +1,175 @@ +''' +copyright: Copyright (C) 2015-2024, Wazuh Inc. + + Created by Wazuh, Inc. . + + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts + when these files are modified. Specifically, these tests will check if FIM detects + moving files from one directory using the 'whodata' monitoring mode to another using + the 'realtime' monitoring mode and vice versa. + The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured + files for changes to the checksums, permissions, and ownership. + +components: + - fim + +suite: files_moving_files + +targets: + - agent + +daemons: + - wazuh-syscheckd + +os_platform: + - linux + +os_version: + - Arch Linux + - Amazon Linux 2 + - Amazon Linux 1 + - CentOS 8 + - CentOS 7 + - Debian Buster + - Red Hat 8 + - Ubuntu Focal + - Ubuntu Bionic + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#synchronization + +pytest_args: + - fim_mode: + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. + whodata: Implies real-time monitoring but adding the 'who-data' information. 
+ - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. + 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. + +tags: + - fim_moving_files +''' +import os + +from pathlib import Path + +import pytest + +from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH +from wazuh_testing.modules.fim.patterns import EVENT_TYPE_DELETED, EVENT_TYPE_ADDED +from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG +from wazuh_testing.tools.monitors.file_monitor import FileMonitor +from wazuh_testing.utils.callbacks import generate_callback +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template +from wazuh_testing.modules.fim.utils import get_fim_event_data + +from . import TEST_CASES_PATH, CONFIGS_PATH + +# Marks +pytestmark = [pytest.mark.agent, pytest.mark.linux, pytest.mark.tier(level=1)] + + +# Test metadata, configuration and ids. +cases_path = Path(TEST_CASES_PATH, 'cases_moving_files.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_moving_files.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + + +# Set configurations required by the fixtures. +local_internal_options = {SYSCHECK_DEBUG: 2} + + +# Test +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_moving_file_to_whodata(test_configuration, test_metadata, set_wazuh_configuration, truncate_monitored_files, + configure_local_internal_options, create_paths_files, daemons_handler, start_monitoring): + ''' + description: Check if the 'wazuh-syscheckd' daemon detects events when moving files from a directory + monitored by 'whodata' to another monitored by 'realtime' and vice versa. For this purpose, + the test will monitor two folders using both FIM monitoring modes and create a testing file + inside each one. Then, it will rename the testing file of the target folder using the name + of the one inside the source folder. Finally, the test will verify that the FIM events + generated to match the monitoring mode used in the folders. + + wazuh_min_version: 4.2.0 + + tier: 1 + + parameters: + - test_configuration: + type: dict + brief: Configuration values for ossec.conf. + - test_metadata: + type: dict + brief: Test case data. + - set_wazuh_configuration: + type: fixture + brief: Set ossec.conf configuration. + - truncate_monitored_files: + type: fixture + brief: Truncate all the log files and json alerts files before and after the test execution. + - configure_local_internal_options: + type: fixture + brief: Set local_internal_options.conf file. + - create_paths_files: + type: list + brief: Create the required directory or file to edit. + - daemons_handler: + type: fixture + brief: Handler of Wazuh daemons. + - start_monitoring: + type: fixture + brief: Wait FIM to start. + + assertions: + - Verify that the 'mode' field in FIM 'deleted' events match with one used + in the source folder of moved files. + - Verify that the 'mode' field in FIM 'added' events match with one used + in the target folder of moved files. 
+ + input_description: A test case is contained in external YAML files (configuration_moving_files.yaml, cases_moving_files.yaml) + which includes configuration settings for the 'wazuh-syscheckd' daemon and, these are + combined with the testing directories to be monitored defined in the module. + + expected_output: + - r'.*Sending FIM event: (.+)$' ('added' and 'deleted' events) + + tags: + - realtime + - who_data + ''' + dirsrc = test_metadata.get('folder_src') + dirdst = test_metadata.get('folder_dst') + filename = test_metadata.get('filename') + mod_del_event = test_metadata.get('mod_del_event') + mod_add_event = test_metadata.get('mod_add_event') + + wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) + os.rename(os.path.join(dirsrc, filename), os.path.join(dirdst, filename)) + + # Check event 'delete' + wazuh_log_monitor.start(generate_callback(EVENT_TYPE_DELETED), timeout=60) + callback_result = wazuh_log_monitor.callback_result + assert callback_result + + event_data = get_fim_event_data(callback_result) + assert event_data.get('path') == os.path.join(dirsrc, filename), 'Event path not equal' + assert event_data.get('type') == 'deleted', 'Event type not equal' + assert event_data.get('mode') == mod_del_event, 'FIM mode not equal' + + # Check event 'add' + wazuh_log_monitor.start(generate_callback(EVENT_TYPE_ADDED), timeout=60) + callback_result = wazuh_log_monitor.callback_result + assert callback_result + + event_data = get_fim_event_data(callback_result) + assert event_data.get('path') == os.path.join(dirdst, filename), 'Event path not equal' + assert event_data.get('type') == 'added', 'Event type not equal' + assert event_data.get('mode') == mod_add_event, 'FIM mode not equal' From 5c9c7465b8476facebc68300d3a97e84f9dd3d70 Mon Sep 17 00:00:00 2001 From: Santiago Vendramini Date: Tue, 14 May 2024 14:26:02 -0300 Subject: [PATCH 393/419] Test cases for windows_system_folder_redirection suite added. --- .../__init__.py | 10 ++ ...ion_windows_system_folder_redirection.yaml | 32 ++++ ...ses_windows_system_folder_redirection.yaml | 134 +++++++++++++++ .../test_windows_system_folder_redirection.py | 158 ++++++++++++++++++ 4 files changed, 334 insertions(+) create mode 100644 tests/integration/test_fim/test_files/test_windows_system_folder_redirection/__init__.py create mode 100644 tests/integration/test_fim/test_files/test_windows_system_folder_redirection/configuration_templates/configuration_windows_system_folder_redirection.yaml create mode 100644 tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_cases/cases_windows_system_folder_redirection.yaml create mode 100644 tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_windows_system_folder_redirection.py diff --git a/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/__init__.py b/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/__init__.py new file mode 100644 index 00000000000..be20efa8ec4 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/__init__.py @@ -0,0 +1,10 @@ +# Copyright (C) 2015-2024, Wazuh Inc. +# Created by Wazuh, Inc. . 
+# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +from pathlib import Path + + +# Constants & base paths +DATA_PATH = Path(Path(__file__).parent, 'data') +CONFIGS_PATH = Path(DATA_PATH, 'configuration_templates') +TEST_CASES_PATH = Path(DATA_PATH, 'test_cases') diff --git a/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/configuration_templates/configuration_windows_system_folder_redirection.yaml b/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/configuration_templates/configuration_windows_system_folder_redirection.yaml new file mode 100644 index 00000000000..86d9a8db1a8 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/configuration_templates/configuration_windows_system_folder_redirection.yaml @@ -0,0 +1,32 @@ +- sections: + - section: syscheck + elements: + - disabled: + value: 'no' + - frequency: + value: INTERVAL + - directories: + value: TEST_DIRECTORIES + attributes: + - realtime: REALTIME + - whodata: WHODATA + - recursion_level: 0 + - windows_audit_interval: + value: 500 + + - section: sca + elements: + - enabled: + value: 'no' + + - section: rootcheck + elements: + - disabled: + value: 'yes' + + - section: wodle + attributes: + - name: syscollector + elements: + - disabled: + value: 'yes' diff --git a/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_cases/cases_windows_system_folder_redirection.yaml b/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_cases/cases_windows_system_folder_redirection.yaml new file mode 100644 index 00000000000..b6a77fd7f14 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_cases/cases_windows_system_folder_redirection.yaml @@ -0,0 +1,134 @@ +- name: monitor /Windows/System32 - scheduled + description: Monitor the System32 folder without redirection in Scheduled mode + configuration_parameters: + INTERVAL: 3 + REALTIME: 'no' + WHODATA: 'no' + TEST_DIRECTORIES: '%WINDIR%\System32\testdir1' + fim_mode: scheduled + metadata: + folder: system32 + fim_mode: scheduled + redirected: false + folder_to_monitor: !!python/object/apply:os.path.join + args: [\%WINDIR\%, System32, testdir1] + +- name: monitor /Windows/System32 - realtime + description: Monitor the System32 folder without redirection in Realtime mode + configuration_parameters: + INTERVAL: 10000 + REALTIME: 'yes' + WHODATA: 'no' + TEST_DIRECTORIES: '%WINDIR%\System32\testdir1' + fim_mode: realtime + metadata: + folder: system32 + fim_mode: realtime + redirected: false + folder_to_monitor: !!python/object/apply:os.path.join + args: [\%WINDIR\%, System32, testdir1] + +- name: monitor /Windows/System32 - whodata + description: Monitor the System32 folder without redirection in Whodata mode + configuration_parameters: + INTERVAL: 10000 + REALTIME: 'no' + WHODATA: 'yes' + TEST_DIRECTORIES: '%WINDIR%\System32\testdir1' + fim_mode: whodata + metadata: + folder: system32 + fim_mode: whodata + redirected: false + folder_to_monitor: !!python/object/apply:os.path.join + args: [\%WINDIR\%, System32, testdir1] + +- name: monitor /Windows/Sysnative - scheduled + description: Monitor the System32 through Sysnative redirection in Scheduled mode + configuration_parameters: + INTERVAL: 3 + REALTIME: 'no' + WHODATA: 'no' + TEST_DIRECTORIES: '%WINDIR%\Sysnative\testdir1' + fim_mode: scheduled + metadata: + folder: system32 + fim_mode: scheduled + 
redirected: true + folder_to_monitor: !!python/object/apply:os.path.join + args: [\%WINDIR\%, System32, testdir1] + +- name: monitor /Windows/Sysnative - realtime + description: Monitor the System32 through Sysnative redirection in Realtime mode + configuration_parameters: + INTERVAL: 10000 + REALTIME: 'yes' + WHODATA: 'no' + TEST_DIRECTORIES: '%WINDIR%\Sysnative\testdir1' + fim_mode: realtime + metadata: + folder: system32 + fim_mode: realtime + redirected: true + folder_to_monitor: !!python/object/apply:os.path.join + args: [\%WINDIR\%, System32, testdir1] + +- name: monitor /Windows/Sysnative - whodata + description: Monitor the System32 through Sysnative redirection in Whodata mode + configuration_parameters: + INTERVAL: 10000 + REALTIME: 'no' + WHODATA: 'yes' + TEST_DIRECTORIES: '%WINDIR%\Sysnative\testdir1' + fim_mode: whodata + metadata: + folder: system32 + fim_mode: whodata + redirected: true + folder_to_monitor: !!python/object/apply:os.path.join + args: [\%WINDIR\%, System32, testdir1] + +- name: monitor SyWOW64 - scheduled + description: Monitor the SysWOW64 without redirection in Scheduled mode + configuration_parameters: + INTERVAL: 3 + REALTIME: 'no' + WHODATA: 'no' + TEST_DIRECTORIES: '%WINDIR%\SysWOW64\testdir1' + fim_mode: scheduled + metadata: + folder: syswow64 + fim_mode: scheduled + redirected: false + folder_to_monitor: !!python/object/apply:os.path.join + args: [\%WINDIR\%, System32, testdir1] + +- name: monitor SysWOW64 - realtime + description: Monitor the SysWOW64 without redirection in Realtime mode + configuration_parameters: + INTERVAL: 10000 + REALTIME: 'yes' + WHODATA: 'no' + TEST_DIRECTORIES: '%WINDIR%\SysWOW64\testdir1' + fim_mode: realtime + metadata: + folder: syswow64 + fim_mode: realtime + redirected: false + folder_to_monitor: !!python/object/apply:os.path.join + args: [\%WINDIR\%, System32, testdir1] + +- name: monitor SysWOW64 - whodata + description: Monitor the SysWOW64 without redirection in Whodata mode + configuration_parameters: + INTERVAL: 10000 + REALTIME: 'no' + WHODATA: 'yes' + TEST_DIRECTORIES: '%WINDIR%\SysWOW64\testdir1' + fim_mode: whodata + metadata: + folder: syswow64 + fim_mode: whodata + redirected: false + folder_to_monitor: !!python/object/apply:os.path.join + args: [\%WINDIR\%, System32, testdir1] diff --git a/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_windows_system_folder_redirection.py b/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_windows_system_folder_redirection.py new file mode 100644 index 00000000000..ba7fb4ad974 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_windows_system_folder_redirection.py @@ -0,0 +1,158 @@ +''' +copyright: Copyright (C) 2015-2023, Wazuh Inc. + + Created by Wazuh, Inc. . + + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts when these files are + added, modified or deleted. Specifically, these tests will check that FIM is able to monitor Windows system + folders. FIM can redirect %WINDIR%/Sysnative monitoring toward System32 folder, so the tests also check that + when monitoring Sysnative the path is converted to system32 and events are generated there properly. 
+ +components: + - fim + +suite: windows_system_folder_redirection + +targets: + - agent + +daemons: + - wazuh-syscheckd + +os_platform: + - windows + +os_version: + - Windows 10 + - Windows Server 2019 + - Windows Server 2016 + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + +pytest_args: + - fim_mode: + scheduled: File monitoring is done after every configured interval elapses. + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. + whodata: Implies real-time monitoring but adding the 'who-data' information. + - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. + 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. + +tags: + - windows_folder_redirection +''' +from pathlib import Path + +import os + +import pytest +from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH +from wazuh_testing.tools.monitors.file_monitor import FileMonitor +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template +from wazuh_testing.utils.callbacks import generate_callback +from wazuh_testing.utils import file +from wazuh_testing.modules.fim.patterns import EVENT_TYPE_ADDED, WIN_CONVERT_FOLDER +from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG +from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG + +from . import TEST_CASES_PATH, CONFIGS_PATH + +# Marks +pytestmark = [pytest.mark.agent, pytest.mark.win32, pytest.mark.tier(level=1)] + +# Test metadata, configuration and ids. +cases_path = Path(TEST_CASES_PATH, 'cases_windows_system_folder_redirection.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_windows_system_folder_redirection.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + +# Set configurations required by the fixtures. +daemons_handler_configuration = {'all_daemons': True} +local_internal_options = {SYSCHECK_DEBUG: 2, AGENTD_WINDOWS_DEBUG: 2 } + +# Tests +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_windows_system_monitoring(test_configuration, test_metadata,configure_local_internal_options, + truncate_monitored_files, set_wazuh_configuration, folder_to_monitor, daemons_handler): + ''' + description: Check if the 'wazuh-syscheckd' monitors the windows system folders (System32 and SysWOW64) properly, + and that monitoring for Sysnative folder is redirected to System32 and works properly. + + test_phases: + - setup: + - Set wazuh configuration and local_internal_options. + - Create custom folder for monitoring + - Clean logs files and restart wazuh to apply the configuration. + - test: + - In case of monitoring Sysnative, check it is redirected to System32. + - Create, Update and Delete files in monitored folders, and check logs appear. + - teardown: + - Delete custom monitored folder + - Restore configuration + - Stop wazuh + + wazuh_min_version: 4.6.0 + + tier: 1 + + parameters: + - configuration: + type: dict + brief: Configuration values for ossec.conf. + - metadata: + type: dict + brief: Test case data. + - test_folders: + type: dict + brief: List of folders to be created for monitoring. 
+ - set_wazuh_configuration: + type: fixture + brief: Set ossec.conf configuration. + - create_monitored_folders_module: + type: fixture + brief: Create a given list of folders when the module starts. Delete the folders at the end of the module. + - configure_local_internal_options_function: + type: fixture + brief: Set local_internal_options.conf file. + - restart_syscheck_function: + type: fixture + brief: restart syscheckd daemon, and truncate the ossec.log. + - wait_syscheck_start: + type: fixture + brief: check that the starting FIM scan is detected. + + assertions: + - Verify that for each modified file a 'diff' file is generated. + - Verify that FIM events include the 'content_changes' field. + - Verify that FIM events truncate the modifications made in a monitored file + when it matches the 'nodiff' tag. + - Verify that FIM events include the modifications made in a monitored file + when it does not match the 'nodiff' tag. + + input_description: The file 'configuration_windows_system_folder_redirection.yaml' provides the configuration + template. + The file 'cases_windows_system_folder_redirection.yaml' provides the tes cases configuration + details for each test case. + + expected_output: + - r'.*fim_adjust_path.*Convert '(.*) to '(.*)' to process the FIM events.' + - r'.*Sending FIM event: (.+)$' ('added', 'modified', and 'deleted' events)' + ''' + wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) + + # If monitoring sysnative, check redirection log message + if test_metadata['redirected']: + wazuh_log_monitor.start(callback=generate_callback(WIN_CONVERT_FOLDER)) + assert wazuh_log_monitor.callback_result + + file_to_monitor = os.path.join(test_metadata['folder_to_monitor'], 'testfile') + + # Write the file + file.write_file(file_to_monitor) + wazuh_log_monitor.start(callback=generate_callback(EVENT_TYPE_ADDED)) From 558c55c20c7049deaf5bbea2031e2415b6d6abd6 Mon Sep 17 00:00:00 2001 From: Santiago Vendramini Date: Tue, 14 May 2024 14:36:49 -0300 Subject: [PATCH 394/419] Comments fixed --- .../test_windows_system_folder_redirection.py | 43 ++++++++----------- 1 file changed, 18 insertions(+), 25 deletions(-) diff --git a/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_windows_system_folder_redirection.py b/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_windows_system_folder_redirection.py index ba7fb4ad974..6ec7892129c 100644 --- a/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_windows_system_folder_redirection.py +++ b/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_windows_system_folder_redirection.py @@ -1,5 +1,5 @@ ''' -copyright: Copyright (C) 2015-2023, Wazuh Inc. +copyright: Copyright (C) 2015-2024, Wazuh Inc. Created by Wazuh, Inc. . @@ -91,7 +91,7 @@ def test_windows_system_monitoring(test_configuration, test_metadata,configure_l - Clean logs files and restart wazuh to apply the configuration. - test: - In case of monitoring Sysnative, check it is redirected to System32. - - Create, Update and Delete files in monitored folders, and check logs appear. + - Write file in monitored folders, and check logs appear. - teardown: - Delete custom monitored folder - Restore configuration @@ -102,38 +102,31 @@ def test_windows_system_monitoring(test_configuration, test_metadata,configure_l tier: 1 parameters: - - configuration: + - test_configuration: type: dict brief: Configuration values for ossec.conf. 
- - metadata: + - test_metadata: type: dict brief: Test case data. - - test_folders: - type: dict - brief: List of folders to be created for monitoring. - - set_wazuh_configuration: - type: fixture - brief: Set ossec.conf configuration. - - create_monitored_folders_module: - type: fixture - brief: Create a given list of folders when the module starts. Delete the folders at the end of the module. - - configure_local_internal_options_function: + - configure_local_internal_options: type: fixture brief: Set local_internal_options.conf file. - - restart_syscheck_function: + - truncate_monitored_files: + type: fixture + brief: Truncate all the log files and json alerts files before and after the test execution. + - set_wazuh_configuration: type: fixture - brief: restart syscheckd daemon, and truncate the ossec.log. - - wait_syscheck_start: + brief: Set ossec.conf configuration. + - folder_to_monitor: + type: str + brief: Folder created for monitoring. + - daemons_handler: type: fixture - brief: check that the starting FIM scan is detected. + brief: Handler of Wazuh daemons. assertions: - - Verify that for each modified file a 'diff' file is generated. - - Verify that FIM events include the 'content_changes' field. - - Verify that FIM events truncate the modifications made in a monitored file - when it matches the 'nodiff' tag. - - Verify that FIM events include the modifications made in a monitored file - when it does not match the 'nodiff' tag. + - Verify that for each modified file a FIM event is generated. + - Verify that log due to folder converted is generated. input_description: The file 'configuration_windows_system_folder_redirection.yaml' provides the configuration template. @@ -142,7 +135,7 @@ def test_windows_system_monitoring(test_configuration, test_metadata,configure_l expected_output: - r'.*fim_adjust_path.*Convert '(.*) to '(.*)' to process the FIM events.' 
- - r'.*Sending FIM event: (.+)$' ('added', 'modified', and 'deleted' events)' + - r'.*Sending FIM event: (.+)$' ('added' events)' ''' wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) From 5d6693a85776e65cf3ce132af9a36670a1c25981 Mon Sep 17 00:00:00 2001 From: Santiago Vendramini Date: Tue, 14 May 2024 14:52:22 -0300 Subject: [PATCH 395/419] Add missing space and reorder test files structure --- .../configuration_windows_system_folder_redirection.yaml | 0 .../test_cases/cases_windows_system_folder_redirection.yaml | 0 .../test_windows_system_folder_redirection.py | 2 +- 3 files changed, 1 insertion(+), 1 deletion(-) rename tests/integration/test_fim/test_files/test_windows_system_folder_redirection/{ => data}/configuration_templates/configuration_windows_system_folder_redirection.yaml (100%) rename tests/integration/test_fim/test_files/test_windows_system_folder_redirection/{ => data}/test_cases/cases_windows_system_folder_redirection.yaml (100%) diff --git a/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/configuration_templates/configuration_windows_system_folder_redirection.yaml b/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/data/configuration_templates/configuration_windows_system_folder_redirection.yaml similarity index 100% rename from tests/integration/test_fim/test_files/test_windows_system_folder_redirection/configuration_templates/configuration_windows_system_folder_redirection.yaml rename to tests/integration/test_fim/test_files/test_windows_system_folder_redirection/data/configuration_templates/configuration_windows_system_folder_redirection.yaml diff --git a/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_cases/cases_windows_system_folder_redirection.yaml b/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/data/test_cases/cases_windows_system_folder_redirection.yaml similarity index 100% rename from tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_cases/cases_windows_system_folder_redirection.yaml rename to tests/integration/test_fim/test_files/test_windows_system_folder_redirection/data/test_cases/cases_windows_system_folder_redirection.yaml diff --git a/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_windows_system_folder_redirection.py b/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_windows_system_folder_redirection.py index 6ec7892129c..c6c8ef59479 100644 --- a/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_windows_system_folder_redirection.py +++ b/tests/integration/test_fim/test_files/test_windows_system_folder_redirection/test_windows_system_folder_redirection.py @@ -78,7 +78,7 @@ # Tests @pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) -def test_windows_system_monitoring(test_configuration, test_metadata,configure_local_internal_options, +def test_windows_system_monitoring(test_configuration, test_metadata, configure_local_internal_options, truncate_monitored_files, set_wazuh_configuration, folder_to_monitor, daemons_handler): ''' description: Check if the 'wazuh-syscheckd' monitors the windows system folders (System32 and SysWOW64) properly, From 6919b88e51ed53058f3073f240234260c401f978 Mon Sep 17 00:00:00 2001 From: Julian Bustamante N Date: Wed, 15 May 2024 20:39:47 +0000 Subject: [PATCH 396/419] restrict test was added --- 
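Note: the new suite exercises the 'restrict' attribute of the syscheck
directories entry, which limits FIM events to entries whose path matches the
configured expression; non-matching entries only produce an
"Ignoring entry ... due to restriction ..." debug line. A rough approximation
of the matching the cases below rely on (standard Python 're' is used purely
for illustration; syscheck evaluates the expression with its own regex engine):

    import re

    cases = [
        ('.restricted.txt$', '/testdir1/binary.restricted.txt', True),
        ('^restricted', '/testdir1/testfile2.txt', False),
        ('^/testdir1/f|^/testdir1/subdir/f', '/testdir1/fileinfolder.txt', True),
        ('^/testdir1/f|^/testdir1/subdir/f', '/testdir1/testing_regex.txt', False),
    ]

    for pattern, path, expects_event in cases:
        assert bool(re.search(pattern, path)) == expects_event
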
.../test_files/test_restrict/__init__.py | 10 ++ .../config_restrict.yaml | 27 +++ .../data/test_cases/cases_restrict.yaml | 159 ++++++++++++++++++ .../test_files/test_restrict/test_restrict.py | 109 ++++++++++++ 4 files changed, 305 insertions(+) create mode 100644 tests/integration/test_fim/test_files/test_restrict/__init__.py create mode 100644 tests/integration/test_fim/test_files/test_restrict/data/configuration_templates/config_restrict.yaml create mode 100644 tests/integration/test_fim/test_files/test_restrict/data/test_cases/cases_restrict.yaml create mode 100644 tests/integration/test_fim/test_files/test_restrict/test_restrict.py diff --git a/tests/integration/test_fim/test_files/test_restrict/__init__.py b/tests/integration/test_fim/test_files/test_restrict/__init__.py new file mode 100644 index 00000000000..be20efa8ec4 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_restrict/__init__.py @@ -0,0 +1,10 @@ +# Copyright (C) 2015-2024, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +from pathlib import Path + + +# Constants & base paths +DATA_PATH = Path(Path(__file__).parent, 'data') +CONFIGS_PATH = Path(DATA_PATH, 'configuration_templates') +TEST_CASES_PATH = Path(DATA_PATH, 'test_cases') diff --git a/tests/integration/test_fim/test_files/test_restrict/data/configuration_templates/config_restrict.yaml b/tests/integration/test_fim/test_files/test_restrict/data/configuration_templates/config_restrict.yaml new file mode 100644 index 00000000000..772429f86d8 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_restrict/data/configuration_templates/config_restrict.yaml @@ -0,0 +1,27 @@ +- sections: + - section: syscheck + elements: + - disabled: + value: 'no' + - frequency: + value: 2 + - directories: + value: TEST_DIRECTORIES + attributes: + - check_all: 'yes' + - FIM_MODE + - restrict: RESTRICT + - section: sca + elements: + - enabled: + value: 'no' + - section: rootcheck + elements: + - disabled: + value: 'yes' + - section: wodle + attributes: + - name: syscollector + elements: + - disabled: + value: 'yes' diff --git a/tests/integration/test_fim/test_files/test_restrict/data/test_cases/cases_restrict.yaml b/tests/integration/test_fim/test_files/test_restrict/data/test_cases/cases_restrict.yaml new file mode 100644 index 00000000000..634da7814cc --- /dev/null +++ b/tests/integration/test_fim/test_files/test_restrict/data/test_cases/cases_restrict.yaml @@ -0,0 +1,159 @@ +- name: Restrict valid_empty + description: '' + configuration_parameters: + BASE_DIR: !!python/object/apply:os.path.join + args: [/testdir1] + TEST_DIRECTORIES: '/testdir1' + FIM_MODE: + realtime: 'yes' + RESTRICT: "" + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/testdir1] + file_to_monitor: !!python/object/apply:os.path.join [/testdir1, testfile2.txt] + fim_mode: 'realtime' + restrict: "" + data: ['testfile2.txt', True] + +- name: Restrict valid regex + description: '' + configuration_parameters: + BASE_DIR: !!python/object/apply:os.path.join + args: [/testdir1] + TEST_DIRECTORIES: '/testdir1' + FIM_MODE: + realtime: 'yes' + RESTRICT: .restricted.txt$ + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/testdir1] + file_to_monitor: !!python/object/apply:os.path.join [/testdir1,.restricted.txt] + fim_mode: 'realtime' + restrict: .restricted.txt$ + data: ['.restricted.txt', True] + +- name: Restrict valid regex + description: '' + configuration_parameters: + 
BASE_DIR: !!python/object/apply:os.path.join + args: [/testdir1] + TEST_DIRECTORIES: '/testdir1' + FIM_MODE: + realtime: 'yes' + RESTRICT: .restricted.txt$ + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/testdir1] + file_to_monitor: !!python/object/apply:os.path.join [/testdir1, binary.restricted.txt] + fim_mode: 'realtime' + restrict: .restricted.txt$ + data: ['binary.restricted.txt', True] + +- name: Restrict valid regex + description: '' + configuration_parameters: + BASE_DIR: !!python/object/apply:os.path.join + args: [/testdir1] + TEST_DIRECTORIES: '/testdir1' + FIM_MODE: + realtime: 'yes' + RESTRICT: ^restricted + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/testdir1] + file_to_monitor: !!python/object/apply:os.path.join [/testdir1, testfile2.txt] + fim_mode: 'realtime' + restrict: ^restricted + data: ['testfile2.txt', False] + +- name: Restrict valid regex + description: '' + configuration_parameters: + BASE_DIR: !!python/object/apply:os.path.join + args: [/testdir1] + TEST_DIRECTORIES: '/testdir1' + FIM_MODE: + realtime: 'yes' + RESTRICT: filerestricted|other_restricted.txt$ + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/testdir1] + file_to_monitor: !!python/object/apply:os.path.join [/testdir1, testfile2.txt] + fim_mode: 'realtime' + restrict: filerestricted|other_restricted.txt$ + data: ['testfile2.txt', False] + +- name: Restrict valid regex + description: '' + configuration_parameters: + BASE_DIR: !!python/object/apply:os.path.join + args: [/testdir1] + TEST_DIRECTORIES: '/testdir1' + FIM_MODE: + realtime: 'yes' + RESTRICT: filerestricted|other_restricted.txt$ + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/testdir1] + file_to_monitor: !!python/object/apply:os.path.join [/testdir1, myother_restricted.txt] + fim_mode: 'realtime' + restrict: filerestricted|other_restricted.txt$ + data: ['myother_restricted.txt', True] + +- name: Restrict valid regex + description: '' + configuration_parameters: + BASE_DIR: !!python/object/apply:os.path.join + args: [/testdir1] + TEST_DIRECTORIES: '/testdir1' + FIM_MODE: + realtime: 'yes' + RESTRICT: filerestricted|other_restricted.txt$ + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/testdir1] + file_to_monitor: !!python/object/apply:os.path.join [/testdir1, myfilerestricted.txt] + fim_mode: 'realtime' + restrict: filerestricted|other_restricted.txt$ + data: ['myfilerestricted.txt', True] + +- name: Restrict valid_regex_incomplete_unix + description: '' + configuration_parameters: + BASE_DIR: !!python/object/apply:os.path.join + args: [/testdir1] + TEST_DIRECTORIES: '/testdir1' + FIM_MODE: + realtime: 'yes' + RESTRICT: ^/testdir1/f|^/testdir1/subdir/f + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/testdir1] + file_to_monitor: !!python/object/apply:os.path.join [/testdir1, fileinfolder.txt] + fim_mode: 'realtime' + restrict: ^/testdir1/f|^/testdir1/subdir/f + data: ['fileinfolder.txt', True] + +- name: Restrict valid_regex_incomplete_unix + description: '' + configuration_parameters: + BASE_DIR: !!python/object/apply:os.path.join + args: [/testdir1] + TEST_DIRECTORIES: '/testdir1' + FIM_MODE: + realtime: 'yes' + RESTRICT: ^/testdir1/f|^/testdir1/subdir/f + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/testdir1] + file_to_monitor: !!python/object/apply:os.path.join [/testdir1, fileinfolder1.txt] + fim_mode: 'realtime' + restrict: ^/testdir1/f|^/testdir1/subdir/f + data: ['fileinfolder1.txt', 
True] + +- name: Restrict valid_regex_incomplete_unix + description: '' + configuration_parameters: + BASE_DIR: !!python/object/apply:os.path.join + args: [/testdir1] + TEST_DIRECTORIES: '/testdir1' + FIM_MODE: + realtime: 'yes' + RESTRICT: ^/testdir1/f|^/testdir1/subdir/f + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/testdir1] + file_to_monitor: !!python/object/apply:os.path.join [/testdir1, testing_regex.txt] + fim_mode: 'realtime' + restrict: ^/testdir1/f|^/testdir1/subdir/f + data: ['testing_regex.txt', False] diff --git a/tests/integration/test_fim/test_files/test_restrict/test_restrict.py b/tests/integration/test_fim/test_files/test_restrict/test_restrict.py new file mode 100644 index 00000000000..f0f3876e83d --- /dev/null +++ b/tests/integration/test_fim/test_files/test_restrict/test_restrict.py @@ -0,0 +1,109 @@ +""" + Copyright (C) 2015-2024, Wazuh Inc. + Created by Wazuh, Inc. . + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +""" + +import sys +import time +import pytest +import os + +from pathlib import Path +from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH +from wazuh_testing.constants.platforms import WINDOWS +from wazuh_testing.modules.agentd.configuration import AGENTD_DEBUG, AGENTD_WINDOWS_DEBUG +from wazuh_testing.modules.fim.patterns import EVENT_TYPE_ADDED, FIM_EVENT_RESTRICT +from wazuh_testing.modules.fim.utils import get_fim_event_data +from wazuh_testing.modules.monitord.configuration import MONITORD_ROTATE_LOG +from wazuh_testing.modules.fim import configuration +from wazuh_testing.tools.monitors.file_monitor import FileMonitor +from wazuh_testing.utils.callbacks import generate_callback +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template + +from . import TEST_CASES_PATH, CONFIGS_PATH + +# Pytest marks to run on any service type on linux or windows. +pytestmark = [ pytest.mark.linux, pytest.mark.tier(level=2)] + +# Test metadata, configuration and ids. +cases_path = Path(TEST_CASES_PATH, 'cases_restrict.yaml') +config_path = Path(CONFIGS_PATH, 'config_restrict.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + +# Set configurations required by the fixtures. +local_internal_options = {configuration.SYSCHECK_DEBUG: 2, AGENTD_DEBUG: 2, MONITORD_ROTATE_LOG: 0} +if sys.platform == WINDOWS: local_internal_options.update({AGENTD_WINDOWS_DEBUG: 2}) + +@pytest.mark.skipif(sys.platform=='win32', reason="Blocked by #4077.") +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_restrict(test_configuration, test_metadata, set_wazuh_configuration, configure_local_internal_options, + truncate_monitored_files, folder_to_monitor, daemons_handler, file_to_monitor): + ''' + description: Check if the 'wazuh-syscheckd' daemon detects or ignores events in monitored files depending + on the value set in the 'restrict' attribute. This attribute limit checks to files that match + the entered string or regex and its file name. For this purpose, the test will monitor a folder + and create a testing file inside it. Finally, the test will verify that FIM 'added' events are + generated only for the testing files that not are restricted. 
+ + wazuh_min_version: 4.2.0 + + tier: 2 + + parameters: + - test_configuration: + type: dict + brief: Configuration values for ossec.conf. + - test_metadata: + type: dict + brief: Test case data. + - set_wazuh_configuration: + type: fixture + brief: Set ossec.conf configuration. + - configure_local_internal_options: + type: fixture + brief: Set local_internal_options.conf file. + - truncate_monitored_files: + type: fixture + brief: Truncate all the log files and json alerts files before and after the test execution. + - file_to_monitor: + type: str + brief: File created for monitoring. + - daemons_handler: + type: fixture + brief: Handler of Wazuh daemons. + + + assertions: + - Verify that FIM events are only generated for file operations in monitored directories + that do not match the 'restrict' attribute. + - Verify that FIM 'ignoring' events are generated for monitored files that are restricted. + + input_description: Different test cases are contained in external YAML file (wazuh_conf.yaml) which + includes configuration settings for the 'wazuh-syscheckd' daemon and, these + are combined with the testing directories to be monitored defined in the module. + + expected_output: + - r'.*Sending FIM event: (.+)$' ('added' events) + - r'.*Ignoring entry .* due to restriction .*' + + tags: + - scheduled + ''' + + path = os.path.join(test_metadata['folder_to_monitor'], test_metadata['data'][0]) + + monitor = FileMonitor(WAZUH_LOG_PATH) + + time.sleep(3) + + if test_metadata['data'][1] == True: + monitor.start(generate_callback(EVENT_TYPE_ADDED)) + print(monitor.callback_result) + assert monitor.callback_result + fim_data = get_fim_event_data(monitor.callback_result) + assert fim_data['path'] == path + else: + ignored_file = monitor.start(generate_callback(FIM_EVENT_RESTRICT)) + assert monitor.callback_result From 2ad034c65767bf2d23987ab54d21174799f35dc9 Mon Sep 17 00:00:00 2001 From: Julian Bustamante N Date: Thu, 16 May 2024 19:01:25 +0000 Subject: [PATCH 397/419] syntax changes --- .../test_files/test_restrict/test_restrict.py | 72 ++++++++++++++++--- 1 file changed, 64 insertions(+), 8 deletions(-) diff --git a/tests/integration/test_fim/test_files/test_restrict/test_restrict.py b/tests/integration/test_fim/test_files/test_restrict/test_restrict.py index f0f3876e83d..1ef054ad10d 100644 --- a/tests/integration/test_fim/test_files/test_restrict/test_restrict.py +++ b/tests/integration/test_fim/test_files/test_restrict/test_restrict.py @@ -1,7 +1,63 @@ """ - Copyright (C) 2015-2024, Wazuh Inc. - Created by Wazuh, Inc. . - This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + copyright: Copyright (C) 2015-2024, Wazuh Inc. + + Created by Wazuh, Inc. . + + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts when + these files are modified. Specifically, these tests will verify that FIM generates events + only for file operations in monitored directories that do not match the 'restrict' attribute. + The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured + files for changes to the checksums, permissions, and ownership. 
+ +components: + - fim + +suite: files_restrict + +targets: + - agent + - manager + +daemons: + - wazuh-syscheckd + +os_platform: + - linux + + +os_version: + - Arch Linux + - Amazon Linux 2 + - Amazon Linux 1 + - CentOS 8 + - CentOS 7 + - Debian Buster + - Red Hat 8 + - Ubuntu Focal + - Ubuntu Bionic + - Windows 10 + - Windows Server 2019 + - Windows Server 2016 + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#directories + +pytest_args: + - fim_mode: + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. + whodata: Implies real-time monitoring but adding the 'who-data' information. + - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. + 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. + +tags: + - fim_restrict """ import sys @@ -23,8 +79,8 @@ from . import TEST_CASES_PATH, CONFIGS_PATH -# Pytest marks to run on any service type on linux or windows. -pytestmark = [ pytest.mark.linux, pytest.mark.tier(level=2)] +# Pytest marks to run on any service type on linux. +pytestmark = [pytest.mark.agent, pytest.mark.linux, pytest.mark.tier(level=2)] # Test metadata, configuration and ids. cases_path = Path(TEST_CASES_PATH, 'cases_restrict.yaml') @@ -36,7 +92,6 @@ local_internal_options = {configuration.SYSCHECK_DEBUG: 2, AGENTD_DEBUG: 2, MONITORD_ROTATE_LOG: 0} if sys.platform == WINDOWS: local_internal_options.update({AGENTD_WINDOWS_DEBUG: 2}) -@pytest.mark.skipif(sys.platform=='win32', reason="Blocked by #4077.") @pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) def test_restrict(test_configuration, test_metadata, set_wazuh_configuration, configure_local_internal_options, truncate_monitored_files, folder_to_monitor, daemons_handler, file_to_monitor): @@ -67,6 +122,9 @@ def test_restrict(test_configuration, test_metadata, set_wazuh_configuration, co - truncate_monitored_files: type: fixture brief: Truncate all the log files and json alerts files before and after the test execution. + - folder_to_monitor: + type: str + brief: Folder created for monitoring. - file_to_monitor: type: str brief: File created for monitoring. 
@@ -96,8 +154,6 @@ def test_restrict(test_configuration, test_metadata, set_wazuh_configuration, co monitor = FileMonitor(WAZUH_LOG_PATH) - time.sleep(3) - if test_metadata['data'][1] == True: monitor.start(generate_callback(EVENT_TYPE_ADDED)) print(monitor.callback_result) From 89c0699f23984bc21182af792bb584785c3126a0 Mon Sep 17 00:00:00 2001 From: Julian Bustamante N Date: Thu, 16 May 2024 22:47:42 +0000 Subject: [PATCH 398/419] syntax changes --- .../test_fim/test_files/test_restrict/test_restrict.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/integration/test_fim/test_files/test_restrict/test_restrict.py b/tests/integration/test_fim/test_files/test_restrict/test_restrict.py index 1ef054ad10d..43aa9d5b2c9 100644 --- a/tests/integration/test_fim/test_files/test_restrict/test_restrict.py +++ b/tests/integration/test_fim/test_files/test_restrict/test_restrict.py @@ -20,7 +20,6 @@ targets: - agent - - manager daemons: - wazuh-syscheckd @@ -39,9 +38,6 @@ - Red Hat 8 - Ubuntu Focal - Ubuntu Bionic - - Windows 10 - - Windows Server 2019 - - Windows Server 2016 references: - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html @@ -61,7 +57,6 @@ """ import sys -import time import pytest import os From f9a22008fe9dafc52c9c629a767558b365effb3f Mon Sep 17 00:00:00 2001 From: Santiago Vendramini Date: Mon, 20 May 2024 18:00:21 -0300 Subject: [PATCH 399/419] Test added to check if a restriction on the monitored windows registry key is ignored or generates an event depending on whether the setting matches or not --- tests/integration/test_fim/conftest.py | 16 ++ .../test_fim/test_registry/__init__.py | 0 .../test_registry_restrict/__init__.py | 10 + .../configuration_registry_restrict.yaml | 28 +++ .../cases_registry_restrict_key.yaml | 62 +++++++ .../test_registry_restrict_key.py | 174 ++++++++++++++++++ 6 files changed, 290 insertions(+) create mode 100644 tests/integration/test_fim/test_registry/__init__.py create mode 100644 tests/integration/test_fim/test_registry/test_registry_restrict/__init__.py create mode 100644 tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/configuration_registry_restrict.yaml create mode 100644 tests/integration/test_fim/test_registry/test_registry_restrict/data/test_cases/cases_registry_restrict_key.yaml create mode 100644 tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_key.py diff --git a/tests/integration/test_fim/conftest.py b/tests/integration/test_fim/conftest.py index 5a1d9dbf954..4f808eb585b 100644 --- a/tests/integration/test_fim/conftest.py +++ b/tests/integration/test_fim/conftest.py @@ -10,6 +10,7 @@ import re import subprocess import sys +import win32con from typing import Any from pathlib import Path @@ -17,6 +18,7 @@ from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH from wazuh_testing.constants.platforms import WINDOWS, MACOS, CENTOS, UBUNTU, DEBIAN from wazuh_testing.modules.fim.patterns import MONITORING_PATH +from wazuh_testing.modules.fim.utils import create_registry, delete_registry from wazuh_testing.tools.monitors.file_monitor import FileMonitor from wazuh_testing.tools.simulators.authd_simulator import AuthdSimulator from wazuh_testing.tools.simulators.remoted_simulator import RemotedSimulator @@ -151,3 +153,17 @@ def symlink(i: int): [file.remove_file(f'test_h{i}') for i in range(hardlink_amount)] [file.remove_file(f'test_s{i}') for i in range(symlink_amount)] + + +@pytest.fixture() +def 
create_registry(test_metadata: dict) -> None: + key = win32con.HKEY_LOCAL_MACHINE + sub_key = test_metadata.get('sub_key') + arch = win32con.KEY_WOW64_64KEY if test_metadata.get('arch') == 'x64' else win32con.KEY_WOW64_32KEY + + sleep(10) + create_registry(key, sub_key, arch) + + yield + + delete_registry(key, sub_key, arch) diff --git a/tests/integration/test_fim/test_registry/__init__.py b/tests/integration/test_fim/test_registry/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_fim/test_registry/test_registry_restrict/__init__.py b/tests/integration/test_fim/test_registry/test_registry_restrict/__init__.py new file mode 100644 index 00000000000..be20efa8ec4 --- /dev/null +++ b/tests/integration/test_fim/test_registry/test_registry_restrict/__init__.py @@ -0,0 +1,10 @@ +# Copyright (C) 2015-2024, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +from pathlib import Path + + +# Constants & base paths +DATA_PATH = Path(Path(__file__).parent, 'data') +CONFIGS_PATH = Path(DATA_PATH, 'configuration_templates') +TEST_CASES_PATH = Path(DATA_PATH, 'test_cases') diff --git a/tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/configuration_registry_restrict.yaml b/tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/configuration_registry_restrict.yaml new file mode 100644 index 00000000000..f8368fc8779 --- /dev/null +++ b/tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/configuration_registry_restrict.yaml @@ -0,0 +1,28 @@ +- sections: + - section: syscheck + elements: + - disabled: + value: 'no' + - windows_registry: + value: WINDOWS_REGISTRY + attributes: + - check_all: 'yes' + - arch: ARCH + - restrict_key: RESTRICT_KEY + - restrict_value: RESTRICT_VALUE + - frequency: + value: 3 + - section: sca + elements: + - enabled: + value: 'no' + - section: rootcheck + elements: + - disabled: + value: 'yes' + - section: wodle + attributes: + - name: 'syscollector' + elements: + - disabled: + value: 'yes' diff --git a/tests/integration/test_fim/test_registry/test_registry_restrict/data/test_cases/cases_registry_restrict_key.yaml b/tests/integration/test_fim/test_registry/test_registry_restrict/data/test_cases/cases_registry_restrict_key.yaml new file mode 100644 index 00000000000..14fd5e64a53 --- /dev/null +++ b/tests/integration/test_fim/test_registry/test_registry_restrict/data/test_cases/cases_registry_restrict_key.yaml @@ -0,0 +1,62 @@ +- name: Value restrict not triggers event x64 + description: The event should not trigger when the key does not match the restriction on a 64-bit system. + configuration_parameters: + WINDOWS_REGISTRY: !!python/object/apply:os.path.join + args: [HKEY_LOCAL_MACHINE, SOFTWARE, Classes, testkey] + RESTRICT_KEY: 'key_restrict$' + RESTRICT_VALUE: '.*' + ARCH: '64bit' + metadata: + fim_mode: scheduled + key: 'HKEY_LOCAL_MACHINE' + sub_key: !!python/object/apply:os.path.join + args: [SOFTWARE, Classes, testkey] + arch: 'x64' + triggers_event: !!python/object/apply:eval ['False'] + +- name: Value restrict not triggers event x32 + description: The event should not trigger when the key does not match the restriction on a 32-bit system. 
+ configuration_parameters: + WINDOWS_REGISTRY: !!python/object/apply:os.path.join + args: [HKEY_LOCAL_MACHINE, SOFTWARE, Classes, testkey] + RESTRICT_KEY: 'key_restrict$' + RESTRICT_VALUE: '.*' + ARCH: '32bit' + metadata: + fim_mode: scheduled + key: 'HKEY_LOCAL_MACHINE' + sub_key: !!python/object/apply:os.path.join + args: [SOFTWARE, Classes, testkey] + arch: 'x32' + triggers_event: !!python/object/apply:eval ['False'] + +- name: Value restrict triggers event x64 + description: The event should trigger when the key matches the restriction on a 64-bit system. + configuration_parameters: + WINDOWS_REGISTRY: !!python/object/apply:os.path.join + args: [HKEY_LOCAL_MACHINE, SOFTWARE, Classes, testkey] + RESTRICT_KEY: 'testkey$' + RESTRICT_VALUE: '.*' + ARCH: '64bit' + metadata: + fim_mode: scheduled + key: 'HKEY_LOCAL_MACHINE' + sub_key: !!python/object/apply:os.path.join + args: [SOFTWARE, Classes, testkey] + arch: 'x64' + triggers_event: !!python/object/apply:eval ['True'] + +- name: Value restrict triggers event x32 + description: The event should trigger when the key matches the restriction on a 32-bit system. + configuration_parameters: + WINDOWS_REGISTRY: !!python/object/apply:os.path.join + args: [HKEY_LOCAL_MACHINE, SOFTWARE, testkey] + RESTRICT_KEY: 'testkey$' + RESTRICT_VALUE: '.*' + ARCH: '32bit' + metadata: + key: 'HKEY_LOCAL_MACHINE' + sub_key: !!python/object/apply:os.path.join + args: [SOFTWARE, testkey] + arch: 'x32' + triggers_event: !!python/object/apply:eval ['True'] diff --git a/tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_key.py b/tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_key.py new file mode 100644 index 00000000000..5caa480216c --- /dev/null +++ b/tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_key.py @@ -0,0 +1,174 @@ +''' +copyright: Copyright (C) 2015-2024, Wazuh Inc. + + Created by Wazuh, Inc. . + + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts when + these files are modified. Specifically, these tests will verify that FIM generates events + only for registry entry operations in monitored keys that do not match the 'restrict_key' + or the 'restrict_value' attributes. + The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured + files for changes to the checksums, permissions, and ownership. + +components: + - fim + +suite: registry_restrict + +targets: + - agent + +daemons: + - wazuh-syscheckd + +os_platform: + - windows + +os_version: + - Windows 10 + - Windows 8 + - Windows 7 + - Windows Server 2019 + - Windows Server 2016 + - Windows Server 2012 + - Windows Server 2003 + - Windows XP + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#windows-registry + +pytest_args: + - fim_mode: + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. + whodata: Implies real-time monitoring but adding the 'who-data' information. + - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. 
+ 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. + +tags: + - fim_registry_restrict +''' +from pathlib import Path + +import os +import win32con + +import pytest +from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH +from wazuh_testing.tools.monitors.file_monitor import FileMonitor +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template +from wazuh_testing.utils.callbacks import generate_callback +from wazuh_testing.utils import file +from wazuh_testing.modules.fim.patterns import EVENT_TYPE_ADDED, EVENT_TYPE_DELETED, IGNORING_DUE_TO_RESTRICTION +from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG +from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG +from wazuh_testing.modules.fim.utils import get_fim_event_data, delete_registry + + +from . import TEST_CASES_PATH, CONFIGS_PATH + +# Marks + +pytestmark = [pytest.mark.agent, pytest.mark.win32, pytest.mark.tier(level=1)] + +# Test metadata, configuration and ids. +cases_path = Path(TEST_CASES_PATH, 'cases_registry_restrict_key.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_registry_restrict.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + +# Set configurations required by the fixtures. +daemons_handler_configuration = {'all_daemons': True} +local_internal_options = {SYSCHECK_DEBUG: 2} + +# Tests +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_restrict_key(test_configuration, test_metadata,configure_local_internal_options, + truncate_monitored_files, set_wazuh_configuration, daemons_handler, create_registry_key): + ''' + description: Check if the 'wazuh-syscheckd' daemon detects or ignores events in monitored registry entries + depending on the value set in the 'restrict_key' attribute. This attribute limit checks to + keys that match the entered string or regex and its name. For this purpose, the test will + monitor a key, create testing subkeys inside it, and make operations on those subkeys. Finally, + the test will verify that FIM 'added' and 'deleted' events are generated only for the testing + subkeys that are not restricted. + + wazuh_min_version: 4.2.0 + + tier: 1 + + parameters: + - test_configuration: + type: dict + brief: Configuration values for ossec.conf. + - test_metadata: + type: dict + brief: Test case data. + - configure_local_internal_options: + type: fixture + brief: Set local_internal_options.conf file. + - truncate_monitored_files: + type: fixture + brief: Truncate all the log files and json alerts files before and after the test execution. + - set_wazuh_configuration: + type: fixture + brief: Set ossec.conf configuration. + - daemons_handler: + type: fixture + brief: Handler of Wazuh daemons. + - create_registry_key + type: fixture + brief: Create windows registry key. + + assertions: + - Verify that FIM events are only generated for operations in monitored keys + that do not match the 'restrict_key' attribute. + - Verify that FIM 'ignoring' events are generated for monitored keys that are restricted. + + input_description: The file 'configuration_registry_restrict.yaml' provides the configuration + template. + The file 'cases_registry_restrict_key.yaml' provides the tes cases configuration + details for each test case. 
+ + expected_output: + - r'.*Sending FIM event: (.+)$' ('added', 'deleted' events) + - r'.*Ignoring entry .* due to restriction .*' + + tags: + - scheduled + - time_travel + ''' + wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) + + if test_metadata['triggers_event']: + wazuh_log_monitor.start(callback=generate_callback(EVENT_TYPE_ADDED)) + assert wazuh_log_monitor.callback_result + event = get_fim_event_data(wazuh_log_monitor.callback_result) + assert event['type'] == 'added', 'Event type not equal' + assert event['path'] == os.path.join(test_metadata['key'], test_metadata['sub_key']), 'Event path not equal' + assert event['arch'].strip('[]') == test_metadata['arch'], 'Arch not equal' + + delete_registry(win32con.HKEY_LOCAL_MACHINE, test_metadata.get('sub_key'), win32con.KEY_WOW64_64KEY if test_metadata.get('arch') == 'x64' else win32con.KEY_WOW64_32KEY) + wazuh_log_monitor.start(callback=generate_callback(EVENT_TYPE_DELETED)) + assert wazuh_log_monitor.callback_result + event = get_fim_event_data(wazuh_log_monitor.callback_result) + assert event['type'] == 'deleted', 'Event type not equal' + assert event['path'] == os.path.join(test_metadata['key'], test_metadata['sub_key']), 'Event path not equal' + assert event['arch'].strip('[]') == test_metadata['arch'], 'Arch not equal' + else: + wazuh_log_monitor.start(callback=generate_callback(IGNORING_DUE_TO_RESTRICTION)) + assert wazuh_log_monitor.callback_result + + delete_registry(win32con.HKEY_LOCAL_MACHINE, test_metadata.get('sub_key'), win32con.KEY_WOW64_64KEY if test_metadata.get('arch') == 'x64' else win32con.KEY_WOW64_32KEY) + + wazuh_log_monitor.start(callback=generate_callback(IGNORING_DUE_TO_RESTRICTION), only_new_events=True) + assert not wazuh_log_monitor.callback_result + + wazuh_log_monitor.start(callback=generate_callback(EVENT_TYPE_DELETED)) + assert not wazuh_log_monitor.callback_result From 0b402a6567d483778548e5b0af7fd588b1de1cd6 Mon Sep 17 00:00:00 2001 From: Santiago Vendramini Date: Tue, 21 May 2024 08:52:10 -0300 Subject: [PATCH 400/419] win32con library are excluded from imports when the platform is not win32 --- tests/integration/test_fim/conftest.py | 4 +++- .../test_registry_restrict/test_registry_restrict_key.py | 5 ++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_fim/conftest.py b/tests/integration/test_fim/conftest.py index 4f808eb585b..d18c2923207 100644 --- a/tests/integration/test_fim/conftest.py +++ b/tests/integration/test_fim/conftest.py @@ -10,7 +10,9 @@ import re import subprocess import sys -import win32con + +if sys.platform == 'win32': + import win32con from typing import Any from pathlib import Path diff --git a/tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_key.py b/tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_key.py index 5caa480216c..59d0dc9a282 100644 --- a/tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_key.py +++ b/tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_key.py @@ -57,7 +57,10 @@ from pathlib import Path import os -import win32con +import sys + +if sys.platform == 'win32': + import win32con import pytest from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH From d5a0e51e188b7e09384c0bbcce1c9ecdfbe2422f Mon Sep 17 00:00:00 2001 From: Santiago Vendramini Date: Tue, 21 May 2024 10:13:01 -0300 Subject: [PATCH 401/419] Fixture name fixed --- tests/integration/test_fim/conftest.py | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_fim/conftest.py b/tests/integration/test_fim/conftest.py index d18c2923207..d9e131928aa 100644 --- a/tests/integration/test_fim/conftest.py +++ b/tests/integration/test_fim/conftest.py @@ -158,7 +158,7 @@ def symlink(i: int): @pytest.fixture() -def create_registry(test_metadata: dict) -> None: +def create_registry_key(test_metadata: dict) -> None: key = win32con.HKEY_LOCAL_MACHINE sub_key = test_metadata.get('sub_key') arch = win32con.KEY_WOW64_64KEY if test_metadata.get('arch') == 'x64' else win32con.KEY_WOW64_32KEY From d48fcc6e997d7507c121dcc31d2829f1a3ae0186 Mon Sep 17 00:00:00 2001 From: Santiago Vendramini Date: Tue, 21 May 2024 17:09:21 -0300 Subject: [PATCH 402/419] Test added to check if a restriction on the monitored windows registry value is ignored or generates an event depending on whether the setting matches or not --- tests/integration/test_fim/conftest.py | 10 +- .../configuration_registry_restrict_key.yaml | 27 +++ ...onfiguration_registry_restrict_value.yaml} | 1 - .../cases_registry_restrict_key.yaml | 12 +- .../cases_registry_restrict_value.yaml | 59 +++++ .../test_registry_restrict_key.py | 17 +- .../test_registry_restrict_value.py | 203 ++++++++++++++++++ 7 files changed, 310 insertions(+), 19 deletions(-) create mode 100644 tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/configuration_registry_restrict_key.yaml rename tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/{configuration_registry_restrict.yaml => configuration_registry_restrict_value.yaml} (93%) create mode 100644 tests/integration/test_fim/test_registry/test_registry_restrict/data/test_cases/cases_registry_restrict_value.yaml create mode 100644 tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_value.py diff --git a/tests/integration/test_fim/conftest.py b/tests/integration/test_fim/conftest.py index d9e131928aa..a2da15bbcc0 100644 --- a/tests/integration/test_fim/conftest.py +++ b/tests/integration/test_fim/conftest.py @@ -19,7 +19,7 @@ from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH from wazuh_testing.constants.platforms import WINDOWS, MACOS, CENTOS, UBUNTU, DEBIAN -from wazuh_testing.modules.fim.patterns import MONITORING_PATH +from wazuh_testing.modules.fim.patterns import MONITORING_PATH, EVENT_TYPE_SCAN_END from wazuh_testing.modules.fim.utils import create_registry, delete_registry from wazuh_testing.tools.monitors.file_monitor import FileMonitor from wazuh_testing.tools.simulators.authd_simulator import AuthdSimulator @@ -163,9 +163,15 @@ def create_registry_key(test_metadata: dict) -> None: sub_key = test_metadata.get('sub_key') arch = win32con.KEY_WOW64_64KEY if test_metadata.get('arch') == 'x64' else win32con.KEY_WOW64_32KEY - sleep(10) create_registry(key, sub_key, arch) yield delete_registry(key, sub_key, arch) + + +@pytest.fixture() +def detect_end_scan(test_metadata: dict) -> None: + wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) + wazuh_log_monitor.start(timeout=60, callback=generate_callback(EVENT_TYPE_SCAN_END)) + assert wazuh_log_monitor.callback_result diff --git a/tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/configuration_registry_restrict_key.yaml b/tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/configuration_registry_restrict_key.yaml new file mode 100644 index 
00000000000..0d754859abe --- /dev/null +++ b/tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/configuration_registry_restrict_key.yaml @@ -0,0 +1,27 @@ +- sections: + - section: syscheck + elements: + - disabled: + value: 'no' + - windows_registry: + value: WINDOWS_REGISTRY + attributes: + - check_all: 'yes' + - arch: ARCH + - restrict_key: RESTRICT_KEY + - frequency: + value: 3 + - section: sca + elements: + - enabled: + value: 'no' + - section: rootcheck + elements: + - disabled: + value: 'yes' + - section: wodle + attributes: + - name: 'syscollector' + elements: + - disabled: + value: 'yes' diff --git a/tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/configuration_registry_restrict.yaml b/tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/configuration_registry_restrict_value.yaml similarity index 93% rename from tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/configuration_registry_restrict.yaml rename to tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/configuration_registry_restrict_value.yaml index f8368fc8779..1cc803c065b 100644 --- a/tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/configuration_registry_restrict.yaml +++ b/tests/integration/test_fim/test_registry/test_registry_restrict/data/configuration_templates/configuration_registry_restrict_value.yaml @@ -8,7 +8,6 @@ attributes: - check_all: 'yes' - arch: ARCH - - restrict_key: RESTRICT_KEY - restrict_value: RESTRICT_VALUE - frequency: value: 3 diff --git a/tests/integration/test_fim/test_registry/test_registry_restrict/data/test_cases/cases_registry_restrict_key.yaml b/tests/integration/test_fim/test_registry/test_registry_restrict/data/test_cases/cases_registry_restrict_key.yaml index 14fd5e64a53..42f3f43817b 100644 --- a/tests/integration/test_fim/test_registry/test_registry_restrict/data/test_cases/cases_registry_restrict_key.yaml +++ b/tests/integration/test_fim/test_registry/test_registry_restrict/data/test_cases/cases_registry_restrict_key.yaml @@ -1,10 +1,9 @@ -- name: Value restrict not triggers event x64 +- name: Key restrict not triggers event x64 description: The event should not trigger when the key does not match the restriction on a 64-bit system. configuration_parameters: WINDOWS_REGISTRY: !!python/object/apply:os.path.join args: [HKEY_LOCAL_MACHINE, SOFTWARE, Classes, testkey] RESTRICT_KEY: 'key_restrict$' - RESTRICT_VALUE: '.*' ARCH: '64bit' metadata: fim_mode: scheduled @@ -14,13 +13,12 @@ arch: 'x64' triggers_event: !!python/object/apply:eval ['False'] -- name: Value restrict not triggers event x32 +- name: Key restrict not triggers event x32 description: The event should not trigger when the key does not match the restriction on a 32-bit system. configuration_parameters: WINDOWS_REGISTRY: !!python/object/apply:os.path.join args: [HKEY_LOCAL_MACHINE, SOFTWARE, Classes, testkey] RESTRICT_KEY: 'key_restrict$' - RESTRICT_VALUE: '.*' ARCH: '32bit' metadata: fim_mode: scheduled @@ -30,13 +28,12 @@ arch: 'x32' triggers_event: !!python/object/apply:eval ['False'] -- name: Value restrict triggers event x64 +- name: Key restrict triggers event x64 description: The event should trigger when the key matches the restriction on a 64-bit system. 
   configuration_parameters:
     WINDOWS_REGISTRY: !!python/object/apply:os.path.join
       args: [HKEY_LOCAL_MACHINE, SOFTWARE, Classes, testkey]
     RESTRICT_KEY: 'testkey$'
-    RESTRICT_VALUE: '.*'
     ARCH: '64bit'
   metadata:
     fim_mode: scheduled
@@ -46,13 +43,12 @@
     arch: 'x64'
     triggers_event: !!python/object/apply:eval ['True']
 
-- name: Value restrict triggers event x32
+- name: Key restrict triggers event x32
   description: The event should trigger when the key matches the restriction on a 32-bit system.
   configuration_parameters:
     WINDOWS_REGISTRY: !!python/object/apply:os.path.join
       args: [HKEY_LOCAL_MACHINE, SOFTWARE, testkey]
     RESTRICT_KEY: 'testkey$'
-    RESTRICT_VALUE: '.*'
     ARCH: '32bit'
   metadata:
     key: 'HKEY_LOCAL_MACHINE'
diff --git a/tests/integration/test_fim/test_registry/test_registry_restrict/data/test_cases/cases_registry_restrict_value.yaml b/tests/integration/test_fim/test_registry/test_registry_restrict/data/test_cases/cases_registry_restrict_value.yaml
new file mode 100644
index 00000000000..6d8897d2048
--- /dev/null
+++ b/tests/integration/test_fim/test_registry/test_registry_restrict/data/test_cases/cases_registry_restrict_value.yaml
@@ -0,0 +1,59 @@
+- name: Value restrict not triggers event x64
+  description: The event should not trigger when the value does not match the restriction on a 64-bit system.
+  configuration_parameters:
+    WINDOWS_REGISTRY: !!python/object/apply:os.path.join
+      args: [HKEY_LOCAL_MACHINE, SOFTWARE, Classes, testkey]
+    RESTRICT_VALUE: 'value_restrict$'
+    ARCH: '64bit'
+  metadata:
+    key: 'HKEY_LOCAL_MACHINE'
+    sub_key: !!python/object/apply:os.path.join
+      args: [SOFTWARE, Classes, testkey]
+    arch: 'x64'
+    value_name: 'some_value'
+    triggers_event: !!python/object/apply:eval ['False']
+
+- name: Value restrict not triggers event x32
+  description: The event should not trigger when the value does not match the restriction on a 32-bit system.
+  configuration_parameters:
+    WINDOWS_REGISTRY: !!python/object/apply:os.path.join
+      args: [HKEY_LOCAL_MACHINE, SOFTWARE, Classes, testkey]
+    RESTRICT_VALUE: 'value_restrict$'
+    ARCH: '32bit'
+  metadata:
+    key: 'HKEY_LOCAL_MACHINE'
+    sub_key: !!python/object/apply:os.path.join
+      args: [SOFTWARE, Classes, testkey]
+    arch: 'x32'
+    value_name: 'some_value'
+    triggers_event: !!python/object/apply:eval ['False']
+
+- name: Value restrict triggers event x64
+  description: The event should trigger when the value matches the restriction on a 64-bit system.
+  configuration_parameters:
+    WINDOWS_REGISTRY: !!python/object/apply:os.path.join
+      args: [HKEY_LOCAL_MACHINE, SOFTWARE, Classes, testkey]
+    RESTRICT_VALUE: 'value_restrict$'
+    ARCH: '64bit'
+  metadata:
+    key: 'HKEY_LOCAL_MACHINE'
+    sub_key: !!python/object/apply:os.path.join
+      args: [SOFTWARE, Classes, testkey]
+    arch: 'x64'
+    value_name: 'value_restrict'
+    triggers_event: !!python/object/apply:eval ['True']
+
+- name: Value restrict triggers event x32
+  description: The event should trigger when the value matches the restriction on a 32-bit system.
+ configuration_parameters: + WINDOWS_REGISTRY: !!python/object/apply:os.path.join + args: [HKEY_LOCAL_MACHINE, SOFTWARE, testkey] + RESTRICT_VALUE: 'value_restrict$' + ARCH: '32bit' + metadata: + key: 'HKEY_LOCAL_MACHINE' + sub_key: !!python/object/apply:os.path.join + args: [SOFTWARE, testkey] + arch: 'x32' + value_name: 'value_restrict' + triggers_event: !!python/object/apply:eval ['True'] diff --git a/tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_key.py b/tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_key.py index 59d0dc9a282..54ccdfac67a 100644 --- a/tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_key.py +++ b/tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_key.py @@ -61,6 +61,7 @@ if sys.platform == 'win32': import win32con + from win32con import KEY_WOW64_32KEY, KEY_WOW64_64KEY import pytest from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH @@ -82,7 +83,7 @@ # Test metadata, configuration and ids. cases_path = Path(TEST_CASES_PATH, 'cases_registry_restrict_key.yaml') -config_path = Path(CONFIGS_PATH, 'configuration_registry_restrict.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_registry_restrict_key.yaml') test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) @@ -93,7 +94,7 @@ # Tests @pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) def test_restrict_key(test_configuration, test_metadata,configure_local_internal_options, - truncate_monitored_files, set_wazuh_configuration, daemons_handler, create_registry_key): + truncate_monitored_files, set_wazuh_configuration, daemons_handler, detect_end_scan, create_registry_key): ''' description: Check if the 'wazuh-syscheckd' daemon detects or ignores events in monitored registry entries depending on the value set in the 'restrict_key' attribute. This attribute limit checks to @@ -128,13 +129,16 @@ def test_restrict_key(test_configuration, test_metadata,configure_local_internal - create_registry_key type: fixture brief: Create windows registry key. + - detect_end_scan + type: fixture + brief: Check first scan end. assertions: - Verify that FIM events are only generated for operations in monitored keys that do not match the 'restrict_key' attribute. - Verify that FIM 'ignoring' events are generated for monitored keys that are restricted. - input_description: The file 'configuration_registry_restrict.yaml' provides the configuration + input_description: The file 'configuration_registry_restrict_key.yaml' provides the configuration template. The file 'cases_registry_restrict_key.yaml' provides the tes cases configuration details for each test case. 
@@ -143,9 +147,6 @@ def test_restrict_key(test_configuration, test_metadata,configure_local_internal - r'.*Sending FIM event: (.+)$' ('added', 'deleted' events) - r'.*Ignoring entry .* due to restriction .*' - tags: - - scheduled - - time_travel ''' wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) @@ -157,7 +158,7 @@ def test_restrict_key(test_configuration, test_metadata,configure_local_internal assert event['path'] == os.path.join(test_metadata['key'], test_metadata['sub_key']), 'Event path not equal' assert event['arch'].strip('[]') == test_metadata['arch'], 'Arch not equal' - delete_registry(win32con.HKEY_LOCAL_MACHINE, test_metadata.get('sub_key'), win32con.KEY_WOW64_64KEY if test_metadata.get('arch') == 'x64' else win32con.KEY_WOW64_32KEY) + delete_registry(win32con.HKEY_LOCAL_MACHINE, test_metadata['sub_key'], KEY_WOW64_64KEY if test_metadata['arch'] == 'x64' else KEY_WOW64_32KEY) wazuh_log_monitor.start(callback=generate_callback(EVENT_TYPE_DELETED)) assert wazuh_log_monitor.callback_result event = get_fim_event_data(wazuh_log_monitor.callback_result) @@ -168,7 +169,7 @@ def test_restrict_key(test_configuration, test_metadata,configure_local_internal wazuh_log_monitor.start(callback=generate_callback(IGNORING_DUE_TO_RESTRICTION)) assert wazuh_log_monitor.callback_result - delete_registry(win32con.HKEY_LOCAL_MACHINE, test_metadata.get('sub_key'), win32con.KEY_WOW64_64KEY if test_metadata.get('arch') == 'x64' else win32con.KEY_WOW64_32KEY) + delete_registry(win32con.HKEY_LOCAL_MACHINE, test_metadata['sub_key'], KEY_WOW64_64KEY if test_metadata['arch'] == 'x64' else KEY_WOW64_32KEY) wazuh_log_monitor.start(callback=generate_callback(IGNORING_DUE_TO_RESTRICTION), only_new_events=True) assert not wazuh_log_monitor.callback_result diff --git a/tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_value.py b/tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_value.py new file mode 100644 index 00000000000..8e834db2cfe --- /dev/null +++ b/tests/integration/test_fim/test_registry/test_registry_restrict/test_registry_restrict_value.py @@ -0,0 +1,203 @@ +''' +copyright: Copyright (C) 2015-2024, Wazuh Inc. + + Created by Wazuh, Inc. . + + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts when + these files are modified. Specifically, these tests will verify that FIM generates events + only for registry entry operations in monitored keys that do not match the 'restrict_key' + or the 'restrict_value' attributes. + The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured + files for changes to the checksums, permissions, and ownership. + +components: + - fim + +suite: registry_restrict + +targets: + - agent + +daemons: + - wazuh-syscheckd + +os_platform: + - windows + +os_version: + - Windows 10 + - Windows 8 + - Windows 7 + - Windows Server 2019 + - Windows Server 2016 + - Windows Server 2012 + - Windows Server 2003 + - Windows XP + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#windows-registry + +pytest_args: + - fim_mode: + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. 
+ whodata: Implies real-time monitoring but adding the 'who-data' information. + - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. + 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. + +tags: + - fim_registry_restrict +''' +from pathlib import Path + +import os +import sys + +if sys.platform == 'win32': + import win32con + from win32con import KEY_WOW64_32KEY, KEY_WOW64_64KEY + +import pytest +from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH +from wazuh_testing.tools.monitors.file_monitor import FileMonitor +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template +from wazuh_testing.utils.callbacks import generate_callback +from wazuh_testing.utils import file +from wazuh_testing.modules.fim.patterns import EVENT_TYPE_ADDED, EVENT_TYPE_MODIFIED, EVENT_TYPE_DELETED +from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG +from wazuh_testing.modules.fim.utils import get_fim_event_data, delete_registry, delete_registry_value, create_registry_value + + +from . import TEST_CASES_PATH, CONFIGS_PATH + +# Marks + +pytestmark = [pytest.mark.agent, pytest.mark.win32, pytest.mark.tier(level=1)] + +# Test metadata, configuration and ids. +cases_path = Path(TEST_CASES_PATH, 'cases_registry_restrict_value.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_registry_restrict_value.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + +# Set configurations required by the fixtures. +daemons_handler_configuration = {'all_daemons': True} +local_internal_options = {AGENTD_WINDOWS_DEBUG: 2} + +# Tests + +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_restrict_value(test_configuration, test_metadata,configure_local_internal_options, + truncate_monitored_files, set_wazuh_configuration, create_registry_key, daemons_handler, detect_end_scan): + ''' + description: Check if the 'wazuh-syscheckd' daemon detects or ignores events in monitored registry entries + depending on the value set in the 'restrict_value' attribute. This attribute limit checks to + keys that match the entered string or regex and its name. For this purpose, the test will + monitor a key, create testing subkeys inside it, and make operations on their values. Finally, + the test will verify that FIM 'added' and 'deleted' events are generated only for the testing + subkeys that are not restricted. + + wazuh_min_version: 4.2.0 + + tier: 1 + + parameters: + - test_configuration: + type: dict + brief: Configuration values for ossec.conf. + - test_metadata: + type: dict + brief: Test case data. + - configure_local_internal_options: + type: fixture + brief: Set local_internal_options.conf file. + - truncate_monitored_files: + type: fixture + brief: Truncate all the log files and json alerts files before and after the test execution. + - set_wazuh_configuration: + type: fixture + brief: Set ossec.conf configuration. + - daemons_handler: + type: fixture + brief: Handler of Wazuh daemons. + - create_registry_key + type: fixture + brief: Create windows registry key. + - detect_end_scan + type: fixture + brief: Check first scan end. 
+
+    assertions:
+        - Verify that FIM events are only generated for operations in monitored keys
+          that do not match the 'restrict_value' attribute.
+        - Verify that FIM 'ignoring' events are generated for monitored keys that are restricted.
+
+    input_description: The file 'configuration_registry_restrict_value.yaml' provides the configuration
+                       template.
+                       The file 'cases_registry_restrict_value.yaml' provides the test cases configuration
+                       details for each test case.
+
+    expected_output:
+        - r'.*Sending FIM event: (.+)$' ('added', 'deleted' events)
+        - r'.*Ignoring entry .* due to restriction .*'
+
+    '''
+    wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH)
+
+    # Create values
+    create_registry_value(win32con.HKEY_LOCAL_MACHINE, test_metadata['sub_key'], test_metadata['value_name'], win32con.REG_SZ, "added", KEY_WOW64_64KEY if test_metadata['arch'] == 'x64' else KEY_WOW64_32KEY)
+
+    if test_metadata['triggers_event']:
+        wazuh_log_monitor.start(callback=generate_callback(EVENT_TYPE_MODIFIED))
+        assert wazuh_log_monitor.callback_result
+        event = get_fim_event_data(wazuh_log_monitor.callback_result)
+        assert event['type'] == 'modified', 'Key event not modified'
+        assert event['path'] == os.path.join(test_metadata['key'], test_metadata['sub_key']), 'Key event wrong path'
+        assert event['arch'] == '[x32]' if test_metadata['arch'] == KEY_WOW64_32KEY else '[x64]', 'Key event arch not equal'
+
+        wazuh_log_monitor.start(callback=generate_callback(EVENT_TYPE_ADDED))
+        assert wazuh_log_monitor.callback_result
+        event = get_fim_event_data(wazuh_log_monitor.callback_result)
+        assert event['type'] == 'added', 'Event type not equal'
+        assert event['path'] == os.path.join(test_metadata['key'], test_metadata['sub_key']), 'Event path not equal'
+        assert event['value_name'] == test_metadata['value_name'], 'Value name not equal'
+        assert event['arch'] == '[x32]' if test_metadata['arch'] == KEY_WOW64_32KEY else '[x64]', 'Value event arch not equal'
+    else:
+        wazuh_log_monitor.start(callback=generate_callback(EVENT_TYPE_MODIFIED))
+        assert wazuh_log_monitor.callback_result
+        event = get_fim_event_data(wazuh_log_monitor.callback_result)
+        assert event['type'] == 'modified', 'Key event not modified'
+        assert event['path'] == os.path.join(test_metadata['key'], test_metadata['sub_key']), 'Key event wrong path'
+        assert event['arch'] == '[x32]' if test_metadata['arch'] == KEY_WOW64_32KEY else '[x64]', 'Key event arch not equal'
+
+    delete_registry_value(win32con.HKEY_LOCAL_MACHINE, test_metadata['sub_key'], test_metadata['value_name'], KEY_WOW64_64KEY if test_metadata['arch'] == 'x64' else KEY_WOW64_32KEY)
+
+    if test_metadata['triggers_event']:
+        wazuh_log_monitor.start(callback=generate_callback(EVENT_TYPE_MODIFIED))
+        assert wazuh_log_monitor.callback_result
+        event = get_fim_event_data(wazuh_log_monitor.callback_result)
+        assert event['type'] == 'modified', 'Key event not modified'
+        assert event['path'] == os.path.join(test_metadata['key'], test_metadata['sub_key']), 'Key event wrong path'
+        assert event['arch'] == '[x32]' if test_metadata['arch'] == KEY_WOW64_32KEY else '[x64]', 'Key event arch not equal'
+
+        wazuh_log_monitor.start(callback=generate_callback(EVENT_TYPE_DELETED))
+        assert wazuh_log_monitor.callback_result
+        event = get_fim_event_data(wazuh_log_monitor.callback_result)
+        assert event['type'] == 'deleted', 'Event type not equal'
+        assert event['path'] == os.path.join(test_metadata['key'], test_metadata['sub_key']), 'Event path not equal'
+        assert event['value_name'] == test_metadata['value_name'],
'Value name not equal' + assert event['arch'] == '[x32]' if test_metadata['arch'] == KEY_WOW64_32KEY else '[x64]', 'Value event arch not equal' + else: + wazuh_log_monitor.start(callback=generate_callback(EVENT_TYPE_MODIFIED)) + assert wazuh_log_monitor.callback_result + event = get_fim_event_data(wazuh_log_monitor.callback_result) + # After deleting the value, we don't expect any message of the value because it's not in the DB + assert event['type'] == 'modified', 'Key event not modified' + assert event['path'] == os.path.join(test_metadata['key'], test_metadata['sub_key']), 'Key event wrong path' + assert event['arch'] == '[x32]' if test_metadata['arch'] == KEY_WOW64_32KEY else '[x64]', 'Key event arch not equal' From 3b780f72a074b15cc48f52cecd578fd95fc3727c Mon Sep 17 00:00:00 2001 From: Santiago Vendramini Date: Fri, 31 May 2024 13:04:44 -0300 Subject: [PATCH 403/419] detect_end_scan fixture added to test_move.py test to avoid failures --- .../test_fim/test_files/test_basic_usage/test_move.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_fim/test_files/test_basic_usage/test_move.py b/tests/integration/test_fim/test_files/test_basic_usage/test_move.py index 4e297080d77..f306f14c1c3 100644 --- a/tests/integration/test_fim/test_files/test_basic_usage/test_move.py +++ b/tests/integration/test_fim/test_files/test_basic_usage/test_move.py @@ -96,7 +96,7 @@ @pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) def test_move(test_configuration, test_metadata, set_wazuh_configuration, configure_local_internal_options, - truncate_monitored_files, folder_to_monitor, daemons_handler, start_monitoring, path_to_edit): + truncate_monitored_files, folder_to_monitor, daemons_handler, start_monitoring, path_to_edit, detect_end_scan): ''' description: Check if the 'wazuh-syscheckd' daemon detects 'added' and 'deleted' events when moving a subdirectory or a file from a monitored folder to another one. For this purpose, the test @@ -126,15 +126,18 @@ def test_move(test_configuration, test_metadata, set_wazuh_configuration, config - folder_to_monitor: type: str brief: Folder created for monitoring. - - path_to_edit: - type: str - brief: Create the required directory or file to edit. - daemons_handler: type: fixture brief: Handler of Wazuh daemons. - start_monitoring: type: fixture brief: Wait FIM to start. + - path_to_edit: + type: str + brief: Create the required directory or file to edit. + - detect_end_scan + type: fixture + brief: Check first scan end. assertions: - Verify that FIM events of type 'added' and 'deleted' are generated From 013e817581457c46a5a4be42fd218f558c21bab3 Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Mon, 20 May 2024 18:54:17 -0300 Subject: [PATCH 404/419] IT FIM: added test test_diff_size_limit_configured.py. 
--- .../test_report_changes/__init__.py | 10 ++ .../configuration_diff_size.yaml | 29 ++++ .../cases_diff_size_limit_configured.yaml | 67 ++++++++ .../test_diff_size_limit_configured.py | 153 ++++++++++++++++++ 4 files changed, 259 insertions(+) create mode 100644 tests/integration/test_fim/test_files/test_report_changes/__init__.py create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_diff_size.yaml create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_diff_size_limit_configured.yaml create mode 100644 tests/integration/test_fim/test_files/test_report_changes/test_diff_size_limit_configured.py diff --git a/tests/integration/test_fim/test_files/test_report_changes/__init__.py b/tests/integration/test_fim/test_files/test_report_changes/__init__.py new file mode 100644 index 00000000000..be20efa8ec4 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/__init__.py @@ -0,0 +1,10 @@ +# Copyright (C) 2015-2024, Wazuh Inc. +# Created by Wazuh, Inc. . +# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +from pathlib import Path + + +# Constants & base paths +DATA_PATH = Path(Path(__file__).parent, 'data') +CONFIGS_PATH = Path(DATA_PATH, 'configuration_templates') +TEST_CASES_PATH = Path(DATA_PATH, 'test_cases') diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_diff_size.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_diff_size.yaml new file mode 100644 index 00000000000..038d2506112 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_diff_size.yaml @@ -0,0 +1,29 @@ +- sections: + - section: syscheck + elements: + - disabled: + value: 'no' + - frequency: + value: 3 + - directories: + value: TEST_DIRECTORIES + attributes: ATTRIBUTES + - diff: + elements: + - file_size: + elements: + - enabled: + value: FILE_SIZE_ENABLED + - limit: + value: FILE_SIZE_LIMIT + - disk_quota: + elements: + - enabled: + value: DISK_QUOTA_ENABLED + - limit: + value: DISK_QUOTA_LIMIT + + - section: sca + elements: + - enabled: + value: 'no' diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_diff_size_limit_configured.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_diff_size_limit_configured.yaml new file mode 100644 index 00000000000..12194628be6 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_diff_size_limit_configured.yaml @@ -0,0 +1,67 @@ +- name: Test 'diff' information, fim_mode = scheduled + description: Check if the 'wazuh-syscheckd' daemon limits + the size of 'diff' information. 
+ configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + ATTRIBUTES: + - report_changes: 'yes' + - diff_size_limit: '2kb' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '1GB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + fim_mode: scheduled + report_changes: 'yes' + diff_size_limit_kb: '2' + file_size_enabled: 'yes' + file_size_limit: '1GB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + +- name: Test 'diff' information, fim_mode = realtime + description: Check if the 'wazuh-syscheckd' daemon limits + the size of 'diff' information. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + ATTRIBUTES: + - realtime: 'yes' + - report_changes: 'yes' + - diff_size_limit: '2kb' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '1GB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + fim_mode: realtime + report_changes: 'yes' + diff_size_limit_kb: '2' + file_size_enabled: 'yes' + file_size_limit: '1GB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + +- name: Test 'diff' information, fim_mode = whodata + description: Check if the 'wazuh-syscheckd' daemon limits + the size of 'diff' information. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + ATTRIBUTES: + - whodata: 'yes' + - report_changes: 'yes' + - diff_size_limit: '2kb' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '1GB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + fim_mode: whodata + report_changes: 'yes' + diff_size_limit_kb: '2' + file_size_enabled: 'yes' + file_size_limit: '1GB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_diff_size_limit_configured.py b/tests/integration/test_fim/test_files/test_report_changes/test_diff_size_limit_configured.py new file mode 100644 index 00000000000..b3f900732f5 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/test_diff_size_limit_configured.py @@ -0,0 +1,153 @@ +''' +copyright: Copyright (C) 2015-2024, Wazuh Inc. + + Created by Wazuh, Inc. . + + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts when + these files are modified. Specifically, these tests will check if FIM limits the size of + 'diff' information to generate from the file monitored when the 'diff_size_limit' and + the 'report_changes' options are enabled. + The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured + files for changes to the checksums, permissions, and ownership. 
+ +components: + - fim + +suite: files_report_changes + +targets: + - agent + +daemons: + - wazuh-syscheckd + +os_platform: + - linux + - windows + - macos + +os_version: + - Arch Linux + - Amazon Linux 2 + - Amazon Linux 1 + - CentOS 8 + - CentOS 7 + - Debian Buster + - Red Hat 8 + - macOS Catalina + - macOS Server + - Ubuntu Focal + - Ubuntu Bionic + - Windows 10 + - Windows Server 2019 + - Windows Server 2016 + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#directories + +pytest_args: + - fim_mode: + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. + whodata: Implies real-time monitoring but adding the 'who-data' information. + scheduled: Implies scheduled scan + - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. + 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. + +tags: + - fim_report_changes +''' +from pathlib import Path + +import pytest + +from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH +from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG +from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG +from wazuh_testing.modules.fim.patterns import DIFF_MAXIMUM_FILE_SIZE, ERROR_MSG_MAXIMUM_FILE_SIZE_EVENT, ERROR_MSG_WRONG_VALUE_MAXIMUM_FILE_SIZE +from wazuh_testing.tools.monitors.file_monitor import FileMonitor +from wazuh_testing.utils.callbacks import generate_callback +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template + +from . import TEST_CASES_PATH, CONFIGS_PATH + + +# Marks +pytestmark = [pytest.mark.tier(level=1)] + + +# Test metadata, configuration and ids. +cases_path = Path(TEST_CASES_PATH, 'cases_diff_size_limit_configured.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_diff_size.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + + +# Set configurations required by the fixtures. +local_internal_options = {SYSCHECK_DEBUG: 2, AGENTD_WINDOWS_DEBUG: '2'} + + +# Tests +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_diff_size_limit_configured(test_configuration, test_metadata, configure_local_internal_options, + truncate_monitored_files, set_wazuh_configuration, daemons_handler): + ''' + description: Check if the 'wazuh-syscheckd' daemon limits the size of 'diff' information to generate from + the value set in the 'diff_size_limit' attribute when the global 'file_size' tag is different. + For this purpose, the test will monitor a directory and, once the FIM is started, it will wait + for the FIM event related to the maximum file size to generate 'diff' information. Finally, + the test will verify that the value gotten from that FIM event corresponds with the one set + in the 'diff_size_limit'. + + wazuh_min_version: 4.6.0 + + tier: 1 + + parameters: + - test_configuration: + type: data + brief: Configuration used in the test. + - test_metadata: + type: data + brief: Configuration cases. 
+ - configure_local_internal_options: + type: fixture + brief: Set internal configuration for testing. + - truncate_monitored_files: + type: fixture + brief: Reset the 'ossec.log' file and start a new monitor. + - set_wazuh_configuration: + type: fixture + brief: Configure a custom environment for testing. + - daemons_handler: + type: fixture + brief: Handler of Wazuh daemons. + + assertions: + - Verify that an FIM event is generated indicating the size limit of 'diff' information to generate + set in the 'diff_size_limit' attribute when the global 'file_size' tag is different. + + input_description: An external YAML file (configuration_diff_size.yaml) includes configuration settings for the agent. + Different test cases are found in the cases_diff_size_limit_configured.yaml file and include parameters for + the environment setup, the requests to be made, and the expected result. + + expected_output: + - r'.*Maximum file size limit to generate diff information configured to' + + tags: + - diff + - scheduled + - realtime + - who_data + ''' + wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) + wazuh_log_monitor.start(generate_callback(DIFF_MAXIMUM_FILE_SIZE), timeout=30) + callback_result = wazuh_log_monitor.callback_result + assert callback_result, ERROR_MSG_MAXIMUM_FILE_SIZE_EVENT + assert str(wazuh_log_monitor.callback_result[0]) == test_metadata.get('diff_size_limit_kb'), ERROR_MSG_WRONG_VALUE_MAXIMUM_FILE_SIZE From 2cec0172ddd21e4abd1811cc78722789c6c2d037 Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Tue, 21 May 2024 10:13:55 -0300 Subject: [PATCH 405/419] IT fim: unifies test_diff_size_limit_configured and test_diff_size_limit_default in one test. --- ...igured.yaml => cases_diff_size_limit.yaml} | 42 +++++++++++++++++++ ..._configured.py => test_diff_size_limit.py} | 4 +- 2 files changed, 44 insertions(+), 2 deletions(-) rename tests/integration/test_fim/test_files/test_report_changes/data/test_cases/{cases_diff_size_limit_configured.yaml => cases_diff_size_limit.yaml} (59%) rename tests/integration/test_fim/test_files/test_report_changes/{test_diff_size_limit_configured.py => test_diff_size_limit.py} (96%) diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_diff_size_limit_configured.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_diff_size_limit.yaml similarity index 59% rename from tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_diff_size_limit_configured.yaml rename to tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_diff_size_limit.yaml index 12194628be6..92d56e4c2f7 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_diff_size_limit_configured.yaml +++ b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_diff_size_limit.yaml @@ -65,3 +65,45 @@ file_size_limit: '1GB' disk_quota_enabled: 'no' disk_quota_limit: '2KB' + +- name: Test 'diff' information, fim_mode = scheduled + description: Check if the default 'wazuh-syscheckd' daemon limits + the size of 'diff' information. 
+ configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + ATTRIBUTES: + - whodata: 'yes' + - report_changes: 'yes' + metadata: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + fim_mode: scheduled + report_changes: 'yes' + diff_size_limit_kb: '51200' + +- name: Test 'diff' information, fim_mode = realtime + description: Check if the default 'wazuh-syscheckd' daemon limits + the size of 'diff' information. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + ATTRIBUTES: + - realtime: 'yes' + - report_changes: 'yes' + metadata: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + fim_mode: realtime + report_changes: 'yes' + diff_size_limit_kb: '51200' + +- name: Test 'diff' information, fim_mode = whodata + description: Check if the default 'wazuh-syscheckd' daemon limits + the size of 'diff' information. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + ATTRIBUTES: + - whodata: 'yes' + - report_changes: 'yes' + metadata: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + fim_mode: whodata + report_changes: 'yes' + diff_size_limit_kb: '51200' diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_diff_size_limit_configured.py b/tests/integration/test_fim/test_files/test_report_changes/test_diff_size_limit.py similarity index 96% rename from tests/integration/test_fim/test_files/test_report_changes/test_diff_size_limit_configured.py rename to tests/integration/test_fim/test_files/test_report_changes/test_diff_size_limit.py index b3f900732f5..12a178a0e3f 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_diff_size_limit_configured.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_diff_size_limit.py @@ -83,7 +83,7 @@ # Test metadata, configuration and ids. -cases_path = Path(TEST_CASES_PATH, 'cases_diff_size_limit_configured.yaml') +cases_path = Path(TEST_CASES_PATH, 'cases_diff_size_limit.yaml') config_path = Path(CONFIGS_PATH, 'configuration_diff_size.yaml') test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) @@ -95,7 +95,7 @@ # Tests @pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) -def test_diff_size_limit_configured(test_configuration, test_metadata, configure_local_internal_options, +def test_diff_size_limit(test_configuration, test_metadata, configure_local_internal_options, truncate_monitored_files, set_wazuh_configuration, daemons_handler): ''' description: Check if the 'wazuh-syscheckd' daemon limits the size of 'diff' information to generate from From 335227cb8434804c8b7f89d788071359273e75ae Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Tue, 21 May 2024 16:20:31 -0300 Subject: [PATCH 406/419] IT FIM: added test test_disk_quota_default.py. 
--- .../configuration_disk_quota_default.yaml | 15 ++ .../test_cases/cases_disk_quota_default.yaml | 37 +++++ .../test_disk_quota_default.py | 148 ++++++++++++++++++ 3 files changed, 200 insertions(+) create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_disk_quota_default.yaml create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_disk_quota_default.yaml create mode 100644 tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_disk_quota_default.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_disk_quota_default.yaml new file mode 100644 index 00000000000..f5e175f3665 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_disk_quota_default.yaml @@ -0,0 +1,15 @@ +- sections: + - section: syscheck + elements: + - disabled: + value: 'no' + - frequency: + value: 3 + - directories: + value: TEST_DIRECTORIES + attributes: ATTRIBUTES + + - section: sca + elements: + - enabled: + value: 'no' diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_disk_quota_default.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_disk_quota_default.yaml new file mode 100644 index 00000000000..dcd00ccd689 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_disk_quota_default.yaml @@ -0,0 +1,37 @@ +- name: Test 'disk_quota' information, fim_mode = scheduled + description: Check if the 'wazuh-syscheckd' daemon limits + the size of 'disk_quota' information. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + ATTRIBUTES: + - report_changes: 'yes' + metadata: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + fim_mode: scheduled + report_changes: 'yes' + +- name: Test 'disk_quota' information, fim_mode = realtime + description: Check if the 'wazuh-syscheckd' daemon limits + the size of 'disk_quota' information. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + ATTRIBUTES: + - realtime: 'yes' + - report_changes: 'yes' + metadata: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + fim_mode: realtime + report_changes: 'yes' + +- name: Test 'disk_quota' information, fim_mode = whodata + description: Check if the 'wazuh-syscheckd' daemon limits + the size of 'disk_quota' information. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + ATTRIBUTES: + - whodata: 'yes' + - report_changes: 'yes' + metadata: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + fim_mode: whodata + report_changes: 'yes' diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py new file mode 100644 index 00000000000..93e7408740b --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py @@ -0,0 +1,148 @@ +''' +copyright: Copyright (C) 2015-2024, Wazuh Inc. + + Created by Wazuh, Inc. . 
+ + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts when + these files are modified. Specifically, these tests will check if FIM limits the size of + the 'queue/diff/local' folder, where Wazuh stores the compressed files used to perform + the 'diff' operation, to the default value when the 'report_changes' option is enabled. + The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured + files for changes to the checksums, permissions, and ownership. + +components: + - fim + +suite: files_report_changes + +targets: + - agent + +daemons: + - wazuh-syscheckd + +os_platform: + - linux + - windows + +os_version: + - Arch Linux + - Amazon Linux 2 + - Amazon Linux 1 + - CentOS 8 + - CentOS 7 + - Debian Buster + - Red Hat 8 + - Ubuntu Focal + - Ubuntu Bionic + - Windows 10 + - Windows Server 2019 + - Windows Server 2016 + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#disk-quota + +pytest_args: + - fim_mode: + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. + whodata: Implies real-time monitoring but adding the 'who-data' information. + - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. + 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. + +tags: + - fim_report_changes +''' +from pathlib import Path + +import pytest + +from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH +from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG +from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG +from wazuh_testing.modules.fim.patterns import DISK_QUOTA_LIMIT_CONFIGURED_VALUE, ERROR_MSG_DISK_QUOTA_LIMIT +from wazuh_testing.tools.monitors.file_monitor import FileMonitor +from wazuh_testing.utils.callbacks import generate_callback +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template + +from . import TEST_CASES_PATH, CONFIGS_PATH + + +# Marks +pytestmark = [pytest.mark.tier(level=1)] + + +# Test metadata, configuration and ids. +cases_path = Path(TEST_CASES_PATH, 'cases_disk_quota_default.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_diff_size.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + +DISK_QUOTA_DEFAULT_VALUE = 1048576 + + +# Set configurations required by the fixtures. +local_internal_options = {SYSCHECK_DEBUG: 2, AGENTD_WINDOWS_DEBUG: '2'} + + +# Tests +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_disk_quota_default(test_configuration, test_metadata, configure_local_internal_options, + truncate_monitored_files, set_wazuh_configuration, daemons_handler): + ''' + description: Check if the 'wazuh-syscheckd' daemon limits the size of the folder where the data used to perform + the 'diff' operations is stored to the default value. 
For this purpose, the test will monitor + a directory and, once the FIM is started, it will wait for the FIM event related to the maximum + disk quota to store 'diff' information. Finally, the test will verify that the value gotten from + that FIM event corresponds with the default value of the 'disk_quota' tag (1GB). + + wazuh_min_version: 4.6.0 + + tier: 1 + + parameters: + - test_configuration: + type: data + brief: Configuration used in the test. + - test_metadata: + type: data + brief: Configuration cases. + - configure_local_internal_options: + type: fixture + brief: Set internal configuration for testing. + - truncate_monitored_files: + type: fixture + brief: Reset the 'ossec.log' file and start a new monitor. + - set_wazuh_configuration: + type: fixture + brief: Configure a custom environment for testing. + - daemons_handler: + type: fixture + brief: Handler of Wazuh daemons. + + assertions: + - Verify that an FIM event is generated indicating the size limit of the folder + to store 'diff' information to the default limit of the 'disk_quota' tag (1GB). + + input_description: An external YAML file (configuration_diff_size.yaml) includes configuration settings for the agent. + Different test cases are found in the cases_disk_quota_default.yaml file and include parameters for + the environment setup, the requests to be made, and the expected result. + + expected_output: + - r'.*Maximum disk quota size limit configured to' + + tags: + - disk_quota + - scheduled + ''' + wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) + wazuh_log_monitor.start(generate_callback(DISK_QUOTA_LIMIT_CONFIGURED_VALUE), timeout=30) + callback_result = wazuh_log_monitor.callback_result + assert callback_result, ERROR_MSG_DISK_QUOTA_LIMIT + assert str(wazuh_log_monitor.callback_result[0]) == str(DISK_QUOTA_DEFAULT_VALUE), 'Wrong value for disk_quota' From 34414471f204ef21cce356b84f52b91d0c00ae2e Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Wed, 22 May 2024 14:52:40 -0300 Subject: [PATCH 407/419] IT FIM: added test test_disk_quota_disabled.py. --- .../test_cases/cases_disk_quota_disabled.yaml | 73 ++++++++ .../test_disk_quota_default.py | 2 +- .../test_disk_quota_disabled.py | 157 ++++++++++++++++++ 3 files changed, 231 insertions(+), 1 deletion(-) create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_disk_quota_disabled.yaml create mode 100644 tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_disk_quota_disabled.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_disk_quota_disabled.yaml new file mode 100644 index 00000000000..a04fa0d1d84 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_disk_quota_disabled.yaml @@ -0,0 +1,73 @@ +- name: Test 'disk_quota' information, fim_mode = scheduled + description: Check if the 'wazuh-syscheckd' daemon skips disk_quota check. 
+ configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - report_changes: 'yes' + FILE_SIZE_ENABLED: 'no' + FILE_SIZE_LIMIT: '1KB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + file_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir, testfile] + fim_mode: 'scheduled' + report_changes: 'yes' + file_size_enabled: 'no' + file_size_limit: '1KB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + string_size: 10000000 + +- name: Test 'disk_quota' information, fim_mode = realtime + description: Check if the 'wazuh-syscheckd' daemon skips disk_quota check. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - report_changes: 'yes' + - realtime: 'yes' + FILE_SIZE_ENABLED: 'no' + FILE_SIZE_LIMIT: '1KB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + file_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir, testfile] + fim_mode: 'realtime' + report_changes: 'yes' + file_size_enabled: 'no' + file_size_limit: '1KB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + string_size: 10000000 + +- name: Test 'disk_quota' information, fim_mode = whodata + description: Check if the 'wazuh-syscheckd' daemon skips disk_quota check. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - report_changes: 'yes' + - whodata: 'yes' + FILE_SIZE_ENABLED: 'no' + FILE_SIZE_LIMIT: '1KB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + file_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir, testfile] + fim_mode: 'whodata' + report_changes: 'yes' + file_size_enabled: 'no' + file_size_limit: '1KB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + string_size: 10000000 diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py index 93e7408740b..6511c030e2d 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py @@ -80,7 +80,7 @@ # Test metadata, configuration and ids. 
cases_path = Path(TEST_CASES_PATH, 'cases_disk_quota_default.yaml') -config_path = Path(CONFIGS_PATH, 'configuration_diff_size.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_disk_quota_default.yaml') test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py new file mode 100644 index 00000000000..dfb32c5cac0 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py @@ -0,0 +1,157 @@ +''' +copyright: Copyright (C) 2015-2022, Wazuh Inc. + + Created by Wazuh, Inc. . + + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts when + these files are modified. Specifically, these tests will verify that FIM does not limit + the size of the 'queue/diff/local' folder where Wazuh stores the compressed files used + to perform the 'diff' operation when the 'disk_quota' option is disabled. + The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured + files for changes to the checksums, permissions, and ownership. + +components: + - fim + +suite: files_report_changes + +targets: + - agent + +daemons: + - wazuh-syscheckd + +os_platform: + - linux + - windows + +os_version: + - Arch Linux + - Amazon Linux 2 + - Amazon Linux 1 + - CentOS 8 + - CentOS 7 + - Debian Buster + - Red Hat 8 + - Ubuntu Focal + - Ubuntu Bionic + - Windows 10 + - Windows Server 2019 + - Windows Server 2016 + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#disk-quota + +pytest_args: + - fim_mode: + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. + whodata: Implies real-time monitoring but adding the 'who-data' information. + - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. + 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. + +tags: + - fim_report_changes +''' +from pathlib import Path + +import pytest + +from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH +from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG +from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG +from wazuh_testing.modules.fim.patterns import FILE_EXCEEDS_DISK_QUOTA +from wazuh_testing.tools.monitors.file_monitor import FileMonitor +from wazuh_testing.utils.file import write_file, generate_string +from wazuh_testing.utils.callbacks import generate_callback +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template + +from . import TEST_CASES_PATH, CONFIGS_PATH + + +# Marks +pytestmark = [pytest.mark.tier(level=1)] + + +# Test metadata, configuration and ids. 
+cases_path = Path(TEST_CASES_PATH, 'cases_disk_quota_disabled.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_diff_size.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + + +# Set configurations required by the fixtures. +local_internal_options = {SYSCHECK_DEBUG: 2, AGENTD_WINDOWS_DEBUG: '2'} + + +# Tests +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_disk_quota_disabled(test_configuration, test_metadata, configure_local_internal_options, + truncate_monitored_files, set_wazuh_configuration, folder_to_monitor, file_to_monitor, daemons_handler): + ''' + description: Check if the 'wazuh-syscheckd' daemon limits the size of the folder where the data used + to perform the 'diff' operations is stored when the 'disk_quota' option is disabled. + For this purpose, the test will monitor a directory and, once the FIM is started, it + will create a testing file that, when compressed, is larger than the configured + 'disk_quota' limit. Finally, the test will verify that the FIM event related + to the reached disk quota has not been generated. + + wazuh_min_version: 4.6.0 + + tier: 1 + + parameters: + - test_configuration: + type: data + brief: Configuration used in the test. + - test_metadata: + type: data + brief: Configuration cases. + - configure_local_internal_options: + type: fixture + brief: Set internal configuration for testing. + - truncate_monitored_files: + type: fixture + brief: Reset the 'ossec.log' file and start a new monitor. + - set_wazuh_configuration: + type: fixture + brief: Configure a custom environment for testing. + - folder_to_monitor: + type: str + brief: Folder created for monitoring. + - file_to_monitor: + type: str + brief: File created for monitoring. + - daemons_handler: + type: fixture + brief: Handler of Wazuh daemons. + + assertions: + - Verify that no FIM events are generated indicating the disk quota exceeded for monitored files + when the 'disk_quota' option is disabled. + + input_description: An external YAML file (configuration_diff_size.yaml) includes configuration settings for the agent. + Different test cases are found in the cases_disk_quota_disabled.yaml file and include parameters for + the environment setup, the requests to be made, and the expected result. + + expected_output: + - r'.*The (.*) of the file size .* exceeds the disk_quota.*' (if the test fails) + + tags: + - disk_quota + - scheduled + ''' + to_write = generate_string(test_metadata.get('string_size'), '0') + write_file(test_metadata.get('file_to_monitor'), data=to_write) + + wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) + + wazuh_log_monitor.start(generate_callback(FILE_EXCEEDS_DISK_QUOTA), timeout=30) + + assert (wazuh_log_monitor.callback_result == None), f'Error exceeds disk quota detected.' From b14c3093685ae4b5cf82517c64225a7f519c3dba Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Fri, 24 May 2024 13:17:01 -0300 Subject: [PATCH 408/419] IT FIM: added test test_file_size_default.py. 
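
The test added below exercises the default 'file_size' limit (50 MB): a file
smaller than the limit must produce an 'added' FIM event and keep its compressed
copy under 'queue/diff/local', while growing it past the limit must log that the
file is too big to perform the diff operation and drop that copy. A simplified
sketch using the helpers the test imports (the monitored directory and the
daemons are assumed to be already set up by the fixtures):

    import os
    from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH
    from wazuh_testing.modules.fim.patterns import EVENT_TYPE_ADDED, FILE_SIZE_LIMIT_REACHED
    from wazuh_testing.modules.fim.utils import make_diff_file_path
    from wazuh_testing.tools.monitors.file_monitor import FileMonitor
    from wazuh_testing.utils.callbacks import generate_callback
    from wazuh_testing.utils.file import generate_string, translate_size, write_file

    folder_to_monitor = os.path.join(os.getcwd(), 'test_dir')   # as defined in the test cases
    test_file = os.path.join(folder_to_monitor, 'testfile')
    diff_file = make_diff_file_path(folder=folder_to_monitor, filename='testfile')
    size_limit = translate_size('50MB')                          # default file_size limit

    write_file(test_file, data=generate_string(size_limit // 10, '0'))   # below the limit
    FileMonitor(WAZUH_LOG_PATH).start(generate_callback(EVENT_TYPE_ADDED), timeout=30)
    assert os.path.exists(diff_file)                              # compressed copy created

    write_file(test_file, data=generate_string(size_limit, '0') * 3)     # above the limit
    FileMonitor(WAZUH_LOG_PATH).start(generate_callback(FILE_SIZE_LIMIT_REACHED), timeout=30)
    assert not os.path.exists(diff_file)                          # compressed copy removed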
--- .../test_cases/cases_file_size_default.yaml | 46 +++++ .../test_disk_quota_disabled.py | 2 +- .../test_file_size_default.py | 181 ++++++++++++++++++ 3 files changed, 228 insertions(+), 1 deletion(-) create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_file_size_default.yaml create mode 100644 tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_file_size_default.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_file_size_default.yaml new file mode 100644 index 00000000000..caebed6d1f7 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_file_size_default.yaml @@ -0,0 +1,46 @@ +- name: Test 'file_size' information, fim_mode = scheduled + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - report_changes: 'yes' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: scheduled + report_changes: 'yes' + +- name: Test 'file_size' information, fim_mode = realtime + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - realtime: 'yes' + - report_changes: 'yes' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: realtime + report_changes: 'yes' + +- name: Test 'file_size' information, fim_mode = whodata + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - whodata: 'yes' + - report_changes: 'yes' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: whodata + report_changes: 'yes' diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py index dfb32c5cac0..59cc493a314 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py @@ -1,5 +1,5 @@ ''' -copyright: Copyright (C) 2015-2022, Wazuh Inc. +copyright: Copyright (C) 2015-2024, Wazuh Inc. Created by Wazuh, Inc. . 
diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py new file mode 100644 index 00000000000..053607aefee --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py @@ -0,0 +1,181 @@ +''' +copyright: Copyright (C) 2015-2024, Wazuh Inc. + + Created by Wazuh, Inc. . + + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts when these + files are modified. Specifically, these tests will check if FIM limits the size of the file + monitored to generate 'diff' information to the default value of the 'file_size' tag when + the 'report_changes' option is enabled. + The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured + files for changes to the checksums, permissions, and ownership. + +components: + - fim + +suite: files_report_changes + +targets: + - agent + +daemons: + - wazuh-syscheckd + +os_platform: + - linux + - windows + +os_version: + - Arch Linux + - Amazon Linux 2 + - Amazon Linux 1 + - CentOS 8 + - CentOS 7 + - Debian Buster + - Red Hat 8 + - Ubuntu Focal + - Ubuntu Bionic + - Windows 10 + - Windows Server 2019 + - Windows Server 2016 + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#file-size + +pytest_args: + - fim_mode: + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. + whodata: Implies real-time monitoring but adding the 'who-data' information. + - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. + 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. + +tags: + - fim_report_changes +''' +import os + +from pathlib import Path + +import pytest + +from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH +from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG +from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG +from wazuh_testing.modules.fim.patterns import FILE_SIZE_LIMIT_REACHED, EVENT_TYPE_ADDED, ERROR_MSG_FIM_EVENT_NOT_DETECTED, ERROR_MSG_FILE_LIMIT_REACHED +from wazuh_testing.modules.fim.utils import make_diff_file_path +from wazuh_testing.tools.monitors.file_monitor import FileMonitor +from wazuh_testing.utils.file import write_file, generate_string, translate_size +from wazuh_testing.utils.callbacks import generate_callback +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template + +from . import TEST_CASES_PATH, CONFIGS_PATH + + +# Marks +pytestmark = [pytest.mark.tier(level=1)] + + +# Test metadata, configuration and ids. +cases_path = Path(TEST_CASES_PATH, 'cases_file_size_default.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_disk_quota_default.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + + +# Set configurations required by the fixtures. 
+local_internal_options = {SYSCHECK_DEBUG: 2, AGENTD_WINDOWS_DEBUG: '2'} + + +# Tests +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_file_size_default(test_configuration, test_metadata, configure_local_internal_options, + truncate_monitored_files, set_wazuh_configuration, folder_to_monitor, daemons_handler, detect_end_scan): + ''' + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. For this purpose, + the test will monitor a directory, create a testing file smaller than the default limit, + and check if the compressed file has been created. Then, it will increase the size of + the testing file. Finally, the test will verify that the FIM event related to the + reached file size limit has been generated, and the compressed file in the 'queue/diff/local' + directory does not exist. + + wazuh_min_version: 4.6.0 + + tier: 1 + + parameters: + - test_configuration: + type: data + brief: Configuration used in the test. + - test_metadata: + type: data + brief: Configuration cases. + - configure_local_internal_options: + type: fixture + brief: Set internal configuration for testing. + - truncate_monitored_files: + type: fixture + brief: Reset the 'ossec.log' file and start a new monitor. + - set_wazuh_configuration: + type: fixture + brief: Configure a custom environment for testing. + - folder_to_monitor: + type: str + brief: Folder created for monitoring. + - daemons_handler: + type: fixture + brief: Handler of Wazuh daemons. + - detect_end_scan + type: fixture + brief: Check first scan end. + + assertions: + - Verify that the 'diff' folder is created when a monitored file does not exceed the size limit. + - Verify that FIM events are generated indicating the size limit reached of monitored files + to generate 'diff' information with the default limit of the 'file_size' tag (50MB). + - Verify that the 'diff' folder is removed when a monitored file exceeds the size limit. + + input_description: An external YAML file (configuration_disk_quota_default.yaml) includes configuration settings for the agent. + Different test cases are found in the cases_file_size_default.yaml file and include parameters for + the environment setup, the requests to be made, and the expected result. + + expected_output: + - r'.*Sending FIM event: (.+)$' ('added' events) + - r'.*File .* is too big for configured maximum size to perform diff operation' + + tags: + - diff + - scheduled + ''' + size_limit = translate_size('50MB') + diff_file_path = make_diff_file_path(folder=test_metadata.get('folder_to_monitor'), filename=test_metadata.get('filename')) + test_file_path = os.path.join(test_metadata.get('folder_to_monitor'), test_metadata.get('filename')) + + # Create file with a smaller size than the configured value + to_write = generate_string(int(size_limit / 10), '0') + write_file(test_file_path, data=to_write) + + wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) + wazuh_log_monitor.start(generate_callback(EVENT_TYPE_ADDED), timeout=30) + assert wazuh_log_monitor.callback_result, ERROR_MSG_FIM_EVENT_NOT_DETECTED + + if not os.path.exists(diff_file_path): + pytest.raises(FileNotFoundError(f"{diff_file_path} not found. 
It should exist before increasing the size.")) + + # Increase the size of the file over the configured value + to_write = generate_string(size_limit, '0') + write_file(test_file_path, data=to_write * 3) + + wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) + wazuh_log_monitor.start(generate_callback(FILE_SIZE_LIMIT_REACHED), timeout=30) + assert wazuh_log_monitor.callback_result, ERROR_MSG_FILE_LIMIT_REACHED + + if os.path.exists(diff_file_path): + pytest.raises(FileExistsError(f"{diff_file_path} found. It should not exist after incresing the size.")) From d6e71690628dbc9db82be059fa5622bf1c7dc174 Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Fri, 24 May 2024 16:30:36 -0300 Subject: [PATCH 409/419] IT FIM: added test test_file_size_disabled.py. --- .../test_cases/cases_file_size_disabled.yaml | 73 +++++++++ .../test_file_size_disabled.py | 155 ++++++++++++++++++ 2 files changed, 228 insertions(+) create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_file_size_disabled.yaml create mode 100644 tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_file_size_disabled.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_file_size_disabled.yaml new file mode 100644 index 00000000000..eb4cde4b687 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_file_size_disabled.yaml @@ -0,0 +1,73 @@ +- name: Test 'disk_quota' information, fim_mode = scheduled + description: Check if the 'wazuh-syscheckd' daemon skips disk_quota check. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - report_changes: 'yes' + FILE_SIZE_ENABLED: 'no' + FILE_SIZE_LIMIT: '1KB' + DISK_QUOTA_ENABLED: 'yes' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + file_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir, testfile] + fim_mode: 'scheduled' + report_changes: 'yes' + file_size_enabled: 'no' + file_size_limit: '1KB' + disk_quota_enabled: 'yes' + disk_quota_limit: '2KB' + string_size: 10000000 + +- name: Test 'disk_quota' information, fim_mode = realtime + description: Check if the 'wazuh-syscheckd' daemon skips disk_quota check. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - report_changes: 'yes' + - realtime: 'yes' + FILE_SIZE_ENABLED: 'no' + FILE_SIZE_LIMIT: '1KB' + DISK_QUOTA_ENABLED: 'yes' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + file_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir, testfile] + fim_mode: 'realtime' + report_changes: 'yes' + file_size_enabled: 'no' + file_size_limit: '1KB' + disk_quota_enabled: 'yes' + disk_quota_limit: '2KB' + string_size: 10000000 + +- name: Test 'disk_quota' information, fim_mode = whodata + description: Check if the 'wazuh-syscheckd' daemon skips disk_quota check. 
+ configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - report_changes: 'yes' + - whodata: 'yes' + FILE_SIZE_ENABLED: 'no' + FILE_SIZE_LIMIT: '1KB' + DISK_QUOTA_ENABLED: 'yes' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + file_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir, testfile] + fim_mode: 'whodata' + report_changes: 'yes' + file_size_enabled: 'no' + file_size_limit: '1KB' + disk_quota_enabled: 'yes' + disk_quota_limit: '2KB' + string_size: 10000000 diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py new file mode 100644 index 00000000000..b9776551042 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py @@ -0,0 +1,155 @@ +''' +copyright: Copyright (C) 2015-2024, Wazuh Inc. + + Created by Wazuh, Inc. . + + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts when these + files are modified. Specifically, these tests will verify that FIM does not limit the size of + the file monitored to generate 'diff' information when disabling the 'file_size' tag and + the 'report_changes' option is enabled. + The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured + files for changes to the checksums, permissions, and ownership. + +components: + - fim + +suite: files_report_changes + +targets: + - agent + +daemons: + - wazuh-syscheckd + +os_platform: + - linux + - windows + +os_version: + - Arch Linux + - Amazon Linux 2 + - Amazon Linux 1 + - CentOS 8 + - CentOS 7 + - Debian Buster + - Red Hat 8 + - Ubuntu Focal + - Ubuntu Bionic + - Windows 10 + - Windows Server 2019 + - Windows Server 2016 + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#file-size + +pytest_args: + - fim_mode: + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. + whodata: Implies real-time monitoring but adding the 'who-data' information. + - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. + 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. + +tags: + - fim_report_changes +''' +from pathlib import Path + +import pytest + +from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH +from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG +from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG +from wazuh_testing.modules.fim.patterns import FILE_SIZE_LIMIT_REACHED +from wazuh_testing.tools.monitors.file_monitor import FileMonitor +from wazuh_testing.utils.file import write_file, generate_string +from wazuh_testing.utils.callbacks import generate_callback +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template + +from . 
import TEST_CASES_PATH, CONFIGS_PATH + + +# Marks +pytestmark = [pytest.mark.tier(level=1)] + + +# Test metadata, configuration and ids. +cases_path = Path(TEST_CASES_PATH, 'cases_file_size_disabled.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_diff_size.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + + +# Set configurations required by the fixtures. +local_internal_options = {SYSCHECK_DEBUG: 2, AGENTD_WINDOWS_DEBUG: '2'} + + +# Tests +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_file_size_disabled(test_configuration, test_metadata, configure_local_internal_options, + truncate_monitored_files, set_wazuh_configuration, folder_to_monitor, file_to_monitor, daemons_handler): + ''' + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information when the 'file_size' option is disabled. For this purpose, the test + will monitor a directory and create a testing file larger than the limit set in the + 'file_size' tag. Finally, the test will verify that the FIM event related to the + reached file size limit has not been generated. + + wazuh_min_version: 4.6.0 + + tier: 1 + + parameters: + parameters: + - test_configuration: + type: data + brief: Configuration used in the test. + - test_metadata: + type: data + brief: Configuration cases. + - configure_local_internal_options: + type: fixture + brief: Set internal configuration for testing. + - truncate_monitored_files: + type: fixture + brief: Reset the 'ossec.log' file and start a new monitor. + - set_wazuh_configuration: + type: fixture + brief: Configure a custom environment for testing. + - folder_to_monitor: + type: str + brief: Folder created for monitoring. + - file_to_monitor: + type: str + brief: File created for monitoring. + - daemons_handler: + type: fixture + brief: Handler of Wazuh daemons. + + assertions: + - Verify that no FIM events are generated indicating the reached file size limit of monitored files + when the 'file_size' option is disabled. + + input_description: An external YAML file (configuration_diff_size.yaml) includes configuration settings for the agent. + Different test cases are found in the cases_file_size_disabled.yaml file and include parameters for + the environment setup, the requests to be made, and the expected result. + + expected_output: + - r'.*The (.*) of the file size .* exceeds the disk_quota.*' (if the test fails) + + tags: + - diff + - scheduled + ''' + to_write = generate_string(test_metadata.get('string_size'), '0') + write_file(test_metadata.get('file_to_monitor'), data=to_write) + + wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) + wazuh_log_monitor.start(generate_callback(FILE_SIZE_LIMIT_REACHED), timeout=30) + assert (wazuh_log_monitor.callback_result == None), f'Error exceeds disk quota detected.' From 71207762c5e57282a11d00ccd57da94114ef9360 Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Mon, 27 May 2024 16:08:13 -0300 Subject: [PATCH 410/419] IT FIM: added test test_file_size_values.py. 
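
The test added below repeats the file_size check for several configured limits
(1KB, 100KB, 1MB and 10MB) in scheduled, realtime and whodata modes. Per case it
boils down to the following sketch, where the example 'test_metadata' entry and
the file path stand in for the values supplied by the parametrized cases:

    import os
    from wazuh_testing.utils.file import generate_string, translate_size, write_file

    test_metadata = {'file_size_limit': '100KB'}     # one of the parametrized cases below
    test_file = os.path.join(os.getcwd(), 'test_dir', 'testfile')

    size_limit = translate_size(test_metadata['file_size_limit'])
    write_file(test_file, data=generate_string(size_limit // 2, '0'))
    # below the limit: an 'added' event is expected and the diff copy must exist

    write_file(test_file, data=generate_string(size_limit, '0') * 3)
    # above the limit: the "Folder ... has been deleted" and "too big ... to perform
    # diff operation" messages are expected and the diff copy must be gone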
--- .../test_cases/cases_file_size_values.yaml | 283 ++++++++++++++++++ .../test_file_size_values.py | 186 ++++++++++++ 2 files changed, 469 insertions(+) create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_file_size_values.yaml create mode 100644 tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_file_size_values.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_file_size_values.yaml new file mode 100644 index 00000000000..8b6fcd2f36b --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_file_size_values.yaml @@ -0,0 +1,283 @@ +- name: Test 'file_size' information, fim_mode = scheduled + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - report_changes: 'yes' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '1KB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: 'scheduled' + report_changes: 'yes' + file_size_enabled: 'yes' + file_size_limit: '1KB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + +- name: Test 'file_size' information, fim_mode = scheduled + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - report_changes: 'yes' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '100KB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: 'scheduled' + report_changes: 'yes' + file_size_enabled: 'yes' + file_size_limit: '100KB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + +- name: Test 'file_size' information, fim_mode = scheduled + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - report_changes: 'yes' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '1MB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: 'scheduled' + report_changes: 'yes' + file_size_enabled: 'yes' + file_size_limit: '1MB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + +- name: Test 'file_size' information, fim_mode = scheduled + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. 
+ configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - report_changes: 'yes' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '10MB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: 'scheduled' + report_changes: 'yes' + file_size_enabled: 'yes' + file_size_limit: '10MB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + +- name: Test 'file_size' information, fim_mode = realtime + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - realtime: 'yes' + - report_changes: 'yes' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '1KB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: 'realtime' + report_changes: 'yes' + file_size_enabled: 'yes' + file_size_limit: '1KB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + +- name: Test 'file_size' information, fim_mode = realtime + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - realtime: 'yes' + - report_changes: 'yes' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '100KB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: 'realtime' + report_changes: 'yes' + file_size_enabled: 'yes' + file_size_limit: '100KB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + +- name: Test 'file_size' information, fim_mode = realtime + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - realtime: 'yes' + - report_changes: 'yes' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '1MB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: 'realtime' + report_changes: 'yes' + file_size_enabled: 'yes' + file_size_limit: '1MB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + +- name: Test 'file_size' information, fim_mode = realtime + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. 
+ configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - realtime: 'yes' + - report_changes: 'yes' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '10MB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: 'realtime' + report_changes: 'yes' + file_size_enabled: 'yes' + file_size_limit: '10MB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + +- name: Test 'file_size' information, fim_mode = whodata + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - whodata: 'yes' + - report_changes: 'yes' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '1KB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: 'whodata' + report_changes: 'yes' + file_size_enabled: 'yes' + file_size_limit: '1KB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + +- name: Test 'file_size' information, fim_mode = whodata + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - whodata: 'yes' + - report_changes: 'yes' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '100KB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: 'whodata' + report_changes: 'yes' + file_size_enabled: 'yes' + file_size_limit: '100KB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + +- name: Test 'file_size' information, fim_mode = whodata + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - whodata: 'yes' + - report_changes: 'yes' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '1MB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: 'whodata' + report_changes: 'yes' + file_size_enabled: 'yes' + file_size_limit: '1MB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' + +- name: Test 'file_size' information, fim_mode = whodata + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the default value of the 'file_size' option. 
+ configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + ATTRIBUTES: + - whodata: 'yes' + - report_changes: 'yes' + FILE_SIZE_ENABLED: 'yes' + FILE_SIZE_LIMIT: '10MB' + DISK_QUOTA_ENABLED: 'no' + DISK_QUOTA_LIMIT: '2KB' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: 'testfile' + fim_mode: 'whodata' + report_changes: 'yes' + file_size_enabled: 'yes' + file_size_limit: '10MB' + disk_quota_enabled: 'no' + disk_quota_limit: '2KB' diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py new file mode 100644 index 00000000000..95c040e88fd --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py @@ -0,0 +1,186 @@ +''' +copyright: Copyright (C) 2015-2024, Wazuh Inc. + + Created by Wazuh, Inc. . + + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts when + these files are modified. Specifically, these tests will check if FIM limits the size of + the file monitored to generate 'diff' information to the limit set in the 'file_size' tag + when the 'report_changes' option is enabled. + The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured + files for changes to the checksums, permissions, and ownership. + +components: + - fim + +suite: files_report_changes + +targets: + - agent + +daemons: + - wazuh-syscheckd + +os_platform: + - linux + - windows + +os_version: + - Arch Linux + - Amazon Linux 2 + - Amazon Linux 1 + - CentOS 8 + - CentOS 7 + - Debian Buster + - Red Hat 8 + - Ubuntu Focal + - Ubuntu Bionic + - Windows 10 + - Windows Server 2019 + - Windows Server 2016 + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#file-size + +pytest_args: + - fim_mode: + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. + whodata: Implies real-time monitoring but adding the 'who-data' information. + - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. + 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. 
+ +tags: + - fim_report_changes +''' +import os + +from pathlib import Path + +import pytest + +from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH +from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG, FILE_MAX_SIZE +from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG +from wazuh_testing.modules.fim.patterns import FILE_SIZE_LIMIT_REACHED, EVENT_TYPE_ADDED, ERROR_MSG_FIM_EVENT_NOT_DETECTED, ERROR_MSG_FILE_LIMIT_REACHED, DIFF_FOLDER_DELETED, ERROR_MSG_FOLDER_DELETED +from wazuh_testing.modules.fim.utils import make_diff_file_path +from wazuh_testing.tools.monitors.file_monitor import FileMonitor +from wazuh_testing.utils.file import write_file, generate_string, translate_size +from wazuh_testing.utils.callbacks import generate_callback +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template + +from . import TEST_CASES_PATH, CONFIGS_PATH + + +# Marks +pytestmark = [pytest.mark.tier(level=1)] + + +# Test metadata, configuration and ids. +cases_path = Path(TEST_CASES_PATH, 'cases_file_size_values.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_diff_size.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + + +# Set configurations required by the fixtures. +local_internal_options = {SYSCHECK_DEBUG: 2, AGENTD_WINDOWS_DEBUG: 2, FILE_MAX_SIZE: 0} + + +# Tests +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_file_size_values(test_configuration, test_metadata, configure_local_internal_options, + truncate_monitored_files, set_wazuh_configuration, folder_to_monitor, daemons_handler, detect_end_scan): + ''' + description: Check if the 'wazuh-syscheckd' daemon limits the size of the monitored file to generate + 'diff' information from the limit set in the 'file_size' tag. For this purpose, the test + will monitor a directory, create a testing file smaller than the 'file_limit' value, + and check if the compressed file has been created. Then, it will increase the size of + the testing file. Finally, the test will verify that the FIM event related to the reached + file size limit has been generated, and the compressed file in the 'queue/diff/local' + directory does not exist. + + wazuh_min_version: 4.6.0 + + tier: 1 + + parameters: + - test_configuration: + type: data + brief: Configuration used in the test. + - test_metadata: + type: data + brief: Configuration cases. + - configure_local_internal_options: + type: fixture + brief: Set internal configuration for testing. + - truncate_monitored_files: + type: fixture + brief: Reset the 'ossec.log' file and start a new monitor. + - set_wazuh_configuration: + type: fixture + brief: Configure a custom environment for testing. + - folder_to_monitor: + type: str + brief: Folder created for monitoring. + - daemons_handler: + type: fixture + brief: Handler of Wazuh daemons. + - detect_end_scan + type: fixture + brief: Check first scan end. + + assertions: + - Verify that the 'diff' folder is created when a monitored file does not exceed the size limit. + - Verify that FIM events are generated indicating the size limit reached of monitored files + to generate 'diff' information when a limit is set in the 'file_size' tag. + - Verify that the 'diff' folder is removed when a monitored file exceeds the size limit. 
+ + input_description: An external YAML file (configuration_diff_size.yaml) includes configuration settings for the agent. + Different test cases are found in the cases_file_size_values.yaml file and include parameters for + the environment setup, the requests to be made, and the expected result. + + expected_output: + - r'.*Sending FIM event: (.+)$' ('added' events) + - r'.*Folder .* has been deleted.*' + - r'.*File .* is too big for configured maximum size to perform diff operation' + + tags: + - diff + - scheduled + ''' + size_limit = translate_size(test_metadata.get('file_size_limit')) + diff_file_path = make_diff_file_path(folder=test_metadata.get('folder_to_monitor'), filename=test_metadata.get('filename')) + test_file_path = os.path.join(test_metadata.get('folder_to_monitor'), test_metadata.get('filename')) + + # Create file with a smaller size than the configured value + to_write = generate_string(int(size_limit / 2), '0') + write_file(test_file_path, data=to_write) + + wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) + wazuh_log_monitor.start(generate_callback(EVENT_TYPE_ADDED), timeout=30) + assert wazuh_log_monitor.callback_result, ERROR_MSG_FIM_EVENT_NOT_DETECTED + + if not os.path.exists(diff_file_path): + pytest.raises(FileNotFoundError(f"{diff_file_path} not found. It should exist before increasing the size.")) + + # Increase the size of the file over the configured value + to_write = generate_string(size_limit, '0') + write_file(test_file_path, data=to_write * 3) + + wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) + wazuh_log_monitor.start(generate_callback(DIFF_FOLDER_DELETED), timeout=30) + assert wazuh_log_monitor.callback_result, ERROR_MSG_FOLDER_DELETED + + if os.path.exists(diff_file_path): + pytest.raises(FileExistsError(f"{diff_file_path} found. It should not exist after incresing the size.")) + + wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) + wazuh_log_monitor.start(generate_callback(FILE_SIZE_LIMIT_REACHED), timeout=30) + assert wazuh_log_monitor.callback_result, ERROR_MSG_FILE_LIMIT_REACHED From 199abf4c4a692ad15c2cdba51d12aa75325b0c17 Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Thu, 30 May 2024 18:13:42 -0300 Subject: [PATCH 411/419] IT FIM: added test test_large_changes.py. 
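
The test added below sets 'diff_size_limit' to 200KB and, for each case, checks
whether the 'content_changes' field of the resulting FIM event carries the
'More changes' tag: it must appear when the reported change does not fit in that
limit and must be absent otherwise. Roughly, per case (sketch only; 'event' is
assumed here to be the parsed 'modified' FIM event with the change text exposed
under data.content_changes, and 'test_metadata' is the case dictionary below):

    content_changes = event['data'].get('content_changes', '')
    if test_metadata['has_more_changes']:        # 200000-character cases: diff exceeds 200KB
        assert 'More changes' in content_changes
    else:                                        # 500-character case: the full diff fits
        assert 'More changes' not in content_changes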
--- .../configuration_large_changes.yaml | 32 +++ .../data/test_cases/cases_large_changes.yaml | 179 ++++++++++++++++ .../test_report_changes/test_large_changes.py | 194 ++++++++++++++++++ 3 files changed, 405 insertions(+) create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_large_changes.yaml create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_large_changes.yaml create mode 100644 tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_large_changes.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_large_changes.yaml new file mode 100644 index 00000000000..7c2542c201c --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_large_changes.yaml @@ -0,0 +1,32 @@ +- sections: + - section: syscheck + elements: + - disabled: + value: 'no' + - frequency: + value: INTERVAL + - directories: + value: TEST_DIRECTORIES + attributes: + - check_all: 'yes' + - realtime: REALTIME + - whodata: WHODATA + - report_changes: 'yes' + - diff_size_limit: 200KB + + - section: sca + elements: + - enabled: + value: 'no' + + - section: rootcheck + elements: + - disabled: + value: 'yes' + + - section: wodle + attributes: + - name: syscollector + elements: + - disabled: + value: 'yes' diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_large_changes.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_large_changes.yaml new file mode 100644 index 00000000000..333935f50d3 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_large_changes.yaml @@ -0,0 +1,179 @@ +- name: Test changes smaller than limit (Scheduled mode) + description: Test that changes are smaller than limit, 'More changes' does not appear in content_changes + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + INTERVAL: 4 + REALTIME: 'no' + WHODATA: 'no' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + filename: regular_1 + original_size: 500 + modified_size: 500 + has_more_changes: false + fim_mode: scheduled + +- name: Test changes smaller than limit (Realtime mode) + description: Test that changes are smaller than limit, 'More changes' does not appear in content_changes + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + INTERVAL: 10000 + REALTIME: 'yes' + WHODATA: 'no' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + filename: regular_1 + original_size: 500 + modified_size: 500 + has_more_changes: false + fim_mode: realtime + +- name: Test changes smaller than limit (Whodata mode) + description: Test that changes are smaller than limit, 'More changes' does not appear in content_changes + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + INTERVAL: 10000 + REALTIME: 'no' + WHODATA: 'yes' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + filename: regular_1 + original_size: 500 + modified_size: 500 + has_more_changes: false + fim_mode: whodata + +- name: Test large changes - Same size (Scheduled mode) + description: Test when changes are 
same size of set limit, 'More changes' appears in content_changes + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + INTERVAL: 4 + REALTIME: 'no' + WHODATA: 'no' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + filename: regular_2 + original_size: 200000 + modified_size: 200000 + has_more_changes: true + fim_mode: scheduled + +- name: Test large changes - Same size (Realtime mode) + description: Test when changes are same size of set limit, 'More changes' appears in content_changes + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + INTERVAL: 10000 + REALTIME: 'yes' + WHODATA: 'no' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + filename: regular_2 + original_size: 200000 + modified_size: 200000 + has_more_changes: true + fim_mode: realtime + +- name: Test large changes - Same size (Whodata mode) + description: Test when changes are same size of set limit, 'More changes' appears in content_changes. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + INTERVAL: 10000 + REALTIME: 'no' + WHODATA: 'yes' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + filename: regular_2 + original_size: 200000 + modified_size: 200000 + has_more_changes: true + fim_mode: whodata + +- name: Test large changes - File bigger after change (Scheduled mode) + description: Test that changes are bigger than limit, 'More changes' appears in content_changes. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + INTERVAL: 4 + REALTIME: 'no' + WHODATA: 'no' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + filename: regular_3 + original_size: 10 + modified_size: 200000 + has_more_changes: true + fim_mode: scheduled + +- name: Test large changes - File bigger after change (Realtime mode) + description: Test that changes are bigger than limit, 'More changes' appears in content_changes. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + INTERVAL: 10000 + REALTIME: 'yes' + WHODATA: 'no' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + filename: regular_3 + original_size: 10 + modified_size: 200000 + has_more_changes: true + fim_mode: realtime + +- name: Test large changes - File bigger after change (Whodata mode) + description: Test that changes are bigger than limit, 'More changes' appears in content_changes. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + INTERVAL: 10000 + REALTIME: 'no' + WHODATA: 'yes' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + filename: regular_3 + original_size: 10 + modified_size: 200000 + has_more_changes: true + fim_mode: whodata + +- name: Test large changes - File smaller after change (Scheduled mode) + description: Test when file is smaller after change, 'More changes' appears in content_changes. 
+ configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + INTERVAL: 4 + REALTIME: 'no' + WHODATA: 'no' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + filename: regular_4 + original_size: 200000 + modified_size: 10 + has_more_changes: true + fim_mode: scheduled + +- name: Test large changes - File smaller after change (Realtime mode) + description: Test when file is smaller after change, 'More changes' appears in content_changes. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + INTERVAL: 10000 + REALTIME: 'yes' + WHODATA: 'no' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + filename: regular_4 + original_size: 200000 + modified_size: 10 + has_more_changes: true + fim_mode: realtime + +- name: Test large changes - File smaller after change (Whodata mode) + description: Test when file is smaller after change, 'More changes' appears in content_changes. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + INTERVAL: 10000 + REALTIME: 'no' + WHODATA: 'yes' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + filename: regular_4 + original_size: 200000 + modified_size: 10 + has_more_changes: true + fim_mode: whodata diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py b/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py new file mode 100644 index 00000000000..c1ce2155f93 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py @@ -0,0 +1,194 @@ +''' +copyright: Copyright (C) 2015-2024, Wazuh Inc. + + Created by Wazuh, Inc. . + + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts when + these files are modified. Specifically, these tests will verify that FIM events include + the 'content_changes' field with the tag 'More changes' when it exceeds the maximum size + allowed, and the 'report_changes' option is enabled. + The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured + files for changes to the checksums, permissions, and ownership. + +components: + - fim + +suite: files_report_changes + +targets: + - agent + +daemons: + - wazuh-syscheckd + +os_platform: + - linux + - windows + +os_version: + - Arch Linux + - Amazon Linux 2 + - Amazon Linux 1 + - CentOS 8 + - CentOS 7 + - Debian Buster + - Red Hat 8 + - Solaris 10 + - Solaris 11 + - macOS Catalina + - macOS Server + - Ubuntu Focal + - Ubuntu Bionic + - Windows 10 + - Windows Server 2019 + - Windows Server 2016 + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#diff + +pytest_args: + - fim_mode: + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. + whodata: Implies real-time monitoring but adding the 'who-data' information. + - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. + 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. 
+ +tags: + - fim_report_changes +''' +import os +import sys + +from pathlib import Path + +import pytest + +from wazuh_testing.constants.platforms import WINDOWS +from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH +from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG +from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG +from wazuh_testing.modules.fim.patterns import EVENT_TYPE_MODIFIED, EVENT_TYPE_ADDED, ERROR_MSG_FIM_EVENT_NOT_DETECTED +from wazuh_testing.modules.fim.utils import get_fim_event_data +from wazuh_testing.tools.monitors.file_monitor import FileMonitor +from wazuh_testing.utils.file import write_file, generate_string +from wazuh_testing.utils.callbacks import generate_callback +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template + +from . import TEST_CASES_PATH, CONFIGS_PATH + + +# Marks +pytestmark = [pytest.mark.agent, pytest.mark.linux, pytest.mark.win32, pytest.mark.darwin, pytest.mark.tier(level=1)] + + +# Test metadata, configuration and ids. +cases_path = Path(TEST_CASES_PATH, 'cases_large_changes.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_large_changes.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + + +# Set configurations required by the fixtures. +local_internal_options = {SYSCHECK_DEBUG: 2, AGENTD_WINDOWS_DEBUG: 2} + + +# Tests +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_large_changes(test_configuration, test_metadata, configure_local_internal_options, + truncate_monitored_files, set_wazuh_configuration, folder_to_monitor, daemons_handler, detect_end_scan): + ''' + description: Check if the 'wazuh-syscheckd' daemon detects the character limit in the file changes is reached + showing the 'More changes' tag in the 'content_changes' field of the generated events. For this + purpose, the test will monitor a directory, add a testing file and modify it, adding more characters + than the allowed limit. Then, it will unzip the 'diff' and get the size of the changes. Finally, + the test will verify that the generated FIM event contains in its 'content_changes' field the proper + value depending on the test case. + + wazuh_min_version: 4.6.0 + + tier: 1 + + parameters: + - test_configuration: + type: data + brief: Configuration used in the test. + - test_metadata: + type: data + brief: Configuration cases. + - configure_local_internal_options: + type: fixture + brief: Set internal configuration for testing. + - truncate_monitored_files: + type: fixture + brief: Reset the 'ossec.log' file and start a new monitor. + - set_wazuh_configuration: + type: fixture + brief: Configure a custom environment for testing. + - folder_to_monitor: + type: str + brief: Folder created for monitoring. + - daemons_handler: + type: fixture + brief: Handler of Wazuh daemons. + - detect_end_scan + type: fixture + brief: Check first scan end. + + assertions: + - Verify that FIM events are generated when adding and modifying the testing file. + - Verify that FIM events include the 'content_changes' field with the 'More changes' tag when + the changes made on the testing file have more characters than the allowed limit. + - Verify that FIM events include the 'content_changes' field with the old content + of the monitored file. 
+        - Verify that FIM events include the 'content_changes' field with the new content
+          of the monitored file when the old content is lower than the allowed limit or
+          the testing platform is Windows.
+
+    input_description: The file 'configuration_large_changes.yaml' provides the configuration template.
+                       The file 'cases_large_changes.yaml' provides the test cases configuration
+                       details for each test case.
+
+    expected_output:
+        - r'.*Sending FIM event: (.+)$' ('added' and 'modified' events)
+        - The 'More changes' message appears in content_changes when the changes size is bigger than the set limit.
+    '''
+    wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH)
+    limit = 50000
+    test_file_path = os.path.join(test_metadata.get('folder_to_monitor'), test_metadata.get('filename'))
+
+    # Create the file and capture the event.
+    original_string = generate_string(test_metadata.get('original_size'), '0')
+    write_file(test_file_path, data=original_string)
+
+    wazuh_log_monitor.start(generate_callback(EVENT_TYPE_ADDED), timeout=30)
+    assert wazuh_log_monitor.callback_result, ERROR_MSG_FIM_EVENT_NOT_DETECTED
+
+    # Modify the file with new content
+    modified_string = generate_string(test_metadata.get('modified_size'), '1')
+    write_file(test_file_path, data=modified_string)
+
+    wazuh_log_monitor.start(generate_callback(EVENT_TYPE_MODIFIED), timeout=20)
+    assert wazuh_log_monitor.callback_result, ERROR_MSG_FIM_EVENT_NOT_DETECTED
+
+    event = get_fim_event_data(wazuh_log_monitor.callback_result)
+
+    # Assert 'More changes' is shown when the command returns more than 'limit' characters
+    if test_metadata.get('has_more_changes'):
+        assert 'More changes' in event['content_changes'], 'Did not find event with "More changes" within content_changes.'
+    else:
+        assert 'More changes' not in event['content_changes'], '"More changes" found within content_changes.'
+
+    # Assert old content is shown in content_changes
+    assert '0' in event['content_changes'], '"0" is the old value but it is not found within content_changes'
+
+    # Assert new content is shown when old content is lower than the limit or platform is Windows
+    if test_metadata.get('original_size') < limit or sys.platform == WINDOWS:
+        assert '1' in event['content_changes'], '"1" is the new value but it is not found within content_changes'

From c725984799921531bdf34d98d8f5a5c9cc964874 Mon Sep 17 00:00:00 2001
From: Nicolas Gomez Palacios
Date: Thu, 30 May 2024 18:16:06 -0300
Subject: [PATCH 412/419] IT fim: added agent, linux, win32 and darwin marks.
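
These marks only take effect when they are selected at collection time (and,
being custom names, registered in the suite's pytest configuration, which is
outside this patch). Assuming that registration exists, a platform-restricted
run can be selected with pytest's marker expression; the snippet below is an
illustrative invocation only, equivalent to passing the same arguments to
python -m pytest on the command line:

    # Illustrative: run only the agent-side Linux cases of this suite.
    import pytest

    pytest.main(['-m', 'agent and linux',
                 'tests/integration/test_fim/test_files/test_report_changes/'])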
--- .../test_files/test_report_changes/test_diff_size_limit.py | 2 +- .../test_files/test_report_changes/test_disk_quota_default.py | 2 +- .../test_files/test_report_changes/test_disk_quota_disabled.py | 2 +- .../test_files/test_report_changes/test_file_size_default.py | 2 +- .../test_files/test_report_changes/test_file_size_disabled.py | 2 +- .../test_files/test_report_changes/test_file_size_values.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_diff_size_limit.py b/tests/integration/test_fim/test_files/test_report_changes/test_diff_size_limit.py index 12a178a0e3f..0ed7d3d1428 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_diff_size_limit.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_diff_size_limit.py @@ -79,7 +79,7 @@ # Marks -pytestmark = [pytest.mark.tier(level=1)] +pytestmark = [pytest.mark.agent, pytest.mark.linux, pytest.mark.win32, pytest.mark.darwin, pytest.mark.tier(level=1)] # Test metadata, configuration and ids. diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py index 6511c030e2d..69744820e6c 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py @@ -75,7 +75,7 @@ # Marks -pytestmark = [pytest.mark.tier(level=1)] +pytestmark = [pytest.mark.agent, pytest.mark.linux, pytest.mark.win32, pytest.mark.darwin, pytest.mark.tier(level=1)] # Test metadata, configuration and ids. diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py index 59cc493a314..2aaa2de3771 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py @@ -76,7 +76,7 @@ # Marks -pytestmark = [pytest.mark.tier(level=1)] +pytestmark = [pytest.mark.agent, pytest.mark.linux, pytest.mark.win32, pytest.mark.darwin, pytest.mark.tier(level=1)] # Test metadata, configuration and ids. diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py index 053607aefee..37f74c09b1a 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py @@ -79,7 +79,7 @@ # Marks -pytestmark = [pytest.mark.tier(level=1)] +pytestmark = [pytest.mark.agent, pytest.mark.linux, pytest.mark.win32, pytest.mark.darwin, pytest.mark.tier(level=1)] # Test metadata, configuration and ids. 
diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py index b9776551042..81bcbf5e899 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py @@ -76,7 +76,7 @@ # Marks -pytestmark = [pytest.mark.tier(level=1)] +pytestmark = [pytest.mark.agent, pytest.mark.linux, pytest.mark.win32, pytest.mark.darwin, pytest.mark.tier(level=1)] # Test metadata, configuration and ids. diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py index 95c040e88fd..aa2de8cde5a 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py @@ -79,7 +79,7 @@ # Marks -pytestmark = [pytest.mark.tier(level=1)] +pytestmark = [pytest.mark.agent, pytest.mark.linux, pytest.mark.win32, pytest.mark.darwin, pytest.mark.tier(level=1)] # Test metadata, configuration and ids. From cef2f709dc9ef66489e00d5d2f3dcb15bb14e572 Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Thu, 30 May 2024 18:56:27 -0300 Subject: [PATCH 413/419] IT fim: fixes test test_large_changes.py for macos. --- .../data/test_cases/cases_large_changes.yaml | 72 ++++++++++++------- 1 file changed, 48 insertions(+), 24 deletions(-) diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_large_changes.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_large_changes.yaml index 333935f50d3..2acabbf94a4 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_large_changes.yaml +++ b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_large_changes.yaml @@ -1,12 +1,14 @@ - name: Test changes smaller than limit (Scheduled mode) description: Test that changes are smaller than limit, 'More changes' does not appear in content_changes configuration_parameters: - TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] INTERVAL: 4 REALTIME: 'no' WHODATA: 'no' metadata: - folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] filename: regular_1 original_size: 500 modified_size: 500 @@ -16,12 +18,14 @@ - name: Test changes smaller than limit (Realtime mode) description: Test that changes are smaller than limit, 'More changes' does not appear in content_changes configuration_parameters: - TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] INTERVAL: 10000 REALTIME: 'yes' WHODATA: 'no' metadata: - folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] filename: regular_1 original_size: 500 modified_size: 500 @@ -31,12 +35,14 @@ - name: Test changes smaller than limit (Whodata mode) description: Test that changes are smaller than limit, 'More changes' does 
not appear in content_changes configuration_parameters: - TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] INTERVAL: 10000 REALTIME: 'no' WHODATA: 'yes' metadata: - folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] filename: regular_1 original_size: 500 modified_size: 500 @@ -46,12 +52,14 @@ - name: Test large changes - Same size (Scheduled mode) description: Test when changes are same size of set limit, 'More changes' appears in content_changes configuration_parameters: - TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] INTERVAL: 4 REALTIME: 'no' WHODATA: 'no' metadata: - folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] filename: regular_2 original_size: 200000 modified_size: 200000 @@ -61,12 +69,14 @@ - name: Test large changes - Same size (Realtime mode) description: Test when changes are same size of set limit, 'More changes' appears in content_changes configuration_parameters: - TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] INTERVAL: 10000 REALTIME: 'yes' WHODATA: 'no' metadata: - folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] filename: regular_2 original_size: 200000 modified_size: 200000 @@ -76,12 +86,14 @@ - name: Test large changes - Same size (Whodata mode) description: Test when changes are same size of set limit, 'More changes' appears in content_changes. configuration_parameters: - TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] INTERVAL: 10000 REALTIME: 'no' WHODATA: 'yes' metadata: - folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] filename: regular_2 original_size: 200000 modified_size: 200000 @@ -91,12 +103,14 @@ - name: Test large changes - File bigger after change (Scheduled mode) description: Test that changes are bigger than limit, 'More changes' appears in content_changes. configuration_parameters: - TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] INTERVAL: 4 REALTIME: 'no' WHODATA: 'no' metadata: - folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] filename: regular_3 original_size: 10 modified_size: 200000 @@ -106,12 +120,14 @@ - name: Test large changes - File bigger after change (Realtime mode) description: Test that changes are bigger than limit, 'More changes' appears in content_changes. 
configuration_parameters: - TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] INTERVAL: 10000 REALTIME: 'yes' WHODATA: 'no' metadata: - folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] filename: regular_3 original_size: 10 modified_size: 200000 @@ -121,12 +137,14 @@ - name: Test large changes - File bigger after change (Whodata mode) description: Test that changes are bigger than limit, 'More changes' appears in content_changes. configuration_parameters: - TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] INTERVAL: 10000 REALTIME: 'no' WHODATA: 'yes' metadata: - folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] filename: regular_3 original_size: 10 modified_size: 200000 @@ -136,12 +154,14 @@ - name: Test large changes - File smaller after change (Scheduled mode) description: Test when file is smaller after change, 'More changes' appears in content_changes. configuration_parameters: - TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] INTERVAL: 4 REALTIME: 'no' WHODATA: 'no' metadata: - folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] filename: regular_4 original_size: 200000 modified_size: 10 @@ -151,12 +171,14 @@ - name: Test large changes - File smaller after change (Realtime mode) description: Test when file is smaller after change, 'More changes' appears in content_changes. configuration_parameters: - TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] INTERVAL: 10000 REALTIME: 'yes' WHODATA: 'no' metadata: - folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] filename: regular_4 original_size: 200000 modified_size: 10 @@ -166,12 +188,14 @@ - name: Test large changes - File smaller after change (Whodata mode) description: Test when file is smaller after change, 'More changes' appears in content_changes. configuration_parameters: - TEST_DIRECTORIES: !!python/object/apply:os.path.join [/test_dir] + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] INTERVAL: 10000 REALTIME: 'no' WHODATA: 'yes' metadata: - folder_to_monitor: !!python/object/apply:os.path.join [/test_dir] + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] filename: regular_4 original_size: 200000 modified_size: 10 From f3876ff946ca44a6f86094ce90e3a923d7563903 Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Thu, 30 May 2024 20:47:12 -0300 Subject: [PATCH 414/419] IT FIM: added test test_report_changes_and_diff.py. moved fixture create_paths_files. 
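
Because create_paths_files now lives in tests/integration/test_fim/conftest.py,
pytest's conftest discovery makes it available to every module under test_fim/
without an import. A minimal sketch of how a test consumes it (the test name is
hypothetical; fixture and metadata names are taken from this patch, and the
metadata layout mirrors cases_report_changes_and_diff.yaml):

    import os

    def test_uses_shared_fixture(test_metadata, create_paths_files):
        # The fixture reads test_metadata['path_or_files_to_create'], creates each
        # directory (or file) before the test body runs, yields that same list,
        # and removes everything again on teardown.
        for path in create_paths_files:
            assert os.path.exists(path)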
--- tests/integration/test_fim/conftest.py | 31 +++ .../test_files/test_moving_files/conftest.py | 39 ---- ...configuration_report_changes_and_diff.yaml | 40 ++++ .../cases_report_changes_and_diff.yaml | 101 +++++++++ .../test_report_changes_and_diff.py | 198 ++++++++++++++++++ 5 files changed, 370 insertions(+), 39 deletions(-) delete mode 100644 tests/integration/test_fim/test_files/test_moving_files/conftest.py create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_report_changes_and_diff.yaml create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_report_changes_and_diff.yaml create mode 100644 tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py diff --git a/tests/integration/test_fim/conftest.py b/tests/integration/test_fim/conftest.py index a2da15bbcc0..3003727eea7 100644 --- a/tests/integration/test_fim/conftest.py +++ b/tests/integration/test_fim/conftest.py @@ -175,3 +175,34 @@ def detect_end_scan(test_metadata: dict) -> None: wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) wazuh_log_monitor.start(timeout=60, callback=generate_callback(EVENT_TYPE_SCAN_END)) assert wazuh_log_monitor.callback_result + + +@pytest.fixture() +def create_paths_files(test_metadata: dict) -> str: + to_edit = test_metadata.get('path_or_files_to_create') + + if not isinstance(to_edit, list): + raise TypeError(f"`files` should be a 'list', not a '{type(to_edit)}'") + + created_files = [] + for item in to_edit: + item_path = Path(item) + if item_path.exists(): + raise FileExistsError(f"`{item_path}` already exists.") + + # If file does not have suffixes, consider it a directory + if item_path.suffixes == []: + # Add a dummy file to the target directory to create the directory + created_files.extend(file.create_parent_directories( + Path(item_path).joinpath('dummy.file'))) + else: + created_files.extend(file.create_parent_directories(item_path)) + + file.write_file(file_path=item_path, data='') + created_files.append(item_path) + + yield to_edit + + for item in to_edit: + item_path = Path(item) + file.delete_path_recursively(item_path) diff --git a/tests/integration/test_fim/test_files/test_moving_files/conftest.py b/tests/integration/test_fim/test_files/test_moving_files/conftest.py deleted file mode 100644 index 3d2db0e788f..00000000000 --- a/tests/integration/test_fim/test_files/test_moving_files/conftest.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2015-2024, Wazuh Inc. -# Created by Wazuh, Inc. . 
-# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 -from pathlib import Path - -import pytest - -from wazuh_testing.utils import file - - -@pytest.fixture() -def create_paths_files(test_metadata: dict) -> str: - to_edit = test_metadata.get('path_or_files_to_create') - - if not isinstance(to_edit, list): - raise TypeError(f"`files` should be a 'list', not a '{type(to_edit)}'") - - created_files = [] - for item in to_edit: - item_path = Path(item) - if item_path.exists(): - raise FileExistsError(f"`{item_path}` already exists.") - - # If file does not have suffixes, consider it a directory - if item_path.suffixes == []: - # Add a dummy file to the target directory to create the directory - created_files.extend(file.create_parent_directories( - Path(item_path).joinpath('dummy.file'))) - else: - created_files.extend(file.create_parent_directories(item_path)) - - file.write_file(file_path=item_path, data='') - created_files.append(item_path) - - yield to_edit - - for item in to_edit: - item_path = Path(item) - file.delete_path_recursively(item_path) diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_report_changes_and_diff.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_report_changes_and_diff.yaml new file mode 100644 index 00000000000..91d5c548324 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_report_changes_and_diff.yaml @@ -0,0 +1,40 @@ +- sections: + - section: syscheck + elements: + - disabled: + value: 'no' + - frequency: + value: INTERVAL + - directories: + value: TEST_DIRECTORIES + attributes: + - check_all: 'yes' + - realtime: REALTIME + - whodata: WHODATA + - report_changes: 'yes' + - directories: + value: TEST_DIRECTORIES_NO_DIFF + attributes: + - check_all: 'yes' + - realtime: REALTIME + - whodata: WHODATA + - report_changes: 'yes' + - nodiff: + value: NODIFF_FILE + + - section: sca + elements: + - enabled: + value: 'no' + + - section: rootcheck + elements: + - disabled: + value: 'yes' + + - section: wodle + attributes: + - name: syscollector + elements: + - disabled: + value: 'yes' diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_report_changes_and_diff.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_report_changes_and_diff.yaml new file mode 100644 index 00000000000..841998be7e0 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_report_changes_and_diff.yaml @@ -0,0 +1,101 @@ +- name: report_changes_found_scheduled + description: When a file is monitored with report_changes, the diff file and changes are reported (Scheduled mode) + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_reports] + TEST_DIRECTORIES_NO_DIFF: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_nodiff] + INTERVAL: 5 + REALTIME: 'no' + WHODATA: 'no' + NODIFF_FILE: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], 'testdir_nodiff', 'regular_file'] + metadata: + path_or_files_to_create: [!!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff]] + folder: 
!!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports] + filename: regular_file + fim_mode: scheduled + +- name: report_changes_truncated_scheduled + description: When a file is set to nodiff, report_changes information is truncated (Scheduled mode) + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_reports] + TEST_DIRECTORIES_NO_DIFF: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_nodiff] + INTERVAL: 5 + REALTIME: 'no' + WHODATA: 'no' + NODIFF_FILE: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], 'testdir_nodiff', 'regular_file'] + metadata: + path_or_files_to_create: [!!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff]] + folder: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff] + filename: regular_file + fim_mode: scheduled + +- name: report_changes_found_realtime + description: When a file is monitored with report_changes, the diff file and changes are reported (Realtime mode) + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_reports] + TEST_DIRECTORIES_NO_DIFF: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_nodiff] + INTERVAL: 1000 + REALTIME: 'yes' + WHODATA: 'no' + NODIFF_FILE: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], 'testdir_nodiff', 'regular_file'] + metadata: + path_or_files_to_create: [!!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff]] + folder: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports] + filename: regular_file + fim_mode: realtime + +- name: report_changes_truncated_realtime + description: When a file is set to nodiff, report_changes information is truncated (Realtime mode) + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_reports] + TEST_DIRECTORIES_NO_DIFF: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_nodiff] + INTERVAL: 1000 + REALTIME: 'yes' + WHODATA: 'no' + NODIFF_FILE: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], 'testdir_nodiff', 'regular_file'] + metadata: + path_or_files_to_create: [!!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff]] + folder: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff] + filename: regular_file + fim_mode: realtime + +- name: report_changes_found_whodata + description: When a file is monitored with report_changes, the diff file and changes are reported (Whodata mode) + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_reports] + TEST_DIRECTORIES_NO_DIFF: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_nodiff] + INTERVAL: 1000 + REALTIME: 'no' + WHODATA: 'yes' + NODIFF_FILE: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd 
[], 'testdir_nodiff', 'regular_file'] + metadata: + path_or_files_to_create: [!!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff]] + folder: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports] + filename: regular_file + fim_mode: whodata + +- name: report_changes_truncated_whodata + description: When a file is set to nodiff, report_changes information is truncated (Whodata mode) + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_reports] + TEST_DIRECTORIES_NO_DIFF: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_nodiff] + INTERVAL: 1000 + REALTIME: 'no' + WHODATA: 'yes' + NODIFF_FILE: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], 'testdir_nodiff', 'regular_file'] + metadata: + path_or_files_to_create: [!!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff]] + folder: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff] + filename: regular_file + fim_mode: whodata diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py b/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py new file mode 100644 index 00000000000..c778daf027d --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py @@ -0,0 +1,198 @@ +''' +copyright: Copyright (C) 2015-2024, Wazuh Inc. + + Created by Wazuh, Inc. . + + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts when + these files are modified. Specifically, these tests will verify that FIM events include + the 'content_changes' field with the tag 'More changes' when it exceeds the maximum size + allowed, and the 'report_changes' option is enabled. + The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured + files for changes to the checksums, permissions, and ownership. + +components: + - fim + +suite: files_report_changes + +targets: + - agent + +daemons: + - wazuh-syscheckd + +os_platform: + - linux + - windows + +os_version: + - Arch Linux + - Amazon Linux 2 + - Amazon Linux 1 + - CentOS 8 + - CentOS 7 + - Debian Buster + - Red Hat 8 + - Solaris 10 + - Solaris 11 + - macOS Catalina + - macOS Server + - Ubuntu Focal + - Ubuntu Bionic + - Windows 10 + - Windows Server 2019 + - Windows Server 2016 + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#diff + +pytest_args: + - fim_mode: + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. + whodata: Implies real-time monitoring but adding the 'who-data' information. + - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. 
+ 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. + +tags: + - fim_report_changes +''' +import os + +from pathlib import Path + +import pytest + +from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH +from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG +from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG +from wazuh_testing.modules.fim.patterns import EVENT_TYPE_MODIFIED, EVENT_TYPE_ADDED, ERROR_MSG_FIM_EVENT_NOT_DETECTED, EVENT_TYPE_DELETED +from wazuh_testing.modules.fim.utils import make_diff_file_path, get_fim_event_data +from wazuh_testing.tools.monitors.file_monitor import FileMonitor +from wazuh_testing.utils.file import write_file, generate_string, delete_files_in_folder +from wazuh_testing.utils.callbacks import generate_callback +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template + +from . import TEST_CASES_PATH, CONFIGS_PATH + + +# Marks +pytestmark = [pytest.mark.agent, pytest.mark.linux, pytest.mark.win32, pytest.mark.darwin, pytest.mark.tier(level=1)] + + +# Test metadata, configuration and ids. +cases_path = Path(TEST_CASES_PATH, 'cases_report_changes_and_diff.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_report_changes_and_diff.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + + +# Set configurations required by the fixtures. +local_internal_options = {SYSCHECK_DEBUG: 2, AGENTD_WINDOWS_DEBUG: 2} + + +# Tests +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_reports_file_and_nodiff(test_configuration, test_metadata, configure_local_internal_options, + truncate_monitored_files, set_wazuh_configuration, create_paths_files, daemons_handler, detect_end_scan): + ''' + description: Check if the 'wazuh-syscheckd' daemon reports the file changes (or truncates if required) + in the generated events using the 'nodiff' tag and vice versa. For this purpose, the test + will monitor a directory and make file operations inside it. Then, it will check if a + 'diff' file is created for the modified testing file. Finally, if the testing file matches + the 'nodiff' tag, the test will verify that the FIM event generated contains in its + 'content_changes' field a message indicating that 'diff' is truncated because + the 'nodiff' option is used. + + wazuh_min_version: 4.6.0 + + tier: 1 + + parameters: + - test_configuration: + type: dict + brief: Configuration values for ossec.conf. + - test_metadata: + type: dict + brief: Test case data. + - configure_local_internal_options: + type: fixture + brief: Set local_internal_options.conf file. + - truncate_monitored_files: + type: fixture + brief: Truncate all the log files and json alerts files before and after the test execution. + - set_wazuh_configuration: + type: fixture + brief: Set ossec.conf configuration. + - create_paths_files: + type: list + brief: Create the required directory or file to edit. + - daemons_handler: + type: fixture + brief: Handler of Wazuh daemons. + - detect_end_scan + type: fixture + brief: Check first scan end. + + assertions: + - Verify that for each modified file a 'diff' file is generated. + - Verify that FIM events include the 'content_changes' field. 
+        - Verify that FIM events truncate the modifications made in a monitored file
+          when it matches the 'nodiff' tag.
+        - Verify that FIM events include the modifications made in a monitored file
+          when it does not match the 'nodiff' tag.
+
+    input_description: A test case is contained in external YAML files (configuration_report_changes_and_diff.yaml, cases_report_changes_and_diff.yaml)
+                       which include configuration settings for the 'wazuh-syscheckd' daemon, and these are
+                       combined with the testing directories to be monitored defined in the module.
+
+    expected_output:
+        - r'.*Sending FIM event: (.+)$' ('added', 'modified', and 'deleted' events)
+
+    tags:
+        - diff
+        - scheduled
+    '''
+    is_truncated = 'testdir_nodiff' in test_metadata.get('folder')
+    folder = test_metadata.get('folder')
+    test_file_path = os.path.join(folder, test_metadata.get('filename'))
+
+    wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH)
+
+    # Create the file and capture the event.
+    original_string = generate_string(1, '0')
+    write_file(test_file_path, data=original_string)
+
+    wazuh_log_monitor.start(generate_callback(EVENT_TYPE_ADDED), timeout=30)
+    assert wazuh_log_monitor.callback_result, ERROR_MSG_FIM_EVENT_NOT_DETECTED
+
+    # Modify the file with new content.
+    modified_string = generate_string(10, '1')
+    write_file(test_file_path, data=modified_string)
+
+    wazuh_log_monitor.start(generate_callback(EVENT_TYPE_MODIFIED), timeout=20)
+    assert wazuh_log_monitor.callback_result, ERROR_MSG_FIM_EVENT_NOT_DETECTED
+    event = get_fim_event_data(wazuh_log_monitor.callback_result)
+
+    # Validate the diff file was created and content_changes is present in the event
+    diff_file = make_diff_file_path(folder=test_metadata.get('folder'), filename=test_metadata.get('filename'))
+    assert os.path.exists(diff_file), f'{diff_file} does not exist'
+    assert event.get('content_changes') is not None, 'content_changes is empty'
+
+    # Validate content_changes value is truncated if the file is set to nodiff
+    if is_truncated:
+        assert '<Diff truncated because nodiff option>' in event.get('content_changes'), \
+            'content_changes is not truncated'
+    else:
+        assert '<Diff truncated because nodiff option>' not in event.get('content_changes'), \
+            'content_changes is truncated'
+
+    delete_files_in_folder(folder)
+    wazuh_log_monitor.start(generate_callback(EVENT_TYPE_DELETED))
+    assert get_fim_event_data(wazuh_log_monitor.callback_result)['mode'] == test_metadata.get('fim_mode')

From cb9c499b4dfe862e47b6ed877ba95ea8c6dfffda Mon Sep 17 00:00:00 2001
From: Nicolas Gomez Palacios
Date: Fri, 31 May 2024 00:59:50 -0300
Subject: [PATCH 415/419] IT FIM: added test test_report_when_deleted_directories
---
 .../configuration_report_deleted_diff.yaml      |  32 ++++
 .../test_cases/cases_report_deleted_diff.yaml   |  52 +++++
 .../test_report_deleted_diff.py                 | 181 ++++++++++++++++++
 3 files changed, 265 insertions(+)
 create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_report_deleted_diff.yaml
 create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_report_deleted_diff.yaml
 create mode 100644 tests/integration/test_fim/test_files/test_report_changes/test_report_deleted_diff.py

diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_report_deleted_diff.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_report_deleted_diff.yaml
new file mode 100644
index 00000000000..e7f677c87d5
--- /dev/null
+++ 
b/tests/integration/test_fim/test_files/test_report_changes/data/configuration_templates/configuration_report_deleted_diff.yaml @@ -0,0 +1,32 @@ +- sections: + - section: syscheck + elements: + - disabled: + value: 'no' + - frequency: + value: 3 + - directories: + value: TEST_DIRECTORIES + attributes: ATTRIBUTES + - directories: + value: TEST_DIRECTORIES_NO_DIFF + attributes: ATTRIBUTES + - nodiff: + value: NODIFF_FILE + + - section: sca + elements: + - enabled: + value: 'no' + + - section: rootcheck + elements: + - disabled: + value: 'yes' + + - section: wodle + attributes: + - name: syscollector + elements: + - disabled: + value: 'yes' diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_report_deleted_diff.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_report_deleted_diff.yaml new file mode 100644 index 00000000000..62992bd4b7a --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_report_deleted_diff.yaml @@ -0,0 +1,52 @@ +- name: Test deletes the 'diff' folder, fim_mode = scheduled + description: Check if the 'wazuh-syscheckd' daemon deletes the 'diff' folder created. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_reports] + TEST_DIRECTORIES_NO_DIFF: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_nodiff] + ATTRIBUTES: + - check_all: 'yes' + - report_changes: 'yes' + NODIFF_FILE: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], 'testdir_nodiff', 'regular_file'] + metadata: + path_or_files_to_create: [!!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff]] + folder: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff] + filename: regularfile + fim_mode: scheduled + +- name: Test deletes the 'diff' folder, fim_mode = realtime + description: Check if the 'wazuh-syscheckd' daemon deletes the 'diff' folder created. + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_reports] + TEST_DIRECTORIES_NO_DIFF: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_nodiff] + ATTRIBUTES: + - check_all: 'yes' + - report_changes: 'yes' + - realtime: "yes" + NODIFF_FILE: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], 'testdir_nodiff', 'regular_file'] + metadata: + path_or_files_to_create: [!!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff]] + folder: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff] + filename: regularfile + fim_mode: realtime + +- name: Test deletes the 'diff' folder, fim_mode = whodata + description: Check if the 'wazuh-syscheckd' daemon deletes the 'diff' folder created. 
+ configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_reports] + TEST_DIRECTORIES_NO_DIFF: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], testdir_nodiff] + ATTRIBUTES: + - check_all: 'yes' + - report_changes: 'yes' + - whodata: "yes" + NODIFF_FILE: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], 'testdir_nodiff', 'regular_file'] + metadata: + path_or_files_to_create: [!!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff]] + folder: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff] + filename: regularfile + fim_mode: whodata diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_report_deleted_diff.py b/tests/integration/test_fim/test_files/test_report_changes/test_report_deleted_diff.py new file mode 100644 index 00000000000..f6f8811c561 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/test_report_deleted_diff.py @@ -0,0 +1,181 @@ +''' +copyright: Copyright (C) 2015-2024, Wazuh Inc. + + Created by Wazuh, Inc. . + + This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 + +type: integration + +brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts when + these files are modified. Specifically, these tests will check if FIM manages properly + the 'diff' folder created in the 'queue/diff/local' directory when removing a monitored + folder or the 'report_changes' option is disabled. + The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured + files for changes to the checksums, permissions, and ownership. + +components: + - fim + +suite: files_report_changes + +targets: + - agent + +daemons: + - wazuh-syscheckd + +os_platform: + - linux + - windows + +os_version: + - Arch Linux + - Amazon Linux 2 + - Amazon Linux 1 + - CentOS 8 + - CentOS 7 + - Debian Buster + - Red Hat 8 + - Ubuntu Focal + - Ubuntu Bionic + - Windows 10 + - Windows Server 2019 + - Windows Server 2016 + +references: + - https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#directories + - https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#diff + +pytest_args: + - fim_mode: + realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems. + whodata: Implies real-time monitoring but adding the 'who-data' information. + - tier: + 0: Only level 0 tests are performed, they check basic functionalities and are quick to perform. + 1: Only level 1 tests are performed, they check functionalities of medium complexity. + 2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform. 
+ +tags: + - fim_report_changes +''' +import os + +from pathlib import Path + +import pytest +import time + +from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH +from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG +from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG +from wazuh_testing.modules.fim.patterns import EVENT_TYPE_ADDED, ERROR_MSG_FIM_EVENT_NOT_DETECTED, EVENT_TYPE_DELETED +from wazuh_testing.modules.fim.utils import make_diff_file_path, get_fim_event_data +from wazuh_testing.tools.monitors.file_monitor import FileMonitor +from wazuh_testing.utils.file import write_file, generate_string, delete_files_in_folder +from wazuh_testing.utils.callbacks import generate_callback +from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template + +from . import TEST_CASES_PATH, CONFIGS_PATH + + +# Marks +pytestmark = [pytest.mark.agent, pytest.mark.linux, pytest.mark.win32, pytest.mark.darwin, pytest.mark.tier(level=1)] + + +# Test metadata, configuration and ids. +cases_path = Path(TEST_CASES_PATH, 'cases_report_deleted_diff.yaml') +config_path = Path(CONFIGS_PATH, 'configuration_report_deleted_diff.yaml') +test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) +test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) + + +# Set configurations required by the fixtures. +local_internal_options = {SYSCHECK_DEBUG: 2, AGENTD_WINDOWS_DEBUG: 2} + + +# Tests +@pytest.mark.parametrize('test_configuration, test_metadata', zip(test_configuration, test_metadata), ids=cases_ids) +def test_report_when_deleted_directories(test_configuration, test_metadata, configure_local_internal_options, + truncate_monitored_files, set_wazuh_configuration, create_paths_files, daemons_handler, detect_end_scan): + ''' + description: Check if the 'wazuh-syscheckd' daemon deletes the 'diff' folder created in the 'queue/diff/local' + directory when removing a monitored folder and the 'report_changes' option is enabled. + For this purpose, the test will monitor a directory and add a testing file inside it. Then, + it will check if a 'diff' file is created for the modified testing file. Finally, the test + will remove the monitored folder, wait for the FIM 'deleted' event, and verify that + the corresponding 'diff' folder is deleted. + + wazuh_min_version: 4.6.0 + + tier: 1 + + parameters: + - test_configuration: + type: dict + brief: Configuration values for ossec.conf. + - test_metadata: + type: dict + brief: Test case data. + - configure_local_internal_options: + type: fixture + brief: Set local_internal_options.conf file. + - truncate_monitored_files: + type: fixture + brief: Truncate all the log files and json alerts files before and after the test execution. + - set_wazuh_configuration: + type: fixture + brief: Set ossec.conf configuration. + - create_paths_files: + type: list + brief: Create the required directory or file to edit. + - daemons_handler: + type: fixture + brief: Handler of Wazuh daemons. + - detect_end_scan + type: fixture + brief: Check first scan end. + + assertions: + - Verify that the FIM event is generated when removing the monitored folder. + - Verify that FIM adds the 'diff' file in the 'queue/diff/local' directory + when monitoring the corresponding testing file. + - Verify that FIM deletes the 'diff' folder in the 'queue/diff/local' directory + when removing the corresponding monitored folder. 
+ + input_description: Different test cases are contained in external YAML file (configuration_report_deleted_diff.yaml) which + includes configuration settings for the 'wazuh-syscheckd' daemon and, these + are combined with the testing directory to be monitored defined in the module. + + expected_output: + - r'.*Sending FIM event: (.+)$' ('deleted' events) + + tags: + - diff + - scheduled + ''' + fim_mode = test_metadata.get('fim_mode') + folder = test_metadata.get('folder') + test_file_path = os.path.join(folder, test_metadata.get('filename')) + + wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) + + # Create the file and and capture the event. + original_string = generate_string(1, '0') + write_file(test_file_path, data=original_string) + + wazuh_log_monitor.start(callback=generate_callback(EVENT_TYPE_ADDED), timeout=30) + assert wazuh_log_monitor.callback_result, ERROR_MSG_FIM_EVENT_NOT_DETECTED + + # Validate content_changes attribute exists in the event + diff_file = make_diff_file_path(folder=test_metadata.get('folder'), filename=test_metadata.get('filename')) + assert os.path.exists(diff_file), f'{diff_file} does not exist' + + delete_files_in_folder(folder) + wazuh_log_monitor.start(callback=generate_callback(EVENT_TYPE_DELETED)) + + # Wait a second so diff path is deleted + if 'scheduled' not in fim_mode: + time.sleep(2) + assert not os.path.exists(diff_file), f'{diff_file} exists' From 41140c08cb6c3529b783f3af0b7437683bbceecc Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Fri, 31 May 2024 12:44:49 -0300 Subject: [PATCH 416/419] IT fim: changes in tests tes_report_changes_and_diff and test_large_changes. + use the new function write_file_write + limits test cases in macos to scheduled only --- .../test_cases/cases_large_changes_macos.yaml | 67 +++++++++++++++++++ .../cases_report_changes_and_diff_macos.yaml | 33 +++++++++ .../test_report_changes/test_large_changes.py | 17 +++-- .../test_report_changes_and_diff.py | 21 ++++-- 4 files changed, 126 insertions(+), 12 deletions(-) create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_large_changes_macos.yaml create mode 100644 tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_report_changes_and_diff_macos.yaml diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_large_changes_macos.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_large_changes_macos.yaml new file mode 100644 index 00000000000..3ea90541578 --- /dev/null +++ b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_large_changes_macos.yaml @@ -0,0 +1,67 @@ +- name: Test changes smaller than limit (Scheduled mode) + description: Test that changes are smaller than limit, 'More changes' does not appear in content_changes + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + INTERVAL: 4 + REALTIME: 'no' + WHODATA: 'no' + metadata: + folder_to_monitor: !!python/object/apply:os.path.join + args: [!!python/object/apply:os.getcwd [], test_dir] + filename: regular_1 + original_size: 500 + modified_size: 500 + has_more_changes: false + fim_mode: scheduled + +- name: Test large changes - Same size (Scheduled mode) + description: Test when changes are same size of set limit, 'More changes' appears in content_changes + configuration_parameters: + TEST_DIRECTORIES: !!python/object/apply:os.path.join + args: 
+    INTERVAL: 4
+    REALTIME: 'no'
+    WHODATA: 'no'
+  metadata:
+    folder_to_monitor: !!python/object/apply:os.path.join
+      args: [!!python/object/apply:os.getcwd [], test_dir]
+    filename: regular_2
+    original_size: 200000
+    modified_size: 200000
+    has_more_changes: true
+    fim_mode: scheduled
+
+- name: Test large changes - File bigger after change (Scheduled mode)
+  description: Test that when changes are bigger than the limit, 'More changes' appears in content_changes.
+  configuration_parameters:
+    TEST_DIRECTORIES: !!python/object/apply:os.path.join
+      args: [!!python/object/apply:os.getcwd [], test_dir]
+    INTERVAL: 4
+    REALTIME: 'no'
+    WHODATA: 'no'
+  metadata:
+    folder_to_monitor: !!python/object/apply:os.path.join
+      args: [!!python/object/apply:os.getcwd [], test_dir]
+    filename: regular_3
+    original_size: 10
+    modified_size: 200000
+    has_more_changes: true
+    fim_mode: scheduled
+
+- name: Test large changes - File smaller after change (Scheduled mode)
+  description: Test that when the file is smaller after the change, 'More changes' appears in content_changes.
+  configuration_parameters:
+    TEST_DIRECTORIES: !!python/object/apply:os.path.join
+      args: [!!python/object/apply:os.getcwd [], test_dir]
+    INTERVAL: 4
+    REALTIME: 'no'
+    WHODATA: 'no'
+  metadata:
+    folder_to_monitor: !!python/object/apply:os.path.join
+      args: [!!python/object/apply:os.getcwd [], test_dir]
+    filename: regular_4
+    original_size: 200000
+    modified_size: 10
+    has_more_changes: true
+    fim_mode: scheduled

diff --git a/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_report_changes_and_diff_macos.yaml b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_report_changes_and_diff_macos.yaml
new file mode 100644
index 00000000000..73c8b7f3e1a
--- /dev/null
+++ b/tests/integration/test_fim/test_files/test_report_changes/data/test_cases/cases_report_changes_and_diff_macos.yaml
@@ -0,0 +1,33 @@
+- name: report_changes_found_scheduled
+  description: When a file is monitored with report_changes, the diff file and changes are reported (Scheduled mode)
+  configuration_parameters:
+    TEST_DIRECTORIES: !!python/object/apply:os.path.join
+      args: [!!python/object/apply:os.getcwd [], testdir_reports]
+    TEST_DIRECTORIES_NO_DIFF: !!python/object/apply:os.path.join
+      args: [!!python/object/apply:os.getcwd [], testdir_nodiff]
+    INTERVAL: 5
+    REALTIME: 'no'
+    WHODATA: 'no'
+    NODIFF_FILE: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], 'testdir_nodiff', 'regular_file']
+  metadata:
+    path_or_files_to_create: [!!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff]]
+    folder: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports]
+    filename: regular_file
+    fim_mode: scheduled
+
+- name: report_changes_truncated_scheduled
+  description: When a file is set to nodiff, report_changes information is truncated (Scheduled mode)
+  configuration_parameters:
+    TEST_DIRECTORIES: !!python/object/apply:os.path.join
+      args: [!!python/object/apply:os.getcwd [], testdir_reports]
+    TEST_DIRECTORIES_NO_DIFF: !!python/object/apply:os.path.join
+      args: [!!python/object/apply:os.getcwd [], testdir_nodiff]
+    INTERVAL: 5
+    REALTIME: 'no'
+    WHODATA: 'no'
+    NODIFF_FILE: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], 'testdir_nodiff', 'regular_file']
+  metadata:
+    path_or_files_to_create:
[!!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_reports], !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff]] + folder: !!python/object/apply:os.path.join [!!python/object/apply:os.getcwd [], testdir_nodiff] + filename: regular_file + fim_mode: scheduled diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py b/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py index c1ce2155f93..6c0c9292ab9 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py @@ -70,14 +70,14 @@ import pytest -from wazuh_testing.constants.platforms import WINDOWS +from wazuh_testing.constants.platforms import WINDOWS, MACOS from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG from wazuh_testing.modules.fim.patterns import EVENT_TYPE_MODIFIED, EVENT_TYPE_ADDED, ERROR_MSG_FIM_EVENT_NOT_DETECTED from wazuh_testing.modules.fim.utils import get_fim_event_data from wazuh_testing.tools.monitors.file_monitor import FileMonitor -from wazuh_testing.utils.file import write_file, generate_string +from wazuh_testing.utils.file import generate_string, truncate_file, write_file_write from wazuh_testing.utils.callbacks import generate_callback from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template @@ -89,7 +89,11 @@ # Test metadata, configuration and ids. -cases_path = Path(TEST_CASES_PATH, 'cases_large_changes.yaml') +cases_path = '' +if sys.platform == MACOS: + cases_path = Path(TEST_CASES_PATH, 'cases_large_changes_macos.yaml') +else: + cases_path = Path(TEST_CASES_PATH, 'cases_large_changes.yaml') config_path = Path(CONFIGS_PATH, 'configuration_large_changes.yaml') test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) @@ -164,15 +168,17 @@ def test_large_changes(test_configuration, test_metadata, configure_local_intern test_file_path = os.path.join(test_metadata.get('folder_to_monitor'), test_metadata.get('filename')) # Create the file and and capture the event. + truncate_file(WAZUH_LOG_PATH) original_string = generate_string(test_metadata.get('original_size'), '0') - write_file(test_file_path, data=original_string) + write_file_write(test_file_path, content=original_string) wazuh_log_monitor.start(generate_callback(EVENT_TYPE_ADDED), timeout=30) assert wazuh_log_monitor.callback_result, ERROR_MSG_FIM_EVENT_NOT_DETECTED # Modify the file with new content + truncate_file(WAZUH_LOG_PATH) modified_string = generate_string(test_metadata.get('modified_size'), '1') - write_file(test_file_path, data=modified_string) + write_file_write(test_file_path, content=modified_string) wazuh_log_monitor.start(generate_callback(EVENT_TYPE_MODIFIED), timeout=20) assert wazuh_log_monitor.callback_result @@ -182,7 +188,6 @@ def test_large_changes(test_configuration, test_metadata, configure_local_intern # Assert 'More changes' is shown when the command returns more than 'limit' characters if test_metadata.get('has_more_changes'): assert 'More changes' in event['content_changes'], 'Did not find event with "More changes" within content_changes.' 
- else: assert 'More changes' not in event['content_changes'], '"More changes" found within content_changes.' diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py b/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py index c778daf027d..8de5f790efb 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py @@ -64,18 +64,20 @@ - fim_report_changes ''' import os +import sys from pathlib import Path import pytest +from wazuh_testing.constants.platforms import MACOS from wazuh_testing.constants.paths.logs import WAZUH_LOG_PATH from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG from wazuh_testing.modules.fim.patterns import EVENT_TYPE_MODIFIED, EVENT_TYPE_ADDED, ERROR_MSG_FIM_EVENT_NOT_DETECTED, EVENT_TYPE_DELETED from wazuh_testing.modules.fim.utils import make_diff_file_path, get_fim_event_data from wazuh_testing.tools.monitors.file_monitor import FileMonitor -from wazuh_testing.utils.file import write_file, generate_string, delete_files_in_folder +from wazuh_testing.utils.file import write_file_write, generate_string, delete_files_in_folder, truncate_file from wazuh_testing.utils.callbacks import generate_callback from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template @@ -87,7 +89,11 @@ # Test metadata, configuration and ids. -cases_path = Path(TEST_CASES_PATH, 'cases_report_changes_and_diff.yaml') +cases_path = '' +if sys.platform == MACOS: + cases_path = Path(TEST_CASES_PATH, 'cases_report_changes_and_diff_macos.yaml') +else: + cases_path = Path(TEST_CASES_PATH, 'cases_report_changes_and_diff.yaml') config_path = Path(CONFIGS_PATH, 'configuration_report_changes_and_diff.yaml') test_configuration, test_metadata, cases_ids = get_test_cases_data(cases_path) test_configuration = load_configuration_template(config_path, test_configuration, test_metadata) @@ -166,15 +172,17 @@ def test_reports_file_and_nodiff(test_configuration, test_metadata, configure_lo wazuh_log_monitor = FileMonitor(WAZUH_LOG_PATH) # Create the file and and capture the event. + truncate_file(WAZUH_LOG_PATH) original_string = generate_string(1, '0') - write_file(test_file_path, data=original_string) + write_file_write(test_file_path, content=original_string) wazuh_log_monitor.start(generate_callback(EVENT_TYPE_ADDED), timeout=30) assert wazuh_log_monitor.callback_result, ERROR_MSG_FIM_EVENT_NOT_DETECTED # Modify the file with new content. 
+    truncate_file(WAZUH_LOG_PATH)
     modified_string = generate_string(10, '1')
-    write_file(test_file_path, data=modified_string)
+    write_file_write(test_file_path, content=modified_string)
 
     wazuh_log_monitor.start(generate_callback(EVENT_TYPE_MODIFIED), timeout=20)
     assert wazuh_log_monitor.callback_result
@@ -187,12 +195,13 @@ def test_reports_file_and_nodiff(test_configuration, test_metadata, configure_lo
 
     # Validate content_changes value is truncated if the file is set to no_diff
     if is_truncated:
-        assert '<Diff truncated because nodiff option>' in event.get('content_changes'), \
+        assert 'Diff truncated because nodiff option' in event.get('content_changes'), \
             'content_changes is not truncated'
     else:
-        assert '<Diff truncated because nodiff option>' not in event.get('content_changes'), \
+        assert 'Diff truncated because nodiff option' not in event.get('content_changes'), \
             'content_changes is truncated'
 
+    truncate_file(WAZUH_LOG_PATH)
     delete_files_in_folder(folder)
     wazuh_log_monitor.start(generate_callback(EVENT_TYPE_DELETED))
     assert get_fim_event_data(wazuh_log_monitor.callback_result)['mode'] == test_metadata.get('fim_mode')

From ec3719e31f23b69a978fb5a62307ca4178a3aaa2 Mon Sep 17 00:00:00 2001
From: Nicolas Gomez Palacios
Date: Fri, 31 May 2024 13:39:57 -0300
Subject: [PATCH 417/419] IT fim: fix log "Diff truncated due to 'nodiff'..." in test_report_changes_and_diff

---
 .../test_report_changes/test_report_changes_and_diff.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py b/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py
index 8de5f790efb..30844ce1e1b 100644
--- a/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py
+++ b/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py
@@ -195,10 +195,10 @@ def test_reports_file_and_nodiff(test_configuration, test_metadata, configure_lo
 
     # Validate content_changes value is truncated if the file is set to no_diff
     if is_truncated:
-        assert 'Diff truncated because nodiff option' in event.get('content_changes'), \
+        assert "Diff truncated due to 'nodiff' configuration detected for this file." in event.get('content_changes'), \
             'content_changes is not truncated'
     else:
-        assert 'Diff truncated because nodiff option' not in event.get('content_changes'), \
+        assert "Diff truncated due to 'nodiff' configuration detected for this file."
not in event.get('content_changes'), \ 'content_changes is truncated' truncate_file(WAZUH_LOG_PATH) From 1b813038ed313c8db90a7ff033524e37a0648ee6 Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Fri, 31 May 2024 14:23:53 -0300 Subject: [PATCH 418/419] IT fim: changed import of the function generate_string --- .../test_report_changes/test_disk_quota_disabled.py | 3 ++- .../test_files/test_report_changes/test_file_size_default.py | 3 ++- .../test_report_changes/test_file_size_disabled.py | 3 ++- .../test_files/test_report_changes/test_file_size_values.py | 3 ++- .../test_files/test_report_changes/test_large_changes.py | 3 ++- .../test_report_changes/test_report_changes_and_diff.py | 3 ++- .../test_report_changes/test_report_deleted_diff.py | 5 +++-- 7 files changed, 15 insertions(+), 8 deletions(-) diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py index 2aaa2de3771..d653c20f8db 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py @@ -68,7 +68,8 @@ from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG from wazuh_testing.modules.fim.patterns import FILE_EXCEEDS_DISK_QUOTA from wazuh_testing.tools.monitors.file_monitor import FileMonitor -from wazuh_testing.utils.file import write_file, generate_string +from wazuh_testing.utils.file import write_file +from wazuh_testing.utils.string import generate_string from wazuh_testing.utils.callbacks import generate_callback from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py index 37f74c09b1a..61ed38492e7 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py @@ -71,7 +71,8 @@ from wazuh_testing.modules.fim.patterns import FILE_SIZE_LIMIT_REACHED, EVENT_TYPE_ADDED, ERROR_MSG_FIM_EVENT_NOT_DETECTED, ERROR_MSG_FILE_LIMIT_REACHED from wazuh_testing.modules.fim.utils import make_diff_file_path from wazuh_testing.tools.monitors.file_monitor import FileMonitor -from wazuh_testing.utils.file import write_file, generate_string, translate_size +from wazuh_testing.utils.file import write_file, translate_size +from wazuh_testing.utils.string import generate_string from wazuh_testing.utils.callbacks import generate_callback from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py index 81bcbf5e899..4281fb74f43 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py @@ -68,7 +68,8 @@ from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG from wazuh_testing.modules.fim.patterns import FILE_SIZE_LIMIT_REACHED from wazuh_testing.tools.monitors.file_monitor import FileMonitor -from wazuh_testing.utils.file import write_file, generate_string +from 
wazuh_testing.utils.file import write_file +from wazuh_testing.utils.string import generate_string from wazuh_testing.utils.callbacks import generate_callback from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py index aa2de8cde5a..8c23c00570d 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py @@ -71,7 +71,8 @@ from wazuh_testing.modules.fim.patterns import FILE_SIZE_LIMIT_REACHED, EVENT_TYPE_ADDED, ERROR_MSG_FIM_EVENT_NOT_DETECTED, ERROR_MSG_FILE_LIMIT_REACHED, DIFF_FOLDER_DELETED, ERROR_MSG_FOLDER_DELETED from wazuh_testing.modules.fim.utils import make_diff_file_path from wazuh_testing.tools.monitors.file_monitor import FileMonitor -from wazuh_testing.utils.file import write_file, generate_string, translate_size +from wazuh_testing.utils.file import write_file, translate_size +from wazuh_testing.utils.string import generate_string from wazuh_testing.utils.callbacks import generate_callback from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py b/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py index 6c0c9292ab9..6a08ed7d23c 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py @@ -77,7 +77,8 @@ from wazuh_testing.modules.fim.patterns import EVENT_TYPE_MODIFIED, EVENT_TYPE_ADDED, ERROR_MSG_FIM_EVENT_NOT_DETECTED from wazuh_testing.modules.fim.utils import get_fim_event_data from wazuh_testing.tools.monitors.file_monitor import FileMonitor -from wazuh_testing.utils.file import generate_string, truncate_file, write_file_write +from wazuh_testing.utils.file import truncate_file, write_file_write +from wazuh_testing.utils.string import generate_string from wazuh_testing.utils.callbacks import generate_callback from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py b/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py index 30844ce1e1b..c5567a6dcaf 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py @@ -77,7 +77,8 @@ from wazuh_testing.modules.fim.patterns import EVENT_TYPE_MODIFIED, EVENT_TYPE_ADDED, ERROR_MSG_FIM_EVENT_NOT_DETECTED, EVENT_TYPE_DELETED from wazuh_testing.modules.fim.utils import make_diff_file_path, get_fim_event_data from wazuh_testing.tools.monitors.file_monitor import FileMonitor -from wazuh_testing.utils.file import write_file_write, generate_string, delete_files_in_folder, truncate_file +from wazuh_testing.utils.file import write_file_write, delete_files_in_folder, truncate_file +from wazuh_testing.utils.string import generate_string from wazuh_testing.utils.callbacks import generate_callback from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template diff --git 
a/tests/integration/test_fim/test_files/test_report_changes/test_report_deleted_diff.py b/tests/integration/test_fim/test_files/test_report_changes/test_report_deleted_diff.py index f6f8811c561..2f16322422c 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_report_deleted_diff.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_report_deleted_diff.py @@ -71,9 +71,10 @@ from wazuh_testing.modules.fim.configuration import SYSCHECK_DEBUG from wazuh_testing.modules.agentd.configuration import AGENTD_WINDOWS_DEBUG from wazuh_testing.modules.fim.patterns import EVENT_TYPE_ADDED, ERROR_MSG_FIM_EVENT_NOT_DETECTED, EVENT_TYPE_DELETED -from wazuh_testing.modules.fim.utils import make_diff_file_path, get_fim_event_data +from wazuh_testing.modules.fim.utils import make_diff_file_path from wazuh_testing.tools.monitors.file_monitor import FileMonitor -from wazuh_testing.utils.file import write_file, generate_string, delete_files_in_folder +from wazuh_testing.utils.file import write_file, delete_files_in_folder +from wazuh_testing.utils.string import generate_string from wazuh_testing.utils.callbacks import generate_callback from wazuh_testing.utils.configuration import get_test_cases_data, load_configuration_template From 3ddc97042ac595dbeed4670b9b94adf30116c16d Mon Sep 17 00:00:00 2001 From: Nicolas Gomez Palacios Date: Fri, 31 May 2024 15:58:18 -0300 Subject: [PATCH 419/419] IT fim: added macos to the os_platform and os_version description. --- .../test_files/test_report_changes/test_disk_quota_default.py | 3 +++ .../test_files/test_report_changes/test_disk_quota_disabled.py | 3 +++ .../test_files/test_report_changes/test_file_size_default.py | 3 +++ .../test_files/test_report_changes/test_file_size_disabled.py | 3 +++ .../test_files/test_report_changes/test_file_size_values.py | 3 +++ .../test_files/test_report_changes/test_large_changes.py | 3 +++ .../test_report_changes/test_report_changes_and_diff.py | 3 +++ .../test_files/test_report_changes/test_report_deleted_diff.py | 3 +++ 8 files changed, 24 insertions(+) diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py index 69744820e6c..6a9353c43d9 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_default.py @@ -28,6 +28,7 @@ os_platform: - linux - windows + - macos os_version: - Arch Linux @@ -37,6 +38,8 @@ - CentOS 7 - Debian Buster - Red Hat 8 + - macOS Catalina + - macOS Server - Ubuntu Focal - Ubuntu Bionic - Windows 10 diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py index d653c20f8db..bd6bb970eb4 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_disk_quota_disabled.py @@ -28,6 +28,7 @@ os_platform: - linux - windows + - macos os_version: - Arch Linux @@ -37,6 +38,8 @@ - CentOS 7 - Debian Buster - Red Hat 8 + - macOS Catalina + - macOS Server - Ubuntu Focal - Ubuntu Bionic - Windows 10 diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py index 61ed38492e7..f47a19d1db8 
100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_default.py @@ -28,6 +28,7 @@ os_platform: - linux - windows + - macos os_version: - Arch Linux @@ -37,6 +38,8 @@ - CentOS 7 - Debian Buster - Red Hat 8 + - macOS Catalina + - macOS Server - Ubuntu Focal - Ubuntu Bionic - Windows 10 diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py index 4281fb74f43..503d088abf5 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_disabled.py @@ -28,6 +28,7 @@ os_platform: - linux - windows + - macos os_version: - Arch Linux @@ -37,6 +38,8 @@ - CentOS 7 - Debian Buster - Red Hat 8 + - macOS Catalina + - macOS Server - Ubuntu Focal - Ubuntu Bionic - Windows 10 diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py index 8c23c00570d..320a0783bae 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_file_size_values.py @@ -28,6 +28,7 @@ os_platform: - linux - windows + - macos os_version: - Arch Linux @@ -37,6 +38,8 @@ - CentOS 7 - Debian Buster - Red Hat 8 + - macOS Catalina + - macOS Server - Ubuntu Focal - Ubuntu Bionic - Windows 10 diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py b/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py index 6a08ed7d23c..94298e66d22 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_large_changes.py @@ -28,6 +28,7 @@ os_platform: - linux - windows + - macos os_version: - Arch Linux @@ -37,6 +38,8 @@ - CentOS 7 - Debian Buster - Red Hat 8 + - macOS Catalina + - macOS Server - Solaris 10 - Solaris 11 - macOS Catalina diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py b/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py index c5567a6dcaf..fd81fb42c44 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_report_changes_and_diff.py @@ -28,6 +28,7 @@ os_platform: - linux - windows + - macos os_version: - Arch Linux @@ -37,6 +38,8 @@ - CentOS 7 - Debian Buster - Red Hat 8 + - macOS Catalina + - macOS Server - Solaris 10 - Solaris 11 - macOS Catalina diff --git a/tests/integration/test_fim/test_files/test_report_changes/test_report_deleted_diff.py b/tests/integration/test_fim/test_files/test_report_changes/test_report_deleted_diff.py index 2f16322422c..87b6c429c57 100644 --- a/tests/integration/test_fim/test_files/test_report_changes/test_report_deleted_diff.py +++ b/tests/integration/test_fim/test_files/test_report_changes/test_report_deleted_diff.py @@ -28,6 +28,7 @@ os_platform: - linux - windows + - macos os_version: - Arch Linux @@ -37,6 +38,8 @@ - CentOS 7 - Debian Buster - Red Hat 8 + - macOS Catalina + - macOS Server - Ubuntu Focal - Ubuntu Bionic - Windows 10