diff --git a/.github/workflows/testEndToEnd.yml b/.github/workflows/testEndToEnd.yml index 01e94745..fff5c5dc 100644 --- a/.github/workflows/testEndToEnd.yml +++ b/.github/workflows/testEndToEnd.yml @@ -11,22 +11,22 @@ jobs: strategy: fail-fast: false matrix: - python_version: ["3.9", "3.10", "3.11"] - operating_system: ["ubuntu-20.04", "ubuntu-22.04"] + python_version: ["3.11", "3.12"] + operating_system: ["ubuntu-20.04", "ubuntu-22.04", "macos-latest", "macos-14"] #operating_system: ["ubuntu-20.04", "ubuntu-22.04", "macos-latest"] runs-on: ${{ matrix.operating_system }} steps: - - name: Install Docker for macOS - run: | - brew install docker - #import magic fails on macos runner - brew install libmagic - colima start - # Mapping below is required to get the Python docker library working - sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock - if: matrix.operating_system == 'macos-latest' + #- name: Install Docker for macOS + # run: | + # brew install docker + # # import magic fails on macos runner + # brew install libmagic colima + # colima start + # # Mapping below is required to get the Python docker library working + # sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock + # if: matrix.operating_system == 'macos-latest' #Checkout the current branch - name: Checkout repo @@ -51,7 +51,12 @@ jobs: - name: Run contentctl init run: | cd my_splunk_content_pack - poetry run contentctl init + poetry run contentctl init + + - name: Clone the AtomicRedTeam Repo + run: | + cd my_splunk_content_pack + git clone --depth 1 https://github.com/redcanaryco/atomic-red-team - name: Run contentctl validate run: | @@ -65,9 +70,10 @@ jobs: #Do not pause on a failed detection - name: Run contentctl test + if: startsWith(matrix.operating_system, 'ubuntu') run: | cd my_splunk_content_pack - poetry run contentctl test --unattended + poetry run contentctl test --disable-tqdm --post-test-behavior never_pause - uses: actions/upload-artifact@v4 with: diff --git a/.vscode/launch.json b/.vscode/launch.json index b7d2fbad..aa882c59 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -1,19 +1,68 @@ { - // Use IntelliSense to learn about possible attributes. - // Hover to view descriptions of existing attributes. 
- // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 - "version": "0.2.0", - "configurations": [ - - { - "name": "contentctl test", - "type": "python", - "request": "launch", - "program": "${workspaceFolder}/splunk_contentctl/contentctl.py", - "cwd": "${workspaceFolder}/splunk_contentctl", - "console": "integratedTerminal", - "justMyCode": true, - "args": ["-p", "tmp", "test"] - } - ] +"configurations": [ + { + "name": "contentctl init", + "type": "debugpy", + "request": "launch", + "program": "${workspaceFolder}/.venv/bin/contentctl", + "cwd": "${workspaceFolder}/../ddd/", + "args": ["init"] + }, + { + "name": "contentctl validate", + "type": "debugpy", + "request": "launch", + "program": "${workspaceFolder}/.venv/bin/contentctl", + "cwd": "${workspaceFolder}/../", + "args": ["validate"] + }, + { + "name": "contentctl validate enrich", + "type": "debugpy", + "request": "launch", + "program": "${workspaceFolder}/.venv/bin/contentctl", + "cwd": "${workspaceFolder}/../", + "args": ["validate", "--enrichments"] + }, + { + "name": "contentctl build", + "type": "debugpy", + "request": "launch", + "program": "${workspaceFolder}/.venv/bin/contentctl", + "cwd": "${workspaceFolder}/../", + "args": ["build"] + }, + { + "name": "contentctl build enrich", + "type": "debugpy", + "request": "launch", + "program": "${workspaceFolder}/.venv/bin/contentctl", + "cwd": "${workspaceFolder}/../", + "args": ["build", "--enrichments"] + }, + { + "name": "contentctl test", + "type": "debugpy", + "request": "launch", + "program": "${workspaceFolder}/.venv/bin/contentctl", + "cwd": "${workspaceFolder}/../", + "args": ["test"] + }, + { + "name": "contentctl --help", + "type": "debugpy", + "request": "launch", + "program": "${workspaceFolder}/.venv/bin/contentctl", + "cwd": "${workspaceFolder}/../", + "args": ["--help"] + }, + { + "name": "contentctl test detection", + "type": "debugpy", + "request": "launch", + "program": "${workspaceFolder}/.venv/bin/contentctl", + "cwd": "${workspaceFolder}/../", + "args": ["test", "mode:selected", "--mode.files", "detections/endpoint/3cx_supply_chain_attack_network_indicators.yml"] + } +] } \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 05800ac0..973b51b9 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,5 +1,9 @@ { "python.terminal.activateEnvironment": true, "python.envFile": "${workspaceFolder}/.env", - "python.testing.cwd": "${workspaceFolder}" + "python.testing.cwd": "${workspaceFolder}", + "python.languageServer": "Pylance", + "python.analysis.typeCheckingMode": "strict" + + } \ No newline at end of file diff --git a/README.md b/README.md index 2acedd6a..55853fb3 100644 --- a/README.md +++ b/README.md @@ -178,7 +178,28 @@ contentctl test's default mode allows it to quickly test all content with requir 6. **docs** - Create documentation as Markdown 7. **reporting** - Create different reporting files such as a Mitre ATT&CK overlay +# Shell tab-complete +Leveraging the tab completion featureset of the CLI library we're using, you can generate tab completions for `contentctl` automatically, for zsh, bash, and tcsh. For additional details, you can view the docs for the library [here.](https://brentyi.github.io/tyro/tab_completion/) + +### Zsh +If you already have a location for your ZSH tab completions, you only need to run the generation line and can skip the folder creation, configuring the rest to fit with your shell config. 
+ +```zsh +mkdir -p ~/.zfunc +contentctl --tyro-write-completion zsh ~/.zfunc/_contentctl +echo "fpath+=~/.zfunc" >> ~/.zshrc +echo "autoload -Uz compinit && compinit" >> ~/.zshrc +source ~/.zshrc +``` + +### Bash + +```bash +completion_dir=${BASH_COMPLETION_USER_DIR:-${XDG_DATA_HOME:-$HOME/.local/share}/bash-completion}/completions/ +mkdir -p $completion_dir +contentctl --tyro-write-completion bash ${completion_dir}/_contentctl +``` # Acronyms | Acronym | Meaning| Description | diff --git a/contentctl/actions/build.py b/contentctl/actions/build.py new file mode 100644 index 00000000..a0d46195 --- /dev/null +++ b/contentctl/actions/build.py @@ -0,0 +1,89 @@ +import sys +import shutil +import os + +from dataclasses import dataclass + +from contentctl.objects.enums import SecurityContentProduct, SecurityContentType +from contentctl.input.director import Director, DirectorOutputDto +from contentctl.output.conf_output import ConfOutput +from contentctl.output.conf_writer import ConfWriter +from contentctl.output.ba_yml_output import BAYmlOutput +from contentctl.output.api_json_output import ApiJsonOutput +import pathlib +import json +import datetime +from typing import Union + +from contentctl.objects.config import build + +@dataclass(frozen=True) +class BuildInputDto: + director_output_dto: DirectorOutputDto + config:build + + +class Build: + + + + def execute(self, input_dto: BuildInputDto) -> DirectorOutputDto: + if input_dto.config.build_app: + updated_conf_files:set[pathlib.Path] = set() + conf_output = ConfOutput(input_dto.config) + updated_conf_files.update(conf_output.writeHeaders()) + updated_conf_files.update(conf_output.writeObjects(input_dto.director_output_dto.detections, SecurityContentType.detections)) + updated_conf_files.update(conf_output.writeObjects(input_dto.director_output_dto.stories, SecurityContentType.stories)) + updated_conf_files.update(conf_output.writeObjects(input_dto.director_output_dto.baselines, SecurityContentType.baselines)) + updated_conf_files.update(conf_output.writeObjects(input_dto.director_output_dto.investigations, SecurityContentType.investigations)) + updated_conf_files.update(conf_output.writeObjects(input_dto.director_output_dto.lookups, SecurityContentType.lookups)) + updated_conf_files.update(conf_output.writeObjects(input_dto.director_output_dto.macros, SecurityContentType.macros)) + updated_conf_files.update(conf_output.writeAppConf()) + + #Ensure that the conf file we just generated/update is syntactically valid + for conf_file in updated_conf_files: + ConfWriter.validateConfFile(conf_file) + + conf_output.packageApp() + + print(f"Build of '{input_dto.config.app.title}' APP successful to {input_dto.config.getPackageFilePath()}") + + + if input_dto.config.build_api: + shutil.rmtree(input_dto.config.getAPIPath(), ignore_errors=True) + input_dto.config.getAPIPath().mkdir(parents=True) + api_json_output = ApiJsonOutput() + for output_objects, output_type in [(input_dto.director_output_dto.detections, SecurityContentType.detections), + (input_dto.director_output_dto.stories, SecurityContentType.stories), + (input_dto.director_output_dto.baselines, SecurityContentType.baselines), + (input_dto.director_output_dto.investigations, SecurityContentType.investigations), + (input_dto.director_output_dto.lookups, SecurityContentType.lookups), + (input_dto.director_output_dto.macros, SecurityContentType.macros), + (input_dto.director_output_dto.deployments, SecurityContentType.deployments)]: + api_json_output.writeObjects(output_objects, 
input_dto.config.getAPIPath(), input_dto.config.app.label, output_type ) + + + + #create version file for sse api + version_file = input_dto.config.getAPIPath()/"version.json" + utc_time = datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0,tzinfo=None).isoformat() + version_dict = {"version":{"name":f"v{input_dto.config.app.version}","published_at": f"{utc_time}Z" }} + with open(version_file,"w") as version_f: + json.dump(version_dict,version_f) + + print(f"Build of '{input_dto.config.app.title}' API successful to {input_dto.config.getAPIPath()}") + + if input_dto.config.build_ssa: + + srs_path = input_dto.config.getSSAPath() / 'srs' + complex_path = input_dto.config.getSSAPath() / 'complex' + shutil.rmtree(srs_path, ignore_errors=True) + shutil.rmtree(complex_path, ignore_errors=True) + srs_path.mkdir(parents=True) + complex_path.mkdir(parents=True) + ba_yml_output = BAYmlOutput() + ba_yml_output.writeObjects(input_dto.director_output_dto.ssa_detections, str(input_dto.config.getSSAPath())) + + print(f"Build of 'SSA' successful to {input_dto.config.getSSAPath()}") + + return input_dto.director_output_dto \ No newline at end of file diff --git a/contentctl/actions/detection_testing/DataManipulation.py b/contentctl/actions/detection_testing/DataManipulation.py deleted file mode 100644 index d978ab13..00000000 --- a/contentctl/actions/detection_testing/DataManipulation.py +++ /dev/null @@ -1,149 +0,0 @@ -import json -from datetime import datetime -from datetime import timedelta -#import fileinput -import os -import re -import io - -class DataManipulation: - - def manipulate_timestamp(self, file_path, sourcetype, source): - - - #print('Updating timestamps in attack_data before replaying') - if sourcetype == 'aws:cloudtrail': - self.manipulate_timestamp_cloudtrail(file_path) - - if source == 'WinEventLog:System' or source == 'WinEventLog:Security': - self.manipulate_timestamp_windows_event_log_raw(file_path) - - if source == 'exchange': - self.manipulate_timestamp_exchange_logs(file_path) - - - def manipulate_timestamp_exchange_logs(self, path): - f = io.open(path, "r", encoding="utf-8") - - first_line = f.readline() - d = json.loads(first_line) - latest_event = datetime.strptime(d["CreationTime"],"%Y-%m-%dT%H:%M:%S") - - now = datetime.now() - now = now.strftime("%Y-%m-%dT%H:%M:%S") - now = datetime.strptime(now,"%Y-%m-%dT%H:%M:%S") - - difference = now - latest_event - f.close() - - #Mimic the behavior of fileinput but in a threadsafe way - #Rename the file, which fileinput does for inplace. 
- #Note that path will now be the new file - original_backup_file = f"{path}.bak" - os.rename(path, original_backup_file) - - with open(original_backup_file, "r") as original_file: - with open(path, "w") as new_file: - for line in original_file: - d = json.loads(line) - original_time = datetime.strptime(d["CreationTime"],"%Y-%m-%dT%H:%M:%S") - new_time = (difference + original_time) - - original_time = original_time.strftime("%Y-%m-%dT%H:%M:%S") - new_time = new_time.strftime("%Y-%m-%dT%H:%M:%S") - #There is no end character appended, no need for end='' - new_file.write(line.replace(original_time, new_time)) - - - os.remove(original_backup_file) - - def manipulate_timestamp_windows_event_log_raw(self, path): - - f = io.open(path, "r", encoding="utf-8") - self.now = datetime.now() - self.now = self.now.strftime("%Y-%m-%dT%H:%M:%S.%fZ") - self.now = datetime.strptime(self.now,"%Y-%m-%dT%H:%M:%S.%fZ") - - # read raw logs - regex = r'\d{2}/\d{2}/\d{4} \d{2}:\d{2}:\d{2} [AP]M' - data = f.read() - lst_matches = re.findall(regex, data) - if len(lst_matches) > 0: - latest_event = datetime.strptime(lst_matches[-1],"%m/%d/%Y %I:%M:%S %p") - self.difference = self.now - latest_event - f.close() - - result = re.sub(regex, self.replacement_function, data) - - with io.open(path, "w+", encoding='utf8') as f: - f.write(result) - else: - f.close() - return - - - def replacement_function(self, match): - try: - event_time = datetime.strptime(match.group(),"%m/%d/%Y %I:%M:%S %p") - new_time = self.difference + event_time - return new_time.strftime("%m/%d/%Y %I:%M:%S %p") - except Exception as e: - self.logger.error("Error in timestamp replacement occured: " + str(e)) - return match.group() - - - def manipulate_timestamp_cloudtrail(self, path): - - - f = io.open(path, "r", encoding="utf-8") - - try: - first_line = f.readline() - d = json.loads(first_line) - latest_event = datetime.strptime(d["eventTime"],"%Y-%m-%dT%H:%M:%S.%fZ") - - now = datetime.now() - now = now.strftime("%Y-%m-%dT%H:%M:%S.%fZ") - now = datetime.strptime(now,"%Y-%m-%dT%H:%M:%S.%fZ") - except ValueError: - first_line = f.readline() - d = json.loads(first_line) - latest_event = datetime.strptime(d["eventTime"],"%Y-%m-%dT%H:%M:%SZ") - - now = datetime.now() - now = now.strftime("%Y-%m-%dT%H:%M:%SZ") - now = datetime.strptime(now,"%Y-%m-%dT%H:%M:%SZ") - - difference = now - latest_event - f.close() - - - - #Mimic the behavior of fileinput but in a threadsafe way - #Rename the file, which fileinput does for inplace. 
- #Note that path will now be the new file - original_backup_file = f"{path}.bak" - os.rename(path, original_backup_file) - - with open(original_backup_file, "r") as original_file: - with open(path, "w") as new_file: - for line in original_file: - try: - d = json.loads(line) - original_time = datetime.strptime(d["eventTime"],"%Y-%m-%dT%H:%M:%S.%fZ") - new_time = (difference + original_time) - - original_time = original_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ") - new_time = new_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ") - new_file.write(line.replace(original_time, new_time)) - except ValueError: - d = json.loads(line) - original_time = datetime.strptime(d["eventTime"],"%Y-%m-%dT%H:%M:%SZ") - new_time = (difference + original_time) - - original_time = original_time.strftime("%Y-%m-%dT%H:%M:%SZ") - new_time = new_time.strftime("%Y-%m-%dT%H:%M:%SZ") - new_file.write(line.replace(original_time, new_time)) - - - os.remove(original_backup_file) \ No newline at end of file diff --git a/contentctl/actions/detection_testing/DetectionTestingManager.py b/contentctl/actions/detection_testing/DetectionTestingManager.py index 70f9f2e1..5ad5e117 100644 --- a/contentctl/actions/detection_testing/DetectionTestingManager.py +++ b/contentctl/actions/detection_testing/DetectionTestingManager.py @@ -1,31 +1,15 @@ -from contentctl.objects.test_config import TestConfig -from contentctl.actions.detection_testing.infrastructures.DetectionTestingInfrastructure import ( - DetectionTestingInfrastructure, -) -from contentctl.actions.detection_testing.infrastructures.DetectionTestingInfrastructureContainer import ( - DetectionTestingInfrastructureContainer, -) -from contentctl.actions.detection_testing.infrastructures.DetectionTestingInfrastructureServer import ( - DetectionTestingInfrastructureServer, -) - -from contentctl.objects.app import App -import pathlib -import os -from contentctl.helper.utils import Utils +from typing import List,Union +from contentctl.objects.config import test, test_servers, Container,Infrastructure +from contentctl.actions.detection_testing.infrastructures.DetectionTestingInfrastructure import DetectionTestingInfrastructure +from contentctl.actions.detection_testing.infrastructures.DetectionTestingInfrastructureContainer import DetectionTestingInfrastructureContainer +from contentctl.actions.detection_testing.infrastructures.DetectionTestingInfrastructureServer import DetectionTestingInfrastructureServer from urllib.parse import urlparse -import time from copy import deepcopy from contentctl.objects.enums import DetectionTestingTargetInfrastructure import signal import datetime - # from queue import Queue - -CONTAINER_APP_PATH = pathlib.Path("apps") - from dataclasses import dataclass - # import threading import ctypes from contentctl.actions.detection_testing.infrastructures.DetectionTestingInfrastructure import ( @@ -35,23 +19,17 @@ from contentctl.actions.detection_testing.views.DetectionTestingView import ( DetectionTestingView, ) - from contentctl.objects.enums import PostTestBehavior - from pydantic import BaseModel, Field -from contentctl.input.director import DirectorOutputDto from contentctl.objects.detection import Detection - - import concurrent.futures - -import tqdm +import docker @dataclass(frozen=False) class DetectionTestingManagerInputDto: - config: TestConfig - testContent: DirectorOutputDto + config: Union[test,test_servers] + detections: List[Detection] views: list[DetectionTestingView] @@ -67,7 +45,7 @@ def setup(self): # for content in self.input_dto.testContent.detections: # 
self.pending_queue.put(content) - self.output_dto.inputQueue = self.input_dto.testContent.detections + self.output_dto.inputQueue = self.input_dto.detections self.create_DetectionTestingInfrastructureObjects() def execute(self) -> DetectionTestingManagerOutputDto: @@ -87,13 +65,13 @@ def sigint_handler(signum, frame): print("*******************************") signal.signal(signal.SIGINT, sigint_handler) - + with concurrent.futures.ThreadPoolExecutor( - max_workers=len(self.input_dto.config.infrastructure_config.infrastructures), + max_workers=len(self.input_dto.config.test_instances), ) as instance_pool, concurrent.futures.ThreadPoolExecutor( max_workers=len(self.input_dto.views) ) as view_runner, concurrent.futures.ThreadPoolExecutor( - max_workers=len(self.input_dto.config.infrastructure_config.infrastructures), + max_workers=len(self.input_dto.config.test_instances), ) as view_shutdowner: # Start all the views @@ -151,14 +129,41 @@ def sigint_handler(signum, frame): return self.output_dto def create_DetectionTestingInfrastructureObjects(self): - import sys + #Make sure that, if we need to, we pull the appropriate container + for infrastructure in self.input_dto.config.test_instances: + if (isinstance(self.input_dto.config, test) and isinstance(infrastructure, Container)): + try: + client = docker.from_env() + except Exception as e: + raise Exception("Unable to connect to docker. Are you sure that docker is running on this host?") + try: + + parts = self.input_dto.config.container_settings.full_image_path.split(':') + if len(parts) != 2: + raise Exception(f"Expected to find a name:tag in {self.input_dto.config.container_settings.full_image_path}, " + f"but instead found {parts}. Note that this path MUST include the tag, which is separated by ':'") + + print( + f"Getting the latest version of the container image [{self.input_dto.config.container_settings.full_image_path}]...", + end="", + flush=True, + ) + client.images.pull(parts[0], tag=parts[1], platform="linux/amd64") + print("done!") + break + except Exception as e: + raise Exception(f"Failed to pull docker container image [{self.input_dto.config.container_settings.full_image_path}]: {str(e)}") - for infrastructure in self.input_dto.config.infrastructure_config.infrastructures: + already_staged_container_files = False + for infrastructure in self.input_dto.config.test_instances: - if ( - self.input_dto.config.infrastructure_config.infrastructure_type - == DetectionTestingTargetInfrastructure.container - ): + if (isinstance(self.input_dto.config, test) and isinstance(infrastructure, Container)): + # Stage the files in the apps dir so that they can be passed directly to + # subsequent containers. 
Do this here, instead of inside each container, to + # avoid duplicate downloads/moves/copies + if not already_staged_container_files: + self.input_dto.config.getContainerEnvironmentString(stage_file=True) + already_staged_container_files = True self.detectionTestingInfrastructureObjects.append( DetectionTestingInfrastructureContainer( @@ -166,11 +171,7 @@ def create_DetectionTestingInfrastructureObjects(self): ) ) - elif ( - self.input_dto.config.infrastructure_config.infrastructure_type - == DetectionTestingTargetInfrastructure.server - ): - + elif (isinstance(self.input_dto.config, test_servers) and isinstance(infrastructure, Infrastructure)): self.detectionTestingInfrastructureObjects.append( DetectionTestingInfrastructureServer( global_config=self.input_dto.config, infrastructure=infrastructure, sync_obj=self.output_dto @@ -179,7 +180,5 @@ def create_DetectionTestingInfrastructureObjects(self): else: - print( - f"Unsupported target infrastructure '{self.input_dto.config.infrastructure_config.infrastructure_type}'" - ) - sys.exit(1) + raise Exception(f"Unsupported target infrastructure '{infrastructure}' and config type {self.input_dto.config}") + diff --git a/contentctl/actions/detection_testing/GitService.py b/contentctl/actions/detection_testing/GitService.py index cad52fbb..a0d7ff2c 100644 --- a/contentctl/actions/detection_testing/GitService.py +++ b/contentctl/actions/detection_testing/GitService.py @@ -1,258 +1,176 @@ -import csv -import glob import logging import os import pathlib -import subprocess -import sys -from typing import Union, Tuple -from docker import types -import datetime -import git -import yaml -from git.objects import base +import pygit2 +from pygit2.enums import DeltaStatus +from typing import List, Optional +from pydantic import BaseModel, FilePath +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from contentctl.input.director import DirectorOutputDto + -from contentctl.objects.detection import Detection -from contentctl.objects.story import Story -from contentctl.objects.baseline import Baseline -from contentctl.objects.investigation import Investigation -from contentctl.objects.playbook import Playbook from contentctl.objects.macro import Macro from contentctl.objects.lookup import Lookup -from contentctl.objects.unit_test import UnitTest - -from contentctl.objects.enums import DetectionTestingMode, DetectionStatus, AnalyticsType -import random -import pathlib -from contentctl.helper.utils import Utils - -from contentctl.objects.test_config import TestConfig -from contentctl.actions.generate import DirectorOutputDto +from contentctl.objects.detection import Detection +from contentctl.objects.security_content_object import SecurityContentObject +from contentctl.objects.config import test_common, All, Changes, Selected # Logger logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO")) LOGGER = logging.getLogger(__name__) -SSA_PREFIX = "ssa___" - - -class GitService: - def get_all_content(self, director: DirectorOutputDto) -> DirectorOutputDto: - # get a new director that will be used for testing. 
- return DirectorOutputDto( - self.get_detections(director), - self.get_stories(director), - self.get_baselines(director), - self.get_investigations(director), - self.get_playbooks(director), - self.get_macros(director), - self.get_lookups(director), - [], - [] - ) - def get_stories(self, director: DirectorOutputDto) -> list[Story]: - stories: list[Story] = [] - return stories +from contentctl.input.director import DirectorOutputDto - def get_baselines(self, director: DirectorOutputDto) -> list[Baseline]: - baselines: list[Baseline] = [] - return baselines - def get_investigations(self, director: DirectorOutputDto) -> list[Investigation]: - investigations: list[Investigation] = [] - return investigations - def get_playbooks(self, director: DirectorOutputDto) -> list[Playbook]: - playbooks: list[Playbook] = [] - return playbooks +class GitService(BaseModel): + director: DirectorOutputDto + config: test_common + gitHash: Optional[str] = None + + def getHash(self)->str: + if self.gitHash is None: + raise Exception("Cannot get hash of repo, it was not set") + return self.gitHash - def get_macros(self, director: DirectorOutputDto) -> list[Macro]: - macros: list[Macro] = [] - return macros - def get_lookups(self, director: DirectorOutputDto) -> list[Lookup]: - lookups: list[Lookup] = [] - return lookups - - def filter_detections_by_status(self, detections: list[Detection], - statuses_to_test: set[DetectionStatus] = {DetectionStatus.production})->list[Detection]: - #print("\n".join(sorted([f"{detection.file_path[92:]} - {detection.status}" for detection in detections if DetectionStatus(detection.status) not in statuses_to_test]))) - #print() - return [detection for detection in detections if DetectionStatus(detection.status) in statuses_to_test] - - # TODO (cmcginley): consider listing Correlation type detections as skips rather than excluding - # them from results altogether? - def filter_detections_by_type(self, detections: list[Detection], - types_to_test: set[AnalyticsType] = {AnalyticsType.Anomaly, AnalyticsType.TTP, AnalyticsType.Hunting})->list[Detection]: - #print("\n".join(sorted([f"{detection.file_path[92:]} - {detection.type}" for detection in detections if AnalyticsType(detection.type) not in types_to_test]))) - #print() - return [detection for detection in detections if AnalyticsType(detection.type) in types_to_test] - def get_detections(self, director: DirectorOutputDto) -> list[Detection]: - if self.config.mode == DetectionTestingMode.selected: - detections = self.get_detections_selected(director) - elif self.config.mode == DetectionTestingMode.all: - detections = self.get_detections_all(director) - elif self.config.mode == DetectionTestingMode.changes: - detections = self.get_detections_changed(director) + def getContent(self)->List[Detection]: + if isinstance(self.config.mode, Selected): + return self.getSelected(self.config.mode.files) + elif isinstance(self.config.mode, Changes): + return self.getChanges(self.config.mode.target_branch) + if isinstance(self.config.mode, All): + return self.getAll() else: - raise ( - Exception( - f"Error: Unsupported detection testing mode in GitService: {self.config.mode}" - ) - ) + raise Exception(f"Could not get content to test. 
Unsupported test mode '{self.config.mode}'") + def getAll(self)->List[Detection]: + return self.director.detections + + def getChanges(self, target_branch:str)->List[Detection]: + repo = pygit2.Repository(path=str(self.config.path)) + + try: + target_tree = repo.revparse_single(target_branch).tree + self.gitHash = target_tree.id + diffs = repo.index.diff_to_tree(target_tree) + except Exception as e: + raise Exception(f"Error parsing diff target_branch '{target_branch}'. Are you certain that it exists?") + #Get the uncommitted changes in the current directory + diffs2 = repo.index.diff_to_workdir() - detections = self.filter_detections_by_status(detections) - - detections = self.filter_detections_by_type(detections) - return detections - - def get_detections_selected(self, director: DirectorOutputDto) -> list[Detection]: - detections_to_test: list[Detection] = [] - requested_set = set(self.requested_detections) - missing_detections: set[pathlib.Path] = set() - - for requested in requested_set: - matching = list( - filter( - lambda detection: pathlib.Path(detection.file_path).resolve() - == requested.resolve(), - director.detections, - ) - ) - if len(matching) == 1: - detections_to_test.append(matching.pop()) - elif len(matching) == 0: - missing_detections.add(requested) + #Combine the uncommitted changes with the committed changes + all_diffs = list(diffs) + list(diffs2) + + #Make a filename to content map + filepath_to_content_map = { obj.file_path:obj for (_,obj) in self.director.name_to_content_map.items()} + updated_detections:List[Detection] = [] + updated_macros:List[Macro] = [] + updated_lookups:List[Lookup] =[] + + for diff in all_diffs: + if type(diff) == pygit2.Patch: + if diff.delta.status in (DeltaStatus.ADDED, DeltaStatus.MODIFIED, DeltaStatus.RENAMED): + #print(f"{DeltaStatus(diff.delta.status).name:<8}:{diff.delta.new_file.raw_path}") + decoded_path = pathlib.Path(diff.delta.new_file.raw_path.decode('utf-8')) + if 'app_template/' in str(decoded_path) or 'ssa_detections' in str(decoded_path) or str(self.config.getBuildDir()) in str(decoded_path): + #Ignore anything that is embedded in the app template. + #Also ignore ssa detections + pass + elif 'detections/' in str(decoded_path) and decoded_path.suffix == ".yml": + detectionObject = filepath_to_content_map.get(decoded_path, None) + if isinstance(detectionObject, Detection): + updated_detections.append(detectionObject) + else: + raise Exception(f"Error getting detection object for file {str(decoded_path)}") + + elif 'macros/' in str(decoded_path) and decoded_path.suffix == ".yml": + macroObject = filepath_to_content_map.get(decoded_path, None) + if isinstance(macroObject, Macro): + updated_macros.append(macroObject) + else: + raise Exception(f"Error getting macro object for file {str(decoded_path)}") + + elif 'lookups/' in str(decoded_path): + # We need to convert this to a yml. This means we will catch + # both changes to a csv AND changes to the YML that uses it + + + if decoded_path.suffix == ".yml": + updatedLookup = filepath_to_content_map.get(decoded_path, None) + if not isinstance(updatedLookup,Lookup): + raise Exception(f"Expected {decoded_path} to be type {type(Lookup)}, but instead if was {(type(lookupObject))}") + updated_lookups.append(updatedLookup) + + elif decoded_path.suffix == ".csv": + # If the CSV was updated, we want to make sure that we + # add the correct corresponding Lookup object. 
+ #Filter to find the Lookup Object the references this CSV + matched = list(filter(lambda x: x.filename is not None and x.filename == decoded_path, self.director.lookups)) + if len(matched) == 0: + raise Exception(f"Failed to find any lookups that reference the modified CSV file '{decoded_path}'") + elif len(matched) > 1: + raise Exception(f"More than 1 Lookup reference the modified CSV file '{decoded_path}': {[l.file_path for l in matched ]}") + else: + updatedLookup = matched[0] + else: + raise Exception(f"Error getting lookup object for file {str(decoded_path)}") + + if updatedLookup not in updated_lookups: + # It is possible that both th CSV and YML have been modified for the same lookup, + # and we do not want to add it twice. + updated_lookups.append(updatedLookup) + + else: + pass + #print(f"Ignore changes to file {decoded_path} since it is not a detection, macro, or lookup.") + + # else: + # print(f"{diff.delta.new_file.raw_path}:{DeltaStatus(diff.delta.status).name} (IGNORED)") + # pass else: - raise ( - Exception( - f"Error: multiple detection files found when attemping to resolve [{str(requested)}]" - ) - ) - - if len(missing_detections) > 0: - missing_detections_str = "\n\t - ".join( - [str(path.absolute()) for path in missing_detections] - ) - print(director.detections) - raise ( - Exception( - f"Failed to find the following detection file(s) for testing:\n\t - {missing_detections_str}" - ) - ) + raise Exception(f"Unrecognized type {type(diff)}") - return detections_to_test - def get_detections_all(self, director: DirectorOutputDto) -> list[Detection]: - # Assume we don't need to remove anything, like deprecated or experimental from this - return director.detections - - def get_detections_changed(self, director: DirectorOutputDto) -> list[Detection]: - if self.repo is None: - raise ( - Exception( - f"Error: self.repo must be initialized before getting changed detections." - ) - ) - - target_branch_repo_object = self.repo.commit(f"origin/{self.config.version_control_config.target_branch}") - test_branch_repo_object = self.repo.commit(self.config.version_control_config.test_branch) - differences = target_branch_repo_object.diff(test_branch_repo_object) - - new_content = [] - modified_content = [] - deleted_content = [] - renamed_content = [] - - for content in differences.iter_change_type("M"): - modified_content.append(content.b_path) - for content in differences.iter_change_type("A"): - new_content.append(content.b_path) - for content in differences.iter_change_type("D"): - deleted_content.append(content.b_path) - for content in differences.iter_change_type("R"): - renamed_content.append(content.b_path) - - #Changes to detections, macros, and lookups should trigger a re-test for anything which uses them - changed_lookups_list = list(filter(lambda x: x.startswith("lookups"), new_content+modified_content)) - changed_lookups = set() - - #We must account for changes to the lookup yml AND for the underlying csv - for lookup in changed_lookups_list: - if lookup.endswith(".csv"): - lookup = lookup.replace(".csv", ".yml") - changed_lookups.add(lookup) - - # At some point we should account for macros which contain other macros... 
- changed_macros = set(filter(lambda x: x.startswith("macros"), new_content+modified_content)) - changed_macros_and_lookups = set([str(pathlib.Path(filename).absolute()) for filename in changed_lookups.union(changed_macros)]) - - changed_detections = set(filter(lambda x: x.startswith("detections"), new_content+modified_content+renamed_content)) - - #Check and see if content that has been modified uses any of the changed macros or lookups - for detection in director.detections: - deps = set([content.file_path for content in detection.get_content_dependencies()]) - if not deps.isdisjoint(changed_macros_and_lookups): - changed_detections.add(detection.file_path) - - changed_detections_string = '\n - '.join(changed_detections) - #print(f"The following [{len(changed_detections)}] detections, or their dependencies (macros/lookups), have changed:\n - {changed_detections_string}") - return Detection.get_detections_from_filenames(changed_detections, director.detections) - - def __init__(self, config: TestConfig): + # If a detection has at least one dependency on changed content, + # then we must test it again + changed_macros_and_lookups = updated_macros + updated_lookups - self.requested_detections: list[pathlib.Path] = [] - self.config = config - if config.version_control_config is not None: - self.repo = git.Repo(config.version_control_config.repo_path) - else: - self.repo = None - - - if config.mode == DetectionTestingMode.changes: - if self.repo is None: - raise Exception("You are using detection mode 'changes', but the app does not have a version_control_config in contentctl_test.yml.") - return - elif config.mode == DetectionTestingMode.all: - return - elif config.mode == DetectionTestingMode.selected: - if config.detections_list is None or len(config.detections_list) < 1: - raise ( - Exception( - f"Error: detection mode [{config.mode}] REQUIRES that [{config.detections_list}] contains 1 or more detections, but the value is [{config.detections_list}]" - ) - ) + for detection in self.director.detections: + if detection in updated_detections: + # we are already planning to test it, don't need + # to add it again + continue + + for obj in changed_macros_and_lookups: + if obj in detection.get_content_dependencies(): + updated_detections.append(detection) + break + + #Print out the names of all modified/new content + modifiedAndNewContentString = "\n - ".join(sorted([d.name for d in updated_detections])) + + print(f"[{len(updated_detections)}] Pieces of modifed and new content to test:\n - {modifiedAndNewContentString}") + return updated_detections + + def getSelected(self, detectionFilenames:List[FilePath])->List[Detection]: + filepath_to_content_map:dict[FilePath, SecurityContentObject] = { obj.file_path:obj for (_,obj) in self.director.name_to_content_map.items() if obj.file_path is not None} + errors = [] + detections:List[Detection] = [] + for name in detectionFilenames: + obj = filepath_to_content_map.get(name,None) + if obj == None: + errors.append(f"There is no detection file or security_content_object at '{name}'") + elif not isinstance(obj, Detection): + errors.append(f"The security_content_object at '{name}' is of type '{type(obj).__name__}', NOT '{Detection.__name__}'") else: - # Ensure that all of the detections exist - missing_files = [ - detection - for detection in config.detections_list - if not pathlib.Path(detection).is_file() - ] - if len(missing_files) > 0: - missing_string = "\n\t - ".join(missing_files) - raise ( - Exception( - f"Error: The following detection(s) test do 
not exist:\n\t - {missing_files}" - ) - ) - else: - self.requested_detections = [ - pathlib.Path(detection_file_name) - for detection_file_name in config.detections_list - ] - - else: - raise Exception(f"Unsupported detection testing mode [{config.mode}]. "\ - "Supported detection testing modes are [{DetectionTestingMode._member_names_}]") - return - + detections.append(obj) - def clone_project(self, url, project, branch): - LOGGER.info(f"Clone Security Content Project") - repo_obj = git.Repo.clone_from(url, project, branch=branch) - return repo_obj + if len(errors) > 0: + errorsString = "\n - ".join(errors) + raise Exception(f"There following errors were encountered while getting selected detections to test:\n - {errorsString}") + return detections diff --git a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py index 3a6c5063..1e892905 100644 --- a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +++ b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py @@ -10,11 +10,11 @@ from tempfile import TemporaryDirectory, mktemp from ssl import SSLEOFError, SSLZeroReturnError from sys import stdout -from dataclasses import dataclass +#from dataclasses import dataclass from shutil import copyfile from typing import Union, Optional -from pydantic import BaseModel, PrivateAttr, Field +from pydantic import BaseModel, PrivateAttr, Field, dataclasses import requests # type: ignore import splunklib.client as client # type: ignore from splunklib.binding import HTTPError # type: ignore @@ -23,6 +23,7 @@ from urllib3 import disable_warnings import urllib.parse +from contentctl.objects.config import test_common, Infrastructure from contentctl.objects.enums import PostTestBehavior, AnalyticsType from contentctl.objects.detection import Detection from contentctl.objects.base_test import BaseTest @@ -31,14 +32,10 @@ from contentctl.objects.unit_test_attack_data import UnitTestAttackData from contentctl.objects.unit_test_result import UnitTestResult from contentctl.objects.integration_test_result import IntegrationTestResult -from contentctl.objects.test_config import TestConfig, Infrastructure from contentctl.objects.test_group import TestGroup from contentctl.objects.base_test_result import TestResultStatus from contentctl.objects.correlation_search import CorrelationSearch, PbarData from contentctl.helper.utils import Utils -from contentctl.actions.detection_testing.DataManipulation import ( - DataManipulation, -) from contentctl.actions.detection_testing.progress_bar import ( format_pbar_string, TestReportingType, @@ -66,8 +63,8 @@ class ContainerStoppedException(Exception): pass -@dataclass(frozen=False) -class DetectionTestingManagerOutputDto: +@dataclasses.dataclass(frozen=False) +class DetectionTestingManagerOutputDto(): inputQueue: list[Detection] = Field(default_factory=list) outputQueue: list[Detection] = Field(default_factory=list) skippedQueue: list[Detection] = Field(default_factory=list) @@ -81,7 +78,7 @@ class DetectionTestingManagerOutputDto: class DetectionTestingInfrastructure(BaseModel, abc.ABC): # thread: threading.Thread = threading.Thread() - global_config: TestConfig + global_config: test_common infrastructure: Infrastructure sync_obj: DetectionTestingManagerOutputDto hec_token: str = "" @@ -239,6 +236,7 @@ def connect_to_api(self, sleep_seconds: int = 5): self.pbar.write( f"Error getting API 
connection (not quitting) '{type(e).__name__}': {str(e)}" ) + print("wow") # self.pbar.write( # f"Unhandled exception getting connection to splunk server: {str(e)}" # ) @@ -397,7 +395,7 @@ def execute(self): try: self.test_detection(detection) except ContainerStoppedException: - self.pbar.write(f"Stopped container [{self.get_name()}]") + self.pbar.write(f"Warning - container was stopped when trying to execute detection [{self.get_name()}]") self.finish() return except Exception as e: @@ -1196,14 +1194,12 @@ def replay_attack_data_file( ): tempfile = mktemp(dir=tmp_dir) - if not ( - attack_data_file.data.startswith("https://") - or attack_data_file.data.startswith("http://") - ): - if pathlib.Path(attack_data_file.data).is_file(): + if not (str(attack_data_file.data).startswith("http://") or + str(attack_data_file.data).startswith("https://")) : + if pathlib.Path(str(attack_data_file.data)).is_file(): self.format_pbar_string(TestReportingType.GROUP, test_group.name, "Copying Data", test_group_start_time) try: - copyfile(attack_data_file.data, tempfile) + copyfile(str(attack_data_file.data), tempfile) except Exception as e: raise Exception( f"Error copying local Attack Data File for [{test_group.name}] - [{attack_data_file.data}]: " @@ -1229,7 +1225,7 @@ def replay_attack_data_file( ) Utils.download_file_from_http( - attack_data_file.data, tempfile, self.pbar, overwrite_file=True + str(attack_data_file.data), tempfile, self.pbar, overwrite_file=True ) except Exception as e: raise ( @@ -1238,12 +1234,6 @@ def replay_attack_data_file( ) ) - # Update timestamps before replay - if attack_data_file.update_timestamp: - data_manipulation = DataManipulation() - data_manipulation.manipulate_timestamp( - tempfile, attack_data_file.sourcetype, attack_data_file.source - ) # Upload the data self.format_pbar_string( @@ -1366,7 +1356,7 @@ def status(self): pass def finish(self): - self.pbar.bar_format = f"Stopped container [{self.get_name()}]" + self.pbar.bar_format = f"Finished running tests on instance: [{self.get_name()}]" self.pbar.update() self.pbar.close() diff --git a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py index c3743730..7371cbfd 100644 --- a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +++ b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py @@ -1,21 +1,19 @@ from contentctl.actions.detection_testing.infrastructures.DetectionTestingInfrastructure import ( DetectionTestingInfrastructure, ) +from contentctl.objects.config import test import docker.models.resource import docker.models.containers import docker import docker.types -from contentctl.objects.test_config import ( - CONTAINER_APP_DIR, - LOCAL_APP_DIR, -) class DetectionTestingInfrastructureContainer(DetectionTestingInfrastructure): + global_config: test container: docker.models.resource.Model = None def start(self): - if self.global_config.infrastructure_config.persist_and_reuse_container: + if self.global_config.container_settings.leave_running: # If we are configured to use the persistent container, then check and see if it's already # running. If so, just use it without additional configuration. 
try: @@ -76,8 +74,8 @@ def make_container(self) -> docker.models.resource.Model: mounts = [ docker.types.Mount( - source=str(LOCAL_APP_DIR.absolute()), - target=str(CONTAINER_APP_DIR.absolute()), + source=str(self.global_config.getLocalAppDir()), + target=str(self.global_config.getContainerAppDir()), type="bind", read_only=True, ) @@ -86,18 +84,32 @@ def make_container(self) -> docker.models.resource.Model: environment = {} environment["SPLUNK_START_ARGS"] = "--accept-license" environment["SPLUNK_PASSWORD"] = self.infrastructure.splunk_app_password - environment["SPLUNK_APPS_URL"] = ",".join( - p.environment_path for p in self.global_config.apps - ) + # Files have already been staged by the time that we call this. Files must only be staged + # once, not staged by every container + environment["SPLUNK_APPS_URL"] = self.global_config.getContainerEnvironmentString(stage_file=False) if ( - self.global_config.splunkbase_password is not None - and self.global_config.splunkbase_username is not None + self.global_config.splunk_api_username is not None + and self.global_config.splunk_api_password is not None ): - environment["SPLUNKBASE_USERNAME"] = self.global_config.splunkbase_username - environment["SPLUNKBASE_PASSWORD"] = self.global_config.splunkbase_password - + environment["SPLUNKBASE_USERNAME"] = self.global_config.splunk_api_username + environment["SPLUNKBASE_PASSWORD"] = self.global_config.splunk_api_password + + + + def emit_docker_run_equivalent(): + environment_string = " ".join([f'-e "{k}={environment.get(k)}"' for k in environment.keys()]) + print(f"\n\ndocker run -d "\ + f"-p {self.infrastructure.web_ui_port}:8000 " + f"-p {self.infrastructure.hec_port}:8088 " + f"-p {self.infrastructure.api_port}:8089 " + f"{environment_string} " + f" --name {self.get_name()} " + f"--platform linux/amd64 " + f"{self.global_config.container_settings.full_image_path}\n\n") + #emit_docker_run_equivalent() + container = self.get_docker_client().containers.create( - self.global_config.infrastructure_config.full_image_path, + self.global_config.container_settings.full_image_path, ports=ports_dict, environment=environment, name=self.get_name(), @@ -105,6 +117,18 @@ def make_container(self) -> docker.models.resource.Model: detach=True, platform="linux/amd64" ) + + if self.global_config.enterpriseSecurityInApps(): + #ES sets up https, so make sure it is included in the link + address = f"https://{self.infrastructure.instance_address}:{self.infrastructure.web_ui_port}" + else: + address = f"http://{self.infrastructure.instance_address}:{self.infrastructure.web_ui_port}" + print(f"\nStarted container with the following information:\n" + f"\tname : [{self.get_name()}]\n" + f"\taddress : [{address}]\n" + f"\tusername: [{self.infrastructure.splunk_app_username}]\n" + f"\tpassword: [{self.infrastructure.splunk_app_password}]\n" + ) return container @@ -119,13 +143,15 @@ def removeContainer(self, removeVolumes: bool = True, forceRemove: bool = True): try: # If the user wants to persist the container (or use a previously configured container), then DO NOT remove it. # Emit the following message, which they will see on initial setup and teardown at the end of the test. 
- if self.global_config.infrastructure_config.persist_and_reuse_container: + if self.global_config.container_settings.leave_running: print(f"\nContainer [{self.get_name()}] has NOT been terminated because 'contentctl_test.yml ---> infrastructure_config ---> persist_and_reuse_container = True'") print(f"To remove it, please manually run the following at the command line: `docker container rm -fv {self.get_name()}`\n") return # container was found, so now we try to remove it # v also removes volumes linked to the container container.remove(v=removeVolumes, force=forceRemove) + print(f"container [{self.get_name()}] successfully removed") + # remove it even if it is running. remove volumes as well # No need to print that the container has been removed, it is expected behavior diff --git a/contentctl/actions/detection_testing/views/DetectionTestingView.py b/contentctl/actions/detection_testing/views/DetectionTestingView.py index c6011d8d..9b1d8e0d 100644 --- a/contentctl/actions/detection_testing/views/DetectionTestingView.py +++ b/contentctl/actions/detection_testing/views/DetectionTestingView.py @@ -3,7 +3,8 @@ from pydantic import BaseModel -from contentctl.objects.test_config import TestConfig +from contentctl.objects.config import test_common + from contentctl.actions.detection_testing.infrastructures.DetectionTestingInfrastructure import ( DetectionTestingManagerOutputDto, ) @@ -12,7 +13,7 @@ class DetectionTestingView(BaseModel, abc.ABC): - config: TestConfig + config: test_common sync_obj: DetectionTestingManagerOutputDto interval: float = 10 diff --git a/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py b/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py index 7cdf5acf..4b31bca7 100644 --- a/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py +++ b/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py @@ -1,11 +1,3 @@ -from pydantic import BaseModel -import abc -from typing import Callable -from contentctl.objects.test_config import TestConfig -from contentctl.actions.detection_testing.infrastructures.DetectionTestingInfrastructure import ( - DetectionTestingManagerOutputDto, -) - from contentctl.actions.detection_testing.views.DetectionTestingView import ( DetectionTestingView, ) diff --git a/contentctl/actions/doc_gen.py b/contentctl/actions/doc_gen.py index 2936937a..ed5b607a 100644 --- a/contentctl/actions/doc_gen.py +++ b/contentctl/actions/doc_gen.py @@ -13,7 +13,7 @@ class DocGenInputDto: class DocGen: def execute(self, input_dto: DocGenInputDto) -> None: - director_output_dto = DirectorOutputDto([],[],[],[],[],[],[],[]) + director_output_dto = DirectorOutputDto([],[],[],[],[],[],[],[],[],[]) director = Director(director_output_dto) director.execute(input_dto.director_input_dto) diff --git a/contentctl/actions/generate.py b/contentctl/actions/generate.py deleted file mode 100644 index 97fa9bd3..00000000 --- a/contentctl/actions/generate.py +++ /dev/null @@ -1,91 +0,0 @@ -import sys -import shutil -import os - -from dataclasses import dataclass - -from contentctl.objects.enums import SecurityContentProduct, SecurityContentType -from contentctl.input.director import Director, DirectorInputDto, DirectorOutputDto -from contentctl.output.conf_output import ConfOutput -from contentctl.output.ba_yml_output import BAYmlOutput -from contentctl.output.api_json_output import ApiJsonOutput -import pathlib -import json -import datetime -from typing import Union - -@dataclass(frozen=True) -class GenerateInputDto: - 
director_input_dto: DirectorInputDto - splunk_api_username: Union[str,None] = None - splunk_api_password: Union[str,None] = None - #For most cloud stacks, the stack_type argument has been deprecated for appinspect. - #Still, we will pass it in case there are users of very old stacks. - stack_type: str = "victoria" - -class Generate: - - def execute(self, input_dto: GenerateInputDto) -> DirectorOutputDto: - director_output_dto = DirectorOutputDto([],[],[],[],[],[],[],[],[]) - director = Director(director_output_dto) - director.execute(input_dto.director_input_dto) - - if input_dto.director_input_dto.product == SecurityContentProduct.SPLUNK_APP: - if (input_dto.splunk_api_username is None) ^ (input_dto.splunk_api_password is None): - # Exclusive OR above finds when ONE of these is defined but the other is not - if input_dto.splunk_api_password: - raise Exception("splunk_api_password was provided, but splunk_api_username was not. Please provide both or neither") - else: - raise Exception("splunk_api_username was provided, but splunk_api_password was not. Please provide both or neither") - - - - - conf_output = ConfOutput(input_dto.director_input_dto.input_path, input_dto.director_input_dto.config) - conf_output.writeHeaders() - conf_output.writeObjects(director_output_dto.detections, SecurityContentType.detections) - conf_output.writeObjects(director_output_dto.stories, SecurityContentType.stories) - conf_output.writeObjects(director_output_dto.baselines, SecurityContentType.baselines) - conf_output.writeObjects(director_output_dto.investigations, SecurityContentType.investigations) - conf_output.writeObjects(director_output_dto.lookups, SecurityContentType.lookups) - conf_output.writeObjects(director_output_dto.macros, SecurityContentType.macros) - conf_output.writeAppConf() - conf_output.packageApp() - - #conf_output.inspectAppCLI() - if input_dto.splunk_api_username and input_dto.splunk_api_password: - _ = conf_output.inspectAppAPI(input_dto.splunk_api_username, input_dto.splunk_api_password, input_dto.stack_type) - - print(f'Generate of security content successful to {conf_output.output_path}') - return director_output_dto - - elif input_dto.director_input_dto.product == SecurityContentProduct.SSA: - output_path = os.path.join(input_dto.director_input_dto.input_path, input_dto.director_input_dto.config.build_ssa.path_root) - shutil.rmtree(output_path + '/srs/', ignore_errors=True) - shutil.rmtree(output_path + '/complex/', ignore_errors=True) - os.makedirs(output_path + '/complex/') - os.makedirs(output_path + '/srs/') - ba_yml_output = BAYmlOutput() - ba_yml_output.writeObjects(director_output_dto.ssa_detections, output_path) - - elif input_dto.director_input_dto.product == SecurityContentProduct.API: - output_path = os.path.join(input_dto.director_input_dto.input_path, input_dto.director_input_dto.config.build_api.path_root) - shutil.rmtree(output_path, ignore_errors=True) - os.makedirs(output_path) - api_json_output = ApiJsonOutput() - api_json_output.writeObjects(director_output_dto.detections, output_path, SecurityContentType.detections) - api_json_output.writeObjects(director_output_dto.stories, output_path, SecurityContentType.stories) - api_json_output.writeObjects(director_output_dto.baselines, output_path, SecurityContentType.baselines) - api_json_output.writeObjects(director_output_dto.investigations, output_path, SecurityContentType.investigations) - api_json_output.writeObjects(director_output_dto.lookups, output_path, SecurityContentType.lookups) - 
api_json_output.writeObjects(director_output_dto.macros, output_path, SecurityContentType.macros) - api_json_output.writeObjects(director_output_dto.deployments, output_path, SecurityContentType.deployments) - - #create version file for sse api - version_file = pathlib.Path(output_path)/"version.json" - utc_time = datetime.datetime.utcnow().replace(microsecond=0).isoformat() - version_dict = {"version":{"name":f"v{input_dto.director_input_dto.config.build.version}","published_at": f"{utc_time}Z" }} - with open(version_file,"w") as version_f: - json.dump(version_dict,version_f) - - return director_output_dto \ No newline at end of file diff --git a/contentctl/actions/initialize.py b/contentctl/actions/initialize.py index 18c89fc4..679574b8 100644 --- a/contentctl/actions/initialize.py +++ b/contentctl/actions/initialize.py @@ -2,81 +2,44 @@ import shutil import os import pathlib -from dataclasses import dataclass -from contentctl.objects.config import Config, TestConfig, PASSWORD -from contentctl.output.yml_writer import YmlWriter -import json -@dataclass(frozen=True) -class InitializeInputDto: - path: pathlib.Path - demo: bool = False +from pydantic import RootModel +from contentctl.objects.config import test +from contentctl.output.yml_writer import YmlWriter class Initialize: - def execute(self, input_dto: InitializeInputDto) -> None: - - c = Config() + def execute(self, config: test) -> None: + # construct a test object from the init object + # This way we can easily populate a yml with ALL the important + # fields for validating, building, and testing your app. - t = TestConfig.construct() #Disable validation for default object + YmlWriter.writeYmlFile(str(config.path/'contentctl.yml'), config.model_dump()) - config_as_dict = c.dict() - config_as_dict.pop("test") - YmlWriter.writeYmlFile(os.path.join(input_dto.path, 'contentctl.yml'), config_as_dict) - - - # This field serialization hack is required to get - # enums declared in Pydantic Models serialized properly - # without emitting tags that make them hard to read in yml + #Create the following empty directories: + for emptyDir in ['lookups', 'baselines', 'docs', 'reporting', 'investigations']: + #Throw an error if this directory already exists + (config.path/emptyDir).mkdir(exist_ok=False) - j = json.dumps(t.dict(),sort_keys=False) - obj=json.loads(j) - YmlWriter.writeYmlFile(os.path.join(input_dto.path, 'contentctl_test.yml'), dict(obj)) - - folders = ['detections', 'stories', 'lookups', 'macros', 'baselines', 'dist', 'docs', 'reporting', 'investigations'] - for folder in folders: - os.makedirs(os.path.join(input_dto.path, folder)) - - # Working Detection - source_path = pathlib.Path(os.path.join(os.path.dirname(__file__), '../templates/detections/')) - dest_path = pathlib.Path(os.path.join(input_dto.path, 'detections')) - detections_to_populate = ['anomalous_usage_of_7zip.yml'] - if input_dto.demo: - detections_to_populate += ['anomalous_usage_of_7zip_validation_fail.yml', - 'anomalous_usage_of_7zip_test_fail.yml'] - - for detection_name in detections_to_populate: - shutil.copyfile( - source_path/detection_name, - dest_path/detection_name) + #copy the contents of all template directories + for templateDir, targetDir in [ + ('../templates/app_template/', 'app_template'), + ('../templates/deployments/', 'deployments'), + ('../templates/detections/', 'detections'), + ('../templates/macros/','macros'), + ('../templates/stories/', 'stories'), + ]: + source_directory = pathlib.Path(os.path.dirname(__file__))/templateDir + 
target_directory = config.path/targetDir + #Throw an exception if the target exists + shutil.copytree(source_directory, target_directory, dirs_exist_ok=False) + #Create the config file as well + shutil.copyfile(pathlib.Path(os.path.dirname(__file__))/'../templates/README','README') - shutil.copytree( - os.path.join(os.path.dirname(__file__), '../templates/deployments'), - os.path.join(input_dto.path, 'deployments') - ) - - shutil.copyfile( - os.path.join(os.path.dirname(__file__), '../templates/stories/cobalt_strike.yml'), - os.path.join(input_dto.path, 'stories', 'cobalt_strike.yml') - ) - - shutil.copyfile( - os.path.join(os.path.dirname(__file__), '../templates/macros/security_content_ctime.yml'), - os.path.join(input_dto.path, 'macros', 'security_content_ctime.yml') - ) - - shutil.copyfile( - os.path.join(os.path.dirname(__file__), '../templates/macros/security_content_summariesonly.yml'), - os.path.join(input_dto.path, 'macros', 'security_content_summariesonly.yml') - ) - - shutil.copyfile( - os.path.join(os.path.dirname(__file__), '../templates/README'), - os.path.join(input_dto.path, 'README') - ) - print('The following folders were created: {0} under {1}.\nContent pack has been initialized, please run `new` to create new content.'.format(folders, input_dto.path)) + print(f"The app '{config.app.title}' has been initialized. " + "Please run 'contentctl new --type {detection,story}' to create new content") diff --git a/contentctl/actions/inspect.py b/contentctl/actions/inspect.py new file mode 100644 index 00000000..27210ed2 --- /dev/null +++ b/contentctl/actions/inspect.py @@ -0,0 +1,260 @@ +import sys + + +from dataclasses import dataclass + +import pathlib +import json +import datetime + + +from contentctl.objects.config import inspect +from requests import Session, post, get +from requests.auth import HTTPBasicAuth +import timeit +import time +@dataclass(frozen=True) +class InspectInputDto: + config:inspect + + +class Inspect: + + def execute(self, config: inspect) -> str: + if config.build_app or config.build_api: + + self.inspectAppCLI(config) + appinspect_token = self.inspectAppAPI(config) + + + return appinspect_token + + else: + raise Exception("Inspect only supported for app and api build targets") + + def getElapsedTime(self, startTime:float)->datetime.timedelta: + return datetime.timedelta(seconds=round(timeit.default_timer() - startTime)) + + + def inspectAppAPI(self, config: inspect)->str: + session = Session() + session.auth = HTTPBasicAuth(config.splunk_api_username, config.splunk_api_password) + if config.stack_type not in ['victoria', 'classic']: + raise Exception(f"stack_type MUST be either 'classic' or 'victoria', NOT '{config.stack_type}'") + + APPINSPECT_API_LOGIN = "https://api.splunk.com/2.0/rest/login/splunk" + + + + res = session.get(APPINSPECT_API_LOGIN) + #If login failed or other failure, raise an exception + res.raise_for_status() + + authorization_bearer = res.json().get("data",{}).get("token",None) + APPINSPECT_API_VALIDATION_REQUEST = "https://appinspect.splunk.com/v1/app/validate" + headers = { + "Authorization": f"bearer {authorization_bearer}", + "Cache-Control": "no-cache" + } + + package_path = config.getPackageFilePath(include_version=False) + if not package_path.is_file(): + raise Exception(f"Cannot run Appinspect API on App '{config.app.title}' - " + f"no package exists as expected path '{package_path}'.\nAre you " + "trying to 'contentctl acs_deploy' the package BEFORE running 'contentctl build'?") + + files = { + "app_package": 
open(package_path,"rb"), + "included_tags":(None,"cloud") + } + + res = post(APPINSPECT_API_VALIDATION_REQUEST, headers=headers, files=files) + + res.raise_for_status() + + request_id = res.json().get("request_id",None) + APPINSPECT_API_VALIDATION_STATUS = f"https://appinspect.splunk.com/v1/app/validate/status/{request_id}?included_tags=private_{config.stack_type}" + headers = headers = { + "Authorization": f"bearer {authorization_bearer}" + } + startTime = timeit.default_timer() + # the first time, wait for 40 seconds. subsequent times, wait for less. + # this is because appinspect takes some time to return, so there is no sense + # checking many times when we know it will take at least 40 seconds to run. + iteration_wait_time = 40 + while True: + + res = get(APPINSPECT_API_VALIDATION_STATUS, headers=headers) + res.raise_for_status() + status = res.json().get("status",None) + if status in ["PROCESSING", "PREPARING"]: + print(f"[{self.getElapsedTime(startTime)}] Appinspect API is {status}...") + time.sleep(iteration_wait_time) + iteration_wait_time = 1 + continue + elif status == "SUCCESS": + print(f"[{self.getElapsedTime(startTime)}] Appinspect API has finished!") + break + else: + raise Exception(f"Error - Unknown Appinspect API status '{status}'") + + + + #We have finished running appinspect, so get the report + APPINSPECT_API_REPORT = f"https://appinspect.splunk.com/v1/app/report/{request_id}?included_tags=private_{config.stack_type}" + #Get human-readable HTML report + headers = headers = { + "Authorization": f"bearer {authorization_bearer}", + "Content-Type": "text/html" + } + res = get(APPINSPECT_API_REPORT, headers=headers) + res.raise_for_status() + report_html = res.content + + #Get JSON report for processing + headers = headers = { + "Authorization": f"bearer {authorization_bearer}", + "Content-Type": "application/json" + } + res = get(APPINSPECT_API_REPORT, headers=headers) + res.raise_for_status() + report_json = res.json() + + # Just get app path here to avoid long function calls in the open() calls below + appPath = config.getPackageFilePath(include_version=True) + appinpect_html_path = appPath.with_suffix(appPath.suffix+".appinspect_api_results.html") + appinspect_json_path = appPath.with_suffix(appPath.suffix+".appinspect_api_results.json") + #Use the full path of the app, but update the suffix to include info about appinspect + with open(appinpect_html_path, "wb") as report: + report.write(report_html) + with open(appinspect_json_path, "w") as report: + json.dump(report_json, report) + + + self.parseAppinspectJsonLogFile(appinspect_json_path) + + + return authorization_bearer + + + def inspectAppCLI(self, config:inspect)-> None: + + try: + raise Exception("Local spunk-appinspect Not Supported at this time (you may use the appinspect api). If you would like to locally inspect your app with" + "Python 3.7, 3.8, or 3.9 (with limited support), please refer to:\n" + "\t - https://dev.splunk.com/enterprise/docs/developapps/testvalidate/appinspect/useappinspectclitool/") + from splunk_appinspect.main import ( + validate, MODE_OPTION, APP_PACKAGE_ARGUMENT, OUTPUT_FILE_OPTION, + LOG_FILE_OPTION, INCLUDED_TAGS_OPTION, EXCLUDED_TAGS_OPTION, + PRECERT_MODE, TEST_MODE) + except Exception as e: + print(e) + # print("******WARNING******") + # if sys.version_info.major == 3 and sys.version_info.minor > 9: + # print("The package splunk-appinspect was not installed due to a current issue with the library on Python3.10+. 
" + # "Please use the following commands to set up a virtualenvironment in a different folder so you may run appinspect manually (if desired):" + # "\n\tpython3.9 -m venv .venv" + # "\n\tsource .venv/bin/activate" + # "\n\tpython3 -m pip install splunk-appinspect" + # f"\n\tsplunk-appinspect inspect {self.getPackagePath(include_version=False).relative_to(pathlib.Path('.').absolute())} --mode precert") + + # else: + # print("splunk-appinspect is only compatable with Python3.9 at this time. Please see the following open issue here: https://github.com/splunk/contentctl/issues/28") + # print("******WARNING******") + return + + # Note that all tags are available and described here: + # https://dev.splunk.com/enterprise/reference/appinspect/appinspecttagreference/ + # By default, precert mode will run ALL checks. Explicitly included or excluding tags will + # change this behavior. To give the most thorough inspection, we leave these empty so that + # ALL checks are run + included_tags = [] + excluded_tags = [] + + appinspect_output = self.dist/f"{self.config.build.title}-{self.config.build.version}.appinspect_cli_results.json" + appinspect_logging = self.dist/f"{self.config.build.title}-{self.config.build.version}.appinspect_cli_logging.log" + try: + arguments_list = [(APP_PACKAGE_ARGUMENT, str(self.getPackagePath(include_version=False)))] + options_list = [] + options_list += [MODE_OPTION, TEST_MODE] + options_list += [OUTPUT_FILE_OPTION, str(appinspect_output)] + options_list += [LOG_FILE_OPTION, str(appinspect_logging)] + + #If there are any tags defined, then include them here + for opt in included_tags: + options_list += [INCLUDED_TAGS_OPTION, opt] + for opt in excluded_tags: + options_list += [EXCLUDED_TAGS_OPTION, opt] + + cmdline = options_list + [arg[1] for arg in arguments_list] + validate(cmdline) + + except SystemExit as e: + if e.code == 0: + # The sys.exit called inside of appinspect validate closes stdin. We need to + # reopen it. + sys.stdin = open("/dev/stdin","r") + print(f"AppInspect passed! Please check [ {appinspect_output} , {appinspect_logging} ] for verbose information.") + else: + if sys.version.startswith('3.11') or sys.version.startswith('3.12'): + raise Exception("At this time, AppInspect may fail on valid apps under Python>=3.11 with " + "the error 'global flags not at the start of the expression at position 1'. " + "If you encounter this error, please run AppInspect on a version of Python " + "<3.11. This issue is currently tracked. Please review the appinspect " + "report output above for errors.") + else: + raise Exception("AppInspect Failure - Please review the appinspect report output above for errors.") + finally: + # appinspect outputs the log in json format, but does not format it to be easier + # to read (it is all in one line). Read back that file and write it so it + # is easier to understand + + #Note that this may raise an exception itself! + self.parseAppinspectJsonLogFile(appinspect_output) + + def parseAppinspectJsonLogFile(self, logfile_path:pathlib.Path, + status_types:list[str] = ["error", "failure", "manual_check", "warning"], + exception_types = ["error","failure","manual_check"] )->None: + if not set(exception_types).issubset(set(status_types)): + raise Exception(f"Error - exception_types {exception_types} MUST be a subset of status_types {status_types}, but it is not") + with open(logfile_path, "r+") as logfile: + j = json.load(logfile) + #Seek back to the beginning of the file. 
We don't need to clear + #it sice we will always write AT LEAST the same number of characters + #back as we read (due to the addition of whitespace) + logfile.seek(0) + json.dump(j, logfile, indent=3, ) + + reports = j.get("reports", []) + if len(reports) != 1: + raise Exception("Expected to find one appinspect report but found 0") + verbose_errors = [] + + for group in reports[0].get("groups", []): + for check in group.get("checks",[]): + if check.get("result","") in status_types: + verbose_errors.append(f" - {check.get('result','')} [{group.get('name','NONAME')}: {check.get('name', 'NONAME')}]") + verbose_errors.sort() + + summary = j.get("summary", None) + if summary is None: + raise Exception("Missing summary from appinspect report") + msgs = [] + generated_exception = False + for key in status_types: + if summary.get(key,0)>0: + msgs.append(f" - {summary.get(key,0)} {key}s") + if key in exception_types: + generated_exception = True + if len(msgs)>0 or len(verbose_errors): + summary = '\n'.join(msgs) + details = '\n'.join(verbose_errors) + summary = f"{summary}\nDetails:\n{details}" + if generated_exception: + raise Exception(f"AppInspect found [{','.join(exception_types)}] that MUST be addressed to pass AppInspect API:\n{summary}") + else: + print(f"AppInspect found [{','.join(status_types)}] that MAY cause a failure during AppInspect API:\n{summary}") + else: + print("AppInspect was successful!") + + return diff --git a/contentctl/actions/new_content.py b/contentctl/actions/new_content.py index de9fa048..71e597c2 100644 --- a/contentctl/actions/new_content.py +++ b/contentctl/actions/new_content.py @@ -1,23 +1,116 @@ from dataclasses import dataclass - -from contentctl.input.new_content_generator import NewContentGenerator, NewContentGeneratorInputDto, NewContentGeneratorOutputDto +import questionary +from typing import Any +from contentctl.input.new_content_questions import NewContentQuestions from contentctl.output.new_content_yml_output import NewContentYmlOutput +from contentctl.objects.config import new, NewContentType +import uuid +from datetime import datetime +import pathlib +from contentctl.objects.abstract_security_content_objects.security_content_object_abstract import SecurityContentObject_Abstract +from contentctl.output.yml_writer import YmlWriter +class NewContent: -@dataclass(frozen=True) -class NewContentInputDto: - new_content_generator_input_dto: NewContentGeneratorInputDto - output_path: str + def buildDetection(self)->dict[str,Any]: + questions = NewContentQuestions.get_questions_detection() + answers = questionary.prompt(questions) + answers.update(answers) + answers['name'] = answers['detection_name'] + answers['id'] = str(uuid.uuid4()) + answers['version'] = 1 + answers['date'] = datetime.today().strftime('%Y-%m-%d') + answers['author'] = answers['detection_author'] + answers['data_source'] = answers['data_source'] + answers['type'] = answers['detection_type'] + answers['status'] = "production" #start everything as production since that's what we INTEND the content to become + answers['description'] = 'UPDATE_DESCRIPTION' + file_name = answers['name'].replace(' ', '_').replace('-','_').replace('.','_').replace('/','_').lower() + answers['search'] = answers['detection_search'] + ' | `' + file_name + '_filter`' + answers['how_to_implement'] = 'UPDATE_HOW_TO_IMPLEMENT' + answers['known_false_positives'] = 'UPDATE_KNOWN_FALSE_POSITIVES' + answers['references'] = ['REFERENCE'] + answers['tags'] = dict() + answers['tags']['analytic_story'] = ['UPDATE_STORY_NAME'] + 
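# --- Editor's note: illustrative sketch, not part of the diff above -----------
# buildDetection above normalises the detection name into a file-safe string and
# appends a per-detection "_filter" macro to the generated search. A minimal
# standalone version of that naming step (logic copied from the code above;
# the function name is an assumption for illustration only):
def detection_filter_search(name: str, search: str) -> str:
    # Replace characters that are unsafe in file and macro names, then lowercase
    file_name = (
        name.replace(' ', '_').replace('-', '_').replace('.', '_').replace('/', '_').lower()
    )
    return search + ' | `' + file_name + '_filter`'

# detection_filter_search("Anomalous Usage of 7zip", "| tstats count ...")
#   -> "| tstats count ... | `anomalous_usage_of_7zip_filter`"
# ------------------------------------------------------------------------------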
answers['tags']['asset_type'] = 'UPDATE asset_type' + answers['tags']['confidence'] = 'UPDATE value between 1-100' + answers['tags']['impact'] = 'UPDATE value between 1-100' + answers['tags']['message'] = 'UPDATE message' + answers['tags']['mitre_attack_id'] = [x.strip() for x in answers['mitre_attack_ids'].split(',')] + answers['tags']['observable'] = [{'name': 'UPDATE', 'type': 'UPDATE', 'role': ['UPDATE']}] + answers['tags']['product'] = ['Splunk Enterprise','Splunk Enterprise Security','Splunk Cloud'] + answers['tags']['required_fields'] = ['UPDATE'] + answers['tags']['risk_score'] = 'UPDATE (impact * confidence)/100' + answers['tags']['security_domain'] = answers['security_domain'] + answers['tags']['cve'] = ['UPDATE WITH CVE(S) IF APPLICABLE'] + + #generate the tests section + answers['tests'] = [ + { + 'name': "True Positive Test", + 'attack_data': [ + { + 'data': "Enter URL for Dataset Here. This may also be a relative or absolute path on your local system for testing.", + "sourcetype": "UPDATE SOURCETYPE", + "source": "UPDATE SOURCE" + } + ] + } + ] + return answers + def buildStory(self)->dict[str,Any]: + questions = NewContentQuestions.get_questions_story() + answers = questionary.prompt(questions) + answers['name'] = answers['story_name'] + answers['id'] = str(uuid.uuid4()) + answers['version'] = 1 + answers['date'] = datetime.today().strftime('%Y-%m-%d') + answers['author'] = answers['story_author'] + answers['description'] = 'UPDATE_DESCRIPTION' + answers['narrative'] = 'UPDATE_NARRATIVE' + answers['references'] = [] + answers['tags'] = dict() + answers['tags']['analytic_story'] = answers['name'] + answers['tags']['category'] = answers['category'] + answers['tags']['product'] = ['Splunk Enterprise','Splunk Enterprise Security','Splunk Cloud'] + answers['tags']['usecase'] = answers['usecase'] + answers['tags']['cve'] = ['UPDATE WITH CVE(S) IF APPLICABLE'] + return answers + -class NewContent: + def execute(self, input_dto: new) -> None: + if input_dto.type == NewContentType.detection: + content_dict = self.buildDetection() + subdirectory = pathlib.Path('detections') / content_dict.get('type') + elif input_dto.type == NewContentType.story: + content_dict = self.buildStory() + subdirectory = pathlib.Path('stories') + else: + raise Exception(f"Unsupported new content type: [{input_dto.type}]") + + full_output_path = input_dto.path / subdirectory / SecurityContentObject_Abstract.contentNameToFileName(content_dict.get('name')) + YmlWriter.writeYmlFile(str(full_output_path), content_dict) + + + + def writeObjectNewContent(self, object: dict, subdirectory_name: str, type: NewContentType) -> None: + if type == NewContentType.detection: + file_path = os.path.join(self.output_path, 'detections', subdirectory_name, self.convertNameToFileName(object['name'], object['tags']['product'])) + output_folder = pathlib.Path(self.output_path)/'detections'/subdirectory_name + #make sure the output folder exists for this detection + output_folder.mkdir(exist_ok=True) - def execute(self, input_dto: NewContentInputDto) -> None: - new_content_generator_output_dto = NewContentGeneratorOutputDto({},{}) - new_content_generator = NewContentGenerator(new_content_generator_output_dto) - new_content_generator.execute(input_dto.new_content_generator_input_dto) + YmlWriter.writeYmlFile(file_path, object) + print("Successfully created detection " + file_path) + + elif type == NewContentType.story: + file_path = os.path.join(self.output_path, 'stories', self.convertNameToFileName(object['name'], 
object['tags']['product'])) + YmlWriter.writeYmlFile(file_path, object) + print("Successfully created story " + file_path) + + else: + raise(Exception(f"Object Must be Story or Detection, but is not: {object}")) - new_content_yml_output = NewContentYmlOutput(input_dto.output_path) - new_content_yml_output.writeObjectNewContent(new_content_generator_output_dto.obj, new_content_generator_output_dto.answers.get("detection_kind",None), input_dto.new_content_generator_input_dto.type) diff --git a/contentctl/actions/release_notes.py b/contentctl/actions/release_notes.py index 48ca3e6c..859fcf87 100644 --- a/contentctl/actions/release_notes.py +++ b/contentctl/actions/release_notes.py @@ -1,120 +1,131 @@ import os - -from dataclasses import dataclass - -from contentctl.input.director import DirectorInputDto, Director, DirectorOutputDto -from contentctl.output.svg_output import SvgOutput -from contentctl.output.attack_nav_output import AttackNavOutput +from contentctl.objects.config import release_notes from git import Repo import re import yaml -from typing import Union +import pathlib +from typing import List, Union -@dataclass(frozen=True) -class ReleaseNotesInputDto: - director_input_dto: DirectorInputDto class ReleaseNotes: - def create_notes(self,repo_path, file_paths): - for file_path in file_paths: - # Check if the file exists - if os.path.exists(file_path) and os.path.isfile(file_path): - # Check if the file is a YAML file - if file_path.endswith('.yaml') or file_path.endswith('.yml'): - # Read and parse the YAML file - with open(file_path, 'r') as file: - try: - data = yaml.safe_load(file) - # Check and create story link - if 'name' in data and 'stories/' in file_path: - story_link = "https://research.splunk.com/stories/" + data['name'] - story_link=story_link.replace(" ","_") - story_link = story_link.lower() - print("- "+"["+f"{data['name']}"+"]"+"("+story_link+")") - - if 'name' in data and'playbooks/' in file_path: - playbook_link = "https://research.splunk.com" + file_path.replace(repo_path,"") - playbook_link=playbook_link.replace(".yml","/").lower() - print("- "+"["+f"{data['name']}"+"]"+"("+playbook_link+")") - - if 'name' in data and'macros/' in file_path: - print("- " + f"{data['name']}") - - if 'name' in data and'lookups/' in file_path: - print("- " + f"{data['name']}") - - # Create only SSA link when its production - if 'name' in data and 'id' in data and 'ssa_detections/' in file_path: - if data['status'] == "production": - temp_link = "https://research.splunk.com" + file_path.replace(repo_path,"") - pattern = r'(?<=/)[^/]*$' - detection_link = re.sub(pattern, data['id'], temp_link) - detection_link = detection_link.replace("detections","" ) - detection_link = detection_link.replace("ssa_/","" ) - print("- "+"["+f"{data['name']}"+"]"+"("+detection_link+")") - - if data['status'] == "validation": - print("- "+f"{data['name']}"+" (Validation Mode)") - - # Check and create detection link - if 'name' in data and 'id' in data and 'detections/' in file_path and not 'ssa_detections/' in file_path: - temp_link = "https://research.splunk.com" + file_path.replace(repo_path,"") + def create_notes(self,repo_path:pathlib.Path, file_paths:List[pathlib.Path], header:str)->dict[str,Union[List[str], str]]: + updates:List[str] = [] + warnings:List[str] = [] + for file_path in file_paths: + # Check if the file exists + if file_path.exists() and file_path.is_file(): + # Check if the file is a YAML file + if file_path.suffix in ['.yaml', '.yml']: + # Read and parse the YAML file + with 
open(file_path, 'r') as file: + try: + data = yaml.safe_load(file) + # Check and create story link + if 'name' in data and 'stories' in file_path.parts: + story_link = "https://research.splunk.com/stories/" + data['name'] + story_link=story_link.replace(" ","_") + story_link = story_link.lower() + updates.append("- "+"["+f"{data['name']}"+"]"+"("+story_link+")") + + if 'name' in data and'playbooks' in file_path.parts: + playbook_link = "https://research.splunk.com/" + str(file_path).replace(str(repo_path),"") + playbook_link=playbook_link.replace(".yml","/").lower() + updates.append("- "+"["+f"{data['name']}"+"]"+"("+playbook_link+")") + + if 'name' in data and'macros' in file_path.parts: + updates.append("- " + f"{data['name']}") + + if 'name' in data and'lookups' in file_path.parts: + updates.append("- " + f"{data['name']}") + + # Create only SSA link when its production + if 'name' in data and 'id' in data and 'ssa_detections' in file_path.parts: + if data['status'] == "production": + temp_link = "https://research.splunk.com/" + str(file_path).replace(str(repo_path),"") + pattern = r'(?<=/)[^/]*$' + detection_link = re.sub(pattern, data['id'], temp_link) + detection_link = detection_link.replace("detections","" ) + detection_link = detection_link.replace("ssa_/","" ) + updates.append("- "+"["+f"{data['name']}"+"]"+"("+detection_link+")") + + if data['status'] == "validation": + updates.append("- "+f"{data['name']}"+" (Validation Mode)") + + + # Check and create detection link + if 'name' in data and 'id' in data and 'detections' in file_path.parts and not 'ssa_detections' in file_path.parts and 'detections/deprecated' not in file_path.parts: + + if data['status'] == "production": + temp_link = "https://research.splunk.com" + str(file_path).replace(str(repo_path),"") pattern = r'(?<=/)[^/]*$' detection_link = re.sub(pattern, data['id'], temp_link) detection_link = detection_link.replace("detections","" ) detection_link = detection_link.replace(".com//",".com/" ) - print("- "+"["+f"{data['name']}"+"]"+"("+detection_link+")") - - except yaml.YAMLError as exc: - print(f"Error parsing YAML file {file_path}: {exc}") - else: - print(f"File not found or is not a file: {file_path}") + updates.append("- "+"["+f"{data['name']}"+"]"+"("+detection_link+")") + + if data['status'] == "deprecated": + temp_link = "https://research.splunk.com" + str(file_path).replace(str(repo_path),"") + pattern = r'(?<=/)[^/]*$' + detection_link = re.sub(pattern, data['id'], temp_link) + detection_link = detection_link.replace("detections","" ) + detection_link = detection_link.replace(".com//",".com/" ) + updates.append("- "+"["+f"{data['name']}"+"]"+"("+detection_link+")") + + except yaml.YAMLError as exc: + raise Exception(f"Error parsing YAML file for release_notes {file_path}: {str(exc)}") + else: + warnings.append(f"Error parsing YAML file for release_notes. 
File not found or is not a file: {file_path}") + #print out all updates at once + success_header = f'### {header} - [{len(updates)}]' + warning_header = f'### {header} - [{len(warnings)}]' + return {'header': success_header, 'changes': sorted(updates), + 'warning_header': warning_header, 'warnings': warnings} + - def release_notes(self, input_dto: DirectorInputDto, old_tag:Union[str,None], new_tag:str, latest_branch:str) -> None: + def release_notes(self, config:release_notes) -> None: ### Remove hard coded path directories = ['detections/','stories/','macros/','lookups/','playbooks/','ssa_detections/'] - repo_path = os.path.abspath(input_dto.director_input_dto.input_path) - repo = Repo(repo_path) + + repo = Repo(config.path) # Ensure the new tag is in the tags if tags are supplied - if new_tag: - if new_tag not in repo.tags: - raise ValueError(f"new_tag {new_tag} does not exist in the repository. Make sure your branch nameis ") - if old_tag is None: + if config.new_tag: + if config.new_tag not in repo.tags: + raise Exception(f"new_tag {config.new_tag} does not exist in the repository. Make sure your branch nameis ") + if config.old_tag is None: #Old tag was not supplied, so find the index of the new tag, then get the tag before it tags_sorted = sorted(repo.tags, key=lambda t: t.commit.committed_datetime, reverse=True) tags_names_sorted = [tag.name for tag in tags_sorted] - new_tag_index = tags_names_sorted.index(new_tag) + new_tag_index = tags_names_sorted.index(config.new_tag) try: - old_tag = tags_names_sorted[new_tag_index+1] + config.old_tag = tags_names_sorted[new_tag_index+1] except Exception: - raise ValueError(f"old_tag cannot be inferred. {new_tag} is the oldest tag in the repo!") - latest_tag = new_tag - previous_tag = old_tag + raise Exception(f"old_tag cannot be inferred. {config.new_tag} is the oldest tag in the repo!") + latest_tag = config.new_tag + previous_tag = config.old_tag commit1 = repo.commit(latest_tag) commit2 = repo.commit(previous_tag) diff_index = commit2.diff(commit1) # Ensure the branch is in the repo - if latest_branch: + if config.latest_branch: #If a branch name is supplied, compare against develop - if latest_branch not in repo.branches: - raise ValueError(f"latest branch {latest_branch} does not exist in the repository. Make sure your branch name is correct") - latest_branch = latest_branch + if config.latest_branch not in repo.branches: + raise ValueError(f"latest branch {config.latest_branch} does not exist in the repository. 
Make sure your branch name is correct") compare_against = "develop" - commit1 = repo.commit(latest_branch) + commit1 = repo.commit(config.latest_branch) commit2 = repo.commit(compare_against) diff_index = commit2.diff(commit1) - modified_files = [] - added_files = [] + modified_files:List[pathlib.Path] = [] + added_files:List[pathlib.Path] = [] for diff in diff_index: - file_path = diff.a_path + file_path = pathlib.Path(diff.a_path) # Check if the file is in the specified directories - if any(file_path.startswith(directory) for directory in directories): + if any(str(file_path).startswith(directory) for directory in directories): # Check if a file is Modified if diff.change_type == 'M': modified_files.append(file_path) @@ -124,91 +135,104 @@ def release_notes(self, input_dto: DirectorInputDto, old_tag:Union[str,None], ne elif diff.change_type == 'A': added_files.append(file_path) # print(added_files) - detections_added = [] - ba_detections_added = [] - stories_added = [] - macros_added = [] - lookups_added = [] - playbooks_added = [] - detections_modified = [] - ba_detections_modified = [] - stories_modified = [] - macros_modified = [] - lookups_modified = [] - playbooks_modified = [] + detections_added:List[pathlib.Path] = [] + ba_detections_added:List[pathlib.Path] = [] + stories_added:List[pathlib.Path] = [] + macros_added:List[pathlib.Path] = [] + lookups_added:List[pathlib.Path] = [] + playbooks_added:List[pathlib.Path] = [] + detections_modified:List[pathlib.Path] = [] + ba_detections_modified:List[pathlib.Path] = [] + stories_modified:List[pathlib.Path] = [] + macros_modified:List[pathlib.Path] = [] + lookups_modified:List[pathlib.Path] = [] + playbooks_modified:List[pathlib.Path] = [] + detections_deprecated:List[pathlib.Path] = [] for file in modified_files: - file=repo_path +"/"+file - if 'detections/' in file and 'ssa_detections/' not in file: + file= config.path / file + if 'detections' in file.parts and 'ssa_detections' not in file.parts and 'deprecated' not in file.parts: detections_modified.append(file) - if 'stories/' in file: + if 'detections' in file.parts and 'ssa_detections' not in file.parts and 'deprecated' in file.parts: + detections_deprecated.append(file) + if 'stories' in file.parts: stories_modified.append(file) - if 'macros/' in file: + if 'macros' in file.parts: macros_modified.append(file) - if 'lookups/' in file: + if 'lookups' in file.parts: lookups_modified.append(file) - if 'playbooks/' in file: + if 'playbooks' in file.parts: playbooks_modified.append(file) - if 'ssa_detections/' in file: + if 'ssa_detections' in file.parts: ba_detections_modified.append(file) for file in added_files: - file=repo_path +"/"+file - if 'detections/' in file and 'ssa_detections/' not in file: + file=config.path / file + if 'detections' in file.parts and 'ssa_detections' not in file.parts: detections_added.append(file) - if 'stories/' in file: + if 'stories' in file.parts: stories_added.append(file) - if 'macros/' in file: + if 'macros' in file.parts: macros_added.append(file) - if 'lookups/' in file: + if 'lookups' in file.parts: lookups_added.append(file) - if 'playbooks/' in file: + if 'playbooks' in file.parts: playbooks_added.append(file) - if 'ssa_detections/' in file: + if 'ssa_detections' in file.parts: ba_detections_added.append(file) - if new_tag: + if config.new_tag: print(f"Generating release notes - \033[92m{latest_tag}\033[0m") print(f"Compared against - \033[92m{previous_tag}\033[0m") print("\n## Release notes for ESCU " + latest_tag) - if latest_branch: - 
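# --- Editor's note: illustrative sketch, not part of the diff above -----------
# The release-notes logic above diffs two git refs with GitPython and buckets
# the changed files by change type ('M' for modified, 'A' for added). A minimal
# sketch of that pattern, assuming a repository path and two refs that exist
# (names and the helper function itself are placeholders, not contentctl code):
import pathlib
from git import Repo

def changed_files(repo_path: str, old_ref: str, new_ref: str) -> tuple[list[pathlib.Path], list[pathlib.Path]]:
    repo = Repo(repo_path)
    # Diff old against new so change_type reflects what happened going forward
    diff_index = repo.commit(old_ref).diff(repo.commit(new_ref))
    modified = [pathlib.Path(d.a_path) for d in diff_index if d.change_type == 'M']
    added = [pathlib.Path(d.a_path) for d in diff_index if d.change_type == 'A']
    return modified, added
# ------------------------------------------------------------------------------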
print(f"Generating release notes - \033[92m{latest_branch}\033[0m") + if config.latest_branch: + print(f"Generating release notes - \033[92m{config.latest_branch}\033[0m") print(f"Compared against - \033[92m{compare_against}\033[0m") - print("\n## Release notes for ESCU " + latest_branch) - - print("\n### New Analytics Story") - self.create_notes(repo_path, stories_added) - print("\n### Updated Analytics Story") - self.create_notes(repo_path,stories_modified) - print("\n### New Analytics") - self.create_notes(repo_path,detections_added) - print("\n### Updated Analytics") - self.create_notes(repo_path,detections_modified) - print("\n### Macros Added") - self.create_notes(repo_path,macros_added) - print("\n### Macros Updated") - self.create_notes(repo_path,macros_modified) - print("\n### Lookups Added") - self.create_notes(repo_path,lookups_added) - print("\n### Lookups Updated") - self.create_notes(repo_path,lookups_modified) - print("\n### Playbooks Added") - self.create_notes(repo_path,playbooks_added) - print("\n### Playbooks Updated") - self.create_notes(repo_path,playbooks_modified) - - print("\n### Other Updates\n-\n") - - print("\n## BA Release Notes") - - print("\n### New BA Analytics") - self.create_notes(repo_path,ba_detections_added) - - print("\n### Updated BA Analytics") - self.create_notes(repo_path,ba_detections_modified) - - + print("\n## Release notes for ESCU " + config.latest_branch) + + notes = [self.create_notes(config.path, stories_added, header="New Analytic Story"), + self.create_notes(config.path,stories_modified, header="Updated Analytic Story"), + self.create_notes(config.path,detections_added, header="New Analytics"), + self.create_notes(config.path,detections_modified, header="Updated Analytics"), + self.create_notes(config.path,macros_added, header="Macros Added"), + self.create_notes(config.path,macros_modified, header="Macros Updated"), + self.create_notes(config.path,lookups_added, header="Lookups Added"), + self.create_notes(config.path,lookups_modified, header="Lookups Updated"), + self.create_notes(config.path,playbooks_added, header="Playbooks Added"), + self.create_notes(config.path,playbooks_modified, header="Playbooks Updated"), + self.create_notes(config.path,detections_deprecated, header="Deprecated Analytics")] + + #generate and show ba_notes in a different section + ba_notes = [self.create_notes(config.path,ba_detections_added, header="New BA Analytics"), + self.create_notes(config.path,ba_detections_modified, header="Updated BA Analytics") ] + + def printNotes(notes:List[dict[str,Union[List[str], str]]], outfile:Union[pathlib.Path,None]=None): + num_changes = sum([len(note['changes']) for note in notes]) + num_warnings = sum([len(note['warnings']) for note in notes]) + lines:List[str] = [] + lines.append(f"Total New and Updated Content: [{num_changes}]") + for note in notes: + lines.append("") + lines.append(note['header']) + lines+=(note['changes']) + + lines.append(f"\n\nTotal Warnings: [{num_warnings}]") + for note in notes: + if len(note['warnings']) > 0: + lines.append(note['warning_header']) + lines+=note['warnings'] + text_blob = '\n'.join(lines) + print(text_blob) + if outfile is not None: + with open(outfile,'w') as writer: + writer.write(text_blob) + + printNotes(notes, config.releaseNotesFilename(f"release_notes.txt")) + + print("\n\n### Other Updates\n-\n") + print("\n## BA Release Notes") + printNotes(ba_notes, config.releaseNotesFilename("ba_release_notes.txt")) print(f"Release notes completed succesfully") \ No newline at end of 
file diff --git a/contentctl/actions/reporting.py b/contentctl/actions/reporting.py index 515237cf..a7997713 100644 --- a/contentctl/actions/reporting.py +++ b/contentctl/actions/reporting.py @@ -2,32 +2,43 @@ from dataclasses import dataclass -from contentctl.input.director import DirectorInputDto, Director, DirectorOutputDto +from contentctl.input.director import DirectorOutputDto from contentctl.output.svg_output import SvgOutput from contentctl.output.attack_nav_output import AttackNavOutput - +from contentctl.objects.config import report @dataclass(frozen=True) class ReportingInputDto: - director_input_dto: DirectorInputDto + director_output_dto: DirectorOutputDto + config: report class Reporting: def execute(self, input_dto: ReportingInputDto) -> None: - director_output_dto = DirectorOutputDto([],[],[],[],[],[],[],[],[]) - director = Director(director_output_dto) - director.execute(input_dto.director_input_dto) + + #Ensure the reporting path exists + try: + input_dto.config.getReportingPath().mkdir(exist_ok=True,parents=True) + except Exception as e: + if input_dto.config.getReportingPath().is_file(): + raise Exception(f"Error writing reporting: '{input_dto.config.getReportingPath()}' is a file, not a directory.") + else: + raise Exception(f"Error writing reporting : '{input_dto.config.getReportingPath()}': {str(e)}") + + print("Creating GitHub Badges...") + #Generate GitHub Badges svg_output = SvgOutput() svg_output.writeObjects( - director_output_dto.detections, - os.path.join(input_dto.director_input_dto.input_path, "reporting") - ) + input_dto.director_output_dto.detections, + input_dto.config.getReportingPath()) - attack_nav_output = AttackNavOutput() + #Generate coverage json + print("Generating coverage.json...") + attack_nav_output = AttackNavOutput() attack_nav_output.writeObjects( - director_output_dto.detections, - os.path.join(input_dto.director_input_dto.input_path, "reporting") + input_dto.director_output_dto.detections, + input_dto.config.getReportingPath() ) - print('Reporting of security content successful.') \ No newline at end of file + print(f"Reporting successfully written to '{input_dto.config.getReportingPath()}'") \ No newline at end of file diff --git a/contentctl/actions/test.py b/contentctl/actions/test.py index 2d749f38..b0ee5faf 100644 --- a/contentctl/actions/test.py +++ b/contentctl/actions/test.py @@ -1,14 +1,12 @@ from dataclasses import dataclass +from typing import List -from contentctl.objects.test_config import TestConfig -from contentctl.objects.enums import DetectionTestingMode +from contentctl.objects.config import test_common +from contentctl.objects.enums import DetectionTestingMode, DetectionStatus, AnalyticsType +from contentctl.objects.detection import Detection from contentctl.input.director import DirectorOutputDto -from contentctl.actions.detection_testing.GitService import ( - GitService, -) - from contentctl.actions.detection_testing.DetectionTestingManager import ( DetectionTestingManager, DetectionTestingManagerInputDto, @@ -32,24 +30,45 @@ DetectionTestingViewFile, ) -from argparse import Namespace -from os.path import relpath +from contentctl.objects.integration_test import IntegrationTest + +import pathlib MAXIMUM_CONFIGURATION_TIME_SECONDS = 600 @dataclass(frozen=True) class TestInputDto: - test_director_output_dto: DirectorOutputDto - gitService: GitService - config: TestConfig + detections: List[Detection] + config: test_common -class TestOutputDto: - results: list - - class Test: + + def filter_detections(self, input_dto: 
TestInputDto)->TestInputDto: + + if not input_dto.config.enable_integration_testing: + #Skip all integraiton tests if integration testing is not enabled: + for detection in input_dto.detections: + for test in detection.tests: + if isinstance(test, IntegrationTest): + test.skip("TEST SKIPPED: Skipping all integration tests") + + list_after_filtering:List[Detection] = [] + #extra filtering which may be removed/modified in the future + for detection in input_dto.detections: + if (detection.status != DetectionStatus.production.value): + #print(f"{detection.name} - Not testing because [STATUS: {detection.status}]") + pass + elif detection.type == AnalyticsType.Correlation: + #print(f"{detection.name} - Not testing because [ TYPE: {detection.type}]") + pass + else: + list_after_filtering.append(detection) + + return TestInputDto(list_after_filtering, input_dto.config) + + def execute(self, input_dto: TestInputDto) -> bool: @@ -62,19 +81,19 @@ def execute(self, input_dto: TestInputDto) -> bool: manager_input_dto = DetectionTestingManagerInputDto( config=input_dto.config, - testContent=input_dto.test_director_output_dto, + detections=input_dto.detections, views=[web, cli, file], ) manager = DetectionTestingManager( input_dto=manager_input_dto, output_dto=output_dto ) - if len(input_dto.test_director_output_dto.detections) == 0: - print(f"With Detection Testing Mode '{input_dto.config.mode.value}', there were detections [{len(input_dto.test_director_output_dto.detections)}] found to test.\nAs such, we will quit immediately.") + if len(input_dto.detections) == 0: + print(f"With Detection Testing Mode '{input_dto.config.getModeName()}', there were [0] detections found to test.\nAs such, we will quit immediately.") else: - print(f"MODE: [{input_dto.config.mode.value}] - Test [{len(input_dto.test_director_output_dto.detections)}] detections") + print(f"MODE: [{input_dto.config.getModeName()}] - Test [{len(input_dto.detections)}] detections") if input_dto.config.mode in [DetectionTestingMode.changes, DetectionTestingMode.selected]: - files_string = '\n- '.join([relpath(detection.file_path) for detection in input_dto.test_director_output_dto.detections]) + files_string = '\n- '.join([str(pathlib.Path(detection.file_path).relative_to(input_dto.config.path)) for detection in input_dto.detections]) print(f"Detections:\n- {files_string}") manager.setup() diff --git a/contentctl/actions/validate.py b/contentctl/actions/validate.py index a36f638f..90394b96 100644 --- a/contentctl/actions/validate.py +++ b/contentctl/actions/validate.py @@ -6,42 +6,32 @@ from typing import Union from contentctl.objects.enums import SecurityContentProduct +from contentctl.objects.abstract_security_content_objects.security_content_object_abstract import SecurityContentObject_Abstract from contentctl.input.director import ( Director, - DirectorInputDto, - DirectorOutputDto, + DirectorOutputDto ) - -@dataclass(frozen=True) -class ValidateInputDto: - director_input_dto: DirectorInputDto - +from contentctl.objects.config import validate +from contentctl.enrichments.attack_enrichment import AttackEnrichment +from contentctl.enrichments.cve_enrichment import CveEnrichment +from contentctl.objects.atomic import AtomicTest class Validate: - def execute(self, input_dto: ValidateInputDto) -> None: - director_output_dto = DirectorOutputDto([], [], [], [], [], [], [], [], []) + def execute(self, input_dto: validate) -> DirectorOutputDto: + + director_output_dto = 
DirectorOutputDto(AtomicTest.getAtomicTestsFromArtRepo(repo_path=input_dto.getAtomicRedTeamRepoPath(), + enabled=input_dto.enrichments), + AttackEnrichment.getAttackEnrichment(input_dto), + [],[],[],[],[],[],[],[],[]) + + director = Director(director_output_dto) - director.execute(input_dto.director_input_dto) - - # uuid validation all objects - try: - security_content_objects = ( - director_output_dto.detections - + director_output_dto.stories - + director_output_dto.baselines - + director_output_dto.investigations - + director_output_dto.playbooks - ) - self.validate_duplicate_uuids(security_content_objects) - - except ValueError as e: - print(e) - sys.exit(1) + director.execute(input_dto) - return None + return director_output_dto - def validate_duplicate_uuids(self, security_content_objects): + def validate_duplicate_uuids(self, security_content_objects:list[SecurityContentObject_Abstract]): all_uuids = set() duplicate_uuids = set() for elem in security_content_objects: @@ -54,28 +44,15 @@ def validate_duplicate_uuids(self, security_content_objects): if len(duplicate_uuids) == 0: return - + # At least once duplicate uuid has been found. Enumerate all # the pieces of content that use duplicate uuids - content_with_duplicate_uuid = [ - content_object - for content_object in security_content_objects - if content_object.id in duplicate_uuids - ] - + duplicate_messages = [] + for uuid in duplicate_uuids: + duplicate_uuid_content = [str(content.file_path) for content in security_content_objects if content.id in duplicate_uuids] + duplicate_messages.append(f"Duplicate UUID [{uuid}] in {duplicate_uuid_content}") + raise ValueError( - "ERROR: Duplicate ID found in objects:\n" - + "\n".join([obj.name for obj in content_with_duplicate_uuid]) + "ERROR: Duplicate ID(s) found in objects:\n" + + "\n - ".join(duplicate_messages) ) - - # def validate_detection_exist_for_test(self, tests: list, detections: list): - # for test in tests: - # found_detection = False - # for detection in detections: - # if test.tests[0].file in detection.file_path: - # found_detection = True - - # if not found_detection: - # raise ValueError( - # "ERROR: detection doesn't exist for test file: " + test.name - # ) diff --git a/contentctl/contentctl.py b/contentctl/contentctl.py index 65e5969a..e5c3718b 100644 --- a/contentctl/contentctl.py +++ b/contentctl/contentctl.py @@ -1,786 +1,228 @@ -import sys -import argparse -import os -import tqdm -import functools -from typing import Union -import pathlib -import yaml -from contentctl.actions.detection_testing.GitService import ( - GitService, -) -from contentctl.actions.validate import ValidateInputDto, Validate -from contentctl.actions.generate import ( - GenerateInputDto, - DirectorOutputDto, - Generate, +from contentctl.actions.initialize import Initialize +import tyro +from contentctl.objects.config import init, validate, build, new, deploy_acs, deploy_rest, test, test_servers, inspect, report, test_common, release_notes +from contentctl.actions.validate import Validate +from contentctl.actions.new_content import NewContent +from contentctl.actions.detection_testing.GitService import GitService +from contentctl.actions.build import ( + BuildInputDto, + DirectorOutputDto, + Build, ) -from contentctl.actions.acs_deploy import ACSDeployInputDto, Deploy +from contentctl.actions.test import Test +from contentctl.actions.test import TestInputDto from contentctl.actions.reporting import ReportingInputDto, Reporting -from contentctl.actions.new_content import NewContentInputDto, 
NewContent -from contentctl.actions.doc_gen import DocGenInputDto, DocGen -from contentctl.actions.initialize import Initialize, InitializeInputDto -from contentctl.actions.api_deploy import API_Deploy, API_DeployInputDto -from contentctl.actions.release_notes import ReleaseNotesInputDto, ReleaseNotes -from contentctl.input.director import DirectorInputDto -from contentctl.objects.enums import ( - SecurityContentType, - SecurityContentProduct, - DetectionTestingMode, - PostTestBehavior, - DetectionTestingTargetInfrastructure, - SigmaConverterTarget -) -from contentctl.input.new_content_generator import NewContentGeneratorInputDto -from contentctl.helper.config_handler import ConfigHandler - -from contentctl.objects.config import Config - -from contentctl.objects.app import App -from contentctl.objects.test_config import Infrastructure -from contentctl.actions.test import Test, TestInputDto -from contentctl.input.sigma_converter import SigmaConverterInputDto -from contentctl.actions.convert import ConvertInputDto, Convert - - -SERVER_ARGS_ENV_VARIABLE = "CONTENTCTL_TEST_INFRASTRUCTURES" - - -def configure_unattended(args: argparse.Namespace) -> argparse.Namespace: - # disable all calls to tqdm - this is so that CI/CD contexts don't - # have a large amount of output due to progress bar updates. - tqdm.tqdm.__init__ = functools.partialmethod( - tqdm.tqdm.__init__, disable=args.unattended - ) - if args.unattended: - if args.behavior != PostTestBehavior.never_pause.name: - print( - f"For unattended mode, --behavior MUST be {PostTestBehavior.never_pause.name}.\n" - f"Updating the behavior from '{args.behavior}' to " - f"'{PostTestBehavior.never_pause.name}'" - ) - args.behavior = PostTestBehavior.never_pause.name - - return args - - -def print_ascii_art(): - print( - """ -Running Splunk Security Content Control Tool (contentctl) -⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ -⠀⠀⠀⠀⠀⠀⠀⠀⠀⢶⠛⡇⠀⠀⠀⠀⠀⠀⣠⣦⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ -⠀⠀⠀⠀⠀⠀⠀⠀⣀⠼⠖⠛⠋⠉⠉⠓⠢⣴⡻⣾⡇⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ -⠀⠀⠀⢀⡠⠔⠊⠁⠀⠀⠀⠀⠀⠀⣠⣤⣄⠻⠟⣏⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ -⠀⣠⠞⠁⠀⠀⠀⡄⠀⠀⠀⠀⠀⠀⢻⣿⣿⠀⢀⠘⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ -⢸⡇⠀⠀⠀⡠⠊⠀⠀⠀⠀⠀⠀⠀⠀⠉⠉⠀⠈⠁⠘⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ -⢸⡉⠓⠒⠊⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⢄⠀⠀⠀⠈⢦⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ -⠈⡇⠀⢠⠀⠀⠀⠀⠀⠀⠀⠈⡷⣄⠀⠀⢀⠈⠀⠀⠑⢄⠀⠑⢄⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ -⠀⠹⡄⠘⡄⠀⠀⠀⠀⢀⡠⠊⠀⠙⠀⠀⠈⢣⠀⠀⠀⢀⠀⠀⠀⠉⠒⠤⣀⠀⠀⠀⠀⠀⠀⠀⠀ -⠀⠀⠉⠁⠛⠲⢶⡒⠈⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⡄⠀⠀⠉⠂⠀⠀⠀⠀⠤⡙⠢⣄⠀⠀⠀⠀⠀ -⠀⠀⠀⠀⠀⠀⠀⢹⠀⠀⡀⠀⠀⢸⠀⠀⠀⠀⠘⠇⠀⠀⠀⠀⠀⠀⠀⠀⢀⠈⠀⠈⠳⡄⠀⠀⠀ -⠀⠀⠀⠀⠀⠀⠀⠈⡇⠀⠣⠀⠀⠈⠀⢀⠀⠀⠀⠀⠀⠀⢀⣀⠀⠀⢀⡀⠀⠑⠄⠈⠣⡘⢆⠀⠀ -⠀⠀⠀⠀⠀⠀⠀⠀⢧⠀⠀⠀⠀⠀⠀⠿⠀⠀⠀⠀⣠⠞⠉⠀⠀⠀⠀⠙⢆⠀⠀⡀⠀⠁⠈⢇⠀ -⠀⠀⠀⠀⠀⠀⠀⠀⢹⠀⢤⠀⠀⠀⠀⠀⠀⠀⠀⢰⠁⠀⠀⠀⠀⠀⠀⠀⠀⠁⠀⠙⡄⠀⡀⠈⡆ -⠀⠀⠀⠀⠀⠀⠀⠀⠸⡆⠘⠃⠀⠀⠀⢀⡄⠀⠀⡇⠀⠀⡄⠀⠀⠀⠰⡀⠀⠀⡄⠀⠉⠀⠃⠀⢱ -⠀⠀⠀⠀⠀⠀⠀⠀⠀⢣⡀⠀⠀⡆⠀⠸⠇⠀⠀⢳⠀⠀⠈⠀⠀⠀⠐⠓⠀⠀⢸⡄⠀⠀⠀⡀⢸ -⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢳⡀⠀⢻⠀⠀⠀⠀⢰⠛⢆⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠃⠀⡆⠀⠃⡼ -⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣠⣷⣤⣽⣧⠀⠀⠀⡜⠀⠈⠢⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⠃ -⠀⠀⠀⠀⠀⠀⠀⠀⠀⣾⣿⣿⣇⡿⠹⣷⣄⣬⡗⠢⣤⠖⠛⢳⣤⣀⠀⠀⠀⠀⠀⠀⠀⠀⡰⠃⠀ -⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⠋⢠⣾⢿⡏⣸⠀⠀⠈⠋⠛⠧⠤⠘⠛⠉⠙⠒⠒⠒⠒⠉⠀⠀⠀ -⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠉⠻⠶⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ - - By: Splunk Threat Research Team [STRT] - research@splunk.com - """ - ) - - -def start(args: argparse.Namespace, read_test_file: bool = False) -> Config: - base_config = ConfigHandler.read_config(args) - if read_test_file: - base_config.test = ConfigHandler.read_test_config(args) - return base_config - - -def initialize(args) -> None: - Initialize().execute(InitializeInputDto(path=pathlib.Path(args.path), demo=args.demo)) - - -def build(args, config: Union[Config, None] = None) -> DirectorOutputDto: - if config is None: - config = start(args) - if args.type == "app": - product_type = SecurityContentProduct.SPLUNK_APP - elif args.type == "ssa": - product_type = SecurityContentProduct.SSA - elif args.type == "api": - product_type = SecurityContentProduct.API - else: - print("Invalid build type. 
Valid options app, ssa or api") - sys.exit(1) - director_input_dto = DirectorInputDto( - input_path=pathlib.Path(os.path.abspath(args.path)), - product=product_type, - config=config - ) - generate_input_dto = GenerateInputDto( - director_input_dto, - args.splunk_api_username, - args.splunk_api_password, - ) - - generate = Generate() - - return generate.execute(generate_input_dto) - - -def api_deploy(args) -> None: - config = start(args) - deploy_input_dto = API_DeployInputDto(path=pathlib.Path(args.path), config=config) - deploy = API_Deploy() - - deploy.execute(deploy_input_dto) - +from contentctl.actions.inspect import Inspect +import sys +import warnings +import pathlib +from contentctl.input.yml_reader import YmlReader +from contentctl.actions.release_notes import ReleaseNotes + +# def print_ascii_art(): +# print( +# """ +# Running Splunk Security Content Control Tool (contentctl) +# ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ +# ⠀⠀⠀⠀⠀⠀⠀⠀⠀⢶⠛⡇⠀⠀⠀⠀⠀⠀⣠⣦⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ +# ⠀⠀⠀⠀⠀⠀⠀⠀⣀⠼⠖⠛⠋⠉⠉⠓⠢⣴⡻⣾⡇⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ +# ⠀⠀⠀⢀⡠⠔⠊⠁⠀⠀⠀⠀⠀⠀⣠⣤⣄⠻⠟⣏⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ +# ⠀⣠⠞⠁⠀⠀⠀⡄⠀⠀⠀⠀⠀⠀⢻⣿⣿⠀⢀⠘⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ +# ⢸⡇⠀⠀⠀⡠⠊⠀⠀⠀⠀⠀⠀⠀⠀⠉⠉⠀⠈⠁⠘⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ +# ⢸⡉⠓⠒⠊⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⢄⠀⠀⠀⠈⢦⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ +# ⠈⡇⠀⢠⠀⠀⠀⠀⠀⠀⠀⠈⡷⣄⠀⠀⢀⠈⠀⠀⠑⢄⠀⠑⢄⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ +# ⠀⠹⡄⠘⡄⠀⠀⠀⠀⢀⡠⠊⠀⠙⠀⠀⠈⢣⠀⠀⠀⢀⠀⠀⠀⠉⠒⠤⣀⠀⠀⠀⠀⠀⠀⠀⠀ +# ⠀⠀⠉⠁⠛⠲⢶⡒⠈⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⡄⠀⠀⠉⠂⠀⠀⠀⠀⠤⡙⠢⣄⠀⠀⠀⠀⠀ +# ⠀⠀⠀⠀⠀⠀⠀⢹⠀⠀⡀⠀⠀⢸⠀⠀⠀⠀⠘⠇⠀⠀⠀⠀⠀⠀⠀⠀⢀⠈⠀⠈⠳⡄⠀⠀⠀ +# ⠀⠀⠀⠀⠀⠀⠀⠈⡇⠀⠣⠀⠀⠈⠀⢀⠀⠀⠀⠀⠀⠀⢀⣀⠀⠀⢀⡀⠀⠑⠄⠈⠣⡘⢆⠀⠀ +# ⠀⠀⠀⠀⠀⠀⠀⠀⢧⠀⠀⠀⠀⠀⠀⠿⠀⠀⠀⠀⣠⠞⠉⠀⠀⠀⠀⠙⢆⠀⠀⡀⠀⠁⠈⢇⠀ +# ⠀⠀⠀⠀⠀⠀⠀⠀⢹⠀⢤⠀⠀⠀⠀⠀⠀⠀⠀⢰⠁⠀⠀⠀⠀⠀⠀⠀⠀⠁⠀⠙⡄⠀⡀⠈⡆ +# ⠀⠀⠀⠀⠀⠀⠀⠀⠸⡆⠘⠃⠀⠀⠀⢀⡄⠀⠀⡇⠀⠀⡄⠀⠀⠀⠰⡀⠀⠀⡄⠀⠉⠀⠃⠀⢱ +# ⠀⠀⠀⠀⠀⠀⠀⠀⠀⢣⡀⠀⠀⡆⠀⠸⠇⠀⠀⢳⠀⠀⠈⠀⠀⠀⠐⠓⠀⠀⢸⡄⠀⠀⠀⡀⢸ +# ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢳⡀⠀⢻⠀⠀⠀⠀⢰⠛⢆⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠃⠀⡆⠀⠃⡼ +# ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣠⣷⣤⣽⣧⠀⠀⠀⡜⠀⠈⠢⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⠃ +# ⠀⠀⠀⠀⠀⠀⠀⠀⠀⣾⣿⣿⣇⡿⠹⣷⣄⣬⡗⠢⣤⠖⠛⢳⣤⣀⠀⠀⠀⠀⠀⠀⠀⠀⡰⠃⠀ +# ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⠋⢠⣾⢿⡏⣸⠀⠀⠈⠋⠛⠧⠤⠘⠛⠉⠙⠒⠒⠒⠒⠉⠀⠀⠀ +# ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠉⠻⠶⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ + +# By: Splunk Threat Research Team [STRT] - research@splunk.com +# """ +# ) + + + + +def init_func(config:test): + Initialize().execute(config) + + +def validate_func(config:validate)->DirectorOutputDto: + validate = Validate() + return validate.execute(config) -def acs_deploy(args) -> None: - config = start(args) - director_input_dto = DirectorInputDto( - input_path=pathlib.Path(os.path.abspath(args.path)), - product=SecurityContentProduct.SPLUNK_APP, - config=config - ) - acs_deply_dto = ACSDeployInputDto(director_input_dto, - args.splunk_api_username, - args.splunk_api_password, - args.splunk_cloud_jwt_token, - args.splunk_cloud_stack, - args.stack_type) +def report_func(config:report)->None: + # First, perform validation. Remember that the validate + # configuration is actually a subset of the build configuration + director_output_dto = validate_func(config) - deploy = Deploy() - deploy.execute(acs_deply_dto) - + r = Reporting() + return r.execute(ReportingInputDto(director_output_dto=director_output_dto, + config=config)) +def build_func(config:build)->DirectorOutputDto: + # First, perform validation. 
Remember that the validate + # configuration is actually a subset of the build configuration + director_output_dto = validate_func(config) + builder = Build() + return builder.execute(BuildInputDto(director_output_dto, config)) + +def inspect_func(config:inspect)->str: + #Make sure that we have built the most recent version of the app + _ = build_func(config) + inspect_token = Inspect().execute(config) + return inspect_token + -def test(args: argparse.Namespace): - args = configure_unattended(args) - - config = start(args, read_test_file=True) - #Don't do enrichment - if args.dry_run: - config.enrichments.attack_enrichment = False - config.enrichments.cve_enrichment = False - config.enrichments.splunk_app_enrichment = False - - if config.test is None: - raise Exception("Error parsing test configuration. Test Object was None.") - - if args.test_branch is not None: - if config.test.version_control_config is not None: - config.test.version_control_config.test_branch = args.test_branch - else: - raise Exception("Test argument 'test_branch' passed on the command line, but 'version_control_config' is not defined in contentctl_test.yml.") - if args.target_branch is not None: - if config.test.version_control_config is not None: - config.test.version_control_config.target_branch = args.target_branch - else: - raise Exception("Test argument 'target_branch' passed on the command line, but 'version_control_config' is not defined in contentctl_test.yml.") - - # set some arguments that are not - # yet exposed/written properly in - # the config file - if args.infrastructure is not None: - config.test.infrastructure_config.infrastructure_type = DetectionTestingTargetInfrastructure( - args.infrastructure - ) - if args.mode is not None: - config.test.mode = DetectionTestingMode(args.mode) - if args.behavior is not None: - config.test.post_test_behavior = PostTestBehavior(args.behavior) - if args.detections_list is not None: - config.test.detections_list = args.detections_list - if args.enable_integration_testing or config.test.enable_integration_testing: - config.test.enable_integration_testing = True - - # validate and setup according to infrastructure type - if config.test.infrastructure_config.infrastructure_type == DetectionTestingTargetInfrastructure.container: - if args.num_containers is None: - raise Exception( - "Error - trying to start a test using container infrastructure but no value for --num_containers was " - "found" - ) - config.test.infrastructure_config.infrastructures = Infrastructure.get_infrastructure_containers( - args.num_containers - ) - elif config.test.infrastructure_config.infrastructure_type == DetectionTestingTargetInfrastructure.server: - if args.server_info is None and os.environ.get(SERVER_ARGS_ENV_VARIABLE) is None: - if len(config.test.infrastructure_config.infrastructures) == 0: - raise Exception( - "Error - trying to start a test using server infrastructure, but server information was not stored " - "in contentctl_test.yml or passed on the command line. Please see the documentation for " - "--server_info at the command line or 'infrastructures' in contentctl.yml." 
- ) - else: - print("Using server configuration from: [contentctl_test.yml infrastructures section]") +def release_notes_func(config:release_notes)->None: + ReleaseNotes().release_notes(config) - else: - if args.server_info is not None: - print("Using server configuration from: [command line]") - pass - elif os.environ.get(SERVER_ARGS_ENV_VARIABLE) is not None: - args.server_info = os.environ.get(SERVER_ARGS_ENV_VARIABLE, "").split(';') - print(f"Using server configuration from: [{SERVER_ARGS_ENV_VARIABLE} environment variable]") - else: - raise Exception( - "Server infrastructure information not passed in contentctl_test.yml file, using --server_info " - f"switch on the command line, or in the {SERVER_ARGS_ENV_VARIABLE} environment variable" - ) - # if server info was provided on the command line, us that. Otherwise use the env +def new_func(config:new): + NewContent().execute(config) - config.test.infrastructure_config.infrastructures = [] - for server in args.server_info: - address, username, password, web_ui_port, hec_port, api_port = server.split(",") - config.test.infrastructure_config.infrastructures.append( - Infrastructure( - splunk_app_username=username, - splunk_app_password=password, - instance_address=address, - hec_port=int(hec_port), - web_ui_port=int(web_ui_port), - api_port=int(api_port) - ) - ) - # We do this before generating the app to save some time if options are incorrect. - # For example, if the detection(s) we are trying to test do not exist - gitService = GitService(config.test) +def deploy_acs_func(config:deploy_acs): + #This is a bit challenging to get to work with the default values. + raise Exception("deploy acs not yet implemented") +def deploy_rest_func(config:deploy_rest): + raise Exception("deploy rest not yet implemented") - director_output_dto = build(args, config) +def test_common_func(config:test_common): + director_output_dto = build_func(config) + gitServer = GitService(director=director_output_dto,config=config) + detections_to_test = gitServer.getContent() - test_director_output_dto = gitService.get_all_content(director_output_dto) - if args.dry_run: - #set the proper values in the config - config.test.mode = DetectionTestingMode.selected - config.test.detections_list = [d.file_path for d in test_director_output_dto.detections] - config.test.apps = [] - config.test.post_test_behavior = PostTestBehavior.never_pause - - #Disable enrichments to save time - config.enrichments.attack_enrichment = False - config.enrichments.cve_enrichment = False - config.enrichments.splunk_app_enrichment = False - - #Create a directory for artifacts. - dry_run_config_dir = pathlib.Path("dry_run_config") - - #It's okay if it already exists - dry_run_config_dir.mkdir(exist_ok=True) - - #Write out the test plan file - with open(dry_run_config_dir/"contentctl_test.yml", "w") as test_plan_config: - d = config.test.dict() - d['infrastructure_config']['infrastructure_type'] = d['infrastructure_config']['infrastructure_type'].value - d['mode'] = d['mode'].value - d['post_test_behavior'] = d['post_test_behavior'].value - yaml.safe_dump(d, test_plan_config) - - with open(dry_run_config_dir/"contentctl.yml", "w") as contentctl_cfg: - d = config.dict() - del d["test"] - yaml.safe_dump(d, contentctl_cfg) - - - - print(f"Wrote test plan to '{dry_run_config_dir/'contentctl_test.yml'}' and '{dry_run_config_dir/'contentctl.yml'}'") - return - - - - else: - # All this information will later come from the config, so we will - # be able to do it in Test().execute. 
For now, we will do it here - app = App( - uid=9999, - appid=config.build.name, - title=config.build.title, - release=config.build.version, - http_path=None, - local_path=str(pathlib.Path(config.build.path_root)/f"{config.build.name}-{config.build.version}.tar.gz"), - description=config.build.description, - splunkbase_path=None, - force_local=True - ) - - # We need to do this instead of appending to retrigger validation. - # It does not happen the first time since validation does not run for default values - # unless we use always=True in the validator - # we always want to keep CIM as the last app installed - - config.test.apps = [app] + config.test.apps - - test_input_dto = TestInputDto( - test_director_output_dto=test_director_output_dto, - gitService=gitService, - config=config.test, - ) - - test = Test() - - result = test.execute(test_input_dto) - # This return code is important. Even if testing - # fully completes, if everything does not pass then - # we want to return a nonzero status code - if result: - return - else: - sys.exit(1) - - -def validate(args) -> None: - config = start(args) - if args.type == "app": - product_type = SecurityContentProduct.SPLUNK_APP - elif args.type == "ssa": - product_type = SecurityContentProduct.SSA - elif args.type == "api": - product_type = SecurityContentProduct.API - else: - print("Invalid build type. Valid options app, ssa or api") - sys.exit(1) - director_input_dto = DirectorInputDto( - input_path=pathlib.Path(args.path), - product=product_type, - config=config - ) - validate_input_dto = ValidateInputDto(director_input_dto=director_input_dto) - validate = Validate() - return validate.execute(validate_input_dto) - -def release_notes(args)-> None: - - config = start(args) - director_input_dto = DirectorInputDto( - input_path=pathlib.Path(args.path), product=SecurityContentProduct.SPLUNK_APP, config=config - ) - - release_notes_input_dto = ReleaseNotesInputDto(director_input_dto=director_input_dto) - - release_notes = ReleaseNotes() - release_notes.release_notes(release_notes_input_dto, args.old_tag, args.new_tag, args.latest_branch) - -def doc_gen(args) -> None: - config = start(args) - director_input_dto = DirectorInputDto( - input_path=pathlib.Path(args.path), product=SecurityContentProduct.SPLUNK_APP, config=config - ) - - doc_gen_input_dto = DocGenInputDto(director_input_dto=director_input_dto) - - doc_gen = DocGen() - doc_gen.execute(doc_gen_input_dto) - - -def new_content(args) -> None: - if args.type == "detection": - contentType = SecurityContentType.detections - elif args.type == "story": - contentType = SecurityContentType.stories - else: - print("ERROR: type " + args.type + " not supported") - sys.exit(1) - - new_content_generator_input_dto = NewContentGeneratorInputDto(type=contentType) - new_content_input_dto = NewContentInputDto( - new_content_generator_input_dto, os.path.abspath(args.path) - ) - new_content = NewContent() - new_content.execute(new_content_input_dto) - - -def reporting(args) -> None: - config = start(args) - director_input_dto = DirectorInputDto( - input_path=args.path, product=SecurityContentProduct.SPLUNK_APP, config=config - ) - - reporting_input_dto = ReportingInputDto(director_input_dto=director_input_dto) - - reporting = Reporting() - reporting.execute(reporting_input_dto) - - -def convert(args) -> None: - if args.data_model == 'cim': - data_model = SigmaConverterTarget.CIM - elif args.data_model == 'raw': - data_model = SigmaConverterTarget.RAW - elif args.data_model == 'ocsf': - data_model = 
SigmaConverterTarget.OCSF - else: - print("ERROR: data model " + args.data_model + " not supported") - sys.exit(1) - - sigma_converter_input_dto = SigmaConverterInputDto( - data_model=data_model, - detection_path=args.detection_path, - detection_folder=args.detection_folder, - input_path=args.path, - log_source=args.log_source - ) - - convert_input_dto = ConvertInputDto( - sigma_converter_input_dto=sigma_converter_input_dto, - output_path=os.path.abspath(args.output) - ) - convert = Convert() - convert.execute(convert_input_dto) - - -def main(): - """ - main function parses the arguments passed to the script and calls the respctive method. - :param args: arguments passed by the user on command line while calling the script. - :return: returns the output of the function called. - """ - - # grab arguments - parser = argparse.ArgumentParser( - description="Use `contentctl action -h` to get help with any Splunk content action" - ) - parser.add_argument( - "-p", - "--path", - required=False, - default=".", - help="path to the content path containing the contentctl.yml", - ) - - parser.add_argument( - "--enable_enrichment", - required=False, - action="store_true", - help="Enrichment is only REQUIRED when building a release (or testing a release). In most cases, it is not required. Disabling enrichment BY DEFAULT (which is the default setting in contentctl.yml) is a signifcant time savings." - ) - - parser.set_defaults(func=lambda _: parser.print_help()) - actions_parser = parser.add_subparsers( - title="Splunk content actions", dest="action" - ) - - # available actions - init_parser = actions_parser.add_parser( - "init", - help="initialize a Splunk content pack using and customizes a configuration under contentctl.yml", - ) - validate_parser = actions_parser.add_parser( - "validate", help="validates a Splunk content pack" - ) - build_parser = actions_parser.add_parser( - "build", help="builds a Splunk content pack package to be distributed" - ) - - acs_deploy_parser = actions_parser.add_parser( - "acs_deploy", help="Deploys a previously built package via ACS. Note that 'contentctl build' command MUST have been run prior to running this command. It will NOT build a package itself." - ) - - new_content_parser = actions_parser.add_parser( - "new", help="create new Splunk content object (detection, or story)" - ) - reporting_parser = actions_parser.add_parser( - "report", help="create Splunk content report of the current pack" - ) - - api_deploy_parser = actions_parser.add_parser( - "api_deploy", help="Deploy content via API to a target Splunk Instance." - ) - - docs_parser = actions_parser.add_parser( - "docs", help="create documentation in docs folder" - ) - release_notes_parser = actions_parser.add_parser( - "release_notes", - help="Compares two tags and create release notes of what ESCU/BA content is added" - ) - - test_parser = actions_parser.add_parser( - "test", - help="Run a test of the detections against a Splunk Server or Splunk Docker Container", - ) - - convert_parser = actions_parser.add_parser("convert", help="Convert a sigma detection to a Splunk ESCU detection.") - - init_parser.set_defaults(func=initialize) - init_parser.add_argument( - "--demo", - action=argparse.BooleanOptionalAction, - help=( - "Use this flag to pre-populate the content pack " - "with one additional detection that will fail 'contentctl validate' " - "and on detection that will fail 'contentctl test'. This is useful " - "for demonstrating contentctl functionality." 
- ) - ) - - validate_parser.add_argument( - "-t", - "--type", - required=False, - type=str, - default="app", - help="Type of package: app, ssa or api" - ) - validate_parser.set_defaults(func=validate) - - build_parser.add_argument( - "-t", - "--type", - required=False, - type=str, - default="app", - help="Type of package: app, ssa or api" - ) - - build_parser.add_argument( - "--splunk_api_username", - required=False, - type=str, - default=None, - help=( - f"Username for running AppInspect and, if desired, installing your app via Admin Config Service (ACS). For documentation, " - "please review https://dev.splunk.com/enterprise/reference/appinspect/appinspectapiepref and https://docs.splunk.com/Documentation/SplunkCloud/9.1.2308/Config/ManageApps" - ) - ) - build_parser.add_argument( - "--splunk_api_password", - required=False, - type=str, - default=None, - help=( - f"Username for running AppInspect and, if desired, installing your app via Admin Config Service (ACS). For documentation, " - "please review https://dev.splunk.com/enterprise/reference/appinspect/appinspectapiepref and https://docs.splunk.com/Documentation/SplunkCloud/9.1.2308/Config/ManageApps" - ) - ) - - - build_parser.set_defaults(func=build) - - - acs_deploy_parser.add_argument( - "--splunk_api_username", - required=True, - type=str, - help=( - f"Username for running AppInspect and, if desired, installing your app via Admin Config Service (ACS). For documentation, " - "please review https://dev.splunk.com/enterprise/reference/appinspect/appinspectapiepref and https://docs.splunk.com/Documentation/SplunkCloud/9.1.2308/Config/ManageApps" - ) - ) - acs_deploy_parser.add_argument( - "--splunk_api_password", - required=True, - type=str, - help=( - f"Username for running AppInspect and, if desired, installing your app via Admin Config Service (ACS). For documentation, " - "please review https://dev.splunk.com/enterprise/reference/appinspect/appinspectapiepref and https://docs.splunk.com/Documentation/SplunkCloud/9.1.2308/Config/ManageApps" - ) - ) + test_input_dto = TestInputDto(detections_to_test, config) - acs_deploy_parser.add_argument( - "--splunk_cloud_jwt_token", - required=True, - type=str, - help=( - f"Target Splunk Cloud Stack JWT Token for app deployment. Note that your stack MUST Support Admin Config Server (ACS) and Automated Private App Vetting (APAV). For documentation, " - "on creating this token, please review https://docs.splunk.com/Documentation/SplunkCloud/9.1.2312/Security/CreateAuthTokens#Use_Splunk_Web_to_create_authentication_tokens" - ) - ) - - acs_deploy_parser.add_argument( - "--splunk_cloud_stack", - required=True, - type=str, - help=( - f"Target Splunk Cloud Stack for app deployment. Note that your stack MUST Support Admin Config Server (ACS) and Automated Private App Vetting (APAV). 
For documentation, " - "please review https://docs.splunk.com/Documentation/SplunkCloud/9.1.2308/Config/ManageApps" - ) - ) - - acs_deploy_parser.add_argument( - "--stack_type", - required=True, - type=str, - choices=["classic","victoria"], - help="Identifies your Splunk Cloud Stack as 'classic' or 'victoria' experience" - ) - - - acs_deploy_parser.set_defaults(func=acs_deploy) - - docs_parser.set_defaults(func=doc_gen) - - new_content_parser.add_argument( - "-t", - "--type", - required=True, - type=str, - help="Type of security content object, choose between `detection`, `story`", - ) - new_content_parser.set_defaults(func=new_content) - - reporting_parser.set_defaults(func=reporting) - - api_deploy_parser.set_defaults(func=api_deploy) - - test_parser.add_argument( - "-t", - "--type", - required=False, - type=str, - default="app", - help="Type of package: app, ssa or api" - ) - test_parser.add_argument( - "--mode", - required=False, - default=None, - type=str, - choices=DetectionTestingMode._member_names_, - help="Controls which detections to test. 'all' will test all detections in the repo." - "'selected' will test a list of detections that have " - "been provided via the --selected command line argument (see for more details).", - ) - test_parser.add_argument( - "--behavior", - required=False, - default=None, - type=str, - choices=PostTestBehavior._member_names_, - help="Controls what to do when a test completes. 'always_pause' means that the state of " - "the test will always pause after a test, allowing the user to log into the " - "server and experiment with the search and data before it is removed. 'pause_on_failure' " - "will pause execution ONLY when a test fails. The user may press ENTER in the terminal " - "running the test to move on to the next test. 'never_pause' will never stop testing, " - "even if a test fails. Please note that 'never_pause' MUST be used for a test to " - "run in an unattended manner or in a CI/CD system - otherwise a single failed test " - "will result in the testing never finishing as the tool waits for input.", - ) - test_parser.add_argument( - "-d", - "--detections_list", - required=False, - nargs="+", - default=None, - type=str, - help="An explicit list " - "of detections to test. Their paths should be relative to the app path.", - ) - - test_parser.add_argument("--unattended", action=argparse.BooleanOptionalAction) - - test_parser.add_argument( - "--infrastructure", - required=False, - type=str, - choices=DetectionTestingTargetInfrastructure._member_names_, - default=None, - help=( - "Determines what infrastructure to use for testing. The options are " - "container and server. Container will set up Splunk Container(s) at runtime, " - "install all relevant apps, and perform configurations. Server will use " - "preconfigured server(s) either specified on the command line or in " - "contentctl_test.yml." - ) - ) - test_parser.add_argument("--num_containers", required=False, default=1, type=int) - test_parser.add_argument("--server_info", required=False, default=None, type=str, nargs='+') + t = Test() - test_parser.add_argument("--target_branch", required=False, default=None, type=str) - test_parser.add_argument("--test_branch", required=False, default=None, type=str) - test_parser.add_argument("--dry_run", action=argparse.BooleanOptionalAction, help="Used to emit dry_run_config/contentctl_test.yml "\ - "and dry_run_config/contentctl.yml files. 
These are used for CI/CD-driven internal testing workflows and are not intended for public use at this time.") + # Remove detections that we do not want to test because they are + # not production, the correct type, or manual_test only + filted_test_input_dto = t.filter_detections(test_input_dto) - # Even though these are also options to build, make them available to test_parser - # as well to make the tool easier to use - test_parser.add_argument( - "--splunk_api_username", - required=False, - type=str, - default=None, - help=( - f"Username for running AppInspect on {SecurityContentProduct.SPLUNK_APP.name} ONLY. For documentation, " - "please review https://dev.splunk.com/enterprise/reference/appinspect/appinspectapiepref" - ) - ) - test_parser.add_argument( - "--splunk_api_password", - required=False, - type=str, - default=None, - help=( - f"Password for running AppInspect on {SecurityContentProduct.SPLUNK_APP.name} ONLY. For documentation, " - "please review https://dev.splunk.com/enterprise/reference/appinspect/appinspectapiepref" - ) - ) - test_parser.add_argument( - "--enable_integration_testing", - required=False, - action="store_true", - help="Whether integration testing should be enabled, in addition to unit testing (requires a configured Splunk " - "instance with ES installed)" - ) - - # TODO (cmcginley): add flag for enabling logging for correlation_search logging - # TODO (cmcginley): add flag for changing max_sleep time for integration tests - # TODO (cmcginley): add setting to skip listing skips -> test_config.TestConfig, - # contentctl.test, contentctl.main - - - - test_parser.set_defaults(func=test) + if config.plan_only: + #Emit the test plan and quit. Do not actually run the test + config.dumpCICDPlanAndQuit(gitServer.getHash(),filted_test_input_dto.detections) + return + + success = t.execute(filted_test_input_dto) + + if success: + #Everything passed! + print("All tests have run successfully or been marked as 'skipped'") + return + raise Exception("There was at least one unsuccessful test") - convert_parser.add_argument( - "-dm", - "--data_model", - required=False, - type=str, - default="cim", - help="converter target, choose between cim, raw, ocsf" - ) - convert_parser.add_argument("-lo", "--log_source", required=False, type=str, help="converter log source") - convert_parser.add_argument("-dp", "--detection_path", required=False, type=str, help="path to a single detection") - convert_parser.add_argument( - "-df", - "--detection_folder", - required=False, - type=str, - help="path to a detection folder" +def main(): + try: + configFile = pathlib.Path("contentctl.yml") + + # We MUST load a config (with testing info) object so that we can + # properly construct the command line, including 'contentctl test' parameters. + if not configFile.is_file(): + if "init" not in sys.argv and "--help" not in sys.argv and "-h" not in sys.argv: + raise Exception(f"'{configFile}' not found in the current directory.\n" + "Please ensure you are in the correct directory or run 'contentctl init' to create a new content pack.") + + if "--help" in sys.argv or "-h" in sys.argv: + print("Warning - contentctl.yml is missing from this directory. The configuration values showed at the default and are informational only.\n" + "Please ensure that contentctl.yml exists by manually creating it or running 'contentctl init'") + # Otherwise generate a stub config file. + # It will be used during init workflow + + t = test() + config_obj = t.model_dump() + + else: + #The file exists, so load it up! 
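The config bootstrap in main() has two paths: when contentctl.yml is missing, a stub config built purely from model defaults is used (so --help and the init workflow still work); otherwise the YAML file is loaded and validated. Below is a minimal sketch of that load-or-default logic using a hypothetical TestConfigSketch model, not the real contentctl config classes.

from pathlib import Path
import yaml
from pydantic import BaseModel

class TestConfigSketch(BaseModel):
    path: Path = Path(".")
    app_label: str = "my_splunk_content_pack"

def load_or_default(config_file: Path = Path("contentctl.yml")) -> TestConfigSketch:
    if not config_file.is_file():
        # No contentctl.yml yet (e.g. before 'init'): fall back to defaults so that
        # help text can still be rendered and the init workflow can run.
        return TestConfigSketch()
    # The file exists, so load and validate it.
    return TestConfigSketch.model_validate(yaml.safe_load(config_file.read_text()))

print(load_or_default().model_dump())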
+ config_obj = YmlReader().load_file(configFile) + t = test.model_validate(config_obj) + except Exception as e: + print(f"Error validating 'contentctl.yml':\n{str(e)}") + sys.exit(1) + + + # For ease of generating the constructor, we want to allow construction + # of an object from default values WITHOUT requiring all fields to be declared + # with defaults OR in the config file. As such, we construct the model rather + # than model_validating it so that validation does not run on missing required fields. + # Note that we HAVE model_validated the test object fields already above + + models = tyro.extras.subcommand_type_from_defaults( + { + "init":init.model_validate(config_obj), + "validate": validate.model_validate(config_obj), + "report": report.model_validate(config_obj), + "build":build.model_validate(config_obj), + "inspect": inspect.model_construct(**t.__dict__), + "new":new.model_validate(config_obj), + "test":test.model_validate(config_obj), + "test_servers":test_servers.model_construct(**t.__dict__), + "release_notes": release_notes.model_construct(**config_obj), + "deploy_acs": deploy_acs.model_construct(**t.__dict__), + #"deploy_rest":deploy_rest() + } ) - convert_parser.add_argument("-o", "--output", required=True, type=str, help="output path to store the detections") - convert_parser.set_defaults(func=convert) - - release_notes_parser.add_argument("--old_tag", "--old_tag", required=False, type=str, help="Choose the tag and compare with previous tag") - release_notes_parser.add_argument("--new_tag", "--new_tag", required=False, type=str, help="Choose the tag and compare with previous tag") - release_notes_parser.add_argument("--latest_branch", "--latest_branch", required=False, type=str, help="Choose the tag and compare with previous tag") - release_notes_parser.set_defaults(func=release_notes) - - # parse them - args = parser.parse_args() + - - print_ascii_art() try: - args.func(args) + # Since some model(s) were constructed and not model_validated, we have to catch + # warnings again when creating the cli + with warnings.catch_warnings(action="ignore"): + config = tyro.cli(models) + + + if type(config) == init: + t.__dict__.update(config.__dict__) + init_func(t) + elif type(config) == validate: + validate_func(config) + elif type(config) == report: + report_func(config) + elif type(config) == build: + build_func(config) + elif type(config) == new: + new_func(config) + elif type(config) == inspect: + inspect_func(config) + elif type(config) == release_notes: + release_notes_func(config) + elif type(config) == deploy_acs: + updated_config = deploy_acs.model_validate(config) + deploy_acs_func(updated_config) + elif type(config) == deploy_rest: + deploy_rest_func(config) + elif type(config) == test or type(config) == test_servers: + if type(config) == test: + #construct the container Infrastructure objects + config.getContainerInfrastructureObjects() + #otherwise, they have already been passed as servers + test_common_func(config) + else: + raise Exception(f"Unknown command line type '{type(config).__name__}'") except Exception as e: - print(f"Error during contentctl:\n{str(e)}") import traceback traceback.print_exc() - # traceback.print_stack() + traceback.print_stack() + #print(e) sys.exit(1) + \ No newline at end of file diff --git a/contentctl/enrichments/attack_enrichment.py b/contentctl/enrichments/attack_enrichment.py index 5995280d..db11aeb4 100644 --- a/contentctl/enrichments/attack_enrichment.py +++ b/contentctl/enrichments/attack_enrichment.py @@ -1,4 +1,5 @@ +from 
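Command construction and dispatch now go through tyro rather than argparse: a mapping of subcommand names to default-populated config instances is turned into a Union type with tyro.extras.subcommand_type_from_defaults, and the object returned by tyro.cli is dispatched on by its concrete type. A self-contained sketch of that flow follows; BuildCmd and TestCmd are illustrative stand-ins for the real contentctl config classes.

from dataclasses import dataclass
import tyro

@dataclass
class BuildCmd:
    path: str = "."

@dataclass
class TestCmd:
    path: str = "."
    plan_only: bool = False

# Each key becomes a subcommand whose defaults come from the supplied instance.
Commands = tyro.extras.subcommand_type_from_defaults(
    {"build": BuildCmd(), "test": TestCmd()}
)

if __name__ == "__main__":
    cfg = tyro.cli(Commands)
    if isinstance(cfg, BuildCmd):
        print("building", cfg.path)
    elif isinstance(cfg, TestCmd):
        print("testing", cfg.path, "plan_only =", cfg.plan_only)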
__future__ import annotations import csv import os from posixpath import split @@ -6,16 +7,57 @@ import sys from attackcti import attack_client import logging +from pydantic import BaseModel, Field +from dataclasses import field +from typing import Union,Annotated +from contentctl.objects.mitre_attack_enrichment import MitreAttackEnrichment +from contentctl.objects.config import validate logging.getLogger('taxii2client').setLevel(logging.CRITICAL) -class AttackEnrichment(): +class AttackEnrichment(BaseModel): + data: dict[str, MitreAttackEnrichment] = field(default_factory=dict) + use_enrichment:bool = True + + @staticmethod + def getAttackEnrichment(config:validate)->AttackEnrichment: + enrichment = AttackEnrichment(use_enrichment=config.enrichments) + _ = enrichment.get_attack_lookup(str(config.path)) + return enrichment + + def getEnrichmentByMitreID(self, mitre_id:Annotated[str, Field(pattern="^T\d{4}(.\d{3})?$")])->Union[MitreAttackEnrichment,None]: + if not self.use_enrichment: + return None + + enrichment = self.data.get(mitre_id, None) + if enrichment is not None: + return enrichment + else: + raise ValueError(f"Error, Unable to find Mitre Enrichment for MitreID {mitre_id}") + - @classmethod - def get_attack_lookup(self, input_path: str, store_csv = False, force_cached_or_offline: bool = False, skip_enrichment:bool = False) -> dict: + def addMitreID(self, technique:dict, tactics:list[str], groups:list[str])->None: + + technique_id = technique['technique_id'] + technique_obj = technique['technique'] + tactics.sort() + groups.sort() + + if technique_id in self.data: + raise ValueError(f"Error, trying to redefine MITRE ID '{technique_id}'") + + self.data[technique_id] = MitreAttackEnrichment(mitre_attack_id=technique_id, + mitre_attack_technique=technique_obj, + mitre_attack_tactics=tactics, + mitre_attack_groups=groups) + + + def get_attack_lookup(self, input_path: str, store_csv: bool = False, force_cached_or_offline: bool = False, skip_enrichment:bool = False) -> dict: + if self.use_enrichment is False: + return {} print("Getting MITRE Attack Enrichment Data. 
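AttackEnrichment is now a pydantic model that caches MitreAttackEnrichment objects keyed by technique ID and exposes them through addMitreID and getEnrichmentByMitreID. The following simplified, self-contained stand-in (using pydantic's Field for the default factory) shows the intended usage; the field names mirror the ones used in this file, but the classes themselves are illustrative only.

from pydantic import BaseModel, Field

class MitreAttackEnrichmentSketch(BaseModel):
    mitre_attack_id: str
    mitre_attack_technique: str
    mitre_attack_tactics: list[str]
    mitre_attack_groups: list[str]

class AttackEnrichmentSketch(BaseModel):
    data: dict[str, MitreAttackEnrichmentSketch] = Field(default_factory=dict)
    use_enrichment: bool = True

    def addMitreID(self, technique: dict, tactics: list[str], groups: list[str]) -> None:
        technique_id = technique["technique_id"]
        if technique_id in self.data:
            raise ValueError(f"Error, trying to redefine MITRE ID '{technique_id}'")
        self.data[technique_id] = MitreAttackEnrichmentSketch(
            mitre_attack_id=technique_id,
            mitre_attack_technique=technique["technique"],
            mitre_attack_tactics=sorted(tactics),
            mitre_attack_groups=sorted(groups),
        )

    def getEnrichmentByMitreID(self, mitre_id: str) -> MitreAttackEnrichmentSketch | None:
        if not self.use_enrichment:
            return None
        if mitre_id not in self.data:
            raise ValueError(f"Unable to find Mitre Enrichment for MitreID {mitre_id}")
        return self.data[mitre_id]

enrichment = AttackEnrichmentSketch()
enrichment.addMitreID(
    {"technique_id": "T1003.001", "technique": "OS Credential Dumping: LSASS Memory"},
    tactics=["Credential Access"],
    groups=["APT28"],
)
print(enrichment.getEnrichmentByMitreID("T1003.001"))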
This may take some time...") attack_lookup = dict() - file_path = os.path.join(input_path, "lookups", "mitre_enrichment.csv") + file_path = os.path.join(input_path, "app_template", "lookups", "mitre_enrichment.csv") if skip_enrichment is True: print("Skipping enrichment") @@ -28,36 +70,38 @@ def get_attack_lookup(self, input_path: str, store_csv = False, force_cached_or_ lift = attack_client() print(f"\r{'Client'.rjust(23)}: [{100:3.0f}%]...Done!", end="\n", flush=True) - print(f"\r{'Enterprise'.rjust(23)}: [{0.0:3.0f}%]...", end="", flush=True) - all_enterprise = lift.get_enterprise(stix_format=False) - print(f"\r{'Enterprise'.rjust(23)}: [{100:3.0f}%]...Done!", end="\n", flush=True) + print(f"\r{'Techniques'.rjust(23)}: [{0.0:3.0f}%]...", end="", flush=True) + all_enterprise_techniques = lift.get_enterprise_techniques(stix_format=False) + + print(f"\r{'Techniques'.rjust(23)}: [{100:3.0f}%]...Done!", end="\n", flush=True) print(f"\r{'Relationships'.rjust(23)}: [{0.0:3.0f}%]...", end="", flush=True) - enterprise_relationships = lift.get_enterprise_relationships() + enterprise_relationships = lift.get_enterprise_relationships(stix_format=False) print(f"\r{'Relationships'.rjust(23)}: [{100:3.0f}%]...Done!", end="\n", flush=True) print(f"\r{'Groups'.rjust(23)}: [{0:3.0f}%]...", end="", flush=True) - enterprise_groups = lift.get_enterprise_groups() + enterprise_groups = lift.get_enterprise_groups(stix_format=False) print(f"\r{'Groups'.rjust(23)}: [{100:3.0f}%]...Done!", end="\n", flush=True) - for index, technique in enumerate(all_enterprise['techniques']): - progress_percent = ((index+1)/len(all_enterprise['techniques'])) * 100 + + for index, technique in enumerate(all_enterprise_techniques): + progress_percent = ((index+1)/len(all_enterprise_techniques)) * 100 if (sys.stdout.isatty() and sys.stdin.isatty() and sys.stderr.isatty()): print(f"\r\t{'MITRE Technique Progress'.rjust(23)}: [{progress_percent:3.0f}%]...", end="", flush=True) apt_groups = [] for relationship in enterprise_relationships: - if (relationship['target_ref'] == technique['id']) and relationship['source_ref'].startswith('intrusion-set'): + if (relationship['target_object'] == technique['id']) and relationship['source_object'].startswith('intrusion-set'): for group in enterprise_groups: - if relationship['source_ref'] == group['id']: - apt_groups.append(group['name']) + if relationship['source_object'] == group['id']: + apt_groups.append(group['group']) tactics = [] if ('tactic' in technique): for tactic in technique['tactic']: tactics.append(tactic.replace('-',' ').title()) - if not ('revoked' in technique): - attack_lookup[technique['technique_id']] = {'technique': technique['technique'], 'tactics': tactics, 'groups': apt_groups} + self.addMitreID(technique, tactics, apt_groups) + attack_lookup[technique['technique_id']] = {'technique': technique['technique'], 'tactics': tactics, 'groups': apt_groups} if store_csv: f = open(file_path, 'w') @@ -79,13 +123,19 @@ def get_attack_lookup(self, input_path: str, store_csv = False, force_cached_or_ f.close() except Exception as err: - print('Warning: ' + str(err)) - print('Use local copy lookups/mitre_enrichment.csv') - dict_from_csv = {} + print(f'\nError: {str(err)}') + print('Use local copy app_template/lookups/mitre_enrichment.csv') with open(file_path, mode='r') as inp: reader = csv.reader(inp) attack_lookup = {rows[0]:{'technique': rows[1], 'tactics': rows[2].split('|'), 'groups': rows[3].split('|')} for rows in reader} attack_lookup.pop('mitre_id') + for key in 
attack_lookup.keys(): + technique_input = {'technique_id': key , 'technique': attack_lookup[key]['technique'] } + tactics_input = attack_lookup[key]['tactics'] + groups_input = attack_lookup[key]['groups'] + self.addMitreID(technique=technique_input, tactics=tactics_input, groups=groups_input) + + print("Done!") return attack_lookup \ No newline at end of file diff --git a/contentctl/enrichments/cve_enrichment.py b/contentctl/enrichments/cve_enrichment.py index e09fbb89..2d4d824e 100644 --- a/contentctl/enrichments/cve_enrichment.py +++ b/contentctl/enrichments/cve_enrichment.py @@ -1,10 +1,13 @@ - +from __future__ import annotations from pycvesearch import CVESearch import functools import os import shelve import time -import sys +from typing import Annotated +from pydantic import BaseModel,Field,ConfigDict + +from decimal import Decimal CVESSEARCH_API_URL = 'https://cve.circl.lu' CVE_CACHE_FILENAME = "lookups/CVE_CACHE.db" @@ -12,7 +15,7 @@ NON_PERSISTENT_CACHE = {} - +'''''' @functools.cache def cvesearch_helper(url:str, cve_id:str, force_cached_or_offline:bool=False, max_api_attempts:int=3, retry_sleep_seconds:int=5): if max_api_attempts < 1: @@ -63,10 +66,22 @@ def cvesearch_id_helper(url:str): -class CveEnrichment(): +class CveEnrichmentObj(BaseModel): + id:Annotated[str, "^CVE-[1|2][0-9]{3}-[0-9]+$"] + cvss:Annotated[Decimal, Field(ge=.1, le=10, decimal_places=1)] + summary:str + + + @staticmethod + def buildEnrichmentOnFailure(id:Annotated[str, "^CVE-[1|2][0-9]{3}-[0-9]+$"], errorMessage:str)->CveEnrichmentObj: + message = f"{errorMessage}. Default CVSS of 5.0 used" + print(message) + return CveEnrichmentObj(id=id, cvss=Decimal(5.0), summary=message) + +class CveEnrichment(): @classmethod - def enrich_cve(self, cve_id: str, force_cached_or_offline: bool = False) -> dict: + def enrich_cve(cls, cve_id: str, force_cached_or_offline: bool = False, treat_failures_as_warnings:bool=True) -> CveEnrichmentObj: cve_enriched = dict() try: @@ -74,12 +89,12 @@ def enrich_cve(self, cve_id: str, force_cached_or_offline: bool = False) -> dict cve_enriched['id'] = cve_id cve_enriched['cvss'] = result['cvss'] cve_enriched['summary'] = result['summary'] - except TypeError as TypeErr: - # there was a error calling the circl api lets just empty the object - print("WARNING, issue enriching {0}, with error: {1}".format(cve_id, str(TypeErr))) - cve_enriched = dict() - except Exception as e: - print("WARNING - {0}".format(str(e))) - - return cve_enriched \ No newline at end of file + message = f"issue enriching {cve_id}, with error: {str(e)}" + if treat_failures_as_warnings: + return CveEnrichmentObj.buildEnrichmentOnFailure(id = cve_id, errorMessage=f"WARNING, {message}") + else: + raise ValueError(f"ERROR, {message}") + + return CveEnrichmentObj.model_validate(cve_enriched) + diff --git a/contentctl/helper/config_handler.py b/contentctl/helper/config_handler.py deleted file mode 100644 index 7169b907..00000000 --- a/contentctl/helper/config_handler.py +++ /dev/null @@ -1,75 +0,0 @@ -import os -import collections -import sys -import pathlib - -from contentctl.input.yml_reader import YmlReader -from contentctl.objects.config import Config, TestConfig, ConfigEnrichments -from contentctl.objects.test_config import InfrastructureConfig, Infrastructure -from contentctl.objects.enums import DetectionTestingMode -from typing import Union -import argparse - -from contentctl.objects.enums import ( - DetectionTestingTargetInfrastructure, -) - -class ConfigHandler: - - @classmethod - def read_config(cls, 
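enrich_cve now returns a typed CveEnrichmentObj rather than a bare dict, and API failures either degrade to a default CVSS of 5.0 (when treated as warnings) or raise. A small stand-alone sketch of that failure-handling contract follows; the circl.lu call is replaced with a stub and the class is a stand-in, not the real CveEnrichmentObj.

from decimal import Decimal
from pydantic import BaseModel

class CveEnrichmentObjSketch(BaseModel):
    id: str
    cvss: Decimal
    summary: str

def enrich_cve_sketch(cve_id: str, treat_failures_as_warnings: bool = True) -> CveEnrichmentObjSketch:
    try:
        # Stand-in for the real CVESearch API call, which may time out or be unreachable.
        raise TimeoutError("https://cve.circl.lu unreachable")
    except Exception as e:
        message = f"issue enriching {cve_id}, with error: {e}. Default CVSS of 5.0 used"
        if treat_failures_as_warnings:
            return CveEnrichmentObjSketch(id=cve_id, cvss=Decimal("5.0"), summary=message)
        raise ValueError(f"ERROR, {message}")

print(enrich_cve_sketch("CVE-2021-44228").cvss)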
args:argparse.Namespace) -> Config: - config_path = pathlib.Path(args.path)/"contentctl.yml" - try: - yml_dict = YmlReader.load_file(config_path, add_fields=False) - - except: - print("ERROR: no contentctl.yml found in given path") - sys.exit(1) - - try: - config = Config.parse_obj(yml_dict) - if args.enable_enrichment: - config.enrichments.attack_enrichment = True - else: - # Use whatever setting is in contentctl.yml - pass - except Exception as e: - raise Exception(f"Error reading config file: {str(e)}") - - - return config - - @classmethod - def read_test_config(cls, args:argparse.Namespace) -> TestConfig: - test_config_path = pathlib.Path(args.path)/"contentctl_test.yml" - try: - yml_dict = YmlReader.load_file(test_config_path, add_fields=False) - except: - print("ERROR: no contentctl_test.yml found in given path") - sys.exit(1) - - try: - if args.dry_run: - yml_dict['apps'] = [] - yml_dict['infrastructure_config'] = InfrastructureConfig(infrastructure_type=DetectionTestingTargetInfrastructure.server, ).__dict__ - if args.server_info is None: - yml_dict['infrastructure_config']['infrastructures'] = [Infrastructure().__dict__] - if args.mode != DetectionTestingMode.changes: - yml_dict['version_control_config'] = None - if yml_dict.get("version_control_config", None) is not None: - #If they have been passed, override the target and test branch. If not, keep the defaults - yml_dict.get("version_control_config", None)['target_branch'] = args.target_branch or yml_dict.get("version_control_config", None)['target_branch'] - yml_dict.get("version_control_config", None)['test_branch'] = args.test_branch or yml_dict.get("version_control_config", None)['test_branch'] - if yml_dict.get("infrastructure_config", None) is not None: - yml_dict.get("infrastructure_config", None)['infrastructure_type'] = args.infrastructure or yml_dict.get("infrastructure_config", None)['infrastructure_type'] - test_config = TestConfig.parse_obj(yml_dict) - except Exception as e: - raise Exception(f"Error reading test config file: {str(e)}") - - - return test_config - - - - - \ No newline at end of file diff --git a/contentctl/helper/link_validator.py b/contentctl/helper/link_validator.py index f12bd027..6fbb9fde 100644 --- a/contentctl/helper/link_validator.py +++ b/contentctl/helper/link_validator.py @@ -1,8 +1,5 @@ -import re -from tracemalloc import start -from unittest.mock import DEFAULT -from pydantic import BaseModel, validator, root_validator,Field -from typing import Union, Callable +from pydantic import BaseModel, model_validator +from typing import Union, Callable, Any import requests import urllib3, urllib3.exceptions import time @@ -14,6 +11,7 @@ DEFAULT_USER_AGENT_STRING = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36" ALLOWED_HTTP_CODES = [200] class LinkStats(BaseModel): + #Static Values method: Callable = requests.get allowed_http_codes: list[int] = ALLOWED_HTTP_CODES @@ -42,17 +40,17 @@ def is_link_valid(self, referencing_file:str)->bool: self.referencing_files.add(referencing_file) return self.valid - @root_validator - def check_reference(cls, values): + @model_validator(mode="before") + def check_reference(cls, data:Any)->Any: start_time = time.time() #Get out all the fields names to make them easier to reference - method = values['method'] - reference = values['reference'] - timeout_seconds = values['timeout_seconds'] - headers = values['headers'] - allow_redirects = values['allow_redirects'] - verify_ssl = 
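The link validator's pre-validation hook is ported from pydantic v1's root_validator to v2's model_validator(mode="before"), which receives the raw input dict (named data here) and returns it after mutation. A minimal stand-alone example of the same pattern, with the HTTP request itself stubbed out:

import time
from typing import Any
from pydantic import BaseModel, model_validator

class LinkStatsSketch(BaseModel):
    reference: str
    valid: bool = False
    resolution_time: float = 0.0

    @model_validator(mode="before")
    @classmethod
    def check_reference(cls, data: Any) -> Any:
        # 'data' is the raw dict passed to the constructor; mutate it and return it.
        start_time = time.time()
        reference = data["reference"]
        if not reference.startswith(("http://", "https://")):
            raise ValueError(f"Reference {reference} does not begin with http(s)")
        data["valid"] = True  # a real implementation would issue the request here
        data["resolution_time"] = time.time() - start_time
        return data

print(LinkStatsSketch(reference="https://example.com"))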
values['verify_ssl'] - allowed_http_codes = values['allowed_http_codes'] + method = data['method'] + reference = data['reference'] + timeout_seconds = data['timeout_seconds'] + headers = data['headers'] + allow_redirects = data['allow_redirects'] + verify_ssl = data['verify_ssl'] + allowed_http_codes = data['allowed_http_codes'] if not (reference.startswith("http://") or reference.startswith("https://")): raise(ValueError(f"Reference {reference} does not begin with http(s). Only http(s) references are supported")) @@ -61,29 +59,29 @@ def check_reference(cls, values): headers = headers, allow_redirects=allow_redirects, verify=verify_ssl) resolution_time = time.time() - start_time - values['status_code'] = get.status_code - values['resolution_time'] = resolution_time + data['status_code'] = get.status_code + data['resolution_time'] = resolution_time if reference != get.url: - values['redirect'] = get.url + data['redirect'] = get.url else: - values['redirect'] = None #None is also already the default + data['redirect'] = None #None is also already the default #Returns the updated values and sets them for the object if get.status_code in allowed_http_codes: - values['valid'] = True + data['valid'] = True else: #print(f"Unacceptable HTTP Status Code {get.status_code} received for {reference}") - values['valid'] = False - return values + data['valid'] = False + return data except Exception as e: resolution_time = time.time() - start_time #print(f"Reference {reference} was not reachable after {resolution_time:.2f} seconds") - values['status_code'] = 0 - values['valid'] = False - values['redirect'] = None - values['resolution_time'] = resolution_time - return values + data['status_code'] = 0 + data['valid'] = False + data['redirect'] = None + data['resolution_time'] = resolution_time + return data class LinkValidator(abc.ABC): diff --git a/contentctl/helper/utils.py b/contentctl/helper/utils.py index ddf0e407..a9b17d3b 100644 --- a/contentctl/helper/utils.py +++ b/contentctl/helper/utils.py @@ -6,13 +6,17 @@ import string from timeit import default_timer import pathlib -import datetime + from typing import Union, Tuple -from pydantic import ValidationError import tqdm -from contentctl.objects.security_content_object import SecurityContentObject from math import ceil +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from contentctl.objects.security_content_object import SecurityContentObject +from contentctl.objects.security_content_object import SecurityContentObject + + TOTAL_BYTES = 0 ALWAYS_PULL = True diff --git a/contentctl/input/baseline_builder.py b/contentctl/input/baseline_builder.py deleted file mode 100644 index d4608a3b..00000000 --- a/contentctl/input/baseline_builder.py +++ /dev/null @@ -1,66 +0,0 @@ -import sys -import pathlib -from pydantic import ValidationError - -from contentctl.input.yml_reader import YmlReader -from contentctl.objects.baseline import Baseline -from contentctl.objects.enums import SecurityContentType -from contentctl.objects.enums import SecurityContentProduct - - -class BaselineBuilder(): - baseline : Baseline - - def setObject(self, path: pathlib.Path) -> None: - yml_dict = YmlReader.load_file(path) - yml_dict["tags"]["name"] = yml_dict["name"] - - try: - self.baseline = Baseline.parse_obj(yml_dict) - - except ValidationError as e: - print('Validation Error for file ' + str(path)) - print(e) - sys.exit(1) - - - def addDeployment(self, deployments: list) -> None: - if not self.baseline.deployment: - - matched_deployments = [] - - for d in deployments: - 
d_tags = dict(d.tags) - baseline_dict = self.baseline.dict() - baseline_tags_dict = self.baseline.tags.dict() - for d_tag in d_tags.keys(): - for attr in baseline_dict.keys(): - if attr == d_tag: - if isinstance(baseline_dict[attr], str): - if baseline_dict[attr] == d_tags[d_tag]: - matched_deployments.append(d) - elif isinstance(baseline_dict[attr], list): - if d_tags[d_tag] in baseline_dict[attr]: - matched_deployments.append(d) - - for attr in baseline_tags_dict.keys(): - if attr == d_tag: - if isinstance(baseline_tags_dict[attr], str): - if baseline_tags_dict[attr] == d_tags[d_tag]: - matched_deployments.append(d) - elif isinstance(baseline_tags_dict[attr], list): - if d_tags[d_tag] in baseline_tags_dict[attr]: - matched_deployments.append(d) - - if len(matched_deployments) == 0: - raise ValueError('No deployment found for baseline: ' + self.baseline.name) - - self.baseline.deployment = matched_deployments[-1] - - - def reset(self) -> None: - self.baseline = None - - - def getObject(self) -> Baseline: - return self.baseline \ No newline at end of file diff --git a/contentctl/input/basic_builder.py b/contentctl/input/basic_builder.py deleted file mode 100644 index c1e5c6f3..00000000 --- a/contentctl/input/basic_builder.py +++ /dev/null @@ -1,58 +0,0 @@ -import sys -import pathlib -from pydantic import ValidationError - -from contentctl.objects.security_content_object import SecurityContentObject -from contentctl.input.yml_reader import YmlReader -from contentctl.objects.enums import SecurityContentType -from contentctl.objects.deployment import Deployment -from contentctl.objects.macro import Macro -from contentctl.objects.lookup import Lookup -from contentctl.objects.playbook import Playbook -from contentctl.objects.unit_test import UnitTest - - -class BasicBuilder(): - security_content_obj : SecurityContentObject - - - def setObject(self, path: pathlib.Path, type: SecurityContentType) -> None: - yml_dict = YmlReader.load_file(path) - if type == SecurityContentType.deployments: - if "alert_action" in yml_dict: - alert_action_dict = yml_dict["alert_action"] - for key in alert_action_dict.keys(): - yml_dict[key] = yml_dict["alert_action"][key] - try: - self.security_content_obj = Deployment.parse_obj(yml_dict) - except ValidationError as e: - print('Validation Error for file ' + str(path)) - print(e) - sys.exit(1) - elif type == SecurityContentType.macros: - try: - self.security_content_obj = Macro.parse_obj(yml_dict) - except ValidationError as e: - print('Validation Error for file ' + str(path)) - print(e) - sys.exit(1) - elif type == SecurityContentType.lookups: - try: - self.security_content_obj = Lookup.parse_obj(yml_dict) - except ValidationError as e: - print('Validation Error for file ' + str(path)) - print(e) - sys.exit(1) - elif type == SecurityContentType.unit_tests: - try: - self.security_content_obj = UnitTest.parse_obj(yml_dict) - except ValidationError as e: - print('Validation Error for file ' + str(path)) - print(e) - sys.exit(1) - - def reset(self) -> None: - self.security_content_obj = None - - def getObject(self) -> SecurityContentObject: - return self.security_content_obj \ No newline at end of file diff --git a/contentctl/input/detection_builder.py b/contentctl/input/detection_builder.py deleted file mode 100644 index e0d4e9c2..00000000 --- a/contentctl/input/detection_builder.py +++ /dev/null @@ -1,370 +0,0 @@ -import sys -import re -import os - -from pydantic import ValidationError - -from contentctl.input.yml_reader import YmlReader -from 
contentctl.objects.detection import Detection -from contentctl.objects.security_content_object import SecurityContentObject -from contentctl.objects.macro import Macro -from contentctl.objects.lookup import Lookup -from contentctl.objects.mitre_attack_enrichment import MitreAttackEnrichment -from contentctl.objects.integration_test import IntegrationTest -from contentctl.enrichments.cve_enrichment import CveEnrichment -from contentctl.enrichments.splunk_app_enrichment import SplunkAppEnrichment -from contentctl.objects.config import ConfigDetectionConfiguration -from contentctl.objects.constants import ATTACK_TACTICS_KILLCHAIN_MAPPING - -class DetectionBuilder(): - security_content_obj : SecurityContentObject - - - def setObject(self, path: str) -> None: - yml_dict = YmlReader.load_file(path) - yml_dict["tags"]["name"] = yml_dict["name"] - self.security_content_obj = Detection.parse_obj(yml_dict) - self.security_content_obj.source = os.path.split(os.path.dirname(self.security_content_obj.file_path))[-1] - - - def addDeployment(self, deployments: list) -> None: - if self.security_content_obj: - if not self.security_content_obj.deployment: - matched_deployments = [] - for d in deployments: - d_tags = dict(d.tags) - for d_tag in d_tags.keys(): - for attr in dir(self.security_content_obj): - if not (attr.startswith('__') or attr.startswith('_')): - if attr == d_tag: - if type(self.security_content_obj.__getattribute__(attr)) is str: - attr_values = [self.security_content_obj.__getattribute__(attr)] - else: - attr_values = self.security_content_obj.__getattribute__(attr) - - for attr_value in attr_values: - if attr_value == d_tags[d_tag]: - matched_deployments.append(d) - - if len(matched_deployments) == 0: - self.security_content_obj.deployment = None - else: - self.security_content_obj.deployment = matched_deployments[-1] - - - def addRBA(self) -> None: - if self.security_content_obj: - - risk_objects = [] - risk_object_user_types = {'user', 'username', 'email address'} - risk_object_system_types = {'device', 'endpoint', 'hostname', 'ip address'} - process_threat_object_types = {'process name','process'} - file_threat_object_types = {'file name','file', 'file hash'} - url_threat_object_types = {'url string','url'} - ip_threat_object_types = {'ip address'} - - if hasattr(self.security_content_obj.tags, 'observable') and hasattr(self.security_content_obj.tags, 'risk_score'): - for entity in self.security_content_obj.tags.observable: - - risk_object = dict() - if 'Victim' in entity.role and entity.type.lower() in risk_object_user_types: - risk_object['risk_object_type'] = 'user' - risk_object['risk_object_field'] = entity.name - risk_object['risk_score'] = self.security_content_obj.tags.risk_score - risk_objects.append(risk_object) - - elif 'Victim' in entity.role and entity.type.lower() in risk_object_system_types: - risk_object['risk_object_type'] = 'system' - risk_object['risk_object_field'] = entity.name - risk_object['risk_score'] = self.security_content_obj.tags.risk_score - risk_objects.append(risk_object) - - elif 'Attacker' in entity.role and entity.type.lower() in process_threat_object_types: - risk_object['threat_object_field'] = entity.name - risk_object['threat_object_type'] = "process" - risk_objects.append(risk_object) - - elif 'Attacker' in entity.role and entity.type.lower() in file_threat_object_types: - risk_object['threat_object_field'] = entity.name - risk_object['threat_object_type'] = "file_name" - risk_objects.append(risk_object) - - elif 'Attacker' in entity.role and 
entity.type.lower() in ip_threat_object_types: - risk_object['threat_object_field'] = entity.name - risk_object['threat_object_type'] = "ip_address" - risk_objects.append(risk_object) - - elif 'Attacker' in entity.role and entity.type.lower() in url_threat_object_types: - risk_object['threat_object_field'] = entity.name - risk_object['threat_object_type'] = "url" - risk_objects.append(risk_object) - - else: - risk_object['risk_object_type'] = 'other' - risk_object['risk_object_field'] = entity.name - risk_object['risk_score'] = self.security_content_obj.tags.risk_score - risk_objects.append(risk_object) - continue - - if self.security_content_obj.tags.risk_score >= 80: - self.security_content_obj.tags.risk_severity = 'high' - elif (self.security_content_obj.tags.risk_score >= 50 and self.security_content_obj.tags.risk_score <= 79): - self.security_content_obj.tags.risk_severity = 'medium' - else: - self.security_content_obj.tags.risk_severity = 'low' - - self.security_content_obj.risk = risk_objects - - - def addProvidingTechnologies(self) -> None: - if self.security_content_obj: - if 'Endpoint' in str(self.security_content_obj.search): - self.security_content_obj.providing_technologies = ["Sysmon", "Microsoft Windows","Carbon Black Response","CrowdStrike Falcon", "Symantec Endpoint Protection"] - - if "`sysmon`" in str(self.security_content_obj.search): - self.security_content_obj.providing_technologies = ["Microsoft Sysmon"] - - if "`cloudtrail`" in str(self.security_content_obj.search): - self.security_content_obj.providing_technologies = ["Amazon Web Services - Cloudtrail"] - - if '`wineventlog_security`' in self.security_content_obj.search or '`powershell`' in self.security_content_obj.search: - self.security_content_obj.providing_technologies = ["Microsoft Windows"] - - if '`ms_defender`' in self.security_content_obj.search: - self.security_content_obj.providing_technologies = ["Microsoft Defender"] - if '`pingid`' in self.security_content_obj.search: - self.security_content_obj.providing_technologies = ["Ping ID"] - if '`okta' in self.security_content_obj.search: - self.security_content_obj.providing_technologies = ["Okta"] - if '`zeek_' in self.security_content_obj.search: - self.security_content_obj.providing_technologies = ["Zeek"] - if '`amazon_security_lake`' in self.security_content_obj.search: - self.security_content_obj.providing_technologies = ["Amazon Security Lake"] - - if '`azure_monitor_aad`' in self.security_content_obj.search : - self.security_content_obj.providing_technologies = ["Azure AD", "Entra ID"] - - if '`o365_' in self.security_content_obj.search: - self.security_content_obj.providing_technologies = ["Microsoft Office 365"] - - if '`gsuite' in self.security_content_obj.search or '`google_' in self.security_content_obj.search or '`gws_' in self.security_content_obj.search: - self.security_content_obj.providing_technologies = ["Google Workspace","Google Cloud Platform"] - - if '`splunkd_' in self.security_content_obj.search or 'audit_searches' in self.security_content_obj.search: - self.security_content_obj.providing_technologies = ["Splunk Internal Logs"] - - if '`kube' in self.security_content_obj.search: - self.security_content_obj.providing_technologies = ["Kubernetes"] - - def addNesFields(self) -> None: - if self.security_content_obj: - if self.security_content_obj.deployment: - if self.security_content_obj.deployment.notable: - nes_fields = ",".join(list(self.security_content_obj.deployment.notable.nes_fields)) - self.security_content_obj.nes_fields = 
nes_fields - - - def addMappings(self) -> None: - if self.security_content_obj: - keys = ['mitre_attack', 'kill_chain_phases', 'cis20', 'nist'] - mappings = {} - for key in keys: - if key == 'mitre_attack': - if getattr(self.security_content_obj.tags, 'mitre_attack_id'): - mappings[key] = getattr(self.security_content_obj.tags, 'mitre_attack_id') - elif getattr(self.security_content_obj.tags, key): - mappings[key] = getattr(self.security_content_obj.tags, key) - self.security_content_obj.mappings = mappings - - - def addAnnotations(self) -> None: - if self.security_content_obj: - annotations = {} - annotation_keys = ['mitre_attack', 'kill_chain_phases', 'cis20', 'nist', - 'analytic_story', 'context', 'impact', 'confidence', 'cve'] - for key in annotation_keys: - if key == 'mitre_attack': - if getattr(self.security_content_obj.tags, 'mitre_attack_id'): - annotations[key] = getattr(self.security_content_obj.tags, 'mitre_attack_id') - try: - if getattr(self.security_content_obj.tags, key): - annotations[key] = getattr(self.security_content_obj.tags, key) - except AttributeError as e: - continue - self.security_content_obj.annotations = annotations - - - def addPlaybook(self, playbooks: list) -> None: - if self.security_content_obj: - matched_playbooks = [] - for playbook in playbooks: - if playbook.tags.detections: - for detection in playbook.tags.detections: - if detection == self.security_content_obj.name: - matched_playbooks.append(playbook) - - self.security_content_obj.playbooks = matched_playbooks - - - def addBaseline(self, baselines: list) -> None: - if self.security_content_obj: - matched_baselines = [] - for baseline in baselines: - for detection in baseline.tags.detections: - if detection == self.security_content_obj.name: - matched_baselines.append(baseline) - - self.security_content_obj.baselines = matched_baselines - - - def addUnitTest(self) -> None: - if self.security_content_obj: - if self.security_content_obj.tests: - self.security_content_obj.test = self.security_content_obj.tests[0] - - - def addMitreAttackEnrichment(self, attack_enrichment: dict) -> None: - if self.security_content_obj: - if attack_enrichment: - if self.security_content_obj.tags.mitre_attack_id: - self.security_content_obj.tags.mitre_attack_enrichments = [] - - for mitre_attack_id in self.security_content_obj.tags.mitre_attack_id: - if mitre_attack_id in attack_enrichment: - mitre_attack_enrichment = MitreAttackEnrichment( - mitre_attack_id = mitre_attack_id, - mitre_attack_technique = attack_enrichment[mitre_attack_id]["technique"], - mitre_attack_tactics = sorted(attack_enrichment[mitre_attack_id]["tactics"]), - mitre_attack_groups = sorted(attack_enrichment[mitre_attack_id]["groups"]) - ) - self.security_content_obj.tags.mitre_attack_enrichments.append(mitre_attack_enrichment) - else: - #print("mitre_attack_id " + mitre_attack_id + " doesn't exist for detecction " + self.security_content_obj.name) - raise ValueError("mitre_attack_id " + mitre_attack_id + " doesn't exist for detection " + self.security_content_obj.name) - - - def addMacros(self, macros: list) -> None: - if self.security_content_obj: - found_macros, missing_macros = Macro.get_macros(self.security_content_obj.search, macros) - name = self.security_content_obj.name.replace(' ', '_').replace('-', '_').replace('.', '_').replace('/', '_').lower() + '_filter' - macro = Macro(name=name, definition='search *', description='Update this macro to limit the output results to filter out false positives.') - found_macros.append(macro) - 
self.security_content_obj.macros = found_macros - if len(missing_macros) > 0: - raise Exception(f"{self.security_content_obj.name} is missing the following macros: {missing_macros}") - - - - def addLookups(self, lookups: list) -> None: - if self.security_content_obj: - found_lookups, missing_lookups = Lookup.get_lookups(self.security_content_obj.search, lookups) - self.security_content_obj.lookups = found_lookups - if len(missing_lookups) > 0: - raise Exception(f"{self.security_content_obj.name} is missing the following lookups: {missing_lookups}") - - - - def addCve(self) -> None: - if self.security_content_obj: - self.security_content_obj.cve_enrichment = [] - if self.security_content_obj.tags.cve: - for cve in self.security_content_obj.tags.cve: - self.security_content_obj.cve_enrichment.append(CveEnrichment.enrich_cve(cve)) - - - def addSplunkApp(self) -> None: - if self.security_content_obj: - self.security_content_obj.splunk_app_enrichment = [] - if self.security_content_obj.tags.supported_tas: - for splunk_app in self.security_content_obj.tags.supported_tas: - self.security_content_obj.splunk_app_enrichment.append(SplunkAppEnrichment.enrich_splunk_app(splunk_app)) - - - def addCIS(self) -> None: - if self.security_content_obj: - if self.security_content_obj.tags.security_domain == "network": - self.security_content_obj.tags.cis20 = ["CIS 13"] - else: - self.security_content_obj.tags.cis20 = ["CIS 10"] - - - def addKillChainPhase(self) -> None: - if self.security_content_obj: - if not self.security_content_obj.tags.kill_chain_phases: - kill_chain_phases = list() - if self.security_content_obj.tags.mitre_attack_enrichments: - for mitre_attack_enrichment in self.security_content_obj.tags.mitre_attack_enrichments: - for mitre_attack_tactic in mitre_attack_enrichment.mitre_attack_tactics: - kill_chain_phases.append(ATTACK_TACTICS_KILLCHAIN_MAPPING[mitre_attack_tactic]) - self.security_content_obj.tags.kill_chain_phases = list(dict.fromkeys(kill_chain_phases)) - - - def addNist(self) -> None: - if self.security_content_obj: - if self.security_content_obj.type == "TTP": - self.security_content_obj.tags.nist = ["DE.CM"] - else: - self.security_content_obj.tags.nist = ["DE.AE"] - - - def addDatamodel(self) -> None: - if self.security_content_obj: - self.security_content_obj.datamodel = [] - data_models = [ - "Authentication", - "Change", - "Change_Analysis", - "Email", - "Endpoint", - "Network_Resolution", - "Network_Sessions", - "Network_Traffic", - "Risk", - "Splunk_Audit", - "UEBA", - "Updates", - "Vulnerabilities", - "Web" - ] - for data_model in data_models: - if data_model in self.security_content_obj.search: - self.security_content_obj.datamodel.append(data_model) - - def skipIntegrationTests(self) -> None: - """ - Skip all integration tests - """ - # Sanity check for typing and in setObject wasn't called yet - if self.security_content_obj is not None and isinstance(self.security_content_obj, Detection): - for test in self.security_content_obj.tests: - if isinstance(test, IntegrationTest): - test.skip("TEST SKIPPED: Skipping all integration tests") - else: - raise ValueError( - "security_content_obj must be an instance of Detection to skip integration tests, " - f"not {type(self.security_content_obj)}" - ) - - def skipAllTests(self, manual_test_explanation:str) -> None: - """ - Skip all unit and integration tests if the manual_test flag is defined in the yml - """ - # Sanity check for typing and in setObject wasn't called yet - if self.security_content_obj is not None and 
isinstance(self.security_content_obj, Detection): - for test in self.security_content_obj.tests: - #This should skip both unit and integration tests as appropriate - test.skip(f"TEST SKIPPED: Detection marked as 'manual_test' with explanation: {manual_test_explanation}") - - else: - raise ValueError( - "security_content_obj must be an instance of Detection to skip unit and integration tests due " - f"to the presence of the manual_test field, not {type(self.security_content_obj)}" - ) - - - def reset(self) -> None: - self.security_content_obj = None - - - def getObject(self) -> SecurityContentObject: - return self.security_content_obj diff --git a/contentctl/input/director.py b/contentctl/input/director.py index 95655011..eef9879a 100644 --- a/contentctl/input/director.py +++ b/contentctl/input/director.py @@ -1,13 +1,17 @@ import os import sys -import pathlib -from dataclasses import dataclass +from typing import Union +from dataclasses import dataclass, field from pydantic import ValidationError - - - +from uuid import UUID +from contentctl.input.yml_reader import YmlReader + + + from contentctl.objects.detection import Detection from contentctl.objects.story import Story + +from contentctl.objects.enums import SecurityContentProduct from contentctl.objects.baseline import Baseline from contentctl.objects.investigation import Investigation from contentctl.objects.playbook import Playbook @@ -15,34 +19,22 @@ from contentctl.objects.macro import Macro from contentctl.objects.lookup import Lookup from contentctl.objects.ssa_detection import SSADetection +from contentctl.objects.atomic import AtomicTest +from contentctl.objects.security_content_object import SecurityContentObject -from contentctl.input.basic_builder import BasicBuilder -from contentctl.input.detection_builder import DetectionBuilder -from contentctl.input.ssa_detection_builder import SSADetectionBuilder -from contentctl.input.playbook_builder import PlaybookBuilder -from contentctl.input.baseline_builder import BaselineBuilder -from contentctl.input.investigation_builder import InvestigationBuilder -from contentctl.input.story_builder import StoryBuilder -from contentctl.objects.enums import SecurityContentType -from contentctl.objects.enums import SecurityContentProduct -from contentctl.objects.enums import DetectionStatus -from contentctl.helper.utils import Utils from contentctl.enrichments.attack_enrichment import AttackEnrichment -from contentctl.objects.config import Config - -from contentctl.objects.config import Config +from contentctl.enrichments.cve_enrichment import CveEnrichment +from contentctl.objects.config import validate -@dataclass(frozen=True) -class DirectorInputDto: - input_path: pathlib.Path - product: SecurityContentProduct - config: Config - @dataclass() class DirectorOutputDto: + # Atomic Tests are first because parsing them + # is far quicker than attack_enrichment + atomic_tests: Union[list[AtomicTest],None] + attack_enrichment: AttackEnrichment detections: list[Detection] stories: list[Story] baselines: list[Baseline] @@ -52,188 +44,183 @@ class DirectorOutputDto: lookups: list[Lookup] deployments: list[Deployment] ssa_detections: list[SSADetection] + #cve_enrichment: CveEnrichment + + name_to_content_map: dict[str, SecurityContentObject] = field(default_factory=dict) + uuid_to_content_map: dict[UUID, SecurityContentObject] = field(default_factory=dict) + + + + + +from contentctl.input.ssa_detection_builder import SSADetectionBuilder +from contentctl.objects.enums import SecurityContentType + +from 
contentctl.objects.enums import DetectionStatus +from contentctl.helper.utils import Utils + + + + + + + class Director(): - input_dto: DirectorInputDto + input_dto: validate output_dto: DirectorOutputDto - basic_builder: BasicBuilder - playbook_builder: PlaybookBuilder - baseline_builder: BaselineBuilder - investigation_builder: InvestigationBuilder - story_builder: StoryBuilder - detection_builder: DetectionBuilder ssa_detection_builder: SSADetectionBuilder - attack_enrichment: dict - config: Config + def __init__(self, output_dto: DirectorOutputDto) -> None: self.output_dto = output_dto - self.attack_enrichment = dict() - - - def execute(self, input_dto: DirectorInputDto) -> None: - self.input_dto = input_dto + self.ssa_detection_builder = SSADetectionBuilder() + + def addContentToDictMappings(self, content:SecurityContentObject): + content_name = content.name + if isinstance(content,SSADetection): + # Since SSA detections may have the same name as ESCU detection, + # for this function we prepend 'SSA ' to the name. + content_name = f"SSA {content_name}" + if content_name in self.output_dto.name_to_content_map: + raise ValueError(f"Duplicate name '{content_name}' with paths:\n" + f" - {content.file_path}\n" + f" - {self.output_dto.name_to_content_map[content_name].file_path}") + elif content.id in self.output_dto.uuid_to_content_map: + raise ValueError(f"Duplicate id '{content.id}' with paths:\n" + f" - {content.file_path}\n" + f" - {self.output_dto.name_to_content_map[content_name].file_path}") + + self.output_dto.name_to_content_map[content_name] = content + self.output_dto.uuid_to_content_map[content.id] = content + - if self.input_dto.config.enrichments.attack_enrichment: - self.attack_enrichment = AttackEnrichment.get_attack_lookup(self.input_dto.input_path) + + def execute(self, input_dto: validate) -> None: + self.input_dto = input_dto + - self.basic_builder = BasicBuilder() - self.playbook_builder = PlaybookBuilder(self.input_dto.input_path) - self.baseline_builder = BaselineBuilder() - self.investigation_builder = InvestigationBuilder() - self.story_builder = StoryBuilder() - self.detection_builder = DetectionBuilder() - self.ssa_detection_builder = SSADetectionBuilder() - if self.input_dto.product == SecurityContentProduct.SPLUNK_APP or self.input_dto.product == SecurityContentProduct.API: - self.createSecurityContent(SecurityContentType.deployments) - self.createSecurityContent(SecurityContentType.lookups) - self.createSecurityContent(SecurityContentType.macros) - self.createSecurityContent(SecurityContentType.baselines) - self.createSecurityContent(SecurityContentType.investigations) - self.createSecurityContent(SecurityContentType.playbooks) - self.createSecurityContent(SecurityContentType.detections) - self.createSecurityContent(SecurityContentType.stories) - elif self.input_dto.product == SecurityContentProduct.SSA: - self.createSecurityContent(SecurityContentType.ssa_detections) + self.createSecurityContent(SecurityContentType.deployments) + self.createSecurityContent(SecurityContentType.lookups) + self.createSecurityContent(SecurityContentType.macros) + self.createSecurityContent(SecurityContentType.stories) + self.createSecurityContent(SecurityContentType.baselines) + self.createSecurityContent(SecurityContentType.investigations) + self.createSecurityContent(SecurityContentType.playbooks) + self.createSecurityContent(SecurityContentType.detections) + + + self.createSecurityContent(SecurityContentType.ssa_detections) - def createSecurityContent(self, type: 
SecurityContentType) -> None: - if type == SecurityContentType.ssa_detections: - files = Utils.get_all_yml_files_from_directory(os.path.join(self.input_dto.input_path, 'ssa_detections')) - elif type == SecurityContentType.unit_tests: - files = Utils.get_all_yml_files_from_directory(os.path.join(self.input_dto.input_path, 'tests')) + def createSecurityContent(self, contentType: SecurityContentType) -> None: + if contentType == SecurityContentType.ssa_detections: + files = Utils.get_all_yml_files_from_directory(os.path.join(self.input_dto.path, 'ssa_detections')) + security_content_files = [f for f in files if f.name.startswith('ssa___')] + + elif contentType in [SecurityContentType.deployments, + SecurityContentType.lookups, + SecurityContentType.macros, + SecurityContentType.stories, + SecurityContentType.baselines, + SecurityContentType.investigations, + SecurityContentType.playbooks, + SecurityContentType.detections]: + files = Utils.get_all_yml_files_from_directory(os.path.join(self.input_dto.path, str(contentType.name))) + security_content_files = [f for f in files if not f.name.startswith('ssa___')] else: - files = Utils.get_all_yml_files_from_directory(os.path.join(self.input_dto.input_path, str(type.name))) + raise(Exception(f"Cannot createSecurityContent for unknown product.")) validation_errors = [] already_ran = False progress_percent = 0 - - if self.input_dto.product == SecurityContentProduct.SPLUNK_APP or self.input_dto.product == SecurityContentProduct.API: - security_content_files = [f for f in files if not f.name.startswith('ssa___')] - elif self.input_dto.product == SecurityContentProduct.SSA: - security_content_files = [f for f in files if f.name.startswith('ssa___')] - else: - raise(Exception(f"Cannot createSecurityContent for unknown product '{self.input_dto.product}'")) - for index,file in enumerate(security_content_files): progress_percent = ((index+1)/len(security_content_files)) * 100 try: - type_string = type.name.upper() - if type == SecurityContentType.lookups: - self.constructLookup(self.basic_builder, file) - lookup = self.basic_builder.getObject() + type_string = contentType.name.upper() + modelDict = YmlReader.load_file(file) + + if contentType == SecurityContentType.lookups: + lookup = Lookup.model_validate(modelDict,context={"output_dto":self.output_dto, "config":self.input_dto}) self.output_dto.lookups.append(lookup) + self.addContentToDictMappings(lookup) - elif type == SecurityContentType.macros: - self.constructMacro(self.basic_builder, file) - macro = self.basic_builder.getObject() + elif contentType == SecurityContentType.macros: + macro = Macro.model_validate(modelDict,context={"output_dto":self.output_dto}) self.output_dto.macros.append(macro) + self.addContentToDictMappings(macro) - elif type == SecurityContentType.deployments: - self.constructDeployment(self.basic_builder, file) - deployment = self.basic_builder.getObject() + elif contentType == SecurityContentType.deployments: + deployment = Deployment.model_validate(modelDict,context={"output_dto":self.output_dto}) self.output_dto.deployments.append(deployment) + self.addContentToDictMappings(deployment) - elif type == SecurityContentType.playbooks: - self.constructPlaybook(self.playbook_builder, file) - playbook = self.playbook_builder.getObject() - self.output_dto.playbooks.append(playbook) + elif contentType == SecurityContentType.playbooks: + playbook = Playbook.model_validate(modelDict,context={"output_dto":self.output_dto}) + self.output_dto.playbooks.append(playbook) + 
self.addContentToDictMappings(playbook) - elif type == SecurityContentType.baselines: - self.constructBaseline(self.baseline_builder, file) - baseline = self.baseline_builder.getObject() + elif contentType == SecurityContentType.baselines: + baseline = Baseline.model_validate(modelDict,context={"output_dto":self.output_dto}) self.output_dto.baselines.append(baseline) + self.addContentToDictMappings(baseline) - elif type == SecurityContentType.investigations: - self.constructInvestigation(self.investigation_builder, file) - investigation = self.investigation_builder.getObject() + elif contentType == SecurityContentType.investigations: + investigation = Investigation.model_validate(modelDict,context={"output_dto":self.output_dto}) self.output_dto.investigations.append(investigation) + self.addContentToDictMappings(investigation) - elif type == SecurityContentType.stories: - self.constructStory(self.story_builder, file) - story = self.story_builder.getObject() + elif contentType == SecurityContentType.stories: + story = Story.model_validate(modelDict,context={"output_dto":self.output_dto}) self.output_dto.stories.append(story) + self.addContentToDictMappings(story) - elif type == SecurityContentType.detections: - self.constructDetection(self.detection_builder, file) - detection = self.detection_builder.getObject() + elif contentType == SecurityContentType.detections: + detection = Detection.model_validate(modelDict,context={"output_dto":self.output_dto}) self.output_dto.detections.append(detection) + self.addContentToDictMappings(detection) - elif type == SecurityContentType.ssa_detections: - self.constructSSADetection(self.ssa_detection_builder, file) - detection = self.ssa_detection_builder.getObject() - if detection.status in [DetectionStatus.production.value, DetectionStatus.validation.value]: - self.output_dto.ssa_detections.append(detection) + elif contentType == SecurityContentType.ssa_detections: + self.constructSSADetection(self.ssa_detection_builder, self.output_dto,str(file)) + ssa_detection = self.ssa_detection_builder.getObject() + if ssa_detection.status in [DetectionStatus.production.value, DetectionStatus.validation.value]: + self.output_dto.ssa_detections.append(ssa_detection) + self.addContentToDictMappings(ssa_detection) else: - raise Exception(f"Unsupported type: [{type}]") + raise Exception(f"Unsupported type: [{contentType}]") if (sys.stdout.isatty() and sys.stdin.isatty() and sys.stderr.isatty()) or not already_ran: already_ran = True print(f"\r{f'{type_string} Progress'.rjust(23)}: [{progress_percent:3.0f}%]...", end="", flush=True) except (ValidationError, ValueError) as e: - relative_path = file.absolute().relative_to(self.input_dto.input_path.absolute()) + relative_path = file.absolute().relative_to(self.input_dto.path.absolute()) validation_errors.append((relative_path,e)) + - print(f"\r{f'{type.name.upper()} Progress'.rjust(23)}: [{progress_percent:3.0f}%]...", end="", flush=True) + print(f"\r{f'{contentType.name.upper()} Progress'.rjust(23)}: [{progress_percent:3.0f}%]...", end="", flush=True) print("Done!") if len(validation_errors) > 0: - errors_string = '\n\n'.join([f"{e_tuple[0]}\n{str(e_tuple[1])}" for e_tuple in validation_errors]) + errors_string = '\n\n'.join([f"File: {e_tuple[0]}\nError: {str(e_tuple[1])}" for e_tuple in validation_errors]) + #print(f"The following {len(validation_errors)} error(s) were found during validation:\n\n{errors_string}\n\nVALIDATION FAILED") + # We quit after validation a single type/group of content because it can cause 
significant cascading errors in subsequent + # types of content (since they may import or otherwise use it) raise Exception(f"The following {len(validation_errors)} error(s) were found during validation:\n\n{errors_string}\n\nVALIDATION FAILED") - def constructDetection(self, builder: DetectionBuilder, file_path: str) -> None: - builder.reset() - builder.setObject(file_path) - builder.addDeployment(self.output_dto.deployments) - builder.addMitreAttackEnrichment(self.attack_enrichment) - builder.addKillChainPhase() - builder.addCIS() - builder.addNist() - builder.addDatamodel() - builder.addRBA() - builder.addProvidingTechnologies() - builder.addNesFields() - builder.addAnnotations() - builder.addMappings() - builder.addBaseline(self.output_dto.baselines) - builder.addPlaybook(self.output_dto.playbooks) - builder.addMacros(self.output_dto.macros) - builder.addLookups(self.output_dto.lookups) - - if self.input_dto.config.enrichments.attack_enrichment: - builder.addMitreAttackEnrichment(self.attack_enrichment) - - if self.input_dto.config.enrichments.cve_enrichment: - builder.addCve() - - if self.input_dto.config.enrichments.splunk_app_enrichment: - builder.addSplunkApp() - - # Skip all integration tests if configured to do so - # TODO: is there a better way to handle this? The `test` portion of the config is not defined for validate - if (self.input_dto.config.test is not None) and (not self.input_dto.config.test.enable_integration_testing): - builder.skipIntegrationTests() - - if builder.security_content_obj is not None and \ - builder.security_content_obj.tags is not None and \ - isinstance(builder.security_content_obj.tags.manual_test,str): - # Set all tests, both Unit AND Integration, to manual_test. Note that integration test messages - # will intentionally overwrite the justification in the skipIntegrationTests call above. 
- builder.skipAllTests(builder.security_content_obj.tags.manual_test) - + + - def constructSSADetection(self, builder: DetectionBuilder, file_path: str) -> None: + def constructSSADetection(self, builder: SSADetectionBuilder, directorOutput:DirectorOutputDto, file_path: str) -> None: builder.reset() - builder.setObject(file_path) - builder.addMitreAttackEnrichment(self.attack_enrichment) + builder.setObject(file_path,self.output_dto) + builder.addMitreAttackEnrichmentNew(directorOutput.attack_enrichment) builder.addKillChainPhase() builder.addCIS() builder.addNist() @@ -243,53 +230,4 @@ def constructSSADetection(self, builder: DetectionBuilder, file_path: str) -> No builder.addRBA() - def constructStory(self, builder: StoryBuilder, file_path: str) -> None: - builder.reset() - builder.setObject(file_path) - builder.addDetections(self.output_dto.detections, self.input_dto.config) - builder.addInvestigations(self.output_dto.investigations) - builder.addBaselines(self.output_dto.baselines) - builder.addAuthorCompanyName() - - - def constructBaseline(self, builder: BaselineBuilder, file_path: str) -> None: - builder.reset() - builder.setObject(file_path) - builder.addDeployment(self.output_dto.deployments) - - - def constructDeployment(self, builder: BasicBuilder, file_path: str) -> None: - builder.reset() - builder.setObject(file_path, SecurityContentType.deployments) - - - def constructLookup(self, builder: BasicBuilder, file_path: str) -> None: - builder.reset() - builder.setObject(file_path, SecurityContentType.lookups) - - - def constructMacro(self, builder: BasicBuilder, file_path: str) -> None: - builder.reset() - builder.setObject(file_path, SecurityContentType.macros) - - - def constructPlaybook(self, builder: PlaybookBuilder, file_path: str) -> None: - builder.reset() - builder.setObject(file_path) - builder.addDetections() - - - def constructTest(self, builder: BasicBuilder, file_path: str) -> None: - builder.reset() - builder.setObject(file_path, SecurityContentType.unit_tests) - - - def constructInvestigation(self, builder: InvestigationBuilder, file_path: str) -> None: - builder.reset() - builder.setObject(file_path) - builder.addInputs() - builder.addLowercaseName() - - def constructObjects(self, builder: BasicBuilder, file_path: str) -> None: - builder.reset() - builder.setObject(file_path) \ No newline at end of file + \ No newline at end of file diff --git a/contentctl/input/investigation_builder.py b/contentctl/input/investigation_builder.py deleted file mode 100644 index 388f5fb2..00000000 --- a/contentctl/input/investigation_builder.py +++ /dev/null @@ -1,42 +0,0 @@ -import re -import sys - -from pydantic import ValidationError - -from contentctl.objects.investigation import Investigation -from contentctl.input.yml_reader import YmlReader -from contentctl.objects.enums import SecurityContentType - - -class InvestigationBuilder(): - investigation: Investigation - - def setObject(self, path: str) -> None: - yml_dict = YmlReader.load_file(path) - try: - self.investigation = Investigation.parse_obj(yml_dict) - except ValidationError as e: - print('Validation Error for file ' + path) - print(e) - sys.exit(1) - - def reset(self) -> None: - self.investigation = None - - - def getObject(self) -> Investigation: - return self.investigation - - - def addInputs(self) -> None: - pattern = r"\$([^\s.]*)\$" - inputs = [] - - for input in re.findall(pattern, self.investigation.search): - inputs.append(input) - - self.investigation.inputs = inputs - - - def addLowercaseName(self) -> None: - 
self.investigation.lowercase_name = self.investigation.name.replace(' ', '_').replace('-','_').replace('.','_').replace('/','_').lower().replace(' ', '_').replace('-','_').replace('.','_').replace('/','_').lower() \ No newline at end of file diff --git a/contentctl/input/new_content_generator.py b/contentctl/input/new_content_generator.py deleted file mode 100644 index d9aa6659..00000000 --- a/contentctl/input/new_content_generator.py +++ /dev/null @@ -1,95 +0,0 @@ -import os -import uuid -import questionary -from dataclasses import dataclass -from datetime import datetime - -from contentctl.objects.enums import SecurityContentType -from contentctl.input.new_content_questions import NewContentQuestions - - -@dataclass(frozen=True) -class NewContentGeneratorInputDto: - type: SecurityContentType - - -@dataclass(frozen=True) -class NewContentGeneratorOutputDto: - obj: dict - answers: dict - - -class NewContentGenerator(): - - - def __init__(self, output_dto: NewContentGeneratorOutputDto) -> None: - self.output_dto = output_dto - - - def execute(self, input_dto: NewContentGeneratorInputDto) -> None: - if input_dto.type == SecurityContentType.detections: - questions = NewContentQuestions.get_questions_detection() - answers = questionary.prompt(questions) - self.output_dto.answers.update(answers) - self.output_dto.obj['name'] = answers['detection_name'] - self.output_dto.obj['id'] = str(uuid.uuid4()) - self.output_dto.obj['version'] = 1 - self.output_dto.obj['date'] = datetime.today().strftime('%Y-%m-%d') - self.output_dto.obj['author'] = answers['detection_author'] - self.output_dto.obj['data_source'] = answers['data_source'] - self.output_dto.obj['type'] = answers['detection_type'] - self.output_dto.obj['status'] = "production" #start everything as production since that's what we INTEND the content to become - self.output_dto.obj['description'] = 'UPDATE_DESCRIPTION' - file_name = self.output_dto.obj['name'].replace(' ', '_').replace('-','_').replace('.','_').replace('/','_').lower() - self.output_dto.obj['search'] = answers['detection_search'] + ' | `' + file_name + '_filter`' - self.output_dto.obj['how_to_implement'] = 'UPDATE_HOW_TO_IMPLEMENT' - self.output_dto.obj['known_false_positives'] = 'UPDATE_KNOWN_FALSE_POSITIVES' - self.output_dto.obj['references'] = ['REFERENCE'] - self.output_dto.obj['tags'] = dict() - self.output_dto.obj['tags']['analytic_story'] = ['UPDATE_STORY_NAME'] - self.output_dto.obj['tags']['asset_type'] = 'UPDATE asset_type' - self.output_dto.obj['tags']['confidence'] = 'UPDATE value between 1-100' - self.output_dto.obj['tags']['impact'] = 'UPDATE value between 1-100' - self.output_dto.obj['tags']['message'] = 'UPDATE message' - self.output_dto.obj['tags']['mitre_attack_id'] = [x.strip() for x in answers['mitre_attack_ids'].split(',')] - self.output_dto.obj['tags']['observable'] = [{'name': 'UPDATE', 'type': 'UPDATE', 'role': ['UPDATE']}] - self.output_dto.obj['tags']['product'] = ['Splunk Enterprise','Splunk Enterprise Security','Splunk Cloud'] - self.output_dto.obj['tags']['required_fields'] = ['UPDATE'] - self.output_dto.obj['tags']['risk_score'] = 'UPDATE (impact * confidence)/100' - self.output_dto.obj['tags']['security_domain'] = answers['security_domain'] - self.output_dto.obj['tags']['cve'] = ['UPDATE WITH CVE(S) IF APPLICABLE'] - - #generate the tests section - self.output_dto.obj['tests'] = [ - { - 'name': "True Positive Test", - 'attack_data': [ - { - 'data': "Enter URL for Dataset Here. 
This may also be a relative or absolute path on your local system for testing.", - "sourcetype": "UPDATE SOURCETYPE", - "source": "UPDATE SOURCE" - } - ] - } - ] - - - - elif input_dto.type == SecurityContentType.stories: - questions = NewContentQuestions.get_questions_story() - answers = questionary.prompt(questions) - self.output_dto.answers.update(answers) - self.output_dto.obj['name'] = answers['story_name'] - self.output_dto.obj['id'] = str(uuid.uuid4()) - self.output_dto.obj['version'] = 1 - self.output_dto.obj['date'] = datetime.today().strftime('%Y-%m-%d') - self.output_dto.obj['author'] = answers['story_author'] - self.output_dto.obj['description'] = 'UPDATE_DESCRIPTION' - self.output_dto.obj['narrative'] = 'UPDATE_NARRATIVE' - self.output_dto.obj['references'] = [] - self.output_dto.obj['tags'] = dict() - self.output_dto.obj['tags']['analytic_story'] = self.output_dto.obj['name'] - self.output_dto.obj['tags']['category'] = answers['category'] - self.output_dto.obj['tags']['product'] = ['Splunk Enterprise','Splunk Enterprise Security','Splunk Cloud'] - self.output_dto.obj['tags']['usecase'] = answers['usecase'] - self.output_dto.obj['tags']['cve'] = ['UPDATE WITH CVE(S) IF APPLICABLE'] \ No newline at end of file diff --git a/contentctl/input/new_content_questions.py b/contentctl/input/new_content_questions.py index f366426f..007dfffb 100644 --- a/contentctl/input/new_content_questions.py +++ b/contentctl/input/new_content_questions.py @@ -1,15 +1,13 @@ - - -class NewContentQuestions(): +class NewContentQuestions: @classmethod def get_questions_detection(self) -> list: questions = [ { - 'type': 'text', - 'message': 'enter detection name', - 'name': 'detection_name', - 'default': 'Powershell Encoded Command', + "type": "text", + "message": "enter detection name", + "name": "detection_name", + "default": "Powershell Encoded Command", }, { 'type': 'select', @@ -30,18 +28,23 @@ def get_questions_detection(self) -> list: 'name': 'detection_author', }, { - 'type': 'select', - 'message': 'select a detection type', - 'name': 'detection_type', - 'choices': [ - 'TTP', - 'Anomaly', - 'Hunting', - 'Baseline', - 'Investigation', - 'Correlation' + "type": "text", + "message": "enter author name", + "name": "detection_author", + }, + { + "type": "select", + "message": "select a detection type", + "name": "detection_type", + "choices": [ + "TTP", + "Anomaly", + "Hunting", + "Baseline", + "Investigation", + "Correlation", ], - 'default': 'TTP' + "default": "TTP", }, { 'type': 'checkbox', @@ -89,16 +92,16 @@ def get_questions_detection(self) -> list: ] }, { - 'type': 'text', - 'message': 'enter search (spl)', - 'name': 'detection_search', - 'default': '| UPDATE_SPL' + "type": "text", + "message": "enter search (spl)", + "name": "detection_search", + "default": "| UPDATE_SPL", }, { - 'type': 'text', - 'message': 'enter MITRE ATT&CK Technique IDs related to the detection, comma delimited for multiple', - 'name': 'mitre_attack_ids', - 'default': 'T1003.002' + "type": "text", + "message": "enter MITRE ATT&CK Technique IDs related to the detection, comma delimited for multiple", + "name": "mitre_attack_ids", + "default": "T1003.002", }, { 'type': 'select', @@ -117,49 +120,48 @@ def get_questions_detection(self) -> list: ] return questions - @classmethod def get_questions_story(self) -> list: questions = [ { - 'type': 'text', - 'message': 'enter story name', - 'name': 'story_name', - 'default': 'Suspicious Powershell Behavior', + "type": "text", + "message": "enter story name", + "name": "story_name", + 
"default": "Suspicious Powershell Behavior", }, { - 'type': 'text', - 'message': 'enter author name', - 'name': 'story_author', + "type": "text", + "message": "enter author name", + "name": "story_author", }, { - 'type': 'checkbox', - 'message': 'select a category', - 'name': 'category', - 'choices': [ - 'Adversary Tactics', - 'Account Compromise', - 'Unauthorized Software', - 'Best Practices', - 'Cloud Security', - 'Command and Control', - 'Lateral Movement', - 'Ransomware', - 'Privilege Escalation' - ] - }, - { - 'type': 'select', - 'message': 'select a use case', - 'name': 'usecase', - 'choices': [ - 'Advanced Threat Detection', - 'Security Monitoring', - 'Compliance', - 'Insider Threat', - 'Application Security', - 'Other' + "type": "checkbox", + "message": "select a category", + "name": "category", + "choices": [ + "Adversary Tactics", + "Account Compromise", + "Unauthorized Software", + "Best Practices", + "Cloud Security", + "Command and Control", + "Lateral Movement", + "Ransomware", + "Privilege Escalation", + ], + }, + { + "type": "select", + "message": "select a use case", + "name": "usecase", + "choices": [ + "Advanced Threat Detection", + "Security Monitoring", + "Compliance", + "Insider Threat", + "Application Security", + "Other", ], }, ] - return questions \ No newline at end of file + return questions diff --git a/contentctl/input/playbook_builder.py b/contentctl/input/playbook_builder.py deleted file mode 100644 index 4d83f74c..00000000 --- a/contentctl/input/playbook_builder.py +++ /dev/null @@ -1,68 +0,0 @@ - -import sys -import os -import pathlib -from pydantic import ValidationError -from pathlib import Path - -from contentctl.objects.playbook import Playbook -from contentctl.input.yml_reader import YmlReader - - -class PlaybookBuilder(): - playbook: Playbook - input_path: pathlib.Path - - - def __init__(self, input_path: pathlib.Path): - self.input_path = input_path - - def setObject(self, path: pathlib.Path) -> None: - yml_dict = YmlReader.load_file(path) - - try: - self.playbook = Playbook.parse_obj(yml_dict) - - except ValidationError as e: - print('Validation Error for file ' + str(path)) - print(e) - sys.exit(1) - - - def addDetections(self) -> None: - if self.playbook.tags.detections: - self.playbook.tags.detection_objects = [] - for detection in self.playbook.tags.detections: - detection_object = { - "name": detection, - "lowercase_name": self.convertNameToFileName(detection), - "path": self.findDetectionPath(detection) - } - self.playbook.tags.detection_objects.append(detection_object) - - - def reset(self) -> None: - self.playbook = None - - - def getObject(self) -> Playbook: - return self.playbook - - - def convertNameToFileName(self, name: str): - file_name = name \ - .replace(' ', '_') \ - .replace('-','_') \ - .replace('.','_') \ - .replace('/','_') \ - .lower() - return file_name - - - def findDetectionPath(self, detection_name: str) -> str: - for path in Path(os.path.join(self.input_path, 'detections')).rglob(self.convertNameToFileName(detection_name) + '.yml'): - normalized_path = os.path.normpath(path) - path_components = normalized_path.split(os.sep) - value_index = path_components.index('detections') - return "/".join(path_components[value_index:]) - raise Exception(f"Failed to find detection path for playbook with name '{detection_name}'") \ No newline at end of file diff --git a/contentctl/input/sigma_converter.py b/contentctl/input/sigma_converter.py index 89d8f777..3e168be6 100644 --- a/contentctl/input/sigma_converter.py +++ 
b/contentctl/input/sigma_converter.py @@ -213,7 +213,6 @@ def execute(self, input_dto: SigmaConverterInputDto) -> None: def read_detection(self, detection_path : str) -> Detection: yml_dict = YmlReader.load_file(detection_path) - yml_dict["tags"]["name"] = yml_dict["name"] #SSA Detections are ALLOWED to have names longer than 67 characters, #unlike Splunk App Detections. Because we still want to use the @@ -234,7 +233,7 @@ def read_detection(self, detection_path : str) -> Detection: detection.name = name - detection.source = os.path.split(os.path.dirname(detection_path))[-1] + return detection diff --git a/contentctl/input/ssa_detection_builder.py b/contentctl/input/ssa_detection_builder.py index 9e134f71..90c0ecde 100644 --- a/contentctl/input/ssa_detection_builder.py +++ b/contentctl/input/ssa_detection_builder.py @@ -3,7 +3,7 @@ import os from pydantic import ValidationError - +from typing import List from contentctl.input.yml_reader import YmlReader from contentctl.objects.detection import Detection from contentctl.objects.security_content_object import SecurityContentObject @@ -12,19 +12,20 @@ from contentctl.enrichments.cve_enrichment import CveEnrichment from contentctl.enrichments.splunk_app_enrichment import SplunkAppEnrichment from contentctl.objects.ssa_detection import SSADetection -from contentctl.objects.constants import ATTACK_TACTICS_KILLCHAIN_MAPPING - +from contentctl.objects.constants import * +from contentctl.input.director import DirectorOutputDto +from contentctl.enrichments.attack_enrichment import AttackEnrichment class SSADetectionBuilder(): security_content_obj : SSADetection - def setObject(self, path: str) -> None: + def setObject(self, path: str, + output_dto:DirectorOutputDto ) -> None: yml_dict = YmlReader.load_file(path) - yml_dict["tags"]["name"] = yml_dict["name"] + #self.security_content_obj = SSADetection.model_validate(yml_dict, context={"output_dto":output_dto}) self.security_content_obj = SSADetection.parse_obj(yml_dict) - self.security_content_obj.source = os.path.split(os.path.dirname(self.security_content_obj.file_path))[-1] - + self.security_content_obj.source = os.path.split(os.path.dirname(self.security_content_obj.file_path))[-1] def addProvidingTechnologies(self) -> None: if self.security_content_obj: @@ -90,6 +91,14 @@ def addMitreAttackEnrichment(self, attack_enrichment: dict) -> None: else: #print("mitre_attack_id " + mitre_attack_id + " doesn't exist for detecction " + self.security_content_obj.name) raise ValueError("mitre_attack_id " + mitre_attack_id + " doesn't exist for detection " + self.security_content_obj.name) + def addMitreAttackEnrichmentNew(self, attack_enrichment: AttackEnrichment) -> None: + if self.security_content_obj and self.security_content_obj.tags.mitre_attack_id: + self.security_content_obj.tags.mitre_attack_enrichments = [] + for mitre_attack_id in self.security_content_obj.tags.mitre_attack_id: + enrichment_obj = attack_enrichment.getEnrichmentByMitreID(mitre_attack_id) + if enrichment_obj is not None: + self.security_content_obj.tags.mitre_attack_enrichments.append(enrichment_obj) + def addCIS(self) -> None: diff --git a/contentctl/input/story_builder.py b/contentctl/input/story_builder.py deleted file mode 100644 index bd884517..00000000 --- a/contentctl/input/story_builder.py +++ /dev/null @@ -1,106 +0,0 @@ -import re -import sys -import pathlib -from pydantic import ValidationError - -from contentctl.objects.story import Story -from contentctl.objects.enums import SecurityContentType -from 
contentctl.objects.config import Config -from contentctl.input.yml_reader import YmlReader - - -class StoryBuilder(): - story: Story - - def setObject(self, path: pathlib.Path) -> None: - yml_dict = YmlReader.load_file(path) - yml_dict["tags"]["name"] = yml_dict["name"] - - try: - self.story = Story.parse_obj(yml_dict) - except ValidationError as e: - print('Validation Error for file ' + str(path)) - print(e) - sys.exit(1) - - def reset(self) -> None: - self.story = None - - def getObject(self) -> Story: - return self.story - - def addDetections(self, detections: list, config: Config) -> None: - matched_detection_names = [] - matched_detections = [] - mitre_attack_enrichments = [] - mitre_attack_tactics = set() - datamodels = set() - kill_chain_phases = set() - - for detection in detections: - if detection: - for detection_analytic_story in detection.tags.analytic_story: - if detection_analytic_story == self.story.name: - matched_detection_names.append(str(f'{config.build.prefix} - ' + detection.name + ' - Rule')) - mitre_attack_enrichments_list = [] - if (detection.tags.mitre_attack_enrichments): - for attack in detection.tags.mitre_attack_enrichments: - mitre_attack_enrichments_list.append({"mitre_attack_technique": attack.mitre_attack_technique}) - tags_obj = {"mitre_attack_enrichments": mitre_attack_enrichments_list} - matched_detections.append({ - "name": detection.name, - "source": detection.source, - "type": detection.type, - "tags": tags_obj - }) - datamodels.update(detection.datamodel) - if detection.tags.kill_chain_phases: - kill_chain_phases.update(detection.tags.kill_chain_phases) - - if detection.tags.mitre_attack_enrichments: - for attack_enrichment in detection.tags.mitre_attack_enrichments: - mitre_attack_tactics.update(attack_enrichment.mitre_attack_tactics) - if attack_enrichment.mitre_attack_id not in [attack.mitre_attack_id for attack in mitre_attack_enrichments]: - mitre_attack_enrichments.append(attack_enrichment) - - self.story.detection_names = matched_detection_names - self.story.detections = matched_detections - self.story.tags.datamodels = sorted(list(datamodels)) - self.story.tags.kill_chain_phases = sorted(list(kill_chain_phases)) - self.story.tags.mitre_attack_enrichments = mitre_attack_enrichments - self.story.tags.mitre_attack_tactics = sorted(list(mitre_attack_tactics)) - - - def addBaselines(self, baselines: list) -> None: - matched_baseline_names = [] - for baseline in baselines: - for baseline_analytic_story in baseline.tags.analytic_story: - if baseline_analytic_story == self.story.name: - matched_baseline_names.append(str(f'ESCU - ' + baseline.name)) - - self.story.baseline_names = matched_baseline_names - - def addInvestigations(self, investigations: list) -> None: - matched_investigation_names = [] - matched_investigations = [] - for investigation in investigations: - for investigation_analytic_story in investigation.tags.analytic_story: - if investigation_analytic_story == self.story.name: - matched_investigation_names.append(str(f'ESCU - ' + investigation.name + ' - Response Task')) - matched_investigations.append(investigation) - - self.story.investigation_names = matched_investigation_names - self.story.investigations = matched_investigations - - def addAuthorCompanyName(self) -> None: - match_author = re.search(r'^([^,]+)', self.story.author) - if match_author is None: - self.story.author_name = 'no' - else: - self.story.author_name = match_author.group(1) - - match_company = re.search(r',\s?(.*)$', self.story.author) - if match_company is None: 
- self.story.author_company = 'no' - else: - self.story.author_company = match_company.group(1) diff --git a/contentctl/input/yml_reader.py b/contentctl/input/yml_reader.py index 45db12cd..37714a2c 100644 --- a/contentctl/input/yml_reader.py +++ b/contentctl/input/yml_reader.py @@ -1,4 +1,4 @@ -from typing import Dict +from typing import Dict, Any import yaml @@ -9,7 +9,7 @@ class YmlReader(): @staticmethod - def load_file(file_path: pathlib.Path, add_fields=True, STRICT_YML_CHECKING=False) -> Dict: + def load_file(file_path: pathlib.Path, add_fields=True, STRICT_YML_CHECKING=False) -> Dict[str,Any]: try: file_handler = open(file_path, 'r', encoding="utf-8") @@ -27,7 +27,8 @@ def load_file(file_path: pathlib.Path, add_fields=True, STRICT_YML_CHECKING=Fals print(f"Error loading YML file {file_path}: {str(e)}") sys.exit(1) try: - yml_obj = list(yaml.safe_load_all(file_handler))[0] + #yml_obj = list(yaml.safe_load_all(file_handler))[0] + yml_obj = yaml.load(file_handler, Loader=yaml.CSafeLoader) except yaml.YAMLError as exc: print(exc) sys.exit(1) diff --git a/contentctl/objects/abstract_security_content_objects/detection_abstract.py b/contentctl/objects/abstract_security_content_objects/detection_abstract.py index bdd77cd8..a51eea07 100644 --- a/contentctl/objects/abstract_security_content_objects/detection_abstract.py +++ b/contentctl/objects/abstract_security_content_objects/detection_abstract.py @@ -1,72 +1,62 @@ from __future__ import annotations - +from typing import TYPE_CHECKING,Union, Optional, List, Any, Annotated +import os.path import re import pathlib -from pydantic import validator, root_validator -from typing import Union +from pydantic import BaseModel, field_validator, model_validator, ValidationInfo, Field, computed_field, model_serializer,ConfigDict, FilePath +from contentctl.objects.macro import Macro +from contentctl.objects.lookup import Lookup +if TYPE_CHECKING: + from contentctl.input.director import DirectorOutputDto + from contentctl.objects.baseline import Baseline + from contentctl.objects.security_content_object import SecurityContentObject from contentctl.objects.enums import AnalyticsType from contentctl.objects.enums import DataModel from contentctl.objects.enums import DetectionStatus +from contentctl.objects.enums import NistCategory + from contentctl.objects.detection_tags import DetectionTags -from contentctl.objects.config import ConfigDetectionConfiguration +from contentctl.objects.deployment import Deployment from contentctl.objects.unit_test import UnitTest -from contentctl.objects.integration_test import IntegrationTest -from contentctl.objects.macro import Macro -from contentctl.objects.lookup import Lookup -from contentctl.objects.baseline import Baseline -from contentctl.objects.playbook import Playbook -from contentctl.helper.link_validator import LinkValidator -from contentctl.objects.enums import SecurityContentType from contentctl.objects.test_group import TestGroup +from contentctl.objects.integration_test import IntegrationTest -class Detection_Abstract(SecurityContentObject): - # contentType: SecurityContentType = SecurityContentType.detections - # NOTE: because `use_enum_values` is configured, this will actually be type str - type: AnalyticsType = ... - file_path: str = None - # status field is REQUIRED (the way to denote this with pydantic is ...) - status: DetectionStatus = ... 
- data_source: list[str] - tags: DetectionTags - search: Union[str, dict] - how_to_implement: str - known_false_positives: str - check_references: bool = False - references: list - - tests: list[Union[UnitTest, IntegrationTest]] = [] - - # enrichments - datamodel: list = None - deployment: ConfigDetectionConfiguration = None - annotations: dict = None - risk: list = None - playbooks: list[Playbook] = [] - baselines: list[Baseline] = [] - mappings: dict = None - macros: list[Macro] = [] - lookups: list[Lookup] = [] - cve_enrichment: list = None - splunk_app_enrichment: list = None - - source: str = None - nes_fields: str = None - providing_technologies: list = None - runtime: str = None - enabled_by_default: bool = False +#from contentctl.objects.playbook import Playbook +from contentctl.objects.enums import DataSource,ProvidingTechnology +from contentctl.enrichments.cve_enrichment import CveEnrichment, CveEnrichmentObj - class Config: - use_enum_values = True +class Detection_Abstract(SecurityContentObject): + model_config = ConfigDict(use_enum_values=True) + + #contentType: SecurityContentType = SecurityContentType.detections + type: AnalyticsType = Field(...) + status: DetectionStatus = Field(...) + data_source: Optional[List[str]] = None + tags: DetectionTags = Field(...) + search: Union[str, dict[str,Any]] = Field(...) + how_to_implement: str = Field(..., min_length=4) + known_false_positives: str = Field(..., min_length=4) + check_references: bool = False + #data_source: Optional[List[DataSource]] = None + enabled_by_default: bool = False + file_path: FilePath = Field(...) + # For model construction to first attempt construction of the leftmost object. + # From a file, this should be UnitTest. Note this is different than the + # default mode, 'smart' + # https://docs.pydantic.dev/latest/concepts/unions/#left-to-right-mode + # https://github.com/pydantic/pydantic/issues/9101#issuecomment-2019032541 + tests: List[Annotated[Union[UnitTest, IntegrationTest], Field(union_mode='left_to_right')]] = [] # A list of groups of tests, relying on the same data - test_groups: Union[list[TestGroup], None] = None + test_groups: Union[list[TestGroup], None] = Field(None,validate_default=True) - @validator("test_groups", always=True) - def validate_test_groups(cls, value, values) -> Union[list[TestGroup], None]: + @field_validator("test_groups") + @classmethod + def validate_test_groups(cls, value:Union[None, List[TestGroup]], info:ValidationInfo) -> Union[List[TestGroup], None]: """ Validates the `test_groups` field and constructs the model from the list of unit tests if no explicit construct was provided @@ -79,24 +69,357 @@ def validate_test_groups(cls, value, values) -> Union[list[TestGroup], None]: # iterate over the unit tests and create a TestGroup (and as a result, an IntegrationTest) for each test_groups: list[TestGroup] = [] - for unit_test in values["tests"]: - test_group = TestGroup.derive_from_unit_test(unit_test, values["name"]) + for unit_test in info.data.get("tests"): + test_group = TestGroup.derive_from_unit_test(unit_test, info.data.get("name")) test_groups.append(test_group) # now add each integration test to the list of tests for test_group in test_groups: - values["tests"].append(test_group.integration_test) + info.data.get("tests").append(test_group.integration_test) return test_groups - def get_content_dependencies(self) -> list[SecurityContentObject]: - return self.playbooks + self.baselines + self.macros + self.lookups + @computed_field + @property + def 
datamodel(self)->List[DataModel]: + if isinstance(self.search, str): + return [dm for dm in DataModel if dm.value in self.search] + else: + return [] + + @computed_field + @property + def source(self)->str: + if self.file_path is not None: + return self.file_path.absolute().parent.name + else: + raise ValueError(f"Cannot get 'source' for detection {self.name} - 'file_path' was None.") + + deployment: Deployment = Field({}) + + @computed_field + @property + def annotations(self)->dict[str,Union[List[str],int,str]]: + + annotations_dict:dict[str, Union[List[str], int]] = {} + annotations_dict["analytic_story"]=[story.name for story in self.tags.analytic_story] + annotations_dict["confidence"] = self.tags.confidence + if len(self.tags.cve or []) > 0: + annotations_dict["cve"] = self.tags.cve + annotations_dict["impact"] = self.tags.impact + annotations_dict["type"] = self.type + #annotations_dict["version"] = self.version + + #The annotations object is a superset of the mappings object. + # So start with the mapping object. + annotations_dict.update(self.mappings) + + #Make sure that the results are sorted for readability/easier diffs + return dict(sorted(annotations_dict.items(), key=lambda item: item[0])) + + #playbooks: list[Playbook] = [] + + baselines: list[Baseline] = Field([],validate_default=True) + + @computed_field + @property + def mappings(self)->dict[str, List[str]]: + mappings:dict[str,Any] = {} + if len(self.tags.cis20) > 0: + mappings["cis20"] = [tag.value for tag in self.tags.cis20] + if len(self.tags.kill_chain_phases) > 0: + mappings['kill_chain_phases'] = [phase.value for phase in self.tags.kill_chain_phases] + if len(self.tags.mitre_attack_id) > 0: + mappings['mitre_attack'] = self.tags.mitre_attack_id + if len(self.tags.nist) > 0: + mappings['nist'] = [category.value for category in self.tags.nist] + + + # No need to sort the dict! It has been constructed in-order. + # However, if this logic is changed, then consider reordering or + # adding the sort back! + #return dict(sorted(mappings.items(), key=lambda item: item[0])) + return mappings + + macros: list[Macro] = Field([],validate_default=True) + lookups: list[Lookup] = Field([],validate_default=True) + + @computed_field + @property + def cve_enrichment(self)->List[CveEnrichmentObj]: + raise Exception("CVE Enrichment Functionality not currently supported. 
It will be re-added at a later time.") + enriched_cves = [] + for cve_id in self.tags.cve: + print(f"\nEnriching {cve_id}\n") + enriched_cves.append(CveEnrichment.enrich_cve(cve_id)) + + return enriched_cves + + splunk_app_enrichment: Optional[List[dict]] = None + + @computed_field + @property + def nes_fields(self)->Optional[str]: + if self.deployment.alert_action.notable is not None: + return ','.join(self.deployment.alert_action.notable.nes_fields) + else: + return None + + @computed_field + @property + def providing_technologies(self)->List[ProvidingTechnology]: + if isinstance(self.search, str): + return ProvidingTechnology.getProvidingTechFromSearch(self.search) + else: + #Dict-formatted searches (sigma) will not have providing technologies + return [] + + @computed_field + @property + def risk(self)->list[dict[str,Any]]: + risk_objects = [] + risk_object_user_types = {'user', 'username', 'email address'} + risk_object_system_types = {'device', 'endpoint', 'hostname', 'ip address'} + process_threat_object_types = {'process name','process'} + file_threat_object_types = {'file name','file', 'file hash'} + url_threat_object_types = {'url string','url'} + ip_threat_object_types = {'ip address'} + + + for entity in self.tags.observable: + + risk_object = dict() + if 'Victim' in entity.role and entity.type.lower() in risk_object_user_types: + risk_object['risk_object_type'] = 'user' + risk_object['risk_object_field'] = entity.name + risk_object['risk_score'] = self.tags.risk_score + risk_objects.append(risk_object) + + elif 'Victim' in entity.role and entity.type.lower() in risk_object_system_types: + risk_object['risk_object_type'] = 'system' + risk_object['risk_object_field'] = entity.name + risk_object['risk_score'] = self.tags.risk_score + risk_objects.append(risk_object) + + elif 'Attacker' in entity.role and entity.type.lower() in process_threat_object_types: + risk_object['threat_object_field'] = entity.name + risk_object['threat_object_type'] = "process" + risk_objects.append(risk_object) + + elif 'Attacker' in entity.role and entity.type.lower() in file_threat_object_types: + risk_object['threat_object_field'] = entity.name + risk_object['threat_object_type'] = "file_name" + risk_objects.append(risk_object) + + elif 'Attacker' in entity.role and entity.type.lower() in ip_threat_object_types: + risk_object['threat_object_field'] = entity.name + risk_object['threat_object_type'] = "ip_address" + risk_objects.append(risk_object) + + elif 'Attacker' in entity.role and entity.type.lower() in url_threat_object_types: + risk_object['threat_object_field'] = entity.name + risk_object['threat_object_type'] = "url" + risk_objects.append(risk_object) + + else: + risk_object['risk_object_type'] = 'other' + risk_object['risk_object_field'] = entity.name + risk_object['risk_score'] = self.tags.risk_score + risk_objects.append(risk_object) + continue + + + return risk_objects + + + + @computed_field + @property + def metadata(self)->dict[str,str]: + return {'detection_id':str(self.id), + 'deprecated':'1' if self.status==DetectionStatus.deprecated.value else '0', + 'detection_version':str(self.version)} + + @model_serializer + def serialize_model(self): + #Call serializer for parent + super_fields = super().serialize_model() + + #All fields custom to this model + model= { + "tags": self.tags.model_dump(), + "type": self.type, + "search": self.search, + "how_to_implement":self.how_to_implement, + "known_false_positives":self.known_false_positives, + "datamodel": self.datamodel, + "source": 
self.source, + "nes_fields": self.nes_fields, + } + #Only a subset of macro fields are required: + all_macros = [] + for macro in self.macros: + macro_dump:dict = { + "name": macro.name, + "definition": macro.definition, + "description": macro.description + } + if len(macro.arguments) > 0: + macro_dump['arguments'] = macro.arguments + + all_macros.append(macro_dump) + model['macros'] = all_macros + + + all_lookups = [] + for lookup in self.lookups: + if lookup.collection is not None: + all_lookups.append({ + "name":lookup.name, + "description":lookup.description, + "collection":lookup.collection, + "case_sensitive_match": None, + "fields_list":lookup.fields_list}) + elif lookup.filename is not None: + all_lookups.append({ + "name":lookup.name, + "description":lookup.description, + "filename": lookup.filename.name, + "default_match":"true" if lookup.default_match else "false", + "case_sensitive_match": "true" if lookup.case_sensitive_match else "false", + "match_type":lookup.match_type, + "min_matches":lookup.min_matches, + "fields_list":lookup.fields_list}) + model['lookups'] = all_lookups + + + #Combine fields from this model with fields from parent + super_fields.update(model) + + #return the model + return super_fields + + + def model_post_init(self, ctx:dict[str,Any]): + # director: Optional[DirectorOutputDto] = ctx.get("output_dto",None) + # if not isinstance(director,DirectorOutputDto): + # raise ValueError("DirectorOutputDto was not passed in context of Detection model_post_init") + director: Optional[DirectorOutputDto] = ctx.get("output_dto",None) + for story in self.tags.analytic_story: + story.detections.append(self) + + #Ensure that all baselines link to this detection + for baseline in self.baselines: + new_detections = [] + replaced = False + for d in baseline.tags.detections: + if isinstance(d,str) and self.name==d: + new_detections.append(self) + replaced = True + else: + new_detections.append(d) + if replaced is False: + raise ValueError(f"Error, failed to replace detection reference in Baseline '{baseline.name}' to detection '{self.name}'") + baseline.tags.detections = new_detections + + return self + + + + + @field_validator('lookups',mode="before") + @classmethod + def getDetectionLookups(cls, v:list[str], info:ValidationInfo)->list[Lookup]: + director:DirectorOutputDto = info.context.get("output_dto",None) + + search:Union[str,dict] = info.data.get("search",None) + if not isinstance(search,str): + #The search was sigma formatted (or failed other validation and was None), so we will not validate macros in it + return [] + + lookups= Lookup.get_lookups(search, director) + return lookups + + @field_validator('baselines',mode="before") + @classmethod + def mapDetectionNamesToBaselineObjects(cls, v:list[str], info:ValidationInfo)->List[Baseline]: + if len(v) > 0: + raise ValueError("Error, baselines are constructed automatically at runtime. 
Please do not include this field.") + + + name:Union[str,dict] = info.data.get("name",None) + if name is None: + raise ValueError("Error, cannot get Baselines because the Detection does not have a 'name' defined.") + + director:DirectorOutputDto = info.context.get("output_dto",None) + baselines:List[Baseline] = [] + for baseline in director.baselines: + if name in baseline.tags.detections: + baselines.append(baseline) + + return baselines + + @field_validator('macros',mode="before") + @classmethod + def getDetectionMacros(cls, v:list[str], info:ValidationInfo)->list[Macro]: + director:DirectorOutputDto = info.context.get("output_dto",None) + + search:Union[str,dict] = info.data.get("search",None) + if not isinstance(search,str): + #The search was sigma formatted (or failed other validation and was None), so we will not validate macros in it + return [] + + search_name:Union[str,Any] = info.data.get("name",None) + assert isinstance(search_name,str), f"Expected 'search_name' to be a string, instead it was [{type(search_name)}]" + + + + filter_macro_name = search_name.replace(' ', '_').replace('-', '_').replace('.', '_').replace('/', '_').lower() + '_filter' + try: + filter_macro = Macro.mapNamesToSecurityContentObjects([filter_macro_name], director)[0] + except: + # Filter macro did not exist, so create one at runtime + filter_macro = Macro.model_validate({"name":filter_macro_name, + "definition":'search *', + "description":'Update this macro to limit the output results to filter out false positives.'}) + director.macros.append(filter_macro) + + macros_from_search = Macro.get_macros(search, director) + + return macros_from_search + [filter_macro] + + def get_content_dependencies(self)->list[SecurityContentObject]: + #Do this separately to satisfy type checker + objects: list[SecurityContentObject] = [] + objects += self.macros + objects += self.lookups + return objects + + + @field_validator("deployment", mode="before") + def getDeployment(cls, v:Any, info:ValidationInfo)->Deployment: + return Deployment.getDeployment(v,info) + return SecurityContentObject.getDeploymentFromType(info.data.get("type",None), info) + # director: Optional[DirectorOutputDto] = info.context.get("output_dto",None) + # if not director: + # raise ValueError("Cannot set deployment - DirectorOutputDto not passed to Detection Constructor in context") + + + # typeField = info.data.get("type",None) + + # deps = [deployment for deployment in director.deployments if deployment.type == typeField] + # if len(deps) == 1: + # return deps[0] + # elif len(deps) == 0: + # raise ValueError(f"Failed to find Deployment for type '{typeField}' "\ + # f"from possible {[deployment.type for deployment in director.deployments]}") + # else: + # raise ValueError(f"Found more than 1 ({len(deps)}) Deployment for type '{typeField}' "\ + # f"from possible {[deployment.type for deployment in director.deployments]}") + @staticmethod - def get_detections_from_filenames( - detection_filenames: set[str], - all_detections: list[Detection_Abstract] - ) -> list[Detection_Abstract]: + def get_detections_from_filenames(detection_filenames:set[str], all_detections:list[Detection_Abstract])->list[Detection_Abstract]: detection_filenames = set(str(pathlib.Path(filename).absolute()) for filename in detection_filenames) detection_dict = SecurityContentObject.create_filename_to_content_dict(all_detections) @@ -104,6 +427,7 @@ def get_detections_from_filenames( return [detection_dict[detection_filename] for detection_filename in detection_filenames] except 
Exception as e: raise Exception(f"Failed to find detection object for modified detection: {str(e)}") + # @validator("type") # def type_valid(cls, v, values): @@ -111,25 +435,9 @@ def get_detections_from_filenames( # raise ValueError("not valid analytics type: " + values["name"]) # return v - @validator('how_to_implement', 'search', 'known_false_positives') - def encode_error(cls, v, values, field): - if not isinstance(v, str): - if isinstance(v, dict) and field.name == "search": - # This is a special case of the search field. It can be a dict, containing - # a sigma search, if we are running the converter. So we will not - # validate the field further. Additional validation will be done - # during conversion phase later on - return v - else: - # No other fields should contain a non-str type: - raise ValueError( - f"Error validating field '{field.name}'. Field MUST be be a string, not type '{type(v)}' " - ) - - return SecurityContentObject.free_text_field_valid(cls, v, values, field) - - @validator('enabled_by_default') - def only_enabled_if_production_status(cls,v,values): + + @field_validator("enabled_by_default",mode="before") + def only_enabled_if_production_status(cls,v:Any,info:ValidationInfo)->bool: ''' A detection can ONLY be enabled by default if it is a PRODUCTION detection. If not (for example, it is EXPERIMENTAL or DEPRECATED) then we will throw an exception. @@ -138,9 +446,9 @@ def only_enabled_if_production_status(cls,v,values): ''' if v == False: return v - - status = DetectionStatus(values.get("status")) - searchType = AnalyticsType(values.get("type")) + + status = DetectionStatus(info.data.get("status")) + searchType = AnalyticsType(info.data.get("type")) errors = [] if status != DetectionStatus.production: errors.append(f"status is '{status.name}'. 
Detections that are enabled by default MUST be '{DetectionStatus.production.value}'")
@@ -152,79 +460,109 @@ def only_enabled_if_production_status(cls,v,values):
             raise ValueError(f"Detection is 'enabled_by_default: true' however \n - {error_message}")
         return v
-
-
-    @validator("status")
-    def validation_for_ba_only(cls, v, values):
-        # Ensure that only a BA detection can have status: validation
-        p = pathlib.Path(values['file_path'])
-        if v == DetectionStatus.validation.value:
-            if p.name.startswith("ssa___"):
-                pass
-            else:
-                raise ValueError(
-                    f"The following is NOT an ssa_ detection, but has 'status: {v}' which may ONLY be used for "
-                    f"ssa_ detections: {values['file_path']}"
-                )
-
-        return v
+    
-    # @root_validator
-    # def search_validation(cls, values):
-    #     if 'ssa_' not in values['file_path']:
-    #         if not '_filter' in values['search']:
-    #             raise ValueError('filter macro missing in: ' + values["name"])
-    #         if any(x in values['search'] for x in ['eventtype=', 'sourcetype=', ' source=', 'index=']):
-    #             if not 'index=_internal' in values['search']:
-    #                 raise ValueError('Use source macro instead of eventtype, sourcetype, source or index in detection: ' + values["name"])
-    #     return values
-
-    # disable it because of performance reasons
-    # @validator('references')
-    # def references_check(cls, v, values):
-    #     return LinkValidator.check_references(v, values["name"])
-    #     return v
+    @model_validator(mode="after")
+    def addTags_nist(self):
+        if self.type == AnalyticsType.TTP.value:
+            self.tags.nist = [NistCategory.DE_CM]
+        else:
+            self.tags.nist = [NistCategory.DE_AE]
+        return self
+
+    @model_validator(mode="after")
+    def ensureProperObservablesExist(self):
+        """
+        If a detection is PRODUCTION and either TTP or ANOMALY, then it MUST have an Observable with the VICTIM role.
-    @validator("search")
-    def search_obsersables_exist_validate(cls, v, values):
-        if type(v) is str:
-            tags: DetectionTags = values.get("tags")
-            if tags is None:
-                raise ValueError("Unable to parse Detection Tags. Please resolve Detection Tags errors")
+        Returns:
+            self: Returns itself if the validation passes
+        """
+        if self.status not in [DetectionStatus.production.value]:
+            # Only perform this validation on production detections
+            return self
-            observable_fields = [ob.name.lower() for ob in tags.observable]
+        if self.type not in [AnalyticsType.TTP.value, AnalyticsType.Anomaly.value]:
+            # Only perform this validation on TTP and Anomaly detections
+            return self
+
+        #Detection is required to have a victim
+        roles = []
+        for observable in self.tags.observable:
+            roles.extend(observable.role)
+
+        if roles.count("Victim") == 0:
+            raise ValueError(f"Error, there must be AT LEAST 1 Observable with the role 'Victim' declared in Detection.tags.observables.
However, none were found.") + + # Exactly one victim was found + return self + - # All $field$ fields from the message must appear in the search + @model_validator(mode="after") + def search_observables_exist_validate(self): + + if isinstance(self.search, str): + + observable_fields = [ob.name.lower() for ob in self.tags.observable] + + #All $field$ fields from the message must appear in the search field_match_regex = r"\$([^\s.]*)\$" - - message_fields = [ - match.replace("$", "").lower() for match in re.findall(field_match_regex, tags.message.lower()) - ] - missing_fields = set([field for field in observable_fields if field not in v.lower()]) + + + if self.tags.message: + message_fields = [match.replace("$", "").lower() for match in re.findall(field_match_regex, self.tags.message.lower())] + missing_fields = set([field for field in observable_fields if field not in self.search.lower()]) + else: + message_fields = [] + missing_fields = set() + error_messages = [] if len(missing_fields) > 0: - error_messages.append( - f"The following fields are declared as observables, but do not exist in the search: " - f"{missing_fields}" - ) + error_messages.append(f"The following fields are declared as observables, but do not exist in the search: {missing_fields}") - missing_fields = set([field for field in message_fields if field not in v.lower()]) + + missing_fields = set([field for field in message_fields if field not in self.search.lower()]) if len(missing_fields) > 0: - error_messages.append( - f"The following fields are used as fields in the message, but do not exist in the search: " - f"{missing_fields}" - ) + error_messages.append(f"The following fields are used as fields in the message, but do not exist in the search: {missing_fields}") + + if len(error_messages) > 0 and self.status == DetectionStatus.production.value: + msg = "Use of fields in observables/messages that do not appear in search:\n\t- "+ "\n\t- ".join(error_messages) + raise(ValueError(msg)) + + # Found everything + return self + + + @model_validator(mode='after') + def ensurePresenceOfRequiredTests(self): + # TODO (cmcginley): Fix detection_abstract.tests_validate so that it surfaces validation errors + # (e.g. a lack of tests) to the final results, instead of just showing a failed detection w/ + # no tests (maybe have a message propagated at the detection level? do a separate coverage + # check as part of validation?): + + + #Only production analytics require tests + if self.status != DetectionStatus.production.value: + return self + + # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined types, requires them. 
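(Editor's aside, not part of the patch: the hunk above migrates pydantic v1 `@validator`/`@root_validator` hooks to their v2 equivalents. The minimal, self-contained sketch below shows the two patterns in isolation; `SketchDetection` and its fields are illustrative stand-ins of my own, not contentctl's real Detection class.)

from typing import Any
from pydantic import BaseModel, ValidationInfo, field_validator, model_validator

class SketchDetection(BaseModel):
    status: str = "production"
    type: str = "TTP"
    enabled_by_default: bool = False

    # v2 replaces `@validator(..., pre=True)` with `@field_validator(..., mode="before")`;
    # previously validated sibling fields are read from `info.data` instead of a `values` dict.
    @field_validator("enabled_by_default", mode="before")
    @classmethod
    def only_enabled_if_production(cls, v: Any, info: ValidationInfo) -> bool:
        if v and info.data.get("status") != "production":
            raise ValueError("enabled_by_default requires status: production")
        return bool(v)

    # v2 replaces `@root_validator` with `@model_validator(mode="after")`, which runs on the
    # constructed instance and must return `self`.
    @model_validator(mode="after")
    def check_consistency(self) -> "SketchDetection":
        if self.type not in {"TTP", "Anomaly", "Correlation", "Hunting"}:
            raise ValueError(f"unknown analytics type: {self.type}")
        return self

# Usage: SketchDetection(status="experimental", enabled_by_default=True) raises a ValidationError.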
+ # Accordingly, we do not need to do additional checks if the type is Correlation + if self.type in set([AnalyticsType.Correlation.value]): + return self + + if self.tags.manual_test is not None: + for test in self.tests: + test.skip(f"TEST SKIPPED: Detection marked as 'manual_test' with explanation: '{self.tags.manual_test}'") - if len(error_messages) > 0 and values.get("status") == DetectionStatus.production.value: - msg = "\n\t".join(error_messages) - raise (ValueError(msg)) + if len(self.tests) == 0: + raise ValueError(f"At least one test is REQUIRED for production detection: {self.name}") + - # Found everything - return v + return self - @validator("tests", always=True) - def tests_validate(cls, v, values): + @field_validator("tests") + def tests_validate(cls, v, info:ValidationInfo): # TODO (cmcginley): Fix detection_abstract.tests_validate so that it surfaces validation errors # (e.g. a lack of tests) to the final results, instead of just showing a failed detection w/ # no tests (maybe have a message propagated at the detection level? do a separate coverage @@ -232,18 +570,18 @@ def tests_validate(cls, v, values): #Only production analytics require tests - if values.get("status","") != DetectionStatus.production.value: + if info.data.get("status","") != DetectionStatus.production.value: return v # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined types, requires them. # Accordingly, we do not need to do additional checks if the type is Correlation - if values.get("type","") in set([AnalyticsType.Correlation.value]): + if info.data.get("type","") in set([AnalyticsType.Correlation.value]): return v # Ensure that there is at least 1 test if len(v) == 0: - if values.get("tags",None) and values.get("tags").manual_test is not None: + if info.data.get("tags",None) and info.data.get("tags").manual_test is not None: # Detections that are manual_test MAY have detections, but it is not required. If they # do not have one, then create one which will be a placeholder. # Note that this fake UnitTest (and by extension, Integration Test) will NOT be generated @@ -252,19 +590,12 @@ def tests_validate(cls, v, values): return [placeholder_test] else: - raise ValueError("At least one test is REQUIRED for production detection: " + values.get("name", "NO NAME FOUND")) + raise ValueError("At least one test is REQUIRED for production detection: " + info.data.get("name", "NO NAME FOUND")) #No issues - at least one test provided for production type requiring testing return v - - @validator("datamodel") - def datamodel_valid(cls, v, values): - for datamodel in v: - if datamodel not in [el.name for el in DataModel]: - raise ValueError("not valid data model: " + values["name"]) - return v - + def all_tests_successful(self) -> bool: """ Checks that all tests in the detection succeeded. 
If no tests are defined, consider that a @@ -346,6 +677,7 @@ def get_summary( summary_dict["tests"].append(result) # Return the summary + return summary_dict @@ -354,3 +686,4 @@ def getMetadata(self)->dict[str,str]: 'deprecated':'1' if self.status==DetectionStatus.deprecated.value else '0', 'detection_version':str(self.version)} + diff --git a/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py b/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py index fdc296d3..8f160795 100644 --- a/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +++ b/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py @@ -1,71 +1,175 @@ from __future__ import annotations +from typing import TYPE_CHECKING, Self + +if TYPE_CHECKING: + from contentctl.objects.deployment import Deployment + from contentctl.objects.security_content_object import SecurityContentObject + from contentctl.objects.config import Config + from contentctl.input.director import DirectorOutputDto + +from contentctl.objects.enums import AnalyticsType import re import abc -import string -import uuid -from datetime import datetime -from pydantic import BaseModel, validator, ValidationError, Field -from contentctl.objects.enums import SecurityContentType -from typing import Tuple - import uuid +import datetime +from pydantic import BaseModel, field_validator, Field, ValidationInfo, FilePath, HttpUrl, NonNegativeInt, ConfigDict, model_validator, model_serializer +from typing import Tuple, Optional, List, Union import pathlib + + + + + +NO_FILE_NAME = "NO_FILE_NAME" + -NO_FILE_BUILT_AT_RUNTIME = "NO_FILE_BUILT_AT_RUNTIME" class SecurityContentObject_Abstract(BaseModel, abc.ABC): - #contentType: SecurityContentType - name: str - author: str = "UNKNOWN_AUTHOR" - date: str = "1990-01-01" - version: int = 1 + model_config = ConfigDict(use_enum_values=True,validate_default=True) + # name: str = ... + # author: str = Field(...,max_length=255) + # date: datetime.date = Field(...) + # version: NonNegativeInt = ... + # id: uuid.UUID = Field(default_factory=uuid.uuid4) #we set a default here until all content has a uuid + # description: str = Field(...,max_length=1000) + # file_path: FilePath = Field(...) 
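(Editor's aside, not part of the patch: `SecurityContentObject_Abstract` below defines a `@model_serializer`, the pydantic v2 hook for controlling what `model_dump()` returns, in place of overriding `.dict()`. A minimal sketch of the pattern, with an illustrative `SketchObject` model standing in for the real class:)

import datetime
import uuid
from pydantic import BaseModel, Field, model_serializer

class SketchObject(BaseModel):
    name: str = "NO_NAME"
    date: datetime.date = Field(default_factory=datetime.date.today)
    id: uuid.UUID = Field(default_factory=uuid.uuid4)

    # The serializer decides the exact shape of model_dump()/model_dump_json(); here every
    # value is flattened to a string, mirroring the approach used in the patch.
    @model_serializer
    def serialize_model(self) -> dict:
        return {"name": self.name, "date": str(self.date), "id": str(self.id)}

obj = SketchObject(name="Example Object")
print(obj.model_dump())  # {'name': 'Example Object', 'date': '...', 'id': '...'}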
+ # references: Optional[List[HttpUrl]] = None + + name: str = Field("NO_NAME") + author: str = Field("Content Author",max_length=255) + date: datetime.date = Field(datetime.date.today()) + version: NonNegativeInt = 1 id: uuid.UUID = Field(default_factory=uuid.uuid4) #we set a default here until all content has a uuid - description: str = "UNKNOWN_DESCRIPTION" - file_path: str = "NO_FILE_BUILT_AT_RUNTIME" + description: str = Field("Enter Description Here",max_length=10000) + file_path: Optional[FilePath] = None + references: Optional[List[HttpUrl]] = None - @validator('name') - def name_max_length(cls, v): - if len(v) > 67: - raise ValueError('name is longer then 67 chars: ' + v) - return v - @validator('name') - def name_invalid_chars(cls, v): - invalidChars = set(string.punctuation.replace("-", "")) - if any(char in invalidChars for char in v): - raise ValueError('invalid chars used in name: ' + v) - return v + @model_serializer + def serialize_model(self): + return { + "name": self.name, + "author": self.author, + "date": str(self.date), + "version": self.version, + "id": str(self.id), + "description": self.description, + "references": [str(url) for url in self.references or []] + } + + @staticmethod + def objectListToNameList(objects:list[SecurityContentObject], config:Optional[Config]=None)->list[str]: + return [object.getName(config) for object in objects] + + # This function is overloadable by specific types if they want to redefine names, for example + # to have the format ESCU - NAME - Rule (config.tag - self.name - Rule) + def getName(self, config:Optional[Config])->str: + return self.name + + + @classmethod + def contentNameToFileName(cls, content_name:str)->str: + return content_name \ + .replace(' ', '_') \ + .replace('-','_') \ + .replace('.','_') \ + .replace('/','_') \ + .lower() + ".yml" + + + @model_validator(mode="after") + def ensureFileNameMatchesSearchName(self): + file_name = self.contentNameToFileName(self.name) + + if (self.file_path is not None and file_name != self.file_path.name): + raise ValueError(f"The file name MUST be based off the content 'name' field:\n"\ + f"\t- Expected File Name: {file_name}\n"\ + f"\t- Actual File Name : {self.file_path.name}") + + return self - @validator('date') - def date_valid(cls, v, values): - try: - datetime.strptime(v, "%Y-%m-%d") - except: - raise ValueError('date is not in format YYYY-MM-DD: ' + values["name"]) + @field_validator('file_path') + @classmethod + def file_path_valid(cls, v: Optional[pathlib.PosixPath], info: ValidationInfo): + if not v: + #It's possible that the object has no file path - for example filter macros that are created at runtime + return v + if not v.name.endswith(".yml"): + raise ValueError(f"All Security Content Objects must be YML files and end in .yml. 
The following file does not: '{v}'") return v - @staticmethod - def free_text_field_valid(input_cls, v, values, field): - try: - v.encode('ascii') - except UnicodeEncodeError as e: - print(f"Potential Ascii encoding error in {values['name']}:{field.name} - {str(e)}") - except Exception as e: - print(f"Unknown encoding error in {values['name']}:{field.name} - {str(e)}") + def getReferencesListForJson(self)->List[str]: + return [str(url) for url in self.references or []] + @classmethod + def mapNamesToSecurityContentObjects(cls, v: list[str], director:Union[DirectorOutputDto,None])->list[Self]: + if director is not None: + name_map = director.name_to_content_map + else: + name_map = {} - if bool(re.search(r"[^\\]\n", v)): - raise ValueError(f"Unexpected newline(s) in {values['name']}:{field.name}. Newline characters MUST be prefixed with \\") - return v - - - @validator("name", "author", 'description') - def description_valid(cls, v, values, field): + + + mappedObjects: list[Self] = [] + mistyped_objects: list[SecurityContentObject_Abstract] = [] + missing_objects: list[str] = [] + for object_name in v: + found_object = name_map.get(object_name,None) + if not found_object: + missing_objects.append(object_name) + elif not isinstance(found_object,cls): + mistyped_objects.append(found_object) + else: + mappedObjects.append(found_object) - return SecurityContentObject_Abstract.free_text_field_valid(cls,v,values,field) - + errors:list[str] = [] + if len(missing_objects) > 0: + errors.append(f"Failed to find the following '{cls.__name__}': {missing_objects}") + if len(missing_objects) > 0: + for mistyped_object in mistyped_objects: + errors.append(f"'{mistyped_object.name}' expected to have type '{type(Self)}', but actually had type '{type(mistyped_object)}'") + + if len(errors) > 0: + error_string = "\n - ".join(errors) + raise ValueError(f"Found {len(errors)} issues when resolving references Security Content Object names:\n - {error_string}") + + #Sort all objects sorted by name + return sorted(mappedObjects, key=lambda o: o.name) + + @staticmethod + def getDeploymentFromType(typeField:Union[str,None], info:ValidationInfo)->Deployment: + if typeField is None: + raise ValueError("'type:' field is missing from YML.") + director: Optional[DirectorOutputDto] = info.context.get("output_dto",None) + if not director: + raise ValueError("Cannot set deployment - DirectorOutputDto not passed to Detection Constructor in context") + + type_to_deployment_name_map = {AnalyticsType.TTP.value:"ESCU Default Configuration TTP", + AnalyticsType.Hunting.value:"ESCU Default Configuration Hunting", + AnalyticsType.Correlation.value: "ESCU Default Configuration Correlation", + AnalyticsType.Anomaly.value: "ESCU Default Configuration Anomaly", + "Baseline": "ESCU Default Configuration Baseline", + } + converted_type_field = type_to_deployment_name_map[typeField] + + #TODO: This is clunky, but is imported here to resolve some circular import errors + from contentctl.objects.deployment import Deployment + + deployments = Deployment.mapNamesToSecurityContentObjects([converted_type_field], director) + if len(deployments) == 1: + return deployments[0] + elif len(deployments) == 0: + raise ValueError(f"Failed to find Deployment for type '{converted_type_field}' "\ + f"from possible {[deployment.type for deployment in director.deployments]}") + else: + raise ValueError(f"Found more than 1 ({len(deployments)}) Deployment for type '{converted_type_field}' "\ + f"from possible {[deployment.type for deployment in 
director.deployments]}") + + + @staticmethod def get_objects_by_name(names_to_find:set[str], objects_to_search:list[SecurityContentObject_Abstract])->Tuple[list[SecurityContentObject_Abstract], set[str]]: + raise Exception("get_objects_by_name deprecated") found_objects = list(filter(lambda obj: obj.name in names_to_find, objects_to_search)) found_names = set([obj.name for obj in found_objects]) missing_names = names_to_find - found_names @@ -74,10 +178,10 @@ def get_objects_by_name(names_to_find:set[str], objects_to_search:list[SecurityC @staticmethod def create_filename_to_content_dict(all_objects:list[SecurityContentObject_Abstract])->dict[str,SecurityContentObject_Abstract]: name_dict:dict[str,SecurityContentObject_Abstract] = dict() - for object in all_objects: name_dict[str(pathlib.Path(object.file_path))] = object - return name_dict + + \ No newline at end of file diff --git a/contentctl/objects/alert_action.py b/contentctl/objects/alert_action.py new file mode 100644 index 00000000..f2f745d4 --- /dev/null +++ b/contentctl/objects/alert_action.py @@ -0,0 +1,40 @@ +from __future__ import annotations +from pydantic import BaseModel, model_serializer +from typing import Optional + +from contentctl.objects.deployment_email import DeploymentEmail +from contentctl.objects.deployment_notable import DeploymentNotable +from contentctl.objects.deployment_rba import DeploymentRBA +from contentctl.objects.deployment_slack import DeploymentSlack +from contentctl.objects.deployment_phantom import DeploymentPhantom + +class AlertAction(BaseModel): + email: Optional[DeploymentEmail] = None + notable: Optional[DeploymentNotable] = None + rba: Optional[DeploymentRBA] = DeploymentRBA() + slack: Optional[DeploymentSlack] = None + phantom: Optional[DeploymentPhantom] = None + + + @model_serializer + def serialize_model(self): + #Call serializer for parent + model = {} + + if self.email is not None: + raise Exception("Email not implemented") + + if self.notable is not None: + model['notable'] = self.notable + + if self.rba is not None and self.rba.enabled: + model['rba'] = {'enabled': "true"} + + if self.slack is not None: + raise Exception("Slack not implemented") + + if self.phantom is not None: + raise Exception("Phantom not implemented") + + #return the model + return model \ No newline at end of file diff --git a/contentctl/objects/app.py b/contentctl/objects/app.py deleted file mode 100644 index dc2a0210..00000000 --- a/contentctl/objects/app.py +++ /dev/null @@ -1,214 +0,0 @@ -# Needed for a staticmethod to be able to return an instance of the class it belongs to -from __future__ import annotations - - -import pathlib -import re -import os - -from pydantic import BaseModel, validator, ValidationError, Extra, Field -from dataclasses import dataclass -from datetime import datetime -from typing import Union -import validators -from contentctl.objects.security_content_object import SecurityContentObject -from contentctl.objects.enums import DataModel -from contentctl.helper.utils import Utils -import yaml - -SPLUNKBASE_URL = "https://splunkbase.splunk.com/app/{uid}/release/{release}/download" -ENVIRONMENT_PATH_NOT_SET = "ENVIRONMENT_PATH_NOT_SET" - -class App(BaseModel, extra=Extra.forbid): - - # uid is a numeric identifier assigned by splunkbase, so - # homemade applications will not have this - uid: Union[int, None] - - # appid is basically the internal name of your app - appid: str - - # Title is the human readable name for your application - title: str - - # Self explanatory - description: 
Union[str, None] - release: str - - local_path: Union[str, None] - http_path: Union[str, None] - # Splunkbase path is made of the combination of uid and release fields - splunkbase_path: Union[str, None] - - # Ultimate source of the app. Can be a local path or a Splunkbase Path. - # This will be set via a function call and should not be provided in the YML - # Note that this is the path relative to the container mount - environment_path: str = ENVIRONMENT_PATH_NOT_SET - force_local:bool = False - - def configure_app_source_for_container( - self, - splunkbase_username: Union[str, None], - splunkbase_password: Union[str, None], - apps_directory: pathlib.Path, - container_mount_path: pathlib.Path, - ): - - splunkbase_creds_provided = ( - splunkbase_username is not None and splunkbase_password is not None - ) - - if splunkbase_creds_provided and self.splunkbase_path is not None and not self.force_local: - self.environment_path = self.splunkbase_path - - elif self.local_path is not None: - # local path existence already validated - filename = pathlib.Path(self.local_path) - destination = str(apps_directory / filename.name) - Utils.copy_local_file(self.local_path, destination, verbose_print=True) - self.environment_path = str(container_mount_path / filename.name) - - elif self.http_path is not None: - from urllib.parse import urlparse - - path_on_server = str(urlparse(self.http_path).path) - filename = pathlib.Path(path_on_server) - download_path = str(apps_directory / filename.name) - Utils.download_file_from_http(self.http_path, download_path) - self.environment_path = str(container_mount_path / filename.name) - - else: - raise ( - Exception( - f"Unable to download app {self.title}:\n" - f"Splunkbase Path : {self.splunkbase_path}\n" - f"local_path : {self.local_path}\n" - f"http_path : {self.http_path}\n" - f"Splunkbase Creds: {splunkbase_creds_provided}\n" - ) - ) - - @staticmethod - def validate_string_alphanumeric_with_underscores(input: str) -> bool: - if len(input) == 0: - raise (ValueError(f"String was length 0")) - - for letter in input: - if not (letter.isalnum() or letter in "_-"): - raise ( - ValueError( - f"String '{input}' can only contain alphanumeric characters, underscores, and hyphens." 
- ) - ) - return True - - @validator("uid") - def validate_uid(cls, v): - return v - - @validator("appid") - def validate_appid(cls, v): - # Called function raises exception on failure, so we don't need to raise it here - cls.validate_string_alphanumeric_with_underscores(v) - return v - - @validator("title") - def validate_title(cls, v): - # Basically, a title can be any string - return v - - @validator("description") - def validate_description(cls, v): - # description can be anything - return v - - @validator("release") - def validate_release(cls, v): - # release can be any string - return v - - @validator("local_path") - def validate_local_path(cls, v): - if v is not None: - p = pathlib.Path(v) - if not p.exists(): - raise (ValueError(f"The path local_path {p} does not exist")) - elif not p.is_file(): - raise (ValueError(f"The path local_path {p} exists, but is not a file")) - - # release can be any string - return v - - @validator("http_path") - def validate_http_path(cls, v, values): - if v is not None: - try: - if bool(validators.url(v)) == False: - raise ValueError(f"URL '{v}' is not a valid URL") - except Exception as e: - raise (ValueError(f"Error validating the http_path: {str(e)}")) - return v - - @validator("splunkbase_path") - def validate_splunkbase_path(cls, v, values): - - if v is not None: - try: - if bool(validators.url(v)) == False: - raise ValueError(f"splunkbase_url {v} is not a valid URL") - except Exception as e: - raise (ValueError(f"Error validating the splunkbase_url: {str(e)}")) - - if ( - bool( - re.match( - "^https://splunkbase\.splunk\.com/app/\d+/release/.+/download$", - v, - ) - ) - == False - ): - raise ( - ValueError( - f"splunkbase_url {v} does not match the format {SPLUNKBASE_URL}" - ) - ) - - # Try to form the URL and error out if Splunkbase is the only place to get the app - if values["uid"] is None: - if values["must_download_from_splunkbase"]: - raise ( - ValueError( - f"Error building splunkbase_url. Attempting to" - f" build the url for '{values['title']}', but no " - f"uid was supplied." - ) - ) - else: - return None - - if values["release"] is None: - if values["must_download_from_splunkbase"]: - raise ( - ValueError( - f"Error building splunkbase_url. Attempting to" - f" build the url for '{values['title']}', but no " - f"release was supplied." 
- ) - ) - else: - return None - return SPLUNKBASE_URL.format(uid=values["uid"], release=values["release"]) - - @staticmethod - def get_default_apps() -> list[App]: - all_app_objs: list[App] = [] - with open( - os.path.join(os.path.dirname(__file__), "../", "templates/app_default.yml"), - "r", - ) as app_data: - all_apps_raw = yaml.safe_load(app_data) - for a in all_apps_raw: - app_obj = App.parse_obj(a) - all_app_objs.append(app_obj) - return all_app_objs diff --git a/contentctl/objects/atomic.py b/contentctl/objects/atomic.py new file mode 100644 index 00000000..e0abc30e --- /dev/null +++ b/contentctl/objects/atomic.py @@ -0,0 +1,212 @@ +from __future__ import annotations +from contentctl.input.yml_reader import YmlReader +from pydantic import BaseModel, model_validator, ConfigDict, FilePath, UUID4 +from typing import List, Optional, Dict, Union, Self +import pathlib +# We should determine if we want to use StrEnum, which is only present in Python3.11+ +# Alternatively, we can use +# class SupportedPlatform(str, enum.Enum): +# or install the StrEnum library from pip + +from enum import StrEnum, auto + + +class SupportedPlatform(StrEnum): + windows = auto() + linux = auto() + macos = auto() + containers = auto() + # Because the following fields contain special characters + # (which cannot be field names) we must specifiy them manually + google_workspace = "google-workspace" + iaas_gcp = "iaas:gcp" + iaas_azure = "iaas:azure" + iaas_aws = "iaas:aws" + azure_ad = "azure-ad" + office_365 = "office-365" + + + +class InputArgumentType(StrEnum): + string = auto() + path = auto() + url = auto() + integer = auto() + float = auto() + # Cannot use auto() since the case sensitivity is important + # These should likely be converted in the ART repo to use the same case + # As the defined types above + String = "String" + Path = "Path" + Url = "Url" + +class AtomicExecutor(BaseModel): + name: str + elevation_required: Optional[bool] = False #Appears to be optional + command: Optional[str] = None + steps: Optional[str] = None + cleanup_command: Optional[str] = None + + @model_validator(mode='after') + def ensure_mutually_exclusive_fields(self)->AtomicExecutor: + if self.command is not None and self.steps is not None: + raise ValueError("command and steps cannot both be defined in the executor section. Exactly one must be defined.") + elif self.command is None and self.steps is None: + raise ValueError("Neither command nor steps were defined in the executor section. 
Exactly one must be defined.") + return self + + + +class InputArgument(BaseModel): + model_config = ConfigDict(extra='forbid') + description: str + type: InputArgumentType + default: Union[str,int,float,None] = None + + +class DependencyExecutorType(StrEnum): + powershell = auto() + sh = auto() + bash = auto() + command_prompt = auto() + +class AtomicDependency(BaseModel): + model_config = ConfigDict(extra='forbid') + description: str + prereq_command: str + get_prereq_command: str + +class AtomicTest(BaseModel): + model_config = ConfigDict(extra='forbid') + name: str + auto_generated_guid: UUID4 + description: str + supported_platforms: List[SupportedPlatform] + executor: AtomicExecutor + input_arguments: Optional[Dict[str,InputArgument]] = None + dependencies: Optional[List[AtomicDependency]] = None + dependency_executor_name: Optional[DependencyExecutorType] = None + + @staticmethod + def AtomicTestWhenEnrichmentIsDisabled(auto_generated_guid: UUID4)->Self: + return AtomicTest(name="Placeholder Atomic Test (enrichment disabled)", + auto_generated_guid=auto_generated_guid, + description="This is a placeholder AtomicTest. Because enrichments were not enabled, it has not been validated against the real Atomic Red Team Repo.", + supported_platforms=[], + executor=AtomicExecutor(name="Placeholder Executor (enrichment disabled)", + command="Placeholder command (enrichment disabled)")) + + @staticmethod + def AtomicTestWhenTestIsMissing(auto_generated_guid: UUID4)->Self: + return AtomicTest(name="Missing Atomic", + auto_generated_guid=auto_generated_guid, + description="This is a placeholder AtomicTest. Either the auto_generated_guid is incorrect or it there was an exception while parsing its AtomicFile..", + supported_platforms=[], + executor=AtomicExecutor(name="Placeholder Executor (failed to find auto_generated_guid)", + command="Placeholder command (failed to find auto_generated_guid)")) + + + @classmethod + def getAtomicByAtomicGuid(cls, guid: UUID4, all_atomics:Union[List[AtomicTest],None])->AtomicTest: + if all_atomics is None: + return AtomicTest.AtomicTestWhenEnrichmentIsDisabled(guid) + matching_atomics = [atomic for atomic in all_atomics if atomic.auto_generated_guid == guid] + if len(matching_atomics) == 0: + raise ValueError(f"Unable to find atomic_guid {guid} in {len(all_atomics)} atomic_tests from ART Repo") + elif len(matching_atomics) > 1: + raise ValueError(f"Found {len(matching_atomics)} matching tests for atomic_guid {guid} in {len(all_atomics)} atomic_tests from ART Repo") + + return matching_atomics[0] + + @classmethod + def parseArtRepo(cls, repo_path:pathlib.Path)->List[AtomicFile]: + if not repo_path.is_dir(): + print(f"WARNING: Atomic Red Team repo does NOT exist at {repo_path.absolute()}. You can check it out with:\n * git clone --single-branch https://github.com/redcanaryco/atomic-red-team. This will ONLY throw a validation error if you reference atomid_guids in your detection(s).") + return [] + atomics_path = repo_path/"atomics" + if not atomics_path.is_dir(): + print(f"WARNING: Atomic Red Team repo exists at {repo_path.absolute}, but atomics directory does NOT exist at {atomics_path.absolute()}. Was it deleted or renamed? 
This will ONLY throw a validation error if you reference atomid_guids in your detection(s).") + return [] + + + atomic_files:List[AtomicFile] = [] + error_messages:List[str] = [] + for obj_path in atomics_path.glob("**/T*.yaml"): + try: + atomic_files.append(cls.constructAtomicFile(obj_path)) + except Exception as e: + error_messages.append(f"File [{obj_path}]\n{str(e)}") + if len(error_messages) > 0: + exceptions_string = '\n\n'.join(error_messages) + print(f"WARNING: The following [{len(error_messages)}] ERRORS were generated when parsing the Atomic Red Team Repo.\n" + "Please raise an issue so that they can be fixed at https://github.com/redcanaryco/atomic-red-team/issues.\n" + "Note that this is only a warning and contentctl will ignore Atomics contained in these files.\n" + f"However, if you have written a detection that references them, 'contentctl build --enrichments' will fail:\n\n{exceptions_string}") + + return atomic_files + + @classmethod + def constructAtomicFile(cls, file_path:pathlib.Path)->AtomicFile: + yml_dict = YmlReader.load_file(file_path) + atomic_file = AtomicFile.model_validate(yml_dict) + return atomic_file + + @classmethod + def getAtomicTestsFromArtRepo(cls, repo_path:pathlib.Path, enabled:bool=True)->Union[List[AtomicTest],None]: + # Get all the atomic files. Note that if the ART repo is not found, we will not throw an error, + # but will not have any atomics. This means that if atomic_guids are referenced during validation, + # validation for those detections will fail + if not enabled: + return None + + atomic_files = cls.getAtomicFilesFromArtRepo(repo_path) + + atomic_tests:List[AtomicTest] = [] + for atomic_file in atomic_files: + atomic_tests.extend(atomic_file.atomic_tests) + print(f"Found [{len(atomic_tests)}] Atomic Simulations in the Atomic Red Team Repo!") + return atomic_tests + + + @classmethod + def getAtomicFilesFromArtRepo(cls, repo_path:pathlib.Path)->List[AtomicFile]: + return cls.parseArtRepo(repo_path) + + + + + + +class AtomicFile(BaseModel): + model_config = ConfigDict(extra='forbid') + file_path: FilePath + attack_technique: str + display_name: str + atomic_tests: List[AtomicTest] + + + + +# ATOMICS_PATH = pathlib.Path("./atomics") +# atomic_objects = [] +# atomic_simulations = [] +# for obj_path in ATOMICS_PATH.glob("**/T*.yaml"): +# try: +# with open(obj_path, 'r', encoding="utf-8") as obj_handle: +# obj_data = yaml.load(obj_handle, Loader=yaml.CSafeLoader) +# atomic_obj = AtomicFile.model_validate(obj_data) +# except Exception as e: +# print(f"Error parsing object at path {obj_path}: {str(e)}") +# print(f"We have successfully parsed {len(atomic_objects)}, however!") +# sys.exit(1) + +# print(f"Successfully parsed {obj_path}!") +# atomic_objects.append(atomic_obj) +# atomic_simulations += atomic_obj.atomic_tests + +# print(f"Successfully parsed all {len(atomic_objects)} files!") +# print(f"Successfully parsed all {len(atomic_simulations)} simulations!") + + + + \ No newline at end of file diff --git a/contentctl/objects/baseline.py b/contentctl/objects/baseline.py index 7e8723d8..91cb8958 100644 --- a/contentctl/objects/baseline.py +++ b/contentctl/objects/baseline.py @@ -1,17 +1,21 @@ -import string -import uuid -import requests -from pydantic import BaseModel, validator, ValidationError -from dataclasses import dataclass -from datetime import datetime +from __future__ import annotations +from typing import TYPE_CHECKING, Annotated, Optional, List,Any +from pydantic import field_validator, ValidationInfo, Field, model_serializer +if 
TYPE_CHECKING: + from contentctl.input.director import DirectorOutputDto +from contentctl.objects.deployment import Deployment from contentctl.objects.security_content_object import SecurityContentObject -from contentctl.objects.enums import DataModel +from contentctl.objects.enums import DataModel, AnalyticsType from contentctl.objects.baseline_tags import BaselineTags -from contentctl.objects.deployment import Deployment -from contentctl.helper.link_validator import LinkValidator -from contentctl.objects.enums import SecurityContentType +from contentctl.objects.enums import DeploymentType +#from contentctl.objects.deployment import Deployment + +# from typing import TYPE_CHECKING +# if TYPE_CHECKING: +# from contentctl.input.director import DirectorOutputDto + class Baseline(SecurityContentObject): # baseline spec @@ -21,43 +25,40 @@ class Baseline(SecurityContentObject): #date: str #author: str #contentType: SecurityContentType = SecurityContentType.baselines - type: str - datamodel: list + type: Annotated[str,Field(pattern="^Baseline$")] = Field(...) + datamodel: Optional[List[DataModel]] = None #description: str - search: str - how_to_implement: str - known_false_positives: str + search: str = Field(..., min_length=4) + how_to_implement: str = Field(..., min_length=4) + known_false_positives: str = Field(..., min_length=4) check_references: bool = False #Validation is done in order, this field must be defined first - references: list - tags: BaselineTags + tags: BaselineTags = Field(...) # enrichment - deployment: Deployment = None - + deployment: Deployment = Field({}) + @field_validator("deployment", mode="before") + def getDeployment(cls, v:Any, info:ValidationInfo)->Deployment: + return Deployment.getDeployment(v,info) + - - @validator('type') - def type_valid(cls, v, values): - if v != "Baseline": - raise ValueError('not valid analytics type: ' + values["name"]) - return v - - @validator('datamodel') - def datamodel_valid(cls, v, values): - for datamodel in v: - if datamodel not in [el.name for el in DataModel]: - raise ValueError('not valid data model: ' + values["name"]) - return v - - @validator('how_to_implement') - def encode_error(cls, v, values, field): - return SecurityContentObject.free_text_field_valid(cls,v,values,field) + @model_serializer + def serialize_model(self): + #Call serializer for parent + super_fields = super().serialize_model() + + #All fields custom to this model + model= { + "tags": self.tags.model_dump(), + "type": self.type, + "search": self.search, + "how_to_implement":self.how_to_implement, + "known_false_positives":self.known_false_positives, + "datamodel": self.datamodel, + } + + #Combine fields from this model with fields from parent + super_fields.update(model) - # @validator('references') - # def references_check(cls, v, values): - # return LinkValidator.SecurityContentObject_validate_references(v, values) - @validator('search') - def search_validate(cls, v, values): - # write search validator - return v + #return the model + return super_fields \ No newline at end of file diff --git a/contentctl/objects/baseline_tags.py b/contentctl/objects/baseline_tags.py index 161afbe7..fa0030dd 100644 --- a/contentctl/objects/baseline_tags.py +++ b/contentctl/objects/baseline_tags.py @@ -1,25 +1,74 @@ +from __future__ import annotations +from typing import TYPE_CHECKING +from pydantic import BaseModel, Field, field_validator, ValidationInfo, model_serializer +from typing import List, Any, Union + +from contentctl.objects.story import Story +from 
contentctl.objects.deployment import Deployment +from contentctl.objects.detection import Detection +from contentctl.objects.enums import SecurityContentProductName +from contentctl.objects.enums import SecurityDomain +if TYPE_CHECKING: + from contentctl.input.director import DirectorOutputDto -from pydantic import BaseModel, validator, ValidationError class BaselineTags(BaseModel): - analytic_story: list - deployments: list = None - detections: list - product: list - required_fields: list - security_domain: str - - - @validator('product') - def tags_product(cls, v, values): - valid_products = [ - "Splunk Enterprise", "Splunk Enterprise Security", "Splunk Cloud", - "Splunk Security Analytics for AWS", "Splunk Behavioral Analytics" - ] - - for value in v: - if value not in valid_products: - raise ValueError('product is not valid for ' + values['name'] + '. valid products are ' + str(valid_products)) - return v \ No newline at end of file + analytic_story: list[Story] = Field(...) + #deployment: Deployment = Field('SET_IN_GET_DEPLOYMENT_FUNCTION') + detections: List[Union[Detection,str]] = Field(...) + product: list[SecurityContentProductName] = Field(...,min_length=1) + required_fields: List[str] = Field(...,min_length=1) + security_domain: SecurityDomain = Field(...) + + + @field_validator("analytic_story",mode="before") + def getStories(cls, v:Any, info:ValidationInfo)->List[Story]: + return Story.mapNamesToSecurityContentObjects(v, info.context.get("output_dto",None)) + + + @model_serializer + def serialize_model(self): + #All fields custom to this model + model= { + "analytic_story": [story.name for story in self.analytic_story], + "detections": [detection.name for detection in self.detections if isinstance(detection,Detection)], + "product": self.product, + "required_fields":self.required_fields, + "security_domain":self.security_domain, + "deployments": None + } + + + #return the model + return model + + def replaceDetectionNameWithDetectionObject(self, detection:Detection)->bool: + + pass + + + + + # @field_validator("deployment", mode="before") + # def getDeployment(cls, v:Any, info:ValidationInfo)->Deployment: + # if v != 'SET_IN_GET_DEPLOYMENT_FUNCTION': + # print(f"Deployment defined in YML: {v}") + # return v + + # director: Optional[DirectorOutputDto] = info.context.get("output_dto",None) + # if not director: + # raise ValueError("Cannot set deployment - DirectorOutputDto not passed to Detection Constructor in context") + + # typeField = "Baseline" + # deps = [deployment for deployment in director.deployments if deployment.type == typeField] + # if len(deps) == 1: + # return deps[0] + # elif len(deps) == 0: + # raise ValueError(f"Failed to find Deployment for type '{typeField}' "\ + # f"from possible {[deployment.type for deployment in director.deployments]}") + # else: + # raise ValueError(f"Found more than 1 ({len(deps)}) Deployment for type '{typeField}' "\ + # f"from possible {[deployment.type for deployment in director.deployments]}") + \ No newline at end of file diff --git a/contentctl/objects/config.py b/contentctl/objects/config.py index e0907eea..f036d132 100644 --- a/contentctl/objects/config.py +++ b/contentctl/objects/config.py @@ -1,110 +1,107 @@ -from pydantic import BaseModel, validator, ValidationError, Field, Extra +from __future__ import annotations +from pydantic import ( + BaseModel, Field, field_validator, + field_serializer, ConfigDict, DirectoryPath, + PositiveInt, FilePath, HttpUrl, AnyUrl, model_validator, + ValidationInfo +) +from 
contentctl.output.yml_writer import YmlWriter +from os import environ +from datetime import datetime, UTC +from typing import Optional,Any,Annotated,List,Union, Self import semantic_version -from datetime import datetime -from typing import Union -from contentctl.objects.test_config import TestConfig - -import string import random -PASSWORD = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(16)]) - -class ConfigGlobal(BaseModel): - log_path: str - log_level: str - - -class ConfigScheduling(BaseModel): - cron_schedule: str - earliest_time: str - latest_time: str - schedule_window: str - - -class ConfigNotable(BaseModel): - rule_description: str - rule_title: str - nes_fields: list - - -class ConfigEmail(BaseModel): - subject: str - to: str - message: str - - -class ConfigSlack(BaseModel): - channel: str - message: str - - -class ConfigPhantom(BaseModel): - cam_workers: str - label: str - phantom_server: str - sensitivity: str - severity: str - - -class ConfigRba(BaseModel): - enabled: str - - -class ConfigDetectionConfiguration(BaseModel): - scheduling: ConfigScheduling = ConfigScheduling(cron_schedule="0 * * * *", earliest_time="-70m@m", latest_time="-10m@m", schedule_window="auto") - notable: ConfigNotable = ConfigNotable(rule_description="%description%", rule_title="%name%", nes_fields=["user", "dest", "src"]) - email: Union[ConfigEmail,None] = None - slack: Union[ConfigSlack,None] = None - phantom: Union[ConfigPhantom,None] = None - rba: Union[ConfigRba,None] = None - - -class ConfigAlertAction(BaseModel): - notable: ConfigNotable - - - - -class ConfigDeploy(BaseModel): - description: str = "Description for this deployment target" - server: str = "127.0.0.1" - -CREDENTIAL_MISSING = "PROVIDE_CREDENTIALS_VIA_CMD_LINE_ARGUMENT" -class ConfigDeployACS(ConfigDeploy): - token: str = CREDENTIAL_MISSING +from enum import StrEnum, auto +import pathlib +from contentctl.helper.utils import Utils +from urllib.parse import urlparse +from abc import ABC, abstractmethod +from contentctl.objects.enums import PostTestBehavior +from contentctl.objects.detection import Detection + +import tqdm +from functools import partialmethod + +ENTERPRISE_SECURITY_UID = 263 +COMMON_INFORMATION_MODEL_UID = 1621 + +SPLUNKBASE_URL = "https://splunkbase.splunk.com/app/{uid}/release/{version}/download" + +class App_Base(BaseModel,ABC): + model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) + uid: Optional[int] = Field(default=None) + title: str = Field(description="Human-readable name used by the app. This can have special characters.") + appid: Optional[Annotated[str, Field(pattern="^[a-zA-Z0-9_-]+$")]]= Field(default=None,description="Internal name used by your app. " + "It may ONLY have characters, numbers, and underscores. No other characters are allowed.") + version: str = Field(description="The version of your Content Pack. 
This must follow semantic versioning guidelines.") + description: Optional[str] = Field(default="description of app",description="Free text description of the Content Pack.") -class ConfigDeployRestAPI(ConfigDeploy): - port: int = 8089 - username: str = "admin" - password: str = PASSWORD -class Deployments(BaseModel): - acs_deployments: list[ConfigDeployACS] = [] - rest_api_deployments: list[ConfigDeployRestAPI] = [ConfigDeployRestAPI()] + def getSplunkbasePath(self)->HttpUrl: + return HttpUrl(SPLUNKBASE_URL.format(uid=self.uid, release=self.version)) + @abstractmethod + def getApp(self, config:test, stage_file:bool=False)->str: + ... + def ensureAppPathExists(self, config:test, stage_file:bool=False): + if stage_file: + if not config.getLocalAppDir().exists(): + config.getLocalAppDir().mkdir(parents=True) -class ConfigBuildSplunk(BaseModel): - pass +class TestApp(App_Base): + model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) + hardcoded_path: Optional[Union[FilePath,HttpUrl]] = Field(default=None, description="This may be a relative or absolute link to a file OR an HTTP URL linking to your app.") -class ConfigBuildJson(BaseModel): - pass - -class ConfigBuildBa(BaseModel): - pass - - -class ConfigBuild(BaseModel): + @field_serializer('hardcoded_path',when_used='always') + def serialize_path(path: Union[AnyUrl, pathlib.Path])->str: + return str(path) + + def getApp(self, config:test,stage_file:bool=False)->str: + #If the apps directory does not exist, then create it + self.ensureAppPathExists(config,stage_file) + + if config.splunk_api_password is not None and config.splunk_api_username is not None: + if self.version is not None and self.uid is not None: + return str(self.getSplunkbasePath()) + if self.version is None or self.uid is None: + print(f"Not downloading {self.title} from Splunkbase since uid[{self.uid}] AND version[{self.version}] MUST be defined") + + + elif isinstance(self.hardcoded_path, pathlib.Path): + destination = config.getLocalAppDir() / self.hardcoded_path.name + if stage_file: + Utils.copy_local_file(str(self.hardcoded_path), + str(destination), + verbose_print=True) + + elif isinstance(self.hardcoded_path, AnyUrl): + file_url_string = str(self.hardcoded_path) + server_path = pathlib.Path(urlparse(file_url_string).path) + destination = config.getLocalAppDir() / server_path.name + if stage_file: + Utils.download_file_from_http(file_url_string, str(destination)) + else: + raise Exception(f"Unknown path for app '{self.title}'") + + return str(destination) + +class CustomApp(App_Base): + model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) # Fields required for app.conf based on # https://docs.splunk.com/Documentation/Splunk/9.0.4/Admin/Appconf - title: str = Field(default="ContentPack",title="Internal name used by your app. No spaces or special characters.") - path_root: str = Field(default="dist",title="The root path at which you will build your app.") - prefix: str = Field(default="ContentPack",title="A short prefix to easily identify all your content.") - build: int = Field(default=int(datetime.utcnow().strftime("%Y%m%d%H%M%S")), - title="Build number for your app. This will always be a number that corresponds to the time of the build in the format YYYYMMDDHHMMSS") - version: str = Field(default="0.0.1",title="The version of your Content Pack. 
This must follow semantic versioning guidelines.") + uid: int = Field(ge=2, lt=100000, default_factory=lambda:random.randint(20000,100000)) + title: str = Field(default="Content Pack",description="Human-readable name used by the app. This can have special characters.") + appid: Annotated[str, Field(pattern="^[a-zA-Z0-9_-]+$")]= Field(default="ContentPack",description="Internal name used by your app. " + "It may ONLY have characters, numbers, and underscores. No other characters are allowed.") + version: str = Field(default="0.0.1",description="The version of your Content Pack. This must follow semantic versioning guidelines.", validate_default=True) + + prefix: str = Field(default="ContentPack",description="A short prefix to easily identify all your content.") + build: int = Field(exclude=True, default=int(datetime.now(UTC).strftime("%Y%m%d%H%M%S")), validate_default=True, + description="Build number for your app. This will always be a number that corresponds to the time of the build in the format YYYYMMDDHHMMSS") # id has many restrictions: # * Omit this setting for apps that are for internal use only and not intended # for upload to Splunkbase. @@ -120,54 +117,789 @@ class ConfigBuild(BaseModel): # * must not be any of the following names: CON, PRN, AUX, NUL, # COM1, COM2, COM3, COM4, COM5, COM6, COM7, COM8, COM9, # LPT1, LPT2, LPT3, LPT4, LPT5, LPT6, LPT7, LPT8, LPT9 - name: str = Field(default="ContentPack",title="Internal name used by your app. No spaces or special characters.") - label: str = Field(default="Custom Splunk Content Pack",title="This is the app name that shows in the launcher.") - author_name: str = Field(default="author name",title="Name of the Content Pack Author.") - author_email: str = Field(default="author@contactemailaddress.com",title="Contact email for the Content Pack Author") - author_company: str = Field(default="author company",title="Name of the company who has developed the Content Pack") - description: str = Field(default="description of app",title="Free text description of the Content Pack.") - - splunk_app: Union[ConfigBuildSplunk,None] = ConfigBuildSplunk() - json_objects: Union[ConfigBuildJson,None] = None - ba_objects: Union[ConfigBuildBa,None] = None - - @validator('version', always=True) + + label: str = Field(default="Custom Splunk Content Pack",description="This is the app name that shows in the launcher.") + author_name: str = Field(default="author name",description="Name of the Content Pack Author.") + author_email: str = Field(default="author@contactemailaddress.com",description="Contact email for the Content Pack Author") + author_company: str = Field(default="author company",description="Name of the company who has developed the Content Pack") + description: str = Field(default="description of app",description="Free text description of the Content Pack.") + + + @field_validator('version') def validate_version(cls, v, values): try: - validate_version = semantic_version.Version(v) + _ = semantic_version.Version(v) except Exception as e: raise(ValueError(f"The specified version does not follow the semantic versioning spec (https://semver.org/). 
{str(e)}")) return v #Build will ALWAYS be the current utc timestamp - @validator('build', always=True) + @field_validator('build') def validate_build(cls, v, values): return int(datetime.utcnow().strftime("%Y%m%d%H%M%S")) + def getApp(self, config:test, stage_file=True)->str: + self.ensureAppPathExists(config,stage_file) + + destination = config.getLocalAppDir() / (config.getPackageFilePath(include_version=True).name) + if stage_file: + Utils.copy_local_file(str(config.getPackageFilePath(include_version=True)), + str(destination), + verbose_print=True) + return str(destination) + + +class Config_Base(BaseModel): + model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) + + path: DirectoryPath = Field(default=DirectoryPath("."), description="The root of your app.") + app:CustomApp = Field(default_factory=CustomApp) + + @field_serializer('path',when_used='always') + def serialize_path(path: DirectoryPath)->str: + return str(path) + +class init(Config_Base): + pass + + +class validate(Config_Base): + model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) + enrichments: bool = Field(default=False, description="Enable MITRE, APP, and CVE Enrichments. "\ + "This is useful when outputting a release build "\ + "and validating these values, but should otherwise "\ + "be avoided for performance reasons.") + build_app: bool = Field(default=True, description="Should an app be built and output in the build_path?") + build_api: bool = Field(default=False, description="Should api objects be built and output in the build_path?") + build_ssa: bool = Field(default=False, description="Should ssa objects be built and output in the build_path?") + + def getAtomicRedTeamRepoPath(self, atomic_red_team_repo_name:str = "atomic-red-team"): + return self.path/atomic_red_team_repo_name + +class report(validate): + #reporting takes no extra args, but we define it here so that it can be a mode on the command line + def getReportingPath(self)->pathlib.Path: + return self.path/"reporting/" + + + +class build(validate): + model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) + build_path: DirectoryPath = Field(default=DirectoryPath("dist/"), title="Target path for all build outputs") + + @field_serializer('build_path',when_used='always') + def serialize_build_path(path: DirectoryPath)->str: + return str(path) + + @field_validator('build_path',mode='before') + @classmethod + def ensure_build_path(cls, v:Union[str,DirectoryPath]): + ''' + If the build path does not exist, then create it. + If the build path is actually a file, then raise a descriptive + exception. 
+ ''' + if isinstance(v,str): + v = pathlib.Path(v) + if v.is_dir(): + return v + elif v.is_file(): + raise ValueError(f"Build path {v} must be a directory, but instead it is a file") + elif not v.exists(): + v.mkdir(parents=True) + return v + + def getBuildDir(self)->pathlib.Path: + return self.path / self.build_path + + def getPackageDirectoryPath(self)->pathlib.Path: + return self.getBuildDir() / f"{self.app.appid}" + + + def getPackageFilePath(self, include_version:bool=False)->pathlib.Path: + if include_version: + return self.getBuildDir() / f"{self.app.appid}-{self.app.version}.tar.gz" + else: + return self.getBuildDir() / f"{self.app.appid}-latest.tar.gz" + + def getSSAPath(self)->pathlib.Path: + return self.getBuildDir() / "ssa" + + def getAPIPath(self)->pathlib.Path: + return self.getBuildDir() / "api" + + def getAppTemplatePath(self)->pathlib.Path: + return self.path/"app_template" + +class StackType(StrEnum): + classic = auto() + victoria = auto() -class ConfigEnrichments(BaseModel): - attack_enrichment: bool = False - cve_enrichment: bool = False - splunk_app_enrichment: bool = False +class inspect(build): + splunk_api_username: str = Field(description="Splunk API username used for running appinspect.") + splunk_api_password: str = Field(exclude=True, description="Splunk API password used for running appinspect.") + stack_type: StackType = Field(description="The type of your Splunk Cloud Stack") +class NewContentType(StrEnum): + detection = auto() + story = auto() -class ConfigBuildSSA(BaseModel): - path_root: str -class ConfigBuildApi(BaseModel): - path_root: str +class new(Config_Base): + type: NewContentType = Field(default=NewContentType.detection, description="Specify the type of content you would like to create.") -class Config(BaseModel, extra=Extra.forbid): - #general: ConfigGlobal = ConfigGlobal() - #detection_configuration: ConfigDetectionConfiguration = ConfigDetectionConfiguration() - deployments: Deployments = Deployments() - build: ConfigBuild = ConfigBuild() - build_ssa: Union[ConfigBuildSSA,None] = None - build_api: Union[ConfigBuildApi,None] = None - enrichments: ConfigEnrichments = ConfigEnrichments() - test: Union[TestConfig,None] = None + +class deploy_acs(inspect): + model_config = ConfigDict(use_enum_values=True,validate_default=False, arbitrary_types_allowed=True) + #ignore linter error + splunk_cloud_jwt_token: str = Field(exclude=True, description="Splunk JWT used for performing ACS operations on a Splunk Cloud Instance") + splunk_cloud_stack: str = Field(description="The name of your Splunk Cloud Stack") + + +class Infrastructure(BaseModel): + model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) + splunk_app_username:str = Field(default="admin", description="Username for logging in to your Splunk Server") + splunk_app_password:str = Field(exclude=True, default="password", description="Password for logging in to your Splunk Server.") + instance_address:str = Field(..., description="Address of your splunk server.") + hec_port: int = Field(default=8088, gt=1, lt=65536, title="HTTP Event Collector Port") + web_ui_port: int = Field(default=8000, gt=1, lt=65536, title="Web UI Port") + api_port: int = Field(default=8089, gt=1, lt=65536, title="REST API Port") + instance_name: str = Field(...) 
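(Editor's aside, not part of the patch: the `Infrastructure` model above leans on two pydantic v2 behaviours worth calling out: `Field(exclude=True)` keeps credentials out of serialized output, and the `gt`/`lt` bounds reject invalid port numbers at construction time. A minimal sketch, with `InfrastructureSketch` standing in for the real model:)

from pydantic import BaseModel, Field, ValidationError

class InfrastructureSketch(BaseModel):
    splunk_app_username: str = "admin"
    splunk_app_password: str = Field(default="password", exclude=True)
    instance_address: str
    hec_port: int = Field(default=8088, gt=1, lt=65536)
    instance_name: str

target = InfrastructureSketch(instance_name="splunk_target_host", instance_address="localhost")
print(target.model_dump())  # the password is omitted because of exclude=True

try:
    InfrastructureSketch(instance_name="bad", instance_address="localhost", hec_port=70000)
except ValidationError as e:
    print(e)  # hec_port=70000 violates lt=65536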
+ + +class deploy_rest(build): + model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) + + target:Infrastructure = Infrastructure(instance_name="splunk_target_host", instance_address="localhost") + #This will overwrite existing content without promprting for confirmation + overwrite_existing_content:bool = Field(default=True, description="Overwrite existing macros and savedsearches in your enviornment") + + +class Container(Infrastructure): + model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) + instance_address:str = Field(default="localhost", description="Address of your splunk server.") + + +class ContainerSettings(BaseModel): + model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) + leave_running: bool = Field(default=True, description="Leave container running after it is first " + "set up to speed up subsequent test runs.") + num_containers: PositiveInt = Field(default=1, description="Number of containers to start in parallel. " + "Please note that each container is quite expensive to run. It is not " + "recommended to run more than 4 containers unless you have a very " + "well-resourced environment.") + full_image_path:str = Field(default="registry.hub.docker.com/splunk/splunk:latest", + title="Full path to the container image to be used") + + def getContainers(self)->List[Container]: + containers = [] + for i in range(self.num_containers): + containers.append(Container(instance_name="contentctl_{}".format(i), + web_ui_port=8000+i, hec_port=8088+(i*2), api_port=8089+(i*2))) + + return containers + + +class All(BaseModel): + #Doesn't need any extra logic + pass + +class Changes(BaseModel): + model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) + target_branch:str = Field(...,description="The target branch to diff against. Note that this includes uncommitted changes in the working directory as well.") + + +class Selected(BaseModel): + model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) + files:List[FilePath] = Field(...,description="List of detection files to test, separated by spaces.") + + @field_serializer('files',when_used='always') + def serialize_path(paths: List[FilePath])->List[str]: + return [str(path) for path in paths] + +DEFAULT_APPS:List[TestApp] = [ + TestApp( + uid=1621, + appid="Splunk_SA_CIM", + title="Splunk Common Information Model (CIM)", + version="5.2.0", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-common-information-model-cim_520.tgz" + ), + ), + TestApp( + uid=6553, + appid="Splunk_TA_okta_identity_cloud", + title="Splunk Add-on for Okta Identity Cloud", + version="2.1.0", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-okta-identity-cloud_210.tgz" + ), + ), + TestApp( + uid=6176, + appid="Splunk_TA_linux_sysmon", + title="Add-on for Linux Sysmon", + version="1.0.4", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/add-on-for-linux-sysmon_104.tgz" + ), + ), + TestApp( + appid="Splunk_FIX_XMLWINEVENTLOG_HEC_PARSING", + title="Splunk Fix XmlWinEventLog HEC Parsing", + version="0.1", + description="This TA is required for replaying Windows Data into the Test Environment. 
The Default TA does not include logic for properly splitting multiple log events in a single file. In production environments, this logic is applied by the Universal Forwarder.", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/Splunk_TA_fix_windows.tgz" + ), + ), + TestApp( + uid=742, + appid="SPLUNK_ADD_ON_FOR_MICROSOFT_WINDOWS", + title="Splunk Add-on for Microsoft Windows", + version="8.8.0", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-microsoft-windows_880.tgz" + ), + ), + TestApp( + uid=5709, + appid="Splunk_TA_microsoft_sysmon", + title="Splunk Add-on for Sysmon", + version="4.0.0", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-sysmon_400.tgz" + ), + ), + TestApp( + uid=833, + appid="Splunk_TA_nix", + title="Splunk Add-on for Unix and Linux", + version="9.0.0", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-unix-and-linux_900.tgz" + ), + ), + TestApp( + uid=5579, + appid="Splunk_TA_CrowdStrike_FDR", + title="Splunk Add-on for CrowdStrike FDR", + version="1.5.0", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-crowdstrike-fdr_150.tgz" + ), + ), + TestApp( + uid=3185, + appid="SPLUNK_TA_FOR_IIS", + title="Splunk Add-on for Microsoft IIS", + version="1.3.0", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-microsoft-iis_130.tgz" + ), + ), + TestApp( + uid=4242, + appid="SPLUNK_TA_FOR_SURICATA", + title="TA for Suricata", + version="2.3.4", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/ta-for-suricata_234.tgz" + ), + ), + TestApp( + uid=5466, + appid="SPLUNK_TA_FOR_ZEEK", + title="TA for Zeek", + version="1.0.6", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/ta-for-zeek_106.tgz" + ), + ), + TestApp( + uid=3258, + appid="SPLUNK_ADD_ON_FOR_NGINX", + title="Splunk Add-on for NGINX", + version="3.2.2", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-nginx_322.tgz" + ), + ), + TestApp( + uid=5238, + appid="SPLUNK_ADD_ON_FOR_STREAM_FORWARDERS", + title="Splunk Add-on for Stream Forwarders", + version="8.1.1", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-stream-forwarders_811.tgz" + ), + ), + TestApp( + uid=5234, + appid="SPLUNK_ADD_ON_FOR_STREAM_WIRE_DATA", + title="Splunk Add-on for Stream Wire Data", + version="8.1.1", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-stream-wire-data_811.tgz" + ), + ), + TestApp( + uid=2757, + appid="PALO_ALTO_NETWORKS_ADD_ON_FOR_SPLUNK", + title="Palo Alto Networks Add-on for Splunk", + version="8.1.1", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/palo-alto-networks-add-on-for-splunk_811.tgz" + ), + ), + TestApp( + uid=3865, + appid="Zscaler_CIM", + title="Zscaler Technical Add-On for Splunk", + version="4.0.3", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/zscaler-technical-add-on-for-splunk_403.tgz" + ), + ), + TestApp( + uid=3719, + appid="SPLUNK_ADD_ON_FOR_AMAZON_KINESIS_FIREHOSE", + 
title="Splunk Add-on for Amazon Kinesis Firehose", + version="1.3.2", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-amazon-kinesis-firehose_132.tgz" + ), + ), + TestApp( + uid=1876, + appid="Splunk_TA_aws", + title="Splunk Add-on for AWS", + version="7.5.0", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-amazon-web-services-aws_750.tgz" + ), + ), + TestApp( + uid=3088, + appid="SPLUNK_ADD_ON_FOR_GOOGLE_CLOUD_PLATFORM", + title="Splunk Add-on for Google Cloud Platform", + version="4.4.0", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-google-cloud-platform_440.tgz" + ), + ), + TestApp( + uid=5556, + appid="SPLUNK_ADD_ON_FOR_GOOGLE_WORKSPACE", + title="Splunk Add-on for Google Workspace", + version="2.6.3", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-google-workspace_263.tgz" + ), + ), + TestApp( + uid=3110, + appid="SPLUNK_TA_MICROSOFT_CLOUD_SERVICES", + title="Splunk Add-on for Microsoft Cloud Services", + version="5.2.2", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-microsoft-cloud-services_522.tgz" + ), + ), + TestApp( + uid=4055, + appid="SPLUNK_ADD_ON_FOR_MICROSOFT_OFFICE_365", + title="Splunk Add-on for Microsoft Office 365", + version="4.5.1", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-add-on-for-microsoft-office-365_451.tgz" + ), + ), + TestApp( + uid=2890, + appid="SPLUNK_MACHINE_LEARNING_TOOLKIT", + title="Splunk Machine Learning Toolkit", + version="5.4.1", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/splunk-machine-learning-toolkit_541.tgz" + ), + ), + TestApp( + uid=2734, + appid="URL_TOOLBOX", + title="URL Toolbox", + version="1.9.2", + hardcoded_path=HttpUrl( + "https://attack-range-appbinaries.s3.us-west-2.amazonaws.com/Latest/url-toolbox_192.tgz" + ), + ), + ] + +class test_common(build): + mode:Union[Changes, Selected, All] = Field(All(), union_mode='left_to_right') + post_test_behavior: PostTestBehavior = Field(default=PostTestBehavior.pause_on_failure, description="Controls what to do when a test completes.\n\n" + f"'{PostTestBehavior.always_pause.value}' - the state of " + "the test will always pause after a test, allowing the user to log into the " + "server and experiment with the search and data before it is removed.\n\n" + f"'{PostTestBehavior.pause_on_failure.value}' - pause execution ONLY when a test fails. The user may press ENTER in the terminal " + "running the test to move on to the next test.\n\n" + f"'{PostTestBehavior.never_pause.value}' - never stop testing, even if a test fails.\n\n" + "***SPECIAL NOTE FOR CI/CD*** 'never_pause' MUST be used for a test to " + "run in an unattended manner or in a CI/CD system - otherwise a single failed test " + "will result in the testing never finishing as the tool waits for input.") + test_instances:List[Infrastructure] = Field(...) + enable_integration_testing: bool = Field(default=False, description="Enable integration testing, which REQUIRES Splunk Enterprise Security " + "to be installed on the server. This checks for a number of different things including generation " + "of appropriate notables and messages. 
Please note that this will increase testing time "
+                                                 "considerably (by approximately 2-3 minutes per detection).")
+    plan_only:bool = Field(default=False, exclude=True, description="WARNING - This is an advanced feature and not currently intended for widespread use. "
+                           "This flag is useful for building your app and generating a test plan to run on different infrastructure. "
+                           "This flag does not actually perform the test. Instead, it validates all content and builds the app(s). "
+                           "It MUST be used with mode.changes and must run in the context of a git repo.")
+    disable_tqdm:bool = Field(default=False, exclude=True, description="The tqdm library (https://github.com/tqdm/tqdm) is used to facilitate a richer,"
+                              " interactive command line workflow that can display progress bars and status information frequently. "
+                              "Unfortunately it is incompatible with, or may cause poorly formatted logs in, many CI/CD systems or other unattended environments. "
+                              "If you are running contentctl in CI/CD, then please set this argument to True. Note that if you are running in a CI/CD context, "
+                              f"you also MUST set post_test_behavior to {PostTestBehavior.never_pause.value}. Otherwise, a failed detection will cause "
+                              "the CI/CD run to pause indefinitely.")
+
+    apps: List[TestApp] = Field(default=DEFAULT_APPS, exclude=False, description="List of apps to install in test environment")
+
+
+    def dumpCICDPlanAndQuit(self, githash: str, detections:List[Detection]):
+        output_file = self.path / "test_plan.yml"
+        self.mode = Selected(files=sorted([detection.file_path for detection in detections], key=lambda path: str(path)))
+        self.post_test_behavior = PostTestBehavior.never_pause.value
+        #required so that CI/CD does not get too much output or hang
+        self.disable_tqdm = True
+
+        # We will still parse the app, but no need to do enrichments or
+        # output to dist. We have already built it!
+        self.build_app = False
+        self.build_api = False
+        self.build_ssa = False
+        self.enrichments = False
+
+        self.enable_integration_testing = True
+
+        data = self.model_dump()
+
+        #Add the hash of the current commit
+        data['githash'] = str(githash)
+
+        #Remove some fields that are not relevant
+        for k in ['container_settings', 'test_instances']:
+            if k in data:
+                del(data[k])
+
+
+
+        try:
+            YmlWriter.writeYmlFile(str(output_file), data)
+            print(f"Successfully wrote a test plan for [{len(self.mode.files)} detections] using [{len(self.apps)} apps] to [{output_file}]")
+        except Exception as e:
+            raise Exception(f"Error writing test plan file [{output_file}]: {str(e)}")
+
+
+    def getLocalAppDir(self)->pathlib.Path:
+        #docker really wants absolute paths
+        path = self.path / "apps"
+        return path.absolute()
+
+    def getContainerAppDir(self)->pathlib.Path:
+        #docker really wants absolute paths
+        return pathlib.Path("/tmp/apps").absolute()
+
+    def enterpriseSecurityInApps(self)->bool:
+
+        for app in self.apps:
+            if app.uid == ENTERPRISE_SECURITY_UID:
+                return True
+        return False
+
+    def commonInformationModelInApps(self)->bool:
+        for app in self.apps:
+            if app.uid == COMMON_INFORMATION_MODEL_UID:
+                return True
+        return False
+
+    @model_validator(mode='after')
+    def ensureCommonInformationModel(self)->Self:
+        if self.commonInformationModelInApps():
+            return self
+        print(f"INFO: Common Information Model/CIM "
+              f"(uid: [{COMMON_INFORMATION_MODEL_UID}]) is not listed in apps.\n"
+              f"contentctl test MUST include Common Information Model.\n"
+              f"Please note this message is only informational.")
+        return self
+    @model_validator(mode='after')
+    def suppressTQDM(self)->Self:
+        if self.disable_tqdm:
+            tqdm.tqdm.__init__ = partialmethod(tqdm.tqdm.__init__, disable=True)
+            if self.post_test_behavior != PostTestBehavior.never_pause.value:
+                raise ValueError(f"You have disabled tqdm, presumably because you are "
+                                 f"running in CI/CD or another unattended context.\n"
+                                 f"However, post_test_behavior is set to [{self.post_test_behavior}].\n"
+                                 f"If that is the case, then you MUST set post_test_behavior "
+                                 f"to [{PostTestBehavior.never_pause.value}].\n"
+                                 "Otherwise, if a detection fails in CI/CD, your CI/CD runner will hang forever.")
+        return self
+
+
+
+    @model_validator(mode='after')
+    def ensureEnterpriseSecurityForIntegrationTesting(self)->Self:
+        if not self.enable_integration_testing:
+            return self
+        if self.enterpriseSecurityInApps():
+            return self
+
+        print(f"INFO: enable_integration_testing is [{self.enable_integration_testing}], "
+              f"but the Splunk Enterprise Security "
+              f"App (uid: [{ENTERPRISE_SECURITY_UID}]) is not listed in apps.\n"
+              f"Integration Testing MUST include Enterprise Security.\n"
+              f"Please note this message is only informational.")
+        return self
+
+
+
+    @model_validator(mode='after')
+    def checkPlanOnlyUse(self)->Self:
+        #Ensure that mode is CHANGES
+        if self.plan_only and not isinstance(self.mode, Changes):
+            raise ValueError("plan_only MUST be used with --mode:changes")
+        return self
+
+
+    def getModeName(self)->str:
+        if isinstance(self.mode, All):
+            return "All"
+        elif isinstance(self.mode, Changes):
+            return "Changes"
+        else:
+            return "Selected"
+
+
+
+
+
+
+class test(test_common):
+    model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True)
+    container_settings:ContainerSettings = ContainerSettings()
+    test_instances: List[Container] = Field([], exclude = True, validate_default=True)
+    splunk_api_username: Optional[str] =
Field(default=None, exclude = True,description="Splunk API username used for running appinspect or installating apps from Splunkbase") + splunk_api_password: Optional[str] = Field(default=None, exclude = True, description="Splunk API password used for running appinspect or installaing apps from Splunkbase") + + + def getContainerInfrastructureObjects(self)->Self: + try: + self.test_instances = self.container_settings.getContainers() + return self + + except Exception as e: + raise ValueError(f"Error constructing container test_instances: {str(e)}") + + + + + @model_validator(mode='after') + def ensureAppsAreGood(self)->Self: + """ + This function ensures that, after the rest of the configuration + has been validated, all of the apps are able to be correctly resolved. + This includes apps that may be sourced from local files, HTTP files, + and/or Splunkbase. + + This is NOT a model_post_init function because it does perform some validation, + even though it does not change the object + + Raises: + Exception: There was a failure in parsing/validating all referenced apps + + Returns: + Self: The test object. No modifications are made during this call. + """ + try: + _ = self.getContainerEnvironmentString(stage_file=False, include_custom_app=False) + except Exception as e: + raise Exception(f"Error validating test apps: {str(e)}") + return self + + + def getContainerEnvironmentString(self,stage_file:bool=False, include_custom_app:bool=True)->str: + apps:List[App_Base] = self.apps + if include_custom_app: + apps.append(self.app) + + paths = [app.getApp(self,stage_file=stage_file) for app in apps] + + container_paths = [] + for path in paths: + if path.startswith(SPLUNKBASE_URL): + container_paths.append(path) + else: + container_paths.append(str(self.getContainerAppDir()/pathlib.Path(path).name)) + + return ','.join(container_paths) + + def getAppFilePath(self): + return self.path / "apps.yml" + + +TEST_ARGS_ENV = "CONTENTCTL_TEST_INFRASTRUCTURES" +class test_servers(test_common): + model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) + test_instances:List[Infrastructure] = Field([],description="Test against one or more preconfigured servers.", validate_default=True) + server_info:Optional[str] = Field(None, validate_default=True, description='String of pre-configured servers to use for testing. 
The list MUST be in the format:\n' + 'address,username,web_ui_port,hec_port,api_port;address_2,username_2,web_ui_port_2,hec_port_2,api_port_2' + '\nFor example, the following string will use 2 preconfigured test instances:\n' + '127.0.0.1,firstUser,firstUserPassword,8000,8088,8089;1.2.3.4,secondUser,secondUserPassword,8000,8088,8089\n' + 'Note that these test_instances may be hosted on the same system, such as localhost/127.0.0.1 or a docker server, or different hosts.\n' + f'This value may also be passed by setting the environment variable [{TEST_ARGS_ENV}] with the value above.') + + @model_validator(mode='before') + @classmethod + def parse_config(cls, data:Any, info: ValidationInfo)->Any: + #Ignore whatever is in the file or defaults, these must be supplied on command line + #if len(v) != 0: + # return v + + + if isinstance(data.get("server_info"),str) : + server_info = data.get("server_info") + elif isinstance(environ.get(TEST_ARGS_ENV),str): + server_info = environ.get(TEST_ARGS_ENV) + else: + raise ValueError(f"server_info not passed on command line or in environment variable {TEST_ARGS_ENV}") + + infrastructures:List[Infrastructure] = [] + + + index = 0 + for server in server_info.split(';'): + address, username, password, web_ui_port, hec_port, api_port = server.split(",") + infrastructures.append(Infrastructure(splunk_app_username = username, splunk_app_password=password, + instance_address=address, hec_port = int(hec_port), + web_ui_port= int(web_ui_port),api_port=int(api_port), instance_name=f"test_server_{index}") + ) + index+=1 + data['test_instances'] = infrastructures + return data + + @field_validator('test_instances',mode='before') + @classmethod + def check_environment_variable_for_config(cls, v:List[Infrastructure]): + return v + #Ignore whatever is in the file or defaults, these must be supplied on command line + #if len(v) != 0: + # return v + TEST_ARGS_ENV = "CONTENTCTL_TEST_INFRASTRUCTURES" + + + #environment variable is present. try to parse it + infrastructures:List[Infrastructure] = [] + server_info:str|None = environ.get(TEST_ARGS_ENV) + if server_info is None: + raise ValueError(f"test_instances not passed on command line or in environment variable {TEST_ARGS_ENV}") + + + index = 0 + for server in server_info.split(';'): + address, username, password, web_ui_port, hec_port, api_port = server.split(",") + infrastructures.append(Infrastructure(splunk_app_username = username, splunk_app_password=password, + instance_address=address, hec_port = int(hec_port), + web_ui_port= int(web_ui_port),api_port=int(api_port), instance_name=f"test_server_{index}") + ) + index+=1 + + + +class release_notes(Config_Base): + old_tag:Optional[str] = Field(None, description="Name of the tag to diff against to find new content. " + "If it is not supplied, then it will be inferred as the " + "second newest tag at runtime.") + new_tag:Optional[str] = Field(None, description="Name of the tag containing new content. If it is not supplied," + " then it will be inferred as the newest tag at runtime.") + latest_branch:Optional[str] = Field(None, description="Branch for which we are generating release notes") + + def releaseNotesFilename(self, filename:str)->pathlib.Path: + #Assume that notes are written to dist/. 
This does not respect build_dir since that is
+        #only a member of build
+        p = self.path / "dist"
+        try:
+            p.mkdir(exist_ok=True,parents=True)
+        except Exception as e:
+            raise Exception(f"Error making the directory '{p}' to hold release_notes: {str(e)}")
+        return p/filename
+
+    @model_validator(mode='after')
+    def ensureNewTagOrLatestBranch(self):
+        '''
+        Exactly one of latest_branch or new_tag must be defined. Otherwise, throw an error
+        '''
+        if self.new_tag is not None and self.latest_branch is not None:
+            raise ValueError("Both new_tag and latest_branch are defined. EXACTLY one of these MUST be defined.")
+        elif self.new_tag is None and self.latest_branch is None:
+            raise ValueError("Neither new_tag nor latest_branch are defined. EXACTLY one of these MUST be defined.")
+        return self
+
+    # @model_validator(mode='after')
+    # def ensureTagsAndBranch(self)->Self:
+    #     #get the repo
+    #     import pygit2
+    #     from pygit2 import Commit
+    #     repo = pygit2.Repository(path=str(self.path))
+    #     tags = list(repo.references.iterator(references_return_type=pygit2.enums.ReferenceFilter.TAGS))
+
+    #     #Sort all tags by commit time from newest to oldest
+    #     sorted_tags = sorted(tags, key=lambda tag: repo.lookup_reference(tag.name).peel(Commit).commit_time, reverse=True)
+
+
+    #     tags_names:List[str] = [t.shorthand for t in sorted_tags]
+    #     print(tags_names)
+    #     if self.new_tag is not None and self.new_tag not in tags_names:
+    #         raise ValueError(f"The new_tag '{self.new_tag}' was not found in the set name tags for this repo: {tags_names}")
+    #     elif self.new_tag is None:
+    #         try:
+    #             self.new_tag = tags_names[0]
+    #         except Exception:
+    #             raise ValueError("Error getting new_tag - there were no tags in the repo")
+    #     elif self.new_tag in tags_names:
+    #         pass
+    #     else:
+    #         raise ValueError(f"Unknown error getting new_tag {self.new_tag}")
+
+
+
+    #     if self.old_tag is not None and self.old_tag not in tags_names:
+    #         raise ValueError(f"The old_tag '{self.new_tag}' was not found in the set name tags for this repo: {tags_names}")
+    #     elif self.new_tag == self.old_tag:
+    #         raise ValueError(f"old_tag '{self.old_tag}' cannot equal new_tag '{self.new_tag}'")
+    #     elif self.old_tag is None:
+    #         try:
+    #             self.old_tag = tags_names[tags_names.index(self.new_tag) + 1]
+    #         except Exception:
+    #             raise ValueError(f"Error getting old_tag.
new_tag '{self.new_tag}' is the oldest tag in the repo.") + # elif self.old_tag in tags_names: + # pass + # else: + # raise ValueError(f"Unknown error getting old_tag {self.old_tag}") + + + + # if not tags_names.index(self.new_tag) < tags_names.index(self.old_tag): + # raise ValueError(f"The new_tag '{self.new_tag}' is not newer than the old_tag '{self.old_tag}'") + + # if self.latest_branch is not None: + # if repo.lookup_branch(self.latest_branch) is None: + # raise ValueError("The latest_branch '{self.latest_branch}' was not found in the repository") + + + # return self diff --git a/contentctl/objects/constants.py b/contentctl/objects/constants.py index f94ab7eb..1e9871d2 100644 --- a/contentctl/objects/constants.py +++ b/contentctl/objects/constants.py @@ -12,7 +12,6 @@ "Lateral Movement": "Exploitation", "Collection": "Exploitation", "Command And Control": "Command and Control", - "Command And Control": "Command and Control", "Exfiltration": "Actions on Objectives", "Impact": "Actions on Objectives" } diff --git a/contentctl/objects/correlation_search.py b/contentctl/objects/correlation_search.py index 9950636b..31a65f83 100644 --- a/contentctl/objects/correlation_search.py +++ b/contentctl/objects/correlation_search.py @@ -220,7 +220,7 @@ class CorrelationSearch(BaseModel): # The logger to use (logs all go to a null pipe unless ENABLE_LOGGING is set to True, so as not # to conflict w/ tqdm) - logger: logging.Logger = Field(default_factory=get_logger, const=True) + logger: logging.Logger = Field(default_factory=get_logger) # The search name (e.g. "ESCU - Windows Modify Registry EnableLinkedConnections - Rule") name: Optional[str] = None diff --git a/contentctl/objects/data_source.py b/contentctl/objects/data_source.py index 5b21b111..04feb347 100644 --- a/contentctl/objects/data_source.py +++ b/contentctl/objects/data_source.py @@ -1,10 +1,8 @@ +from __future__ import annotations +from pydantic import BaseModel -from pydantic import BaseModel, validator, ValidationError -from dataclasses import dataclass - - class DataSource(BaseModel): name: str id: str diff --git a/contentctl/objects/deployment.py b/contentctl/objects/deployment.py index 552c069b..f2b2f391 100644 --- a/contentctl/objects/deployment.py +++ b/contentctl/objects/deployment.py @@ -1,30 +1,70 @@ - -import uuid -import string - -from pydantic import BaseModel, validator, ValidationError -from datetime import datetime +from __future__ import annotations +from pydantic import Field, computed_field, model_validator,ValidationInfo, model_serializer +from typing import Optional,Any from contentctl.objects.security_content_object import SecurityContentObject from contentctl.objects.deployment_scheduling import DeploymentScheduling -from contentctl.objects.deployment_email import DeploymentEmail -from contentctl.objects.deployment_notable import DeploymentNotable -from contentctl.objects.deployment_rba import DeploymentRBA -from contentctl.objects.deployment_slack import DeploymentSlack -from contentctl.objects.deployment_phantom import DeploymentPhantom -from contentctl.objects.enums import SecurityContentType +from contentctl.objects.alert_action import AlertAction + +from contentctl.objects.enums import DeploymentType + + class Deployment(SecurityContentObject): - name: str = "PLACEHOLDER_NAME" #id: str = None #date: str = None #author: str = None #description: str = None #contentType: SecurityContentType = SecurityContentType.deployments - scheduling: DeploymentScheduling = None - email: DeploymentEmail = None - notable: 
DeploymentNotable = None - rba: DeploymentRBA = None - slack: DeploymentSlack = None - phantom: DeploymentPhantom = None - tags: dict = None - \ No newline at end of file + scheduling: DeploymentScheduling = Field(...) + alert_action: AlertAction = AlertAction() + type: DeploymentType = Field(...) + + #Type was the only tag exposed and should likely be removed/refactored. + #For transitional reasons, provide this as a computed_field in prep for removal + @computed_field + @property + def tags(self)->dict[str,DeploymentType]: + return {"type": self.type} + + @staticmethod + def getDeployment(v:dict[str,Any], info:ValidationInfo)->Deployment: + if v != {}: + # If the user has defined a deployment, then allow it to be validated + # and override the default deployment info defined in type:Baseline + v['type'] = DeploymentType.Embedded + + detection_name = info.data.get("name", None) + if detection_name is None: + raise ValueError("Could not create inline deployment - Baseline or Detection lacking 'name' field,") + + v['name'] = f"{detection_name} - Inline Deployment" + # This constructs a temporary in-memory deployment, + # allowing the deployment to be easily defined in the + # detection on a per detection basis. + return Deployment.model_validate(v) + + else: + return SecurityContentObject.getDeploymentFromType(info.data.get("type",None), info) + + @model_serializer + def serialize_model(self): + #Call serializer for parent + super_fields = super().serialize_model() + + #All fields custom to this model + model= { + "scheduling": self.scheduling.model_dump(), + "tags": self.tags + } + + + #Combine fields from this model with fields from parent + model.update(super_fields) + + alert_action_fields = self.alert_action.model_dump() + model.update(alert_action_fields) + + del(model['references']) + + #return the model + return model \ No newline at end of file diff --git a/contentctl/objects/deployment_email.py b/contentctl/objects/deployment_email.py index d13063b1..a607502c 100644 --- a/contentctl/objects/deployment_email.py +++ b/contentctl/objects/deployment_email.py @@ -1,5 +1,5 @@ - -from pydantic import BaseModel, validator, ValidationError +from __future__ import annotations +from pydantic import BaseModel class DeploymentEmail(BaseModel): diff --git a/contentctl/objects/deployment_notable.py b/contentctl/objects/deployment_notable.py index b72ff166..b6e2c463 100644 --- a/contentctl/objects/deployment_notable.py +++ b/contentctl/objects/deployment_notable.py @@ -1,8 +1,8 @@ - -from pydantic import BaseModel, validator, ValidationError - +from __future__ import annotations +from pydantic import BaseModel +from typing import List class DeploymentNotable(BaseModel): rule_description: str rule_title: str - nes_fields: list \ No newline at end of file + nes_fields: List[str] \ No newline at end of file diff --git a/contentctl/objects/deployment_phantom.py b/contentctl/objects/deployment_phantom.py index d32eea69..11df2feb 100644 --- a/contentctl/objects/deployment_phantom.py +++ b/contentctl/objects/deployment_phantom.py @@ -1,5 +1,5 @@ - -from pydantic import BaseModel, validator, ValidationError +from __future__ import annotations +from pydantic import BaseModel class DeploymentPhantom(BaseModel): diff --git a/contentctl/objects/deployment_rba.py b/contentctl/objects/deployment_rba.py index 6b7d59df..b3412b3f 100644 --- a/contentctl/objects/deployment_rba.py +++ b/contentctl/objects/deployment_rba.py @@ -1,7 +1,6 @@ - - -from pydantic import BaseModel, validator, ValidationError +from 
__future__ import annotations +from pydantic import BaseModel class DeploymentRBA(BaseModel): - enabled: str \ No newline at end of file + enabled: bool = False \ No newline at end of file diff --git a/contentctl/objects/deployment_scheduling.py b/contentctl/objects/deployment_scheduling.py index bfc209b2..6c5a75a8 100644 --- a/contentctl/objects/deployment_scheduling.py +++ b/contentctl/objects/deployment_scheduling.py @@ -1,6 +1,5 @@ - - -from pydantic import BaseModel, validator, ValidationError +from __future__ import annotations +from pydantic import BaseModel class DeploymentScheduling(BaseModel): diff --git a/contentctl/objects/deployment_slack.py b/contentctl/objects/deployment_slack.py index 64d79eb2..294836e2 100644 --- a/contentctl/objects/deployment_slack.py +++ b/contentctl/objects/deployment_slack.py @@ -1,5 +1,5 @@ - -from pydantic import BaseModel, validator, ValidationError +from __future__ import annotations +from pydantic import BaseModel class DeploymentSlack(BaseModel): diff --git a/contentctl/objects/detection.py b/contentctl/objects/detection.py index 84321631..d418b520 100644 --- a/contentctl/objects/detection.py +++ b/contentctl/objects/detection.py @@ -1,10 +1,6 @@ -from typing import Union -from pydantic import validator - +from __future__ import annotations from contentctl.objects.abstract_security_content_objects.detection_abstract import Detection_Abstract - - class Detection(Detection_Abstract): # Customization to the Detection Class go here. # You may add fields and/or validations diff --git a/contentctl/objects/detection_tags.py b/contentctl/objects/detection_tags.py index 7ba5c87e..bd3920cb 100644 --- a/contentctl/objects/detection_tags.py +++ b/contentctl/objects/detection_tags.py @@ -1,134 +1,100 @@ -import re +from __future__ import annotations +import uuid +from typing import TYPE_CHECKING, List, Optional, Annotated, Union +from pydantic import BaseModel,Field, NonNegativeInt, PositiveInt, computed_field, UUID4, HttpUrl, ConfigDict, field_validator, ValidationInfo, model_serializer, model_validator +from contentctl.objects.story import Story +if TYPE_CHECKING: + from contentctl.input.director import DirectorOutputDto + + -from pydantic import BaseModel, validator, ValidationError, root_validator from contentctl.objects.mitre_attack_enrichment import MitreAttackEnrichment from contentctl.objects.constants import * from contentctl.objects.observable import Observable +from contentctl.objects.enums import Cis18Value, AssetType, SecurityDomain, RiskSeverity, KillChainPhase, NistCategory, RiskLevel, SecurityContentProductName +from contentctl.objects.atomic import AtomicTest + + class DetectionTags(BaseModel): # detection spec - name: str - analytic_story: list - asset_type: str - automated_detection_testing: str = None - cis20: list = None - confidence: str - impact: int - kill_chain_phases: list = None - mitre_attack_id: list = None - nist: list = None - observable: list[Observable] = [] - message: str - product: list - required_fields: list - risk_score: int - security_domain: str - risk_severity: str = None - cve: list = None - supported_tas: list = None - atomic_guid: list = None - drilldown_search: str = None - manual_test: str = None + model_config = ConfigDict(use_enum_values=True,validate_default=False) + analytic_story: list[Story] = Field(...) + asset_type: AssetType = Field(...) 
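+    # risk_score (computed below) is round(confidence * impact / 100);
+    # for example, confidence=70 and impact=80 yield risk_score=56.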
+ + + confidence: NonNegativeInt = Field(...,le=100) + impact: NonNegativeInt = Field(...,le=100) + @computed_field + @property + def risk_score(self)->int: + return round((self.confidence * self.impact)/100) + + + mitre_attack_id: List[Annotated[str, Field(pattern="^T\d{4}(.\d{3})?$")]] = [] + nist: list[NistCategory] = [] + observable: List[Observable] = [] + message: Optional[str] = Field(...) + product: list[SecurityContentProductName] = Field(...,min_length=1) + required_fields: list[str] = Field(min_length=1) + + security_domain: SecurityDomain = Field(...) + + @computed_field + @property + def risk_severity(self)->RiskSeverity: + if self.risk_score >= 80: + return RiskSeverity('high') + elif (self.risk_score >= 50 and self.risk_score <= 79): + return RiskSeverity('medium') + else: + return RiskSeverity('low') + + + + cve: List[Annotated[str, "^CVE-[1|2][0-9]{3}-[0-9]+$"]] = [] + atomic_guid: List[AtomicTest] = [] + drilldown_search: Optional[str] = None # enrichment - mitre_attack_enrichments: list[MitreAttackEnrichment] = [] - confidence_id: int = None - impact_id: int = None - context_ids: list = None - risk_level_id: int = None - risk_level: str = None - observable_str: str = None - evidence_str: str = None - kill_chain_phases_id: list = None - research_site_url: str = None - event_schema: str = None - mappings: list = None - annotations: dict = None - - - @validator('cis20') - def tags_cis20(cls, v, values): - pattern = '^CIS ([0-9]|1[0-9]|20)$' #DO NOT match leading zeroes and ensure no extra characters before or after the string - for value in v: - if not re.match(pattern, value): - raise ValueError(f"CIS control '{value}' is not a valid Control ('CIS 1' -> 'CIS 20'): {values['name']}") - return v + mitre_attack_enrichments: List[MitreAttackEnrichment] = Field([],validate_default=True) + confidence_id: Optional[PositiveInt] = Field(None,ge=1,le=3) + impact_id: Optional[PositiveInt] = Field(None,ge=1,le=5) + # context_ids: list = None + risk_level_id: Optional[NonNegativeInt] = Field(None,le=4) + risk_level: Optional[RiskLevel] = None + #observable_str: str = None + evidence_str: Optional[str] = None + + @computed_field + @property + def kill_chain_phases(self)->list[KillChainPhase]: + if self.mitre_attack_enrichments is None: + return [] + phases:set[KillChainPhase] = set() + for enrichment in self.mitre_attack_enrichments: + for tactic in enrichment.mitre_attack_tactics: + phase = KillChainPhase(ATTACK_TACTICS_KILLCHAIN_MAPPING[tactic]) + phases.add(phase) + return sorted(list(phases)) - @validator('nist') - def tags_nist(cls, v, values): - # Sourced Courtest of NIST: https://www.nist.gov/system/files/documents/cyberframework/cybersecurity-framework-021214.pdf (Page 19) - IDENTIFY = [f'ID.{category}' for category in ["AM", "BE", "GV", "RA", "RM"] ] - PROTECT = [f'PR.{category}' for category in ["AC", "AT", "DS", "IP", "MA", "PT"]] - DETECT = [f'DE.{category}' for category in ["AE", "CM", "DP"] ] - RESPOND = [f'RS.{category}' for category in ["RP", "CO", "AN", "MI", "IM"] ] - RECOVER = [f'RC.{category}' for category in ["RP", "IM", "CO"] ] - ALL_NIST_CATEGORIES = IDENTIFY + PROTECT + DETECT + RESPOND + RECOVER - - - for value in v: - if not value in ALL_NIST_CATEGORIES: - raise ValueError(f"NIST Category '{value}' is not a valid category") - return v - - @validator('confidence') - def tags_confidence(cls, v, values): - v = int(v) - if not (v > 0 and v <= 100): - raise ValueError('confidence score is out of range 1-100: ' + values["name"]) - else: - return v - - 
@validator('context_ids') - def tags_context(cls, v, values): - context_list = SES_CONTEXT_MAPPING.keys() - for value in v: - if value not in context_list: - raise ValueError('context value not valid for ' + values["name"] + '. valid options are ' + str(context_list) ) - return v - - @validator('impact') - def tags_impact(cls, v, values): - if not (v > 0 and v <= 100): - raise ValueError('impact score is out of range 1-100: ' + values["name"]) + #enum is intentionally Cis18 even though field is named cis20 for legacy reasons + @computed_field + @property + def cis20(self)->list[Cis18Value]: + if self.security_domain == SecurityDomain.NETWORK: + return [Cis18Value.CIS_13] else: - return v - - @validator('kill_chain_phases') - def tags_kill_chain_phases(cls, v, values): - valid_kill_chain_phases = SES_KILL_CHAIN_MAPPINGS.keys() - for value in v: - if value not in valid_kill_chain_phases: - raise ValueError('kill chain phase not valid for ' + values["name"] + '. valid options are ' + str(valid_kill_chain_phases)) - return v - - @validator('mitre_attack_id') - def tags_mitre_attack_id(cls, v, values): - pattern = 'T[0-9]{4}' - for value in v: - if not re.match(pattern, value): - raise ValueError('Mitre Attack ID are not following the pattern Txxxx: ' + values["name"]) - return v - - @validator('product') - def tags_product(cls, v, values): - valid_products = [ - "Splunk Enterprise", "Splunk Enterprise Security", "Splunk Cloud", - "Splunk Security Analytics for AWS", "Splunk Behavioral Analytics" - ] - - for value in v: - if value not in valid_products: - raise ValueError('product is not valid for ' + values['name'] + '. valid products are ' + str(valid_products)) - return v - - @validator('risk_score') - def tags_calculate_risk_score(cls, v, values): - calculated_risk_score = round(values['impact'] * values['confidence'] / 100) - if calculated_risk_score != int(v): - raise ValueError(f"Risk Score must be calculated as round(confidence * impact / 100)" - f"\n Expected risk_score={calculated_risk_score}, found risk_score={int(v)}: {values['name']}") - return v + return [Cis18Value.CIS_10] + + + research_site_url: Optional[HttpUrl] = None + event_schema: str = "ocsf" + mappings: Optional[List] = None + #annotations: Optional[dict] = None + manual_test: Optional[str] = None + # The following validator is temporarily disabled pending further discussions # @validator('message') @@ -157,4 +123,129 @@ def tags_calculate_risk_score(cls, v, values): # raise ValueError(f"The following observables were declared, but are not referenced in the message: {unused_observables}") # return v + + @model_serializer + def serialize_model(self): + #Since this field has no parent, there is no need to call super() serialization function + return { + "analytic_story": [story.name for story in self.analytic_story], + "asset_type": self.asset_type.value, + "cis20": self.cis20, + "kill_chain_phases": self.kill_chain_phases, + "nist": self.nist, + "observable": self.observable, + "message": self.message, + "risk_score": self.risk_score, + "security_domain": self.security_domain, + "risk_severity": self.risk_severity, + "mitre_attack_enrichments": self.mitre_attack_enrichments + } + + + @model_validator(mode="after") + def addAttackEnrichment(self, info:ValidationInfo): + if len(self.mitre_attack_enrichments) > 0: + raise ValueError(f"Error, field 'mitre_attack_enrichment' should be empty and dynamically populated at runtime. 
Instead, this field contained: {str(self.mitre_attack_enrichments)}")
+
+        output_dto:Union[DirectorOutputDto,None]= info.context.get("output_dto",None)
+        if output_dto is None:
+            raise ValueError("Context not provided to detection.detection_tags model post validator")
+
+        if output_dto.attack_enrichment.use_enrichment is False:
+            return self
+
+
+        mitre_enrichments = []
+        missing_tactics = []
+        for mitre_attack_id in self.mitre_attack_id:
+            try:
+                mitre_enrichments.append(output_dto.attack_enrichment.getEnrichmentByMitreID(mitre_attack_id))
+            except Exception as e:
+                missing_tactics.append(mitre_attack_id)
+
+        if len(missing_tactics) > 0:
+            raise ValueError(f"Missing Mitre Attack IDs. {missing_tactics} not found.")
+        else:
+            self.mitre_attack_enrichments = mitre_enrichments
+
+        return self
+
+    '''
+    @field_validator('mitre_attack_enrichments', mode="before")
+    @classmethod
+    def addAttackEnrichments(cls, v:list[MitreAttackEnrichment], info:ValidationInfo)->list[MitreAttackEnrichment]:
+        if len(v) > 0:
+            raise ValueError(f"Error, field 'mitre_attack_enrichment' should be empty and dynamically populated at runtime. Instead, this field contained: {str(v)}")
+
+
+        output_dto:Union[DirectorOutputDto,None]= info.context.get("output_dto",None)
+        if output_dto is None:
+            raise ValueError("Context not provided to detection.detection_tags.mitre_attack_enrichments")
+
+        enrichments = []
+
+
+
+
+        return enrichments
+    '''
+
+    @field_validator('analytic_story',mode="before")
+    @classmethod
+    def mapStoryNamesToStoryObjects(cls, v:list[str], info:ValidationInfo)->list[Story]:
+        return Story.mapNamesToSecurityContentObjects(v, info.context.get("output_dto",None))
+
+    def getAtomicGuidStringArray(self)->List[str]:
+        return [str(atomic_guid.auto_generated_guid) for atomic_guid in self.atomic_guid]
+
+
+    @field_validator('atomic_guid',mode="before")
+    @classmethod
+    def mapAtomicGuidsToAtomicTests(cls, v:List[UUID4], info:ValidationInfo)->List[AtomicTest]:
+        if len(v) == 0:
+            return []
+
+        output_dto:Union[DirectorOutputDto,None]= info.context.get("output_dto",None)
+        if output_dto is None:
+            raise ValueError("Context not provided to detection.detection_tags.atomic_guid validator")
+
+
+        all_tests:List[AtomicTest]= output_dto.atomic_tests
+
+        matched_tests:List[AtomicTest] = []
+        missing_tests:List[UUID4] = []
+        badly_formatted_guids:List[str] = []
+        for atomic_guid_str in v:
+            try:
+                #Ensure that this is a valid UUID
+                atomic_guid = uuid.UUID(str(atomic_guid_str))
+            except Exception as e:
+                #We will not try to load a test for this since it was invalid
+                badly_formatted_guids.append(str(atomic_guid_str))
+                continue
+            try:
+                matched_tests.append(AtomicTest.getAtomicByAtomicGuid(atomic_guid,all_tests))
+            except Exception as _:
+                missing_tests.append(atomic_guid)
+
+
+
+
+        if len(missing_tests) > 0:
+            missing_tests_string = f"\n\tWARNING: Failed to find [{len(missing_tests)}] Atomic Test(s) with the following atomic_guids (called auto_generated_guid in the ART Repo)."\
+                f"\n\tPlease review the output above for potential exception(s) when parsing the Atomic Red Team Repo."\
+                f"\n\tVerify that these auto_generated_guid exist and try updating/pulling the repo again.: {[str(guid) for guid in missing_tests]}"
+        else:
+            missing_tests_string = ""
+
+
+        if len(badly_formatted_guids) > 0:
+            bad_guids_string = f"The following [{len(badly_formatted_guids)}] value(s) are not properly formatted UUIDs: {badly_formatted_guids}\n"
+            raise ValueError(f"{bad_guids_string}{missing_tests_string}")
+
+        elif
len(missing_tests) > 0: + print(missing_tests_string) + + return matched_tests + [AtomicTest.AtomicTestWhenTestIsMissing(test) for test in missing_tests] \ No newline at end of file diff --git a/contentctl/objects/enums.py b/contentctl/objects/enums.py index b22d47b9..5cb06400 100644 --- a/contentctl/objects/enums.py +++ b/contentctl/objects/enums.py @@ -1,33 +1,49 @@ +from __future__ import annotations +from typing import List import enum -class AnalyticsType(enum.Enum): +class AnalyticsType(str, enum.Enum): TTP = "TTP" Anomaly = "Anomaly" Hunting = "Hunting" Correlation = "Correlation" +class DeploymentType(str, enum.Enum): + TTP = "TTP" + Anomaly = "Anomaly" + Hunting = "Hunting" + Correlation = "Correlation" + Baseline = "Baseline" + Embedded = "Embedded" + -class DataModel(enum.Enum): - Endpoint = 1 - Network_Traffic = 2 - Authentication = 3 - Change = 4 - Change_Analysis = 5 - Email = 6 - Network_Resolution = 7 - Network_Sessions = 8 - UEBA = 9 - Updates = 10 - Vulnerabilities = 11 - Web = 12 - Endpoint_Processes = 13 - Endpoint_Filesystem = 14 - Endpoint_Registry = 15 - Risk = 16 - Splunk_Audit = 17 +class DataModel(str,enum.Enum): + ENDPOINT = "Endpoint" + NETWORK_TRAFFIC = "Network_Traffic" + AUTHENTICATION = "Authentication" + CHANGE = "Change" + CHANGE_ANALYSIS = "Change_Analysis" + EMAIL = "Email" + NETWORK_RESOLUTION = "Network_Resolution" + NETWORK_SESSIONS = "Network_Sessions" + UEBA = "UEBA" + UPDATES = "Updates" + VULNERABILITIES = "Vulnerabilities" + WEB = "Web" + #Should the following more specific DMs be added? + #Or should they just fall under endpoint? + #ENDPOINT_PROCESSES = "Endpoint_Processes" + #ENDPOINT_FILESYSTEM = "Endpoint_Filesystem" + #ENDPOINT_REGISTRY = "Endpoint_Registry" + RISK = "Risk" + SPLUNK_AUDIT = "Splunk_Audit" +class PlaybookType(str, enum.Enum): + INVESTIGATION = "Investigation" + RESPONSE = "Response" + class SecurityContentType(enum.Enum): detections = 1 baselines = 2 @@ -57,12 +73,21 @@ class SecurityContentProduct(enum.Enum): CUSTOM = 4 -class SigmaConverterTarget(enum.Enum): - CIM = 1 - RAW = 2 - OCSF = 3 - ALL = 4 +class SecurityContentProductName(str, enum.Enum): + SPLUNK_ENTERPRISE = "Splunk Enterprise" + SPLUNK_ENTERPRISE_SECURITY = "Splunk Enterprise Security" + SPLUNK_CLOUD = "Splunk Cloud" + SPLUNK_SECURITY_ANALYTICS_FOR_AWS = "Splunk Security Analytics for AWS" + SPLUNK_BEHAVIORAL_ANALYTICS = "Splunk Behavioral Analytics" +class SecurityContentInvestigationProductName(str, enum.Enum): + SPLUNK_ENTERPRISE = "Splunk Enterprise" + SPLUNK_ENTERPRISE_SECURITY = "Splunk Enterprise Security" + SPLUNK_CLOUD = "Splunk Cloud" + SPLUNK_SECURITY_ANALYTICS_FOR_AWS = "Splunk Security Analytics for AWS" + SPLUNK_BEHAVIORAL_ANALYTICS = "Splunk Behavioral Analytics" + SPLUNK_PHANTOM = "Splunk Phantom" + class DetectionStatus(enum.Enum): production = "production" @@ -71,6 +96,13 @@ class DetectionStatus(enum.Enum): validation = "validation" +class DetectionStatusSSA(enum.Enum): + production = "production" + deprecated = "deprecated" + experimental = "experimental" + validation = "validation" + + class LogLevel(enum.Enum): NONE = "NONE" ERROR = "ERROR" @@ -129,3 +161,259 @@ class InstanceState(str, enum.Enum): error = "error" stopping = "stopping" stopped = "stopped" + + +class SigmaConverterTarget(enum.Enum): + CIM = 1 + RAW = 2 + OCSF = 3 + ALL = 4 + +# It's unclear why we use a mix of constants and enums. The following list was taken from: +# contentctl/contentctl/helper/constants.py. 
+# We convect it to an enum here +# SES_KILL_CHAIN_MAPPINGS = { +# "Unknown": 0, +# "Reconnaissance": 1, +# "Weaponization": 2, +# "Delivery": 3, +# "Exploitation": 4, +# "Installation": 5, +# "Command And Control": 6, +# "Actions on Objectives": 7 +# } +class KillChainPhase(str, enum.Enum): + UNKNOWN ="Unknown" + RECONNAISSANCE = "Reconnaissance" + WEAPONIZATION = "Weaponization" + DELIVERY = "Delivery" + EXPLOITAITON = "Exploitation" + INSTALLATION = "Installation" + COMMAND_AND_CONTROL = "Command and Control" + ACTIONS_ON_OBJECTIVES = "Actions on Objectives" + + +class DataSource(str,enum.Enum): + OSQUERY_ES_PROCESS_EVENTS = "OSQuery ES Process Events" + POWERSHELL_4104 = "Powershell 4104" + SYSMON_EVENT_ID_1 = "Sysmon Event ID 1" + SYSMON_EVENT_ID_10 = "Sysmon Event ID 10" + SYSMON_EVENT_ID_11 = "Sysmon Event ID 11" + SYSMON_EVENT_ID_13 = "Sysmon Event ID 13" + SYSMON_EVENT_ID_15 = "Sysmon Event ID 15" + SYSMON_EVENT_ID_20 = "Sysmon Event ID 20" + SYSMON_EVENT_ID_21 = "Sysmon Event ID 21" + SYSMON_EVENT_ID_22 = "Sysmon Event ID 22" + SYSMON_EVENT_ID_23 = "Sysmon Event ID 23" + SYSMON_EVENT_ID_3 = "Sysmon Event ID 3" + SYSMON_EVENT_ID_5 = "Sysmon Event ID 5" + SYSMON_EVENT_ID_6 = "Sysmon Event ID 6" + SYSMON_EVENT_ID_7 = "Sysmon Event ID 7" + SYSMON_EVENT_ID_8 = "Sysmon Event ID 8" + SYSMON_EVENT_ID_9 = "Sysmon Event ID 9" + WINDOWS_SECURITY_4624 = "Windows Security 4624" + WINDOWS_SECURITY_4625 = "Windows Security 4625" + WINDOWS_SECURITY_4648 = "Windows Security 4648" + WINDOWS_SECURITY_4663 = "Windows Security 4663" + WINDOWS_SECURITY_4688 = "Windows Security 4688" + WINDOWS_SECURITY_4698 = "Windows Security 4698" + WINDOWS_SECURITY_4703 = "Windows Security 4703" + WINDOWS_SECURITY_4720 = "Windows Security 4720" + WINDOWS_SECURITY_4732 = "Windows Security 4732" + WINDOWS_SECURITY_4738 = "Windows Security 4738" + WINDOWS_SECURITY_4741 = "Windows Security 4741" + WINDOWS_SECURITY_4742 = "Windows Security 4742" + WINDOWS_SECURITY_4768 = "Windows Security 4768" + WINDOWS_SECURITY_4769 = "Windows Security 4769" + WINDOWS_SECURITY_4771 = "Windows Security 4771" + WINDOWS_SECURITY_4776 = "Windows Security 4776" + WINDOWS_SECURITY_4781 = "Windows Security 4781" + WINDOWS_SECURITY_4798 = "Windows Security 4798" + WINDOWS_SECURITY_5136 = "Windows Security 5136" + WINDOWS_SECURITY_5145 = "Windows Security 5145" + WINDOWS_SYSTEM_7045 = "Windows System 7045" + +class ProvidingTechnology(str, enum.Enum): + AMAZON_SECURITY_LAKE = "Amazon Security Lake" + AMAZON_WEB_SERVICES_CLOUDTRAIL = "Amazon Web Services - Cloudtrail" + AZURE_AD = "Azure AD" + CARBON_BLACK_RESPONSE = "Carbon Black Response" + CROWDSTRIKE_FALCON = "CrowdStrike Falcon" + ENTRA_ID = "Entra ID" + GOOGLE_CLOUD_PLATFORM = "Google Cloud Platform" + GOOGLE_WORKSPACE = "Google Workspace" + KUBERNETES = "Kubernetes" + MICROSOFT_DEFENDER = "Microsoft Defender" + MICROSOFT_OFFICE_365 = "Microsoft Office 365" + MICROSOFT_SYSMON = "Microsoft Sysmon" + MICROSOFT_WINDOWS = "Microsoft Windows" + OKTA = "Okta" + PING_ID = "Ping ID" + SPLUNK_INTERNAL_LOGS = "Splunk Internal Logs" + SYMANTEC_ENDPOINT_PROTECTION = "Symantec Endpoint Protection" + ZEEK = "Zeek" + + @staticmethod + def getProvidingTechFromSearch(search_string:str)->List[ProvidingTechnology]: + """_summary_ + + Args: + search_string (str): The search string to extract the providing technologies from. + The providing_technologies_mapping provides keywords, macros, etc that can be updated + with new mappings. 
If that substring appears in the search, then its list of providing + technologies is added. + + Returns: + List[ProvidingTechnology]: List of providing technologies (with no duplicates because + it is derived from a set) calculated from the search string. + """ + matched_technologies:set[ProvidingTechnology] = set() + #As there are many different sources that use google logs, we define the set once + google_logs = set([ProvidingTechnology.GOOGLE_WORKSPACE, ProvidingTechnology.GOOGLE_CLOUD_PLATFORM]) + providing_technologies_mapping = { + '`amazon_security_lake`': set([ProvidingTechnology.AMAZON_SECURITY_LAKE]), + 'audit_searches': set([ProvidingTechnology.SPLUNK_INTERNAL_LOGS]), + '`azure_monitor_aad`': set([ProvidingTechnology.AZURE_AD, ProvidingTechnology.ENTRA_ID]), + '`cloudtrail`': set([ProvidingTechnology.AMAZON_WEB_SERVICES_CLOUDTRAIL]), + #Endpoint is NOT a Macro (and this is intentional since it is to capture Endpoint Datamodel usage) + 'Endpoint': set([ProvidingTechnology.MICROSOFT_SYSMON, + ProvidingTechnology.MICROSOFT_WINDOWS, + ProvidingTechnology.CARBON_BLACK_RESPONSE, + ProvidingTechnology.CROWDSTRIKE_FALCON, + ProvidingTechnology.SYMANTEC_ENDPOINT_PROTECTION]), + '`google_': google_logs, + '`gsuite': google_logs, + '`gws_': google_logs, + '`kube': set([ProvidingTechnology.KUBERNETES]), + '`ms_defender`': set([ProvidingTechnology.MICROSOFT_DEFENDER]), + '`o365_': set([ProvidingTechnology.MICROSOFT_OFFICE_365]), + '`okta': set([ProvidingTechnology.OKTA]), + '`pingid`': set([ProvidingTechnology.PING_ID]), + '`powershell`': set(set([ProvidingTechnology.MICROSOFT_WINDOWS])), + '`splunkd_': set([ProvidingTechnology.SPLUNK_INTERNAL_LOGS]), + '`sysmon`': set([ProvidingTechnology.MICROSOFT_SYSMON]), + '`wineventlog_security`': set([ProvidingTechnology.MICROSOFT_WINDOWS]), + '`zeek_': set([ProvidingTechnology.ZEEK]), + } + for key in providing_technologies_mapping: + if key in search_string: + matched_technologies.update(providing_technologies_mapping[key]) + return sorted(list(matched_technologies)) + + +class Cis18Value(str,enum.Enum): + CIS_0 = "CIS 0" + CIS_1 = "CIS 1" + CIS_2 = "CIS 2" + CIS_3 = "CIS 3" + CIS_4 = "CIS 4" + CIS_5 = "CIS 5" + CIS_6 = "CIS 6" + CIS_7 = "CIS 7" + CIS_8 = "CIS 8" + CIS_9 = "CIS 9" + CIS_10 = "CIS 10" + CIS_11 = "CIS 11" + CIS_12 = "CIS 12" + CIS_13 = "CIS 13" + CIS_14 = "CIS 14" + CIS_15 = "CIS 15" + CIS_16 = "CIS 16" + CIS_17 = "CIS 17" + CIS_18 = "CIS 18" + +class SecurityDomain(str, enum.Enum): + ENDPOINT = "endpoint" + NETWORK = "network" + THREAT = "threat" + IDENTITY = "identity" + ACCESS = "access" + AUDIT = "audit" + CLOUD = "cloud" + +class AssetType(str, enum.Enum): + AWS_ACCOUNT = "AWS Account" + AWS_EKS_KUBERNETES_CLUSTER = "AWS EKS Kubernetes cluster" + AWS_FEDERATED_ACCOUNT = "AWS Federated Account" + AWS_INSTANCE = "AWS Instance" + ACCOUNT = "Account" + AMAZON_EKS_KUBERNETES_CLUSTER = "Amazon EKS Kubernetes cluster" + AMAZON_EKS_KUBERNETES_CLUSTER_POD = "Amazon EKS Kubernetes cluster Pod" + AMAZON_ELASTIC_CONTAINER_REGISTRY = "Amazon Elastic Container Registry" + #AZURE = "Azure" + #AZURE_AD = "Azure AD" + #AZURE_AD_TENANT = "Azure AD Tenant" + AZURE_TENANT = "Azure Tenant" + AZURE_AKS_KUBERNETES_CLUSTER = "Azure AKS Kubernetes cluster" + AZURE_ACTIVE_DIRECTORY = "Azure Active Directory" + CIRCLECI = "CircleCI" + CLOUD_COMPUTE_INSTANCE = "Cloud Compute Instance" + CLOUD_INSTANCE = "Cloud Instance" + DNS_SERVERS = "DNS Servers" + DATABASE_SERVER = "Database Server" + DOMAIN_SERVER = "Domain Server" + EC2_SNAPSHOT = "EC2 Snapshot" + 
ENDPOINT = "Endpoint" + GCP = "GCP" + GCP_ACCOUNT = "GCP Account" + GCP_GKE_EKS_KUBERNETES_CLUSTER = "GCP GKE EKS Kubernetes cluster" + GCP_GKE_KUBERNETES_CLUSTER = "GCP GKE Kubernetes cluster" + GCP_KUBERNETES_CLUSTER = "GCP Kubernetes cluster" + GCP_STORAGE_BUCKET = "GCP Storage Bucket" + GDRIVE = "GDrive" + GSUITE = "GSuite" + GITHUB = "GitHub" + GOOGLE_CLOUD_PLATFORM_TENANT = "Google Cloud Platform tenant" + IDENTITY = "Identity" + INFRASTRUCTURE = "Infrastructure" + INSTANCE = "Instance" + KUBERNETES = "Kubernetes" + NETWORK = "Network" + #OFFICE_365 = "Office 365" + #OFFICE_365_Tenant = "Office 365 Tenant" + O365_TENANT = "O365 Tenant" + OKTA_TENANT = "Okta Tenant" + PROXY = "Proxy" + S3_BUCKET = "S3 Bucket" + SPLUNK_SERVER = "Splunk Server" + VPN_Appliance = "VPN Appliance" + WEB_SERVER = "Web Server" + WEB_PROXY = "Web Proxy" + WEB_APPLICATION = "Web Application" + WINDOWS = "Windows" + +class NistCategory(str, enum.Enum): + ID_AM = "ID.AM" + ID_BE = "ID.BE" + ID_GV = "ID.GV" + ID_RA = "ID.RA" + ID_RM = "ID.RM" + PR_AC = "PR.AC" + PR_AT = "PR.AT" + PR_DS = "PR.DS" + PR_IP = "PR.IP" + PR_MA = "PR.MA" + PR_PT = "PR.PT" + DE_AE = "DE.AE" + DE_CM = "DE.CM" + DE_DP = "DE.DP" + RS_RP = "RS.RP" + RS_CO = "RS.CO" + RS_AN = "RS.AN" + RS_MI = "RS.MI" + RS_IM = "RS.IM" + RC_RP = "RC.RP" + RC_IM = "RC.IM" + RC_CO = "RC.CO" + +class RiskLevel(str,enum.Enum): + INFO = "Info" + LOW = "Low" + MEDIUM = "Medium" + HIGH = "High" + CRITICAL = "Critical" + +class RiskSeverity(str,enum.Enum): + LOW = "low" + MEDIUM = "medium" + HIGH = "high" diff --git a/contentctl/objects/integration_test.py b/contentctl/objects/integration_test.py index 0b0b57ec..c7cb5119 100644 --- a/contentctl/objects/integration_test.py +++ b/contentctl/objects/integration_test.py @@ -13,7 +13,7 @@ class IntegrationTest(BaseTest): An integration test for a detection against ES """ # The test type (integration) - test_type: TestType = Field(TestType.INTEGRATION, const=True) + test_type: TestType = Field(TestType.INTEGRATION) # The test result result: Union[None, IntegrationTestResult] = None diff --git a/contentctl/objects/integration_test_result.py b/contentctl/objects/integration_test_result.py index 0460f633..e746731e 100644 --- a/contentctl/objects/integration_test_result.py +++ b/contentctl/objects/integration_test_result.py @@ -1,6 +1,4 @@ from typing import Optional - -from contentctl.objects.test_config import Infrastructure from contentctl.objects.base_test_result import BaseTestResult diff --git a/contentctl/objects/investigation.py b/contentctl/objects/investigation.py index eeb24abf..1ca980ea 100644 --- a/contentctl/objects/investigation.py +++ b/contentctl/objects/investigation.py @@ -1,67 +1,76 @@ -import enum -import uuid -import string +from __future__ import annotations import re -import requests - -from pydantic import BaseModel, validator, ValidationError -from dataclasses import dataclass -from datetime import datetime - +from typing import TYPE_CHECKING, Optional, List, Any +from pydantic import field_validator, computed_field, Field, ValidationInfo, ConfigDict,model_serializer +if TYPE_CHECKING: + from contentctl.input.director import DirectorOutputDto from contentctl.objects.security_content_object import SecurityContentObject -from contentctl.objects.enums import AnalyticsType from contentctl.objects.enums import DataModel -from contentctl.objects.enums import SecurityContentType from contentctl.objects.investigation_tags import InvestigationTags -from contentctl.helper.link_validator import LinkValidator 
class Investigation(SecurityContentObject): - # investigation spec - #contentType: SecurityContentType = SecurityContentType.investigations - #name: str - #id: str - #version: int - #date: str - #author: str - type: str - datamodel: list - #description: str - search: str - how_to_implement: str - known_false_positives: str - check_references: bool = False #Validation is done in order, this field must be defined first - references: list - inputs: list = None + model_config = ConfigDict(use_enum_values=True,validate_default=False) + type: str = Field(...,pattern="^Investigation$") + datamodel: list[DataModel] = Field(...) + + search: str = Field(...) + how_to_implement: str = Field(...) + known_false_positives: str = Field(...) + + tags: InvestigationTags # enrichment - lowercase_name: str = None + @computed_field + @property + def inputs(self)->List[str]: + #Parse out and return all inputs from the searchj + inputs = [] + pattern = r"\$([^\s.]*)\$" - # check_fields=False because we want to override the - # name validator in SecurityContentObject - # (since we allow longer than the default length) - @validator('name',check_fields=False) - def name_max_length(cls, v): - if len(v) > 75: - raise ValueError('name is longer then 75 chars: ' + v) - return v + for input in re.findall(pattern, self.search): + inputs.append(input) - @validator('datamodel') - def datamodel_valid(cls, v, values): - for datamodel in v: - if datamodel not in [el.name for el in DataModel]: - raise ValueError('not valid data model: ' + values["name"]) - return v + return inputs - @validator('how_to_implement') - def encode_error(cls, v, values, field): - return SecurityContentObject.free_text_field_valid(cls,v,values,field) + @computed_field + @property + def lowercase_name(self)->str: + return self.name.replace(' ', '_').replace('-','_').replace('.','_').replace('/','_').lower().replace(' ', '_').replace('-','_').replace('.','_').replace('/','_').lower() + + + @model_serializer + def serialize_model(self): + #Call serializer for parent + super_fields = super().serialize_model() + + #All fields custom to this model + model= { + "type": self.type, + "datamodel": self.datamodel, + "search": self.search, + "how_to_implement": self.how_to_implement, + "known_false_positives": self.known_false_positives, + "inputs": self.inputs, + "tags": self.tags.model_dump(), + "lowercase_name":self.lowercase_name + } + + #Combine fields from this model with fields from parent + super_fields.update(model) + + #return the model + return super_fields + + + def model_post_init(self, ctx:dict[str,Any]): + # director: Optional[DirectorOutputDto] = ctx.get("output_dto",None) + # if not isinstance(director,DirectorOutputDto): + # raise ValueError("DirectorOutputDto was not passed in context of Detection model_post_init") + director: Optional[DirectorOutputDto] = ctx.get("output_dto",None) + for story in self.tags.analytic_story: + story.investigations.append(self) - @validator('references') - def references_check(cls, v, values): - return LinkValidator.SecurityContentObject_validate_references(v, values) - @validator('search') - def search_validate(cls, v, values): - # write search validator - return v \ No newline at end of file + + \ No newline at end of file diff --git a/contentctl/objects/investigation_tags.py b/contentctl/objects/investigation_tags.py index a6a0334f..c01ac2cc 100644 --- a/contentctl/objects/investigation_tags.py +++ b/contentctl/objects/investigation_tags.py @@ -1,9 +1,33 @@ +from __future__ import annotations +from pydantic 
import BaseModel, Field, field_validator, ValidationInfo, model_serializer +from contentctl.objects.story import Story +from contentctl.objects.enums import SecurityContentInvestigationProductName, SecurityDomain -from pydantic import BaseModel, validator, ValidationError +class InvestigationTags(BaseModel): + analytic_story: list[Story] = Field([],min_length=1) + product: list[SecurityContentInvestigationProductName] = Field(...,min_length=1) + required_fields: list[str] = Field(min_length=1) + security_domain: SecurityDomain = Field(...) -class InvestigationTags(BaseModel): - analytic_story: list - product: list - required_fields: list - security_domain: str \ No newline at end of file + @field_validator('analytic_story',mode="before") + @classmethod + def mapStoryNamesToStoryObjects(cls, v:list[str], info:ValidationInfo)->list[Story]: + return Story.mapNamesToSecurityContentObjects(v, info.context.get("output_dto",None)) + + + @model_serializer + def serialize_model(self): + #All fields custom to this model + model= { + "analytic_story": [story.name for story in self.analytic_story], + "product": self.product, + "required_fields": self.required_fields, + "security_domain": self.security_domain, + } + + #Combine fields from this model with fields from parent + + + #return the model + return model \ No newline at end of file diff --git a/contentctl/objects/lookup.py b/contentctl/objects/lookup.py index 60ac4bd6..c8ac5d81 100644 --- a/contentctl/objects/lookup.py +++ b/contentctl/objects/lookup.py @@ -1,10 +1,12 @@ from __future__ import annotations - -from pydantic import BaseModel, validator, ValidationError -from typing import Tuple +from pydantic import field_validator, ValidationInfo, model_validator, FilePath, model_serializer +from typing import TYPE_CHECKING, Optional, Any, Union import re +if TYPE_CHECKING: + from contentctl.input.director import DirectorOutputDto + from contentctl.objects.config import validate from contentctl.objects.security_content_object import SecurityContentObject -from contentctl.objects.enums import SecurityContentType + LOOKUPS_TO_IGNORE = set(["outputlookup"]) LOOKUPS_TO_IGNORE.add("ut_shannon_lookup") #In the URL toolbox app which is recommended for ESCU @@ -18,39 +20,86 @@ LOOKUPS_TO_IGNORE.add("other_lookups") -class Lookup(BaseModel): - #contentType: SecurityContentType = SecurityContentType.lookups - name: str - description: str - collection: str = None - fields_list: str = None - filename: str = None - default_match: str = None - match_type: str = None - min_matches: int = None - case_sensitive_match: str = None - file_path:str = None - - # Macro can have different punctuatuation in it, - # so redefine the name validator. 
For now, jsut
-    # allow any characters in the macro
-    @validator('name',check_fields=False)
-    def name_invalid_chars(cls, v):
-        return v
+class Lookup(SecurityContentObject):
+
+    collection: Optional[str] = None
+    fields_list: Optional[str] = None
+    filename: Optional[FilePath] = None
+    default_match: Optional[bool] = None
+    match_type: Optional[str] = None
+    min_matches: Optional[int] = None
+    case_sensitive_match: Optional[bool] = None
+
+
+    @model_serializer
+    def serialize_model(self):
+        #Call parent serializer
+        super_fields = super().serialize_model()
+
+        #All fields custom to this model
+        model= {
+            "filename": self.filename.name if self.filename is not None else None,
+            "default_match": "true" if self.default_match is True else "false",
+            "match_type": self.match_type,
+            "min_matches": self.min_matches,
+            "case_sensitive_match": "true" if self.case_sensitive_match is True else "false",
+            "collection": self.collection,
+            "fields_list": self.fields_list
+        }
+
+        #return the model
+        model.update(super_fields)
+        return model
+
+    @model_validator(mode="before")
+    def fix_lookup_path(cls, data:Any, info: ValidationInfo)->Any:
+        if data.get("filename"):
+            config:validate = info.context.get("config",None)
+            if config is not None:
+                data["filename"] = config.path / "lookups/" / data["filename"]
+            else:
+                raise ValueError("config is required for constructing the lookup filename, but it was not provided")
+        return data
+
+    @field_validator('filename')
+    @classmethod
+    def lookup_file_valid(cls, v: Union[FilePath,None], info: ValidationInfo):
+        if not v:
+            return v
+        if not (v.name.endswith(".csv") or v.name.endswith(".mlmodel")):
+            raise ValueError(f"All Lookup files must be CSV (.csv) or ML model (.mlmodel) files. The following file is neither: '{v}'")
-
-    # Allow long names for lookups
-    @validator('name',check_fields=False)
-    def name_max_length(cls, v):
-        #if len(v) > 67:
-        #    raise ValueError('name is longer then 67 chars: ' + v)
         return v
+
+    @field_validator('match_type')
+    @classmethod
+    def match_type_valid(cls, v: Union[str,None], info: ValidationInfo):
+        if not v:
+            #Match type can be None and that's okay
+            return v
+
+        if not (v.startswith("WILDCARD(") and v.endswith(")")):
+            raise ValueError(f"All match_types must take the format 'WILDCARD(field_name)'. The following value does not: '{v}'")
+        return v
+
+
+    #Ensure that exactly one of collection or filename is defined
+    @model_validator(mode='after')
+    def ensure_mutually_exclusive_fields(self)->Lookup:
+        if self.filename is not None and self.collection is not None:
+            raise ValueError("filename and collection cannot both be defined in the lookup file. Exactly one must be defined.")
+        elif self.filename is None and self.collection is None:
+            raise ValueError("Neither filename nor collection were defined in the lookup file. 
Exactly one must " + "be defined.") + + + return self + @staticmethod - def get_lookups(text_field: str, all_lookups: list[Lookup], ignore_lookups:set[str]=LOOKUPS_TO_IGNORE)->Tuple[list[Lookup], set[str]]: + def get_lookups(text_field: str, director:DirectorOutputDto, ignore_lookups:set[str]=LOOKUPS_TO_IGNORE)->list[Lookup]: lookups_to_get = set(re.findall(r'[^output]lookup (?:update=true)?(?:append=t)?\s*([^\s]*)', text_field)) lookups_to_ignore = set([lookup for lookup in lookups_to_get if any(to_ignore in lookups_to_get for to_ignore in ignore_lookups)]) lookups_to_get -= lookups_to_ignore - found_lookups, missing_lookups = SecurityContentObject.get_objects_by_name(lookups_to_get, all_lookups) - return found_lookups, missing_lookups + return Lookup.mapNamesToSecurityContentObjects(list(lookups_to_get), director) \ No newline at end of file diff --git a/contentctl/objects/macro.py b/contentctl/objects/macro.py index ee30bc0c..478e5e13 100644 --- a/contentctl/objects/macro.py +++ b/contentctl/objects/macro.py @@ -1,12 +1,13 @@ # Used so that we can have a staticmethod that takes the class # type Macro as an argument from __future__ import annotations +from typing import TYPE_CHECKING, List import re -from pydantic import BaseModel, validator, ValidationError - +from pydantic import Field, model_serializer +if TYPE_CHECKING: + from contentctl.input.director import DirectorOutputDto from contentctl.objects.security_content_object import SecurityContentObject -from contentctl.objects.enums import SecurityContentType -from typing import Tuple + MACROS_TO_IGNORE = set(["_filter", "drop_dm_object_name"]) @@ -17,32 +18,32 @@ MACROS_TO_IGNORE.add("prohibited_processes") -class Macro(BaseModel): - #contentType: SecurityContentType = SecurityContentType.macros - name: str - definition: str - description: str - arguments: list = None - file_path: str = None - - # Macro can have different punctuatuation in it, - # so redefine the name validator. For now, jsut - # allow any characters in the macro - @validator('name',check_fields=False) - def name_invalid_chars(cls, v): - return v +class Macro(SecurityContentObject): + definition: str = Field(..., min_length=1) + arguments: List[str] = Field([]) + + - # Allow long names for macros - @validator('name',check_fields=False) - def name_max_length(cls, v): - #if len(v) > 67: - # raise ValueError('name is longer then 67 chars: ' + v) - return v + @model_serializer + def serialize_model(self): + #Call serializer for parent + super_fields = super().serialize_model() + #All fields custom to this model + model= { + "definition": self.definition, + "description": self.description, + } + + #return the model + model.update(super_fields) + + return model @staticmethod - def get_macros(text_field:str, all_macros: list[Macro], ignore_macros:set[str]=MACROS_TO_IGNORE)->Tuple[list[Macro], set[str]]: + + def get_macros(text_field:str, director:DirectorOutputDto , ignore_macros:set[str]=MACROS_TO_IGNORE)->list[Macro]: #Remove any comments, allowing there to be macros (which have a single backtick) inside those comments #If a comment ENDS in a macro, for example ```this is a comment with a macro `macro_here```` #then there is a small edge case where the regex below does not work properly. 
If that is
@@ -50,6 +51,7 @@ def get_macros(text_field:str, all_macros: list[Macro], ignore_macros:set[str]=M
         text_field = re.sub(r"\`\`\`\`", r"` ```", text_field)
         text_field = re.sub(r"\`\`\`.*?\`\`\`", " ", text_field)
         
+
         macros_to_get = re.findall(r'`([^\s]+)`', text_field)
         #If macros take arguments, stop at the first argument. We just want the name of the macro
         macros_to_get = set([macro[:macro.find('(')] if macro.find('(') != -1 else macro for macro in macros_to_get])
@@ -57,24 +59,6 @@ def get_macros(text_field:str, all_macros: list[Macro], ignore_macros:set[str]=M
         macros_to_ignore = set([macro for macro in macros_to_get if any(to_ignore in macro for to_ignore in ignore_macros)])
         #remove the ones that we will ignore
         macros_to_get -= macros_to_ignore
-        found_macros, missing_macros = SecurityContentObject.get_objects_by_name(macros_to_get, all_macros)
-        return found_macros, missing_macros
-
-        # found_macros = [macro for macro in all_macros if macro.name in macros_to_get]
-
-        # missing_macros = macros_to_get - set([macro.name for macro in found_macros])
-        # missing_macros_after_ignored_macros = set()
-        # for macro in missing_macros:
-        #     found = False
-        #     for ignore in ignore_macros:
-        #         if ignore in macro:
-        #             found=True
-        #             break
-        #     if found is False:
-        #         missing_macros_after_ignored_macros.add(macro)
-
-        #return found_macros, missing_macros_after_ignored_macros
+        return Macro.mapNamesToSecurityContentObjects(list(macros_to_get), director)
 
-
-
-
+    
\ No newline at end of file
diff --git a/contentctl/objects/mitre_attack_enrichment.py b/contentctl/objects/mitre_attack_enrichment.py
index ff92b2b3..bf00e18a 100644
--- a/contentctl/objects/mitre_attack_enrichment.py
+++ b/contentctl/objects/mitre_attack_enrichment.py
@@ -1,8 +1,32 @@
-from pydantic import BaseModel, validator, ValidationError
+from __future__ import annotations
+from pydantic import BaseModel, Field, ConfigDict
+from typing import Set,List,Annotated
+from enum import StrEnum
+
+
+class MitreTactics(StrEnum):
+    RECONNAISSANCE = "Reconnaissance"
+    RESOURCE_DEVELOPMENT = "Resource Development"
+    INITIAL_ACCESS = "Initial Access"
+    EXECUTION = "Execution"
+    PERSISTENCE = "Persistence"
+    PRIVILEGE_ESCALATION = "Privilege Escalation"
+    DEFENSE_EVASION = "Defense Evasion"
+    CREDENTIAL_ACCESS = "Credential Access"
+    DISCOVERY = "Discovery"
+    LATERAL_MOVEMENT = "Lateral Movement"
+    COLLECTION = "Collection"
+    COMMAND_AND_CONTROL = "Command And Control"
+    EXFILTRATION = "Exfiltration"
+    IMPACT = "Impact"
 
 class MitreAttackEnrichment(BaseModel):
-    mitre_attack_id: str
-    mitre_attack_technique: str
-    mitre_attack_tactics: list
-    mitre_attack_groups: list
+    model_config = ConfigDict(use_enum_values=True)
+    mitre_attack_id: Annotated[str, Field(pattern="^T\d{4}(.\d{3})?$")] = Field(...)
+    mitre_attack_technique: str = Field(...)
+    mitre_attack_tactics: List[MitreTactics] = Field(...)
+    mitre_attack_groups: List[str] = Field(...)
+ + def __hash__(self) -> int: + return id(self) \ No newline at end of file diff --git a/contentctl/objects/observable.py b/contentctl/objects/observable.py index 7b40b145..3a6134fe 100644 --- a/contentctl/objects/observable.py +++ b/contentctl/objects/observable.py @@ -1,10 +1,6 @@ -import abc -import string -import uuid -from typing import Literal -from datetime import datetime -from pydantic import BaseModel, validator, ValidationError -from contentctl.objects.enums import SecurityContentType +from __future__ import annotations +from pydantic import BaseModel, validator + from contentctl.objects.constants import * diff --git a/contentctl/objects/playbook.py b/contentctl/objects/playbook.py index 801cebcd..f025d227 100644 --- a/contentctl/objects/playbook.py +++ b/contentctl/objects/playbook.py @@ -1,36 +1,66 @@ +from __future__ import annotations +from typing import TYPE_CHECKING,Self +from pydantic import model_validator, Field, FilePath -import uuid -import string -from pydantic import BaseModel, validator, ValidationError - -from contentctl.objects.security_content_object import SecurityContentObject from contentctl.objects.playbook_tags import PlaybookTag -from contentctl.helper.link_validator import LinkValidator -from contentctl.objects.enums import SecurityContentType +from contentctl.objects.security_content_object import SecurityContentObject +from contentctl.objects.enums import PlaybookType class Playbook(SecurityContentObject): - #name: str - #id: str - #version: int - #date: str - #author: str - #contentType: SecurityContentType = SecurityContentType.playbooks - type: str - #description: str - how_to_implement: str - playbook: str - check_references: bool = False #Validation is done in order, this field must be defined first - references: list - app_list: list - tags: PlaybookTag - - - @validator('references') - def references_check(cls, v, values): - return LinkValidator.SecurityContentObject_validate_references(v, values) - - @validator('how_to_implement') - def encode_error(cls, v, values, field): - return SecurityContentObject.free_text_field_valid(cls,v,values,field) \ No newline at end of file + type: PlaybookType = Field(...) + + # Override the type definition for filePath. + # This MUST be backed by a file and cannot be None + file_path: FilePath + + how_to_implement: str = Field(min_length=4) + playbook: str = Field(min_length=4) + app_list: list[str] = Field(...,min_length=0) + tags: PlaybookTag = Field(...) 
+ + + + @model_validator(mode="after") + def ensureJsonAndPyFilesExist(self)->Self: + json_file_path = self.file_path.with_suffix(".json") + python_file_path = self.file_path.with_suffix(".py") + missing:list[str] = [] + if not json_file_path.is_file(): + missing.append(f"Playbook file named '{self.file_path.name}' MUST "\ + f"have a .json file named '{json_file_path.name}', "\ + "but it does not exist") + + if not python_file_path.is_file(): + missing.append(f"Playbook file named '{self.file_path.name}' MUST "\ + f"have a .py file named '{python_file_path.name}', "\ + "but it does not exist") + + + if len(missing) == 0: + return self + else: + missing_files_string = '\n - '.join(missing) + raise ValueError(f"Playbook files missing:\n -{missing_files_string}") + + + #Override playbook file name checking FOR NOW + @model_validator(mode="after") + def ensureFileNameMatchesSearchName(self)->Self: + file_name = self.name \ + .replace(' ', '_') \ + .replace('-','_') \ + .replace('.','_') \ + .replace('/','_') \ + .lower() + ".yml" + + #allow different capitalization FOR NOW in playbook file names + if (self.file_path is not None and file_name != self.file_path.name.lower()): + raise ValueError(f"The file name MUST be based off the content 'name' field:\n"\ + f"\t- Expected File Name: {file_name}\n"\ + f"\t- Actual File Name : {self.file_path.name}") + + return self + + \ No newline at end of file diff --git a/contentctl/objects/playbook_tags.py b/contentctl/objects/playbook_tags.py index 0d3b1fee..fd4a21e6 100644 --- a/contentctl/objects/playbook_tags.py +++ b/contentctl/objects/playbook_tags.py @@ -1,13 +1,50 @@ +from __future__ import annotations +from typing import TYPE_CHECKING, Optional, List +from pydantic import BaseModel, Field +import enum +from contentctl.objects.detection import Detection -from pydantic import BaseModel, validator, ValidationError +class PlaybookProduct(str,enum.Enum): + SPLUNK_SOAR = "Splunk SOAR" +class PlaybookUseCase(str,enum.Enum): + PHISHING = "Phishing" + ENDPOINT = "Endpoint" + ENRICHMENT = "Enrichment" + +class PlaybookType(str,enum.Enum): + INPUT = "Input" + AUTOMATION = "Automation" + +class VpeType(str,enum.Enum): + MODERN = "Modern" + CLASSIC = "Classic" +class DefendTechnique(str,enum.Enum): + D3_AL = "D3-AL" + D3_DNSDL = "D3-DNSDL" + D3_DA = "D3-DA" + D3_IAA = "D3-IAA" + D3_IRA = "D3-IRA" + D3_OTF = "D3-OTF" + D3_ER = "D3-ER" + D3_RE = "D3-RE" + D3_URA = "D3-URA" + D3_DNRA = "D3-DNRA" + D3_IPRA = "D3-IPRA" + D3_FHRA = "D3-FHRA" + D3_SRA = "D3-SRA" + D3_RUAA = "D3-RUAA" class PlaybookTag(BaseModel): - analytic_story: list = None - detections: list = None - platform_tags: list = None - playbook_fields: list = None - product: list = None - playbook_fields: list = None - detection_objects: list = None + analytic_story: Optional[list] = None + detections: Optional[list] = None + platform_tags: list[str] = Field(...,min_length=0) + playbook_type: PlaybookType = Field(...) + vpe_type: VpeType = Field(...) 
+ playbook_fields: list[str] = Field([], min_length=0) + product: list[PlaybookProduct] = Field([],min_length=0) + use_cases: list[PlaybookUseCase] = Field([],min_length=0) + defend_technique_id: Optional[List[DefendTechnique]] = None + + detection_objects: list[Detection] = [] \ No newline at end of file diff --git a/contentctl/objects/repo_config.py b/contentctl/objects/repo_config.py deleted file mode 100644 index bf0904f5..00000000 --- a/contentctl/objects/repo_config.py +++ /dev/null @@ -1,163 +0,0 @@ - - -import pathlib - - -from pydantic import BaseModel, root_validator, validator, ValidationError, Extra, Field -from pydantic.main import ModelMetaclass -from dataclasses import dataclass -from datetime import datetime -from typing import Union - -import validators - -from contentctl.objects.enums import SecurityContentProduct - -from contentctl.helper.utils import Utils - -from semantic_version import Version - -import git -ALWAYS_PULL = True - -SPLUNKBASE_URL = "https://splunkbase.splunk.com/app/{uid}/release/{release}/download" - -class Manifest(BaseModel): - #Note that many of these fields are mirrored from App - - #Some information about the developer of the app - author_name: str = Field(default=None, title="Enter the name of the app author") - author_email: str = Field(default=None, title="Enter a contact email for the develop(s) of the app") - author_company: str = Field(default=None, title="Enter the company who is developing the app") - - #uid is a numeric identifier assigned by splunkbase, so - #homemade applications will not have this - uid: Union[int, None] = Field(default=None, title="Unique numeric identifier assigned by Splunkbase to identify your app. You can find it in the URL of your app's landing page. If you do not have one, leave this blank.") - - #appid is basically the internal name of you app - appid: str = Field(default=None, title="Internal name of your app. Note that it MUST be alphanumeric with underscores, but no spaces or other special characters") - - #Title is the human readable name for your application - title: str = Field(default=None, title="Human-Readable name for your app. This can include any characters you want") - - #Self explanatory - description: Union[str,None] = Field(default=None, title="Provide a helpful description of the app.") - release: str = Field(default=None, title="Provide a name for the current release of the app. This MUST follow semantic version format MAJOR.MINOR.PATCH[-tag]") - - - - @validator('author_email', always=True) - def validate_author_email(cls, v): - print("email is") - print(v) - if bool(validators.email(v)) == False: - raise(ValueError(f"Email address {v} is invalid")) - return v - - @validator('release', always=True) - def validate_release(cls, v): - try: - Version(v) - except Exception as e: - raise(ValueError(f"The string '{v}' is not a valid Semantic Version. For more information on Semantic Versioning, please refer to https://semver.org/")) - - return v - - -class RepoConfig(BaseModel): - - #Needs a manifest to be able to properly generate the app - manifest:Manifest = Field(default=None, title="Manifest Object") - repo_path: str = Field(default='.', title="Path to the root of your app") - repo_url: Union[str,None] = Field(default=None, title="HTTP(s) path to the repo for repo_path. 
If this field is blank, it will be inferred from the repo") - main_branch: str = Field(title="Main branch of the repo.") - - - - - type: SecurityContentProduct = Field(default=SecurityContentProduct.SPLUNK_ENTERPRISE_APP, title=f"What type of product would you like to build. Choose one of {SecurityContentProduct._member_names_}") - skip_enrichment: bool = Field(default=True, title="Whether or not to skip the enrichment processes when validating the app. Enrichment increases the amount of time it takes to build an app significantly because it must hit a number of Web APIs.") - - input_path: str = Field(default='.', title="Path to the root of your app") - output_path: str = Field(default='./dist', title="Path where 'generate' will write out your raw app") - #output_path: str = Field(default='./build', title="Path where 'build' will write out your custom app") - - #test_config: TestConfig = Field(default=TestConfig, title="Test Configuration") - - #@validator('manifest', always=True, pre=True) - ''' - @root_validator(pre=True) - def validate_manifest(cls, values): - - try: - print(Manifest.parse_obj(values)) - except Exception as e: - raise(ValueError(f"error validating manifest: {str(e)}")) - - - return values - print("TWO") - #return {} - #return Manifest.parse_obj({"email":"invalid_email@gmail.com"}) - ''' - @validator('repo_path', always=True) - def validate_repo_path(cls,v): - - try: - path = pathlib.Path(v) - except Exception as e: - raise(ValueError(f"Error, the provided path is is not a valid path: '{v}'")) - - try: - r = git.Repo(path) - except Exception as e: - raise(ValueError(f"Error, the provided path is not a valid git repo: '{path}'")) - - try: - - if ALWAYS_PULL: - r.remotes.origin.pull() - except Exception as e: - raise ValueError(f"Error pulling git repository {v}: {str(e)}") - - - return v - - - @validator('repo_url') - def validate_repo_url(cls, v, values): - - - #First try to get the value from the repo - try: - remote_url_from_repo = git.Repo(values['repo_path']).remotes.origin.url - except Exception as e: - raise(ValueError(f"Error reading remote_url from the repo located at {values['repo_path']}")) - - if v is not None and remote_url_from_repo != v: - raise(ValueError(f"The url of the remote repo supplied in the config file {v} does not "\ - f"match the value read from the repository at {values['repo_path']}, {remote_url_from_repo}")) - - - if v is None: - v = remote_url_from_repo - - #Ensure that the url is the proper format - try: - if bool(validators.url(v)) == False: - raise(Exception) - except: - raise(ValueError(f"Error validating the repo_url. 
The url is not valid: {v}")) - - - return v - - @validator('main_branch') - def valid_main_branch(cls, v, values): - - - try: - Utils.validate_git_branch_name(values['repo_path'],values['repo_url'], v) - except Exception as e: - raise ValueError(f"Error validating main_branch: {str(e)}") - return v \ No newline at end of file diff --git a/contentctl/objects/security_content_object.py b/contentctl/objects/security_content_object.py index 611e0f62..a2f87364 100644 --- a/contentctl/objects/security_content_object.py +++ b/contentctl/objects/security_content_object.py @@ -1,8 +1,4 @@ -import abc -import string -import uuid -from datetime import datetime -from pydantic import BaseModel, validator, ValidationError +from __future__ import annotations from contentctl.objects.abstract_security_content_objects.security_content_object_abstract import SecurityContentObject_Abstract class SecurityContentObject(SecurityContentObject_Abstract): diff --git a/contentctl/objects/ssa_detection.py b/contentctl/objects/ssa_detection.py index c06ee154..036f0b77 100644 --- a/contentctl/objects/ssa_detection.py +++ b/contentctl/objects/ssa_detection.py @@ -1,3 +1,4 @@ +from __future__ import annotations import uuid import string import requests @@ -14,8 +15,8 @@ from contentctl.objects.enums import DetectionStatus from contentctl.objects.deployment import Deployment from contentctl.objects.ssa_detection_tags import SSADetectionTags -from contentctl.objects.config import ConfigDetectionConfiguration -from contentctl.objects.unit_test import UnitTest +from contentctl.objects.unit_test_ssa import UnitTestSSA +from contentctl.objects.unit_test_old import UnitTestOld from contentctl.objects.macro import Macro from contentctl.objects.lookup import Lookup from contentctl.objects.baseline import Baseline @@ -40,7 +41,7 @@ class SSADetection(BaseModel): known_false_positives: str references: list tags: SSADetectionTags - tests: list[UnitTest] = None + tests: list[UnitTestSSA] = None # enrichments annotations: dict = None @@ -48,7 +49,7 @@ class SSADetection(BaseModel): mappings: dict = None file_path: str = None source: str = None - test: Union[UnitTest, dict] = None + test: Union[UnitTestSSA, dict, UnitTestOld] = None runtime: str = None internalVersion: int = None @@ -61,6 +62,7 @@ class SSADetection(BaseModel): class Config: use_enum_values = True + ''' @validator("name") def name_invalid_chars(cls, v): invalidChars = set(string.punctuation.replace("-", "")) @@ -150,3 +152,5 @@ def tests_validate(cls, v, values): "At least one test is required for a production or validation detection: " + values["name"] ) return v + + ''' \ No newline at end of file diff --git a/contentctl/objects/ssa_detection_tags.py b/contentctl/objects/ssa_detection_tags.py index 55bb4720..62fef564 100644 --- a/contentctl/objects/ssa_detection_tags.py +++ b/contentctl/objects/ssa_detection_tags.py @@ -1,13 +1,15 @@ +from __future__ import annotations import re +from typing import List +from pydantic import BaseModel, validator, ValidationError, model_validator, Field -from pydantic import BaseModel, validator, ValidationError, root_validator from contentctl.objects.mitre_attack_enrichment import MitreAttackEnrichment from contentctl.objects.constants import * - +from contentctl.objects.enums import SecurityContentProductName class SSADetectionTags(BaseModel): # detection spec - name: str + #name: str analytic_story: list asset_type: str automated_detection_testing: str = None @@ -19,7 +21,7 @@ class SSADetectionTags(BaseModel): mitre_attack_id: 
list = None
     nist: list = None
     observable: list
-    product: list
+    product: List[SecurityContentProductName] = Field(...,min_length=1)
     required_fields: list
     risk_score: int
     security_domain: str
@@ -77,7 +79,7 @@ def tags_nist(cls, v, values):
     def tags_confidence(cls, v, values):
         v = int(v)
         if not (v > 0 and v <= 100):
-            raise ValueError('confidence score is out of range 1-100: ' + values["name"])
+            raise ValueError('confidence score is out of range 1-100.')
         else:
             return v
 
@@ -85,7 +87,7 @@ def tags_confidence(cls, v, values):
     @validator('impact')
     def tags_impact(cls, v, values):
         if not (v > 0 and v <= 100):
-            raise ValueError('impact score is out of range 1-100: ' + values["name"])
+            raise ValueError('impact score is out of range 1-100.')
         else:
             return v
 
@@ -94,7 +96,7 @@ def tags_kill_chain_phases(cls, v, values):
         valid_kill_chain_phases = SES_KILL_CHAIN_MAPPINGS.keys()
         for value in v:
             if value not in valid_kill_chain_phases:
-                raise ValueError('kill chain phase not valid for ' + values["name"] + '. valid options are ' + str(valid_kill_chain_phases))
+                raise ValueError('kill chain phase not valid. Valid options are ' + str(valid_kill_chain_phases))
         return v
 
     @validator('mitre_attack_id')
@@ -102,20 +104,10 @@ def tags_mitre_attack_id(cls, v, values):
         pattern = 'T[0-9]{4}'
         for value in v:
             if not re.match(pattern, value):
-                raise ValueError('Mitre Attack ID are not following the pattern Txxxx: ' + values["name"])
+                raise ValueError('Mitre Attack ID does not follow the pattern Txxxx.')
         return v
 
-    @validator('product')
-    def tags_product(cls, v, values):
-        valid_products = [
-            "Splunk Enterprise", "Splunk Enterprise Security", "Splunk Cloud",
-            "Splunk Security Analytics for AWS", "Splunk Behavioral Analytics"
-        ]
-        for value in v:
-            if value not in valid_products:
-                raise ValueError('product is not valid for ' + values['name'] + '. valid products are ' + str(valid_products))
-        return v
 
     @validator('risk_score')
     def tags_calculate_risk_score(cls, v, values):
@@ -125,21 +117,22 @@ def tags_calculate_risk_score(cls, v, values):
                 f"\n  Expected risk_score={calculated_risk_score}, found risk_score={int(v)}: {values['name']}")
         return v
 
-    @root_validator
-    def tags_observable(cls, values):
+
+    @model_validator(mode="after")
+    def tags_observable(self):
         valid_roles = SES_OBSERVABLE_ROLE_MAPPING.keys()
         valid_types = SES_OBSERVABLE_TYPE_MAPPING.keys()
 
-        for value in values["observable"]:
+        for value in self.observable:
             if value['type'] in valid_types:
-                if 'Splunk Behavioral Analytics' in values["product"]:
+                if 'Splunk Behavioral Analytics' in self.product:
                     continue
 
                 if 'role' not in value:
-                    raise ValueError('Observable role is missing for ' + values["name"])
+                    raise ValueError('Observable role is missing')
                 for role in value['role']:
                     if role not in valid_roles:
-                        raise ValueError('Observable role ' + role + ' not valid for ' + values["name"] + '. valid options are ' + str(valid_roles))
+                        raise ValueError(f'Observable role ' + role + f' not valid. Valid options are {str(valid_roles)}')
             else:
-                raise ValueError('Observable type ' + value['type'] + ' not valid for ' + values["name"] + '. valid options are ' + str(valid_types))
-        return values
\ No newline at end of file
+                raise ValueError(f'Observable type ' + value['type'] + f' not valid. 
Valid options are {str(valid_types)}') + return self \ No newline at end of file diff --git a/contentctl/objects/story.py b/contentctl/objects/story.py index 53daa655..05a36fb8 100644 --- a/contentctl/objects/story.py +++ b/contentctl/objects/story.py @@ -1,49 +1,147 @@ -import string -import uuid -import requests - -from pydantic import BaseModel, validator, ValidationError -from datetime import datetime +from __future__ import annotations +from typing import TYPE_CHECKING,List +from contentctl.objects.story_tags import StoryTags +from pydantic import Field, model_serializer,computed_field, model_validator +import re +if TYPE_CHECKING: + from contentctl.objects.detection import Detection + from contentctl.objects.investigation import Investigation + from contentctl.objects.baseline import Baseline + from contentctl.objects.security_content_object import SecurityContentObject -from contentctl.objects.story_tags import StoryTags -from contentctl.helper.link_validator import LinkValidator -from contentctl.objects.enums import SecurityContentType + + + + + +#from contentctl.objects.investigation import Investigation + + + class Story(SecurityContentObject): - # story spec - #name: str - #id: str - #version: int - #date: str - #author: str - #description: str - #contentType: SecurityContentType = SecurityContentType.stories - narrative: str - check_references: bool = False #Validation is done in order, this field must be defined first - references: list - tags: StoryTags + narrative: str = Field(...) + tags: StoryTags = Field(...) # enrichments - detection_names: list = None - investigation_names: list = None - baseline_names: list = None - author_company: str = None - author_name: str = None - detections: list = None - investigations: list = None - - - # Allow long names for macros - @validator('name',check_fields=False) - def name_max_length(cls, v): - #if len(v) > 67: - # raise ValueError('name is longer then 67 chars: ' + v) - return v - - @validator('narrative') - def encode_error(cls, v, values, field): - return SecurityContentObject.free_text_field_valid(cls,v,values,field) - - @validator('references') - def references_check(cls, v, values): - return LinkValidator.SecurityContentObject_validate_references(v, values) \ No newline at end of file + #detection_names: List[str] = [] + #investigation_names: List[str] = [] + #baseline_names: List[str] = [] + + # These are updated when detection and investigation objects are created. 
+ # Specifically in the model_post_init functions + detections:List[Detection] = [] + investigations: List[Investigation] = [] + baselines: List[Baseline] = [] + + + def storyAndInvestigationNamesWithApp(self, app_name:str)->List[str]: + return [f"{app_name} - {name} - Rule" for name in self.detection_names] + \ + [f"{app_name} - {name} - Response Task" for name in self.investigation_names] + + @model_serializer + def serialize_model(self): + #Call serializer for parent + super_fields = super().serialize_model() + + #All fields custom to this model + model= { + "narrative": self.narrative, + "tags": self.tags.model_dump(), + "detection_names": self.detection_names, + "investigation_names": self.investigation_names, + "baseline_names": self.baseline_names, + "author_company": self.author_company, + "author_name":self.author_name + } + detections = [] + for detection in self.detections: + new_detection = { + "name":detection.name, + "source":detection.source, + "type":detection.type + } + if self.tags.mitre_attack_enrichments is not None: + new_detection['tags'] = {"mitre_attack_enrichments": [{"mitre_attack_technique": enrichment.mitre_attack_technique} for enrichment in detection.tags.mitre_attack_enrichments]} + detections.append(new_detection) + + model['detections'] = detections + #Combine fields from this model with fields from parent + super_fields.update(model) + + #return the model + return super_fields + + @model_validator(mode="after") + def setTagsFields(self): + + enrichments = [] + for detection in self.detections: + enrichments.extend(detection.tags.mitre_attack_enrichments) + self.tags.mitre_attack_enrichments = list(set(enrichments)) + + + tactics = [] + for enrichment in self.tags.mitre_attack_enrichments: + tactics.extend(enrichment.mitre_attack_tactics) + self.tags.mitre_attack_tactics = set(tactics) + + + + datamodels = [] + for detection in self.detections: + datamodels.extend(detection.datamodel) + self.tags.datamodels = set(datamodels) + + + + kill_chain_phases = [] + for detection in self.detections: + kill_chain_phases.extend(detection.tags.kill_chain_phases) + self.tags.kill_chain_phases = set(kill_chain_phases) + + return self + + + @computed_field + @property + def author_name(self)->str: + match_author = re.search(r'^([^,]+)', self.author) + if match_author is None: + return 'no' + else: + return match_author.group(1) + + @computed_field + @property + def author_company(self)->str: + match_company = re.search(r',\s?(.*)$', self.author) + if match_company is None: + return 'no' + else: + return match_company.group(1) + + @computed_field + @property + def author_email(self)->str: + return "-" + + @computed_field + @property + def detection_names(self)->List[str]: + return [detection.name for detection in self.detections] + + @computed_field + @property + def investigation_names(self)->List[str]: + return [investigation.name for investigation in self.investigations] + + @computed_field + @property + def baseline_names(self)->List[str]: + return [baseline.name for baseline in self.baselines] + + + + + \ No newline at end of file diff --git a/contentctl/objects/story_tags.py b/contentctl/objects/story_tags.py index 1cbcd86d..859a8a98 100644 --- a/contentctl/objects/story_tags.py +++ b/contentctl/objects/story_tags.py @@ -1,38 +1,51 @@ +from __future__ import annotations +from pydantic import BaseModel, Field, model_serializer, ConfigDict +from typing import List,Set,Optional, Annotated +from enum import Enum -from pydantic import BaseModel, validator, ValidationError 
from contentctl.objects.mitre_attack_enrichment import MitreAttackEnrichment -from contentctl.objects.enums import StoryCategory +from contentctl.objects.enums import StoryCategory, DataModel, KillChainPhase, SecurityContentProductName + + +class StoryUseCase(str,Enum): + FRAUD_DETECTION = "Fraud Detection" + COMPLIANCE = "Compliance" + APPLICATION_SECURITY = "Application Security" + SECURITY_MONITORING = "Security Monitoring" + ADVANCED_THREAD_DETECTION = "Advanced Threat Detection" class StoryTags(BaseModel): - # story spec - name: str - analytic_story: str - category: list[StoryCategory] - product: list - usecase: str - - # enrichment - mitre_attack_enrichments: list[MitreAttackEnrichment] = [] - mitre_attack_tactics: list = [] - datamodels: list = [] - kill_chain_phases: list = [] - - - @validator('product') - def tags_product(cls, v, values): - valid_products = [ - "Splunk Enterprise", "Splunk Enterprise Security", "Splunk Cloud", - "Splunk Security Analytics for AWS", "Splunk Behavioral Analytics" - ] - - for value in v: - if value not in valid_products: - raise ValueError('product is not valid for ' + values['name'] + '. valid products are ' + str(valid_products)) - return v - - @validator('category') - def category_validate(cls,v,values): - if len(v) == 0: - raise ValueError(f"Error for Story '{values['name']}' - at least one 'category' MUST be provided.") - return v \ No newline at end of file + model_config = ConfigDict(extra='forbid', use_enum_values=True) + category: List[StoryCategory] = Field(...,min_length=1) + product: List[SecurityContentProductName] = Field(...,min_length=1) + usecase: StoryUseCase = Field(...) + + # enrichment + mitre_attack_enrichments: Optional[List[MitreAttackEnrichment]] = None + mitre_attack_tactics: Optional[Set[Annotated[str, Field(pattern="^T\d{4}(.\d{3})?$")]]] = None + datamodels: Optional[Set[DataModel]] = None + kill_chain_phases: Optional[Set[KillChainPhase]] = None + cve: List[Annotated[str, "^CVE-[1|2][0-9]{3}-[0-9]+$"]] = [] + group: List[str] = Field([], description="A list of groups who leverage the techniques list in this Analytic Story.") + + def getCategory_conf(self) -> str: + #if len(self.category) > 1: + # print("Story with more than 1 category. 
We can only have 1 category, fix it!") + return list(self.category)[0] + + @model_serializer + def serialize_model(self): + #no super to call + return { + "category": list(self.category), + "product": list(self.product), + "usecase": self.usecase, + "mitre_attack_enrichments": self.mitre_attack_enrichments, + "mitre_attack_tactics": list(self.mitre_attack_tactics) if self.mitre_attack_tactics is not None else None, + "datamodels": list(self.datamodels) if self.datamodels is not None else None, + "kill_chain_phases": list(self.kill_chain_phases) if self.kill_chain_phases is not None else None + } + + + \ No newline at end of file diff --git a/contentctl/objects/test_config.py b/contentctl/objects/test_config.py deleted file mode 100644 index ca2dcd24..00000000 --- a/contentctl/objects/test_config.py +++ /dev/null @@ -1,630 +0,0 @@ -# Needed for a staticmethod to be able to return an instance of the class it belongs to -from __future__ import annotations - -import git -import validators -import pathlib -import yaml -import os -from pydantic import BaseModel, validator, root_validator, Extra, Field -from typing import Union -import re -import docker -import docker.errors - - -from contentctl.objects.enums import ( - PostTestBehavior, - DetectionTestingMode, - DetectionTestingTargetInfrastructure, -) - -from contentctl.objects.app import App, ENVIRONMENT_PATH_NOT_SET -from contentctl.helper.utils import Utils - - -ALWAYS_PULL_REPO = False -PREVIOUSLY_ALLOCATED_PORTS: set[int] = set() - -LOCAL_APP_DIR = pathlib.Path("apps") -CONTAINER_APP_DIR = pathlib.Path("/tmp/apps") - - -def getTestConfigFromYMLFile(path: pathlib.Path): - try: - with open(path, "r") as config_handle: - cfg = yaml.safe_load(config_handle) - return TestConfig.parse_obj(cfg) - - except Exception as e: - print(f"Error loading test configuration file '{path}': {str(e)}") - - -class Infrastructure(BaseModel, extra=Extra.forbid, validate_assignment=True): - splunk_app_username: Union[str, None] = Field( - default="admin", title="The name of the user for testing" - ) - splunk_app_password: Union[str, None] = Field( - default="password", title="Password for logging into Splunk Server" - ) - instance_address: str = Field( - default="127.0.0.1", - title="Domain name of IP address of Splunk server to be used for testing. 
Do NOT use a protocol, like http(s):// or 'localhost'", - ) - - instance_name: str = Field( - default="Splunk_Server_Name", - title="Template to be used for naming the Splunk Test Containers or referring to Test Servers.", - ) - - hec_port: int = Field(default=8088, title="HTTP Event Collector Port") - web_ui_port: int = Field(default=8000, title="Web UI Port") - api_port: int = Field(default=8089, title="REST API Port") - - @staticmethod - def get_infrastructure_containers(num_containers:int=1, splunk_app_username:str="admin", splunk_app_password:str="password", instance_name_template="splunk_contentctl_{index}")->list[Infrastructure]: - containers:list[Infrastructure] = [] - if num_containers < 0: - raise ValueError(f"Error - you must specifiy 1 or more containers, not {num_containers}.") - - #Get the starting ports - i = Infrastructure() #Instantiate to get the base port numbers - - for index in range(0, num_containers): - containers.append(Infrastructure(splunk_app_username=splunk_app_username, - splunk_app_password=splunk_app_password, - instance_name=instance_name_template.format(index=index), - hec_port=i.hec_port+(index*2), - web_ui_port=i.web_ui_port+index, - api_port=i.api_port+(index*2))) - - - return containers - - @validator("instance_name") - def validate_instance_name(cls,v,values): - if not re.fullmatch("[a-zA-Z0-9][a-zA-Z0-9_.-]*", v): - raise ValueError(f"The instance_name '{v}' is not valid. Please use an instance name which matches the regular expression '[a-zA-Z0-9][a-zA-Z0-9_.-]*'") - else: - return v - - @validator("instance_address") - def validate_instance_address(cls, v, values): - try: - if v.startswith("http"): - raise (Exception("should not begin with http")) - is_ipv4 = validators.ipv4(v) - if bool(is_ipv4): - return v - is_domain_name = validators.domain(v) - if bool(is_domain_name): - import socket - - try: - socket.gethostbyname(v) - return v - except Exception as e: - pass - raise (Exception("DNS Lookup failed")) - raise (Exception(f"not an IPV4 address or a domain name")) - except Exception as e: - raise ( - Exception( - f"Error, failed to validate instance_address '{v}': {str(e)}" - ) - ) - - - - @validator("splunk_app_password") - def validate_splunk_app_password(cls, v): - if v == None: - # No app password was provided, so generate one - v = Utils.get_random_password() - else: - MIN_PASSWORD_LENGTH = 6 - if len(v) < MIN_PASSWORD_LENGTH: - raise ( - ValueError( - f"Password is less than {MIN_PASSWORD_LENGTH} characters long. This password is extremely weak, please change it." - ) - ) - return v - - @validator("hec_port", "web_ui_port", "api_port", each_item=True) - def validate_ports_range(cls, v): - if v < 2: - raise ( - ValueError( - f"Error, invalid Port number. Port must be between 2-65535: {v}" - ) - ) - elif v > 65535: - raise ( - ValueError( - f"Error, invalid Port number. Port must be between 2-65535: {v}" - ) - ) - return v - - @validator("hec_port", "web_ui_port", "api_port", each_item=False) - def validate_ports_overlap(cls, v): - - if type(v) is not list: - # Otherwise this throws error when we update a single field - return v - if len(set(v)) != len(v): - raise (ValueError(f"Duplicate ports detected: [{v}]")) - - return v - -class InfrastructureConfig(BaseModel, extra=Extra.forbid, validate_assignment=True): - infrastructure_type: DetectionTestingTargetInfrastructure = Field( - default=DetectionTestingTargetInfrastructure.container, - title=f"Control where testing should be launched. 
Choose one of {DetectionTestingTargetInfrastructure._member_names_}", - ) - - persist_and_reuse_container:bool = True - - full_image_path: str = Field( - default="registry.hub.docker.com/splunk/splunk:latest", - title="Full path to the container image to be used", - ) - infrastructures: list[Infrastructure] = [] - - - @validator("infrastructure_type") - def validate_infrastructure_type(cls, v, values): - if v == DetectionTestingTargetInfrastructure.server: - # No need to validate that the docker client is available - return v - elif v == DetectionTestingTargetInfrastructure.container: - # we need to make sure we can actually get the docker client from the environment - try: - docker.client.from_env() - except Exception as e: - raise ( - Exception( - f"Error, failed to get docker client. Is Docker Installed and running " - f"and are docker environment variables set properly? Error:\n\t{str(e)}" - ) - ) - return v - - - - - @validator("full_image_path") - def validate_full_image_path(cls, v, values): - if ( - values.get("infrastructure_type", None) - == DetectionTestingTargetInfrastructure.server.value - ): - print( - f"No need to validate target image path {v}, testing target is preconfigured server" - ) - return v - # This behavior may change if we start supporting local/offline containers and - # the logic to build them - if ":" not in v: - raise ( - ValueError( - f"Error, the image_name {v} does not include a tag. A tagged container MUST be included to ensure consistency when testing" - ) - ) - - # Check to make sure we have the latest version of the image - # We have this as a wrapped, nested try/except because if we - # encounter some error in trying to get the latest version, but - # we do have some version already, we will allow the test to continue. - # For example, this may occur if an image has been previously downloaded, - # but the server no longer has internet connectivity and can't get the - # image again. in this case, don't fail - continue with the test - try: - try: - # connectivity to docker server is validated previously - client = docker.from_env() - print( - f"Getting the latest version of the container image: {v}...", - end="", - flush=True, - ) - client.images.pull(v, platform="linux/amd64") - print("done") - except docker.errors.APIError as e: - print("error") - if e.is_client_error(): - if "invalid reference format" in str(e.explanation): - simple_explanation = f"The format of the docker image reference is incorrect. Please use a valid image reference" - else: - simple_explanation = ( - f"The most likely cause of this error is that the image/tag " - "does not exist or it is stored in a private repository and you are not logged in." - ) - - elif e.is_server_error(): - simple_explanation = ( - f"The mostly likely cause is that the server cannot be reached. " - "Please ensure that the server hosting your docker image is available " - "and you have internet access, if required." - ) - - else: - simple_explanation = f"Unable to pull image {v} for UNKNOWN reason. Please consult the detailed error below." - - verbose_explanation = e.explanation - - raise ( - ValueError( - f"Error Pulling Docker Image '{v}'\n - EXPLANATION: {simple_explanation} (full error text: '{verbose_explanation}'" - ) - ) - except Exception as e: - print("error") - raise (ValueError(f"Uknown error pulling Docker Image '{v}': {str(e)}")) - - except Exception as e: - # There was some exception that prevented us from getting the latest version - # of the image. 
However, if we already have it, use the current version and - # down fully raise the exception - just use it - client = docker.from_env() - try: - client.api.inspect_image(v) - print(e) - print( - f"We will default to using the version of the image {v} which has " - "already been downloaded to this machine. Please note that it may be out of date." - ) - - except Exception as e2: - raise ( - ValueError( - f"{str(e)}Image is not previously cached, so we could not use an old version." - ) - ) - - return v - - @validator("infrastructures", always=True) - def validate_infrastructures(cls, v, values): - MAX_RECOMMENDED_CONTAINERS_BEFORE_WARNING = 2 - if values.get("infrastructure_type",None) == DetectionTestingTargetInfrastructure.container and len(v) == 0: - v = [Infrastructure()] - - if len(v) < 1: - #print("Fix number of infrastructure validation later") - return v - raise ( - ValueError( - f"Error validating infrastructures. Test must be run with AT LEAST 1 infrastructure, not {len(v)}" - ) - ) - if (values.get("infrastructure_type", None) == DetectionTestingTargetInfrastructure.container.value) and len(v) > MAX_RECOMMENDED_CONTAINERS_BEFORE_WARNING: - print( - f"You requested to run with [{v}] containers which may use a very large amount of resources " - "as they all run in parallel. The maximum suggested number of parallel containers is " - f"[{MAX_RECOMMENDED_CONTAINERS_BEFORE_WARNING}]. We will do what you asked, but be warned!" - ) - return v - - - @validator("infrastructures", each_item=False) - def validate_ports_overlap(cls, v, values): - ports = set() - if values.get("infrastructure_type", None) == DetectionTestingTargetInfrastructure.server.value: - #ports are allowed to overlap, they are on different servers - return v - - if len(v) == 0: - raise ValueError("Error, there must be at least one test infrastructure defined in infrastructures.") - for infrastructure in v: - for k in ["hec_port", "web_ui_port", "api_port"]: - if getattr(infrastructure, k) in ports: - raise ValueError(f"Port {getattr(infrastructure, k)} used more than once in container infrastructure ports") - ports.add(getattr(infrastructure, k)) - return v - -class VersionControlConfig(BaseModel, extra=Extra.forbid, validate_assignment=True): - repo_path: str = Field(default=".", title="Path to the root of your app") - repo_url: str = Field( - default="https://github.com/your_organization/your_repo", - title="HTTP(s) path to the repo for repo_path. 
If this field is blank, it will be inferred from the repo", - ) - target_branch: str = Field(default="main", title="Main branch of the repo or target of a Pull Request/Merge Request.") - test_branch: str = Field(default="main", title="Branch of the repo to be tested, if applicable.") - commit_hash: Union[str,None] = Field(default=None, title="Commit hash of the repo state to be tested, if applicable") - pr_number: Union[int,None] = Field(default=None, title="The number of the PR to test") - - @validator('repo_path') - def validate_repo_path(cls,v): - print(f"checking repo path '{v}'") - try: - path = pathlib.Path(v) - except Exception as e: - - raise(ValueError(f"Error, the provided path is is not a valid path: '{v}'")) - - try: - r = git.Repo(path) - except Exception as e: - - raise(ValueError(f"Error, the provided path is not a valid git repo: '{path}'")) - - try: - - if ALWAYS_PULL_REPO: - r.remotes.origin.pull() - except Exception as e: - raise ValueError(f"Error pulling git repository {v}: {str(e)}") - print("repo path looks good") - return v - - @validator('repo_url') - def validate_repo_url(cls, v, values): - #First try to get the value from the repo - try: - remotes = git.Repo(values['repo_path']).remotes - except Exception as e: - raise ValueError(f"Error - repo at {values['repo_path']} has no remotes. Repo must be tracked in a remote git repo.") - - try: - remote_url_from_repo = remotes.origin.url - except Exception as e: - raise(ValueError(f"Error reading remote_url from the repo located at '{values['repo_path']}'")) - - if v is not None and remote_url_from_repo != v: - raise(ValueError(f"The url of the remote repo supplied in the config file {v} does not "\ - f"match the value read from the repository at {values['repo_path']}, {remote_url_from_repo}")) - - if v is None: - v = remote_url_from_repo - - #Ensure that the url is the proper format - # try: - # if bool(validators.url(v)) == False: - # raise(Exception) - # except: - # raise(ValueError(f"Error validating the repo_url. The url is not valid: {v}")) - - return v - - @validator('target_branch') - def valid_target_branch(cls, v, values): - if v is None: - print(f"target_branch is not supplied. 
Inferring from '{values['repo_path']}'...",end='') - - target_branch = Utils.get_default_branch_name(values['repo_path'], values['repo_url']) - print(f"target_branch name '{target_branch}' inferred'") - #continue with the validation - v = target_branch - - try: - Utils.validate_git_branch_name(values['repo_path'],values['repo_url'], v) - except Exception as e: - raise ValueError(f"Error validating target_branch: {str(e)}") - return v - - @validator('test_branch') - def validate_test_branch(cls, v, values): - if v is None: - print(f"No test_branch provided, so we will default to using the target_branch '{values['target_branch']}'") - v = values['target_branch'] - try: - Utils.validate_git_branch_name(values['repo_path'],values['repo_url'], v) - except Exception as e: - raise ValueError(f"Error validating test_branch: {str(e)}") - - r = git.Repo(values.get("repo_path")) - try: - if r.active_branch.name != v: - print(f"We are trying to test {v} but the current active branch is {r.active_branch}") - print(f"Checking out {v}") - r.git.checkout(v) - except Exception as e: - raise ValueError(f"Error checking out test_branch '{v}': {str(e)}") - return v - - @validator('commit_hash') - def validate_commit_hash(cls, v, values): - try: - #We can a hash with this function too - Utils.validate_git_hash(values['repo_path'],values['repo_url'], v, values['test_branch']) - except Exception as e: - raise ValueError(f"Error validating commit_hash '{v}': {str(e)}") - return v - - @validator('pr_number') - def validate_pr_number(cls, v, values): - if v == None: - return v - - hash = Utils.validate_git_pull_request(values['repo_path'], v) - - #Ensure that the hash is equal to the one in the config file, if it exists. - if values['commit_hash'] is None: - values['commit_hash'] = hash - else: - if values['commit_hash'] != hash: - raise(ValueError(f"commit_hash specified in configuration was {values['commit_hash']}, but commit_hash"\ - f" from pr_number {v} was {hash}. These must match. If you're testing"\ - " a PR, you probably do NOT want to provide the commit_hash in the configuration file "\ - "and always want to test the head of the PR. This will be done automatically if you do "\ - "not provide the commit_hash.")) - - return v - - -class TestConfig(BaseModel, extra=Extra.forbid, validate_assignment=True): - - version_control_config: Union[VersionControlConfig,None] = VersionControlConfig() - - infrastructure_config: InfrastructureConfig = Field( - default=InfrastructureConfig(), - title=f"The infrastructure for testing to be run on", - ) - - - post_test_behavior: PostTestBehavior = Field( - default=PostTestBehavior.pause_on_failure, - title=f"What to do after a test has completed. Choose one of {PostTestBehavior._member_names_}", - ) - mode: DetectionTestingMode = Field( - default=DetectionTestingMode.all, - title=f"Control which detections should be tested. 
Choose one of {DetectionTestingMode._member_names_}", - ) - detections_list: Union[list[str], None] = Field( - default=None, title="List of paths to detections which should be tested" - ) - - - splunkbase_username: Union[str, None] = Field( - default=None, - title="The username for logging into Splunkbase in case apps must be downloaded", - ) - splunkbase_password: Union[str, None] = Field( - default=None, - title="The password for logging into Splunkbase in case apps must be downloaded", - ) - apps: list[App] = Field( - default=App.get_default_apps(), - title="A list of all the apps to be installed on each container", - ) - enable_integration_testing: bool = Field( - default=False, - title="Whether integration testing should be enabled, in addition to unit testing (requires a configured Splunk" - " instance with ES installed)" - ) - - - - - - - - - - # Ensure that at least 1 of test_branch, commit_hash, and/or pr_number were passed. - # Otherwise, what are we testing?? - # @root_validator(pre=False) - def ensure_there_is_something_to_test(cls, values): - if 'test_branch' not in values and 'commit_hash' not in values and'pr_number' not in values: - if 'mode' in values and values['mode'] == DetectionTestingMode.changes: - raise(ValueError(f"Under mode [{DetectionTestingMode.changes}], 'test_branch', 'commit_hash', and/or 'pr_number' must be defined so that we know what to test.")) - - return values - - - - # presumably the post test behavior is validated by the enum? - # presumably the mode is validated by the enum? - - @validator("detections_list", always=True) - def validate_detections_list(cls, v, values): - # A detections list can only be provided if the mode is selected - # otherwise, we must throw an error - - # First check the mode - if values["mode"] != DetectionTestingMode.selected: - if v is not None: - # We intentionally raise an error even if the list is an empty list - raise ( - ValueError( - f"For Detection Testing Mode '{values['mode']}', " - f"'detections_list' MUST be none. Instead, it was a list containing {len(v)} detections." - ) - ) - return v - - # Mode is DetectionTestingMode.selected - verify the paths of all the detections - all_errors = [] - if v == None: - raise ( - ValueError( - f"mode is '{DetectionTestingMode.selected}', but detections_list was not provided." - ) - ) - for detection in v: - try: - if not pathlib.Path(detection).exists(): - all_errors.append(detection) - except Exception as e: - all_errors.append( - f"Unexpected error validating path '{detection}': {str(e)}" - ) - if len(all_errors): - joined_errors = "\n\t".join(all_errors) - raise ( - ValueError( - f"Paths to the following detections in 'detections_list' " - f"were invalid: \n\t{joined_errors}" - ) - ) - - return v - - - - - - - - @validator("splunkbase_username") - def validate_splunkbase_username(cls, v): - return v - - @validator("splunkbase_password") - def validate_splunkbase_password(cls, v, values): - if values["splunkbase_username"] == None: - return v - elif (v == None and values["splunkbase_username"] != None) or ( - v != None and values["splunkbase_username"] == None - ): - raise ( - ValueError( - "splunkbase_username OR splunkbase_password " - "was provided, but not both. 
You must provide" - " neither of these value or both, but not just " - "1 of them" - ) - ) - - else: - return v - - @validator("apps",) - def validate_apps(cls, v, values): - - - app_errors = [] - - # ensure that the splunkbase username and password are provided - username = values["splunkbase_username"] - password = values["splunkbase_password"] - app_directory = LOCAL_APP_DIR - try: - os.makedirs(LOCAL_APP_DIR, exist_ok=True) - except Exception as e: - raise ( - Exception(f"Error: When trying to create {CONTAINER_APP_DIR}: {str(e)}") - ) - - for app in v: - if app.environment_path != ENVIRONMENT_PATH_NOT_SET: - #Avoid re-configuring the apps that have already been configured. - continue - - try: - app.configure_app_source_for_container( - username, password, app_directory, CONTAINER_APP_DIR - ) - except Exception as e: - error_string = f"Unable to prepare app '{app.title}': {str(e)}" - app_errors.append(error_string) - - if len(app_errors) != 0: - error_string = "\n\t".join(app_errors) - raise (ValueError(f"Error preparing apps to install:\n\t{error_string}")) - - return v - - \ No newline at end of file diff --git a/contentctl/objects/unit_test.py b/contentctl/objects/unit_test.py index f10d622f..93520f7a 100644 --- a/contentctl/objects/unit_test.py +++ b/contentctl/objects/unit_test.py @@ -1,4 +1,9 @@ - +from __future__ import annotations +from pydantic import Field +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from contentctl.objects.unit_test_attack_data import UnitTestAttackData + from contentctl.objects.unit_test_result import UnitTestResult from typing import Union @@ -20,7 +25,7 @@ class UnitTest(BaseTest): # contentType: SecurityContentType = SecurityContentType.unit_tests # The test type (unit) - test_type: TestType = Field(TestType.UNIT, const=True) + test_type: TestType = Field(TestType.UNIT) # The condition to check if the search was successful pass_condition: Union[str, None] = None diff --git a/contentctl/objects/unit_test_attack_data.py b/contentctl/objects/unit_test_attack_data.py index 8bb38697..7a4d5d8a 100644 --- a/contentctl/objects/unit_test_attack_data.py +++ b/contentctl/objects/unit_test_attack_data.py @@ -1,22 +1,13 @@ -from pydantic import BaseModel, validator, ValidationError -from contentctl.helper.utils import Utils -from typing import Union +from __future__ import annotations +from pydantic import BaseModel, HttpUrl, FilePath, Field +from typing import Union, Optional class UnitTestAttackData(BaseModel): - file_name: str = None - data: str = None - source: str = None - sourcetype: str = None - update_timestamp: bool = None - custom_index: str = None - host: str = None - - @validator("data", always=True) - def validate_data(cls, v, values): - return v - try: - Utils.verify_file_exists(v) - except Exception as e: - raise (ValueError(f"Cannot find file {v}: {str(e)}")) - return v + data: Union[HttpUrl, FilePath] = Field(...) + # TODO - should source and sourcetype should be mapped to a list + # of supported source and sourcetypes in a given environment? + source: str = Field(...) + sourcetype: str = Field(...) 
+ custom_index: Optional[str] = None + host: Optional[str] = None \ No newline at end of file diff --git a/contentctl/objects/unit_test_baseline.py b/contentctl/objects/unit_test_baseline.py index a3573772..9ba49336 100644 --- a/contentctl/objects/unit_test_baseline.py +++ b/contentctl/objects/unit_test_baseline.py @@ -1,6 +1,6 @@ -from pydantic import BaseModel, validator, ValidationError +from pydantic import BaseModel from typing import Union class UnitTestBaseline(BaseModel): diff --git a/contentctl/objects/unit_test_old.py b/contentctl/objects/unit_test_old.py index 83154f47..3858e01a 100644 --- a/contentctl/objects/unit_test_old.py +++ b/contentctl/objects/unit_test_old.py @@ -1,9 +1,10 @@ -from pydantic import BaseModel, validator, ValidationError +from __future__ import annotations +from pydantic import BaseModel -from contentctl.objects.unit_test import UnitTest +from contentctl.objects.unit_test_ssa import UnitTestSSA class UnitTestOld(BaseModel): name: str - tests: list[UnitTest] \ No newline at end of file + tests: list[UnitTestSSA] \ No newline at end of file diff --git a/contentctl/objects/unit_test_result.py b/contentctl/objects/unit_test_result.py index 40924790..8c40da10 100644 --- a/contentctl/objects/unit_test_result.py +++ b/contentctl/objects/unit_test_result.py @@ -1,10 +1,12 @@ -from typing import Union +from __future__ import annotations +from typing import Union,TYPE_CHECKING from splunklib.data import Record - -from contentctl.objects.test_config import Infrastructure from contentctl.objects.base_test_result import BaseTestResult, TestResultStatus +if TYPE_CHECKING: + from contentctl.objects.config import Infrastructure + FORCE_TEST_FAILURE_FOR_MISSING_OBSERVABLE = False NO_SID = "Testing Failed, NO Search ID" diff --git a/contentctl/objects/unit_test_ssa.py b/contentctl/objects/unit_test_ssa.py new file mode 100644 index 00000000..150b9efe --- /dev/null +++ b/contentctl/objects/unit_test_ssa.py @@ -0,0 +1,31 @@ +from __future__ import annotations +from typing import Optional +from pydantic import BaseModel, Field +from pydantic import Field + + +class UnitTestAttackDataSSA(BaseModel): + file_name:Optional[str] = None + data: str = Field(...) + # TODO - should source and sourcetype should be mapped to a list + # of supported source and sourcetypes in a given environment? + source: str = Field(...) + + sourcetype: Optional[str] = None + + +class UnitTestSSA(BaseModel): + """ + A unit test for a detection + """ + name: str + + # The attack data to be ingested for the unit test + attack_data: list[UnitTestAttackDataSSA] = Field(...) + + + + + + + diff --git a/contentctl/output/api_json_output.py b/contentctl/output/api_json_output.py index 400c887e..d81b8162 100644 --- a/contentctl/output/api_json_output.py +++ b/contentctl/output/api_json_output.py @@ -1,174 +1,246 @@ import os import json - +import pathlib from contentctl.output.json_writer import JsonWriter from contentctl.objects.enums import SecurityContentType +from contentctl.objects.abstract_security_content_objects.security_content_object_abstract import ( + SecurityContentObject_Abstract, +) -# Maximum Lambda Request Response Limit is 6MB -# https://docs.aws.amazon.com/lambda/latest/dg/gettingstarted-limits.html -# Note that if you are not using AWS Lambda, this file size may be increased. 
-AWS_LAMBDA_LIMIT = 1024 * 1024 * 6 - 1 class ApiJsonOutput: + def writeObjects( - self, objects: list, output_path: str, type: SecurityContentType = None + self, + objects: list[SecurityContentObject_Abstract], + output_path: pathlib.Path, + app_label:str = "ESCU", + contentType: SecurityContentType = None ) -> None: - if type == SecurityContentType.detections: - obj_array = [] - for detection in objects: - detection.id = str(detection.id) - obj_array.append( - detection.dict( - exclude_none=True, - exclude={ - "deprecated": True, - "experimental": True, - "annotations": True, - "risk": True, - "playbooks": True, - "baselines": True, - "mappings": True, - "test": True, - "deployment": True, - "type": True, - "status": True, - "data_source": True, - "tests": True, - "cve_enrichment": True, - "file_path": True, - "tags": { - "file_path": True, - "required_fields": True, - "confidence": True, - "impact": True, - "product": True, - "cve": True, - }, - }, + """#Serialize all objects + try: + for obj in objects: + + serialized_objects.append(obj.model_dump()) + except Exception as e: + raise Exception(f"Error serializing object with name '{obj.name}' and type '{type(obj).__name__}': '{str(e)}'") + """ + + if contentType == SecurityContentType.detections: + detections = [ + detection.model_dump( + include=set( + [ + "name", + "author", + "date", + "version", + "id", + "description", + "tags", + "search", + "how_to_implement", + "known_false_positives", + "references", + "datamodel", + "macros", + "lookups", + "source", + "nes_fields", + ] ) ) - - for detection in obj_array: - # Loop through each macro in the detection - for macro in detection["macros"]: - # Remove the 'file_path' key if it exists - macro.pop("file_path", None) - + for detection in objects + ] + #Only a subset of macro fields are required: + # for detection in detections: + # new_macros = [] + # for macro in detection.get("macros",[]): + # new_macro_fields = {} + # new_macro_fields["name"] = macro.get("name") + # new_macro_fields["definition"] = macro.get("definition") + # new_macro_fields["description"] = macro.get("description") + # if len(macro.get("arguments", [])) > 0: + # new_macro_fields["arguments"] = macro.get("arguments") + # new_macros.append(new_macro_fields) + # detection["macros"] = new_macros + # del() + + JsonWriter.writeJsonObject( - os.path.join(output_path, "detections.json"), {"detections": obj_array} + os.path.join(output_path, "detections.json"), "detections", detections ) - ### Code to be added to contentctl to ship filter macros to macros.json - - obj_array = [] - for detection in objects: - detection_dict = detection.dict() - if "macros" in detection_dict: - for macro in detection_dict["macros"]: - obj_array.append(macro) - - uniques: set[str] = set() - for obj in obj_array: - if obj.get("arguments", None) != None: - uniques.add(json.dumps(obj, sort_keys=True)) - else: - obj.pop("arguments") - uniques.add(json.dumps(obj, sort_keys=True)) - - obj_array = [] - for item in uniques: - obj_array.append(json.loads(item)) - - for obj in obj_array: - if "file_path" in obj: - del obj["file_path"] - + elif contentType == SecurityContentType.macros: + macros = [ + macro.model_dump(include=set(["definition", "description", "name"])) + for macro in objects + ] + for macro in macros: + for k in ["author", "date","version","id","references"]: + if k in macro: + del(macro[k]) JsonWriter.writeJsonObject( - os.path.join(output_path, "macros.json"), {"macros": obj_array} + os.path.join(output_path, "macros.json"), "macros", 
macros ) - elif type == SecurityContentType.stories: - obj_array = [] - for story in objects: - story.id = str(story.id) - obj_array.append( - story.dict( - exclude_none=True, - exclude={"investigations": True, "file_path": True}, + elif contentType == SecurityContentType.stories: + stories = [ + story.model_dump( + include=set( + [ + "name", + "author", + "date", + "version", + "id", + "description", + "narrative", + "references", + "tags", + "detections_names", + "investigation_names", + "baseline_names", + "detections", + ] ) ) + for story in objects + ] + # Only get certain fields from detections + for story in stories: + # Only use a small subset of fields from the detection + story["detections"] = [ + { + "name": detection["name"], + "source": detection["source"], + "type": detection["type"], + "tags": detection["tags"].get("mitre_attack_enrichments", []), + } + for detection in story["detections"] + ] + story["detection_names"] = [f"{app_label} - {name} - Rule" for name in story["detection_names"]] + JsonWriter.writeJsonObject( - os.path.join(output_path, "stories.json"), {"stories": obj_array} + os.path.join(output_path, "stories.json"), "stories", stories ) - elif type == SecurityContentType.baselines: - obj_array = [] - for baseline in objects: - baseline.id = str(baseline.id) - obj_array.append( - baseline.dict( - exclude={ - "deployment": True, - "check_references": True, - "file_path": True, - } + elif contentType == SecurityContentType.baselines: + try: + baselines = [ + baseline.model_dump( + include=set( + [ + "name", + "author", + "date", + "version", + "id", + "description", + "type", + "datamodel", + "search", + "how_to_implement", + "known_false_positives", + "references", + "tags", + ] + ) ) - ) + for baseline in objects + ] + except Exception as e: + print(e) + print('wait') JsonWriter.writeJsonObject( - os.path.join(output_path, "baselines.json"), {"baselines": obj_array} - ) + os.path.join(output_path, "baselines.json"), "baselines", baselines + ) - elif type == SecurityContentType.investigations: - obj_array = [] - for investigation in objects: - investigation.id = str(investigation.id) - obj_array.append( - investigation.dict( - exclude={ - "file_path": True, - } + elif contentType == SecurityContentType.investigations: + investigations = [ + investigation.model_dump( + include=set( + [ + "name", + "author", + "date", + "version", + "id", + "description", + "type", + "datamodel", + "search", + "how_to_implemnet", + "known_false_positives", + "references", + "inputs", + "tags", + "lowercase_name", + ] ) ) - + for investigation in objects + ] JsonWriter.writeJsonObject( os.path.join(output_path, "response_tasks.json"), - {"response_tasks": obj_array}, + "response_tasks", + investigations, ) - elif type == SecurityContentType.lookups: - obj_array = [] - for lookup in objects: - - obj_array.append( - lookup.dict( - exclude={ - "file_path": True, - } + elif contentType == SecurityContentType.lookups: + lookups = [ + lookup.model_dump( + include=set( + [ + "name", + "description", + "collection", + "fields_list", + "filename", + "default_match", + "match_type", + "min_matches", + "case_sensitive_match", + ] ) ) - + for lookup in objects + ] + for lookup in lookups: + for k in ["author","date","version","id","references"]: + if k in lookup: + del(lookup[k]) JsonWriter.writeJsonObject( - os.path.join(output_path, "lookups.json"), {"lookups": obj_array} + os.path.join(output_path, "lookups.json"), "lookups", lookups ) - elif type == SecurityContentType.deployments: - 
obj_array = [] - for deployment in objects: - deployment.id = str(deployment.id) - obj_array.append( - deployment.dict( - exclude_none=True, - exclude={ - "file_path": True, - }, + elif contentType == SecurityContentType.deployments: + deployments = [ + deployment.model_dump( + include=set( + [ + "name", + "author", + "date", + "version", + "id", + "description", + "scheduling", + "rba", + "tags" + ] ) ) - + for deployment in objects + ] + #references are not to be included, but have been deleted in the + #model_serialization logic JsonWriter.writeJsonObject( os.path.join(output_path, "deployments.json"), - {"deployments": obj_array}, - ) + "deployments", + deployments, + ) \ No newline at end of file diff --git a/contentctl/output/attack_nav_output.py b/contentctl/output/attack_nav_output.py index b71c5560..e6c3e35b 100644 --- a/contentctl/output/attack_nav_output.py +++ b/contentctl/output/attack_nav_output.py @@ -1,28 +1,39 @@ import os +from typing import List,Union +import pathlib - -from contentctl.objects.enums import SecurityContentType +from contentctl.objects.detection import Detection from contentctl.output.attack_nav_writer import AttackNavWriter class AttackNavOutput(): - def writeObjects(self, objects: list, output_path: str, type: SecurityContentType = None) -> None: - techniques = dict() + def writeObjects(self, detections: List[Detection], output_path: pathlib.Path) -> None: + techniques:dict[str,dict[str,Union[List[str],int]]] = {} + for detection in detections: + for tactic in detection.tags.mitre_attack_id: + if tactic not in techniques: + techniques[tactic] = {'score':0,'file_paths':[]} + + detection_url = f"https://github.com/splunk/security_content/blob/develop/detections/{detection.source}/{detection.file_path.name}" + techniques[tactic]['score'] += 1 + techniques[tactic]['file_paths'].append(detection_url) + + ''' for detection in objects: if detection.tags.mitre_attack_enrichments: for mitre_attack_enrichment in detection.tags.mitre_attack_enrichments: if not mitre_attack_enrichment.mitre_attack_id in techniques: techniques[mitre_attack_enrichment.mitre_attack_id] = { 'score': 1, - 'file_paths': ['https://github.com/splunk/security_content/blob/develop/detections/' + detection.source + '/' + self.convertNameToFileName(detection.name)] + 'file_paths': ['https://github.com/splunk/security_content/blob/develop/detections/' + detection.getSource() + '/' + self.convertNameToFileName(detection.name)] } else: techniques[mitre_attack_enrichment.mitre_attack_id]['score'] = techniques[mitre_attack_enrichment.mitre_attack_id]['score'] + 1 - techniques[mitre_attack_enrichment.mitre_attack_id]['file_paths'].append('https://github.com/splunk/security_content/blob/develop/detections/' + detection.source + '/' + self.convertNameToFileName(detection.name)) - - AttackNavWriter.writeAttackNavFile(techniques, os.path.join(output_path, 'coverage.json')) - + techniques[mitre_attack_enrichment.mitre_attack_id]['file_paths'].append('https://github.com/splunk/security_content/blob/develop/detections/' + detection.getSource() + '/' + self.convertNameToFileName(detection.name)) + ''' + AttackNavWriter.writeAttackNavFile(techniques, output_path / 'coverage.json') + def convertNameToFileName(self, name: str): file_name = name \ diff --git a/contentctl/output/attack_nav_writer.py b/contentctl/output/attack_nav_writer.py index 6ea6859f..78e8c514 100644 --- a/contentctl/output/attack_nav_writer.py +++ b/contentctl/output/attack_nav_writer.py @@ -1,7 +1,7 @@ import json - - +from typing import 
Union, List +import pathlib VERSION = "4.3" NAME = "Detection Coverage" DESCRIPTION = "security_content detection coverage" @@ -11,7 +11,7 @@ class AttackNavWriter(): @staticmethod - def writeAttackNavFile(mitre_techniques : dict, output_path : str) -> None: + def writeAttackNavFile(mitre_techniques : dict[str,dict[str,Union[List[str],int]]], output_path : pathlib.Path) -> None: max_count = 0 for technique_id in mitre_techniques.keys(): if mitre_techniques[technique_id]['score'] > max_count: diff --git a/contentctl/output/ba_yml_output.py b/contentctl/output/ba_yml_output.py index 2a6997ae..5b93743d 100644 --- a/contentctl/output/ba_yml_output.py +++ b/contentctl/output/ba_yml_output.py @@ -21,7 +21,7 @@ def writeObjectsInPlace(self, objects: list) -> None: YmlWriter.writeYmlFile(file_path, object) - def writeObjects(self, objects: list, output_path: str, type: SecurityContentType = None) -> None: + def writeObjects(self, objects: list, output_path: str, contentType: SecurityContentType = None) -> None: for obj in objects: file_name = "ssa___" + self.convertNameToFileName(obj.name, obj.tags) if self.isComplexBARule(obj.search): @@ -46,8 +46,9 @@ def writeObjects(self, objects: list, output_path: str, type: SecurityContentTyp } test_dict["tests"][0]["name"] = obj.name for count in range(len(test_dict["tests"][0]["attack_data"])): - a = urlparse(test_dict["tests"][0]["attack_data"][count]["data"]) + a = urlparse(str(test_dict["tests"][0]["attack_data"][count]["data"])) test_dict["tests"][0]["attack_data"][count]["file_name"] = os.path.basename(a.path) + test = UnitTestOld.parse_obj(test_dict) obj.test = test @@ -150,4 +151,3 @@ def convertNameToFileName(self, name: str, product: list): def isComplexBARule(self, search): return re.findall("stats|first_time_event|adaptive_threshold", search) - diff --git a/contentctl/output/conf_output.py b/contentctl/output/conf_output.py index 30e90210..db5d6e28 100644 --- a/contentctl/output/conf_output.py +++ b/contentctl/output/conf_output.py @@ -14,86 +14,91 @@ import json from contentctl.output.conf_writer import ConfWriter from contentctl.objects.enums import SecurityContentType -from contentctl.objects.config import Config +from contentctl.objects.config import build from requests import Session, post, get from requests.auth import HTTPBasicAuth -import pprint -class ConfOutput: - input_path: str - config: Config - output_path: pathlib.Path +class ConfOutput: + config: build - def __init__(self, input_path: str, config: Config): - self.input_path = input_path + def __init__(self, config: build): self.config = config - self.dist = pathlib.Path(self.input_path, self.config.build.path_root) - self.output_path = self.dist/self.config.build.name - self.output_path.mkdir(parents=True, exist_ok=True) - template_splunk_app_path = os.path.join(os.path.dirname(__file__), 'templates/splunk_app') - shutil.copytree(template_splunk_app_path, self.output_path, dirs_exist_ok=True) + + #Create the build directory if it does not exist + config.getPackageDirectoryPath().parent.mkdir(parents=True, exist_ok=True) + + #Remove the app path, if it exists + shutil.rmtree(config.getPackageDirectoryPath(), ignore_errors=True) + + #Copy all the template files into the app + shutil.copytree(config.getAppTemplatePath(), config.getPackageDirectoryPath()) - def getPackagePath(self, include_version:bool=False)->pathlib.Path: - if include_version: - return self.dist / f"{self.config.build.name}-{self.config.build.version}.tar.gz" - else: - return self.dist / 
f"{self.config.build.name}-latest.tar.gz" - def writeHeaders(self) -> None: - ConfWriter.writeConfFileHeader(self.output_path/'default/analyticstories.conf', self.config) - ConfWriter.writeConfFileHeader(self.output_path/'default/savedsearches.conf', self.config) - ConfWriter.writeConfFileHeader(self.output_path/'default/collections.conf', self.config) - ConfWriter.writeConfFileHeader(self.output_path/'default/es_investigations.conf', self.config) - ConfWriter.writeConfFileHeader(self.output_path/'default/macros.conf', self.config) - ConfWriter.writeConfFileHeader(self.output_path/'default/transforms.conf', self.config) - ConfWriter.writeConfFileHeader(self.output_path/'default/workflow_actions.conf', self.config) - ConfWriter.writeConfFileHeader(self.output_path/'default/app.conf', self.config) - ConfWriter.writeConfFileHeader(self.output_path/'default/content-version.conf', self.config) + def writeHeaders(self) -> set[pathlib.Path]: + written_files:set[pathlib.Path] = set() + for output_app_path in ['default/analyticstories.conf', + 'default/savedsearches.conf', + 'default/collections.conf', + 'default/es_investigations.conf', + 'default/macros.conf', + 'default/transforms.conf', + 'default/workflow_actions.conf', + 'default/app.conf', + 'default/content-version.conf']: + written_files.add(ConfWriter.writeConfFileHeader(pathlib.Path(output_app_path),self.config)) + + return written_files + + #The contents of app.manifest are not a conf file, but json. #DO NOT write a header for this file type, simply create the file - with open(self.output_path/'app.manifest', 'w') as f: + with open(self.config.getPackageDirectoryPath() / pathlib.Path('app.manifest'), 'w') as f: pass - def writeAppConf(self): - ConfWriter.writeConfFile(self.output_path/"default"/"app.conf", "app.conf.j2", self.config, [self.config.build] ) - ConfWriter.writeConfFile(self.output_path/"default"/"content-version.conf", "content-version.j2", self.config, [self.config.build] ) - ConfWriter.writeConfFile(self.output_path/"app.manifest", "app.manifest.j2", self.config, [self.config.build]) + def writeAppConf(self)->set[pathlib.Path]: + written_files:set[pathlib.Path] = set() + for output_app_path, template_name in [ ("default/app.conf", "app.conf.j2"), + ("default/content-version.conf", "content-version.j2")]: + written_files.add(ConfWriter.writeConfFile(pathlib.Path(output_app_path), + template_name, + self.config, + [self.config.app])) + + written_files.add(ConfWriter.writeManifestFile(pathlib.Path("app.manifest"), + "app.manifest.j2", + self.config, + [self.config.app])) + return written_files - def writeObjects(self, objects: list, type: SecurityContentType = None) -> None: + + def writeObjects(self, objects: list, type: SecurityContentType = None) -> set[pathlib.Path]: + written_files:set[pathlib.Path] = set() if type == SecurityContentType.detections: - ConfWriter.writeConfFile(self.output_path/'default/savedsearches.conf', - 'savedsearches_detections.j2', - self.config, objects) - - ConfWriter.writeConfFile(self.output_path/'default/analyticstories.conf', - 'analyticstories_detections.j2', - self.config, objects) - - ConfWriter.writeConfFile(self.output_path/'default/macros.conf', - 'macros_detections.j2', - self.config, objects) + for output_app_path, template_name in [ ('default/savedsearches.conf', 'savedsearches_detections.j2'), + ('default/analyticstories.conf', 'analyticstories_detections.j2')]: + written_files.add(ConfWriter.writeConfFile(pathlib.Path(output_app_path), + template_name, self.config, objects)) elif 
type == SecurityContentType.stories:
-            ConfWriter.writeConfFile(self.output_path/'default/analyticstories.conf',
-                'analyticstories_stories.j2',
-                self.config, objects)
+            written_files.add(ConfWriter.writeConfFile(pathlib.Path('default/analyticstories.conf'),
+                'analyticstories_stories.j2',
+                self.config, objects))
 
         elif type == SecurityContentType.baselines:
-            ConfWriter.writeConfFile(self.output_path/'default/savedsearches.conf',
-                'savedsearches_baselines.j2',
-                self.config, objects)
+            written_files.add(ConfWriter.writeConfFile(pathlib.Path('default/savedsearches.conf'),
+                'savedsearches_baselines.j2',
+                self.config, objects))
 
         elif type == SecurityContentType.investigations:
-            ConfWriter.writeConfFile(self.output_path/'default/savedsearches.conf',
-                'savedsearches_investigations.j2',
-                self.config, objects)
-
-            ConfWriter.writeConfFile(self.output_path/'default/analyticstories.conf',
-                'analyticstories_investigations.j2',
-                self.config, objects)
-
+            for output_app_path, template_name in [ ('default/savedsearches.conf', 'savedsearches_investigations.j2'),
+                                                    ('default/analyticstories.conf', 'analyticstories_investigations.j2')]:
+                ConfWriter.writeConfFile(pathlib.Path(output_app_path),
+                                         template_name,
+                                         self.config,
+                                         objects)
+
             workbench_panels = []
             for investigation in objects:
                 if investigation.inputs:
@@ -101,66 +106,68 @@ def writeObjects(self, objects: list, type: SecurityContentType = None) -> None:
                     workbench_panels.append(investigation)
                     investigation.search = investigation.search.replace(">","&gt;")
                     investigation.search = investigation.search.replace("<","&lt;")
-                    ConfWriter.writeConfFileHeaderEmpty(
-                        self.output_path/f'default/data/ui/panels/workbench_panel_{response_file_name_xml}',
-                        self.config)
-                    ConfWriter.writeConfFile( self.output_path/f'default/data/ui/panels/workbench_panel_{response_file_name_xml}',
-                        'panel.j2',
-                        self.config,[investigation.search])
-
-            ConfWriter.writeConfFile(self.output_path/'default/es_investigations.conf',
-                'es_investigations_investigations.j2',
-                self.config, workbench_panels)
+
+                    ConfWriter.writeXmlFileHeader(pathlib.Path(f'default/data/ui/panels/workbench_panel_{response_file_name_xml}'),
+                                                  self.config)
+
+                    ConfWriter.writeXmlFile( pathlib.Path(f'default/data/ui/panels/workbench_panel_{response_file_name_xml}'),
+                                             'panel.j2',
+                                             self.config,[investigation.search])
 
-            ConfWriter.writeConfFile(self.output_path/'default/workflow_actions.conf',
-                'workflow_actions.j2',
-                self.config, workbench_panels)
+            for output_app_path, template_name in [ ('default/es_investigations.conf', 'es_investigations_investigations.j2'),
+                                                    ('default/workflow_actions.conf', 'workflow_actions.j2')]:
+                written_files.add( ConfWriter.writeConfFile(pathlib.Path(output_app_path),
+                                                            template_name,
+                                                            self.config,
+                                                            workbench_panels))
 
         elif type == SecurityContentType.lookups:
-            ConfWriter.writeConfFile(self.output_path/'default/collections.conf',
-                'collections.j2',
-                self.config, objects)
-
-            ConfWriter.writeConfFile(self.output_path/'default/transforms.conf',
-                'transforms.j2',
-                self.config, objects)
-
+            for output_app_path, template_name in [ ('default/collections.conf', 'collections.j2'),
+                                                    ('default/transforms.conf', 'transforms.j2')]:
+                written_files.add(ConfWriter.writeConfFile(pathlib.Path(output_app_path),
+                                                           template_name,
+                                                           self.config,
+                                                           objects))
 
-            if self.input_path is None:
-                raise(Exception(f"input_path is required for lookups, but received [{self.input_path}]"))
-
+
             #we want to copy all *.mlmodel files as well, not just csvs
-            files = list(glob.iglob(os.path.join(self.input_path, 'lookups', '*.csv'))) + list(glob.iglob(os.path.join(self.input_path, 'lookups', '*.mlmodel')))
-            lookup_folder = self.output_path/"lookups"
-            if lookup_folder.exists():
-                # Remove it since we want to remove any previous lookups that are not
-                # currently part of the app
-                if lookup_folder.is_dir():
-                    shutil.rmtree(lookup_folder)
-                else:
-                    #it's a file, but there should not be a file called lookups
-                    lookup_folder.unlink()
+            files = list(glob.iglob(str(self.config.path/ 'lookups/*.csv'))) + list(glob.iglob(str(self.config.path / 'lookups/*.mlmodel')))
+            lookup_folder = self.config.getPackageDirectoryPath()/"lookups"
 
             # Make the new folder for the lookups
-            lookup_folder.mkdir()
+            # This folder almost certainly already exists because mitre_enrichment.csv has been written here from the app template.
+            lookup_folder.mkdir(exist_ok=True)
 
             #Copy each lookup into the folder
             for lookup_name in files:
                 lookup_path = pathlib.Path(lookup_name)
                 if lookup_path.is_file():
-                    lookup_target_path = self.output_path/"lookups"/lookup_path.name
-                    shutil.copy(lookup_path, lookup_target_path)
+                    shutil.copy(lookup_path, lookup_folder/lookup_path.name)
                 else:
                     raise(Exception(f"Error copying lookup/mlmodel file. Path {lookup_path} does not exist or is not a file."))
 
         elif type == SecurityContentType.macros:
-            ConfWriter.writeConfFile(self.output_path/'default/macros.conf',
-                'macros.j2',
-                self.config, objects)
+            written_files.add(ConfWriter.writeConfFile(pathlib.Path('default/macros.conf'),
+                'macros.j2',
+                self.config, objects))
+
+        return written_files
+
+
-    def packageApp(self) -> None:
+
+
+
+    def packageAppTar(self) -> None:
+
+        with tarfile.open(self.config.getPackageFilePath(include_version=True), "w:gz") as app_archive:
+            app_archive.add(self.config.getPackageDirectoryPath(), arcname=self.config.getPackageDirectoryPath().name)
+
+        shutil.copy2(self.config.getPackageFilePath(include_version=True),
+                     self.config.getPackageFilePath(include_version=False),
+                     follow_symlinks=False)
+
+    def packageAppSlim(self) -> None:
 
         # input_app_path = pathlib.Path(self.config.build.path_root)/f"{self.config.build.name}"
@@ -171,305 +178,32 @@ def packageApp(self) -> None:
 
         # shutil.copyfile(readme_file, input_app_path/readme_file.name)
 
-        # try:
-        #     import slim
-        #     use_slim = True
-
-        # except Exception as e:
-        #     print("Failed to import Splunk Packaging Toolkit (slim). slim requires Python<3.10. "
-        #           "Packaging app with tar instead. 
This should still work, but appinspect may catch " - # "errors that otherwise would have been flagged by slim.") - # use_slim = False - - # if use_slim: - # import slim - # from slim.utils import SlimLogger - # import logging - # #In order to avoid significant output, only emit FATAL log messages - # SlimLogger.set_level(logging.ERROR) - # try: - # slim.package(source=input_app_path, output_dir=pathlib.Path(self.config.build.path_root)) - # except SystemExit as e: - # raise Exception(f"Error building package with slim: {str(e)}") - # else: - with tarfile.open(self.getPackagePath(include_version=True), "w:gz") as app_archive: - app_archive.add(self.output_path, arcname=os.path.basename(self.output_path)) - - - if not self.output_path.exists(): - raise (Exception(f"The expected output app path '{self.getPackagePath(include_version=True)}' does not exist")) - - shutil.copy2(self.getPackagePath(include_version=True), - self.getPackagePath(include_version=False), - follow_symlinks=False) - - - def getElapsedTime(self, startTime:float)->datetime.timedelta: - return datetime.timedelta(seconds=round(timeit.default_timer() - startTime)) - - def deploy_via_acs(self, splunk_cloud_jwt_token:str, splunk_cloud_stack:str, appinspect_token:str, stack_type:str): - if stack_type not in ['victoria', 'classic']: - raise Exception(f"stack_type MUST be either 'classic' or 'victoria', NOT '{stack_type}'") - - - #The following common headers are used by both Clasic and Victoria - headers = { - 'Authorization': f'Bearer {splunk_cloud_jwt_token}', - 'ACS-Legal-Ack': 'Y' - } try: - with open(self.getPackagePath(include_version=False),'rb') as app_data: - #request_data = app_data.read() - if stack_type == 'classic': - # Classic instead uses a form to store token and package - # https://docs.splunk.com/Documentation/SplunkCloud/9.1.2308/Config/ManageApps#Manage_private_apps_using_the_ACS_API_on_Classic_Experience - address = f"https://admin.splunk.com/{splunk_cloud_stack}/adminconfig/v2/apps" - - form_data = { - 'token': (None, appinspect_token), - 'package': app_data - } - res = post(address, headers=headers, files = form_data) - else: - # Victoria uses the X-Splunk-Authorization Header - # It also uses --data-binary for the app content - # https://docs.splunk.com/Documentation/SplunkCloud/9.1.2308/Config/ManageApps#Manage_private_apps_using_the_ACS_API_on_Victoria_Experience - headers.update({'X-Splunk-Authorization': appinspect_token}) - address = f"https://admin.splunk.com/{splunk_cloud_stack}/adminconfig/v2/apps/victoria" - res = post(address, headers=headers, data=app_data.read()) - except Exception as e: - raise Exception(f"Error installing to stack '{splunk_cloud_stack}' (stack_type='{stack_type}') via ACS:\n{str(e)}") - - try: - # Request went through and completed, but may have returned a non-successful error code. 
- # This likely includes a more verbose response describing the error - res.raise_for_status() - except Exception as e: + import slim + from slim.utils import SlimLogger + import logging + #In order to avoid significant output, only emit FATAL log messages + SlimLogger.set_level(logging.ERROR) try: - error_text = res.json() - except Exception as e: - error_text = "No error text - request failed" - formatted_error_text = pprint.pformat(error_text) - raise Exception(f"Error installing to stack '{splunk_cloud_stack}' (stack_type='{stack_type}') via ACS:\n{formatted_error_text}") - - print(f"'{self.getPackagePath(include_version=False)}' successfully installed to stack '{splunk_cloud_stack}' (stack_type='{stack_type}') via ACS!") - - return - - def inspectAppAPI(self, username:str, password:str, stack_type:str)->str: - session = Session() - session.auth = HTTPBasicAuth(username, password) - if stack_type not in ['victoria', 'classic']: - raise Exception(f"stack_type MUST be either 'classic' or 'victoria', NOT '{stack_type}'") + slim.package(source=self.config.getPackageDirectoryPath(), output_dir=pathlib.Path(self.config.getBuildDir())) + except SystemExit as e: + raise Exception(f"Error building package with slim: {str(e)}") - APPINSPECT_API_LOGIN = "https://api.splunk.com/2.0/rest/login/splunk" + except Exception as e: + print("Failed to import Splunk Packaging Toolkit (slim). slim requires Python<3.10. " + "Packaging app with tar instead. This should still work, but appinspect may catch " + "errors that otherwise would have been flagged by slim.") + raise Exception(f"slim (splunk packaging toolkit) not installed: {str(e)}") - res = session.get(APPINSPECT_API_LOGIN) - #If login failed or other failure, raise an exception - res.raise_for_status() - - authorization_bearer = res.json().get("data",{}).get("token",None) - APPINSPECT_API_VALIDATION_REQUEST = "https://appinspect.splunk.com/v1/app/validate" - headers = { - "Authorization": f"bearer {authorization_bearer}", - "Cache-Control": "no-cache" - } - - package_path = self.getPackagePath(include_version=False) - if not package_path.is_file(): - raise Exception(f"Cannot run Appinspect API on App '{self.config.build.title}' - " - f"no package exists as expected path '{package_path}'.\nAre you " - "trying to 'contentctl acs_deploy' the package BEFORE running 'contentctl build'?") - - files = { - "app_package": open(package_path,"rb"), - "included_tags":(None,"cloud") - } - - res = post(APPINSPECT_API_VALIDATION_REQUEST, headers=headers, files=files) - - res.raise_for_status() - - request_id = res.json().get("request_id",None) - APPINSPECT_API_VALIDATION_STATUS = f"https://appinspect.splunk.com/v1/app/validate/status/{request_id}?included_tags=private_{stack_type}" - headers = headers = { - "Authorization": f"bearer {authorization_bearer}" - } - startTime = timeit.default_timer() - # the first time, wait for 40 seconds. subsequent times, wait for less. - # this is because appinspect takes some time to return, so there is no sense - # checking many times when we know it will take at least 40 seconds to run. 
- iteration_wait_time = 40 - while True: - - res = get(APPINSPECT_API_VALIDATION_STATUS, headers=headers) - res.raise_for_status() - status = res.json().get("status",None) - if status in ["PROCESSING", "PREPARING"]: - print(f"[{self.getElapsedTime(startTime)}] Appinspect API is {status}...") - time.sleep(iteration_wait_time) - iteration_wait_time = 1 - continue - elif status == "SUCCESS": - print(f"[{self.getElapsedTime(startTime)}] Appinspect API has finished!") - break - else: - raise Exception(f"Error - Unknown Appinspect API status '{status}'") - + def packageApp(self, method=packageAppTar)->None: + return method(self) - #We have finished running appinspect, so get the report - APPINSPECT_API_REPORT = f"https://appinspect.splunk.com/v1/app/report/{request_id}?included_tags=private_{stack_type}" - #Get human-readable HTML report - headers = headers = { - "Authorization": f"bearer {authorization_bearer}", - "Content-Type": "text/html" - } - res = get(APPINSPECT_API_REPORT, headers=headers) - res.raise_for_status() - report_html = res.content - - #Get JSON report for processing - headers = headers = { - "Authorization": f"bearer {authorization_bearer}", - "Content-Type": "application/json" - } - res = get(APPINSPECT_API_REPORT, headers=headers) - res.raise_for_status() - report_json = res.json() - with open(self.dist/f"{self.config.build.name}-{self.config.build.version}.appinspect_api_results.html", "wb") as report: - report.write(report_html) - with open(self.dist/f"{self.config.build.name}-{self.config.build.version}.appinspect_api_results.json", "w") as report: - json.dump(report_json, report) - - - self.parseAppinspectJsonLogFile(self.dist/f"{self.config.build.name}-{self.config.build.version}.appinspect_api_results.json") - - return authorization_bearer - - def parseAppinspectJsonLogFile(self, logfile_path:pathlib.Path, - status_types:list[str] = ["error", "failure", "manual_check", "warning"], - exception_types = ["error","failure","manual_check"] )->None: - if not set(exception_types).issubset(set(status_types)): - raise Exception(f"Error - exception_types {exception_types} MUST be a subset of status_types {status_types}, but it is not") - with open(logfile_path, "r+") as logfile: - j = json.load(logfile) - #Seek back to the beginning of the file. 
We don't need to clear - #it sice we will always write AT LEAST the same number of characters - #back as we read (due to the addition of whitespace) - logfile.seek(0) - json.dump(j, logfile, indent=3, ) - - reports = j.get("reports", []) - if len(reports) != 1: - raise Exception("Expected to find one appinspect report but found 0") - verbose_errors = [] - - for group in reports[0].get("groups", []): - for check in group.get("checks",[]): - if check.get("result","") in status_types: - verbose_errors.append(f" - {check.get('result','')} [{group.get('name','NONAME')}: {check.get('name', 'NONAME')}]") - verbose_errors.sort() - - summary = j.get("summary", None) - if summary is None: - raise Exception("Missing summary from appinspect report") - msgs = [] - generated_exception = False - for key in status_types: - if summary.get(key,0)>0: - msgs.append(f" - {summary.get(key,0)} {key}s") - if key in exception_types: - generated_exception = True - if len(msgs)>0 or len(verbose_errors): - summary = '\n'.join(msgs) - details = '\n'.join(verbose_errors) - summary = f"{summary}\nDetails:\n{details}" - if generated_exception: - raise Exception(f"AppInspect found [{','.join(exception_types)}] that MUST be addressed to pass AppInspect API:\n{summary}") - else: - print(f"AppInspect found [{','.join(status_types)}] that MAY cause a failure during AppInspect API:\n{summary}") - else: - print("AppInspect was successful!") - - return - - def inspectAppCLI(self)-> None: + def getElapsedTime(self, startTime:float)->datetime.timedelta: + return datetime.timedelta(seconds=round(timeit.default_timer() - startTime)) - try: - raise Exception("Local spunk-appinspect Not Supported at this time (you may use the appinspect api). If you would like to locally inspect your app with" - "Python 3.7, 3.8, or 3.9 (with limited support), please refer to:\n" - "\t - https://dev.splunk.com/enterprise/docs/developapps/testvalidate/appinspect/useappinspectclitool/") - from splunk_appinspect.main import ( - validate, MODE_OPTION, APP_PACKAGE_ARGUMENT, OUTPUT_FILE_OPTION, - LOG_FILE_OPTION, INCLUDED_TAGS_OPTION, EXCLUDED_TAGS_OPTION, - PRECERT_MODE, TEST_MODE) - except Exception as e: - print(e) - # print("******WARNING******") - # if sys.version_info.major == 3 and sys.version_info.minor > 9: - # print("The package splunk-appinspect was not installed due to a current issue with the library on Python3.10+. " - # "Please use the following commands to set up a virtualenvironment in a different folder so you may run appinspect manually (if desired):" - # "\n\tpython3.9 -m venv .venv" - # "\n\tsource .venv/bin/activate" - # "\n\tpython3 -m pip install splunk-appinspect" - # f"\n\tsplunk-appinspect inspect {self.getPackagePath(include_version=False).relative_to(pathlib.Path('.').absolute())} --mode precert") - - # else: - # print("splunk-appinspect is only compatable with Python3.9 at this time. Please see the following open issue here: https://github.com/splunk/contentctl/issues/28") - # print("******WARNING******") - return - - # Note that all tags are available and described here: - # https://dev.splunk.com/enterprise/reference/appinspect/appinspecttagreference/ - # By default, precert mode will run ALL checks. Explicitly included or excluding tags will - # change this behavior. 
To give the most thorough inspection, we leave these empty so that - # ALL checks are run - included_tags = [] - excluded_tags = [] - - appinspect_output = self.dist/f"{self.config.build.name}-{self.config.build.version}.appinspect_cli_results.json" - appinspect_logging = self.dist/f"{self.config.build.name}-{self.config.build.version}.appinspect_cli_logging.log" - try: - arguments_list = [(APP_PACKAGE_ARGUMENT, str(self.getPackagePath(include_version=False)))] - options_list = [] - options_list += [MODE_OPTION, TEST_MODE] - options_list += [OUTPUT_FILE_OPTION, str(appinspect_output)] - options_list += [LOG_FILE_OPTION, str(appinspect_logging)] - - #If there are any tags defined, then include them here - for opt in included_tags: - options_list += [INCLUDED_TAGS_OPTION, opt] - for opt in excluded_tags: - options_list += [EXCLUDED_TAGS_OPTION, opt] - - cmdline = options_list + [arg[1] for arg in arguments_list] - validate(cmdline) - - except SystemExit as e: - if e.code == 0: - # The sys.exit called inside of appinspect validate closes stdin. We need to - # reopen it. - sys.stdin = open("/dev/stdin","r") - print(f"AppInspect passed! Please check [ {appinspect_output} , {appinspect_logging} ] for verbose information.") - else: - if sys.version.startswith('3.11') or sys.version.startswith('3.12'): - raise Exception("At this time, AppInspect may fail on valid apps under Python>=3.11 with " - "the error 'global flags not at the start of the expression at position 1'. " - "If you encounter this error, please run AppInspect on a version of Python " - "<3.11. This issue is currently tracked. Please review the appinspect " - "report output above for errors.") - else: - raise Exception("AppInspect Failure - Please review the appinspect report output above for errors.") - finally: - # appinspect outputs the log in json format, but does not format it to be easier - # to read (it is all in one line). Read back that file and write it so it - # is easier to understand - - #Note that this may raise an exception itself! 
- self.parseAppinspectJsonLogFile(appinspect_output) - \ No newline at end of file + \ No newline at end of file diff --git a/contentctl/output/conf_writer.py b/contentctl/output/conf_writer.py index da6ba4f0..5f66e032 100644 --- a/contentctl/output/conf_writer.py +++ b/contentctl/output/conf_writer.py @@ -1,67 +1,205 @@ +from typing import Any import datetime +import re import os +import json +import configparser from xmlrpc.client import APPLICATION_ERROR from jinja2 import Environment, FileSystemLoader, StrictUndefined import pathlib from contentctl.objects.security_content_object import SecurityContentObject -from contentctl.objects.config import Config +from contentctl.objects.config import build +import xml.etree.ElementTree as ET class ConfWriter(): @staticmethod - def writeConfFileHeader(output_path:pathlib.Path, config: Config) -> None: - utc_time = datetime.datetime.utcnow().replace(microsecond=0).isoformat() + def custom_jinja2_enrichment_filter(string:str, object:SecurityContentObject): + substitutions = re.findall(r"%[^%]*%", string) + updated_string = string + for sub in substitutions: + sub_without_percents = sub.replace("%","") + if hasattr(object, sub_without_percents): + updated_string = updated_string.replace(sub, str(getattr(object, sub_without_percents))) + elif hasattr(object,'tags') and hasattr(object.tags, sub_without_percents): + updated_string = updated_string.replace(sub, str(getattr(object.tags, sub_without_percents))) + else: + raise Exception(f"Unable to find field {sub} in object {object.name}") + + return updated_string + + @staticmethod + def escapeNewlines(obj:Any): + # Ensure that any newlines that occur in a string are escaped with a \. + # Failing to do so will result in an improperly formatted conf files that + # cannot be parsed + if isinstance(obj,str): + return obj.replace(f"\n","\\\n") + else: + return obj + + + @staticmethod + def writeConfFileHeader(app_output_path:pathlib.Path, config: build) -> pathlib.Path: + output = ConfWriter.writeFileHeader(app_output_path, config) + + output_path = config.getPackageDirectoryPath()/app_output_path + output_path.parent.mkdir(parents=True, exist_ok=True) + with open(output_path, 'w') as f: + output = output.encode('utf-8', 'ignore').decode('utf-8') + f.write(output) + + #Ensure that the conf file we just generated/update is syntactically valid + ConfWriter.validateConfFile(output_path) + return output_path + + @staticmethod + def writeManifestFile(app_output_path:pathlib.Path, template_name : str, config: build, objects : list) -> pathlib.Path: + j2_env = ConfWriter.getJ2Environment() + template = j2_env.get_template(template_name) + + output = template.render(objects=objects, APP_NAME=config.app.label, currentDate=datetime.datetime.now(datetime.UTC).date().isoformat()) + + output_path = config.getPackageDirectoryPath()/app_output_path + output_path.parent.mkdir(parents=True, exist_ok=True) + with open(output_path, 'w') as f: + output = output.encode('utf-8', 'ignore').decode('utf-8') + f.write(output) + return output_path + + + @staticmethod + def writeFileHeader(app_output_path:pathlib.Path, config: build) -> str: + #Do not output microseconds or +00:000 at the end of the datetime string + utc_time = datetime.datetime.now(datetime.UTC).replace(microsecond=0,tzinfo=None).isoformat() + j2_env = Environment( loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')), trim_blocks=True) template = j2_env.get_template('header.j2') - output = template.render(time=utc_time, author=' - 
'.join([config.build.author_name,config.build.author_company]), author_email=config.build.author_email)
+        output = template.render(time=utc_time, author=' - '.join([config.app.author_name,config.app.author_company]), author_email=config.app.author_email)
+
+        return output
+
+
+
+    @staticmethod
+    def writeXmlFile(app_output_path:pathlib.Path, template_name : str, config: build, objects : list) -> None:
+
+
+        j2_env = ConfWriter.getJ2Environment()
+        template = j2_env.get_template(template_name)
+
+        output = template.render(objects=objects, APP_NAME=config.app.label)
+
+        output_path = config.getPackageDirectoryPath()/app_output_path
         output_path.parent.mkdir(parents=True, exist_ok=True)
-        with open(output_path, 'w') as f:
-            output = output.encode('ascii', 'ignore').decode('ascii')
+        with open(output_path, 'a') as f:
+            output = output.encode('utf-8', 'ignore').decode('utf-8')
             f.write(output)
+
+        #Ensure that the conf file we just generated/updated is syntactically valid
+        ConfWriter.validateXmlFile(output_path)
+
+
     @staticmethod
-    def writeConfFileHeaderEmpty(output_path:pathlib.Path, config: Config) -> None:
+    def writeXmlFileHeader(app_output_path:pathlib.Path, config: build) -> None:
+        output = ConfWriter.writeFileHeader(app_output_path, config)
+        output_with_xml_comment = f"<!--\n{output}\n-->\n"
+
+        output_path = config.getPackageDirectoryPath()/app_output_path
         output_path.parent.mkdir(parents=True, exist_ok=True)
         with open(output_path, 'w') as f:
-            f.write('')
+            output_with_xml_comment = output_with_xml_comment.encode('utf-8', 'ignore').decode('utf-8')
+            f.write(output_with_xml_comment)
+
+        # We INTENTIONALLY do not validate the comment we wrote to the header. This is because right now,
+        # the file is an empty XML document (besides the commented header). This means that it will FAIL validation
 
     @staticmethod
-    def writeConfFile(output_path:pathlib.Path, template_name : str, config: Config, objects : list) -> None:
-        def custom_jinja2_enrichment_filter(string, object):
-            customized_string = string
-
-            for key in dir(object):
-                if type(key) is not str:
-                    key = key.decode()
-                if not key.startswith('__') and not key == "_abc_impl" and not callable(getattr(object, key)):
-                    if hasattr(object, key):
-                        customized_string = customized_string.replace("%" + key + "%", str(getattr(object, key)))
-
-            for key in dir(object.tags):
-                if type(key) is not str:
-                    key = key.decode()
-                if not key.startswith('__') and not key == "_abc_impl" and not callable(getattr(object.tags, key)):
-                    if hasattr(object.tags, key):
-                        customized_string = customized_string.replace("%" + key + "%", str(getattr(object.tags, key)))
-
-            return customized_string
-
+    def getJ2Environment()->Environment:
         j2_env = Environment(
             loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
             trim_blocks=True,
             undefined=StrictUndefined)
+        j2_env.globals.update(objectListToNameList=SecurityContentObject.objectListToNameList)
+
+        j2_env.filters['custom_jinja2_enrichment_filter'] = ConfWriter.custom_jinja2_enrichment_filter
+        j2_env.filters['escapeNewlines'] = ConfWriter.escapeNewlines
+        return j2_env
 
-        j2_env.filters['custom_jinja2_enrichment_filter'] = custom_jinja2_enrichment_filter
+    @staticmethod
+    def writeConfFile(app_output_path:pathlib.Path, template_name : str, config: build, objects : list) -> pathlib.Path:
+        output_path = config.getPackageDirectoryPath()/app_output_path
+        j2_env = ConfWriter.getJ2Environment()
+        template = j2_env.get_template(template_name)
 
-        output = template.render(objects=objects, APP_NAME=config.build.prefix)
+        output = template.render(objects=objects, APP_NAME=config.app.label)
+
         output_path.parent.mkdir(parents=True, exist_ok=True)
         with open(output_path, 'a') as f:
-            output = output.encode('ascii', 'ignore').decode('ascii')
+            output = output.encode('utf-8', 'ignore').decode('utf-8')
             f.write(output)
+        return output_path
+
+
+    @staticmethod
+    def validateConfFile(path:pathlib.Path):
+        """Ensure that the conf file is valid. We will do this by reading back
+        the conf using RawConfigParser to ensure that it does not throw any parsing errors.
+        This is particularly relevant because newlines contained in string fields may
+        break the formatting of the conf file if they have been incorrectly escaped with
+        the 'ConfWriter.escapeNewlines()' function.
+
+        If a conf file fails validation, we will throw an exception
+
+        Args:
+            path (pathlib.Path): path to the conf file to validate
+        """
+        return
+        if path.suffix != ".conf":
+            #there may be some other files built, so just ignore them
+            return
+        try:
+            _ = configparser.RawConfigParser().read(path)
+        except Exception as e:
+            raise Exception(f"Failed to validate .conf file {str(path)}: {str(e)}")
+
+    @staticmethod
+    def validateXmlFile(path:pathlib.Path):
+        """Ensure that the XML file is valid XML.
+
+        Args:
+            path (pathlib.Path): path to the xml file to validate
+        """
+
+        try:
+            with open(path, 'r') as xmlFile:
+                _ = ET.fromstring(xmlFile.read())
+        except Exception as e:
+            raise Exception(f"Failed to validate .xml file {str(path)}: {str(e)}")
+
+
+    @staticmethod
+    def validateManifestFile(path:pathlib.Path):
+        """Ensure that the Manifest file is valid JSON.
+
+        Args:
+            path (pathlib.Path): path to the manifest JSON file to validate
+        """
+        return
+        try:
+            with open(path, 'r') as manifestFile:
+                _ = json.load(manifestFile)
+        except Exception as e:
+            raise Exception(f"Failed to validate .manifest file {str(path)} (Note that .manifest files should contain only valid JSON-formatted data): {str(e)}")
+
+
+
+
diff --git a/contentctl/output/jinja_writer.py b/contentctl/output/jinja_writer.py
index 97d41c2c..05690ea8 100644
--- a/contentctl/output/jinja_writer.py
+++ b/contentctl/output/jinja_writer.py
@@ -1,5 +1,5 @@
 import os
-
+from typing import Any
 from jinja2 import Environment, FileSystemLoader
 
@@ -20,7 +20,7 @@ def writeObjectsList(template_name : str, output_path : str, objects : list) ->
 
     @staticmethod
-    def writeObject(template_name : str, output_path : str, object : dict) -> None:
+    def writeObject(template_name : str, output_path : str, object: dict[str,Any]) -> None:
 
         j2_env = Environment(
             loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
diff --git a/contentctl/output/json_writer.py b/contentctl/output/json_writer.py
index 0ae387c2..fe3696d9 100644
--- a/contentctl/output/json_writer.py
+++ b/contentctl/output/json_writer.py
@@ -1,9 +1,21 @@
 import json
-
-
+from contentctl.objects.abstract_security_content_objects.security_content_object_abstract import SecurityContentObject_Abstract
+from typing import List
+from io import TextIOWrapper
 class JsonWriter():
 
     @staticmethod
-    def writeJsonObject(file_path : str, obj) -> None:
-        with open(file_path, 'w') as outfile:
-            json.dump(obj, outfile, ensure_ascii=False)
\ No newline at end of file
+    def writeJsonObject(file_path : str, object_name: str, objs: List[dict],readable_output=False) -> None:
+        try:
+            with open(file_path, 'w') as outfile:
+                if readable_output:
+                    # At the cost of slightly larger filesize, improve the readability significantly
+                    # by sorting and indenting 
keys/values + sorted_objs = sorted(objs, key=lambda o: o['name']) + json.dump({object_name:sorted_objs}, outfile, ensure_ascii=False, indent=2) + else: + json.dump({object_name:objs}, outfile, ensure_ascii=False) + + except Exception as e: + raise Exception(f"Error serializing object to Json File '{file_path}': {str(e)}") + \ No newline at end of file diff --git a/contentctl/output/new_content_yml_output.py b/contentctl/output/new_content_yml_output.py index 7fbe09c7..df55dd1c 100644 --- a/contentctl/output/new_content_yml_output.py +++ b/contentctl/output/new_content_yml_output.py @@ -2,17 +2,18 @@ import pathlib from contentctl.objects.enums import SecurityContentType from contentctl.output.yml_writer import YmlWriter - - +import pathlib +from contentctl.objects.config import NewContentType class NewContentYmlOutput(): - output_path: str + output_path: pathlib.Path - def __init__(self, output_path:str): + def __init__(self, output_path:pathlib.Path): self.output_path = output_path - def writeObjectNewContent(self, object: dict, subdirectory_name: str, type: SecurityContentType) -> None: - if type == SecurityContentType.detections: + def writeObjectNewContent(self, object: dict, subdirectory_name: str, type: NewContentType) -> None: + if type == NewContentType.detection: + file_path = os.path.join(self.output_path, 'detections', subdirectory_name, self.convertNameToFileName(object['name'], object['tags']['product'])) output_folder = pathlib.Path(self.output_path)/'detections'/subdirectory_name #make sure the output folder exists for this detection @@ -21,7 +22,7 @@ def writeObjectNewContent(self, object: dict, subdirectory_name: str, type: Secu YmlWriter.writeYmlFile(file_path, object) print("Successfully created detection " + file_path) - elif type == SecurityContentType.stories: + elif type == NewContentType.story: file_path = os.path.join(self.output_path, 'stories', self.convertNameToFileName(object['name'], object['tags']['product'])) YmlWriter.writeYmlFile(file_path, object) print("Successfully created story " + file_path) diff --git a/contentctl/output/svg_output.py b/contentctl/output/svg_output.py index a832a0fb..d454ccb2 100644 --- a/contentctl/output/svg_output.py +++ b/contentctl/output/svg_output.py @@ -1,15 +1,18 @@ import os import pathlib +from typing import List, Any from contentctl.objects.enums import SecurityContentType from contentctl.output.jinja_writer import JinjaWriter -from contentctl.objects.config import Config from contentctl.objects.enums import DetectionStatus +from contentctl.objects.detection import Detection class SvgOutput(): - def get_badge_dict(self, name:str, total_detections:list, these_detections:list): - obj = dict() + + def get_badge_dict(self, name:str, total_detections:List[Detection], these_detections:List[Detection])->dict[str,Any]: + obj:dict[str,Any] = {} obj['name'] = name + if name == "Production": obj['color'] = "Green" elif name == "Detections": @@ -26,40 +29,27 @@ def get_badge_dict(self, name:str, total_detections:list, these_detections:list) obj['coverage'] = len(these_detections) / obj['count'] obj['coverage'] = "{:.0%}".format(obj['coverage']) return obj - - def writeObjects(self, objects: list, path: str, type: SecurityContentType = None) -> None: + + def writeObjects(self, detections: List[Detection], output_path: pathlib.Path, type: SecurityContentType = None) -> None: - detections_tmp = objects - - output_path = pathlib.Path(path) - - production_detections = [] - deprecated_detections = [] - experimental_detections = [] - obj = 
dict() - for detection in detections_tmp: - if detection.status == DetectionStatus.production.value: - production_detections.append(detection) - if detection.status == DetectionStatus.deprecated.value: - deprecated_detections.append(detection) - elif detection.status == DetectionStatus.experimental.value: - experimental_detections.append(detection) + total_dict:dict[str,Any] = self.get_badge_dict("Detections", detections, detections) + production_dict:dict[str,Any] = self.get_badge_dict("% Production", detections, [detection for detection in detections if detection.status == DetectionStatus.production.value]) + #deprecated_dict = self.get_badge_dict("Deprecated", detections, [detection for detection in detections if detection.status == DetectionStatus.deprecated]) + #experimental_dict = self.get_badge_dict("Experimental", detections, [detection for detection in detections if detection.status == DetectionStatus.experimental]) - total_detections = production_detections + deprecated_detections + experimental_detections - total_dict = self.get_badge_dict("Detections", total_detections, production_detections) - production_dict = self.get_badge_dict("Production", total_detections, production_detections) - deprecated_dict = self.get_badge_dict("Deprecated", total_detections, deprecated_detections) - experimental_dict = self.get_badge_dict("Experimental", total_detections, experimental_detections) - JinjaWriter.writeObject('detection_count.j2', os.path.join(output_path, 'detection_count.svg'), total_dict) + + #Total number of detections + JinjaWriter.writeObject('detection_count.j2', output_path /'detection_count.svg', total_dict) #JinjaWriter.writeObject('detection_count.j2', os.path.join(output_path, 'production_count.svg'), production_dict) #JinjaWriter.writeObject('detection_count.j2', os.path.join(output_path, 'deprecated_count.svg'), deprecated_dict) #JinjaWriter.writeObject('detection_count.j2', os.path.join(output_path, 'experimental_count.svg'), experimental_dict) - JinjaWriter.writeObject('detection_coverage.j2', os.path.join(output_path, 'detection_coverage.svg'), total_dict) + #Percentage of detections that are production + JinjaWriter.writeObject('detection_coverage.j2', output_path/'detection_coverage.svg', production_dict) #JinjaWriter.writeObject('detection_coverage.j2', os.path.join(output_path, 'detection_coverage.svg'), deprecated_dict) #JinjaWriter.writeObject('detection_coverage.j2', os.path.join(output_path, 'detection_coverage.svg'), experimental_dict) diff --git a/contentctl/output/templates/analyticstories_detections.j2 b/contentctl/output/templates/analyticstories_detections.j2 index c343711e..290c4c85 100644 --- a/contentctl/output/templates/analyticstories_detections.j2 +++ b/contentctl/output/templates/analyticstories_detections.j2 @@ -5,17 +5,21 @@ {% if (detection.type == 'TTP' or detection.type == 'Anomaly' or detection.type == 'Hunting' or detection.type == 'Correlation') %} [savedsearch://{{APP_NAME}} - {{ detection.name }} - Rule] type = detection -asset_type = {{ detection.tags.asset_type }} +asset_type = {{ detection.tags.asset_type.value }} confidence = medium -explanation = {{ detection.description }} +explanation = {{ detection.description | escapeNewlines() }} {% if detection.how_to_implement is defined %} -how_to_implement = {{ detection.how_to_implement }} +how_to_implement = {{ detection.how_to_implement | escapeNewlines() }} {% else %} how_to_implement = none {% endif %} annotations = {{ detection.mappings | tojson }} -known_false_positives = {{ 
detection.known_false_positives }} +known_false_positives = {{ detection.known_false_positives | escapeNewlines() }} +{% if detection.providing_technologies | length > 0 %} providing_technologies = {{ detection.providing_technologies | tojson }} +{% else %} +providing_technologies = null +{% endif %} {% endif %} {% endfor %} diff --git a/contentctl/output/templates/analyticstories_investigations.j2 b/contentctl/output/templates/analyticstories_investigations.j2 index 9e80c133..e742c909 100644 --- a/contentctl/output/templates/analyticstories_investigations.j2 +++ b/contentctl/output/templates/analyticstories_investigations.j2 @@ -7,7 +7,7 @@ type = investigation explanation = none {% if detection.how_to_implement is defined %} -how_to_implement = {{ detection.how_to_implement }} +how_to_implement = {{ detection.how_to_implement | escapeNewlines() }} {% else %} how_to_implement = none {% endif %} diff --git a/contentctl/output/templates/analyticstories_stories.j2 b/contentctl/output/templates/analyticstories_stories.j2 index d6fe6a0f..9723a6dd 100644 --- a/contentctl/output/templates/analyticstories_stories.j2 +++ b/contentctl/output/templates/analyticstories_stories.j2 @@ -4,16 +4,16 @@ {% for story in objects %} [analytic_story://{{ story.name }}] -category = {{ story.tags.category[0].value }} +category = {{ story.tags.getCategory_conf() }} last_updated = {{ story.date }} version = {{ story.version }} -references = {{ story.references | tojson }} -maintainers = [{"company": "{{ story.author_company }}", "email": "-", "name": "{{ story.author_name }}"}] +references = {{ story.getReferencesListForJson() | tojson }} +maintainers = [{"company": "{{ story.author_company }}", "email": "{{ story.author_email }}", "name": "{{ story.author_name }}"}] spec_version = 3 -searches = {{ (story.detection_names + story.investigation_names) | tojson }} -description = {{ story.description }} +searches = {{ story.storyAndInvestigationNamesWithApp(APP_NAME) | tojson }} +description = {{ story.description | escapeNewlines() }} {% if story.narrative is defined %} -narrative = {{ story.narrative }} +narrative = {{ story.narrative | escapeNewlines() }} {% endif %} {% endfor %} diff --git a/contentctl/output/templates/app.conf.j2 b/contentctl/output/templates/app.conf.j2 index 190ba31e..51734792 100644 --- a/contentctl/output/templates/app.conf.j2 +++ b/contentctl/output/templates/app.conf.j2 @@ -21,14 +21,14 @@ reload.es_investigations = simple [launcher] author = {{ objects[0].author_company }} version = {{ objects[0].version }} -description = {{ objects[0].description }} +description = {{ objects[0].description | escapeNewlines() }} [ui] is_visible = true label = {{ objects[0].title }} [package] -id = {{ objects[0].name }} +id = {{ objects[0].appid }} diff --git a/contentctl/output/templates/app.manifest.j2 b/contentctl/output/templates/app.manifest.j2 index 42737a6a..7891f52b 100644 --- a/contentctl/output/templates/app.manifest.j2 +++ b/contentctl/output/templates/app.manifest.j2 @@ -4,7 +4,7 @@ "title": "{{ objects[0].title }}", "id": { "group": null, - "name": "{{ objects[0].name }}", + "name": "{{ objects[0].appid }}", "version": "{{ objects[0].version }}" }, "author": [ @@ -14,7 +14,7 @@ "company": "{{ objects[0].author_company }}" } ], - "releaseDate": null, + "releaseDate": "{{ currentDate }}", "description": "{{ objects[0].description }}", "classification": { "intendedAudience": null, diff --git a/contentctl/output/templates/detection_coverage.j2 b/contentctl/output/templates/detection_coverage.j2 
index d9dca89f..c9c28fd4 100644 --- a/contentctl/output/templates/detection_coverage.j2 +++ b/contentctl/output/templates/detection_coverage.j2 @@ -1,18 +1,16 @@ [SVG badge markup not preserved in this extract: the hunk changes the rendered badge label from "coverage" to "% Production" and still emits {{ object.coverage }}] diff --git a/contentctl/output/templates/doc_detection_page.j2 b/contentctl/output/templates/doc_detection_page.j2 index b6bcd04f..d3e37d50 100644 --- a/contentctl/output/templates/doc_detection_page.j2 +++ b/contentctl/output/templates/doc_detection_page.j2 @@ -12,8 +12,8 @@ sidebar: | -------------- | --------------- | --------------- | {%- for detection in objects -%} {% if detection.tags.mitre_attack_enrichments %} -| [{{ detection.name }}](/{{ detection.source }}/{{ detection.name | lower | replace(' ', '_') }}/) | {% for attack in detection.tags.mitre_attack_enrichments -%} [{{ attack.mitre_attack_technique }}](/tags/#{{ attack.mitre_attack_technique | lower | replace(" ", "-") }}){% if not loop.last -%}, {% endif -%}{%- endfor %} | [{{ detection.type }}](https://github.com/splunk/security_content/wiki/Detection-Analytic-Types) | +| [{{ detection.name }}](/{{ detection.getSource() }}/{{ detection.name | lower | replace(' ', '_') }}/) | {% for attack in detection.tags.mitre_attack_enrichments -%} [{{ attack.mitre_attack_technique }}](/tags/#{{ attack.mitre_attack_technique | lower | replace(" ", "-") }}){% if not loop.last -%}, {% endif -%}{%- endfor %} | [{{ detection.type }}](https://github.com/splunk/security_content/wiki/Detection-Analytic-Types) | {%- else %} -| [{{ detection.name }}](/{{ detection.source }}/{{ detection.name | lower | replace(' ', '_') }}/) | None | [{{ detection.type }}](https://github.com/splunk/security_content/wiki/Detection-Analytic-Types) | +| [{{ detection.name }}](/{{ detection.getSource() }}/{{ detection.name | lower | replace(' ', '_') }}/) | None | [{{ detection.type }}](https://github.com/splunk/security_content/wiki/Detection-Analytic-Types) | {%- endif -%} {%- endfor -%} diff --git a/contentctl/output/templates/doc_detections.j2 b/contentctl/output/templates/doc_detections.j2 index 3f1a800f..5430b0ed 100644 --- a/contentctl/output/templates/doc_detections.j2 +++ b/contentctl/output/templates/doc_detections.j2 @@ -5,7 +5,7 @@ excerpt: "{% if object.tags.mitre_attack_enrichments %}{% for attack in object.t {% if not loop.last -%}, {% endif -%} {% endfor %}{% endif -%}" categories: - - {{object.source|capitalize}} + - {{object.getSource()|capitalize}} last_modified_at: {{object.date}} toc: true toc_label: "" @@ -207,4 +207,4 @@ Alternatively you can replay a dataset into a [Splunk Attack Range](https://gith {% endfor %} {% endif %} -[*source*](https://github.com/splunk/security_content/tree/develop/detections/{% if object.experimental is sameas true -%}experimental/{%- endif -%}{{object.source}}/{{ object.name | lower | replace (" ", "_") | replace("-", "_") }}.yml) \| *version*: **{{object.version}}** +[*source*](https://github.com/splunk/security_content/tree/develop/detections/{% if object.experimental is sameas true -%}experimental/{%- endif -%}{{object.getSource()}}/{{ object.name | lower | replace (" ", "_") | replace("-", "_") }}.yml) \| *version*: **{{object.version}}** diff --git a/contentctl/output/templates/doc_stories.j2 b/contentctl/output/templates/doc_stories.j2 index 443f0f76..2d87bf30 100644 --- a/contentctl/output/templates/doc_stories.j2 +++ b/contentctl/output/templates/doc_stories.j2 @@ -37,7 +37,7 @@ tags: | ----------- | ----------- |--------------| {%- if object.detections %} 
{%- for detection in object.detections %} -| [{{ detection.name }}](/{{ detection.source }}/{{ detection.name | lower | replace(' ', '_') }}/) | {% if detection.tags.mitre_attack_enrichments %}{% for attack in detection.tags.mitre_attack_enrichments -%}[{{ attack.mitre_attack_technique }}](/tags/#{{ attack.mitre_attack_technique | lower | replace(" ", "-") }}){% if not loop.last %}, {% endif %}{%- endfor %}{% else %}None{%- endif -%} | {{ detection.type }} | +| [{{ detection.name }}](/{{ detection.getSource() }}/{{ detection.name | lower | replace(' ', '_') }}/) | {% if detection.tags.mitre_attack_enrichments %}{% for attack in detection.tags.mitre_attack_enrichments -%}[{{ attack.mitre_attack_technique }}](/tags/#{{ attack.mitre_attack_technique | lower | replace(" ", "-") }}){% if not loop.last %}, {% endif %}{%- endfor %}{% else %}None{%- endif -%} | {{ detection.type }} | {%- endfor %} {%- endif %} diff --git a/contentctl/output/templates/es_investigations_investigations.j2 b/contentctl/output/templates/es_investigations_investigations.j2 index 883af418..39c548b9 100644 --- a/contentctl/output/templates/es_investigations_investigations.j2 +++ b/contentctl/output/templates/es_investigations_investigations.j2 @@ -2,19 +2,19 @@ {% for response_task in objects %} [panel://workbench_panel_{{ response_task.lowercase_name }}___response_task] label = {{ response_task.name }} -description = {{ response_task.description }} +description = {{ response_task.description | escapeNewlines() }} disabled = 0 tokens = {\ {% for token in response_task.inputs %} {% if token == 'user' %} - "user": {\ - "valuePrefix": "\"",\ - "valueSuffix": "\"",\ - "delimiter": " OR {{ token }}=",\ - "valueType": "primitive",\ - "value": "identity",\ - "default": "null"\ - }{% elif token == 'dest'%} + "user": {\ + "valuePrefix": "\"",\ + "valueSuffix": "\"",\ + "delimiter": " OR {{ token }}=",\ + "valueType": "primitive",\ + "value": "identity",\ + "default": "null"\ + }\{% elif token == 'dest'%} "dest": {\ "valuePrefix": "\"",\ "valueSuffix": "\"",\ @@ -22,7 +22,7 @@ tokens = {\ "valueType": "primitive",\ "value": "asset",\ "default": "null"\ - }{% else %} + }\{% else %} "{{ token }}": {\ "valuePrefix": "\"",\ "valueSuffix": "\"",\ @@ -30,9 +30,9 @@ tokens = {\ "valueType": "primitive",\ "value": "file",\ "default": "null"\ - }{% endif %}{{ "," if not loop.last }}\ + }\{% endif %}{{ "," if not loop.last }} {% endfor %} -}\ + }\ {% endfor %} \ No newline at end of file diff --git a/contentctl/output/templates/es_investigations_stories.j2 b/contentctl/output/templates/es_investigations_stories.j2 index da0268a0..3f4f8abf 100644 --- a/contentctl/output/templates/es_investigations_stories.j2 +++ b/contentctl/output/templates/es_investigations_stories.j2 @@ -2,7 +2,7 @@ {% for story in objects %} [panel_group://workbench_panel_group_{{ story.lowercase_name}}] label = {{ story.name }} -description = {{ story.description }} +description = {{ story.description | escapeNewlines() }} disabled = 0 {% if story.workbench_panels is defined %} diff --git a/contentctl/output/templates/header.j2 b/contentctl/output/templates/header.j2 index d9fd84f4..d959500a 100644 --- a/contentctl/output/templates/header.j2 +++ b/contentctl/output/templates/header.j2 @@ -1,5 +1,6 @@ ############# -# Automatically generated by generator.py in splunk/security_content +# Automatically generated by 'contentctl build' from +# https://github.com/splunk/contentctl # On Date: {{ time }} UTC # Author: {{ author }} # Contact: {{ author_email }} diff --git 
a/contentctl/output/templates/macros.j2 b/contentctl/output/templates/macros.j2 index a9fb11cf..f8136962 100644 --- a/contentctl/output/templates/macros.j2 +++ b/contentctl/output/templates/macros.j2 @@ -1,15 +1,11 @@ {% for macro in objects %} -[{{ macro.name }}{% if macro.arguments is not none %}({{ macro.arguments|length }}){% endif %}] -{% if macro.arguments is not none %} -args = {% for arg in macro.arguments %}{{ arg }}{{ ", " if not loop.last }} -{% endfor %} -{% endif %} -{% if macro.definition is not none %} -definition = {{ macro.definition }} -{% else %} -definition = +[{{ macro.name }}{% if macro.arguments | length > 0 %}({{ macro.arguments|length }}){% endif %}] +{% if macro.arguments | length > 0 %} +args = {% for arg in macro.arguments %}{{ arg }}{{ ", " if not loop.last }}{% endfor %} + {% endif %} -description = {{ macro.description }} +definition = {{ macro.definition | escapeNewlines() }} +description = {{ macro.description | escapeNewlines() }} {% endfor %} diff --git a/contentctl/output/templates/macros_detections.j2 b/contentctl/output/templates/macros_detections.j2 deleted file mode 100644 index dbfd54c5..00000000 --- a/contentctl/output/templates/macros_detections.j2 +++ /dev/null @@ -1,7 +0,0 @@ - -{% for detection in objects %} -[{{ detection.name | replace(' ', '_') | replace('-', '_') | replace('.', '_') | replace('/', '_') | lower + '_filter' }}] -definition = search * -description = Update this macro to limit the output results to filter out false positives. - -{% endfor %} diff --git a/contentctl/output/templates/savedsearches_baselines.j2 b/contentctl/output/templates/savedsearches_baselines.j2 index 29d59ec3..caf00fc0 100644 --- a/contentctl/output/templates/savedsearches_baselines.j2 +++ b/contentctl/output/templates/savedsearches_baselines.j2 @@ -9,11 +9,11 @@ action.escu = 0 action.escu.enabled = 1 action.escu.search_type = support action.escu.full_search_name = {{APP_NAME}} - {{ detection.name }} -description = {{ detection.description }} +description = {{ detection.description | escapeNewlines() }} action.escu.creation_date = {{ detection.date }} action.escu.modification_date = {{ detection.date }} {% if detection.tags.analytic_story is defined %} -action.escu.analytic_story = {{ detection.tags.analytic_story | tojson }} +action.escu.analytic_story = {{ objectListToNameList(detection.tags.analytic_story) | tojson }} {% else %} action.escu.analytic_story = [] {% endif %} @@ -30,9 +30,9 @@ action.escu.providing_technologies = {{ detection.providing_technologies | tojso {% else %} action.escu.providing_technologies = [] {% endif %} -action.escu.eli5 = {{ detection.description }} +action.escu.eli5 = {{ detection.description | escapeNewlines() }} {% if detection.how_to_implement is defined %} -action.escu.how_to_implement = {{ detection.how_to_implement }} +action.escu.how_to_implement = {{ detection.how_to_implement | escapeNewlines() }} {% else %} action.escu.how_to_implement = none {% endif %} @@ -42,7 +42,7 @@ disabled = false disabled = true {% endif %} is_visible = false -search = {{ detection.search }} +search = {{ detection.search | escapeNewlines() }} {% endif %} {% endfor %} diff --git a/contentctl/output/templates/savedsearches_detections.j2 b/contentctl/output/templates/savedsearches_detections.j2 index 9621bf06..92db3833 100644 --- a/contentctl/output/templates/savedsearches_detections.j2 +++ b/contentctl/output/templates/savedsearches_detections.j2 @@ -6,22 +6,22 @@ action.escu = 0 action.escu.enabled = 1 {% if detection.status == "deprecated" 
%} -description = **WARNING**, this detection has been marked **DEPRECATED** by the Splunk Threat Research Team. This means that it will no longer be maintained or supported. If you have any questions feel free to email us at: research@splunk.com. {{ detection.description }} +description = **WARNING**, this detection has been marked **DEPRECATED** by the Splunk Threat Research Team. This means that it will no longer be maintained or supported. If you have any questions feel free to email us at: research@splunk.com. {{ detection.description | escapeNewlines() }} {% elif detection.status == "experimental" %} -description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splunk Threat Research Team. This means that the detection has been manually tested but we do not have the associated attack data to perform automated testing or cannot share this attack dataset due to its sensitive nature. If you have any questions feel free to email us at: research@splunk.com. {{ detection.description }} +description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splunk Threat Research Team. This means that the detection has been manually tested but we do not have the associated attack data to perform automated testing or cannot share this attack dataset due to its sensitive nature. If you have any questions feel free to email us at: research@splunk.com. {{ detection.description | escapeNewlines() }} {% else %} -description = {{ detection.description }} +description = {{ detection.description | escapeNewlines() }} {% endif %} action.escu.mappings = {{ detection.mappings | tojson }} action.escu.data_models = {{ detection.datamodel | tojson }} -action.escu.eli5 = {{ detection.description }} -{% if detection.how_to_implement is defined %} -action.escu.how_to_implement = {{ detection.how_to_implement }} +action.escu.eli5 = {{ detection.description | escapeNewlines() }} +{% if detection.how_to_implement %} +action.escu.how_to_implement = {{ detection.how_to_implement | escapeNewlines() }} {% else %} action.escu.how_to_implement = none {% endif %} -{% if detection.known_false_positives is defined %} -action.escu.known_false_positives = {{ detection.known_false_positives }} +{% if detection.known_false_positives %} +action.escu.known_false_positives = {{ detection.known_false_positives | escapeNewlines() }} {% else %} action.escu.known_false_positives = None {% endif %} @@ -33,16 +33,19 @@ action.escu.search_type = detection {% if detection.tags.product is defined %} action.escu.product = {{ detection.tags.product | tojson }} {% endif %} -{% if detection.providing_technologies is defined %} +{% if detection.tags.atomic_guid %} +action.escu.atomic_red_team_guids = {{ detection.tags.getAtomicGuidStringArray() | tojson }} +{% endif %} +{% if detection.providing_technologies | length > 0 %} action.escu.providing_technologies = {{ detection.providing_technologies | tojson }} {% else %} -action.escu.providing_technologies = [] +action.escu.providing_technologies = null {% endif %} -{% if detection.tags.analytic_story is defined %} -action.escu.analytic_story = {{ detection.tags.analytic_story | tojson }} -{% if detection.deployment.rba.enabled is defined %} +{% if detection.tags.analytic_story %} +action.escu.analytic_story = {{ objectListToNameList(detection.tags.analytic_story) | tojson }} +{% if detection.deployment.alert_action.rba.enabled%} action.risk = 1 -action.risk.param._risk_message = {{ detection.tags.message }} +action.risk.param._risk_message = {{ detection.tags.message | 
escapeNewlines() }} action.risk.param._risk = {{ detection.risk | tojson }} action.risk.param._risk_score = 0 action.risk.param.verbose = 0 @@ -69,34 +72,34 @@ action.correlationsearch.metadata = {{ detection.getMetadata() | tojson }} schedule_window = {{ detection.deployment.scheduling.schedule_window }} {% endif %} {% if detection.deployment is defined %} -{% if detection.deployment.notable.rule_title is defined %} +{% if detection.deployment.alert_action.notable %} action.notable = 1 -{% if detection.nes_fields is defined %} +{% if detection.nes_fields %} action.notable.param.nes_fields = {{ detection.nes_fields }} {% endif %} -action.notable.param.rule_description = {{ detection.deployment.notable.rule_description | custom_jinja2_enrichment_filter(detection) }} -action.notable.param.rule_title = {% if detection.type | lower == "correlation" %}RBA: {{ detection.deployment.notable.rule_title | custom_jinja2_enrichment_filter(detection) }}{% else %}{{ detection.deployment.notable.rule_title | custom_jinja2_enrichment_filter(detection) }}{% endif +%} -action.notable.param.security_domain = {{ detection.tags.security_domain }} +action.notable.param.rule_description = {{ detection.deployment.alert_action.notable.rule_description | custom_jinja2_enrichment_filter(detection) | escapeNewlines()}} +action.notable.param.rule_title = {% if detection.type | lower == "correlation" %}RBA: {{ detection.deployment.alert_action.notable.rule_title | custom_jinja2_enrichment_filter(detection) }}{% else %}{{ detection.deployment.alert_action.notable.rule_title | custom_jinja2_enrichment_filter(detection) }}{% endif +%} +action.notable.param.security_domain = {{ detection.tags.security_domain.value }} action.notable.param.severity = high {% endif %} -{% if detection.deployment.email.to is defined %} -action.email.subject.alert = {{ detection.deployment.email.subject | custom_jinja2_enrichment_filter(detection) }} -action.email.to = {{ detection.deployment.email.to }} -action.email.message.alert = {{ detection.deployment.email.message | custom_jinja2_enrichment_filter(detection) }} +{% if detection.deployment.alert_action.email %} +action.email.subject.alert = {{ detection.deployment.alert_action.email.subject | custom_jinja2_enrichment_filter(detection) | escapeNewlines() }} +action.email.to = {{ detection.deployment.alert_action.email.to }} +action.email.message.alert = {{ detection.deployment.alert_action.email.message | custom_jinja2_enrichment_filter(detection) | escapeNewlines() }} action.email.useNSSubject = 1 {% endif %} -{% if detection.deployment.slack.channel is defined %} +{% if detection.deployment.alert_action.slack %} action.slack = 1 -action.slack.param.channel = {{ detection.deployment.slack.channel | custom_jinja2_enrichment_filter(detection) }} -action.slack.param.message = {{ detection.deployment.slack.message | custom_jinja2_enrichment_filter(detection) }} +action.slack.param.channel = {{ detection.deployment.alert_action.slack.channel | custom_jinja2_enrichment_filter(detection) | escapeNewlines() }} +action.slack.param.message = {{ detection.deployment.alert_action.slack.message | custom_jinja2_enrichment_filter(detection) | escapeNewlines() }} {% endif %} -{% if detection.deployment.phantom.phantom_server is defined %} +{% if detection.deployment.alert_action.phantom%} action.sendtophantom = 1 -action.sendtophantom.param._cam_workers = {{ detection.deployment.phantom.cam_workers | custom_jinja2_enrichment_filter(detection) }} -action.sendtophantom.param.label = {{ 
detection.deployment.phantom.label | custom_jinja2_enrichment_filter(detection) }} -action.sendtophantom.param.phantom_server = {{ detection.deployment.phantom.phantom_server | custom_jinja2_enrichment_filter(detection) }} -action.sendtophantom.param.sensitivity = {{ detection.deployment.phantom.sensitivity | custom_jinja2_enrichment_filter(detection) }} -action.sendtophantom.param.severity = {{ detection.deployment.phantom.severity | custom_jinja2_enrichment_filter(detection) }} +action.sendtophantom.param._cam_workers = {{ detection.deployment.alert_action.phantom.cam_workers | custom_jinja2_enrichment_filter(detection) }} +action.sendtophantom.param.label = {{ detection.deployment.alert_action.phantom.label | custom_jinja2_enrichment_filter(detection) }} +action.sendtophantom.param.phantom_server = {{ detection.deployment.alert_action.phantom.phantom_server | custom_jinja2_enrichment_filter(detection) }} +action.sendtophantom.param.sensitivity = {{ detection.deployment.alert_action.phantom.sensitivity | custom_jinja2_enrichment_filter(detection) }} +action.sendtophantom.param.severity = {{ detection.deployment.alert_action.phantom.severity | custom_jinja2_enrichment_filter(detection) }} {% endif %} {% endif %} alert.digest_mode = 1 @@ -112,7 +115,7 @@ relation = greater than quantity = 0 realtime_schedule = 0 is_visible = false -search = {{ detection.search }} +search = {{ detection.search | escapeNewlines() }} {% endif %} {% endfor %} diff --git a/contentctl/output/templates/savedsearches_investigations.j2 b/contentctl/output/templates/savedsearches_investigations.j2 index 3bf25cb5..d80a2420 100644 --- a/contentctl/output/templates/savedsearches_investigations.j2 +++ b/contentctl/output/templates/savedsearches_investigations.j2 @@ -10,11 +10,11 @@ action.escu = 0 action.escu.enabled = 1 action.escu.search_type = investigative action.escu.full_search_name = {{APP_NAME}} - {{ detection.name }} - Response Task -description = {{ detection.description }} +description = {{ detection.description | escapeNewlines() }} action.escu.creation_date = {{ detection.date }} action.escu.modification_date = {{ detection.date }} {% if detection.tags.analytic_story is defined %} -action.escu.analytic_story = {{ detection.tags.analytic_story | tojson }} +action.escu.analytic_story = {{ objectListToNameList(detection.tags.analytic_story) | tojson }} {% else %} action.escu.analytic_story = [] {% endif %} @@ -22,13 +22,13 @@ action.escu.earliest_time_offset = 3600 action.escu.latest_time_offset = 86400 action.escu.providing_technologies = [] action.escu.data_models = {{ detection.datamodel | tojson }} -action.escu.eli5 = {{ detection.description }} +action.escu.eli5 = {{ detection.description | escapeNewlines() }} action.escu.how_to_implement = none action.escu.known_false_positives = None at this time disabled = true schedule_window = auto is_visible = false -search = {{ detection.search }} +search = {{ detection.search | escapeNewlines() }} {% endif %} {% endif %} diff --git a/contentctl/output/templates/splunk_app/README.md b/contentctl/output/templates/splunk_app/README.md deleted file mode 100644 index d0a87c3f..00000000 --- a/contentctl/output/templates/splunk_app/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Splunk ES Content Update - -This subscription service delivers pre-packaged Security Content for use with Splunk Enterprise Security. Subscribers get regular updates to help security practitioners more quickly address ongoing and time-sensitive customer problems and threats. 
- -Requires Splunk Enterprise Security version 4.5 or greater. - -For more information please visit the [Splunk ES Content Update user documentation](https://docs.splunk.com/Documentation/ESSOC). \ No newline at end of file diff --git a/contentctl/output/templates/transforms.j2 b/contentctl/output/templates/transforms.j2 index 747e2cf0..2fd029ec 100644 --- a/contentctl/output/templates/transforms.j2 +++ b/contentctl/output/templates/transforms.j2 @@ -2,19 +2,19 @@ {% for lookup in objects %} [{{ lookup.name }}] {% if lookup.filename is defined and lookup.filename != None %} -filename = {{ lookup.filename }} +filename = {{ lookup.filename.name }} {% else %} collection = {{ lookup.collection }} external_type = kvstore {% endif %} {% if lookup.default_match is defined and lookup.default_match != None %} -default_match = {{ lookup.default_match }} +default_match = {{ lookup.default_match | lower }} {% endif %} {% if lookup.case_sensitive_match is defined and lookup.case_sensitive_match != None %} -case_sensitive_match = {{ lookup.case_sensitive_match }} +case_sensitive_match = {{ lookup.case_sensitive_match | lower }} {% endif %} {% if lookup.description is defined and lookup.description != None %} -# description = {{ lookup.description }} +# description = {{ lookup.description | escapeNewlines() }} {% endif %} {% if lookup.match_type is defined and lookup.match_type != None %} match_type = {{ lookup.match_type }} diff --git a/contentctl/output/yml_writer.py b/contentctl/output/yml_writer.py index 09d8e311..6ceb02a3 100644 --- a/contentctl/output/yml_writer.py +++ b/contentctl/output/yml_writer.py @@ -1,11 +1,11 @@ import yaml - +from typing import Any class YmlWriter: @staticmethod - def writeYmlFile(file_path : str, obj : dict) -> None: + def writeYmlFile(file_path : str, obj : dict[Any,Any]) -> None: with open(file_path, 'w') as outfile: yaml.safe_dump(obj, outfile, default_flow_style=False, sort_keys=False) \ No newline at end of file diff --git a/contentctl/templates/app_template/README.md b/contentctl/templates/app_template/README.md new file mode 100644 index 00000000..f484b061 --- /dev/null +++ b/contentctl/templates/app_template/README.md @@ -0,0 +1,7 @@ +# Content Pack built with contentctl + +This application was built using the open source [contentctl](https://github.com/splunk/contentctl) tool published by the Splunk Threat Research Team (STRT). + +For questions about the tool, please see the repo or contact STRT at research@splunk.com + +Feel free to update this file to include your own valuable README information. 
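As an aside on the YmlWriter.writeYmlFile change shown above: a minimal, hypothetical usage sketch follows. It assumes contentctl is installed as a package; the stub dictionary and output filename are illustrative only and are not part of this diff.

```python
# Minimal sketch (illustrative, not from this diff): calling the updated
# YmlWriter.writeYmlFile, whose obj parameter is now typed as dict[Any, Any].
from contentctl.output.yml_writer import YmlWriter

stub = {
    "name": "Example Detection",  # hypothetical content
    "description": "Placeholder description used only for this illustration.",
    "tags": {"product": ["Splunk Enterprise Security"]},
}

# writeYmlFile calls yaml.safe_dump with sort_keys=False, so the key order
# of the dictionary above is preserved in the written YAML file.
YmlWriter.writeYmlFile("example_detection.yml", stub)
```
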
diff --git a/contentctl/output/templates/splunk_app/README/essoc_story_detail.txt b/contentctl/templates/app_template/README/essoc_story_detail.txt similarity index 100% rename from contentctl/output/templates/splunk_app/README/essoc_story_detail.txt rename to contentctl/templates/app_template/README/essoc_story_detail.txt diff --git a/contentctl/output/templates/splunk_app/README/essoc_summary.txt b/contentctl/templates/app_template/README/essoc_summary.txt similarity index 100% rename from contentctl/output/templates/splunk_app/README/essoc_summary.txt rename to contentctl/templates/app_template/README/essoc_summary.txt diff --git a/contentctl/output/templates/splunk_app/README/essoc_usage_dashboard.txt b/contentctl/templates/app_template/README/essoc_usage_dashboard.txt similarity index 100% rename from contentctl/output/templates/splunk_app/README/essoc_usage_dashboard.txt rename to contentctl/templates/app_template/README/essoc_usage_dashboard.txt diff --git a/contentctl/output/templates/splunk_app/default/analytic_stories.conf b/contentctl/templates/app_template/default/analytic_stories.conf similarity index 100% rename from contentctl/output/templates/splunk_app/default/analytic_stories.conf rename to contentctl/templates/app_template/default/analytic_stories.conf diff --git a/contentctl/output/templates/splunk_app/default/app.conf b/contentctl/templates/app_template/default/app.conf similarity index 100% rename from contentctl/output/templates/splunk_app/default/app.conf rename to contentctl/templates/app_template/default/app.conf diff --git a/contentctl/output/templates/splunk_app/default/commands.conf b/contentctl/templates/app_template/default/commands.conf similarity index 100% rename from contentctl/output/templates/splunk_app/default/commands.conf rename to contentctl/templates/app_template/default/commands.conf diff --git a/contentctl/output/templates/splunk_app/default/content-version.conf b/contentctl/templates/app_template/default/content-version.conf similarity index 100% rename from contentctl/output/templates/splunk_app/default/content-version.conf rename to contentctl/templates/app_template/default/content-version.conf diff --git a/contentctl/output/templates/splunk_app/default/data/ui/nav/default.xml b/contentctl/templates/app_template/default/data/ui/nav/default.xml similarity index 88% rename from contentctl/output/templates/splunk_app/default/data/ui/nav/default.xml rename to contentctl/templates/app_template/default/data/ui/nav/default.xml index a56d143e..438c284f 100644 --- a/contentctl/output/templates/splunk_app/default/data/ui/nav/default.xml +++ b/contentctl/templates/app_template/default/data/ui/nav/default.xml @@ -2,5 +2,6 @@ [nav XML markup not preserved in this extract: the hunk adds a "Docs" entry to the app navigation] \ No newline at end of file diff --git a/contentctl/output/templates/splunk_app/default/data/ui/views/escu_summary.xml b/contentctl/templates/app_template/default/data/ui/views/escu_summary.xml similarity index 100% rename from contentctl/output/templates/splunk_app/default/data/ui/views/escu_summary.xml rename to contentctl/templates/app_template/default/data/ui/views/escu_summary.xml diff --git a/contentctl/output/templates/splunk_app/default/data/ui/views/feedback.xml b/contentctl/templates/app_template/default/data/ui/views/feedback.xml similarity index 100% rename from contentctl/output/templates/splunk_app/default/data/ui/views/feedback.xml rename to contentctl/templates/app_template/default/data/ui/views/feedback.xml diff --git a/contentctl/output/templates/splunk_app/default/distsearch.conf 
b/contentctl/templates/app_template/default/distsearch.conf similarity index 100% rename from contentctl/output/templates/splunk_app/default/distsearch.conf rename to contentctl/templates/app_template/default/distsearch.conf diff --git a/contentctl/output/templates/splunk_app/default/usage_searches.conf b/contentctl/templates/app_template/default/usage_searches.conf similarity index 100% rename from contentctl/output/templates/splunk_app/default/usage_searches.conf rename to contentctl/templates/app_template/default/usage_searches.conf diff --git a/contentctl/output/templates/splunk_app/default/use_case_library.conf b/contentctl/templates/app_template/default/use_case_library.conf similarity index 100% rename from contentctl/output/templates/splunk_app/default/use_case_library.conf rename to contentctl/templates/app_template/default/use_case_library.conf diff --git a/contentctl/templates/app_template/lookups/mitre_enrichment.csv b/contentctl/templates/app_template/lookups/mitre_enrichment.csv new file mode 100644 index 00000000..3396e889 --- /dev/null +++ b/contentctl/templates/app_template/lookups/mitre_enrichment.csv @@ -0,0 +1,638 @@ +mitre_id,technique,tactics,groups +T1059.010,AutoHotKey & AutoIT,Execution,APT39 +T1564.012,File/Path Exclusions,Defense Evasion,no +T1027.013,Encrypted/Encoded File,Defense Evasion,APT18|APT19|APT28|APT32|APT33|APT39|BITTER|Blue Mockingbird|Dark Caracal|Darkhotel|Elderwood|Fox Kitten|Group5|Higaisa|Inception|Lazarus Group|Leviathan|Magic Hound|Malteiro|Metador|Mofang|Molerats|Moses Staff|OilRig|Putter Panda|Sidewinder|TA2541|TA505|TeamTNT|Threat Group-3390|Transparent Tribe|Tropic Trooper|Whitefly|menuPass +T1574.014,AppDomainManager,Defense Evasion|Persistence|Privilege Escalation,no +T1584.008,Network Devices,Resource Development,APT28|Volt Typhoon +T1548.006,TCC Manipulation,Defense Evasion|Privilege Escalation,no +T1588.007,Artificial Intelligence,Resource Development,no +T1218.015,Electron Applications,Defense Evasion,no +T1543.005,Container Service,Persistence|Privilege Escalation,no +T1665,Hide Infrastructure,Command And Control,APT29 +T1216.002,SyncAppvPublishingServer,Defense Evasion,no +T1556.009,Conditional Access Policies,Credential Access|Defense Evasion|Persistence,Scattered Spider +T1027.012,LNK Icon Smuggling,Defense Evasion,no +T1036.009,Break Process Trees,Defense Evasion,no +T1555.006,Cloud Secrets Management Stores,Credential Access,no +T1016.002,Wi-Fi Discovery,Discovery,Magic Hound +T1566.004,Spearphishing Voice,Initial Access,no +T1598.004,Spearphishing Voice,Reconnaissance,LAPSUS$|Scattered Spider +T1578.005,Modify Cloud Compute Configurations,Defense Evasion,no +T1659,Content Injection,Command And Control|Initial Access,MoustachedBouncer +T1564.011,Ignore Process Interrupts,Defense Evasion,no +T1657,Financial Theft,Impact,Akira|Cinnamon Tempest|FIN13|Malteiro|Scattered Spider|SilverTerrier +T1656,Impersonation,Defense Evasion,LAPSUS$|Scattered Spider +T1567.004,Exfiltration Over Webhook,Exfiltration,no +T1098.006,Additional Container Cluster Roles,Persistence|Privilege Escalation,no +T1654,Log Enumeration,Discovery,APT5|Volt Typhoon +T1548.005,Temporary Elevated Cloud Access,Defense Evasion|Privilege Escalation,no +T1653,Power Settings,Persistence,no +T1021.008,Direct Cloud VM Connections,Lateral Movement,no +T1562.012,Disable or Modify Linux Audit System,Defense Evasion,no +T1556.008,Network Provider DLL,Credential Access|Defense Evasion|Persistence,no +T1652,Device Driver Discovery,Discovery,no +T1027.011,Fileless 
Storage,Defense Evasion,APT32|Turla +T1027.010,Command Obfuscation,Defense Evasion,APT19|APT32|Aquatic Panda|Chimera|Cobalt Group|Ember Bear|FIN6|FIN7|FIN8|Fox Kitten|GOLD SOUTHFIELD|Gamaredon Group|HEXANE|LazyScripter|Leafminer|Magic Hound|MuddyWater|Patchwork|Sandworm Team|Sidewinder|Silence|TA505|TA551|Turla|Wizard Spider +T1562.011,Spoof Security Alerting,Defense Evasion,no +T1552.008,Chat Messages,Credential Access,LAPSUS$ +T1651,Cloud Administration Command,Execution,APT29 +T1650,Acquire Access,Resource Development,no +T1036.008,Masquerade File Type,Defense Evasion,Volt Typhoon +T1567.003,Exfiltration to Text Storage Sites,Exfiltration,no +T1583.008,Malvertising,Resource Development,Mustard Tempest +T1021.007,Cloud Services,Lateral Movement,APT29|Scattered Spider +T1205.002,Socket Filters,Command And Control|Defense Evasion|Persistence,no +T1608.006,SEO Poisoning,Resource Development,Mustard Tempest +T1027.009,Embedded Payloads,Defense Evasion,no +T1027.008,Stripped Payloads,Defense Evasion,no +T1556.007,Hybrid Identity,Credential Access|Defense Evasion|Persistence,APT29 +T1546.016,Installer Packages,Persistence|Privilege Escalation,no +T1027.007,Dynamic API Resolution,Defense Evasion,Lazarus Group +T1593.003,Code Repositories,Reconnaissance,LAPSUS$ +T1649,Steal or Forge Authentication Certificates,Credential Access,APT29 +T1070.009,Clear Persistence,Defense Evasion,no +T1070.008,Clear Mailbox Data,Defense Evasion,no +T1584.007,Serverless,Resource Development,no +T1583.007,Serverless,Resource Development,no +T1070.007,Clear Network Connection History and Configurations,Defense Evasion,Volt Typhoon +T1556.006,Multi-Factor Authentication,Credential Access|Defense Evasion|Persistence,Scattered Spider +T1586.003,Cloud Accounts,Resource Development,APT29 +T1585.003,Cloud Accounts,Resource Development,no +T1648,Serverless Execution,Execution,no +T1647,Plist File Modification,Defense Evasion,no +T1622,Debugger Evasion,Defense Evasion|Discovery,no +T1621,Multi-Factor Authentication Request Generation,Credential Access,APT29|LAPSUS$|Scattered Spider +T1505.005,Terminal Services DLL,Persistence,no +T1557.003,DHCP Spoofing,Collection|Credential Access,no +T1059.009,Cloud API,Execution,APT29|TeamTNT +T1595.003,Wordlist Scanning,Reconnaissance,APT41|Volatile Cedar +T1098.005,Device Registration,Persistence|Privilege Escalation,APT29 +T1574.013,KernelCallbackTable,Defense Evasion|Persistence|Privilege Escalation,Lazarus Group +T1556.005,Reversible Encryption,Credential Access|Defense Evasion|Persistence,no +T1055.015,ListPlanting,Defense Evasion|Privilege Escalation,no +T1564.010,Process Argument Spoofing,Defense Evasion,no +T1564.009,Resource Forking,Defense Evasion,no +T1559.003,XPC Services,Execution,no +T1562.010,Downgrade Attack,Defense Evasion,no +T1547.015,Login Items,Persistence|Privilege Escalation,no +T1620,Reflective Code Loading,Defense Evasion,Lazarus Group +T1619,Cloud Storage Object Discovery,Discovery,no +T1218.014,MMC,Defense Evasion,no +T1218.013,Mavinject,Defense Evasion,no +T1614.001,System Language Discovery,Discovery,Ke3chang|Malteiro +T1615,Group Policy Discovery,Discovery,Turla +T1036.007,Double File Extension,Defense Evasion,Mustang Panda +T1562.009,Safe Mode Boot,Defense Evasion,no +T1564.008,Email Hiding Rules,Defense Evasion,FIN4|Scattered Spider +T1505.004,IIS Components,Persistence,no +T1027.006,HTML Smuggling,Defense Evasion,APT29 +T1213.003,Code Repositories,Collection,APT41|LAPSUS$|Scattered Spider +T1553.006,Code Signing Policy Modification,Defense 
Evasion,APT39|Turla +T1614,System Location Discovery,Discovery,SideCopy +T1613,Container and Resource Discovery,Discovery,TeamTNT +T1552.007,Container API,Credential Access,no +T1612,Build Image on Host,Defense Evasion,no +T1611,Escape to Host,Privilege Escalation,TeamTNT +T1204.003,Malicious Image,Execution,TeamTNT +T1053.007,Container Orchestration Job,Execution|Persistence|Privilege Escalation,no +T1610,Deploy Container,Defense Evasion|Execution,TeamTNT +T1609,Container Administration Command,Execution,TeamTNT +T1608.005,Link Target,Resource Development,LuminousMoth|Silent Librarian +T1608.004,Drive-by Target,Resource Development,APT32|Dragonfly|FIN7|LuminousMoth|Mustard Tempest|Threat Group-3390|Transparent Tribe +T1608.003,Install Digital Certificate,Resource Development,no +T1608.002,Upload Tool,Resource Development,Threat Group-3390 +T1608.001,Upload Malware,Resource Development,APT32|BITTER|EXOTIC LILY|Earth Lusca|FIN7|Gamaredon Group|HEXANE|Kimsuky|LazyScripter|LuminousMoth|Mustang Panda|Mustard Tempest|SideCopy|TA2541|TA505|TeamTNT|Threat Group-3390 +T1608,Stage Capabilities,Resource Development,Mustang Panda +T1016.001,Internet Connection Discovery,Discovery,APT29|FIN13|FIN8|Gamaredon Group|HAFNIUM|HEXANE|Magic Hound|TA2541|Turla +T1553.005,Mark-of-the-Web Bypass,Defense Evasion,APT29|TA505 +T1555.005,Password Managers,Credential Access,Fox Kitten|LAPSUS$|Threat Group-3390 +T1484.002,Trust Modification,Defense Evasion|Privilege Escalation,Scattered Spider +T1484.001,Group Policy Modification,Defense Evasion|Privilege Escalation,Cinnamon Tempest|Indrik Spider +T1547.014,Active Setup,Persistence|Privilege Escalation,no +T1606.002,SAML Tokens,Credential Access,no +T1606.001,Web Cookies,Credential Access,no +T1606,Forge Web Credentials,Credential Access,no +T1555.004,Windows Credential Manager,Credential Access,OilRig|Stealth Falcon|Turla|Wizard Spider +T1059.008,Network Device CLI,Execution,no +T1602.002,Network Device Configuration Dump,Collection,no +T1542.005,TFTP Boot,Defense Evasion|Persistence,no +T1542.004,ROMMONkit,Defense Evasion|Persistence,no +T1602.001,SNMP (MIB Dump),Collection,no +T1602,Data from Configuration Repository,Collection,no +T1601.002,Downgrade System Image,Defense Evasion,no +T1601.001,Patch System Image,Defense Evasion,no +T1601,Modify System Image,Defense Evasion,no +T1600.002,Disable Crypto Hardware,Defense Evasion,no +T1600.001,Reduce Key Space,Defense Evasion,no +T1600,Weaken Encryption,Defense Evasion,no +T1556.004,Network Device Authentication,Credential Access|Defense Evasion|Persistence,no +T1599.001,Network Address Translation Traversal,Defense Evasion,no +T1599,Network Boundary Bridging,Defense Evasion,no +T1020.001,Traffic Duplication,Exfiltration,no +T1557.002,ARP Cache Poisoning,Collection|Credential Access,Cleaver|LuminousMoth +T1588.006,Vulnerabilities,Resource Development,Sandworm Team +T1053.006,Systemd Timers,Execution|Persistence|Privilege Escalation,no +T1562.008,Disable or Modify Cloud Logs,Defense Evasion,APT29 +T1547.012,Print Processors,Persistence|Privilege Escalation,Earth Lusca +T1598.003,Spearphishing Link,Reconnaissance,APT28|APT32|Dragonfly|Kimsuky|Magic Hound|Mustang Panda|Patchwork|Sandworm Team|Sidewinder|Silent Librarian|ZIRCONIUM +T1598.002,Spearphishing Attachment,Reconnaissance,Dragonfly|SideCopy|Sidewinder +T1598.001,Spearphishing Service,Reconnaissance,no +T1598,Phishing for Information,Reconnaissance,APT28|Scattered Spider|ZIRCONIUM +T1597.002,Purchase Technical Data,Reconnaissance,LAPSUS$ +T1597.001,Threat Intel 
Vendors,Reconnaissance,no +T1597,Search Closed Sources,Reconnaissance,EXOTIC LILY +T1596.005,Scan Databases,Reconnaissance,APT41 +T1596.004,CDNs,Reconnaissance,no +T1596.003,Digital Certificates,Reconnaissance,no +T1596.001,DNS/Passive DNS,Reconnaissance,no +T1596.002,WHOIS,Reconnaissance,no +T1596,Search Open Technical Databases,Reconnaissance,no +T1595.002,Vulnerability Scanning,Reconnaissance,APT28|APT29|APT41|Aquatic Panda|Dragonfly|Earth Lusca|Magic Hound|Sandworm Team|TeamTNT|Volatile Cedar +T1595.001,Scanning IP Blocks,Reconnaissance,TeamTNT +T1595,Active Scanning,Reconnaissance,no +T1594,Search Victim-Owned Websites,Reconnaissance,EXOTIC LILY|Kimsuky|Sandworm Team|Silent Librarian +T1593.002,Search Engines,Reconnaissance,Kimsuky +T1593.001,Social Media,Reconnaissance,EXOTIC LILY|Kimsuky +T1593,Search Open Websites/Domains,Reconnaissance,Sandworm Team +T1592.004,Client Configurations,Reconnaissance,HAFNIUM +T1592.003,Firmware,Reconnaissance,no +T1592.002,Software,Reconnaissance,Andariel|Magic Hound|Sandworm Team +T1592.001,Hardware,Reconnaissance,no +T1592,Gather Victim Host Information,Reconnaissance,no +T1591.004,Identify Roles,Reconnaissance,HEXANE|LAPSUS$ +T1591.003,Identify Business Tempo,Reconnaissance,no +T1591.001,Determine Physical Locations,Reconnaissance,Magic Hound +T1591.002,Business Relationships,Reconnaissance,Dragonfly|LAPSUS$|Sandworm Team +T1591,Gather Victim Org Information,Reconnaissance,Kimsuky|Lazarus Group +T1590.006,Network Security Appliances,Reconnaissance,no +T1590.005,IP Addresses,Reconnaissance,Andariel|HAFNIUM|Magic Hound +T1590.004,Network Topology,Reconnaissance,FIN13 +T1590.003,Network Trust Dependencies,Reconnaissance,no +T1590.002,DNS,Reconnaissance,no +T1590.001,Domain Properties,Reconnaissance,Sandworm Team +T1590,Gather Victim Network Information,Reconnaissance,HAFNIUM +T1589.003,Employee Names,Reconnaissance,APT41|Kimsuky|Sandworm Team|Silent Librarian +T1589.002,Email Addresses,Reconnaissance,APT32|EXOTIC LILY|HAFNIUM|HEXANE|Kimsuky|LAPSUS$|Lazarus Group|Magic Hound|Sandworm Team|Silent Librarian|TA551 +T1589.001,Credentials,Reconnaissance,APT28|APT41|Chimera|LAPSUS$|Leviathan|Magic Hound +T1589,Gather Victim Identity Information,Reconnaissance,APT32|FIN13|HEXANE|LAPSUS$|Magic Hound +T1588.005,Exploits,Resource Development,Kimsuky +T1588.004,Digital Certificates,Resource Development,BlackTech|Lazarus Group|LuminousMoth|Silent Librarian +T1588.003,Code Signing Certificates,Resource Development,BlackTech|Ember Bear|FIN8|Threat Group-3390|Wizard Spider +T1588.002,Tool,Resource Development,APT-C-36|APT1|APT19|APT28|APT29|APT32|APT33|APT38|APT39|APT41|Aoqin Dragon|Aquatic Panda|BITTER|BRONZE BUTLER|BackdoorDiplomacy|BlackTech|Blue Mockingbird|Carbanak|Chimera|Cinnamon Tempest|Cleaver|Cobalt Group|CopyKittens|DarkHydrus|DarkVishnya|Dragonfly|Earth Lusca|Ember Bear|FIN10|FIN13|FIN5|FIN6|FIN7|FIN8|Ferocious Kitten|GALLIUM|Gorgon Group|HEXANE|Inception|IndigoZebra|Ke3chang|Kimsuky|LAPSUS$|Lazarus Group|Leafminer|LuminousMoth|Magic Hound|Metador|Moses Staff|MuddyWater|POLONIUM|Patchwork|PittyTiger|Sandworm Team|Silence|Silent Librarian|TA2541|TA505|Threat Group-3390|Thrip|Turla|Volt Typhoon|WIRTE|Whitefly|Wizard Spider|menuPass +T1588.001,Malware,Resource Development,APT1|Andariel|Aquatic Panda|BackdoorDiplomacy|Earth Lusca|LAPSUS$|LazyScripter|LuminousMoth|Metador|TA2541|TA505|Turla +T1588,Obtain Capabilities,Resource Development,no +T1587.004,Exploits,Resource Development,no +T1587.003,Digital Certificates,Resource Development,APT29|PROMETHIUM 
+T1587.002,Code Signing Certificates,Resource Development,PROMETHIUM|Patchwork +T1587.001,Malware,Resource Development,APT29|Aoqin Dragon|Cleaver|FIN13|FIN7|Indrik Spider|Ke3chang|Kimsuky|Lazarus Group|LuminousMoth|Moses Staff|Sandworm Team|TeamTNT|Turla +T1587,Develop Capabilities,Resource Development,Kimsuky +T1586.002,Email Accounts,Resource Development,APT28|APT29|HEXANE|IndigoZebra|Kimsuky|LAPSUS$|Leviathan|Magic Hound +T1586.001,Social Media Accounts,Resource Development,Leviathan|Sandworm Team +T1586,Compromise Accounts,Resource Development,no +T1585.002,Email Accounts,Resource Development,APT1|EXOTIC LILY|HEXANE|Indrik Spider|Kimsuky|Lazarus Group|Leviathan|Magic Hound|Mustang Panda|Sandworm Team|Silent Librarian|Wizard Spider +T1585.001,Social Media Accounts,Resource Development,APT32|CURIUM|Cleaver|EXOTIC LILY|Fox Kitten|HEXANE|Kimsuky|Lazarus Group|Leviathan|Magic Hound|Sandworm Team +T1585,Establish Accounts,Resource Development,APT17|Fox Kitten +T1584.006,Web Services,Resource Development,Earth Lusca|Turla +T1584.005,Botnet,Resource Development,Axiom|Sandworm Team +T1584.004,Server,Resource Development,APT16|Dragonfly|Earth Lusca|Indrik Spider|Lazarus Group|Sandworm Team|Turla|Volt Typhoon +T1584.003,Virtual Private Server,Resource Development,Turla +T1584.002,DNS Server,Resource Development,LAPSUS$ +T1584.001,Domains,Resource Development,APT1|Kimsuky|Magic Hound|Mustard Tempest|SideCopy|Transparent Tribe +T1583.006,Web Services,Resource Development,APT17|APT28|APT29|APT32|Confucius|Earth Lusca|FIN7|HAFNIUM|IndigoZebra|Kimsuky|Lazarus Group|LazyScripter|Magic Hound|MuddyWater|POLONIUM|TA2541|Turla|ZIRCONIUM +T1583.005,Botnet,Resource Development,no +T1583.004,Server,Resource Development,Earth Lusca|GALLIUM|Kimsuky|Mustard Tempest|Sandworm Team +T1583.003,Virtual Private Server,Resource Development,APT28|Axiom|Dragonfly|HAFNIUM|LAPSUS$ +T1583.002,DNS Server,Resource Development,Axiom|HEXANE +T1584,Compromise Infrastructure,Resource Development,no +T1583.001,Domains,Resource Development,APT1|APT28|APT32|BITTER|Dragonfly|EXOTIC LILY|Earth Lusca|FIN7|Ferocious Kitten|Gamaredon Group|HEXANE|IndigoZebra|Kimsuky|Lazarus Group|LazyScripter|Leviathan|Magic Hound|Mustang Panda|Sandworm Team|Silent Librarian|TA2541|TA505|TeamTNT|Threat Group-3390|Transparent Tribe|Winnti Group|ZIRCONIUM|menuPass +T1583,Acquire Infrastructure,Resource Development,Sandworm Team +T1564.007,VBA Stomping,Defense Evasion,no +T1558.004,AS-REP Roasting,Credential Access,no +T1580,Cloud Infrastructure Discovery,Discovery,Scattered Spider +T1218.012,Verclsid,Defense Evasion,no +T1205.001,Port Knocking,Command And Control|Defense Evasion|Persistence,PROMETHIUM +T1564.006,Run Virtual Instance,Defense Evasion,no +T1564.005,Hidden File System,Defense Evasion,Equation|Strider +T1556.003,Pluggable Authentication Modules,Credential Access|Defense Evasion|Persistence,no +T1574.012,COR_PROFILER,Defense Evasion|Persistence|Privilege Escalation,Blue Mockingbird +T1562.007,Disable or Modify Cloud Firewall,Defense Evasion,no +T1098.004,SSH Authorized Keys,Persistence|Privilege Escalation,Earth Lusca|TeamTNT +T1480.001,Environmental Keying,Defense Evasion,APT41|Equation +T1059.007,JavaScript,Execution,APT32|Cobalt Group|Earth Lusca|Ember Bear|Evilnum|FIN6|FIN7|Higaisa|Indrik Spider|Kimsuky|LazyScripter|Leafminer|Molerats|MoustachedBouncer|MuddyWater|Sidewinder|Silence|TA505|Turla +T1578.004,Revert Cloud Instance,Defense Evasion,no +T1578.003,Delete Cloud Instance,Defense Evasion,LAPSUS$ +T1578.001,Create Snapshot,Defense 
Evasion,no +T1578.002,Create Cloud Instance,Defense Evasion,LAPSUS$|Scattered Spider +T1127.001,MSBuild,Defense Evasion,no +T1027.005,Indicator Removal from Tools,Defense Evasion,APT3|Deep Panda|GALLIUM|OilRig|Patchwork|Turla +T1562.006,Indicator Blocking,Defense Evasion,APT41|APT5 +T1573.002,Asymmetric Cryptography,Command And Control,Cobalt Group|FIN6|FIN8|OilRig|TA2541|Tropic Trooper +T1573.001,Symmetric Cryptography,Command And Control,APT28|APT33|BRONZE BUTLER|Darkhotel|Higaisa|Inception|Lazarus Group|MuddyWater|Mustang Panda|Stealth Falcon|Volt Typhoon|ZIRCONIUM +T1573,Encrypted Channel,Command And Control,APT29|BITTER|Magic Hound|Tropic Trooper +T1027.004,Compile After Delivery,Defense Evasion,Gamaredon Group|MuddyWater|Rocke +T1574.004,Dylib Hijacking,Defense Evasion|Persistence|Privilege Escalation,no +T1546.015,Component Object Model Hijacking,Persistence|Privilege Escalation,APT28 +T1071.004,DNS,Command And Control,APT18|APT39|APT41|Chimera|Cobalt Group|FIN7|Ke3chang|LazyScripter|OilRig|Tropic Trooper +T1071.003,Mail Protocols,Command And Control,APT28|APT32|Kimsuky|SilverTerrier|Turla +T1071.002,File Transfer Protocols,Command And Control,APT41|Dragonfly|Kimsuky|SilverTerrier +T1071.001,Web Protocols,Command And Control,APT18|APT19|APT28|APT32|APT33|APT37|APT38|APT39|APT41|BITTER|BRONZE BUTLER|Chimera|Cobalt Group|Confucius|Dark Caracal|FIN13|FIN4|FIN8|Gamaredon Group|HAFNIUM|Higaisa|Inception|Ke3chang|Kimsuky|Lazarus Group|LuminousMoth|Magic Hound|Metador|MuddyWater|Mustang Panda|OilRig|Orangeworm|Rancor|Rocke|Sandworm Team|Sidewinder|SilverTerrier|Stealth Falcon|TA505|TA551|TeamTNT|Threat Group-3390|Tropic Trooper|Turla|WIRTE|Windshift|Wizard Spider +T1572,Protocol Tunneling,Command And Control,Chimera|Cinnamon Tempest|Cobalt Group|FIN13|FIN6|Fox Kitten|Leviathan|Magic Hound|OilRig +T1048.003,Exfiltration Over Unencrypted Non-C2 Protocol,Exfiltration,APT32|APT33|FIN6|FIN8|Lazarus Group|OilRig|Thrip|Wizard Spider +T1048.002,Exfiltration Over Asymmetric Encrypted Non-C2 Protocol,Exfiltration,APT28 +T1048.001,Exfiltration Over Symmetric Encrypted Non-C2 Protocol,Exfiltration,no +T1001.003,Protocol Impersonation,Command And Control,Higaisa|Lazarus Group +T1001.002,Steganography,Command And Control,Axiom +T1001.001,Junk Data,Command And Control,APT28 +T1132.002,Non-Standard Encoding,Command And Control,no +T1132.001,Standard Encoding,Command And Control,APT19|APT33|BRONZE BUTLER|HAFNIUM|Lazarus Group|MuddyWater|Patchwork|Sandworm Team|TA551|Tropic Trooper +T1090.004,Domain Fronting,Command And Control,APT29 +T1090.003,Multi-hop Proxy,Command And Control,APT28|APT29|FIN4|Inception|Leviathan +T1090.002,External Proxy,Command And Control,APT28|APT29|APT3|APT39|FIN5|GALLIUM|Lazarus Group|MuddyWater|Silence|Tonto Team|menuPass +T1090.001,Internal Proxy,Command And Control,APT39|FIN13|Higaisa|Lazarus Group|Strider|Turla|Volt Typhoon +T1102.003,One-Way Communication,Command And Control,Leviathan +T1102.002,Bidirectional Communication,Command And Control,APT12|APT28|APT37|APT39|Carbanak|FIN7|HEXANE|Kimsuky|Lazarus Group|Magic Hound|MuddyWater|POLONIUM|Sandworm Team|Turla|ZIRCONIUM +T1102.001,Dead Drop Resolver,Command And Control,APT41|BRONZE BUTLER|Patchwork|RTM|Rocke +T1571,Non-Standard Port,Command And Control,APT-C-36|APT32|APT33|DarkVishnya|FIN7|Lazarus Group|Magic Hound|Rocke|Sandworm Team|Silence|WIRTE +T1074.002,Remote Data Staging,Collection,APT28|Chimera|FIN6|FIN8|Leviathan|MoustachedBouncer|Threat Group-3390|ToddyCat|menuPass +T1074.001,Local Data 
Staging,Collection,APT28|APT3|APT39|APT5|BackdoorDiplomacy|Chimera|Dragonfly|FIN13|FIN5|GALLIUM|Indrik Spider|Kimsuky|Lazarus Group|Leviathan|MuddyWater|Mustang Panda|Patchwork|Sidewinder|TeamTNT|Threat Group-3390|Volt Typhoon|Wizard Spider|menuPass +T1078.004,Cloud Accounts,Defense Evasion|Initial Access|Persistence|Privilege Escalation,APT28|APT29|APT33|APT5|Ke3chang|LAPSUS$ +T1564.004,NTFS File Attributes,Defense Evasion,APT32 +T1564.003,Hidden Window,Defense Evasion,APT19|APT28|APT3|APT32|CopyKittens|DarkHydrus|Deep Panda|Gamaredon Group|Gorgon Group|Higaisa|Kimsuky|Magic Hound|Nomadic Octopus|ToddyCat +T1078.003,Local Accounts,Defense Evasion|Initial Access|Persistence|Privilege Escalation,APT29|APT32|FIN10|FIN7|HAFNIUM|Kimsuky|PROMETHIUM|Tropic Trooper|Turla +T1078.002,Domain Accounts,Defense Evasion|Initial Access|Persistence|Privilege Escalation,APT3|APT5|Chimera|Cinnamon Tempest|Indrik Spider|Magic Hound|Naikon|Sandworm Team|TA505|Threat Group-1314|ToddyCat|Volt Typhoon|Wizard Spider +T1078.001,Default Accounts,Defense Evasion|Initial Access|Persistence|Privilege Escalation,FIN13|Magic Hound +T1564.002,Hidden Users,Defense Evasion,Dragonfly|Kimsuky +T1574.006,Dynamic Linker Hijacking,Defense Evasion|Persistence|Privilege Escalation,APT41|Rocke +T1574.002,DLL Side-Loading,Defense Evasion|Persistence|Privilege Escalation,APT19|APT3|APT32|APT41|BRONZE BUTLER|BlackTech|Chimera|Cinnamon Tempest|Earth Lusca|FIN13|GALLIUM|Higaisa|Lazarus Group|LuminousMoth|MuddyWater|Mustang Panda|Naikon|Patchwork|SideCopy|Sidewinder|Threat Group-3390|Tropic Trooper|menuPass +T1574.001,DLL Search Order Hijacking,Defense Evasion|Persistence|Privilege Escalation,APT41|Aquatic Panda|BackdoorDiplomacy|Cinnamon Tempest|Evilnum|RTM|Threat Group-3390|Tonto Team|Whitefly|menuPass +T1574.008,Path Interception by Search Order Hijacking,Defense Evasion|Persistence|Privilege Escalation,no +T1574.007,Path Interception by PATH Environment Variable,Defense Evasion|Persistence|Privilege Escalation,no +T1574.009,Path Interception by Unquoted Path,Defense Evasion|Persistence|Privilege Escalation,no +T1574.011,Services Registry Permissions Weakness,Defense Evasion|Persistence|Privilege Escalation,no +T1574.005,Executable Installer File Permissions Weakness,Defense Evasion|Persistence|Privilege Escalation,no +T1574.010,Services File Permissions Weakness,Defense Evasion|Persistence|Privilege Escalation,no +T1574,Hijack Execution Flow,Defense Evasion|Persistence|Privilege Escalation,no +T1069.001,Local Groups,Discovery,Chimera|HEXANE|OilRig|Tonto Team|Turla|Volt Typhoon|admin@338 +T1570,Lateral Tool Transfer,Lateral Movement,APT32|APT41|Aoqin Dragon|Chimera|FIN10|GALLIUM|Magic Hound|Sandworm Team|Turla|Volt Typhoon|Wizard Spider +T1568.003,DNS Calculation,Command And Control,APT12 +T1204.002,Malicious File,Execution,APT-C-36|APT12|APT19|APT28|APT29|APT30|APT32|APT33|APT37|APT38|APT39|Ajax Security Team|Andariel|Aoqin Dragon|BITTER|BRONZE BUTLER|BlackTech|CURIUM|Cobalt Group|Confucius|Dark Caracal|DarkHydrus|Darkhotel|Dragonfly|EXOTIC LILY|Earth Lusca|Elderwood|Ember Bear|FIN4|FIN6|FIN7|FIN8|Ferocious Kitten|Gallmaker|Gamaredon Group|Gorgon Group|HEXANE|Higaisa|Inception|IndigoZebra|Indrik Spider|Kimsuky|Lazarus Group|LazyScripter|Leviathan|Machete|Magic Hound|Malteiro|Mofang|Molerats|MuddyWater|Mustang Panda|Naikon|Nomadic Octopus|OilRig|PLATINUM|PROMETHIUM|Patchwork|RTM|Rancor|Sandworm Team|SideCopy|Sidewinder|Silence|TA2541|TA459|TA505|TA551|The White Company|Threat Group-3390|Tonto Team|Transparent Tribe|Tropic 
Trooper|WIRTE|Whitefly|Windshift|Wizard Spider|admin@338|menuPass +T1204.001,Malicious Link,Execution,APT28|APT29|APT3|APT32|APT33|APT39|BlackTech|Cobalt Group|Confucius|EXOTIC LILY|Earth Lusca|Elderwood|Ember Bear|Evilnum|FIN4|FIN7|FIN8|Kimsuky|LazyScripter|Leviathan|LuminousMoth|Machete|Magic Hound|Mofang|Molerats|MuddyWater|Mustang Panda|Mustard Tempest|OilRig|Patchwork|Sandworm Team|Sidewinder|TA2541|TA505|Transparent Tribe|Turla|Windshift|Wizard Spider|ZIRCONIUM +T1195.003,Compromise Hardware Supply Chain,Initial Access,no +T1195.002,Compromise Software Supply Chain,Initial Access,APT41|Cobalt Group|Dragonfly|FIN7|GOLD SOUTHFIELD|Sandworm Team|Threat Group-3390 +T1195.001,Compromise Software Dependencies and Development Tools,Initial Access,no +T1568.001,Fast Flux DNS,Command And Control,TA505|menuPass +T1052.001,Exfiltration over USB,Exfiltration,Mustang Panda|Tropic Trooper +T1569.002,Service Execution,Execution,APT32|APT38|APT39|APT41|Blue Mockingbird|Chimera|FIN6|Ke3chang|Silence|Wizard Spider +T1569.001,Launchctl,Execution,no +T1569,System Services,Execution,TeamTNT +T1568.002,Domain Generation Algorithms,Command And Control,APT41|TA551 +T1568,Dynamic Resolution,Command And Control,APT29|BITTER|Gamaredon Group|TA2541|Transparent Tribe +T1011.001,Exfiltration Over Bluetooth,Exfiltration,no +T1567.002,Exfiltration to Cloud Storage,Exfiltration,Akira|Chimera|Cinnamon Tempest|Confucius|Earth Lusca|FIN7|HAFNIUM|HEXANE|Kimsuky|Leviathan|LuminousMoth|POLONIUM|Scattered Spider|Threat Group-3390|ToddyCat|Turla|Wizard Spider|ZIRCONIUM +T1567.001,Exfiltration to Code Repository,Exfiltration,no +T1059.006,Python,Execution,APT29|APT37|APT39|BRONZE BUTLER|Cinnamon Tempest|Dragonfly|Earth Lusca|Kimsuky|Machete|MuddyWater|Rocke|Tonto Team|Turla|ZIRCONIUM +T1059.005,Visual Basic,Execution,APT-C-36|APT32|APT33|APT37|APT38|APT39|BRONZE BUTLER|Cobalt Group|Confucius|Earth Lusca|FIN13|FIN4|FIN7|Gamaredon Group|Gorgon Group|HEXANE|Higaisa|Inception|Kimsuky|Lazarus Group|LazyScripter|Leviathan|Machete|Magic Hound|Malteiro|Molerats|MuddyWater|Mustang Panda|OilRig|Patchwork|Rancor|Sandworm Team|SideCopy|Sidewinder|Silence|TA2541|TA459|TA505|Transparent Tribe|Turla|WIRTE|Windshift +T1059.004,Unix Shell,Execution,APT41|Rocke|TeamTNT +T1059.003,Windows Command Shell,Execution,APT1|APT18|APT28|APT3|APT32|APT37|APT38|APT41|APT5|Aquatic Panda|BRONZE BUTLER|Blue Mockingbird|Chimera|Cinnamon Tempest|Cobalt Group|Dark Caracal|Darkhotel|Dragonfly|Ember Bear|FIN10|FIN13|FIN6|FIN7|FIN8|Fox Kitten|GALLIUM|Gamaredon Group|Gorgon Group|HAFNIUM|Higaisa|Indrik Spider|Ke3chang|Kimsuky|Lazarus Group|LazyScripter|Machete|Magic Hound|Metador|MuddyWater|Mustang Panda|Nomadic Octopus|OilRig|Patchwork|Rancor|Silence|Sowbug|Suckfly|TA505|TA551|TeamTNT|Threat Group-1314|Threat Group-3390|ToddyCat|Tropic Trooper|Turla|Volt Typhoon|Wizard Spider|ZIRCONIUM|admin@338|menuPass +T1059.002,AppleScript,Execution,no +T1059.001,PowerShell,Execution,APT19|APT28|APT29|APT3|APT32|APT33|APT38|APT39|APT41|APT5|Aquatic Panda|BRONZE BUTLER|Blue Mockingbird|Chimera|Cinnamon Tempest|Cobalt Group|Confucius|CopyKittens|DarkHydrus|DarkVishnya|Deep Panda|Dragonfly|Earth Lusca|Ember Bear|FIN10|FIN13|FIN6|FIN7|FIN8|Fox Kitten|GALLIUM|GOLD SOUTHFIELD|Gallmaker|Gamaredon Group|Gorgon Group|HAFNIUM|HEXANE|Inception|Indrik Spider|Kimsuky|Lazarus Group|LazyScripter|Leviathan|Magic Hound|Molerats|MoustachedBouncer|MuddyWater|Mustang Panda|Nomadic Octopus|OilRig|Patchwork|Poseidon Group|Sandworm Team|Sidewinder|Silence|Stealth 
Falcon|TA2541|TA459|TA505|TeamTNT|Threat Group-3390|Thrip|ToddyCat|Tonto Team|Turla|Volt Typhoon|WIRTE|Wizard Spider|menuPass +T1567,Exfiltration Over Web Service,Exfiltration,APT28|Magic Hound +T1497.003,Time Based Evasion,Defense Evasion|Discovery,no +T1497.002,User Activity Based Checks,Defense Evasion|Discovery,Darkhotel|FIN7 +T1497.001,System Checks,Defense Evasion|Discovery,Darkhotel|Evilnum|OilRig|Volt Typhoon +T1498.002,Reflection Amplification,Impact,no +T1498.001,Direct Network Flood,Impact,no +T1566.003,Spearphishing via Service,Initial Access,APT29|Ajax Security Team|CURIUM|Dark Caracal|EXOTIC LILY|FIN6|Lazarus Group|Magic Hound|OilRig|ToddyCat|Windshift +T1566.002,Spearphishing Link,Initial Access,APT1|APT28|APT29|APT3|APT32|APT33|APT39|BlackTech|Cobalt Group|Confucius|EXOTIC LILY|Earth Lusca|Elderwood|Ember Bear|Evilnum|FIN4|FIN7|FIN8|Kimsuky|Lazarus Group|LazyScripter|Leviathan|LuminousMoth|Machete|Magic Hound|Mofang|Molerats|MuddyWater|Mustang Panda|Mustard Tempest|OilRig|Patchwork|Sandworm Team|Sidewinder|TA2541|TA505|Transparent Tribe|Turla|Windshift|Wizard Spider|ZIRCONIUM +T1566.001,Spearphishing Attachment,Initial Access,APT-C-36|APT1|APT12|APT19|APT28|APT29|APT30|APT32|APT33|APT37|APT38|APT39|APT41|Ajax Security Team|Andariel|BITTER|BRONZE BUTLER|BlackTech|Cobalt Group|Confucius|DarkHydrus|Darkhotel|Dragonfly|EXOTIC LILY|Elderwood|Ember Bear|FIN4|FIN6|FIN7|FIN8|Ferocious Kitten|Gallmaker|Gamaredon Group|Gorgon Group|Higaisa|Inception|IndigoZebra|Kimsuky|Lazarus Group|LazyScripter|Leviathan|Machete|Malteiro|Mofang|Molerats|MuddyWater|Mustang Panda|Naikon|Nomadic Octopus|OilRig|PLATINUM|Patchwork|RTM|Rancor|Sandworm Team|SideCopy|Sidewinder|Silence|TA2541|TA459|TA505|TA551|The White Company|Threat Group-3390|Tonto Team|Transparent Tribe|Tropic Trooper|WIRTE|Windshift|Wizard Spider|admin@338|menuPass +T1566,Phishing,Initial Access,Axiom|GOLD SOUTHFIELD +T1565.003,Runtime Data Manipulation,Impact,APT38 +T1565.002,Transmitted Data Manipulation,Impact,APT38 +T1565.001,Stored Data Manipulation,Impact,APT38 +T1565,Data Manipulation,Impact,FIN13 +T1564.001,Hidden Files and Directories,Defense Evasion,APT28|APT32|FIN13|HAFNIUM|Lazarus Group|LuminousMoth|Mustang Panda|Rocke|Transparent Tribe|Tropic Trooper +T1564,Hide Artifacts,Defense Evasion,no +T1563.002,RDP Hijacking,Lateral Movement,Axiom +T1563.001,SSH Hijacking,Lateral Movement,no +T1563,Remote Service Session Hijacking,Lateral Movement,no +T1518.001,Security Software Discovery,Discovery,APT38|Aquatic Panda|Cobalt Group|Darkhotel|FIN8|Kimsuky|Malteiro|MuddyWater|Naikon|Patchwork|Rocke|SideCopy|Sidewinder|TA2541|TeamTNT|The White Company|ToddyCat|Tropic Trooper|Turla|Windshift|Wizard Spider +T1069.003,Cloud Groups,Discovery,no +T1069.002,Domain Groups,Discovery,Dragonfly|FIN7|Inception|Ke3chang|LAPSUS$|OilRig|ToddyCat|Turla|Volt Typhoon +T1087.004,Cloud Account,Discovery,APT29 +T1087.003,Email Account,Discovery,Magic Hound|Sandworm Team|TA505 +T1087.002,Domain Account,Discovery,APT41|BRONZE BUTLER|Chimera|Dragonfly|FIN13|FIN6|Fox Kitten|Ke3chang|LAPSUS$|MuddyWater|OilRig|Poseidon Group|Sandworm Team|Scattered Spider|ToddyCat|Turla|Volt Typhoon|Wizard Spider|menuPass +T1087.001,Local Account,Discovery,APT1|APT3|APT32|APT41|Chimera|Fox Kitten|Ke3chang|Moses Staff|OilRig|Poseidon Group|Threat Group-3390|Turla|admin@338 +T1553.004,Install Root Certificate,Defense Evasion,no +T1562.004,Disable or Modify System Firewall,Defense Evasion,APT38|Carbanak|Dragonfly|Kimsuky|Lazarus Group|Magic Hound|Moses Staff|Rocke|TeamTNT|ToddyCat 
+T1562.003,Impair Command History Logging,Defense Evasion,APT38 +T1562.002,Disable Windows Event Logging,Defense Evasion,Magic Hound|Threat Group-3390 +T1562.001,Disable or Modify Tools,Defense Evasion,Aquatic Panda|BRONZE BUTLER|Ember Bear|FIN6|Gamaredon Group|Gorgon Group|Indrik Spider|Kimsuky|Lazarus Group|Magic Hound|MuddyWater|Putter Panda|Rocke|TA2541|TA505|TeamTNT|Turla|Wizard Spider +T1562,Impair Defenses,Defense Evasion,Magic Hound +T1003.004,LSA Secrets,Credential Access,APT29|APT33|Dragonfly|Ke3chang|Leafminer|MuddyWater|OilRig|Threat Group-3390|menuPass +T1003.005,Cached Domain Credentials,Credential Access,APT33|Leafminer|MuddyWater|OilRig +T1561.002,Disk Structure Wipe,Impact,APT37|APT38|Lazarus Group|Sandworm Team +T1561.001,Disk Content Wipe,Impact,Lazarus Group +T1561,Disk Wipe,Impact,no +T1560.003,Archive via Custom Method,Collection,CopyKittens|FIN6|Kimsuky|Lazarus Group|Mustang Panda +T1560.002,Archive via Library,Collection,Lazarus Group|Threat Group-3390 +T1560.001,Archive via Utility,Collection,APT1|APT28|APT3|APT33|APT39|APT41|APT5|Akira|Aquatic Panda|BRONZE BUTLER|Chimera|CopyKittens|Earth Lusca|FIN13|FIN8|Fox Kitten|GALLIUM|Gallmaker|HAFNIUM|Ke3chang|Kimsuky|Magic Hound|MuddyWater|Mustang Panda|Sowbug|ToddyCat|Turla|Volt Typhoon|Wizard Spider|menuPass +T1560,Archive Collected Data,Collection,APT28|APT32|Axiom|Dragonfly|FIN6|Ke3chang|Lazarus Group|Leviathan|LuminousMoth|Patchwork|menuPass +T1499.004,Application or System Exploitation,Impact,no +T1499.003,Application Exhaustion Flood,Impact,no +T1499.002,Service Exhaustion Flood,Impact,no +T1499.001,OS Exhaustion Flood,Impact,no +T1491.002,External Defacement,Impact,Sandworm Team +T1491.001,Internal Defacement,Impact,Gamaredon Group|Lazarus Group +T1114.003,Email Forwarding Rule,Collection,Kimsuky|LAPSUS$|Silent Librarian +T1114.002,Remote Email Collection,Collection,APT1|APT28|APT29|Chimera|Dragonfly|FIN4|HAFNIUM|Ke3chang|Kimsuky|Leafminer|Magic Hound +T1114.001,Local Email Collection,Collection,APT1|Chimera|Magic Hound +T1134.005,SID-History Injection,Defense Evasion|Privilege Escalation,no +T1134.004,Parent PID Spoofing,Defense Evasion|Privilege Escalation,no +T1134.003,Make and Impersonate Token,Defense Evasion|Privilege Escalation,FIN13 +T1134.002,Create Process with Token,Defense Evasion|Privilege Escalation,Lazarus Group|Turla +T1134.001,Token Impersonation/Theft,Defense Evasion|Privilege Escalation,APT28|FIN8 +T1213.002,Sharepoint,Collection,APT28|Akira|Chimera|Ke3chang|LAPSUS$ +T1213.001,Confluence,Collection,LAPSUS$ +T1555.003,Credentials from Web Browsers,Credential Access,APT3|APT33|APT37|APT41|Ajax Security Team|FIN6|HEXANE|Inception|Kimsuky|LAPSUS$|Leafminer|Malteiro|Molerats|MuddyWater|OilRig|Patchwork|Sandworm Team|Stealth Falcon|TA505|ZIRCONIUM +T1555.002,Securityd Memory,Credential Access,no +T1555.001,Keychain,Credential Access,no +T1559.002,Dynamic Data Exchange,Execution,APT28|APT37|BITTER|Cobalt Group|FIN7|Gallmaker|Leviathan|MuddyWater|Patchwork|Sidewinder|TA505 +T1559.001,Component Object Model,Execution,Gamaredon Group|MuddyWater +T1559,Inter-Process Communication,Execution,no +T1558.002,Silver Ticket,Credential Access,no +T1558.001,Golden Ticket,Credential Access,Ke3chang +T1558,Steal or Forge Kerberos Tickets,Credential Access,no +T1557.001,LLMNR/NBT-NS Poisoning and SMB Relay,Collection|Credential Access,Lazarus Group|Wizard Spider +T1557,Adversary-in-the-Middle,Collection|Credential Access,Kimsuky +T1556.002,Password Filter DLL,Credential Access|Defense Evasion|Persistence,Strider 
+T1556.001,Domain Controller Authentication,Credential Access|Defense Evasion|Persistence,Chimera +T1556,Modify Authentication Process,Credential Access|Defense Evasion|Persistence,FIN13 +T1056.004,Credential API Hooking,Collection|Credential Access,PLATINUM +T1056.003,Web Portal Capture,Collection|Credential Access,no +T1056.002,GUI Input Capture,Collection|Credential Access,FIN4 +T1056.001,Keylogging,Collection|Credential Access,APT28|APT3|APT32|APT38|APT39|APT41|APT5|Ajax Security Team|Darkhotel|FIN13|FIN4|Group5|HEXANE|Ke3chang|Kimsuky|Lazarus Group|Magic Hound|OilRig|PLATINUM|Sandworm Team|Sowbug|Threat Group-3390|Tonto Team|menuPass +T1555,Credentials from Password Stores,Credential Access,APT33|APT39|Evilnum|FIN6|HEXANE|Leafminer|Malteiro|MuddyWater|OilRig|Stealth Falcon|Volt Typhoon +T1552.005,Cloud Instance Metadata API,Credential Access,TeamTNT +T1003.008,/etc/passwd and /etc/shadow,Credential Access,no +T1003.007,Proc Filesystem,Credential Access,no +T1003.006,DCSync,Credential Access,Earth Lusca|LAPSUS$ +T1558.003,Kerberoasting,Credential Access,FIN7|Wizard Spider +T1552.006,Group Policy Preferences,Credential Access,APT33|Wizard Spider +T1003.003,NTDS,Credential Access,APT28|APT41|Chimera|Dragonfly|FIN13|FIN6|Fox Kitten|HAFNIUM|Ke3chang|LAPSUS$|Mustang Panda|Sandworm Team|Scattered Spider|Volt Typhoon|Wizard Spider|menuPass +T1003.002,Security Account Manager,Credential Access,APT29|APT41|APT5|Dragonfly|FIN13|GALLIUM|Ke3chang|Threat Group-3390|Wizard Spider|menuPass +T1003.001,LSASS Memory,Credential Access,APT1|APT28|APT3|APT32|APT33|APT39|APT41|APT5|Aquatic Panda|BRONZE BUTLER|Blue Mockingbird|Cleaver|Earth Lusca|FIN13|FIN6|FIN8|Fox Kitten|GALLIUM|HAFNIUM|Indrik Spider|Ke3chang|Kimsuky|Leafminer|Leviathan|Magic Hound|MuddyWater|OilRig|PLATINUM|Sandworm Team|Silence|Threat Group-3390|Volt Typhoon|Whitefly|Wizard Spider +T1110.004,Credential Stuffing,Credential Access,Chimera +T1110.003,Password Spraying,Credential Access,APT28|APT29|APT33|Chimera|HEXANE|Lazarus Group|Leafminer|Silent Librarian +T1110.002,Password Cracking,Credential Access,APT3|APT41|Dragonfly|FIN6 +T1110.001,Password Guessing,Credential Access,APT28|APT29 +T1021.006,Windows Remote Management,Lateral Movement,Chimera|FIN13|Threat Group-3390|Wizard Spider +T1021.005,VNC,Lateral Movement,FIN7|Fox Kitten|GCMAN|Gamaredon Group +T1021.004,SSH,Lateral Movement,APT39|APT5|BlackTech|FIN13|FIN7|Fox Kitten|GCMAN|Lazarus Group|Leviathan|OilRig|Rocke|TeamTNT|menuPass +T1021.003,Distributed Component Object Model,Lateral Movement,no +T1021.002,SMB/Windows Admin Shares,Lateral Movement,APT28|APT3|APT32|APT39|APT41|Blue Mockingbird|Chimera|Cinnamon Tempest|Deep Panda|FIN13|FIN8|Fox Kitten|Ke3chang|Lazarus Group|Moses Staff|Orangeworm|Sandworm Team|Threat Group-1314|ToddyCat|Turla|Wizard Spider +T1021.001,Remote Desktop Protocol,Lateral Movement,APT1|APT3|APT39|APT41|APT5|Axiom|Blue Mockingbird|Chimera|Cobalt Group|Dragonfly|FIN10|FIN13|FIN6|FIN7|FIN8|Fox Kitten|HEXANE|Kimsuky|Lazarus Group|Leviathan|Magic Hound|OilRig|Patchwork|Silence|Wizard Spider|menuPass +T1554,Compromise Host Software Binary,Persistence,APT5 +T1036.006,Space after Filename,Defense Evasion,no +T1036.005,Match Legitimate Name or Location,Defense Evasion,APT1|APT28|APT29|APT32|APT39|APT41|APT5|Aoqin Dragon|BRONZE BUTLER|BackdoorDiplomacy|Blue Mockingbird|Carbanak|Chimera|Darkhotel|Earth Lusca|FIN13|FIN7|Ferocious Kitten|Fox Kitten|Gamaredon Group|Indrik Spider|Ke3chang|Kimsuky|Lazarus Group|LuminousMoth|Machete|Magic Hound|MuddyWater|Mustang Panda|Mustard 
Tempest|Naikon|PROMETHIUM|Patchwork|Poseidon Group|Rocke|Sandworm Team|SideCopy|Sidewinder|Silence|Sowbug|TA2541|TeamTNT|ToddyCat|Transparent Tribe|Tropic Trooper|Volt Typhoon|WIRTE|Whitefly|admin@338|menuPass +T1036.004,Masquerade Task or Service,Defense Evasion,APT-C-36|APT32|APT41|BITTER|BackdoorDiplomacy|Carbanak|FIN13|FIN6|FIN7|Fox Kitten|Higaisa|Kimsuky|Lazarus Group|Magic Hound|Naikon|PROMETHIUM|Wizard Spider|ZIRCONIUM +T1036.003,Rename System Utilities,Defense Evasion,APT32|GALLIUM|Lazarus Group|menuPass +T1036.002,Right-to-Left Override,Defense Evasion,BRONZE BUTLER|BlackTech|Ferocious Kitten|Ke3chang|Scarlet Mimic +T1036.001,Invalid Code Signature,Defense Evasion,APT37|Windshift +T1553.003,SIP and Trust Provider Hijacking,Defense Evasion,no +T1553.002,Code Signing,Defense Evasion,APT41|CopyKittens|Darkhotel|Ember Bear|FIN6|FIN7|GALLIUM|Kimsuky|Lazarus Group|Leviathan|LuminousMoth|Molerats|Moses Staff|PROMETHIUM|Patchwork|Scattered Spider|Silence|Suckfly|TA505|Winnti Group|Wizard Spider|menuPass +T1553.001,Gatekeeper Bypass,Defense Evasion,no +T1553,Subvert Trust Controls,Defense Evasion,Axiom +T1027.003,Steganography,Defense Evasion,APT37|Andariel|BRONZE BUTLER|Earth Lusca|Leviathan|MuddyWater|TA551|Tropic Trooper +T1027.002,Software Packing,Defense Evasion,APT29|APT3|APT38|APT39|APT41|Aoqin Dragon|Dark Caracal|Elderwood|Ember Bear|GALLIUM|Kimsuky|MoustachedBouncer|Patchwork|Rocke|TA2541|TA505|TeamTNT|The White Company|Threat Group-3390|ZIRCONIUM +T1027.001,Binary Padding,Defense Evasion,APT29|APT32|BRONZE BUTLER|Ember Bear|FIN7|Gamaredon Group|Higaisa|Leviathan|Moafee|Mustang Panda|Patchwork +T1222.002,Linux and Mac File and Directory Permissions Modification,Defense Evasion,APT32|Rocke|TeamTNT +T1222.001,Windows File and Directory Permissions Modification,Defense Evasion,Wizard Spider +T1552.004,Private Keys,Credential Access,Rocke|Scattered Spider|TeamTNT +T1552.003,Bash History,Credential Access,no +T1552.002,Credentials in Registry,Credential Access,APT32 +T1552.001,Credentials In Files,Credential Access,APT3|APT33|FIN13|Fox Kitten|Kimsuky|Leafminer|MuddyWater|OilRig|Scattered Spider|TA505|TeamTNT +T1552,Unsecured Credentials,Credential Access,no +T1216.001,PubPrn,Defense Evasion,APT32 +T1070.006,Timestomp,Defense Evasion,APT28|APT29|APT32|APT38|APT5|Chimera|Kimsuky|Lazarus Group|Rocke +T1070.005,Network Share Connection Removal,Defense Evasion,Threat Group-3390 +T1070.004,File Deletion,Defense Evasion,APT18|APT28|APT29|APT3|APT32|APT38|APT39|APT41|APT5|Aquatic Panda|BRONZE BUTLER|Chimera|Cobalt Group|Dragonfly|Evilnum|FIN10|FIN5|FIN6|FIN8|Gamaredon Group|Group5|Kimsuky|Lazarus Group|Magic Hound|Metador|Mustang Panda|OilRig|Patchwork|Rocke|Sandworm Team|Silence|TeamTNT|The White Company|Threat Group-3390|Tropic Trooper|Volt Typhoon|Wizard Spider|menuPass +T1070.003,Clear Command History,Defense Evasion,APT41|APT5|Lazarus Group|Magic Hound|TeamTNT|menuPass +T1550.004,Web Session Cookie,Defense Evasion|Lateral Movement,no +T1550.001,Application Access Token,Defense Evasion|Lateral Movement,APT28 +T1550.003,Pass the Ticket,Defense Evasion|Lateral Movement,APT29|APT32|BRONZE BUTLER +T1550.002,Pass the Hash,Defense Evasion|Lateral Movement,APT1|APT28|APT32|APT41|Chimera|FIN13|GALLIUM|Kimsuky|Wizard Spider +T1550,Use Alternate Authentication Material,Defense Evasion|Lateral Movement,no +T1548.004,Elevated Execution with Prompt,Defense Evasion|Privilege Escalation,no +T1548.003,Sudo and Sudo Caching,Defense Evasion|Privilege Escalation,no +T1548.002,Bypass User Account 
Control,Defense Evasion|Privilege Escalation,APT29|APT37|BRONZE BUTLER|Cobalt Group|Earth Lusca|Evilnum|MuddyWater|Patchwork|Threat Group-3390 +T1548.001,Setuid and Setgid,Defense Evasion|Privilege Escalation,no +T1548,Abuse Elevation Control Mechanism,Defense Evasion|Privilege Escalation,no +T1136.003,Cloud Account,Persistence,APT29|LAPSUS$ +T1070.002,Clear Linux or Mac System Logs,Defense Evasion,Rocke|TeamTNT +T1070.001,Clear Windows Event Logs,Defense Evasion,APT28|APT32|APT38|APT41|Chimera|Dragonfly|FIN5|FIN8|Indrik Spider +T1136.002,Domain Account,Persistence,GALLIUM|HAFNIUM|Wizard Spider +T1136.001,Local Account,Persistence,APT3|APT39|APT41|APT5|Dragonfly|FIN13|Fox Kitten|Kimsuky|Leafminer|Magic Hound|TeamTNT|Wizard Spider +T1547.010,Port Monitors,Persistence|Privilege Escalation,no +T1547.009,Shortcut Modification,Persistence|Privilege Escalation,APT39|Gorgon Group|Lazarus Group|Leviathan +T1547.008,LSASS Driver,Persistence|Privilege Escalation,no +T1547.007,Re-opened Applications,Persistence|Privilege Escalation,no +T1547.006,Kernel Modules and Extensions,Persistence|Privilege Escalation,no +T1547.005,Security Support Provider,Persistence|Privilege Escalation,no +T1547.004,Winlogon Helper DLL,Persistence|Privilege Escalation,Tropic Trooper|Turla|Wizard Spider +T1547.003,Time Providers,Persistence|Privilege Escalation,no +T1546.014,Emond,Persistence|Privilege Escalation,no +T1546.013,PowerShell Profile,Persistence|Privilege Escalation,Turla +T1546.012,Image File Execution Options Injection,Persistence|Privilege Escalation,no +T1218.008,Odbcconf,Defense Evasion,Cobalt Group +T1546.011,Application Shimming,Persistence|Privilege Escalation,FIN7 +T1547.002,Authentication Package,Persistence|Privilege Escalation,no +T1546.010,AppInit DLLs,Persistence|Privilege Escalation,APT39 +T1546.009,AppCert DLLs,Persistence|Privilege Escalation,no +T1218.007,Msiexec,Defense Evasion,Machete|Molerats|Rancor|TA505|ZIRCONIUM +T1546.008,Accessibility Features,Persistence|Privilege Escalation,APT29|APT3|APT41|Axiom|Deep Panda|Fox Kitten +T1546.007,Netsh Helper DLL,Persistence|Privilege Escalation,no +T1546.006,LC_LOAD_DYLIB Addition,Persistence|Privilege Escalation,no +T1546.005,Trap,Persistence|Privilege Escalation,no +T1546.004,Unix Shell Configuration Modification,Persistence|Privilege Escalation,no +T1546.003,Windows Management Instrumentation Event Subscription,Persistence|Privilege Escalation,APT29|APT33|Blue Mockingbird|FIN8|HEXANE|Leviathan|Metador|Mustang Panda|Rancor|Turla +T1546.002,Screensaver,Persistence|Privilege Escalation,no +T1546.001,Change Default File Association,Persistence|Privilege Escalation,Kimsuky +T1547.001,Registry Run Keys / Startup Folder,Persistence|Privilege Escalation,APT18|APT19|APT28|APT29|APT3|APT32|APT33|APT37|APT39|APT41|BRONZE BUTLER|Cobalt Group|Confucius|Dark Caracal|Darkhotel|Dragonfly|FIN10|FIN13|FIN6|FIN7|Gamaredon Group|Gorgon Group|Higaisa|Inception|Ke3chang|Kimsuky|Lazarus Group|LazyScripter|Leviathan|LuminousMoth|Magic Hound|Molerats|MuddyWater|Mustang Panda|Naikon|PROMETHIUM|Patchwork|Putter Panda|RTM|Rocke|Sidewinder|Silence|TA2541|TeamTNT|Threat Group-3390|Tropic Trooper|Turla|Windshift|Wizard Spider|ZIRCONIUM +T1218.002,Control Panel,Defense Evasion,Ember Bear +T1218.010,Regsvr32,Defense Evasion,APT19|APT32|Blue Mockingbird|Cobalt Group|Deep Panda|Inception|Kimsuky|Leviathan|TA551|WIRTE +T1218.009,Regsvcs/Regasm,Defense Evasion,no +T1218.005,Mshta,Defense Evasion,APT29|APT32|Confucius|Earth Lusca|FIN7|Gamaredon Group|Inception|Kimsuky|Lazarus 
Group|LazyScripter|MuddyWater|Mustang Panda|SideCopy|Sidewinder|TA2541|TA551 +T1218.004,InstallUtil,Defense Evasion,Mustang Panda|menuPass +T1218.001,Compiled HTML File,Defense Evasion,APT38|APT41|Dark Caracal|OilRig|Silence +T1218.003,CMSTP,Defense Evasion,Cobalt Group|MuddyWater +T1218.011,Rundll32,Defense Evasion,APT19|APT28|APT3|APT32|APT38|APT41|Blue Mockingbird|Carbanak|CopyKittens|FIN7|Gamaredon Group|HAFNIUM|Kimsuky|Lazarus Group|LazyScripter|Magic Hound|MuddyWater|Sandworm Team|TA505|TA551|Wizard Spider +T1547,Boot or Logon Autostart Execution,Persistence|Privilege Escalation,no +T1546,Event Triggered Execution,Persistence|Privilege Escalation,no +T1098.003,Additional Cloud Roles,Persistence|Privilege Escalation,LAPSUS$|Scattered Spider +T1098.002,Additional Email Delegate Permissions,Persistence|Privilege Escalation,APT28|APT29|Magic Hound +T1098.001,Additional Cloud Credentials,Persistence|Privilege Escalation,no +T1543.004,Launch Daemon,Persistence|Privilege Escalation,no +T1543.003,Windows Service,Persistence|Privilege Escalation,APT19|APT3|APT32|APT38|APT41|Blue Mockingbird|Carbanak|Cinnamon Tempest|Cobalt Group|DarkVishnya|Earth Lusca|FIN7|Ke3chang|Kimsuky|Lazarus Group|PROMETHIUM|TeamTNT|Threat Group-3390|Tropic Trooper|Wizard Spider +T1543.002,Systemd Service,Persistence|Privilege Escalation,Rocke|TeamTNT +T1543.001,Launch Agent,Persistence|Privilege Escalation,no +T1037.005,Startup Items,Persistence|Privilege Escalation,no +T1037.004,RC Scripts,Persistence|Privilege Escalation,APT29 +T1055.012,Process Hollowing,Defense Evasion|Privilege Escalation,Gorgon Group|Kimsuky|Patchwork|TA2541|Threat Group-3390|menuPass +T1055.013,Process Doppelgänging,Defense Evasion|Privilege Escalation,Leafminer +T1055.011,Extra Window Memory Injection,Defense Evasion|Privilege Escalation,no +T1055.014,VDSO Hijacking,Defense Evasion|Privilege Escalation,no +T1055.009,Proc Memory,Defense Evasion|Privilege Escalation,no +T1055.008,Ptrace System Calls,Defense Evasion|Privilege Escalation,no +T1055.005,Thread Local Storage,Defense Evasion|Privilege Escalation,no +T1055.004,Asynchronous Procedure Call,Defense Evasion|Privilege Escalation,FIN8 +T1055.003,Thread Execution Hijacking,Defense Evasion|Privilege Escalation,no +T1055.002,Portable Executable Injection,Defense Evasion|Privilege Escalation,Gorgon Group|Rocke +T1055.001,Dynamic-link Library Injection,Defense Evasion|Privilege Escalation,BackdoorDiplomacy|Lazarus Group|Leviathan|Malteiro|Putter Panda|TA505|Tropic Trooper|Turla|Wizard Spider +T1037.003,Network Logon Script,Persistence|Privilege Escalation,no +T1543,Create or Modify System Process,Persistence|Privilege Escalation,no +T1037.002,Login Hook,Persistence|Privilege Escalation,no +T1037.001,Logon Script (Windows),Persistence|Privilege Escalation,APT28|Cobalt Group +T1542.003,Bootkit,Defense Evasion|Persistence,APT28|APT41|Lazarus Group +T1542.002,Component Firmware,Defense Evasion|Persistence,Equation +T1542.001,System Firmware,Defense Evasion|Persistence,no +T1505.003,Web Shell,Persistence,APT28|APT29|APT32|APT38|APT39|APT5|BackdoorDiplomacy|Deep Panda|Dragonfly|FIN13|Fox Kitten|GALLIUM|HAFNIUM|Kimsuky|Leviathan|Magic Hound|Moses Staff|OilRig|Sandworm Team|Threat Group-3390|Tonto Team|Tropic Trooper|Volatile Cedar|Volt Typhoon +T1505.002,Transport Agent,Persistence,no +T1505.001,SQL Stored Procedures,Persistence,no +T1053.003,Cron,Execution|Persistence|Privilege Escalation,APT38|APT5|Rocke +T1053.005,Scheduled Task,Execution|Persistence|Privilege 
Escalation,APT-C-36|APT29|APT3|APT32|APT33|APT37|APT38|APT39|APT41|BITTER|BRONZE BUTLER|Blue Mockingbird|Chimera|Cobalt Group|Confucius|Dragonfly|FIN10|FIN13|FIN6|FIN7|FIN8|Fox Kitten|GALLIUM|Gamaredon Group|HEXANE|Higaisa|Kimsuky|Lazarus Group|LuminousMoth|Machete|Magic Hound|Molerats|MuddyWater|Mustang Panda|Naikon|OilRig|Patchwork|Rancor|Silence|Stealth Falcon|TA2541|ToddyCat|Wizard Spider|menuPass +T1053.002,At,Execution|Persistence|Privilege Escalation,APT18|BRONZE BUTLER|Threat Group-3390 +T1542,Pre-OS Boot,Defense Evasion|Persistence,no +T1137.001,Office Template Macros,Persistence,MuddyWater +T1137.004,Outlook Home Page,Persistence,OilRig +T1137.003,Outlook Forms,Persistence,no +T1137.005,Outlook Rules,Persistence,no +T1137.006,Add-ins,Persistence,Naikon +T1137.002,Office Test,Persistence,APT28 +T1531,Account Access Removal,Impact,Akira|LAPSUS$ +T1539,Steal Web Session Cookie,Credential Access,Evilnum|LuminousMoth|Sandworm Team|Scattered Spider +T1529,System Shutdown/Reboot,Impact,APT37|APT38|Lazarus Group +T1518,Software Discovery,Discovery,BRONZE BUTLER|HEXANE|Inception|MuddyWater|Mustang Panda|SideCopy|Sidewinder|Tropic Trooper|Volt Typhoon|Windigo|Windshift|Wizard Spider +T1547.013,XDG Autostart Entries,Persistence|Privilege Escalation,no +T1534,Internal Spearphishing,Lateral Movement,Gamaredon Group|HEXANE|Kimsuky|Leviathan +T1528,Steal Application Access Token,Credential Access,APT28|APT29 +T1535,Unused/Unsupported Cloud Regions,Defense Evasion,no +T1525,Implant Internal Image,Persistence,no +T1538,Cloud Service Dashboard,Discovery,Scattered Spider +T1530,Data from Cloud Storage,Collection,Fox Kitten|Scattered Spider +T1578,Modify Cloud Compute Infrastructure,Defense Evasion,no +T1537,Transfer Data to Cloud Account,Exfiltration,no +T1526,Cloud Service Discovery,Discovery,no +T1505,Server Software Component,Persistence,no +T1499,Endpoint Denial of Service,Impact,Sandworm Team +T1497,Virtualization/Sandbox Evasion,Defense Evasion|Discovery,Darkhotel +T1498,Network Denial of Service,Impact,APT28 +T1496,Resource Hijacking,Impact,APT41|Blue Mockingbird|Rocke|TeamTNT +T1495,Firmware Corruption,Impact,no +T1491,Defacement,Impact,no +T1490,Inhibit System Recovery,Impact,Wizard Spider +T1489,Service Stop,Impact,Indrik Spider|LAPSUS$|Lazarus Group|Wizard Spider +T1486,Data Encrypted for Impact,Impact,APT38|APT41|Akira|FIN7|FIN8|Indrik Spider|Magic Hound|Sandworm Team|Scattered Spider|TA505 +T1485,Data Destruction,Impact,APT38|Gamaredon Group|LAPSUS$|Lazarus Group|Sandworm Team +T1484,Domain or Tenant Policy Modification,Defense Evasion|Privilege Escalation,no +T1482,Domain Trust Discovery,Discovery,Akira|Chimera|Earth Lusca|FIN8|Magic Hound +T1480,Execution Guardrails,Defense Evasion,no +T1222,File and Directory Permissions Modification,Defense Evasion,no +T1220,XSL Script Processing,Defense Evasion,Cobalt Group|Higaisa +T1221,Template Injection,Defense Evasion,APT28|Confucius|DarkHydrus|Dragonfly|Gamaredon Group|Inception|Tropic Trooper +T1190,Exploit Public-Facing Application,Initial Access,APT28|APT29|APT39|APT41|APT5|Axiom|BackdoorDiplomacy|BlackTech|Blue Mockingbird|Cinnamon Tempest|Dragonfly|Earth Lusca|FIN13|FIN7|Fox Kitten|GALLIUM|GOLD SOUTHFIELD|HAFNIUM|Ke3chang|Kimsuky|Magic Hound|Moses Staff|MuddyWater|Rocke|Sandworm Team|Threat Group-3390|ToddyCat|Volatile Cedar|Volt Typhoon|menuPass +T1213,Data from Information Repositories,Collection,APT28|FIN6|Fox Kitten|LAPSUS$|Sandworm Team|Turla +T1202,Indirect Command Execution,Defense Evasion,Lazarus Group +T1207,Rogue Domain 
Controller,Defense Evasion,no +T1212,Exploitation for Credential Access,Credential Access,no +T1201,Password Policy Discovery,Discovery,Chimera|OilRig|Turla +T1197,BITS Jobs,Defense Evasion|Persistence,APT39|APT41|Leviathan|Patchwork|Wizard Spider +T1189,Drive-by Compromise,Initial Access,APT19|APT28|APT32|APT37|APT38|Andariel|Axiom|BRONZE BUTLER|Dark Caracal|Darkhotel|Dragonfly|Earth Lusca|Elderwood|Lazarus Group|Leafminer|Leviathan|Machete|Magic Hound|Mustard Tempest|PLATINUM|PROMETHIUM|Patchwork|RTM|Threat Group-3390|Transparent Tribe|Turla|Windigo|Windshift +T1218,System Binary Proxy Execution,Defense Evasion,Lazarus Group +T1210,Exploitation of Remote Services,Lateral Movement,APT28|Dragonfly|Earth Lusca|FIN7|Fox Kitten|MuddyWater|Threat Group-3390|Tonto Team|Wizard Spider|menuPass +T1203,Exploitation for Client Execution,Execution,APT12|APT28|APT29|APT3|APT32|APT33|APT37|APT41|Andariel|Aoqin Dragon|Axiom|BITTER|BRONZE BUTLER|BlackTech|Cobalt Group|Confucius|Darkhotel|Dragonfly|EXOTIC LILY|Elderwood|Ember Bear|Higaisa|Inception|Lazarus Group|Leviathan|MuddyWater|Mustang Panda|Patchwork|Sandworm Team|Sidewinder|TA459|The White Company|Threat Group-3390|Tonto Team|Transparent Tribe|Tropic Trooper|admin@338 +T1211,Exploitation for Defense Evasion,Defense Evasion,APT28 +T1216,System Script Proxy Execution,Defense Evasion,no +T1195,Supply Chain Compromise,Initial Access,no +T1219,Remote Access Software,Command And Control,Akira|Carbanak|Cobalt Group|DarkVishnya|Evilnum|FIN7|GOLD SOUTHFIELD|Kimsuky|MuddyWater|Mustang Panda|RTM|Sandworm Team|Scattered Spider|TeamTNT|Thrip +T1205,Traffic Signaling,Command And Control|Defense Evasion|Persistence,no +T1204,User Execution,Execution,LAPSUS$|Scattered Spider +T1199,Trusted Relationship,Initial Access,APT28|APT29|GOLD SOUTHFIELD|LAPSUS$|POLONIUM|Sandworm Team|Threat Group-3390|menuPass +T1217,Browser Information Discovery,Discovery,APT38|Chimera|Fox Kitten|Scattered Spider +T1200,Hardware Additions,Initial Access,DarkVishnya +T1176,Browser Extensions,Persistence,Kimsuky +T1185,Browser Session Hijacking,Collection,no +T1187,Forced Authentication,Credential Access,DarkHydrus|Dragonfly +T1137,Office Application Startup,Persistence,APT32|Gamaredon Group +T1140,Deobfuscate/Decode Files or Information,Defense Evasion,APT19|APT28|APT39|BRONZE BUTLER|Cinnamon Tempest|Darkhotel|Earth Lusca|FIN13|Gamaredon Group|Gorgon Group|Higaisa|Ke3chang|Kimsuky|Lazarus Group|Leviathan|Malteiro|Molerats|MuddyWater|OilRig|Rocke|Sandworm Team|TA505|TeamTNT|Threat Group-3390|Tropic Trooper|Turla|WIRTE|ZIRCONIUM|menuPass +T1136,Create Account,Persistence,Indrik Spider|Scattered Spider +T1135,Network Share Discovery,Discovery,APT1|APT32|APT38|APT39|APT41|Chimera|DarkVishnya|Dragonfly|FIN13|Sowbug|Tonto Team|Tropic Trooper|Wizard Spider +T1134,Access Token Manipulation,Defense Evasion|Privilege Escalation,Blue Mockingbird|FIN6 +T1133,External Remote Services,Initial Access|Persistence,APT18|APT28|APT29|APT41|Akira|Chimera|Dragonfly|FIN13|FIN5|GALLIUM|GOLD SOUTHFIELD|Ke3chang|Kimsuky|LAPSUS$|Leviathan|OilRig|Sandworm Team|Scattered Spider|TeamTNT|Threat Group-3390|Wizard Spider +T1132,Data Encoding,Command And Control,no +T1129,Shared Modules,Execution,no +T1127,Trusted Developer Utilities Proxy Execution,Defense Evasion,no +T1125,Video Capture,Collection,FIN7|Silence +T1124,System Time Discovery,Discovery,BRONZE BUTLER|Chimera|Darkhotel|Higaisa|Lazarus Group|Sidewinder|The White Company|Turla|ZIRCONIUM +T1123,Audio Capture,Collection,APT37 +T1120,Peripheral Device 
Discovery,Discovery,APT28|APT37|BackdoorDiplomacy|Equation|Gamaredon Group|OilRig|TeamTNT|Turla +T1119,Automated Collection,Collection,APT1|APT28|Chimera|Confucius|FIN5|FIN6|Gamaredon Group|Ke3chang|Mustang Panda|OilRig|Patchwork|Sidewinder|Threat Group-3390|Tropic Trooper|menuPass +T1115,Clipboard Data,Collection,APT38|APT39 +T1114,Email Collection,Collection,Magic Hound|Silent Librarian +T1113,Screen Capture,Collection,APT28|APT39|BRONZE BUTLER|Dark Caracal|Dragonfly|FIN7|GOLD SOUTHFIELD|Gamaredon Group|Group5|Magic Hound|MoustachedBouncer|MuddyWater|OilRig|Silence +T1112,Modify Registry,Defense Evasion,APT19|APT32|APT38|APT41|Blue Mockingbird|Dragonfly|Earth Lusca|Ember Bear|FIN8|Gamaredon Group|Gorgon Group|Kimsuky|LuminousMoth|Magic Hound|Patchwork|Silence|TA505|Threat Group-3390|Turla|Wizard Spider +T1111,Multi-Factor Authentication Interception,Credential Access,Chimera|Kimsuky|LAPSUS$ +T1110,Brute Force,Credential Access,APT28|APT38|APT39|DarkVishnya|Dragonfly|FIN5|Fox Kitten|HEXANE|OilRig|Turla +T1106,Native API,Execution,APT37|APT38|BlackTech|Chimera|Gamaredon Group|Gorgon Group|Higaisa|Lazarus Group|SideCopy|Silence|TA505|ToddyCat|Tropic Trooper|Turla|menuPass +T1105,Ingress Tool Transfer,Command And Control,APT-C-36|APT18|APT28|APT29|APT3|APT32|APT33|APT37|APT38|APT39|APT41|Ajax Security Team|Andariel|Aquatic Panda|BITTER|BRONZE BUTLER|BackdoorDiplomacy|Chimera|Cinnamon Tempest|Cobalt Group|Confucius|Darkhotel|Dragonfly|Elderwood|Ember Bear|Evilnum|FIN13|FIN7|FIN8|Fox Kitten|GALLIUM|Gamaredon Group|Gorgon Group|HAFNIUM|HEXANE|IndigoZebra|Indrik Spider|Ke3chang|Kimsuky|Lazarus Group|LazyScripter|Leviathan|LuminousMoth|Magic Hound|Metador|Molerats|Moses Staff|MuddyWater|Mustang Panda|Mustard Tempest|Nomadic Octopus|OilRig|PLATINUM|Patchwork|Rancor|Rocke|Sandworm Team|SideCopy|Sidewinder|Silence|TA2541|TA505|TA551|TeamTNT|Threat Group-3390|Tonto Team|Tropic Trooper|Turla|Volatile Cedar|WIRTE|Whitefly|Windshift|Winnti Group|Wizard Spider|ZIRCONIUM|menuPass +T1104,Multi-Stage Channels,Command And Control,APT3|APT41|Lazarus Group|MuddyWater +T1102,Web Service,Command And Control,APT32|EXOTIC LILY|Ember Bear|FIN6|FIN8|Fox Kitten|Gamaredon Group|Inception|LazyScripter|Mustang Panda|Rocke|TeamTNT|Turla +T1098,Account Manipulation,Persistence|Privilege Escalation,APT3|APT41|APT5|Dragonfly|FIN13|HAFNIUM|Kimsuky|Lazarus Group|Magic Hound +T1095,Non-Application Layer Protocol,Command And Control,APT3|BITTER|BackdoorDiplomacy|FIN6|HAFNIUM|Metador|PLATINUM|ToddyCat +T1092,Communication Through Removable Media,Command And Control,APT28 +T1091,Replication Through Removable Media,Initial Access|Lateral Movement,APT28|Aoqin Dragon|Darkhotel|FIN7|LuminousMoth|Mustang Panda|Tropic Trooper +T1090,Proxy,Command And Control,APT41|Blue Mockingbird|Cinnamon Tempest|CopyKittens|Earth Lusca|Fox Kitten|LAPSUS$|Magic Hound|MoustachedBouncer|POLONIUM|Sandworm Team|Turla|Volt Typhoon|Windigo +T1087,Account Discovery,Discovery,FIN13 +T1083,File and Directory Discovery,Discovery,APT18|APT28|APT3|APT32|APT38|APT39|APT41|APT5|Aoqin Dragon|BRONZE BUTLER|Chimera|Confucius|Dark Caracal|Darkhotel|Dragonfly|FIN13|Fox Kitten|Gamaredon Group|HAFNIUM|Inception|Ke3chang|Kimsuky|Lazarus Group|Leafminer|LuminousMoth|Magic Hound|MuddyWater|Mustang Panda|Patchwork|Sandworm Team|Scattered Spider|Sidewinder|Sowbug|TeamTNT|ToddyCat|Tropic Trooper|Turla|Windigo|Winnti Group|admin@338|menuPass +T1082,System Information Discovery,Discovery,APT18|APT19|APT3|APT32|APT37|APT38|APT41|Aquatic Panda|Blue 
Mockingbird|Chimera|Confucius|Darkhotel|FIN13|FIN8|Gamaredon Group|HEXANE|Higaisa|Inception|Ke3chang|Kimsuky|Lazarus Group|Magic Hound|Malteiro|Moses Staff|MuddyWater|Mustang Panda|Mustard Tempest|OilRig|Patchwork|Rocke|Sandworm Team|SideCopy|Sidewinder|Sowbug|Stealth Falcon|TA2541|TeamTNT|ToddyCat|Tropic Trooper|Turla|Volt Typhoon|Windigo|Windshift|Wizard Spider|ZIRCONIUM|admin@338 +T1080,Taint Shared Content,Lateral Movement,BRONZE BUTLER|Cinnamon Tempest|Darkhotel|Gamaredon Group +T1078,Valid Accounts,Defense Evasion|Initial Access|Persistence|Privilege Escalation,APT18|APT28|APT29|APT33|APT39|APT41|Akira|Axiom|Carbanak|Chimera|Cinnamon Tempest|Dragonfly|FIN10|FIN4|FIN5|FIN6|FIN7|FIN8|Fox Kitten|GALLIUM|Ke3chang|LAPSUS$|Lazarus Group|Leviathan|OilRig|POLONIUM|PittyTiger|Sandworm Team|Silence|Silent Librarian|Suckfly|Threat Group-3390|Wizard Spider|menuPass +T1074,Data Staged,Collection,Scattered Spider|Volt Typhoon|Wizard Spider +T1072,Software Deployment Tools,Execution|Lateral Movement,APT32|Sandworm Team|Silence|Threat Group-1314 +T1071,Application Layer Protocol,Command And Control,Magic Hound|Rocke|TeamTNT +T1070,Indicator Removal,Defense Evasion,APT5|Lazarus Group +T1069,Permission Groups Discovery,Discovery,APT3|APT41|FIN13|TA505 +T1068,Exploitation for Privilege Escalation,Privilege Escalation,APT28|APT29|APT32|APT33|BITTER|Cobalt Group|FIN6|FIN8|LAPSUS$|MoustachedBouncer|PLATINUM|Scattered Spider|Threat Group-3390|Tonto Team|Turla|Whitefly|ZIRCONIUM +T1059,Command and Scripting Interpreter,Execution,APT19|APT32|APT37|APT39|Dragonfly|FIN5|FIN6|FIN7|Fox Kitten|Ke3chang|OilRig|Stealth Falcon|Whitefly|Windigo +T1057,Process Discovery,Discovery,APT1|APT28|APT3|APT37|APT38|APT5|Andariel|Chimera|Darkhotel|Deep Panda|Earth Lusca|Gamaredon Group|HAFNIUM|HEXANE|Higaisa|Inception|Ke3chang|Kimsuky|Lazarus Group|Magic Hound|Molerats|MuddyWater|Mustang Panda|OilRig|Poseidon Group|Rocke|Sidewinder|Stealth Falcon|TeamTNT|ToddyCat|Tropic Trooper|Turla|Volt Typhoon|Windshift|Winnti Group +T1056,Input Capture,Collection|Credential Access,APT39 +T1055,Process Injection,Defense Evasion|Privilege Escalation,APT32|APT37|APT41|APT5|Cobalt Group|Kimsuky|PLATINUM|Silence|TA2541|Turla|Wizard Spider +T1053,Scheduled Task/Job,Execution|Persistence|Privilege Escalation,Earth Lusca +T1052,Exfiltration Over Physical Medium,Exfiltration,no +T1049,System Network Connections Discovery,Discovery,APT1|APT3|APT32|APT38|APT41|APT5|Andariel|BackdoorDiplomacy|Chimera|Earth Lusca|FIN13|GALLIUM|HEXANE|Ke3chang|Lazarus Group|Magic Hound|MuddyWater|Mustang Panda|OilRig|Poseidon Group|Sandworm Team|TeamTNT|Threat Group-3390|ToddyCat|Tropic Trooper|Turla|Volt Typhoon|admin@338|menuPass +T1048,Exfiltration Over Alternative Protocol,Exfiltration,TeamTNT +T1047,Windows Management Instrumentation,Execution,APT29|APT32|APT41|Blue Mockingbird|Chimera|Cinnamon Tempest|Deep Panda|Earth Lusca|FIN13|FIN6|FIN7|FIN8|GALLIUM|Gamaredon Group|Indrik Spider|Lazarus Group|Leviathan|Magic Hound|MuddyWater|Mustang Panda|Naikon|OilRig|Sandworm Team|Stealth Falcon|TA2541|Threat Group-3390|ToddyCat|Volt Typhoon|Windshift|Wizard Spider|menuPass +T1046,Network Service Discovery,Discovery,APT32|APT39|APT41|BackdoorDiplomacy|BlackTech|Chimera|Cobalt Group|DarkVishnya|FIN13|FIN6|Fox Kitten|Lazarus Group|Leafminer|Magic Hound|Naikon|OilRig|Rocke|Suckfly|TeamTNT|Threat Group-3390|Tropic Trooper|menuPass +T1041,Exfiltration Over C2 Channel,Exfiltration,APT3|APT32|APT39|Chimera|Confucius|GALLIUM|Gamaredon Group|Higaisa|Ke3chang|Kimsuky|Lazarus 
Group|Leviathan|LuminousMoth|MuddyWater|Sandworm Team|Stealth Falcon|Wizard Spider|ZIRCONIUM
+T1040,Network Sniffing,Credential Access|Discovery,APT28|APT33|DarkVishnya|Kimsuky|Sandworm Team
+T1039,Data from Network Shared Drive,Collection,APT28|BRONZE BUTLER|Chimera|Fox Kitten|Gamaredon Group|Sowbug|menuPass
+T1037,Boot or Logon Initialization Scripts,Persistence|Privilege Escalation,APT29|Rocke
+T1036,Masquerading,Defense Evasion,APT28|APT32|BRONZE BUTLER|Dragonfly|FIN13|LazyScripter|Nomadic Octopus|OilRig|PLATINUM|Sandworm Team|TA551|TeamTNT|Windshift|ZIRCONIUM|menuPass
+T1033,System Owner/User Discovery,Discovery,APT19|APT3|APT32|APT37|APT38|APT39|APT41|Chimera|Dragonfly|Earth Lusca|FIN10|FIN7|FIN8|GALLIUM|Gamaredon Group|HAFNIUM|HEXANE|Ke3chang|Lazarus Group|LuminousMoth|Magic Hound|MuddyWater|OilRig|Patchwork|Sandworm Team|Sidewinder|Stealth Falcon|Threat Group-3390|Tropic Trooper|Volt Typhoon|Windshift|Wizard Spider|ZIRCONIUM
+T1030,Data Transfer Size Limits,Exfiltration,APT28|APT41|LuminousMoth|Threat Group-3390
+T1029,Scheduled Transfer,Exfiltration,Higaisa
+T1027,Obfuscated Files or Information,Defense Evasion,APT-C-36|APT3|APT37|APT41|BackdoorDiplomacy|BlackOasis|Earth Lusca|Ember Bear|GALLIUM|Gallmaker|Gamaredon Group|Ke3chang|Kimsuky|Mustang Panda|Rocke|Sandworm Team|Windshift
+T1025,Data from Removable Media,Collection,APT28|Gamaredon Group|Turla
+T1021,Remote Services,Lateral Movement,Wizard Spider
+T1020,Automated Exfiltration,Exfiltration,Gamaredon Group|Ke3chang|Sidewinder|Tropic Trooper
+T1018,Remote System Discovery,Discovery,APT3|APT32|APT39|Akira|BRONZE BUTLER|Chimera|Deep Panda|Dragonfly|Earth Lusca|FIN5|FIN6|FIN8|Fox Kitten|GALLIUM|HAFNIUM|HEXANE|Indrik Spider|Ke3chang|Leafminer|Magic Hound|Naikon|Rocke|Sandworm Team|Scattered Spider|Silence|Threat Group-3390|ToddyCat|Turla|Volt Typhoon|Wizard Spider|menuPass
+T1016,System Network Configuration Discovery,Discovery,APT1|APT19|APT3|APT32|APT41|Chimera|Darkhotel|Dragonfly|Earth Lusca|FIN13|GALLIUM|HAFNIUM|HEXANE|Higaisa|Ke3chang|Kimsuky|Lazarus Group|Magic Hound|Moses Staff|MuddyWater|Mustang Panda|Naikon|OilRig|SideCopy|Sidewinder|Stealth Falcon|TeamTNT|Threat Group-3390|Tropic Trooper|Turla|Volt Typhoon|Wizard Spider|ZIRCONIUM|admin@338|menuPass
+T1014,Rootkit,Defense Evasion,APT28|APT41|Rocke|TeamTNT|Winnti Group
+T1012,Query Registry,Discovery,APT32|APT39|APT41|Chimera|Dragonfly|Fox Kitten|Kimsuky|Lazarus Group|OilRig|Stealth Falcon|Threat Group-3390|Turla|Volt Typhoon|ZIRCONIUM
+T1011,Exfiltration Over Other Network Medium,Exfiltration,no
+T1010,Application Window Discovery,Discovery,HEXANE|Lazarus Group
+T1008,Fallback Channels,Command And Control,APT41|FIN7|Lazarus Group|OilRig
+T1007,System Service Discovery,Discovery,APT1|Aquatic Panda|BRONZE BUTLER|Chimera|Earth Lusca|Indrik Spider|Ke3chang|Kimsuky|OilRig|Poseidon Group|TeamTNT|Turla|admin@338
+T1006,Direct Volume Access,Defense Evasion,Scattered Spider
+T1005,Data from Local System,Collection,APT1|APT28|APT29|APT3|APT37|APT38|APT39|APT41|Andariel|Axiom|BRONZE BUTLER|CURIUM|Dark Caracal|Dragonfly|FIN13|FIN6|FIN7|Fox Kitten|GALLIUM|Gamaredon Group|HAFNIUM|Inception|Ke3chang|Kimsuky|LAPSUS$|Lazarus Group|LuminousMoth|Magic Hound|Patchwork|Sandworm Team|Stealth Falcon|Threat Group-3390|ToddyCat|Turla|Volt Typhoon|Windigo|Wizard Spider|menuPass
+T1003,OS Credential Dumping,Credential Access,APT28|APT32|APT39|Axiom|Leviathan|Poseidon Group|Sowbug|Suckfly|Tonto Team
+T1001,Data Obfuscation,Command And Control,no
diff --git a/contentctl/output/templates/splunk_app/metadata/default.meta b/contentctl/templates/app_template/metadata/default.meta
similarity index 100%
rename from contentctl/output/templates/splunk_app/metadata/default.meta
rename to contentctl/templates/app_template/metadata/default.meta
diff --git a/contentctl/output/templates/splunk_app/static/appIcon.png b/contentctl/templates/app_template/static/appIcon.png
similarity index 100%
rename from contentctl/output/templates/splunk_app/static/appIcon.png
rename to contentctl/templates/app_template/static/appIcon.png
diff --git a/contentctl/output/templates/splunk_app/static/appIconAlt.png b/contentctl/templates/app_template/static/appIconAlt.png
similarity index 100%
rename from contentctl/output/templates/splunk_app/static/appIconAlt.png
rename to contentctl/templates/app_template/static/appIconAlt.png
diff --git a/contentctl/output/templates/splunk_app/static/appIconAlt_2x.png b/contentctl/templates/app_template/static/appIconAlt_2x.png
similarity index 100%
rename from contentctl/output/templates/splunk_app/static/appIconAlt_2x.png
rename to contentctl/templates/app_template/static/appIconAlt_2x.png
diff --git a/contentctl/output/templates/splunk_app/static/appIcon_2x.png b/contentctl/templates/app_template/static/appIcon_2x.png
similarity index 100%
rename from contentctl/output/templates/splunk_app/static/appIcon_2x.png
rename to contentctl/templates/app_template/static/appIcon_2x.png
diff --git a/contentctl/templates/deployments/00_default_anomaly.yml b/contentctl/templates/deployments/escu_default_configuration_anomaly.yml
similarity index 94%
rename from contentctl/templates/deployments/00_default_anomaly.yml
rename to contentctl/templates/deployments/escu_default_configuration_anomaly.yml
index 4ed7d3da..7b1fe0da 100644
--- a/contentctl/templates/deployments/00_default_anomaly.yml
+++ b/contentctl/templates/deployments/escu_default_configuration_anomaly.yml
@@ -2,6 +2,7 @@ name: ESCU Default Configuration Anomaly
 id: a9e210c6-9f50-4f8b-b60e-71bb26e4f216
 date: '2021-12-21'
 author: Patrick Bareiss
+type: Anomaly
 description: This configuration file applies to all detections of type anomaly.
   These detections will use Risk Based Alerting.
 scheduling:
@@ -12,5 +13,3 @@ scheduling:
 alert_action:
   rba:
     enabled: 'true'
-tags:
-  type: Anomaly
diff --git a/contentctl/templates/deployments/00_default_baseline.yml b/contentctl/templates/deployments/escu_default_configuration_baseline.yml
similarity index 93%
rename from contentctl/templates/deployments/00_default_baseline.yml
rename to contentctl/templates/deployments/escu_default_configuration_baseline.yml
index b9d5b21c..3e1eb0a7 100644
--- a/contentctl/templates/deployments/00_default_baseline.yml
+++ b/contentctl/templates/deployments/escu_default_configuration_baseline.yml
@@ -2,11 +2,10 @@ name: ESCU Default Configuration Baseline
 id: 0f7ee854-1aad-4bef-89c5-5c402b488510
 date: '2021-12-21'
 author: Patrick Bareiss
+type: Baseline
 description: This configuration file applies to all detections of type baseline.
 scheduling:
   cron_schedule: 10 0 * * *
   earliest_time: -1450m@m
   latest_time: -10m@m
   schedule_window: auto
-tags:
-  type: Baseline
diff --git a/contentctl/templates/deployments/00_default_correlation.yml b/contentctl/templates/deployments/escu_default_configuration_correlation.yml
similarity index 94%
rename from contentctl/templates/deployments/00_default_correlation.yml
rename to contentctl/templates/deployments/escu_default_configuration_correlation.yml
index 6667ddcc..b105ac8d 100644
--- a/contentctl/templates/deployments/00_default_correlation.yml
+++ b/contentctl/templates/deployments/escu_default_configuration_correlation.yml
@@ -2,6 +2,7 @@ name: ESCU Default Configuration Correlation
 id: 36ba498c-46e8-4b62-8bde-67e984a40fb4
 date: '2021-12-21'
 author: Patrick Bareiss
+type: Correlation
 description: This configuration file applies to all detections of type Correlation.
   These correlations will generate Notable Events.
 scheduling:
@@ -16,5 +17,4 @@ alert_action:
     nes_fields:
     - user
     - dest
-tags:
-  type: 'Correlation'
+
diff --git a/contentctl/templates/deployments/00_default_hunting.yml b/contentctl/templates/deployments/escu_default_configuration_hunting.yml
similarity index 93%
rename from contentctl/templates/deployments/00_default_hunting.yml
rename to contentctl/templates/deployments/escu_default_configuration_hunting.yml
index bd562c39..b43cf8d2 100644
--- a/contentctl/templates/deployments/00_default_hunting.yml
+++ b/contentctl/templates/deployments/escu_default_configuration_hunting.yml
@@ -2,11 +2,11 @@ name: ESCU Default Configuration Hunting
 id: cc5895e8-3420-4ab7-af38-cf87a28f9c3b
 date: '2021-12-21'
 author: Patrick Bareiss
+type: Hunting
 description: This configuration file applies to all detections of type hunting.
 scheduling:
   cron_schedule: 0 * * * *
   earliest_time: -70m@m
   latest_time: -10m@m
   schedule_window: auto
-tags:
-  type: Hunting
+
diff --git a/contentctl/templates/deployments/00_default_ttp.yml b/contentctl/templates/deployments/escu_default_configuration_ttp.yml
similarity index 96%
rename from contentctl/templates/deployments/00_default_ttp.yml
rename to contentctl/templates/deployments/escu_default_configuration_ttp.yml
index 513bffeb..98ee7ff2 100644
--- a/contentctl/templates/deployments/00_default_ttp.yml
+++ b/contentctl/templates/deployments/escu_default_configuration_ttp.yml
@@ -2,6 +2,7 @@ name: ESCU Default Configuration TTP
 id: b81cd059-a3e8-4c03-96ca-e168c50ff70b
 date: '2021-12-21'
 author: Patrick Bareiss
+type: TTP
 description: This configuration file applies to all detections of type TTP. These
   detections will use Risk Based Alerting and generate Notable Events.
 scheduling:
@@ -18,5 +19,3 @@ alert_action:
     - dest
   rba:
     enabled: 'true'
-tags:
-  type: TTP
diff --git a/contentctl/templates/detections/anomalous_usage_of_7zip.yml b/contentctl/templates/detections/anomalous_usage_of_7zip.yml
index 5b464f3e..172248d4 100644
--- a/contentctl/templates/detections/anomalous_usage_of_7zip.yml
+++ b/contentctl/templates/detections/anomalous_usage_of_7zip.yml
@@ -32,7 +32,6 @@ references:
 tags:
   analytic_story:
   - Cobalt Strike
-  - NOBELIUM Group
   asset_type: Endpoint
   confidence: 80
   impact: 80
diff --git a/contentctl/templates/stories/cobalt_strike.yml b/contentctl/templates/stories/cobalt_strike.yml
index 74e91da0..8e11f0f4 100644
--- a/contentctl/templates/stories/cobalt_strike.yml
+++ b/contentctl/templates/stories/cobalt_strike.yml
@@ -51,7 +51,6 @@ references:
 - https://github.com/MichaelKoczwara/Awesome-CobaltStrike-Defence
 - https://github.com/zer0yu/Awesome-CobaltStrike
 tags:
-  analytic_story: Cobalt Strike
   category:
   - Adversary Tactics
   product:
diff --git a/pyproject.toml b/pyproject.toml
index 03e30eb9..fd35d90c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "contentctl"
-version = "3.6.0"
+version = "4.0.0"
 description = "Splunk Content Control Tool"
 authors = ["STRT "]
 license = "Apache 2.0"
@@ -10,26 +10,27 @@ readme = "README.md"
 contentctl = 'contentctl.contentctl:main'
 
 [tool.poetry.dependencies]
-python = "^3.9"
-pydantic = "^1.10.11"
-PyYAML = "^6.0"
-requests = "^2.28.1"
+python = "^3.11"
+pydantic = "^2.5.1"
+PyYAML = "^6.0.1"
+requests = "^2.31.0"
 pycvesearch = "^1.2"
 xmltodict = "^0.13.0"
 attackcti = "^0.3.7"
 Jinja2 = "^3.1.2"
-questionary = "^1.10.0"
-gitpython = "^3.1.29"
-docker = "^6.0.1"
-splunk-sdk = "^1.7.2"
-validators = "^0.20.0"
+questionary = "^2.0.1"
+docker = "^6.1.3"
+splunk-sdk = "^2.0.1"
+validators = "^0.22.0"
 semantic-version = "^2.10.0"
-bottle = "^0.12.23"
-tqdm = "^4.65.0"
+bottle = "^0.12.25"
+tqdm = "^4.66.1"
 #splunk-appinspect = "^2.36.0"
-pysigma = "^0.10.5"
+pysigma = "^0.10.8"
 pysigma-backend-splunk = "^1.0.3"
-
+pygit2 = "^1.14.1"
+tyro = "^0.8.3"
+gitpython = "^3.1.43"
 [tool.poetry.dev-dependencies]
 
 [build-system]