diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 00000000..72f9f747 Binary files /dev/null and b/.DS_Store differ diff --git a/.gitignore b/.gitignore index 2993f997..8f57bb7f 100644 --- a/.gitignore +++ b/.gitignore @@ -6,7 +6,7 @@ dist/* apps* test_results* attack_data* - +security_content/ # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/contentctl/actions/convert.py b/contentctl/actions/convert.py new file mode 100644 index 00000000..06ad1167 --- /dev/null +++ b/contentctl/actions/convert.py @@ -0,0 +1,25 @@ + +import sys +import shutil +import os + +from dataclasses import dataclass + +from contentctl.input.sigma_converter import * +from contentctl.output.yml_output import YmlOutput + +@dataclass(frozen=True) +class ConvertInputDto: + sigma_converter_input_dto: SigmaConverterInputDto + output_path : str + + +class Convert: + + def execute(self, input_dto: ConvertInputDto) -> None: + sigma_converter_output_dto = SigmaConverterOutputDto([]) + sigma_converter = SigmaConverter(sigma_converter_output_dto) + sigma_converter.execute(input_dto.sigma_converter_input_dto) + + yml_output = YmlOutput() + yml_output.writeDetections(sigma_converter_output_dto.detections, input_dto.output_path) \ No newline at end of file diff --git a/contentctl/actions/detection_testing/DetectionTestingManager.py b/contentctl/actions/detection_testing/DetectionTestingManager.py index e935379c..370d97a1 100644 --- a/contentctl/actions/detection_testing/DetectionTestingManager.py +++ b/contentctl/actions/detection_testing/DetectionTestingManager.py @@ -89,11 +89,11 @@ def sigint_handler(signum, frame): signal.signal(signal.SIGINT, sigint_handler) with concurrent.futures.ThreadPoolExecutor( - max_workers=self.input_dto.config.num_containers, + max_workers=len(self.input_dto.config.infrastructure_config.infrastructures), ) as instance_pool, concurrent.futures.ThreadPoolExecutor( max_workers=len(self.input_dto.views) ) as view_runner, concurrent.futures.ThreadPoolExecutor( - max_workers=self.input_dto.config.num_containers, + max_workers=len(self.input_dto.config.infrastructure_config.infrastructures), ) as view_shutdowner: # Start all the views @@ -151,39 +151,33 @@ def sigint_handler(signum, frame): def create_DetectionTestingInfrastructureObjects(self): import sys - for index in range(self.input_dto.config.num_containers): - instanceConfig = deepcopy(self.input_dto.config) - instanceConfig.api_port += index * 2 - instanceConfig.hec_port += index * 2 - instanceConfig.web_ui_port += index - - instanceConfig.container_name = instanceConfig.container_name % (index,) + for infrastructure in self.input_dto.config.infrastructure_config.infrastructures: if ( - self.input_dto.config.target_infrastructure + self.input_dto.config.infrastructure_config.infrastructure_type == DetectionTestingTargetInfrastructure.container ): self.detectionTestingInfrastructureObjects.append( DetectionTestingInfrastructureContainer( - config=instanceConfig, sync_obj=self.output_dto + global_config=self.input_dto.config, infrastructure=infrastructure, sync_obj=self.output_dto ) ) elif ( - self.input_dto.config.target_infrastructure + self.input_dto.config.infrastructure_config.infrastructure_type == DetectionTestingTargetInfrastructure.server ): self.detectionTestingInfrastructureObjects.append( DetectionTestingInfrastructureServer( - config=instanceConfig, sync_obj=self.output_dto + global_config=self.input_dto.config, infrastructure=infrastructure, sync_obj=self.output_dto ) ) else: print( - f"Unsupported target 
infrastructure '{self.input_dto.config.target_infrastructure}'" + f"Unsupported target infrastructure '{self.input_dto.config.infrastructure_config.infrastructure_type}'" ) sys.exit(1) diff --git a/contentctl/actions/detection_testing/GitHubService.py b/contentctl/actions/detection_testing/GitHubService.py index 886a3e05..c36eafec 100644 --- a/contentctl/actions/detection_testing/GitHubService.py +++ b/contentctl/actions/detection_testing/GitHubService.py @@ -49,6 +49,7 @@ def get_all_content(self, director: DirectorOutputDto) -> DirectorOutputDto: self.get_macros(director), self.get_lookups(director), [], + [] ) def get_stories(self, director: DirectorOutputDto) -> list[Story]: @@ -137,15 +138,63 @@ def get_detections_changed(self, director: DirectorOutputDto) -> list[Detection] f"Error: self.repo must be initialized before getting changed detections." ) ) - raise (Exception("not implemented")) - return [] + + differences = self.repo.git.diff("--name-status", f"origin/{self.config.version_control_config.main_branch}").split("\n") + new_content = [] + modified_content = [] + deleted_content = [] + for difference in differences: + mode, filename = difference.split("\t") + if mode == "A": + new_content.append(filename) + elif mode == "M": + modified_content.append(filename) + elif mode == "D": + deleted_content.append(filename) + else: + raise Exception(f"Unknown mode in determining differences: {difference}") + + #Changes to detections, macros, and lookups should trigger a re-test for anything which uses them + changed_lookups_list = list(filter(lambda x: x.startswith("lookups"), new_content+modified_content)) + changed_lookups = set() + + #We must account for changes to the lookup yml AND for the underlying csv + for lookup in changed_lookups_list: + if lookup.endswith(".csv"): + lookup = lookup.replace(".csv", ".yml") + changed_lookups.add(lookup) + + # At some point we should account for macros which contain other macros... 
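        # A minimal sketch of how the nested-macro TODO above might be handled
        # (illustrative only, not part of this change): treat a macro as changed when
        # its definition references a macro already in the changed set, and iterate
        # until the set stops growing. It works on macro names rather than the file
        # paths used above, and assumes each Macro exposes .name and .definition
        # (both appear elsewhere in this diff); the helper name is hypothetical.
        def expand_changed_macros(changed_names: set, all_macros: list) -> set:
            expanded = set(changed_names)
            while True:
                # A macro is newly "changed" if it is not yet in the set but its
                # definition invokes (via backticks) any macro that already is.
                newly_changed = {
                    m.name
                    for m in all_macros
                    if m.name not in expanded
                    and any(f"`{name}`" in m.definition for name in expanded)
                }
                if not newly_changed:
                    return expanded
                expanded |= newly_changed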
+ changed_macros = set(filter(lambda x: x.startswith("macros"), new_content+modified_content)) + changed_macros_and_lookups = set([str(pathlib.Path(filename).absolute()) for filename in changed_lookups.union(changed_macros)]) + + changed_detections = set(filter(lambda x: x.startswith("detections"), new_content+modified_content)) + + #Check and see if content that has been modified uses any of the changed macros or lookups + for detection in director.detections: + deps = set([content.file_path for content in detection.get_content_dependencies()]) + if not deps.isdisjoint(changed_macros_and_lookups): + changed_detections.add(detection.file_path) + + return Detection.get_detections_from_filenames(changed_detections, director.detections) def __init__(self, config: TestConfig): - self.repo = None + self.requested_detections: list[pathlib.Path] = [] self.config = config - - if config.mode == DetectionTestingMode.selected: + if config.version_control_config is not None: + self.repo = git.Repo(config.version_control_config.repo_path) + else: + self.repo = None + + + if config.mode == DetectionTestingMode.changes: + if self.repo is None: + raise Exception("You are using detection mode 'changes', but the app does not have a version_control_config in contentctl_test.yml.") + return + elif config.mode == DetectionTestingMode.all: + return + elif config.mode == DetectionTestingMode.selected: if config.detections_list is None or len(config.detections_list) < 1: raise ( Exception( @@ -171,63 +220,12 @@ def __init__(self, config: TestConfig): pathlib.Path(detection_file_name) for detection_file_name in config.detections_list ] - return - - elif config.mode == DetectionTestingMode.changes: - # Changes is ONLY possible if the app is version controlled - # in a github repo. Ensure that this is the case and, if not - # raise an exception - raise (Exception("Mode [changes] is not yet supported.")) - try: - repo = git.Repo(config.repo_path) - except Exception as e: - raise ( - Exception( - f"Error: detection mode [{config.mode}] REQUIRES that [{config.repo_path}] is a git repository, but it is not." - ) - ) - if config.main_branch == config.test_branch: - raise ( - Exception( - f"Error: test_branch [{config.test_branch}] is the same as the main_branch [{config.main_branch}]. When using mode [{config.mode}], these two branches MUST be different." - ) - ) - - # Ensure that the test branch is checked out - if self.repo.active_branch.name != config.test_branch: - raise ( - Exception( - f"Error: detection mode [{config.mode}] REQUIRES that the test_branch [{config.test_branch}] be checked out at the beginning of the test, but it is not." - ) - ) - - # Ensure that the base branch exists - - if Utils.validate_git_branch_name( - config.repo_path, "NO_URL", config.main_branch - ): - return - - elif config.mode == DetectionTestingMode.all: - return + else: - raise ( - Exception( - f"Unsupported detection testing mode [{config.mode}]. Supported detection testing modes are [{DetectionTestingMode._member_names_}]" - ) - ) - - def __init2__(self, config: TestConfig): - - self.repo = git.Repo(config.repo_path) - - if self.repo.active_branch.name != config.test_branch: - print( - f"Error - test_branch is '{config.test_branch}', but the current active branch in '{config.repo_path}' is '{self.repo.active_branch}'. Checking out the branch you specified..." - ) - self.repo.git.checkout(config.test_branch) - - self.config = config + raise Exception(f"Unsupported detection testing mode [{config.mode}]. 
"\ + "Supported detection testing modes are [{DetectionTestingMode._member_names_}]") + return + def clone_project(self, url, project, branch): LOGGER.info(f"Clone Security Content Project") @@ -252,7 +250,7 @@ def get_detections_to_test( ] if ignore_deprecated: director.detections = [ - d for d in director.detections if not (d.deprecated == True) + d for d in director.detections if not (d.status == "deprecated") ] if ignore_ssa: director.detections = [ @@ -352,29 +350,29 @@ def get_all_modified_content( # Because we have not passed -all as a kwarg, we will have a MAX of one commit returned: # https://gitpython.readthedocs.io/en/stable/reference.html?highlight=merge_base#git.repo.base.Repo.merge_base base_commits = self.repo.merge_base( - self.config.main_branch, self.config.test_branch + self.config.version_control_config.main_branch, self.config.version_control_config.test_branch ) if len(base_commits) == 0: raise ( Exception( - f"Error, main branch '{self.config.main_branch}' and test branch '{self.config.test_branch}' do not share a common ancestor" + f"Error, main branch '{self.config.version_control_config.main_branch}' and test branch '{self.config.version_control_config.test_branch}' do not share a common ancestor" ) ) base_commit = base_commits[0] if base_commit is None: raise ( Exception( - f"Error, main branch '{self.config.main_branch}' and test branch '{self.config.test_branch}' common ancestor commit was 'None'" + f"Error, main branch '{self.config.version_control_config.main_branch}' and test branch '{self.config.version_control_config.test_branch}' common ancestor commit was 'None'" ) ) all_changes = base_commit.diff( - self.config.test_branch, paths=[str(path) for path in paths] + self.config.version_control_config.test_branch, paths=[str(path) for path in paths] ) # distill changed files down to the paths of added or modified files all_changes_paths = [ - os.path.join(self.config.repo_path, change.b_path) + os.path.join(self.config.version_control_config.repo_path, change.b_path) for change in all_changes if change.change_type in ["M", "A"] ] diff --git a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py index e3c60ae6..9a895edc 100644 --- a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +++ b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py @@ -3,20 +3,21 @@ import abc import requests import splunklib.client as client -from contentctl.objects.enums import PostTestBehavior +from contentctl.objects.enums import PostTestBehavior, DetectionStatus from contentctl.objects.detection import Detection from contentctl.objects.unit_test_test import UnitTestTest from contentctl.objects.unit_test_attack_data import UnitTestAttackData from contentctl.objects.unit_test_result import UnitTestResult -from contentctl.objects.test_config import TestConfig +from contentctl.objects.test_config import TestConfig, Infrastructure from shutil import copyfile from splunklib.binding import HTTPError +from splunklib.results import JSONResultsReader, Message import os.path import configparser from ssl import SSLEOFError, SSLZeroReturnError import time import uuid - +from sys import stdout from tempfile import TemporaryDirectory, mktemp import pathlib @@ -33,6 +34,7 @@ import tqdm + MAX_TEST_NAME_LENGTH = 70 TESTING_STATES = [ "Downloading Data", @@ -54,6 +56,7 @@ class 
ContainerStoppedException(Exception): class DetectionTestingManagerOutputDto: inputQueue: list[Detection] = Field(default_factory=list) outputQueue: list[Detection] = Field(default_factory=list) + skippedQueue: list[Detection] = Field(default_factory=list) currentTestingQueue: dict[str, Union[Detection, None]] = Field(default_factory=dict) start_time: Union[datetime.datetime, None] = None replay_index: str = "CONTENTCTL_TESTING_INDEX" @@ -64,7 +67,8 @@ class DetectionTestingManagerOutputDto: class DetectionTestingInfrastructure(BaseModel, abc.ABC): # thread: threading.Thread = threading.Thread() - config: TestConfig + global_config: TestConfig + infrastructure: Infrastructure sync_obj: DetectionTestingManagerOutputDto hec_token: str = "" hec_channel: str = "" @@ -99,21 +103,20 @@ def setup(self): bar_format=f"{self.get_name()} starting", miniters=0, mininterval=0, + file=stdout ) + self.start_time = time.time() try: for func, msg in [ (self.start, "Starting"), - (self.get_conn, "Getting API Connection"), - ( - self.create_replay_index, - f"Create index '{self.sync_obj.replay_index}'", - ), + (self.get_conn, "Waiting for App Installation"), + (self.configure_conf_file_datamodels, "Configuring Datamodels"), + (self.create_replay_index,f"Create index '{self.sync_obj.replay_index}'"), (self.configure_imported_roles, "Configuring Roles"), (self.configure_delete_indexes, "Configuring Indexes"), - (self.configure_conf_file_datamodels, "Configuring Datamodels"), (self.configure_hec, "Configuring HEC"), - (self.wait_for_ui_ready, "Waiting for UI"), + (self.wait_for_ui_ready, "Finishing Setup") ]: self.format_pbar_string( @@ -185,10 +188,10 @@ def connect_to_api(self, sleep_seconds: int = 5): try: conn = client.connect( - host=self.config.test_instance_address, - port=self.config.api_port, - username=self.config.splunk_app_username, - password=self.config.splunk_app_password, + host=self.infrastructure.instance_address, + port=self.infrastructure.api_port, + username=self.infrastructure.splunk_app_username, + password=self.infrastructure.splunk_app_password, ) if conn.restart_required: @@ -244,12 +247,26 @@ def create_replay_index(self): def configure_imported_roles( self, imported_roles: list[str] = ["user", "power", "can_delete"], + enterprise_security_roles: list[str]= ["ess_admin", "ess_analyst", "ess_user"], indexes: list[str] = ["_*", "*"], ): indexes.append(self.sync_obj.replay_index) indexes_encoded = ";".join(indexes) + try: + self.get_conn().roles.post( + self.infrastructure.splunk_app_username, + imported_roles=imported_roles + enterprise_security_roles, + srchIndexesAllowed=indexes_encoded, + srchIndexesDefault=self.sync_obj.replay_index, + ) + return + except Exception as e: + self.pbar.write( + f"Enterprise Security Roles do not exist:'{enterprise_security_roles}: {str(e)}" + ) + self.get_conn().roles.post( - self.config.splunk_app_username, + self.infrastructure.splunk_app_username, imported_roles=imported_roles, srchIndexesAllowed=indexes_encoded, srchIndexesDefault=self.sync_obj.replay_index, @@ -338,6 +355,10 @@ def execute(self): try: detection = self.sync_obj.inputQueue.pop() + if detection.status != DetectionStatus.production.value: + self.sync_obj.skippedQueue.append(detection) + self.pbar.write(f"\nSkipping {detection.name} since it is status: {detection.status}\n") + continue self.sync_obj.currentTestingQueue[self.get_name()] = detection except IndexError as e: # self.pbar.write( @@ -407,7 +428,7 @@ def execute_test( test.result = UnitTestResult() test.result.set_job_content( 
- e, self.config, duration=time.time() - start_time + None, self.infrastructure, exception=e, duration=time.time() - start_time ) self.pbar.write( self.format_pbar_string( @@ -439,13 +460,13 @@ def execute_test( except Exception as e: test.result = UnitTestResult() test.result.set_job_content( - e, self.config, duration=time.time() - start_time + None, self.infrastructure, exception=e, duration=time.time() - start_time ) if ( - self.config.post_test_behavior == PostTestBehavior.always_pause + self.global_config.post_test_behavior == PostTestBehavior.always_pause or ( - self.config.post_test_behavior == PostTestBehavior.pause_on_failure + self.global_config.post_test_behavior == PostTestBehavior.pause_on_failure and (test.result is None or test.result.success == False) ) ) and not self.sync_obj.terminate: @@ -491,7 +512,7 @@ def execute_test( set_pbar=False, ) ) - + stdout.flush() if test.result is not None: test.result.duration = round(time.time() - start_time, 2) @@ -533,30 +554,85 @@ def retry_search_until_timeout( job = self.get_conn().search(query=search, **kwargs) - # the following raises an error if there is an exception in the search - _ = job.results(output_mode="json") - + results = JSONResultsReader(job.results(output_mode="json")) + + observable_fields_set = set([o.name for o in detection.tags.observable]) + if int(job.content.get("resultCount", "0")) > 0: test.result = UnitTestResult() + empty_fields = set() + for result in results: + if isinstance(result, Message): + continue + + #otherwise it is a dict and we will process is + results_fields_set = set(result.keys()) + + missing_fields = observable_fields_set - results_fields_set + + + if len(missing_fields) > 0: + e = Exception(f"The observable field(s) {missing_fields} are missing in the detection results") + test.result.set_job_content( + job.content, + self.infrastructure, + exception=e, + success=False, + duration=time.time() - search_start_time, + ) + + + return + + + + + # If we find one or more fields that contain the string "null" then they were + # not populated and we should throw an error. This can happen if there is a typo + # on a field. In this case, the field will appear but will not contain any values + current_empty_fields = set() + for field in observable_fields_set: + if result.get(field,'null') == 'null': + current_empty_fields.add(field) + + + if len(current_empty_fields) == 0: + test.result.set_job_content( + job.content, + self.infrastructure, + success=True, + duration=time.time() - search_start_time, + ) + return + + else: + empty_fields = empty_fields.union(current_empty_fields) + + + e = Exception(f"One or more required observable fields {empty_fields} contained 'null' values. 
Is the data being " + "parsed correctly or is there an error in the naming of a field?") test.result.set_job_content( job.content, - self.config, - success=True, + self.infrastructure, + exception=e, + success=False, duration=time.time() - search_start_time, ) - + return + else: test.result = UnitTestResult() test.result.set_job_content( job.content, - self.config, + self.infrastructure, success=False, duration=time.time() - search_start_time, ) - - tick += 1 - + tick += 1 + + + return def delete_attack_data(self, attack_data_files: list[UnitTestAttackData]): @@ -678,21 +754,21 @@ def hec_raw_replay( "host": attack_data_file.host or self.sync_obj.replay_host, } - if self.config.test_instance_address.strip().lower().startswith("https://"): - address_with_scheme = self.config.test_instance_address.strip().lower() - elif self.config.test_instance_address.strip().lower().startswith("http://"): + if self.infrastructure.instance_address.strip().lower().startswith("https://"): + address_with_scheme = self.infrastructure.instance_address.strip().lower() + elif self.infrastructure.instance_address.strip().lower().startswith("http://"): address_with_scheme = ( - self.config.test_instance_address.strip() + self.infrastructure.instance_address.strip() .lower() .replace("http://", "https://") ) else: - address_with_scheme = f"https://{self.config.test_instance_address}" + address_with_scheme = f"https://{self.infrastructure.instance_address}" # Generate the full URL, including the host, the path, and the params. # We can be a lot smarter about this (and pulling the port from the url, checking # for trailing /, etc, but we leave that for the future) - url_with_port = f"{address_with_scheme}:{self.config.hec_port}" + url_with_port = f"{address_with_scheme}:{self.infrastructure.hec_port}" url_with_hec_path = urllib.parse.urljoin( url_with_port, "services/collector/raw" ) diff --git a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py index c4ee664b..5ab14699 100644 --- a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +++ b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py @@ -28,7 +28,7 @@ def finish(self): super().finish() def get_name(self) -> str: - return self.config.container_name + return self.infrastructure.instance_name def get_docker_client(self): try: @@ -59,9 +59,9 @@ def make_container(self) -> docker.models.resource.Model: self.removeContainer() ports_dict = { - "8000/tcp": self.config.web_ui_port, - "8088/tcp": self.config.hec_port, - "8089/tcp": self.config.api_port, + "8000/tcp": self.infrastructure.web_ui_port, + "8088/tcp": self.infrastructure.hec_port, + "8089/tcp": self.infrastructure.api_port, } mounts = [ @@ -75,19 +75,19 @@ def make_container(self) -> docker.models.resource.Model: environment = {} environment["SPLUNK_START_ARGS"] = "--accept-license" - environment["SPLUNK_PASSWORD"] = self.config.splunk_app_password + environment["SPLUNK_PASSWORD"] = self.infrastructure.splunk_app_password environment["SPLUNK_APPS_URL"] = ",".join( - p.environment_path for p in self.config.apps + p.environment_path for p in self.global_config.apps ) if ( - self.config.splunkbase_password is not None - and self.config.splunkbase_username is not None + self.global_config.splunkbase_password is not None + and self.global_config.splunkbase_username is not None ): - 
environment["SPLUNKBASE_USERNAME"] = self.config.splunkbase_username - environment["SPLUNKBASE_PASSWORD"] = self.config.splunkbase_password + environment["SPLUNKBASE_USERNAME"] = self.global_config.splunkbase_username + environment["SPLUNKBASE_PASSWORD"] = self.global_config.splunkbase_password container = self.get_docker_client().containers.create( - self.config.full_image_path, + self.global_config.infrastructure_config.full_image_path, ports=ports_dict, environment=environment, name=self.get_name(), @@ -99,7 +99,6 @@ def make_container(self) -> docker.models.resource.Model: return container def removeContainer(self, removeVolumes: bool = True, forceRemove: bool = True): - try: container: docker.models.containers.Container = ( self.get_docker_client().containers.get(self.get_name()) @@ -118,6 +117,6 @@ def removeContainer(self, removeVolumes: bool = True, forceRemove: bool = True): except Exception as e: raise ( Exception( - f"Could not remove Docker Container [{self.config.container_name}]: {str(e)}" + f"Could not remove Docker Container [{self.get_name()}]: {str(e)}" ) ) diff --git a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureServer.py b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureServer.py index 34dde092..e7b8c2e8 100644 --- a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureServer.py +++ b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureServer.py @@ -11,4 +11,4 @@ def finish(self): super().finish() def get_name(self): - return self.config.container_name + return self.infrastructure.instance_name diff --git a/contentctl/actions/detection_testing/views/DetectionTestingView.py b/contentctl/actions/detection_testing/views/DetectionTestingView.py index 35158c60..7d03f8cb 100644 --- a/contentctl/actions/detection_testing/views/DetectionTestingView.py +++ b/contentctl/actions/detection_testing/views/DetectionTestingView.py @@ -7,6 +7,7 @@ DetectionTestingManagerOutputDto, ) from contentctl.helper.utils import Utils +from contentctl.objects.enums import DetectionStatus class DetectionTestingView(BaseModel, abc.ABC): @@ -69,7 +70,7 @@ def getETA(self) -> datetime.timedelta: def getSummaryObject( self, - test_model_fields: list[str] = ["success", "message"], + test_model_fields: list[str] = ["success", "message", "exception"], test_job_fields: list[str] = ["resultCount", "runDuration"], ) -> dict: total_untested = len(self.sync_obj.inputQueue) @@ -95,6 +96,10 @@ def getSummaryObject( # All failures appear first untested_detections.sort(key=lambda x: x["name"]) + experimental_detections = sorted([detection.name for detection in self.sync_obj.skippedQueue if detection.status == DetectionStatus.experimental.value]) + deprecated_detections = sorted([detection.name for detection in self.sync_obj.skippedQueue if detection.status == DetectionStatus.deprecated.value]) + + if (total_fail + len(untested_detections)) == 0: overall_success = True else: @@ -113,10 +118,14 @@ def getSummaryObject( "total_detections": total_pass + total_fail, "total_pass": total_pass, "total_fail_or_untested": total_fail + total_untested, + "total_experimental_or_deprecated": len(deprecated_detections+experimental_detections), "success_rate": success_rate, }, "tested_detections": tested_detections, "untested_detections": untested_detections, "percent_complete": percent_complete, + "deprecated_detections": deprecated_detections, + "experimental_detections": experimental_detections 
+ } return result_dict diff --git a/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py b/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py index ac9f2af8..7cdf5acf 100644 --- a/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py +++ b/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py @@ -21,8 +21,8 @@ class DetectionTestingViewFile(DetectionTestingView): output_filename: str = OUTPUT_FILENAME def getOutputFilePath(self) -> pathlib.Path: - - folder_path = pathlib.Path(self.config.repo_path) / self.output_folder + + folder_path = pathlib.Path('.') / self.output_folder output_file = folder_path / self.output_filename return output_file @@ -31,16 +31,17 @@ def setup(self): pass def stop(self): - folder_path = pathlib.Path(self.config.repo_path) / OUTPUT_FOLDER + folder_path = pathlib.Path('.') / self.output_folder output_file = self.getOutputFilePath() folder_path.mkdir(parents=True, exist_ok=True) - + + result_dict = self.getSummaryObject() - + # use the yaml writer class with open(output_file, "w") as res: - res.write(yaml.safe_dump(result_dict)) + res.write(yaml.safe_dump(result_dict,sort_keys=False)) def showStatus(self, interval: int = 60): pass diff --git a/contentctl/actions/generate.py b/contentctl/actions/generate.py index cb22ec08..83e98631 100644 --- a/contentctl/actions/generate.py +++ b/contentctl/actions/generate.py @@ -19,7 +19,7 @@ class GenerateInputDto: class Generate: def execute(self, input_dto: GenerateInputDto) -> DirectorOutputDto: - director_output_dto = DirectorOutputDto([],[],[],[],[],[],[],[]) + director_output_dto = DirectorOutputDto([],[],[],[],[],[],[],[],[]) director = Director(director_output_dto) director.execute(input_dto.director_input_dto) @@ -34,25 +34,31 @@ def execute(self, input_dto: GenerateInputDto) -> DirectorOutputDto: conf_output.writeObjects(director_output_dto.macros, SecurityContentType.macros) conf_output.writeAppConf() conf_output.packageApp() - conf_output.inspectApp() + #conf_output.inspectApp() + + print(f'Generate of security content successful to {conf_output.output_path}') + return director_output_dto elif input_dto.director_input_dto.product == SecurityContentProduct.SSA: - shutil.rmtree(input_dto.output_path + '/srs/', ignore_errors=True) - shutil.rmtree(input_dto.output_path + '/complex/', ignore_errors=True) - os.makedirs(input_dto.output_path + '/complex/') - os.makedirs(input_dto.output_path + '/srs/') + output_path = os.path.join(input_dto.director_input_dto.input_path, input_dto.director_input_dto.config.build_ssa.output_path) + shutil.rmtree(output_path + '/srs/', ignore_errors=True) + shutil.rmtree(output_path + '/complex/', ignore_errors=True) + os.makedirs(output_path + '/complex/') + os.makedirs(output_path + '/srs/') ba_yml_output = BAYmlOutput() - ba_yml_output.writeObjects(director_output_dto.detections, input_dto.output_path) + ba_yml_output.writeObjects(director_output_dto.ssa_detections, output_path) elif input_dto.director_input_dto.product == SecurityContentProduct.API: + output_path = os.path.join(input_dto.director_input_dto.input_path, input_dto.director_input_dto.config.build_api.output_path) + shutil.rmtree(output_path, ignore_errors=True) + os.makedirs(output_path) api_json_output = ApiJsonOutput() - api_json_output.writeObjects(director_output_dto.detections, input_dto.output_path, SecurityContentType.detections) - api_json_output.writeObjects(director_output_dto.stories, input_dto.output_path, SecurityContentType.stories) - 
api_json_output.writeObjects(director_output_dto.baselines, input_dto.output_path, SecurityContentType.baselines) - api_json_output.writeObjects(director_output_dto.investigations, input_dto.output_path, SecurityContentType.investigations) - api_json_output.writeObjects(director_output_dto.lookups, input_dto.output_path, SecurityContentType.lookups) - api_json_output.writeObjects(director_output_dto.macros, input_dto.output_path, SecurityContentType.macros) - api_json_output.writeObjects(director_output_dto.deployments, input_dto.output_path, SecurityContentType.deployments) - - print(f'Generate of security content successful to {conf_output.output_path}') + api_json_output.writeObjects(director_output_dto.detections, output_path, SecurityContentType.detections) + api_json_output.writeObjects(director_output_dto.stories, output_path, SecurityContentType.stories) + api_json_output.writeObjects(director_output_dto.baselines, output_path, SecurityContentType.baselines) + api_json_output.writeObjects(director_output_dto.investigations, output_path, SecurityContentType.investigations) + api_json_output.writeObjects(director_output_dto.lookups, output_path, SecurityContentType.lookups) + api_json_output.writeObjects(director_output_dto.macros, output_path, SecurityContentType.macros) + api_json_output.writeObjects(director_output_dto.deployments, output_path, SecurityContentType.deployments) + return director_output_dto \ No newline at end of file diff --git a/contentctl/actions/initialize.py b/contentctl/actions/initialize.py index 33dce84b..18c89fc4 100644 --- a/contentctl/actions/initialize.py +++ b/contentctl/actions/initialize.py @@ -5,6 +5,7 @@ from dataclasses import dataclass from contentctl.objects.config import Config, TestConfig, PASSWORD from contentctl.output.yml_writer import YmlWriter +import json @dataclass(frozen=True) class InitializeInputDto: @@ -18,8 +19,7 @@ def execute(self, input_dto: InitializeInputDto) -> None: c = Config() - t = TestConfig.construct(splunk_app_username="admin", - splunk_app_password= PASSWORD) #Disable validation for default object + t = TestConfig.construct() #Disable validation for default object config_as_dict = c.dict() config_as_dict.pop("test") @@ -29,13 +29,13 @@ def execute(self, input_dto: InitializeInputDto) -> None: # This field serialization hack is required to get # enums declared in Pydantic Models serialized properly # without emitting tags that make them hard to read in yml - import json + j = json.dumps(t.dict(),sort_keys=False) obj=json.loads(j) YmlWriter.writeYmlFile(os.path.join(input_dto.path, 'contentctl_test.yml'), dict(obj)) - folders = ['detections', 'stories', 'lookups', 'macros', 'baselines', 'dist', 'docs', 'reporting'] + folders = ['detections', 'stories', 'lookups', 'macros', 'baselines', 'dist', 'docs', 'reporting', 'investigations'] for folder in folders: os.makedirs(os.path.join(input_dto.path, folder)) @@ -53,6 +53,11 @@ def execute(self, input_dto: InitializeInputDto) -> None: dest_path/detection_name) + shutil.copytree( + os.path.join(os.path.dirname(__file__), '../templates/deployments'), + os.path.join(input_dto.path, 'deployments') + ) + shutil.copyfile( os.path.join(os.path.dirname(__file__), '../templates/stories/cobalt_strike.yml'), os.path.join(input_dto.path, 'stories', 'cobalt_strike.yml') diff --git a/contentctl/actions/reporting.py b/contentctl/actions/reporting.py index e38ac205..515237cf 100644 --- a/contentctl/actions/reporting.py +++ b/contentctl/actions/reporting.py @@ -11,16 +11,18 @@ class 
ReportingInputDto: director_input_dto: DirectorInputDto - class Reporting: def execute(self, input_dto: ReportingInputDto) -> None: - director_output_dto = DirectorOutputDto([],[],[],[],[],[],[],[]) + director_output_dto = DirectorOutputDto([],[],[],[],[],[],[],[],[]) director = Director(director_output_dto) director.execute(input_dto.director_input_dto) - #svg_output = SvgOutput() - #svg_output.writeObjects(director_output_dto.detections, input_dto.output_path) + svg_output = SvgOutput() + svg_output.writeObjects( + director_output_dto.detections, + os.path.join(input_dto.director_input_dto.input_path, "reporting") + ) attack_nav_output = AttackNavOutput() attack_nav_output.writeObjects( diff --git a/contentctl/actions/validate.py b/contentctl/actions/validate.py index 9527dde6..a36f638f 100644 --- a/contentctl/actions/validate.py +++ b/contentctl/actions/validate.py @@ -20,7 +20,7 @@ class ValidateInputDto: class Validate: def execute(self, input_dto: ValidateInputDto) -> None: - director_output_dto = DirectorOutputDto([], [], [], [], [], [], [], []) + director_output_dto = DirectorOutputDto([], [], [], [], [], [], [], [], []) director = Director(director_output_dto) director.execute(input_dto.director_input_dto) @@ -35,11 +35,6 @@ def execute(self, input_dto: ValidateInputDto) -> None: ) self.validate_duplicate_uuids(security_content_objects) - # validate tests - self.validate_detection_exist_for_test( - director_output_dto.tests, director_output_dto.detections - ) - except ValueError as e: print(e) sys.exit(1) @@ -73,14 +68,14 @@ def validate_duplicate_uuids(self, security_content_objects): + "\n".join([obj.name for obj in content_with_duplicate_uuid]) ) - def validate_detection_exist_for_test(self, tests: list, detections: list): - for test in tests: - found_detection = False - for detection in detections: - if test.tests[0].file in detection.file_path: - found_detection = True - - if not found_detection: - raise ValueError( - "ERROR: detection doesn't exist for test file: " + test.name - ) + # def validate_detection_exist_for_test(self, tests: list, detections: list): + # for test in tests: + # found_detection = False + # for detection in detections: + # if test.tests[0].file in detection.file_path: + # found_detection = True + + # if not found_detection: + # raise ValueError( + # "ERROR: detection doesn't exist for test file: " + test.name + # ) diff --git a/contentctl/contentctl.py b/contentctl/contentctl.py index 183c27ff..ccdbe414 100644 --- a/contentctl/contentctl.py +++ b/contentctl/contentctl.py @@ -1,9 +1,12 @@ import sys import argparse import os - +import tqdm +import functools +from typing import Union import yaml import pathlib + from contentctl.actions.detection_testing.GitHubService import ( GithubService, ) @@ -25,6 +28,7 @@ SecurityContentProduct, DetectionTestingMode, PostTestBehavior, + DetectionTestingTargetInfrastructure ) from contentctl.input.new_content_generator import NewContentGeneratorInputDto from contentctl.helper.config_handler import ConfigHandler @@ -32,14 +36,14 @@ from contentctl.objects.config import Config from contentctl.objects.app import App -from contentctl.objects.test_config import TestConfig +from contentctl.objects.test_config import TestConfig, Infrastructure from contentctl.actions.test import Test, TestInputDto, TestOutputDto +from contentctl.objects.enums import * +from contentctl.input.sigma_converter import * +from contentctl.actions.convert import * -import tqdm -import functools -from typing import Union - +SERVER_ARGS_ENV_VARIABLE 
= "CONTENTCTL_TEST_INFRASTRUCTURES" def configure_unattended(args: argparse.Namespace) -> argparse.Namespace: # disable all calls to tqdm - this is so that CI/CD contexts don't @@ -91,7 +95,7 @@ def print_ascii_art(): def start(args, read_test_file:bool = False) -> Config: base_config = ConfigHandler.read_config(pathlib.Path(args.path)/"contentctl.yml") if read_test_file: - base_config.test = ConfigHandler.read_test_config(pathlib.Path(args.path)/"contentctl_test.yml") + base_config.test = ConfigHandler.read_test_config(pathlib.Path(args.path)/"contentctl_test.yml", args.mode) return base_config @@ -104,9 +108,19 @@ def initialize(args) -> None: def build(args, config:Union[Config,None]=None) -> DirectorOutputDto: if config == None: config = start(args) - product_type = SecurityContentProduct.SPLUNK_APP + if args.type == "app": + product_type = SecurityContentProduct.SPLUNK_APP + elif args.type == "ssa": + product_type = SecurityContentProduct.SSA + elif args.type == "api": + product_type = SecurityContentProduct.API + else: + print(f"Invalid build type. Valid options app, ssa or api") + sys.exit(1) director_input_dto = DirectorInputDto( - input_path=os.path.abspath(args.path), product=product_type, config=config + input_path=os.path.abspath(args.path), + product=product_type, + config=config ) generate_input_dto = GenerateInputDto(director_input_dto) @@ -128,19 +142,63 @@ def acs_deploy(args) -> None: def test(args: argparse.Namespace): args = configure_unattended(args) + config = start(args, read_test_file=True) + if config.test is None: + raise Exception("Error parsing test configuration. Test Object was None.") # set some arguments that are not # yet exposed/written properly in # the config file - config.test.mode=DetectionTestingMode(args.mode) - config.test.num_containers=1 - config.test.post_test_behavior=PostTestBehavior(args.behavior) - config.test.detections_list=args.detections_list - - + if args.infrastructure is not None: + config.test.infrastructure_config.infrastructure_type = DetectionTestingTargetInfrastructure(args.infrastructure) + if args.mode is not None: + config.test.mode=DetectionTestingMode(args.mode) + if args.behavior is not None: + config.test.post_test_behavior=PostTestBehavior(args.behavior) + if args.detections_list is not None: + config.test.detections_list=args.detections_list + + + if config.test.infrastructure_config.infrastructure_type == DetectionTestingTargetInfrastructure.container: + if args.num_containers is None: + raise Exception("Error - trying to start a test using container infrastructure but no value for --num_containers was found") + config.test.infrastructure_config.infrastructures = Infrastructure.get_infrastructure_containers(args.num_containers) + elif config.test.infrastructure_config.infrastructure_type == DetectionTestingTargetInfrastructure.server: + if args.server_info is None and os.environ.get(SERVER_ARGS_ENV_VARIABLE) is None: + if len(config.test.infrastructure_config.infrastructures) == 0: + raise Exception("Error - trying to start a test using server infrastructure, but server information was not stored " + "in contentctl_test.yml or passed on the command line. 
Please see the documentation for --server_info " + "at the command line or 'infrastructures' in contentctl.yml.") + else: + print("Using server configuration from: [contentctl_test.yml infrastructures section]") + + else: + if args.server_info is not None: + print("Using server configuration from: [command line]") + pass + elif os.environ.get(SERVER_ARGS_ENV_VARIABLE) is not None: + args.server_info = os.environ.get(SERVER_ARGS_ENV_VARIABLE,"").split(';') + print(f"Using server configuration from: [{SERVER_ARGS_ENV_VARIABLE} environment variable]") + else: + raise Exception(f"Server infrastructure information not passed in contentctl_test.yml file, using --server_info switch on the command line, or in the {SERVER_ARGS_ENV_VARIABLE} environment variable") + # if server info was provided on the command line, us that. Otherwise use the env + + + + config.test.infrastructure_config.infrastructures = [] + + for server in args.server_info: + address,username,password,web_ui_port,hec_port,api_port = server.split(",") + config.test.infrastructure_config.infrastructures.append(Infrastructure(splunk_app_username=username, + splunk_app_password=password, + instance_address=address, + hec_port=int(hec_port), + web_ui_port=int(web_ui_port), + api_port=int(api_port))) + # We do this before generating the app to save some time if options are incorrect. # For example, if the detection(s) we are trying to test do not exist githubService = GithubService(config.test) @@ -158,7 +216,7 @@ def test(args: argparse.Namespace): title=config.build.name, release=config.build.version, http_path=None, - local_path=str(pathlib.Path(config.build.path_root)/f"{config.build.name}.tar.gz"), + local_path=str(pathlib.Path(config.build.path_root)/f"{config.build.name}-{config.build.version}.tar.gz"), description=config.build.description, splunkbase_path=None, force_local=True @@ -180,27 +238,33 @@ def test(args: argparse.Namespace): test = Test() - try: - - result = test.execute(test_input_dto) - # This return code is important. Even if testing - # fully completes, if everything does not pass then - # we want to return a nonzero status code - if result: - sys.exit(0) - else: - sys.exit(1) - - except Exception as e: - print(f"Error running contentctl test: {str(e)}") + + result = test.execute(test_input_dto) + # This return code is important. Even if testing + # fully completes, if everything does not pass then + # we want to return a nonzero status code + if result: + sys.exit(0) + else: sys.exit(1) + def validate(args) -> None: config = start(args) - product_type = SecurityContentProduct.SPLUNK_APP + if args.type == "app": + product_type = SecurityContentProduct.SPLUNK_APP + elif args.type == "ssa": + product_type = SecurityContentProduct.SSA + elif args.type == "api": + product_type = SecurityContentProduct.API + else: + print(f"Invalid build type. 
Valid options app, ssa or api") + sys.exit(1) director_input_dto = DirectorInputDto( - input_path=pathlib.Path(args.path), product=product_type, config=config + input_path=pathlib.Path(args.path), + product=product_type, + config=config ) validate_input_dto = ValidateInputDto(director_input_dto=director_input_dto) validate = Validate() @@ -249,6 +313,33 @@ def reporting(args) -> None: reporting.execute(reporting_input_dto) +def convert(args) -> None: + if args.data_model == 'cim': + data_model = SigmaConverterTarget.CIM + elif args.data_model == 'raw': + data_model = SigmaConverterTarget.RAW + elif args.data_model == 'ocsf': + data_model = SigmaConverterTarget.OCSF + else: + print("ERROR: data model " + args.data_model + " not supported") + sys.exit(1) + + sigma_converter_input_dto = SigmaConverterInputDto( + data_model = data_model, + detection_path = args.detection_path, + detection_folder = args.detection_folder, + input_path = args.path, + log_source = args.log_source + ) + + convert_input_dto = ConvertInputDto( + sigma_converter_input_dto = sigma_converter_input_dto, + output_path = os.path.abspath(args.output) + ) + convert = Convert() + convert.execute(convert_input_dto) + + def main(): """ main function parses the arguments passed to the script and calls the respctive method. @@ -306,6 +397,8 @@ def main(): help="Run a test of the detections against a Splunk Server or Splunk Docker Container", ) + convert_parser = actions_parser.add_parser("convert", help="Convert a sigma detection to a Splunk ESCU detection.") + init_parser.set_defaults(func=initialize) init_parser.add_argument("--demo", action=argparse.BooleanOptionalAction, help="Use this flag to pre-populate the content pack " @@ -313,8 +406,24 @@ def main(): "and on detection that will fail 'contentctl test'. This is useful " "for demonstrating contentctl functionality.") + validate_parser.add_argument( + "-t", + "--type", + required=False, + type=str, + default="app", + help="Type of package: app, ssa or api" + ) validate_parser.set_defaults(func=validate) + build_parser.add_argument( + "-t", + "--type", + required=False, + type=str, + default="app", + help="Type of package: app, ssa or api" + ) build_parser.set_defaults(func=build) docs_parser.set_defaults(func=doc_gen) @@ -334,10 +443,18 @@ def main(): api_deploy_parser.set_defaults(func=api_deploy) + test_parser.add_argument( + "-t", + "--type", + required=False, + type=str, + default="app", + help="Type of package: app, ssa or api" + ) test_parser.add_argument( "--mode", required=False, - default=DetectionTestingMode.all.name, + default=None, type=str, choices=DetectionTestingMode._member_names_, help="Controls which detections to test. 'all' will test all detections in the repo." @@ -347,7 +464,7 @@ def main(): test_parser.add_argument( "--behavior", required=False, - default=PostTestBehavior.pause_on_failure.name, + default=None, type=str, choices=PostTestBehavior._member_names_, help="Controls what to do when a test completes. 'always_pause' means that the state of " @@ -364,15 +481,35 @@ def main(): "--detections_list", required=False, nargs="+", + default=None, type=str, help="An explicit list " "of detections to test. Their paths should be relative to the app path.", ) + test_parser.add_argument("--unattended", action=argparse.BooleanOptionalAction) + + test_parser.add_argument("--infrastructure", required=False, type=str, + choices=DetectionTestingTargetInfrastructure._member_names_, default=None, + help="Determines what infrastructure to use for testing. 
The options are " + "container and server. Container will set up Splunk Container(s) at runtime, " + "install all relevant apps, and perform configurations. Server will use " + "preconfigured server(s) either specified on the command line or in " + "contentctl_test.yml.") + test_parser.add_argument("--num_containers", required=False, default=1, type=int) + test_parser.add_argument("--server_info", required=False, default=None, type=str, nargs='+') test_parser.set_defaults(func=test) + convert_parser.add_argument("-dm", "--data_model", required=False, type=str, default="cim", help="converter target, choose between cim, raw, ocsf") + convert_parser.add_argument("-lo", "--log_source", required=False, type=str, help="converter log source") + convert_parser.add_argument("-dp", "--detection_path", required=False, type=str, help="path to a single detection") + convert_parser.add_argument("-df", "--detection_folder", required=False, type=str, help="path to a detection folder") + convert_parser.add_argument("-o", "--output", required=True, type=str, help="output path to store the detections") + convert_parser.set_defaults(func=convert) + + # parse them args = parser.parse_args() @@ -383,5 +520,6 @@ def main(): print(f"Error during contentctl:\n{str(e)}") import traceback traceback.print_exc() + traceback.print_stack() sys.exit(1) diff --git a/contentctl/helper/config_handler.py b/contentctl/helper/config_handler.py index 958efde0..44882d02 100644 --- a/contentctl/helper/config_handler.py +++ b/contentctl/helper/config_handler.py @@ -5,7 +5,7 @@ from contentctl.input.yml_reader import YmlReader from contentctl.objects.config import Config, TestConfig - +from contentctl.objects.enums import DetectionTestingMode class ConfigHandler: @@ -27,7 +27,7 @@ def read_config(cls, config_path: pathlib.Path) -> Config: return config @classmethod - def read_test_config(cls, test_config_path: pathlib.Path) -> TestConfig: + def read_test_config(cls, test_config_path: pathlib.Path, mode:DetectionTestingMode) -> TestConfig: try: yml_dict = YmlReader.load_file(test_config_path, add_fields=False) except: @@ -35,6 +35,8 @@ def read_test_config(cls, test_config_path: pathlib.Path) -> TestConfig: sys.exit(1) try: + if mode != DetectionTestingMode.changes: + yml_dict['version_control_config'] = None test_config = TestConfig.parse_obj(yml_dict) except Exception as e: raise Exception(f"Error reading test config file: {str(e)}") diff --git a/contentctl/helper/constants.py b/contentctl/helper/constants.py new file mode 100644 index 00000000..f9de2cdd --- /dev/null +++ b/contentctl/helper/constants.py @@ -0,0 +1,134 @@ +ATTACK_TACTICS_KILLCHAIN_MAPPING = { + "Reconnaissance": "Reconnaissance", + "Resource Development": "Weaponization", + "Initial Access": "Delivery", + "Execution": "Installation", + "Persistence": "Installation", + "Privilege Escalation": "Exploitation", + "Defense Evasion": "Exploitation", + "Credential Access": "Exploitation", + "Discovery": "Exploitation", + "Lateral Movement": "Exploitation", + "Collection": "Exploitation", + "Command And Control": "Command And Control", + "Command And Control": "Command And Control", + "Exfiltration": "Actions on Objectives", + "Impact": "Actions on Objectives" +} + +SES_CONTEXT_MAPPING = { + "Unknown": 0, + "Source:Endpoint": 10, + "Source:AD": 11, + "Source:Firewall": 12, + "Source:Application Log": 13, + "Source:IPS": 14, + "Source:Cloud Data": 15, + "Source:Correlation": 16, + "Source:Printer": 17, + "Source:Badge": 18, + "Scope:Internal": 20, + "Scope:External": 21, + 
"Scope:Inbound": 22, + "Scope:Outbound": 23, + "Scope:Local": 24, + "Scope:Network": 25, + "Outcome:Blocked": 30, + "Outcome:Allowed": 31, + "Stage:Recon": 40, + "Stage:Initial Access": 41, + "Stage:Execution": 42, + "Stage:Persistence": 43, + "Stage:Privilege Escalation": 44, + "Stage:Defense Evasion": 45, + "Stage:Credential Access": 46, + "Stage:Discovery": 47, + "Stage:Lateral Movement": 48, + "Stage:Collection": 49, + "Stage:Exfiltration": 50, + "Stage:Command And Control": 51, + "Consequence:Infection": 60, + "Consequence:Reduced Visibility": 61, + "Consequence:Data Destruction": 62, + "Consequence:Denial Of Service": 63, + "Consequence:Loss Of Control": 64, + "Rares:Rare User": 70, + "Rares:Rare Process": 71, + "Rares:Rare Device": 72, + "Rares:Rare Domain": 73, + "Rares:Rare Network": 74, + "Rares:Rare Location": 75, + "Other:Peer Group": 80, + "Other:Brute Force": 81, + "Other:Policy Violation": 82, + "Other:Threat Intelligence": 83, + "Other:Flight Risk": 84, + "Other:Removable Storage": 85 +} + +SES_KILL_CHAIN_MAPPINGS = { + "Unknown": 0, + "Reconnaissance": 1, + "Weaponization": 2, + "Delivery": 3, + "Exploitation": 4, + "Installation": 5, + "Command And Control": 6, + "Actions on Objectives": 7 +} + +SES_OBSERVABLE_ROLE_MAPPING = { + "Other": -1, + "Unknown": 0, + "Actor": 1, + "Target": 2, + "Attacker": 3, + "Victim": 4, + "Parent Process": 5, + "Child Process": 6, + "Known Bad": 7, + "Data Loss": 8, + "Observer": 9 +} + +SES_OBSERVABLE_TYPE_MAPPING = { + "Unknown": 0, + "Hostname": 1, + "IP Address": 2, + "MAC Address": 3, + "User Name": 4, + "Email Address": 5, + "URL String": 6, + "File Name": 7, + "File Hash": 8, + "Process Name": 9, + "Ressource UID": 10, + "Endpoint": 20, + "User": 21, + "Email": 22, + "Uniform Resource Locator": 23, + "File": 24, + "Process": 25, + "Geo Location": 26, + "Container": 27, + "Registry Key": 28, + "Registry Value": 29, + "Other": 99 +} + +SES_ATTACK_TACTICS_ID_MAPPING = { + "Reconnaissance": "TA0043", + "Resource_Development": "TA0042", + "Initial_Access": "TA0001", + "Execution": "TA0002", + "Persistence": "TA0003", + "Privilege_Escalation": "TA0004", + "Defense_Evasion": "TA0005", + "Credential_Access": "TA0006", + "Discovery": "TA0007", + "Lateral_Movement": "TA0008", + "Collection": "TA0009", + "Command_and_Control": "TA0011", + "Exfiltration": "TA0010", + "Impact": "TA0040" +} \ No newline at end of file diff --git a/contentctl/helper/utils.py b/contentctl/helper/utils.py index 0c8901c6..ddf0e407 100644 --- a/contentctl/helper/utils.py +++ b/contentctl/helper/utils.py @@ -179,19 +179,19 @@ def validate_git_pull_request(repo_path: str, pr_number: int) -> str: return hash - @staticmethod - def check_required_fields( - thisField: str, definedFields: dict, requiredFields: list[str] - ): - missing_fields = [ - field for field in requiredFields if field not in definedFields - ] - if len(missing_fields) > 0: - raise ( - ValueError( - f"Could not validate - please resolve other errors resulting in missing fields {missing_fields}" - ) - ) + # @staticmethod + # def check_required_fields( + # thisField: str, definedFields: dict, requiredFields: list[str] + # ): + # missing_fields = [ + # field for field in requiredFields if field not in definedFields + # ] + # if len(missing_fields) > 0: + # raise ( + # ValueError( + # f"Could not validate - please resolve other errors resulting in missing fields {missing_fields}" + # ) + # ) @staticmethod def verify_file_exists( @@ -207,15 +207,16 @@ def verify_file_exists( # Try to make a head request to 
verify existence of the file try: + req = requests.head( file_path, timeout=timeout_seconds, verify=True, allow_redirects=True ) if req.status_code > 400: - raise (Exception(f"Return code {req.status_code}")) + raise (Exception(f"Return code={req.status_code}")) except Exception as e: raise ( Exception( - f"Cannot confirm the existence of '{file_path}' - are you sure it exists: {str(e)}" + f"HTTP Resolution Failed: {str(e)}" ) ) diff --git a/contentctl/input/backend_splunk_ba.py b/contentctl/input/backend_splunk_ba.py new file mode 100644 index 00000000..fd759ce4 --- /dev/null +++ b/contentctl/input/backend_splunk_ba.py @@ -0,0 +1,124 @@ +import re +from sigma.conversion.state import ConversionState +from sigma.rule import SigmaRule +from sigma.conversion.base import TextQueryBackend +from sigma.conversion.deferred import DeferredTextQueryExpression +from sigma.conditions import ConditionFieldEqualsValueExpression, ConditionOR, ConditionAND, ConditionNOT, ConditionItem +from sigma.types import SigmaCompareExpression +from sigma.exceptions import SigmaFeatureNotSupportedByBackendError +from sigma.pipelines.splunk.splunk import splunk_sysmon_process_creation_cim_mapping, splunk_windows_registry_cim_mapping, splunk_windows_file_event_cim_mapping + +from contentctl.objects.detection import Detection + +from typing import ClassVar, Dict, List, Optional, Pattern, Tuple + + +class SplunkBABackend(TextQueryBackend): + """Splunk SPL backend.""" + precedence: ClassVar[Tuple[ConditionItem, ConditionItem, ConditionItem]] = (ConditionNOT, ConditionOR, ConditionAND) + group_expression : ClassVar[str] = "({expr})" + parenthesize : bool = True + + or_token : ClassVar[str] = "OR" + and_token : ClassVar[str] = "AND" + not_token : ClassVar[str] = "NOT" + eq_token : ClassVar[str] = "=" + + field_quote: ClassVar[str] = '"' + field_quote_pattern: ClassVar[Pattern] = re.compile("^[\w.]+$") + + str_quote : ClassVar[str] = '"' + escape_char : ClassVar[str] = "\\" + wildcard_multi : ClassVar[str] = "%" + wildcard_single : ClassVar[str] = "%" + add_escaped : ClassVar[str] = "\\" + + re_expression : ClassVar[str] = "match({field}, /(?i){regex}/)=true" + re_escape_char : ClassVar[str] = "" + re_escape : ClassVar[Tuple[str]] = ('"',) + + cidr_expression : ClassVar[str] = "{value}" + + compare_op_expression : ClassVar[str] = "{field}{operator}{value}" + compare_operators : ClassVar[Dict[SigmaCompareExpression.CompareOperators, str]] = { + SigmaCompareExpression.CompareOperators.LT : "<", + SigmaCompareExpression.CompareOperators.LTE : "<=", + SigmaCompareExpression.CompareOperators.GT : ">", + SigmaCompareExpression.CompareOperators.GTE : ">=", + } + + field_null_expression : ClassVar[str] = "{field} IS NOT NULL" + + convert_or_as_in : ClassVar[bool] = False + convert_and_as_in : ClassVar[bool] = False + in_expressions_allow_wildcards : ClassVar[bool] = True + field_in_list_expression : ClassVar[str] = "{field} {op} ({list})" + or_in_operator : ClassVar[Optional[str]] = "IN" + list_separator : ClassVar[str] = ", " + + unbound_value_str_expression : ClassVar[str] = '{value}' + unbound_value_num_expression : ClassVar[str] = '{value}' + unbound_value_re_expression : ClassVar[str] = '{value}' + + deferred_start : ClassVar[str] = " " + deferred_separator : ClassVar[str] = " OR " + deferred_only_query : ClassVar[str] = "*" + + wildcard_match_expression : ClassVar[Optional[str]] = "{field} LIKE {value}" + + + def __init__(self, processing_pipeline: Optional["sigma.processing.pipeline.ProcessingPipeline"] = None, 
collect_errors: bool = False, min_time : str = "-30d", max_time : str = "now", detection : Detection = None, field_mapping: dict = None, **kwargs): + super().__init__(processing_pipeline, collect_errors, **kwargs) + self.min_time = min_time or "-30d" + self.max_time = max_time or "now" + self.detection = detection + self.field_mapping = field_mapping + + def finalize_query_data_model(self, rule: SigmaRule, query: str, index: int, state: ConversionState) -> str: + + try: + fields = state.processing_state["fields"] + except KeyError: + raise SigmaFeatureNotSupportedByBackendError("No fields specified by processing pipeline") + + # fields_input_parsing = '' + # for count, value in enumerate(fields): + # fields_input_parsing = fields_input_parsing + value + '=ucast(map_get(input_event, "' + value + '"), "string", null)' + # if not count == len(fields) - 1: + # fields_input_parsing = fields_input_parsing + ', ' + + detection_str = """ +$main = from source +| eval timestamp = time +| eval metadata_uid = metadata.uid +""".replace("\n", " ") + + parsed_fields = [] + + for field in self.field_mapping["mapping"].keys(): + mapped_field = self.field_mapping["mapping"][field] + parent = 'parent' + i = 1 + values = mapped_field.split('.') + for val in values: + if parent == "parent": + parent = val + continue + else: + new_val = parent + '_' + val + if new_val in parsed_fields: + parent = new_val + i = i + 1 + continue + parser_str = '| eval ' + new_val + ' = ' + parent + '.' + val + ' ' + detection_str = detection_str + parser_str + parsed_fields.append(new_val) + parent = new_val + i = i + 1 + + detection_str = detection_str + "| where " + query + detection_str = detection_str.replace("\\\\\\\\", "\\\\") + return detection_str + + def finalize_output_data_model(self, queries: List[str]) -> List[str]: + return queries \ No newline at end of file diff --git a/contentctl/input/baseline_builder.py b/contentctl/input/baseline_builder.py index 770b50b6..d4608a3b 100644 --- a/contentctl/input/baseline_builder.py +++ b/contentctl/input/baseline_builder.py @@ -18,34 +18,44 @@ def setObject(self, path: pathlib.Path) -> None: try: self.baseline = Baseline.parse_obj(yml_dict) - except ValidationError as e: print('Validation Error for file ' + str(path)) print(e) sys.exit(1) + def addDeployment(self, deployments: list) -> None: - matched_deployments = [] + if not self.baseline.deployment: + + matched_deployments = [] + + for d in deployments: + d_tags = dict(d.tags) + baseline_dict = self.baseline.dict() + baseline_tags_dict = self.baseline.tags.dict() + for d_tag in d_tags.keys(): + for attr in baseline_dict.keys(): + if attr == d_tag: + if isinstance(baseline_dict[attr], str): + if baseline_dict[attr] == d_tags[d_tag]: + matched_deployments.append(d) + elif isinstance(baseline_dict[attr], list): + if d_tags[d_tag] in baseline_dict[attr]: + matched_deployments.append(d) - for d in deployments: - d_tags = dict(d.tags) - for d_tag in d_tags.keys(): - for attr in dir(self.baseline): - if not (attr.startswith('__') or attr.startswith('_')): + for attr in baseline_tags_dict.keys(): if attr == d_tag: - if type(self.baseline.__getattribute__(attr)) is str: - attr_values = [self.baseline.__getattribute__(attr)] - else: - attr_values = self.baseline.__getattribute__(attr) - - for attr_value in attr_values: - if attr_value == d_tags[d_tag]: + if isinstance(baseline_tags_dict[attr], str): + if baseline_tags_dict[attr] == d_tags[d_tag]: + matched_deployments.append(d) + elif isinstance(baseline_tags_dict[attr], list): + if 
d_tags[d_tag] in baseline_tags_dict[attr]: matched_deployments.append(d) - if len(matched_deployments) == 0: - raise ValueError('No deployment found for baseline: ' + self.baseline.name) + if len(matched_deployments) == 0: + raise ValueError('No deployment found for baseline: ' + self.baseline.name) - self.baseline.deployment = matched_deployments[-1] + self.baseline.deployment = matched_deployments[-1] def reset(self) -> None: diff --git a/contentctl/input/basic_builder.py b/contentctl/input/basic_builder.py index afea476d..c1e5c6f3 100644 --- a/contentctl/input/basic_builder.py +++ b/contentctl/input/basic_builder.py @@ -17,7 +17,6 @@ class BasicBuilder(): def setObject(self, path: pathlib.Path, type: SecurityContentType) -> None: - #print(path) yml_dict = YmlReader.load_file(path) if type == SecurityContentType.deployments: if "alert_action" in yml_dict: diff --git a/contentctl/input/detection_builder.py b/contentctl/input/detection_builder.py index bbd8d684..163654a2 100644 --- a/contentctl/input/detection_builder.py +++ b/contentctl/input/detection_builder.py @@ -8,10 +8,12 @@ from contentctl.objects.detection import Detection from contentctl.objects.security_content_object import SecurityContentObject from contentctl.objects.macro import Macro +from contentctl.objects.lookup import Lookup from contentctl.objects.mitre_attack_enrichment import MitreAttackEnrichment from contentctl.enrichments.cve_enrichment import CveEnrichment from contentctl.enrichments.splunk_app_enrichment import SplunkAppEnrichment from contentctl.objects.config import ConfigDetectionConfiguration +from contentctl.helper.constants import * class DetectionBuilder(): @@ -25,9 +27,29 @@ def setObject(self, path: str) -> None: self.security_content_obj.source = os.path.split(os.path.dirname(self.security_content_obj.file_path))[-1] - def addDeployment(self, detection_configuration: ConfigDetectionConfiguration) -> None: + def addDeployment(self, deployments: list) -> None: if self.security_content_obj: - self.security_content_obj.deployment = detection_configuration + if not self.security_content_obj.deployment: + matched_deployments = [] + for d in deployments: + d_tags = dict(d.tags) + for d_tag in d_tags.keys(): + for attr in dir(self.security_content_obj): + if not (attr.startswith('__') or attr.startswith('_')): + if attr == d_tag: + if type(self.security_content_obj.__getattribute__(attr)) is str: + attr_values = [self.security_content_obj.__getattribute__(attr)] + else: + attr_values = self.security_content_obj.__getattribute__(attr) + + for attr_value in attr_values: + if attr_value == d_tags[d_tag]: + matched_deployments.append(d) + + if len(matched_deployments) == 0: + self.security_content_obj.deployment = None + else: + self.security_content_obj.deployment = matched_deployments[-1] def addRBA(self) -> None: @@ -41,25 +63,25 @@ def addRBA(self) -> None: for entity in self.security_content_obj.tags.observable: risk_object = dict() - if entity['type'].lower() in risk_object_user_types: + if entity.type.lower() in risk_object_user_types: risk_object['risk_object_type'] = 'user' - risk_object['risk_object_field'] = entity['name'] + risk_object['risk_object_field'] = entity.name risk_object['risk_score'] = self.security_content_obj.tags.risk_score risk_objects.append(risk_object) - elif entity['type'].lower() in risk_object_system_types: + elif entity.type.lower() in risk_object_system_types: risk_object['risk_object_type'] = 'system' - risk_object['risk_object_field'] = entity['name'] + 
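# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the patch): the addRBA change switches from
# dict-style access (entity['type']) to attribute access (entity.type) on the
# new Observable model, while keeping the same mapping rule: user/system
# observables become risk objects, observables with an "Attacker" role become
# threat objects, and anything else falls back to "other". A simplified,
# self-contained version (the _Observable stand-in and the example type sets
# are assumptions; the real sets come from the risk_object_*_types constants):

from dataclasses import dataclass

@dataclass
class _Observable:
    name: str
    type: str
    role: list

def observable_to_risk_object(obs: _Observable, risk_score: int,
                              user_types=frozenset({"user", "username", "email address"}),
                              system_types=frozenset({"device", "endpoint", "hostname", "ip address"})) -> dict:
    if obs.type.lower() in user_types:
        return {"risk_object_type": "user", "risk_object_field": obs.name, "risk_score": risk_score}
    if obs.type.lower() in system_types:
        return {"risk_object_type": "system", "risk_object_field": obs.name, "risk_score": risk_score}
    if "Attacker" in obs.role:
        return {"threat_object_field": obs.name, "threat_object_type": obs.type.lower()}
    return {"risk_object_type": "other", "risk_object_field": obs.name, "risk_score": risk_score}

# e.g. observable_to_risk_object(_Observable("user", "User", ["Victim"]), 56)
# -> {"risk_object_type": "user", "risk_object_field": "user", "risk_score": 56}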
risk_object['risk_object_field'] = entity.name risk_object['risk_score'] = self.security_content_obj.tags.risk_score risk_objects.append(risk_object) - elif 'role' in entity and 'Attacker' in entity['role']: - risk_object['threat_object_field'] = entity['name'] - risk_object['threat_object_type'] = entity['type'].lower() + elif 'Attacker' in entity.role: + risk_object['threat_object_field'] = entity.name + risk_object['threat_object_type'] = entity.type.lower() risk_objects.append(risk_object) else: risk_object['risk_object_type'] = 'other' - risk_object['risk_object_field'] = entity['name'] + risk_object['risk_object_field'] = entity.name risk_object['risk_score'] = self.security_content_obj.tags.risk_score risk_objects.append(risk_object) continue @@ -145,18 +167,10 @@ def addBaseline(self, baselines: list) -> None: self.security_content_obj.baselines = matched_baselines - def addUnitTest(self, tests: list) -> None: + def addUnitTest(self) -> None: if self.security_content_obj: - if self.security_content_obj.tests and len(self.security_content_obj.tests) > 0: - return - elif self.security_content_obj.type not in ["Correlation"] and \ - self.security_content_obj.deprecated == False and \ - self.security_content_obj.experimental == False and \ - self.security_content_obj.tags.manual_test == None: - raise(Exception(f"No tests found found {self.security_content_obj.file_path}")) - #print(f"No tests found found {self.security_content_obj.file_path}") - return None - + if self.security_content_obj.tests: + self.security_content_obj.test = self.security_content_obj.tests[0] def addMitreAttackEnrichment(self, attack_enrichment: dict) -> None: @@ -181,37 +195,23 @@ def addMitreAttackEnrichment(self, attack_enrichment: dict) -> None: def addMacros(self, macros: list) -> None: if self.security_content_obj: - macros_found = re.findall(r'`([^\s]+)`', self.security_content_obj.search) - macros_filtered = set() - self.security_content_obj.macros = [] - - for macro in macros_found: - if not '_filter' in macro and not 'drop_dm_object_name' in macro: - start = macro.find('(') - if start != -1: - macros_filtered.add(macro[:start]) - else: - macros_filtered.add(macro) - - for macro_name in macros_filtered: - for macro in macros: - if macro_name == macro.name: - self.security_content_obj.macros.append(macro) - + found_macros, missing_macros = Macro.get_macros(self.security_content_obj.search, macros) name = self.security_content_obj.name.replace(' ', '_').replace('-', '_').replace('.', '_').replace('/', '_').lower() + '_filter' macro = Macro(name=name, definition='search *', description='Update this macro to limit the output results to filter out false positives.') + found_macros.append(macro) + self.security_content_obj.macros = found_macros + if len(missing_macros) > 0: + raise Exception(f"{self.security_content_obj.name} is missing the following macros: {missing_macros}") - self.security_content_obj.macros.append(macro) def addLookups(self, lookups: list) -> None: if self.security_content_obj: - lookups_found = re.findall(r'lookup (?:update=true)?(?:append=t)?\s*([^\s]*)', self.security_content_obj.search) - self.security_content_obj.lookups = [] - for lookup_name in lookups_found: - for lookup in lookups: - if lookup.name == lookup_name: - self.security_content_obj.lookups.append(lookup) + found_lookups, missing_lookups = Lookup.get_lookups(self.security_content_obj.search, lookups) + self.security_content_obj.lookups = found_lookups + if len(missing_lookups) > 0: + raise 
Exception(f"{self.security_content_obj.name} is missing the following lookups: {missing_lookups}") + def addCve(self) -> None: @@ -221,6 +221,7 @@ def addCve(self) -> None: for cve in self.security_content_obj.tags.cve: self.security_content_obj.cve_enrichment.append(CveEnrichment.enrich_cve(cve)) + def addSplunkApp(self) -> None: if self.security_content_obj: self.security_content_obj.splunk_app_enrichment = [] @@ -228,6 +229,7 @@ def addSplunkApp(self) -> None: for splunk_app in self.security_content_obj.tags.supported_tas: self.security_content_obj.splunk_app_enrichment.append(SplunkAppEnrichment.enrich_splunk_app(splunk_app)) + def addCIS(self) -> None: if self.security_content_obj: if self.security_content_obj.tags.security_domain == "network": @@ -246,6 +248,7 @@ def addKillChainPhase(self) -> None: kill_chain_phases.append(ATTACK_TACTICS_KILLCHAIN_MAPPING[mitre_attack_tactic]) self.security_content_obj.tags.kill_chain_phases = list(dict.fromkeys(kill_chain_phases)) + def addNist(self) -> None: if self.security_content_obj: if self.security_content_obj.type == "TTP": @@ -253,6 +256,7 @@ def addNist(self) -> None: else: self.security_content_obj.tags.nist = ["DE.AE"] + def addDatamodel(self) -> None: if self.security_content_obj: self.security_content_obj.datamodel = [] diff --git a/contentctl/input/director.py b/contentctl/input/director.py index 7dbeadda..6fa10068 100644 --- a/contentctl/input/director.py +++ b/contentctl/input/director.py @@ -14,10 +14,11 @@ from contentctl.objects.deployment import Deployment from contentctl.objects.macro import Macro from contentctl.objects.lookup import Lookup -from contentctl.objects.unit_test import UnitTest +from contentctl.objects.ssa_detection import SSADetection from contentctl.input.basic_builder import BasicBuilder from contentctl.input.detection_builder import DetectionBuilder +from contentctl.input.ssa_detection_builder import SSADetectionBuilder from contentctl.input.playbook_builder import PlaybookBuilder from contentctl.input.baseline_builder import BaselineBuilder from contentctl.input.investigation_builder import InvestigationBuilder @@ -48,7 +49,8 @@ class DirectorOutputDto: playbooks: list[Playbook] macros: list[Macro] lookups: list[Lookup] - tests: list[UnitTest] + deployments: list[Deployment] + ssa_detections: list[SSADetection] class Director(): @@ -60,6 +62,7 @@ class Director(): investigation_builder: InvestigationBuilder story_builder: StoryBuilder detection_builder: DetectionBuilder + ssa_detection_builder: SSADetectionBuilder attack_enrichment: dict config: Config @@ -81,8 +84,9 @@ def execute(self, input_dto: DirectorInputDto) -> None: self.investigation_builder = InvestigationBuilder() self.story_builder = StoryBuilder() self.detection_builder = DetectionBuilder() + self.ssa_detection_builder = SSADetectionBuilder() if self.input_dto.product == SecurityContentProduct.SPLUNK_APP or self.input_dto.product == SecurityContentProduct.API: - self.createSecurityContent(SecurityContentType.unit_tests) + self.createSecurityContent(SecurityContentType.deployments) self.createSecurityContent(SecurityContentType.lookups) self.createSecurityContent(SecurityContentType.macros) self.createSecurityContent(SecurityContentType.baselines) @@ -91,13 +95,14 @@ def execute(self, input_dto: DirectorInputDto) -> None: self.createSecurityContent(SecurityContentType.detections) self.createSecurityContent(SecurityContentType.stories) elif self.input_dto.product == SecurityContentProduct.SSA: - 
self.createSecurityContent(SecurityContentType.unit_tests) - self.createSecurityContent(SecurityContentType.detections) + self.createSecurityContent(SecurityContentType.ssa_detections) def createSecurityContent(self, type: SecurityContentType) -> None: objects = [] - if type == SecurityContentType.unit_tests: + if type == SecurityContentType.ssa_detections: + files = Utils.get_all_yml_files_from_directory(os.path.join(self.input_dto.input_path, 'ssa_detections')) + elif type == SecurityContentType.unit_tests: files = Utils.get_all_yml_files_from_directory(os.path.join(self.input_dto.input_path, 'tests')) else: files = Utils.get_all_yml_files_from_directory(os.path.join(self.input_dto.input_path, str(type.name))) @@ -158,11 +163,12 @@ def createSecurityContent(self, type: SecurityContentType) -> None: self.constructDetection(self.detection_builder, file) detection = self.detection_builder.getObject() self.output_dto.detections.append(detection) - - elif type == SecurityContentType.unit_tests: - self.constructTest(self.basic_builder, file) - test = self.basic_builder.getObject() - self.output_dto.tests.append(test) + + elif type == SecurityContentType.ssa_detections: + self.constructSSADetection(self.ssa_detection_builder, file) + detection = self.ssa_detection_builder.getObject() + if detection.status == "production" or detection.status == "validated": + self.output_dto.ssa_detections.append(detection) else: raise Exception(f"Unsupported type: [{type}]") @@ -186,7 +192,8 @@ def createSecurityContent(self, type: SecurityContentType) -> None: def constructDetection(self, builder: DetectionBuilder, file_path: str) -> None: builder.reset() builder.setObject(file_path) - builder.addDeployment(self.input_dto.config.detection_configuration) + builder.addDeployment(self.output_dto.deployments) + builder.addMitreAttackEnrichment(self.attack_enrichment) builder.addKillChainPhase() builder.addCIS() builder.addNist() @@ -198,7 +205,6 @@ def constructDetection(self, builder: DetectionBuilder, file_path: str) -> None: builder.addMappings() builder.addBaseline(self.output_dto.baselines) builder.addPlaybook(self.output_dto.playbooks) - builder.addUnitTest(self.output_dto.tests) builder.addMacros(self.output_dto.macros) builder.addLookups(self.output_dto.lookups) @@ -212,6 +218,19 @@ def constructDetection(self, builder: DetectionBuilder, file_path: str) -> None: builder.addSplunkApp() + def constructSSADetection(self, builder: DetectionBuilder, file_path: str) -> None: + builder.reset() + builder.setObject(file_path) + builder.addMitreAttackEnrichment(self.attack_enrichment) + builder.addKillChainPhase() + builder.addCIS() + builder.addNist() + builder.addAnnotations() + builder.addMappings() + builder.addUnitTest() + builder.addRBA() + + def constructStory(self, builder: StoryBuilder, file_path: str) -> None: builder.reset() builder.setObject(file_path) @@ -224,8 +243,6 @@ def constructStory(self, builder: StoryBuilder, file_path: str) -> None: def constructBaseline(self, builder: BaselineBuilder, file_path: str) -> None: builder.reset() builder.setObject(file_path) - print("skipping deployment for baseline for now...") - return builder.addDeployment(self.output_dto.deployments) @@ -233,6 +250,7 @@ def constructDeployment(self, builder: BasicBuilder, file_path: str) -> None: builder.reset() builder.setObject(file_path, SecurityContentType.deployments) + def constructLookup(self, builder: BasicBuilder, file_path: str) -> None: builder.reset() builder.setObject(file_path, SecurityContentType.lookups) diff 
--git a/contentctl/input/sigma_converter.py b/contentctl/input/sigma_converter.py new file mode 100644 index 00000000..89351943 --- /dev/null +++ b/contentctl/input/sigma_converter.py @@ -0,0 +1,420 @@ +import os +import sys +import copy +import pathlib + +from dataclasses import dataclass +from jinja2 import Environment, FileSystemLoader + +from sigma.processing.conditions import LogsourceCondition +from sigma.processing.transformations import AddConditionTransformation, FieldMappingTransformation, DetectionItemFailureTransformation, RuleFailureTransformation, SetStateTransformation +from sigma.processing.conditions import LogsourceCondition, IncludeFieldCondition, ExcludeFieldCondition, RuleProcessingItemAppliedCondition +from sigma.collection import SigmaCollection +from sigma.backends.splunk import SplunkBackend +from sigma.processing.pipeline import ProcessingItem, ProcessingPipeline + +from contentctl.input.yml_reader import YmlReader +from contentctl.objects.detection import Detection +from contentctl.objects.data_source import DataSource +from contentctl.helper.constants import * +from contentctl.objects.enums import * +from contentctl.helper.utils import Utils +from contentctl.input.backend_splunk_ba import SplunkBABackend + + +@dataclass(frozen=True) +class SigmaConverterInputDto: + data_model: SigmaConverterTarget + detection_path: str + detection_folder : str + input_path: str + log_source: str + + +@dataclass(frozen=True) +class SigmaConverterOutputDto: + detections: list + + +class SigmaConverter(): + output_dto : SigmaConverterOutputDto + + def __init__(self, output_dto: SigmaConverterOutputDto) -> None: + self.output_dto = output_dto + + + def execute(self, input_dto: SigmaConverterInputDto) -> None: + + detection_files = [] + errors = [] + + if input_dto.detection_path: + detection_files.append(input_dto.detection_path) + elif input_dto.detection_folder: + detection_files = Utils.get_all_yml_files_from_directory(input_dto.detection_folder) + else: + print("ERROR: --detection_path or --detection_folder needed.") + sys.exit(1) + + for detection_file in detection_files: + try: + detection = self.read_detection(str(detection_file)) + print("Converting detection: " + detection.name) + data_source = self.load_data_source(input_dto.input_path, detection.data_source[0]) + if not data_source: + print("ERROR: Didn't find data source with name: " + detection.data_source[0] + " for detection " + detection.name) + sys.exit(1) + + file_name = detection.name.replace(' ', '_').replace('-','_').replace('.','_').replace('/','_').lower() + + + if input_dto.data_model == SigmaConverterTarget.RAW: + if input_dto.log_source and input_dto.log_source != detection.data_source[0][0]: + try: + field_mapping = self.find_mapping(data_source.convert_to_log_source, 'data_source', input_dto.log_source) + except Exception as e: + print(e) + print("ERROR: Couldn't find data source mapping for log source " + input_dto.log_source + " for detection: " + detection.name) + sys.exit(1) + + detection = self.convert_detection_fields(detection, field_mapping) + + logsource_condition = self.get_logsource_condition(data_source) + processing_item = self.get_field_transformation_processing_item( + field_mapping['mapping'], + logsource_condition + ) + sigma_processing_pipeline = self.get_pipeline_from_processing_items([processing_item]) + splunk_backend = SplunkBackend(processing_pipeline=sigma_processing_pipeline) + data_source = self.load_data_source(input_dto.input_path, input_dto.log_source) + + else: + 
splunk_backend = SplunkBackend() + + sigma_rule = self.get_sigma_rule(detection, data_source) + search = splunk_backend.convert(sigma_rule)[0] + search = self.add_source_macro(search, data_source.type) + search = self.add_stats_count(search, data_source.raw_fields) + search = self.add_timeformat_conversion(search) + search = self.add_filter_macro(search, file_name) + + detection.file_path = file_name + '.yml' + + elif input_dto.data_model == SigmaConverterTarget.CIM: + logsource_condition = self.get_logsource_condition(data_source) + try: + field_mapping = self.find_mapping(data_source.field_mappings, 'data_model', 'cim') + except Exception as e: + print(e) + print("ERROR: Couldn't find data source mapping to cim for log source " + detection.data_source[0] + " and detection " + detection.name) + sys.exit(1) + + detection = self.convert_detection_fields(detection, field_mapping) + sigma_rule = self.get_sigma_rule(detection, data_source) + + sigma_transformation_processing_item = self.get_field_transformation_processing_item( + field_mapping['mapping'], + logsource_condition + ) + + sigma_state_fields_processing_item = self.get_state_fields_processing_item( + field_mapping['mapping'].values(), + logsource_condition + ) + sigma_state_data_model_processing_item = self.get_state_data_model_processing_item( + field_mapping['data_set'], + logsource_condition + ) + sigma_processing_pipeline = self.get_pipeline_from_processing_items([ + sigma_transformation_processing_item, + sigma_state_fields_processing_item, + sigma_state_data_model_processing_item + ]) + splunk_backend = SplunkBackend(processing_pipeline=sigma_processing_pipeline) + search = splunk_backend.convert(sigma_rule, "data_model")[0] + search = self.add_filter_macro(search, file_name) + + detection.file_path = file_name + '.yml' + + elif input_dto.data_model == SigmaConverterTarget.OCSF: + + processing_items = list() + logsource_condition = self.get_logsource_condition(data_source) + if input_dto.log_source and input_dto.log_source != detection.data_source[0]: + data_source_new = self.load_data_source(input_dto.input_path, input_dto.log_source) + + try: + field_mapping = self.get_mapping_converted_data_source( + data_source, + "data_source", + input_dto.log_source, + data_source_new, + "data_model", + "ocsf" + ) + except Exception as e: + print(e) + print("ERROR: Couldn't find data source mapping for log source " + input_dto.log_source + " and detection " + detection.name) + sys.exit(1) + + cim_to_ocsf_mapping = self.get_cim_to_ocsf_mapping(data_source_new) + + # elif input_dto.cim_to_ocsf: + # field_mapping = self.get_cim_to_ocsf_mapping(data_source) + # cim_to_ocsf_mapping = field_mapping + + else: + field_mapping = self.find_mapping(data_source.field_mappings, 'data_model', 'ocsf') + cim_to_ocsf_mapping = self.get_cim_to_ocsf_mapping(data_source) + + field_mapping_underline = copy.deepcopy(field_mapping) + for field in field_mapping_underline["mapping"].keys(): + field_mapping_underline["mapping"][field] = field_mapping_underline["mapping"][field].replace(".", "_") + + self.add_required_fields(cim_to_ocsf_mapping, detection) + self.add_mappings(cim_to_ocsf_mapping, detection) + + self.update_observables(detection) + + processing_items.append( + self.get_field_transformation_processing_item( + field_mapping_underline['mapping'], + logsource_condition + ) + ) + processing_items.append( + self.get_state_fields_processing_item( + field_mapping_underline['mapping'].values(), + logsource_condition + ) + ) + + detection = 
self.convert_detection_fields(detection) + sigma_rule = self.get_sigma_rule(detection, data_source) + sigma_processing_pipeline = self.get_pipeline_from_processing_items(processing_items) + + splunk_backend = SplunkBABackend(processing_pipeline=sigma_processing_pipeline, detection=detection, field_mapping=field_mapping) + search = splunk_backend.convert(sigma_rule, "data_model")[0] + + search = search + ' --finding_report--' + detection.file_path = 'ssa___' + file_name + '.yml' + + detection.search = search + + self.output_dto.detections.append(detection) + + except Exception as e: + print(e) + errors.append("ERROR: Converting detection " + detection.name) + + print() + for error in errors: + print(error) + + print() + + def read_detection(self, detection_path : str) -> Detection: + yml_dict = YmlReader.load_file(detection_path) + yml_dict["tags"]["name"] = yml_dict["name"] + detection = Detection.parse_obj(yml_dict) + detection.source = os.path.split(os.path.dirname(detection_path))[-1] + return detection + + + def load_data_source(self, input_path: str, data_source_name: str) -> DataSource: + data_sources = list() + files = Utils.get_all_yml_files_from_directory(os.path.join(input_path, 'data_sources')) + for file in files: + data_sources.append(DataSource.parse_obj(YmlReader.load_file(str(file)))) + + data_source = None + + for obj in data_sources: + if obj.name == data_source_name: + return obj + + return None + + + def get_sigma_rule(self, detection: Detection, data_source: DataSource) -> SigmaCollection: + return SigmaCollection.from_dicts([{ + "title": detection.name, + "status": "experimental", + "logsource": { + "category": data_source.category, + "product": data_source.product + }, + "detection": detection.search + }]) + + + # def convert_detection_fields(self, detection: Detection, mappings: dict) -> Detection: + # for selection in detection.search.keys(): + # if selection != "condition": + # new_selection = copy.deepcopy(detection.search[selection]) + # for field in detection.search[selection].keys(): + # for mapping in mappings["mapping"].keys(): + # if mapping == field: + # new_selection[mappings["mapping"][mapping]] = detection.search[selection][field] + # new_selection.pop(field) + # detection.search[selection] = new_selection + + # return detection + + def convert_detection_fields(self, detection: Detection) -> Detection: + for selection in detection.search.keys(): + if selection != "condition": + new_selection = copy.deepcopy(detection.search[selection]) + for field in detection.search[selection].keys(): + new_field_name = field.replace(".", "_") + new_selection[new_field_name] = detection.search[selection][field] + new_selection.pop(field) + detection.search[selection] = new_selection + + return detection + + + def get_logsource_condition(self, data_source: DataSource) -> LogsourceCondition: + return LogsourceCondition( + category=data_source.category, + product=data_source.product, + ) + + + def get_field_transformation_processing_item(self, data_source_mapping: dict, logsource_condition: LogsourceCondition) -> ProcessingItem: + return ProcessingItem( + identifier="field_mapping_transformation", + transformation=FieldMappingTransformation(data_source_mapping), + rule_conditions=[ + logsource_condition + ] + ) + + + def get_state_fields_processing_item(self, fields: list, logsource_condition: LogsourceCondition) -> ProcessingItem: + return ProcessingItem( + identifier="fields", + transformation=SetStateTransformation("fields", fields), + rule_conditions=[ + 
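# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the patch): the helper methods in this file
# assemble a pySigma ProcessingPipeline from a field-mapping transformation
# plus SetStateTransformation items, then hand it to the Splunk backend. Using
# the same constructors that appear here, a minimal pipeline might be built as
# follows (the category/product values and the {"Image": "process_name"}
# mapping are made-up examples):

from sigma.backends.splunk import SplunkBackend
from sigma.processing.conditions import LogsourceCondition
from sigma.processing.pipeline import ProcessingItem, ProcessingPipeline
from sigma.processing.transformations import FieldMappingTransformation, SetStateTransformation

logsource = LogsourceCondition(category="process_creation", product="windows")
pipeline = ProcessingPipeline(
    name="Splunk Sigma",
    priority=10,
    items=[
        ProcessingItem(
            identifier="field_mapping_transformation",
            transformation=FieldMappingTransformation({"Image": "process_name"}),
            rule_conditions=[logsource],
        ),
        ProcessingItem(
            identifier="fields",
            transformation=SetStateTransformation("fields", ["process_name"]),
            rule_conditions=[logsource],
        ),
    ],
)
backend = SplunkBackend(processing_pipeline=pipeline)  # then backend.convert(<SigmaCollection>)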
logsource_condition + ] + ) + + + def get_state_data_model_processing_item(self, data_model: str, logsource_condition: LogsourceCondition) -> ProcessingItem: + return ProcessingItem( + identifier="data_model", + transformation=SetStateTransformation("data_model_set", data_model), + rule_conditions=[ + logsource_condition + ] + ) + + + def get_pipeline_from_processing_items(self, processing_items: list) -> ProcessingPipeline: + return ProcessingPipeline( + name="Splunk Sigma", + priority=10, + items=processing_items + ) + + def add_source_macro(self, search: str, data_source_type: str) -> str: + return "`" + data_source_type + "` " + search + + def add_stats_count(self, search: str, fields: list) -> str: + search = search + " | fillnull | stats count min(_time) as firstTime max(_time) as lastTime by " + for key in fields: + search = search + key + " " + return search + + def add_timeformat_conversion(self, search: str) -> str: + return search + '| convert timeformat="%Y-%m-%dT%H:%M:%S" ctime(firstTime) | convert timeformat="%Y-%m-%dT%H:%M:%S" ctime(lastTime) ' + + def add_filter_macro(self, search: str, file_name: str) -> str: + return search + '| `' + file_name + '_filter`' + + def find(self, name: str, path: str) -> str: + for root, dirs, files in os.walk(path): + if name in files: + return os.path.join(root, name) + return None + + def find_mapping(self, field_mappings: list, object: str, data_model: str) -> dict: + for mapping in field_mappings: + if mapping[object] == data_model: + return mapping + + raise AttributeError("ERROR: Couldn't find mapping.") + + + def add_required_fields(self, field_mapping: dict, detection: Detection) -> None: + required_fields = list() +# required_fields = ["process.user.name", "device.hostname"] + for mapping in field_mapping["mapping"].keys(): + required_fields.append(field_mapping["mapping"][mapping]) + + detection.tags.required_fields = required_fields + + + def add_mappings(self, field_mapping: dict, detection: Detection) -> None: + mappings = list() + for mapping in field_mapping["mapping"].keys(): + mappings.append({ + "ocsf": field_mapping["mapping"][mapping], + "cim": mapping + }) + detection.tags.mappings = mappings + + def update_observables(self, detection : Detection) -> None: + mapping_field_to_type = { + "process.user.name": "User Name", + "actor.user.name": "User Name", + "device.hostname": "Hostname", + "process.file.name": "File Name", + "actor.process.file.name": "File Name", + "actor.process.file.path": "File Name", + "actor.process.cmd_line": "Process", + "actor.user.uid": "Other", + "process.cmd_line": "Other", + "process.file.path": "File", + "process.file.name": "File", + "process.uid": "Other", + "process.pid": "Other", + "actor.process.pid": "Other" + } + + observables = list() + + for field in detection.tags.required_fields: + observables.append({ + "name": field, + "type": mapping_field_to_type[field] + }) + + detection.tags.observable = observables + + + def get_cim_to_ocsf_mapping(self, data_source : DataSource) -> dict: + cim_to_ocsf_mapping = dict() + cim_to_ocsf_mapping["mapping"] = dict() + cim_mapping = self.find_mapping(data_source.field_mappings, "data_model", "cim") + ocsf_mapping = self.find_mapping(data_source.field_mappings, "data_model", "ocsf") + + for key in cim_mapping["mapping"].keys(): + cim_field = cim_mapping["mapping"][key].split(".")[1] + cim_to_ocsf_mapping["mapping"][cim_field] = ocsf_mapping["mapping"][key] + + return cim_to_ocsf_mapping + + + def get_mapping_converted_data_source(self, det_ds: 
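# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the patch): for the RAW target, the add_*
# helpers above wrap the SPL produced by the Sigma backend with a source
# macro, a fillnull/stats tail, timestamp formatting, and the per-detection
# _filter macro. Chained on a hypothetical backend output they compose like
# this (the query, macro name, and field list are made up):

search = 'process_name="cmd.exe"'                              # pretend backend output
search = "`sysmon` " + search                                  # add_source_macro
search = search + " | fillnull | stats count min(_time) as firstTime max(_time) as lastTime by process_name dest user "   # add_stats_count
search = search + '| convert timeformat="%Y-%m-%dT%H:%M:%S" ctime(firstTime) | convert timeformat="%Y-%m-%dT%H:%M:%S" ctime(lastTime) '   # add_timeformat_conversion
search = search + '| `example_detection_filter`'               # add_filter_macro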
DataSource, det_ds_obj: str, det_ds_dm: str, con_ds: DataSource, con_ds_obj: str, con_ds_dm: str) -> dict: + mapping = dict() + mapping["mapping"] = dict() + det_ds_mapping = self.find_mapping(det_ds.convert_to_log_source, det_ds_obj, det_ds_dm) + con_ds_mapping = self.find_mapping(con_ds.field_mappings, con_ds_obj, con_ds_dm) + + for key in det_ds_mapping["mapping"].keys(): + mapped_field = con_ds_mapping["mapping"][det_ds_mapping["mapping"][key]] + mapping["mapping"][key] = mapped_field + + return mapping \ No newline at end of file diff --git a/contentctl/input/ssa_detection_builder.py b/contentctl/input/ssa_detection_builder.py new file mode 100644 index 00000000..8efb1674 --- /dev/null +++ b/contentctl/input/ssa_detection_builder.py @@ -0,0 +1,161 @@ +import sys +import re +import os + +from pydantic import ValidationError + +from contentctl.input.yml_reader import YmlReader +from contentctl.objects.detection import Detection +from contentctl.objects.security_content_object import SecurityContentObject +from contentctl.objects.macro import Macro +from contentctl.objects.mitre_attack_enrichment import MitreAttackEnrichment +from contentctl.enrichments.cve_enrichment import CveEnrichment +from contentctl.enrichments.splunk_app_enrichment import SplunkAppEnrichment +from contentctl.objects.ssa_detection import SSADetection +from contentctl.helper.constants import * + + +class SSADetectionBuilder(): + security_content_obj : SSADetection + + + def setObject(self, path: str) -> None: + yml_dict = YmlReader.load_file(path) + yml_dict["tags"]["name"] = yml_dict["name"] + self.security_content_obj = SSADetection.parse_obj(yml_dict) + self.security_content_obj.source = os.path.split(os.path.dirname(self.security_content_obj.file_path))[-1] + + + def addProvidingTechnologies(self) -> None: + if self.security_content_obj: + if 'Endpoint' in str(self.security_content_obj.search): + self.security_content_obj.providing_technologies = ["Sysmon", "Microsoft Windows","Carbon Black Response","CrowdStrike Falcon", "Symantec Endpoint Protection"] + if "`cloudtrail`" in str(self.security_content_obj.search): + self.security_content_obj.providing_technologies = ["Amazon Web Services - Cloudtrail"] + if '`wineventlog_security`' in self.security_content_obj.search or '`powershell`' in self.security_content_obj.search: + self.security_content_obj.providing_technologies = ["Microsoft Windows"] + + + def addMappings(self) -> None: + if self.security_content_obj: + keys = ['mitre_attack', 'kill_chain_phases', 'cis20', 'nist'] + mappings = {} + for key in keys: + if key == 'mitre_attack': + if getattr(self.security_content_obj.tags, 'mitre_attack_id'): + mappings[key] = getattr(self.security_content_obj.tags, 'mitre_attack_id') + elif getattr(self.security_content_obj.tags, key): + mappings[key] = getattr(self.security_content_obj.tags, key) + self.security_content_obj.mappings = mappings + + + def addAnnotations(self) -> None: + if self.security_content_obj: + annotations = {} + annotation_keys = ['mitre_attack', 'kill_chain_phases', 'cis20', 'nist', + 'analytic_story', 'context', 'impact', 'confidence', 'cve'] + for key in annotation_keys: + if key == 'mitre_attack': + if getattr(self.security_content_obj.tags, 'mitre_attack_id'): + annotations[key] = getattr(self.security_content_obj.tags, 'mitre_attack_id') + try: + if getattr(self.security_content_obj.tags, key): + annotations[key] = getattr(self.security_content_obj.tags, key) + except AttributeError as e: + continue + self.security_content_obj.annotations = 
annotations + + + def addUnitTest(self) -> None: + if self.security_content_obj: + if self.security_content_obj.tests: + self.security_content_obj.test = self.security_content_obj.tests[0] + + + def addMitreAttackEnrichment(self, attack_enrichment: dict) -> None: + if self.security_content_obj: + if attack_enrichment: + if self.security_content_obj.tags.mitre_attack_id: + self.security_content_obj.tags.mitre_attack_enrichments = [] + + for mitre_attack_id in self.security_content_obj.tags.mitre_attack_id: + if mitre_attack_id in attack_enrichment: + mitre_attack_enrichment = MitreAttackEnrichment( + mitre_attack_id = mitre_attack_id, + mitre_attack_technique = attack_enrichment[mitre_attack_id]["technique"], + mitre_attack_tactics = sorted(attack_enrichment[mitre_attack_id]["tactics"]), + mitre_attack_groups = sorted(attack_enrichment[mitre_attack_id]["groups"]) + ) + self.security_content_obj.tags.mitre_attack_enrichments.append(mitre_attack_enrichment) + else: + #print("mitre_attack_id " + mitre_attack_id + " doesn't exist for detecction " + self.security_content_obj.name) + raise ValueError("mitre_attack_id " + mitre_attack_id + " doesn't exist for detection " + self.security_content_obj.name) + + + def addCIS(self) -> None: + if self.security_content_obj: + if self.security_content_obj.tags.security_domain == "network": + self.security_content_obj.tags.cis20 = ["CIS 13"] + else: + self.security_content_obj.tags.cis20 = ["CIS 10"] + + + def addKillChainPhase(self) -> None: + if self.security_content_obj: + if not self.security_content_obj.tags.kill_chain_phases: + kill_chain_phases = list() + if self.security_content_obj.tags.mitre_attack_enrichments: + for mitre_attack_enrichment in self.security_content_obj.tags.mitre_attack_enrichments: + for mitre_attack_tactic in mitre_attack_enrichment.mitre_attack_tactics: + kill_chain_phases.append(ATTACK_TACTICS_KILLCHAIN_MAPPING[mitre_attack_tactic]) + self.security_content_obj.tags.kill_chain_phases = list(dict.fromkeys(kill_chain_phases)) + + + def addNist(self) -> None: + if self.security_content_obj: + if self.security_content_obj.type == "TTP": + self.security_content_obj.tags.nist = ["DE.CM"] + else: + self.security_content_obj.tags.nist = ["DE.AE"] + + + def addDatamodel(self) -> None: + if self.security_content_obj: + self.security_content_obj.datamodel = [] + data_models = [ + "Authentication", + "Change", + "Change_Analysis", + "Email", + "Endpoint", + "Network_Resolution", + "Network_Sessions", + "Network_Traffic", + "Risk", + "Splunk_Audit", + "UEBA", + "Updates", + "Vulnerabilities", + "Web" + ] + for data_model in data_models: + if data_model in self.security_content_obj.search: + self.security_content_obj.datamodel.append(data_model) + + + def addRBA(self) -> None: + if self.security_content_obj: + if self.security_content_obj.tags.risk_score >= 80: + self.security_content_obj.tags.risk_severity = 'high' + elif (self.security_content_obj.tags.risk_score >= 50 and self.security_content_obj.tags.risk_score <= 79): + self.security_content_obj.tags.risk_severity = 'medium' + else: + self.security_content_obj.tags.risk_severity = 'low' + + + def reset(self) -> None: + self.security_content_obj = None + + + def getObject(self) -> SSADetection: + return self.security_content_obj diff --git a/contentctl/input/yml_reader.py b/contentctl/input/yml_reader.py index ee524ff0..8455c567 100644 --- a/contentctl/input/yml_reader.py +++ b/contentctl/input/yml_reader.py @@ -24,15 +24,5 @@ def load_file(file_path: pathlib.Path, add_fields=True) -> 
Dict: return yml_obj yml_obj['file_path'] = str(file_path) - - if 'deprecated' in [parent.name for parent in file_path.parents]: - yml_obj['deprecated'] = True - else: - yml_obj['deprecated'] = False - - if 'experimental' in [parent.name for parent in file_path.parents]: - yml_obj['experimental'] = True - else: - yml_obj['experimental'] = False return yml_obj diff --git a/contentctl/objects/abstract_security_content_objects/detection_abstract.py b/contentctl/objects/abstract_security_content_objects/detection_abstract.py index 4b0059ad..a554f703 100644 --- a/contentctl/objects/abstract_security_content_objects/detection_abstract.py +++ b/contentctl/objects/abstract_security_content_objects/detection_abstract.py @@ -1,9 +1,12 @@ +from __future__ import annotations + import uuid import string import requests import time import sys - +import re +import pathlib from pydantic import BaseModel, validator, root_validator, Extra from dataclasses import dataclass from typing import Union @@ -26,30 +29,29 @@ class Detection_Abstract(SecurityContentObject): - contentType: SecurityContentType = SecurityContentType.detections + #contentType: SecurityContentType = SecurityContentType.detections type: str status: DetectionStatus data_source: list[str] + tags: DetectionTags search: Union[str, dict] how_to_implement: str known_false_positives: str check_references: bool = False references: list - tags: DetectionTags + tests: list[UnitTest] = [] # enrichments datamodel: list = None - deprecated: bool = None - experimental: bool = None deployment: ConfigDetectionConfiguration = None annotations: dict = None risk: list = None - playbooks: list[Playbook] = None - baselines: list[Baseline] = None + playbooks: list[Playbook] = [] + baselines: list[Baseline] = [] mappings: dict = None - macros: list[Macro] = None - lookups: list[Lookup] = None + macros: list[Macro] = [] + lookups: list[Lookup] = [] cve_enrichment: list = None splunk_app_enrichment: list = None file_path: str = None @@ -61,6 +63,21 @@ class Detection_Abstract(SecurityContentObject): class Config: use_enum_values = True + + def get_content_dependencies(self)->list[SecurityContentObject]: + return self.playbooks + self.baselines + self.macros + self.lookups + + @staticmethod + def get_detections_from_filenames(detection_filenames:set[str], all_detections:list[Detection_Abstract])->list[Detection_Abstract]: + detection_filenames = set(str(pathlib.Path(filename).absolute()) for filename in detection_filenames) + detection_dict = SecurityContentObject.create_filename_to_content_dict(all_detections) + + try: + return [detection_dict[detection_filename] for detection_filename in detection_filenames] + except Exception as e: + raise Exception(f"Failed to find detection object for modified detection: {str(e)}") + + @validator("type") def type_valid(cls, v, values): if v.lower() not in [el.name.lower() for el in AnalyticsType]: @@ -88,10 +105,37 @@ def encode_error(cls, v, values, field): # return v - @validator("search") - def search_validate(cls, v, values): - # write search validator - return v + # @validator("search") + # def search_obsersables_exist_validate(cls, v, values): + # if type(v) is str: + # tags:DetectionTags = values.get("tags") + # if tags == None: + # raise ValueError("Unable to parse Detection Tags. 
Please resolve Detection Tags errors") + + # observable_fields = [ob.name.lower() for ob in tags.observable] + + # #All $field$ fields from the message must appear in the search + # field_match_regex = r"\$([^\s.]*)\$" + + # message_fields = [match.replace("$", "").lower() for match in re.findall(field_match_regex, tags.message.lower())] + # missing_fields = set([field for field in observable_fields if field not in v.lower()]) + + # error_messages = [] + # if len(missing_fields) > 0: + # error_messages.append(f"The following fields are declared as observables, but do not exist in the search: {missing_fields}") + + + # missing_fields = set([field for field in message_fields if field not in v.lower()]) + # if len(missing_fields) > 0: + # error_messages.append(f"The following fields are used as fields in the message, but do not exist in the search: {missing_fields}") + + # if len(error_messages) > 0 and values.get("status") == DetectionStatus.production.value: + # msg = "\n\t".join(error_messages) + # print("Errors found in notable validation - skipping for now") + # #raise(ValueError(msg)) + + # # Found everything + # return v @validator("tests") def tests_validate(cls, v, values): @@ -100,18 +144,6 @@ def tests_validate(cls, v, values): "tests value is needed for production detection: " + values["name"] ) return v - - @validator("experimental", always=True) - def experimental_validate(cls, v, values): - if DetectionStatus(values.get("status","")) == DetectionStatus.experimental: - return True - return False - - @validator("deprecated", always=True) - def deprecated_validate(cls, v, values): - if DetectionStatus(values.get("status","")) == DetectionStatus.deprecated: - return True - return False @validator("datamodel") def datamodel_valid(cls, v, values): @@ -131,7 +163,7 @@ def all_tests_successful(self) -> bool: def get_summary( self, detection_fields: list[str] = ["name", "search"], - test_model_fields: list[str] = ["success", "message"], + test_model_fields: list[str] = ["success", "message", "exception"], test_job_fields: list[str] = ["resultCount", "runDuration"], ) -> dict: summary_dict = {} diff --git a/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py b/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py index 4ae242cf..84a46a47 100644 --- a/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +++ b/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py @@ -1,24 +1,29 @@ +from __future__ import annotations + import abc import string import uuid from datetime import datetime -from pydantic import BaseModel, validator, ValidationError +from pydantic import BaseModel, validator, ValidationError, Field from contentctl.objects.enums import SecurityContentType +from typing import Tuple +import uuid +import pathlib - +NO_FILE_BUILT_AT_RUNTIME = "NO_FILE_BUILT_AT_RUNTIME" class SecurityContentObject_Abstract(BaseModel, abc.ABC): - contentType: SecurityContentType + #contentType: SecurityContentType name: str author: str = "UNKNOWN_AUTHOR" date: str = "1990-01-01" - version: int = 99999 - id: str = None + version: int = 1 + id: uuid.UUID = Field(default_factory=uuid.uuid4) #we set a default here until all content has a uuid description: str = "UNKNOWN_DESCRIPTION" + file_path: str = "NO_FILE_BUILT_AT_RUNTIME" @validator('name') def name_max_length(cls, v): if len(v) > 67: - print("LENGTH ERROR!") raise ValueError('name is longer then 67 chars: ' + 
v) return v @@ -29,16 +34,6 @@ def name_invalid_chars(cls, v): raise ValueError('invalid chars used in name: ' + v) return v - @validator('id',always=True) - def id_check(cls, v, values): - try: - uuid.UUID(str(v)) - except: - #print(f"Generating missing uuid for {values['name']}") - return str(uuid.uuid4()) - raise ValueError('uuid is not valid: ' + values["name"]) - return v - @validator('date') def date_valid(cls, v, values): try: @@ -58,3 +53,21 @@ def free_text_field_valid(input_cls, v, values, field): @validator('description') def description_valid(cls, v, values, field): return SecurityContentObject_Abstract.free_text_field_valid(cls,v,values,field) + + + @staticmethod + def get_objects_by_name(names_to_find:set[str], objects_to_search:list[SecurityContentObject_Abstract])->Tuple[list[SecurityContentObject_Abstract], set[str]]: + found_objects = list(filter(lambda obj: obj.name in names_to_find, objects_to_search)) + found_names = set([obj.name for obj in found_objects]) + missing_names = names_to_find - found_names + return found_objects,missing_names + + @staticmethod + def create_filename_to_content_dict(all_objects:list[SecurityContentObject_Abstract])->dict[str,SecurityContentObject_Abstract]: + name_dict:dict[str,SecurityContentObject_Abstract] = dict() + + for object in all_objects: + name_dict[str(pathlib.Path(object.file_path))] = object + + return name_dict + \ No newline at end of file diff --git a/contentctl/objects/app.py b/contentctl/objects/app.py index db7f7194..dc2a0210 100644 --- a/contentctl/objects/app.py +++ b/contentctl/objects/app.py @@ -102,32 +102,32 @@ def validate_string_alphanumeric_with_underscores(input: str) -> bool: ) return True - @validator("uid", always=True) + @validator("uid") def validate_uid(cls, v): return v - @validator("appid", always=True) + @validator("appid") def validate_appid(cls, v): # Called function raises exception on failure, so we don't need to raise it here cls.validate_string_alphanumeric_with_underscores(v) return v - @validator("title", always=True) + @validator("title") def validate_title(cls, v): # Basically, a title can be any string return v - @validator("description", always=True) + @validator("description") def validate_description(cls, v): # description can be anything return v - @validator("release", always=True) + @validator("release") def validate_release(cls, v): # release can be any string return v - @validator("local_path", always=True) + @validator("local_path") def validate_local_path(cls, v): if v is not None: p = pathlib.Path(v) @@ -139,7 +139,7 @@ def validate_local_path(cls, v): # release can be any string return v - @validator("http_path", always=True) + @validator("http_path") def validate_http_path(cls, v, values): if v is not None: try: @@ -149,11 +149,8 @@ def validate_http_path(cls, v, values): raise (ValueError(f"Error validating the http_path: {str(e)}")) return v - @validator("splunkbase_path", always=True) + @validator("splunkbase_path") def validate_splunkbase_path(cls, v, values): - Utils.check_required_fields( - "splunkbase_path", values, ["local_path", "http_path", "uid", "title"] - ) if v is not None: try: diff --git a/contentctl/objects/baseline.py b/contentctl/objects/baseline.py index 62019aa1..7e8723d8 100644 --- a/contentctl/objects/baseline.py +++ b/contentctl/objects/baseline.py @@ -20,7 +20,7 @@ class Baseline(SecurityContentObject): #version: int #date: str #author: str - contentType: SecurityContentType = SecurityContentType.baselines + #contentType: SecurityContentType = 
SecurityContentType.baselines type: str datamodel: list #description: str diff --git a/contentctl/objects/config.py b/contentctl/objects/config.py index 996911a3..a869de92 100644 --- a/contentctl/objects/config.py +++ b/contentctl/objects/config.py @@ -147,12 +147,19 @@ class ConfigEnrichments(BaseModel): splunk_app_enrichment: bool = False +class ConfigBuildSSA(BaseModel): + output_path: str + +class ConfigBuildApi(BaseModel): + output_path: str class Config(BaseModel, extra=Extra.forbid): #general: ConfigGlobal = ConfigGlobal() - detection_configuration: ConfigDetectionConfiguration = ConfigDetectionConfiguration() + #detection_configuration: ConfigDetectionConfiguration = ConfigDetectionConfiguration() deployments: Deployments = Deployments() build: ConfigBuild = ConfigBuild() + build_ssa: Union[ConfigBuildSSA,None] = None + build_api: Union[ConfigBuildApi,None] = None enrichments: ConfigEnrichments = ConfigEnrichments() test: Union[TestConfig,None] = None diff --git a/contentctl/objects/data_source.py b/contentctl/objects/data_source.py new file mode 100644 index 00000000..5b21b111 --- /dev/null +++ b/contentctl/objects/data_source.py @@ -0,0 +1,23 @@ + + + +from pydantic import BaseModel, validator, ValidationError +from dataclasses import dataclass + + +class DataSource(BaseModel): + name: str + id: str + date: str + author: str + type: str + source: str + sourcetype: str + category: str = None + product: str + service: str = None + supported_TA: list + references: list + raw_fields: list + field_mappings: list = None + convert_to_log_source: list = None \ No newline at end of file diff --git a/contentctl/objects/deployment.py b/contentctl/objects/deployment.py index db53541d..552c069b 100644 --- a/contentctl/objects/deployment.py +++ b/contentctl/objects/deployment.py @@ -19,7 +19,7 @@ class Deployment(SecurityContentObject): #date: str = None #author: str = None #description: str = None - contentType: SecurityContentType = SecurityContentType.deployments + #contentType: SecurityContentType = SecurityContentType.deployments scheduling: DeploymentScheduling = None email: DeploymentEmail = None notable: DeploymentNotable = None diff --git a/contentctl/objects/detection_tags.py b/contentctl/objects/detection_tags.py index 18fd08d9..7ba5c87e 100644 --- a/contentctl/objects/detection_tags.py +++ b/contentctl/objects/detection_tags.py @@ -3,6 +3,7 @@ from pydantic import BaseModel, validator, ValidationError, root_validator from contentctl.objects.mitre_attack_enrichment import MitreAttackEnrichment from contentctl.objects.constants import * +from contentctl.objects.observable import Observable class DetectionTags(BaseModel): # detection spec @@ -14,10 +15,10 @@ class DetectionTags(BaseModel): confidence: str impact: int kill_chain_phases: list = None - message: str mitre_attack_id: list = None nist: list = None - observable: list + observable: list[Observable] = [] + message: str product: list required_fields: list risk_score: int @@ -128,22 +129,32 @@ def tags_calculate_risk_score(cls, v, values): raise ValueError(f"Risk Score must be calculated as round(confidence * impact / 100)" f"\n Expected risk_score={calculated_risk_score}, found risk_score={int(v)}: {values['name']}") return v + + # The following validator is temporarily disabled pending further discussions + # @validator('message') + # def validate_message(cls,v,values): + + # observables:list[Observable] = values.get("observable",[]) + # observable_names = set([o.name for o in observables]) + # #find all of the observables 
used in the message by name + # name_match_regex = r"\$([^\s.]*)\$" + + # message_observables = set() + + # #Make sure that all observable names in + # for match in re.findall(name_match_regex, v): + # #Remove + # match_without_dollars = match.replace("$", "") + # message_observables.add(match_without_dollars) + - @root_validator - def tags_observable(cls, values): - valid_roles = SES_OBSERVABLE_ROLE_MAPPING.keys() - valid_types = SES_OBSERVABLE_TYPE_MAPPING.keys() + # missing_observables = message_observables - observable_names + # unused_observables = observable_names - message_observables + # if len(missing_observables) > 0: + # raise ValueError(f"The following observables are referenced in the message, but were not declared as observables: {missing_observables}") - for value in values["observable"]: - if value['type'] in valid_types: - if 'Splunk Behavioral Analytics' in values["product"]: - continue - - if 'role' not in value: - raise ValueError('Observable role is missing for ' + values["name"]) - for role in value['role']: - if role not in valid_roles: - raise ValueError('Observable role ' + role + ' not valid for ' + values["name"] + '. valid options are ' + str(valid_roles)) - else: - raise ValueError('Observable type ' + value['type'] + ' not valid for ' + values["name"] + '. valid options are ' + str(valid_types)) - return values \ No newline at end of file + # if len(unused_observables) > 0: + # raise ValueError(f"The following observables were declared, but are not referenced in the message: {unused_observables}") + # return v + + \ No newline at end of file diff --git a/contentctl/objects/enums.py b/contentctl/objects/enums.py index 4527205d..a5b983af 100644 --- a/contentctl/objects/enums.py +++ b/contentctl/objects/enums.py @@ -38,6 +38,7 @@ class SecurityContentType(enum.Enum): deployments = 7 investigations = 8 unit_tests = 9 + ssa_detections = 10 # Bringing these changes back in line will take some time after # the initial merge is complete @@ -124,3 +125,8 @@ class InstanceState(str, enum.Enum): stopping = "stopping" stopped = "stopped" +class SigmaConverterTarget(enum.Enum): + CIM = 1 + RAW = 2 + OCSF = 3 + ALL = 4 \ No newline at end of file diff --git a/contentctl/objects/investigation.py b/contentctl/objects/investigation.py index feeaa701..eeb24abf 100644 --- a/contentctl/objects/investigation.py +++ b/contentctl/objects/investigation.py @@ -18,7 +18,7 @@ class Investigation(SecurityContentObject): # investigation spec - contentType: SecurityContentType = SecurityContentType.investigations + #contentType: SecurityContentType = SecurityContentType.investigations #name: str #id: str #version: int diff --git a/contentctl/objects/lookup.py b/contentctl/objects/lookup.py index a82c45a2..3c96a4a9 100644 --- a/contentctl/objects/lookup.py +++ b/contentctl/objects/lookup.py @@ -1,13 +1,27 @@ -from pydantic import BaseModel, validator, ValidationError +from __future__ import annotations +from pydantic import BaseModel, validator, ValidationError +from typing import Tuple +import re from contentctl.objects.security_content_object import SecurityContentObject from contentctl.objects.enums import SecurityContentType +LOOKUPS_TO_IGNORE = set(["outputlookup"]) +LOOKUPS_TO_IGNORE.add("ut_shannon_lookup") #In the URL toolbox app which is recommended for ESCU +LOOKUPS_TO_IGNORE.add("identity_lookup_expanded") #Shipped with the Asset and Identity Framework +LOOKUPS_TO_IGNORE.add("cim_corporate_web_domain_lookup") #Shipped with the Asset and Identity Framework 
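# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the patch): the LOOKUPS_TO_IGNORE set being
# assembled here is consumed by Lookup.get_lookups below — lookup names pulled
# out of a search string are dropped when they contain any ignored name, and
# the remainder are resolved against the known Lookup objects by name. The
# intended filtering, standalone (the lookup names are made up):

found_in_search = {"previously_seen_users_lookup", "outputlookup", "ut_shannon_lookup"}
ignore = {"outputlookup", "ut_shannon_lookup"}
to_resolve = {name for name in found_in_search
              if not any(ignored in name for ignored in ignore)}
# to_resolve == {"previously_seen_users_lookup"}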
+LOOKUPS_TO_IGNORE.add("alexa_lookup_by_str") #Shipped with the Asset and Identity Framework +LOOKUPS_TO_IGNORE.add("interesting_ports_lookup") #Shipped with the Asset and Identity Framework + +#Special case for the Detection "Exploit Public Facing Application via Apache Commons Text" +LOOKUPS_TO_IGNORE.add("=") +LOOKUPS_TO_IGNORE.add("other_lookups") -class Lookup(SecurityContentObject): - contentType: SecurityContentType = SecurityContentType.lookups - #name: str - #description: str + +class Lookup(BaseModel): + #contentType: SecurityContentType = SecurityContentType.lookups + name: str + description: str collection: str = None fields_list: str = None filename: str = None @@ -29,4 +43,13 @@ def name_invalid_chars(cls, v): def name_max_length(cls, v): #if len(v) > 67: # raise ValueError('name is longer then 67 chars: ' + v) - return v \ No newline at end of file + return v + + @staticmethod + def get_lookups(text_field: str, all_lookups: list[Lookup], ignore_lookups:set[str]=LOOKUPS_TO_IGNORE)->Tuple[list[Lookup], set[str]]: + lookups_to_get = set(re.findall(r'[^output]lookup (?:update=true)?(?:append=t)?\s*([^\s]*)', text_field)) + lookups_to_ignore = set([lookup for lookup in lookups_to_get if any(to_ignore in lookups_to_get for to_ignore in ignore_lookups)]) + lookups_to_get -= lookups_to_ignore + found_lookups, missing_lookups = SecurityContentObject.get_objects_by_name(lookups_to_get, all_lookups) + return found_lookups, missing_lookups + \ No newline at end of file diff --git a/contentctl/objects/macro.py b/contentctl/objects/macro.py index 7fedf982..aef2caf6 100644 --- a/contentctl/objects/macro.py +++ b/contentctl/objects/macro.py @@ -1,16 +1,27 @@ - - +# Used so that we can have a staticmethod that takes the class +# type Macro as an argument +from __future__ import annotations +import re from pydantic import BaseModel, validator, ValidationError from contentctl.objects.security_content_object import SecurityContentObject from contentctl.objects.enums import SecurityContentType +from typing import Tuple + +MACROS_TO_IGNORE = set(["_filter", "drop_dm_object_name"]) +#Should all of the following be included as well? +MACROS_TO_IGNORE.add("get_asset" ) +MACROS_TO_IGNORE.add("get_risk_severity") +MACROS_TO_IGNORE.add("cim_corporate_web_domain_search") +MACROS_TO_IGNORE.add("prohibited_processes") -class Macro(SecurityContentObject): - contentType: SecurityContentType = SecurityContentType.macros - #name: str + +class Macro(BaseModel): + #contentType: SecurityContentType = SecurityContentType.macros + name: str definition: str - #description: str + description: str arguments: list = None # Macro can have different punctuatuation in it, @@ -26,4 +37,37 @@ def name_invalid_chars(cls, v): def name_max_length(cls, v): #if len(v) > 67: # raise ValueError('name is longer then 67 chars: ' + v) - return v \ No newline at end of file + return v + + + @staticmethod + def get_macros(text_field:str, all_macros: list[Macro], ignore_macros:set[str]=MACROS_TO_IGNORE)->Tuple[list[Macro], set[str]]: + + macros_to_get = re.findall(r'`([^\s]+)`', text_field) + #If macros take arguments, stop at the first argument. 
We just want the name of the macro + macros_to_get = set([macro[:macro.find('(')] if macro.find('(') != -1 else macro for macro in macros_to_get]) + + macros_to_ignore = set([macro for macro in macros_to_get if any(to_ignore in macro for to_ignore in ignore_macros)]) + #remove the ones that we will ignore + macros_to_get -= macros_to_ignore + found_macros, missing_macros = SecurityContentObject.get_objects_by_name(macros_to_get, all_macros) + return found_macros, missing_macros + + # found_macros = [macro for macro in all_macros if macro.name in macros_to_get] + + # missing_macros = macros_to_get - set([macro.name for macro in found_macros]) + # missing_macros_after_ignored_macros = set() + # for macro in missing_macros: + # found = False + # for ignore in ignore_macros: + # if ignore in macro: + # found=True + # break + # if found is False: + # missing_macros_after_ignored_macros.add(macro) + + #return found_macros, missing_macros_after_ignored_macros + + + + diff --git a/contentctl/objects/observable.py b/contentctl/objects/observable.py new file mode 100644 index 00000000..7b40b145 --- /dev/null +++ b/contentctl/objects/observable.py @@ -0,0 +1,45 @@ +import abc +import string +import uuid +from typing import Literal +from datetime import datetime +from pydantic import BaseModel, validator, ValidationError +from contentctl.objects.enums import SecurityContentType +from contentctl.objects.constants import * + + + +class Observable(BaseModel): + name: str + type: str + role: list[str] + + + + @validator('name') + def check_name(cls, v, values): + if v == "": + raise ValueError("No name provided for observable") + return v + + @validator('type') + def check_type(cls, v, values): + if v not in SES_OBSERVABLE_TYPE_MAPPING.keys(): + raise ValueError(f"Invalid type '{v}' provided for observable. Valid observable types are {SES_OBSERVABLE_TYPE_MAPPING.keys()}") + return v + + + @validator('role', each_item=False) + def check_roles_not_empty(cls, v, values): + if len(v) == 0: + raise ValueError("At least one role must be defined for observable") + return v + + @validator('role', each_item=True) + def check_roles(cls, v, values): + if v not in SES_OBSERVABLE_ROLE_MAPPING.keys(): + raise ValueError(f"Invalid role '{v}' provided for observable. 
Valid observable types are {SES_OBSERVABLE_ROLE_MAPPING.keys()}") + return v + + + \ No newline at end of file diff --git a/contentctl/objects/playbook.py b/contentctl/objects/playbook.py index c467c172..d9de2761 100644 --- a/contentctl/objects/playbook.py +++ b/contentctl/objects/playbook.py @@ -16,7 +16,7 @@ class Playbook(SecurityContentObject): #version: int #date: str #author: str - contentType: SecurityContentType = SecurityContentType.playbooks + #contentType: SecurityContentType = SecurityContentType.playbooks type: str #description: str how_to_implement: str diff --git a/contentctl/objects/repo_config.py b/contentctl/objects/repo_config.py index 9cfd0c58..bf0904f5 100644 --- a/contentctl/objects/repo_config.py +++ b/contentctl/objects/repo_config.py @@ -124,9 +124,9 @@ def validate_repo_path(cls,v): return v - @validator('repo_url', always=True) + @validator('repo_url') def validate_repo_url(cls, v, values): - Utils.check_required_fields('repo_url', values, ['repo_path']) + #First try to get the value from the repo try: @@ -152,9 +152,9 @@ def validate_repo_url(cls, v, values): return v - @validator('main_branch', always=True) + @validator('main_branch') def valid_main_branch(cls, v, values): - Utils.check_required_fields('main_branch', values, ['repo_path', 'repo_url']) + try: Utils.validate_git_branch_name(values['repo_path'],values['repo_url'], v) diff --git a/contentctl/objects/ssa_detection.py b/contentctl/objects/ssa_detection.py new file mode 100644 index 00000000..7d794f69 --- /dev/null +++ b/contentctl/objects/ssa_detection.py @@ -0,0 +1,156 @@ +import uuid +import string +import requests +import time +from pydantic import BaseModel, validator, root_validator +from dataclasses import dataclass +from datetime import datetime +from typing import Union +import re + +from contentctl.objects.abstract_security_content_objects.detection_abstract import Detection_Abstract +from contentctl.objects.enums import AnalyticsType +from contentctl.objects.enums import DataModel +from contentctl.objects.enums import DetectionStatus +from contentctl.objects.deployment import Deployment +from contentctl.objects.ssa_detection_tags import SSADetectionTags +from contentctl.objects.config import ConfigDetectionConfiguration +from contentctl.objects.unit_test import UnitTest +from contentctl.objects.macro import Macro +from contentctl.objects.lookup import Lookup +from contentctl.objects.baseline import Baseline +from contentctl.objects.playbook import Playbook +from contentctl.helper.link_validator import LinkValidator +from contentctl.objects.enums import SecurityContentType + +class SSADetection(BaseModel): + # detection spec + name: str + id: str + version: int + date: str + author: str + type: str + status: DetectionStatus + description: str + data_source: list[str] + search: Union[str, dict] + how_to_implement: str + known_false_positives: str + references: list + tags: SSADetectionTags + tests: list[UnitTest] = None + + # enrichments + annotations: dict = None + risk: list = None + mappings: dict = None + file_path: str = None + source: str = None + runtime: str = None + test: Union[UnitTest, dict] = None + + # @validator('name')v + # def name_max_length(cls, v, values): + # if len(v) > 67: + # raise ValueError('name is longer then 67 chars: ' + v) + # return v + + class Config: + use_enum_values = True + + @validator("name") + def name_invalid_chars(cls, v): + invalidChars = set(string.punctuation.replace("-", "")) + if any(char in invalidChars for char in v): + raise ValueError("invalid 
chars used in name: " + v) + return v + + @validator("id") + def id_check(cls, v, values): + try: + uuid.UUID(str(v)) + except: + raise ValueError("uuid is not valid: " + values["name"]) + return v + + @validator("date") + def date_valid(cls, v, values): + try: + datetime.strptime(v, "%Y-%m-%d") + except: + raise ValueError("date is not in format YYYY-MM-DD: " + values["name"]) + return v + + @validator("type") + def type_valid(cls, v, values): + if v.lower() not in [el.name.lower() for el in AnalyticsType]: + raise ValueError("not valid analytics type: " + values["name"]) + return v + + @validator("description", "how_to_implement") + def encode_error(cls, v, values, field): + try: + v.encode("ascii") + except UnicodeEncodeError: + raise ValueError("encoding error in " + field.name + ": " + values["name"]) + return v + + # @root_validator + # def search_validation(cls, values): + # if 'ssa_' not in values['file_path']: + # if not '_filter' in values['search']: + # raise ValueError('filter macro missing in: ' + values["name"]) + # if any(x in values['search'] for x in ['eventtype=', 'sourcetype=', ' source=', 'index=']): + # if not 'index=_internal' in values['search']: + # raise ValueError('Use source macro instead of eventtype, sourcetype, source or index in detection: ' + values["name"]) + # return values + + @root_validator + def name_max_length(cls, values): + # Check max length only for ESCU searches, SSA does not have that constraint + if "ssa_" not in values["file_path"]: + if len(values["name"]) > 67: + raise ValueError("name is longer then 67 chars: " + values["name"]) + return values + + + @root_validator + def new_line_check(cls, values): + # Check if there is a new line in description and how to implement that is not escaped + pattern = r'(? 'CIS 20'): {values['name']}") + return v + + @validator('nist') + def tags_nist(cls, v, values): + # Sourced Courtest of NIST: https://www.nist.gov/system/files/documents/cyberframework/cybersecurity-framework-021214.pdf (Page 19) + IDENTIFY = [f'ID.{category}' for category in ["AM", "BE", "GV", "RA", "RM"] ] + PROTECT = [f'PR.{category}' for category in ["AC", "AT", "DS", "IP", "MA", "PT"]] + DETECT = [f'DE.{category}' for category in ["AE", "CM", "DP"] ] + RESPOND = [f'RS.{category}' for category in ["RP", "CO", "AN", "MI", "IM"] ] + RECOVER = [f'RC.{category}' for category in ["RP", "IM", "CO"] ] + ALL_NIST_CATEGORIES = IDENTIFY + PROTECT + DETECT + RESPOND + RECOVER + + + for value in v: + if not value in ALL_NIST_CATEGORIES: + raise ValueError(f"NIST Category '{value}' is not a valid category") + return v + + @validator('confidence') + def tags_confidence(cls, v, values): + v = int(v) + if not (v > 0 and v <= 100): + raise ValueError('confidence score is out of range 1-100: ' + values["name"]) + else: + return v + + + @validator('impact') + def tags_impact(cls, v, values): + if not (v > 0 and v <= 100): + raise ValueError('impact score is out of range 1-100: ' + values["name"]) + else: + return v + + @validator('kill_chain_phases') + def tags_kill_chain_phases(cls, v, values): + valid_kill_chain_phases = SES_KILL_CHAIN_MAPPINGS.keys() + for value in v: + if value not in valid_kill_chain_phases: + raise ValueError('kill chain phase not valid for ' + values["name"] + '. 
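The SSADetection field validators above are plain pydantic checks: names may not contain punctuation other than `-`, ids must parse as UUIDs, and dates must be `YYYY-MM-DD`. A cut-down stand-in model showing the same style of checks in isolation (pydantic v1 `@validator` API, as used throughout this diff; the sample values are invented):

```python
import string
import uuid
from datetime import datetime

from pydantic import BaseModel, validator  # pydantic v1 API


class MiniDetection(BaseModel):
    """Cut-down stand-in for SSADetection, for illustration only."""

    name: str
    id: str
    date: str

    @validator("name")
    def name_invalid_chars(cls, v):
        # '-' is the only punctuation character allowed in a detection name
        invalid_chars = set(string.punctuation.replace("-", ""))
        if any(char in invalid_chars for char in v):
            raise ValueError("invalid chars used in name: " + v)
        return v

    @validator("id")
    def id_check(cls, v):
        uuid.UUID(str(v))  # raises if the id is not a well-formed UUID
        return v

    @validator("date")
    def date_valid(cls, v):
        datetime.strptime(v, "%Y-%m-%d")  # raises if the date is not YYYY-MM-DD
        return v


MiniDetection(name="Suspicious Process Launch", id=str(uuid.uuid4()), date="2023-04-01")
```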
valid options are ' + str(valid_kill_chain_phases)) + return v + + @validator('mitre_attack_id') + def tags_mitre_attack_id(cls, v, values): + pattern = 'T[0-9]{4}' + for value in v: + if not re.match(pattern, value): + raise ValueError('Mitre Attack ID are not following the pattern Txxxx: ' + values["name"]) + return v + + @validator('product') + def tags_product(cls, v, values): + valid_products = [ + "Splunk Enterprise", "Splunk Enterprise Security", "Splunk Cloud", + "Splunk Security Analytics for AWS", "Splunk Behavioral Analytics" + ] + + for value in v: + if value not in valid_products: + raise ValueError('product is not valid for ' + values['name'] + '. valid products are ' + str(valid_products)) + return v + + @validator('risk_score') + def tags_calculate_risk_score(cls, v, values): + calculated_risk_score = round(values['impact'] * values['confidence'] / 100) + if calculated_risk_score != int(v): + raise ValueError(f"Risk Score must be calculated as round(confidence * impact / 100)" + f"\n Expected risk_score={calculated_risk_score}, found risk_score={int(v)}: {values['name']}") + return v + + @root_validator + def tags_observable(cls, values): + valid_roles = SES_OBSERVABLE_ROLE_MAPPING.keys() + valid_types = SES_OBSERVABLE_TYPE_MAPPING.keys() + + for value in values["observable"]: + if value['type'] in valid_types: + if 'Splunk Behavioral Analytics' in values["product"]: + continue + + if 'role' not in value: + raise ValueError('Observable role is missing for ' + values["name"]) + for role in value['role']: + if role not in valid_roles: + raise ValueError('Observable role ' + role + ' not valid for ' + values["name"] + '. valid options are ' + str(valid_roles)) + else: + raise ValueError('Observable type ' + value['type'] + ' not valid for ' + values["name"] + '. valid options are ' + str(valid_types)) + return values \ No newline at end of file diff --git a/contentctl/objects/story.py b/contentctl/objects/story.py index f71c05e4..53daa655 100644 --- a/contentctl/objects/story.py +++ b/contentctl/objects/story.py @@ -17,7 +17,7 @@ class Story(SecurityContentObject): #date: str #author: str #description: str - contentType: SecurityContentType = SecurityContentType.stories + #contentType: SecurityContentType = SecurityContentType.stories narrative: str check_references: bool = False #Validation is done in order, this field must be defined first references: list diff --git a/contentctl/objects/test_config.py b/contentctl/objects/test_config.py index dca3f2d6..e3ef907a 100644 --- a/contentctl/objects/test_config.py +++ b/contentctl/objects/test_config.py @@ -1,7 +1,7 @@ # Needed for a staticmethod to be able to return an instance of the class it belongs to from __future__ import annotations - +import git import validators import pathlib import yaml @@ -9,6 +9,7 @@ from pydantic import BaseModel, validator, root_validator, Extra, Field from dataclasses import dataclass from typing import Union +import re import docker import docker.errors @@ -39,70 +40,96 @@ def getTestConfigFromYMLFile(path: pathlib.Path): except Exception as e: print(f"Error loading test configuration file '{path}': {str(e)}") - -class TestConfig(BaseModel, extra=Extra.forbid, validate_assignment=True): - repo_path: str = Field(default=".", title="Path to the root of your app") - repo_url: Union[str, None] = Field( - default=None, - title="HTTP(s) path to the repo for repo_path. 
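The `risk_score` validator above only accepts a value that matches `round(impact * confidence / 100)`. A tiny worked example of that arithmetic (the impact and confidence values are invented):

```python
def expected_risk_score(impact: int, confidence: int) -> int:
    # Same arithmetic as the tags_calculate_risk_score validator
    return round(impact * confidence / 100)

# With impact=70 and confidence=80, the only risk_score the validator accepts is 56
assert expected_risk_score(70, 80) == 56
# Python rounds halves to the nearest even integer, so 12.5 becomes 12, not 13
assert expected_risk_score(50, 25) == 12
```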
If this field is blank, it will be inferred from the repo", - ) - # main_branch: Union[str,None] = Field(default=None, title="Main branch of the repo, if applicable.") - # test_branch: Union[str,None] = Field(default=None, title="Branch of the repo to be tested, if applicable.") - # commit_hash: Union[str,None] = Field(default=None, title="Commit hash of the repo state to be tested, if applicable") - target_infrastructure: DetectionTestingTargetInfrastructure = Field( - default=DetectionTestingTargetInfrastructure.container, - title=f"Control where testing should be launched. Choose one of {DetectionTestingTargetInfrastructure._member_names_}", - ) - full_image_path: str = Field( - default="registry.hub.docker.com/splunk/splunk:latest", - title="Full path to the container image to be used", - ) - container_name: str = Field( - default="splunk_contentctl_%d", - title="Template to be used for naming the Splunk Test Containers which will be created", - ) - post_test_behavior: PostTestBehavior = Field( - default=PostTestBehavior.pause_on_failure, - title=f"What to do after a test has completed. Choose one of {PostTestBehavior._member_names_}", - ) - mode: DetectionTestingMode = Field( - default=DetectionTestingMode.all, - title=f"Control which detections should be tested. Choose one of {DetectionTestingMode._member_names_}", - ) - detections_list: Union[list[str], None] = Field( - default=None, title="List of paths to detections which should be tested" - ) - num_containers: int = Field( - default=1, title="Number of testing containers to start in parallel." - ) - # pr_number: Union[int,None] = Field(default=None, title="The number of the PR to test") +class Infrastructure(BaseModel, extra=Extra.forbid, validate_assignment=True): splunk_app_username: Union[str, None] = Field( default="admin", title="The name of the user for testing" ) splunk_app_password: Union[str, None] = Field( default="password", title="Password for logging into Splunk Server" ) - splunkbase_username: Union[str, None] = Field( - default=None, - title="The username for logging into Splunkbase in case apps must be downloaded", - ) - splunkbase_password: Union[str, None] = Field( - default=None, - title="The password for logging into Splunkbase in case apps must be downloaded", - ) - apps: list[App] = Field( - default=App.get_default_apps(), - title="A list of all the apps to be installed on each container", - ) - test_instance_address: str = Field( + instance_address: str = Field( default="127.0.0.1", title="Domain name of IP address of Splunk server to be used for testing. 
Do NOT use a protocol, like http(s):// or 'localhost'", ) - + + instance_name: str = Field( + default="Splunk_Server_Name", + title="Template to be used for naming the Splunk Test Containers or referring to Test Servers.", + ) + hec_port: int = Field(default=8088, title="HTTP Event Collector Port") web_ui_port: int = Field(default=8000, title="Web UI Port") api_port: int = Field(default=8089, title="REST API Port") + @staticmethod + def get_infrastructure_containers(num_containers:int=1, splunk_app_username:str="admin", splunk_app_password:str="password", instance_name_template="splunk_contentctl_{index}")->list[Infrastructure]: + containers:list[Infrastructure] = [] + if num_containers < 0: + raise ValueError(f"Error - you must specifiy 1 or more containers, not {num_containers}.") + + #Get the starting ports + i = Infrastructure() #Instantiate to get the base port numbers + + for index in range(0, num_containers): + containers.append(Infrastructure(splunk_app_username=splunk_app_username, + splunk_app_password=splunk_app_password, + instance_name=instance_name_template.format(index=index), + hec_port=i.hec_port+(index*2), + web_ui_port=i.web_ui_port+index, + api_port=i.api_port+(index*2))) + + + return containers + + @validator("instance_name") + def validate_instance_name(cls,v,values): + if not re.fullmatch("[a-zA-Z0-9][a-zA-Z0-9_.-]*", v): + raise ValueError(f"The instance_name '{v}' is not valid. Please use an instance name which matches the regular expression '[a-zA-Z0-9][a-zA-Z0-9_.-]*'") + else: + return v + + @validator("instance_address") + def validate_instance_address(cls, v, values): + try: + if v.startswith("http"): + raise (Exception("should not begin with http")) + is_ipv4 = validators.ipv4(v) + if bool(is_ipv4): + return v + is_domain_name = validators.domain(v) + if bool(is_domain_name): + import socket + + try: + socket.gethostbyname(v) + return v + except Exception as e: + pass + raise (Exception("DNS Lookup failed")) + raise (Exception(f"not an IPV4 address or a domain name")) + except Exception as e: + raise ( + Exception( + f"Error, failed to validate instance_address '{v}': {str(e)}" + ) + ) + + + + @validator("splunk_app_password") + def validate_splunk_app_password(cls, v): + if v == None: + # No app password was provided, so generate one + v = Utils.get_random_password() + else: + MIN_PASSWORD_LENGTH = 6 + if len(v) < MIN_PASSWORD_LENGTH: + raise ( + ValueError( + f"Password is less than {MIN_PASSWORD_LENGTH} characters long. This password is extremely weak, please change it." 
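`Infrastructure.get_infrastructure_containers` above spaces the ports out so parallel containers on one host cannot collide: the HEC and API ports step by two per container (they sit next to each other at the 8088/8089 defaults), while the web UI port steps by one. A quick sketch of the resulting layout using the model's default base ports:

```python
# Default base ports from the Infrastructure model
BASE_HEC, BASE_WEB_UI, BASE_API = 8088, 8000, 8089

for index in range(3):
    print(
        f"splunk_contentctl_{index}: "
        f"hec={BASE_HEC + index * 2}, web_ui={BASE_WEB_UI + index}, api={BASE_API + index * 2}"
    )
# splunk_contentctl_0: hec=8088, web_ui=8000, api=8089
# splunk_contentctl_1: hec=8090, web_ui=8001, api=8091
# splunk_contentctl_2: hec=8092, web_ui=8002, api=8093
```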
+ ) + ) + return v @validator("hec_port", "web_ui_port", "api_port", each_item=True) def validate_ports_range(cls, v): @@ -119,130 +146,55 @@ def validate_ports_range(cls, v): ) ) return v - + @validator("hec_port", "web_ui_port", "api_port", each_item=False) def validate_ports_overlap(cls, v): - global PREVIOUSLY_ALLOCATED_PORTS + if type(v) is not list: # Otherwise this throws error when we update a single field return v if len(set(v)) != len(v): raise (ValueError(f"Duplicate ports detected: [{v}]")) - if PREVIOUSLY_ALLOCATED_PORTS.isdisjoint(v): - PREVIOUSLY_ALLOCATED_PORTS = PREVIOUSLY_ALLOCATED_PORTS.union() - else: - raise ( - ValueError( - f"Duplicate ports detected: [{PREVIOUSLY_ALLOCATED_PORTS.intersection(v)}]" - ) - ) + return v +class InfrastructureConfig(BaseModel, extra=Extra.forbid, validate_assignment=True): + infrastructure_type: DetectionTestingTargetInfrastructure = Field( + default=DetectionTestingTargetInfrastructure.container, + title=f"Control where testing should be launched. Choose one of {DetectionTestingTargetInfrastructure._member_names_}", + ) + full_image_path: str = Field( + default="registry.hub.docker.com/splunk/splunk:latest", + title="Full path to the container image to be used", + ) + infrastructures: list[Infrastructure] = [] + + + @validator("infrastructure_type") + def validate_infrastructure_type(cls, v, values): + if v == DetectionTestingTargetInfrastructure.server: + # No need to validate that the docker client is available + return v + elif v == DetectionTestingTargetInfrastructure.container: + # we need to make sure we can actually get the docker client from the environment + try: + docker.client.from_env() + except Exception as e: + raise ( + Exception( + f"Error, failed to get docker client. Is Docker Installed and running " + f"and are docker environment variables set properly? Error:\n\t{str(e)}" + ) + ) return v - # Ensure that at least 1 of test_branch, commit_hash, and/or pr_number were passed. - # Otherwise, what are we testing?? 
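When `infrastructure_type` is `container`, `validate_infrastructure_type` only passes if a Docker client can actually be built from the environment. A standalone version of that probe (requires the `docker` package and a reachable daemon):

```python
import docker


def docker_available() -> bool:
    """Same probe as validate_infrastructure_type: can a client be built from the env?"""
    try:
        docker.client.from_env()
        return True
    except Exception as e:
        print(f"Docker is not usable: {e}")
        return False


if __name__ == "__main__":
    print("container-based testing possible:", docker_available())
```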
- # @root_validator(pre=False) - # def ensure_there_is_something_to_test(cls, values): - # if 'test_branch' not in values and 'commit_hash' not in values and'pr_number' not in values: - # if 'mode' in values and values['mode'] == DetectionTestingMode.changes: - # raise(ValueError(f"Under mode [{DetectionTestingMode.changes}], 'test_branch', 'commit_hash', and/or 'pr_number' must be defined so that we know what to test.")) - - # return values - - # @validator('repo_path', always=True) - # def validate_repo_path(cls,v): - - # try: - # path = pathlib.Path(v) - # except Exception as e: - # raise(ValueError(f"Error, the provided path is is not a valid path: '{v}'")) - - # try: - # r = git.Repo(path) - # except Exception as e: - # raise(ValueError(f"Error, the provided path is not a valid git repo: '{path}'")) - - # try: - - # if ALWAYS_PULL_REPO: - # r.remotes.origin.pull() - # except Exception as e: - # raise ValueError(f"Error pulling git repository {v}: {str(e)}") - - # return v - - # @validator('repo_url', always=True) - # def validate_repo_url(cls, v, values): - # Utils.check_required_fields('repo_url', values, ['repo_path']) - - # #First try to get the value from the repo - # try: - # remote_url_from_repo = git.Repo(values['repo_path']).remotes.origin.url - # except Exception as e: - # raise(ValueError(f"Error reading remote_url from the repo located at {values['repo_path']}")) - - # if v is not None and remote_url_from_repo != v: - # raise(ValueError(f"The url of the remote repo supplied in the config file {v} does not "\ - # f"match the value read from the repository at {values['repo_path']}, {remote_url_from_repo}")) - - # if v is None: - # v = remote_url_from_repo - - # #Ensure that the url is the proper format - # try: - # if bool(validators.url(v)) == False: - # raise(Exception) - # except: - # raise(ValueError(f"Error validating the repo_url. The url is not valid: {v}")) - - # return v - - # @validator('main_branch', always=True) - # def valid_main_branch(cls, v, values): - # Utils.check_required_fields('main_branch', values, ['repo_path', 'repo_url']) - - # if v is None: - # print(f"main_branch is not supplied. 
Inferring from '{values['repo_path']}'...",end='') - - # main_branch = Utils.get_default_branch_name(values['repo_path'], values['repo_url']) - # print(f"main_branch name '{main_branch}' inferred'") - # #continue with the validation - # v = main_branch - - # try: - # Utils.validate_git_branch_name(values['repo_path'],values['repo_url'], v) - # except Exception as e: - # raise ValueError(f"Error validating main_branch: {str(e)}") - # return v - - # @validator('test_branch', always=True) - # def validate_test_branch(cls, v, values): - # Utils.check_required_fields('test_branch', values, ['repo_path', 'repo_url', 'main_branch']) - # if v is None: - # print(f"No test_branch provided, so we will default to using the main_branch '{values['main_branch']}'") - # return values['main_branch'] - # try: - # Utils.validate_git_branch_name(values['repo_path'],values['repo_url'], v) - # except Exception as e: - # raise ValueError(f"Error validating test_branch: {str(e)}") - # return v - - # @validator('commit_hash', always=True) - # def validate_commit_hash(cls, v, values): - # Utils.check_required_fields('commit_hash', values, ['repo_path', 'repo_url', 'test_branch']) - - # try: - # #We can a hash with this function too - # Utils.validate_git_hash(values['repo_path'],values['repo_url'], v, values['test_branch']) - # except Exception as e: - # raise ValueError(f"Error validating commit_hash '{v}': {str(e)}") - # return v - - @validator("full_image_path", always=True) + + + + @validator("full_image_path") def validate_full_image_path(cls, v, values): if ( - values.get("target_infrastructure", None) + values.get("infrastructure_type", None) == DetectionTestingTargetInfrastructure.server.value ): print( @@ -330,13 +282,236 @@ def validate_full_image_path(cls, v, values): return v + @validator("infrastructures", always=True) + def validate_infrastructures(cls, v, values): + MAX_RECOMMENDED_CONTAINERS_BEFORE_WARNING = 2 + if values.get("infrastructure_type",None) == DetectionTestingTargetInfrastructure.container and len(v) == 0: + v = [Infrastructure()] + + if len(v) < 1: + print("Fix number of infrastructure validation later") + return v + raise ( + ValueError( + f"Error validating infrastructures. Test must be run with AT LEAST 1 infrastructure, not {len(v)}" + ) + ) + if (values.get("infrastructure_type", None) == DetectionTestingTargetInfrastructure.container.value) and len(v) > MAX_RECOMMENDED_CONTAINERS_BEFORE_WARNING: + print( + f"You requested to run with [{v}] containers which may use a very large amount of resources " + "as they all run in parallel. The maximum suggested number of parallel containers is " + f"[{MAX_RECOMMENDED_CONTAINERS_BEFORE_WARNING}]. We will do what you asked, but be warned!" 
+ ) + return v + + + @validator("infrastructures", each_item=False) + def validate_ports_overlap(cls, v, values): + ports = set() + if values.get("infrastructure_type", None) == DetectionTestingTargetInfrastructure.server.value: + #ports are allowed to overlap, they are on different servers + return v + + if len(v) == 0: + raise ValueError("Error, there must be at least one test infrastructure defined in infrastructures.") + for infrastructure in v: + for k in ["hec_port", "web_ui_port", "api_port"]: + if getattr(infrastructure, k) in ports: + raise ValueError(f"Port {getattr(infrastructure, k)} used more than once in container infrastructure ports") + ports.add(getattr(infrastructure, k)) + return v + +class VersionControlConfig(BaseModel, extra=Extra.forbid, validate_assignment=True): + repo_path: str = Field(default=".", title="Path to the root of your app") + repo_url: str = Field( + default="https://github.com/your_organization/your_repo", + title="HTTP(s) path to the repo for repo_path. If this field is blank, it will be inferred from the repo", + ) + main_branch: str = Field(default="main", title="Main branch of the repo, if applicable.") + test_branch: str = Field(default="main", title="Branch of the repo to be tested, if applicable.") + commit_hash: Union[str,None] = Field(default=None, title="Commit hash of the repo state to be tested, if applicable") + pr_number: Union[int,None] = Field(default=None, title="The number of the PR to test") + + @validator('repo_path') + def validate_repo_path(cls,v): + print(f"checking repo path '{v}'") + try: + path = pathlib.Path(v) + except Exception as e: + + raise(ValueError(f"Error, the provided path is is not a valid path: '{v}'")) + + try: + r = git.Repo(path) + except Exception as e: + + raise(ValueError(f"Error, the provided path is not a valid git repo: '{path}'")) + + try: + + if ALWAYS_PULL_REPO: + r.remotes.origin.pull() + except Exception as e: + raise ValueError(f"Error pulling git repository {v}: {str(e)}") + print("repo path looks good") + return v + + @validator('repo_url') + def validate_repo_url(cls, v, values): + #First try to get the value from the repo + try: + remotes = git.Repo(values['repo_path']).remotes + except Exception as e: + raise ValueError(f"Error - repo at {values['repo_path']} has no remotes. Repo must be tracked in a remote git repo.") + + try: + remote_url_from_repo = remotes.origin.url + except Exception as e: + raise(ValueError(f"Error reading remote_url from the repo located at '{values['repo_path']}'")) + + if v is not None and remote_url_from_repo != v: + raise(ValueError(f"The url of the remote repo supplied in the config file {v} does not "\ + f"match the value read from the repository at {values['repo_path']}, {remote_url_from_repo}")) + + if v is None: + v = remote_url_from_repo + + #Ensure that the url is the proper format + # try: + # if bool(validators.url(v)) == False: + # raise(Exception) + # except: + # raise(ValueError(f"Error validating the repo_url. The url is not valid: {v}")) + + return v + + @validator('main_branch') + def valid_main_branch(cls, v, values): + if v is None: + print(f"main_branch is not supplied. 
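The infrastructures-level `validate_ports_overlap` above collects every hec/web_ui/api port across all containers and rejects duplicates (server infrastructures are exempt because their ports live on different hosts). A self-contained sketch with plain dicts standing in for `Infrastructure` objects:

```python
def find_port_clashes(infrastructures: list[dict]) -> set[int]:
    """Flag any hec/web_ui/api port used by more than one container,
    in the spirit of validate_ports_overlap."""
    seen: set[int] = set()
    clashes: set[int] = set()
    for infra in infrastructures:
        for key in ("hec_port", "web_ui_port", "api_port"):
            port = infra[key]
            if port in seen:
                clashes.add(port)
            seen.add(port)
    return clashes


# Two hypothetical containers accidentally sharing the web UI port
infrastructures = [
    {"hec_port": 8088, "web_ui_port": 8000, "api_port": 8089},
    {"hec_port": 8090, "web_ui_port": 8000, "api_port": 8091},
]
print(find_port_clashes(infrastructures))  # {8000}
```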
Inferring from '{values['repo_path']}'...",end='') + + main_branch = Utils.get_default_branch_name(values['repo_path'], values['repo_url']) + print(f"main_branch name '{main_branch}' inferred'") + #continue with the validation + v = main_branch + + try: + Utils.validate_git_branch_name(values['repo_path'],values['repo_url'], v) + except Exception as e: + raise ValueError(f"Error validating main_branch: {str(e)}") + return v + + @validator('test_branch') + def validate_test_branch(cls, v, values): + if v is None: + print(f"No test_branch provided, so we will default to using the main_branch '{values['main_branch']}'") + v = values['main_branch'] + try: + Utils.validate_git_branch_name(values['repo_path'],values['repo_url'], v) + except Exception as e: + raise ValueError(f"Error validating test_branch: {str(e)}") + + r = git.Repo(values.get("repo_path")) + try: + if r.active_branch.name != v: + print(f"We are trying to test {v} but the current active branch is {r.active_branch}") + print(f"Checking out {v}") + r.git.checkout(v) + except Exception as e: + raise ValueError(f"Error checking out test_branch '{v}': {str(e)}") + return v + + @validator('commit_hash') + def validate_commit_hash(cls, v, values): + try: + #We can a hash with this function too + Utils.validate_git_hash(values['repo_path'],values['repo_url'], v, values['test_branch']) + except Exception as e: + raise ValueError(f"Error validating commit_hash '{v}': {str(e)}") + return v + + @validator('pr_number') + def validate_pr_number(cls, v, values): + if v == None: + return v + + hash = Utils.validate_git_pull_request(values['repo_path'], v) + + #Ensure that the hash is equal to the one in the config file, if it exists. + if values['commit_hash'] is None: + values['commit_hash'] = hash + else: + if values['commit_hash'] != hash: + raise(ValueError(f"commit_hash specified in configuration was {values['commit_hash']}, but commit_hash"\ + f" from pr_number {v} was {hash}. These must match. If you're testing"\ + " a PR, you probably do NOT want to provide the commit_hash in the configuration file "\ + "and always want to test the head of the PR. This will be done automatically if you do "\ + "not provide the commit_hash.")) + + return v + + +class TestConfig(BaseModel, extra=Extra.forbid, validate_assignment=True): + + version_control_config: Union[VersionControlConfig,None] = VersionControlConfig() + + infrastructure_config: InfrastructureConfig = Field( + default=InfrastructureConfig(), + title=f"The infrastructure for testing to be run on", + ) + + + post_test_behavior: PostTestBehavior = Field( + default=PostTestBehavior.pause_on_failure, + title=f"What to do after a test has completed. Choose one of {PostTestBehavior._member_names_}", + ) + mode: DetectionTestingMode = Field( + default=DetectionTestingMode.all, + title=f"Control which detections should be tested. 
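The `VersionControlConfig` validators lean on GitPython: `repo_url` is read from the origin remote, and `validate_test_branch` checks the branch out if it is not already active. A minimal sketch, assuming the current directory is a git clone with an `origin` remote and that the hypothetical test branch exists locally:

```python
import git

# Assumes "." is a git clone with an "origin" remote, as validate_repo_url expects
repo = git.Repo(".")
print("origin url   :", repo.remotes.origin.url)
print("active branch:", repo.active_branch.name)

# validate_test_branch switches the working tree to the branch under test
test_branch = "main"  # hypothetical value from the configuration
if repo.active_branch.name != test_branch:
    print(f"Checking out {test_branch}")
    repo.git.checkout(test_branch)
```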
Choose one of {DetectionTestingMode._member_names_}", + ) + detections_list: Union[list[str], None] = Field( + default=None, title="List of paths to detections which should be tested" + ) + + + splunkbase_username: Union[str, None] = Field( + default=None, + title="The username for logging into Splunkbase in case apps must be downloaded", + ) + splunkbase_password: Union[str, None] = Field( + default=None, + title="The password for logging into Splunkbase in case apps must be downloaded", + ) + apps: list[App] = Field( + default=App.get_default_apps(), + title="A list of all the apps to be installed on each container", + ) + + + + + + + + + + # Ensure that at least 1 of test_branch, commit_hash, and/or pr_number were passed. + # Otherwise, what are we testing?? + # @root_validator(pre=False) + def ensure_there_is_something_to_test(cls, values): + if 'test_branch' not in values and 'commit_hash' not in values and'pr_number' not in values: + if 'mode' in values and values['mode'] == DetectionTestingMode.changes: + raise(ValueError(f"Under mode [{DetectionTestingMode.changes}], 'test_branch', 'commit_hash', and/or 'pr_number' must be defined so that we know what to test.")) + + return values + + + # presumably the post test behavior is validated by the enum? # presumably the mode is validated by the enum? @validator("detections_list", always=True) def validate_detections_list(cls, v, values): - - Utils.check_required_fields("detections_list", values, ["mode", "repo_path"]) # A detections list can only be provided if the mode is selected # otherwise, we must throw an error @@ -362,9 +537,8 @@ def validate_detections_list(cls, v, values): ) for detection in v: try: - full_path = os.path.join(values["repo_path"], detection) - if not pathlib.Path(full_path).exists(): - all_errors.append(full_path) + if not pathlib.Path(detection).exists(): + all_errors.append(detection) except Exception as e: all_errors.append( f"Unexpected error validating path '{detection}': {str(e)}" @@ -380,67 +554,18 @@ def validate_detections_list(cls, v, values): return v - @validator("num_containers", always=True) - def validate_num_containers(cls, v): - MAX_RECOMMENDED_CONTAINERS_BEFORE_WARNING = 2 - if v < 1: - raise ( - ValueError( - f"Error validating num_containers. Test must be run with at least 1 container, not {v}" - ) - ) - if v > MAX_RECOMMENDED_CONTAINERS_BEFORE_WARNING: - print( - f"You requested to run with [{v}] containers which may use a very large amount of resources " - "as they all run in parallel. The maximum suggested number of parallel containers is " - f"[{MAX_RECOMMENDED_CONTAINERS_BEFORE_WARNING}]. We will do what you asked, but be warned!" - ) - return v - - # @validator('pr_number', always=True) - # def validate_pr_number(cls, v, values): - # Utils.check_required_fields('pr_number', values, ['repo_path', 'commit_hash']) - - # if v == None: - # return v - # hash = Utils.validate_git_pull_request(values['repo_path'], v) - # #Ensure that the hash is equal to the one in the config file, if it exists. - # if values['commit_hash'] is None: - # values['commit_hash'] = hash - # else: - # if values['commit_hash'] != hash: - # raise(ValueError(f"commit_hash specified in configuration was {values['commit_hash']}, but commit_hash"\ - # f" from pr_number {v} was {hash}. These must match. If you're testing"\ - # " a PR, you probably do NOT want to provide the commit_hash in the configuration file "\ - # "and always want to test the head of the PR. 
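Note that `validate_detections_list` no longer joins each entry with `repo_path`; paths are now checked relative to the current working directory, as shown further down in this hunk. A short sketch of that existence check with invented file names:

```python
import pathlib

# Hypothetical detection files, resolved relative to the current working directory
detections_list = [
    "detections/endpoint/suspicious_process.yml",
    "detections/cloud/does_not_exist.yml",
]

missing = [d for d in detections_list if not pathlib.Path(d).exists()]
if missing:
    print("Error validating detections_list, the following files do not exist:")
    for path in missing:
        print(f" - {path}")
```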
This will be done automatically if you do "\ - # "not provide the commit_hash.")) + - # return v + - @validator("splunk_app_password", always=True) - def validate_splunk_app_password(cls, v): - if v == None: - # No app password was provided, so generate one - v = Utils.get_random_password() - else: - MIN_PASSWORD_LENGTH = 6 - if len(v) < MIN_PASSWORD_LENGTH: - raise ( - ValueError( - f"Password is less than {MIN_PASSWORD_LENGTH} characters long. This password is extremely weak, please change it." - ) - ) - return v - - @validator("splunkbase_username", always=True) + @validator("splunkbase_username") def validate_splunkbase_username(cls, v): return v - @validator("splunkbase_password", always=True) + @validator("splunkbase_password") def validate_splunkbase_password(cls, v, values): - Utils.check_required_fields("repo_url", values, ["splunkbase_username"]) if values["splunkbase_username"] == None: return v elif (v == None and values["splunkbase_username"] != None) or ( @@ -460,16 +585,14 @@ def validate_splunkbase_password(cls, v, values): @validator("apps",) def validate_apps(cls, v, values): - Utils.check_required_fields( - "repo_url", values, ["splunkbase_username", "splunkbase_password"] - ) + app_errors = [] # ensure that the splunkbase username and password are provided username = values["splunkbase_username"] password = values["splunkbase_password"] - app_directory = pathlib.Path(values["repo_path"]) / LOCAL_APP_DIR + app_directory = LOCAL_APP_DIR try: os.makedirs(LOCAL_APP_DIR, exist_ok=True) except Exception as e: @@ -496,46 +619,4 @@ def validate_apps(cls, v, values): return v - @validator("target_infrastructure", always=True) - def validate_target_infrastructure(cls, v, values): - if v == DetectionTestingTargetInfrastructure.server: - # No need to validate that the docker client is available - return v - elif v == DetectionTestingTargetInfrastructure.container: - # we need to make sure we can actually get the docker client from the environment - try: - docker.client.from_env() - except Exception as e: - raise ( - Exception( - f"Error, failed to get docker client. Is Docker Installed and running " - f"and are docker environment variables set properly? 
Error:\n\t{str(e)}" - ) - ) - return v - - @validator("test_instance_address", always=True) - def validate_test_instance_address(cls, v, values): - try: - if v.startswith("http"): - raise (Exception("should not begin with http")) - is_ipv4 = validators.ipv4(v) - if bool(is_ipv4): - return v - is_domain_name = validators.domain(v) - if bool(is_domain_name): - import socket - - try: - socket.gethostbyname(v) - return v - except Exception as e: - pass - raise (Exception("DNS Lookup failed")) - raise (Exception(f"not an IPV4 address or a domain name")) - except Exception as e: - raise ( - Exception( - f"Error, failed to validate test_instance_address '{v}': {str(e)}" - ) - ) + \ No newline at end of file diff --git a/contentctl/objects/unit_test.py b/contentctl/objects/unit_test.py index e629964b..5167480a 100644 --- a/contentctl/objects/unit_test.py +++ b/contentctl/objects/unit_test.py @@ -8,9 +8,11 @@ from contentctl.objects.unit_test_attack_data import UnitTestAttackData from contentctl.objects.unit_test_result import UnitTestResult from contentctl.objects.enums import SecurityContentType -class UnitTest(SecurityContentObject): - contentType: SecurityContentType = SecurityContentType.unit_tests - #name: str + + +class UnitTest(BaseModel): + #contentType: SecurityContentType = SecurityContentType.unit_tests + name: str pass_condition: Union[str, None] = None earliest_time: Union[str, None] = None latest_time: Union[str, None] = None diff --git a/contentctl/objects/unit_test_attack_data.py b/contentctl/objects/unit_test_attack_data.py index eeb54e42..8bb38697 100644 --- a/contentctl/objects/unit_test_attack_data.py +++ b/contentctl/objects/unit_test_attack_data.py @@ -4,6 +4,7 @@ class UnitTestAttackData(BaseModel): + file_name: str = None data: str = None source: str = None sourcetype: str = None diff --git a/contentctl/objects/unit_test_old.py b/contentctl/objects/unit_test_old.py new file mode 100644 index 00000000..83154f47 --- /dev/null +++ b/contentctl/objects/unit_test_old.py @@ -0,0 +1,9 @@ +from pydantic import BaseModel, validator, ValidationError + + +from contentctl.objects.unit_test import UnitTest + + +class UnitTestOld(BaseModel): + name: str + tests: list[UnitTest] \ No newline at end of file diff --git a/contentctl/objects/unit_test_result.py b/contentctl/objects/unit_test_result.py index 01baf5ed..b6054cac 100644 --- a/contentctl/objects/unit_test_result.py +++ b/contentctl/objects/unit_test_result.py @@ -4,11 +4,12 @@ from typing import Union from datetime import timedelta from splunklib.data import Record -from contentctl.objects.test_config import TestConfig +from contentctl.objects.test_config import Infrastructure from contentctl.helper.utils import Utils FORCE_TEST_FAILURE_FOR_MISSING_OBSERVABLE = False +NO_SID = "Testing Failed, NO Search ID" SID_TEMPLATE = "{server}:{web_port}/en-US/app/search/search?sid={sid}" @@ -17,12 +18,13 @@ class UnitTestResult(BaseModel): missing_observables: list[str] = [] sid_link: Union[None, str] = None message: Union[None, str] = None - exception: bool = False + exception: Union[Exception,None] = None success: bool = False duration: float = 0 class Config: validate_assignment = True + arbitrary_types_allowed = True def get_summary_dict( self, @@ -31,8 +33,12 @@ def get_summary_dict( ) -> dict: results_dict = {} for field in model_fields: - value = getattr(self, field) - results_dict[field] = getattr(self, field) + if getattr(self, field) is not None: + if isinstance(getattr(self, field), Exception): + #Exception cannot be serialized, so 
convert to str + results_dict[field] = str(getattr(self, field)) + else: + results_dict[field] = getattr(self, field) for field in job_fields: if self.job_content is not None: @@ -50,170 +56,41 @@ def get_summary_dict( def set_job_content( self, - content: Union[Record, None, Exception], - config: TestConfig, + content: Union[Record, None], + config: Infrastructure, + exception: Union[Exception, None] = None, success: bool = False, duration: float = 0, ): self.duration = round(duration, 2) - if isinstance(content, Record): + self.exception = exception + self.success = success + + if content is not None: self.job_content = content - self.success = success + if success: self.message = "TEST PASSED" else: self.message = "TEST FAILED" - self.exception = False + - if not config.test_instance_address.startswith("http://"): + if not config.instance_address.startswith("http://"): sid_template = f"http://{SID_TEMPLATE}" else: sid_template = SID_TEMPLATE self.sid_link = sid_template.format( - server=config.test_instance_address, + server=config.instance_address, web_port=config.web_ui_port, sid=content.get("sid", None), ) - elif isinstance(content, Exception): - self.job_content = None - self.success = False - self.exception = True - self.message = f"Error during test: {str(content)}" - elif content is None: self.job_content = None self.success = False - self.exception = True - self.message = f"Error during test: unable to run test" - - else: - msg = f"Error: Unknown type for content in UnitTestResult: {type(content)}" - print(msg) - self.job_content = None - self.success = False - self.exception = True - self.message = f"Error during test - unable to run test {msg}" - return self.success - - """ - def get_summary(self, test_name: str, verbose=False) -> str: - lines: list[str] = [] - lines.append(f"SEARCH NAME : '{test_name}'") - if verbose or self.determine_success() == False: - lines.append(f"SEARCH : {self.get_search()}") - lines.append(f"SUCCESS : {self.determine_success()}") - if self.exception is True: - lines.append(f"EXCEPTION : {self.exception}") - if self.message is not None: - lines.append(f"MESSAGE : {self.message}") - else: - lines.append(f"SUCCESS : {self.determine_success()}") - if len(self.missing_observables) > 0: - lines.append(f"MISSING OBSERVABLES: {self.missing_observables}") - - return "\n\t".join(lines) - - def get_search(self) -> str: - if self.job_content is not None: - return self.job_content.get( - "search", "NO SEARCH FOUND - JOB MISSING SEARCH FIELD" - ) - return "NO SEARCH FOUND - JOB IS EMPTY" - - def add_message(self, message: str): - if self.message is None: - self.message = message - else: - self.message += f"\n{message}" - - @root_validator(pre=False) - def update_success(cls, values): - if values["job_content"] is None: - values["exception"] = True - values["success"] = False - if values["message"] is None: - # If the message has not been overridden, then put in a default - values["message"] = "Job Content was None - unknown failure reason" - # Otherwise, a message has been passed so don't overwrite it - return values - - if "messages" in values["job_content"]: - fatal_or_error = False - all_messages = values["job_content"]["messages"] - unique_messages = set() - for level, level_messages in all_messages.items(): - if level in ["info"]: - # we will skip any info messages - continue - elif level in ["fatal", "error"]: - for msg in level_messages: - # These error indicate a failure - the search was - # not successful. 
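When a search job ran, `set_job_content` records a clickable link back to the Splunk search using `SID_TEMPLATE` and the `Infrastructure` fields. A standalone sketch of how that link is assembled (the search id value is invented):

```python
SID_TEMPLATE = "{server}:{web_port}/en-US/app/search/search?sid={sid}"

instance_address = "127.0.0.1"   # Infrastructure.instance_address
web_ui_port = 8000               # Infrastructure.web_ui_port
sid = "1680300000.12345"         # hypothetical search id returned with the job content

template = SID_TEMPLATE if instance_address.startswith("http://") else f"http://{SID_TEMPLATE}"
print(template.format(server=instance_address, web_port=web_ui_port, sid=sid))
# http://127.0.0.1:8000/en-US/app/search/search?sid=1680300000.12345
```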
They are important for debugging, - # so we will pass them to the user. - # They also represent a an error during the test - values["logic"] = False - values["success"] = False - values["exception"] = True - unique_messages.add(msg) - fatal_or_error = True - else: - unknown_messages_as_single_string = "\n".join(level_messages) - unique_messages.add(unknown_messages_as_single_string) - - if len(unique_messages) == 0: - values["message"] = None # No messages + self.message = f"Error during test: {str(content)}" + self.sid_link = NO_SID - else: - # Merge all those messages together - values["message"] = "\n".join(unique_messages) - - if fatal_or_error: - return values - - # Can there still be a success even if there was an error/fatal message above? Probably not? - if ( - "resultCount" in values["job_content"] - and int(values["job_content"]["resultCount"]) == 1 - ): - # in the future we probably want other metrics, about noise or others, here - values["logic"] = True - values["success"] = True - - elif ( - "resultCount" in values["job_content"] - and int(values["job_content"]["resultCount"]) != 1 - ): - values["logic"] = False - values["success"] = False - - else: - raise (Exception("Result created with indeterminate success.")) - - return values - - def update_missing_observables(self, missing_observables: set[str]): - self.missing_observables = list(missing_observables) - self.success = self.determine_success() - - def determine_success(self) -> bool: - # values_dict = self.update_success(self.__dict__) - # self.exception = values_dict['exception'] - # self.success = values_dict['success'] return self.success - def get_job_field(self, fieldName: str): - if self.job_content is None: - # return f"FIELD NAME {fieldName} does not exist in Job Content because Job Content is NONE" - return None - return self.job_content.get(fieldName, None) - - def get_time(self) -> timedelta: - if self.job_content is None: - return timedelta(0) - elif "runDuration" in self.job_content: - duration = str(self.job_content["runDuration"]) - return timedelta(float(duration)) - else: - raise (Exception("runDuration missing from job.")) - """ + \ No newline at end of file diff --git a/contentctl/output/api_json_output.py b/contentctl/output/api_json_output.py index 8b935ee0..4ae5ced8 100644 --- a/contentctl/output/api_json_output.py +++ b/contentctl/output/api_json_output.py @@ -1,4 +1,5 @@ import os +import json from contentctl.output.json_writer import JsonWriter @@ -7,10 +8,11 @@ class ApiJsonOutput(): - def writeObjects(self, objects: list, output_path: str, type: SecurityContentType = None) -> None: + def writeObjects(self, objects: list, output_path: str, type: SecurityContentType = None) -> None: if type == SecurityContentType.detections: obj_array = [] for detection in objects: + detection.id = str(detection.id) obj_array.append(detection.dict(exclude_none=True, exclude = { @@ -22,15 +24,54 @@ def writeObjects(self, objects: list, output_path: str, type: SecurityContentTyp "baselines": True, "mappings": True, "test": True, - "deployment": True + "deployment": True, + "type": True, + "status": True, + "data_source": True, + "tests": True, + "cve_enrichment": True, + "tags": + { + "file_path": True, + "required_fields": True, + "confidence": True, + "impact": True, + "product": True, + "cve": True + } } )) JsonWriter.writeJsonObject(os.path.join(output_path, 'detections.json'), {'detections': obj_array }) + + ### Code to be added to contentctl to ship filter macros to macros.json + + obj_array = [] + for 
detection in objects: + detection_dict = detection.dict() + if "macros" in detection_dict: + for macro in detection_dict["macros"]: + obj_array.append(macro) + + uniques:set[str] = set() + for obj in obj_array: + if obj.get("arguments",None) != None: + uniques.add(json.dumps(obj,sort_keys=True)) + else: + obj.pop("arguments") + uniques.add(json.dumps(obj, sort_keys=True)) + + obj_array = [] + for item in uniques: + obj_array.append(json.loads(item)) + + JsonWriter.writeJsonObject(os.path.join(output_path, 'macros.json'), {'macros': obj_array}) + elif type == SecurityContentType.stories: obj_array = [] for story in objects: + story.id = str(story.id) obj_array.append(story.dict(exclude_none=True, exclude = { @@ -43,6 +84,7 @@ def writeObjects(self, objects: list, output_path: str, type: SecurityContentTyp elif type == SecurityContentType.baselines: obj_array = [] for baseline in objects: + baseline.id = str(baseline.id) obj_array.append(baseline.dict( exclude = { @@ -55,6 +97,7 @@ def writeObjects(self, objects: list, output_path: str, type: SecurityContentTyp elif type == SecurityContentType.investigations: obj_array = [] for investigation in objects: + investigation.id = str(investigation.id) obj_array.append(investigation.dict(exclude_none=True)) JsonWriter.writeJsonObject(os.path.join(output_path, 'response_tasks.json'), {'response_tasks': obj_array }) @@ -62,21 +105,17 @@ def writeObjects(self, objects: list, output_path: str, type: SecurityContentTyp elif type == SecurityContentType.lookups: obj_array = [] for lookup in objects: + obj_array.append(lookup.dict(exclude_none=True)) - JsonWriter.writeJsonObject(os.path.join(output_path, 'lookups.json'), {'lookups': obj_array }) - elif type == SecurityContentType.macros: - obj_array = [] - for macro in objects: - obj_array.append(macro.dict(exclude_none=True)) + JsonWriter.writeJsonObject(os.path.join(output_path, 'lookups.json'), {'lookups': obj_array }) - JsonWriter.writeJsonObject(os.path.join(output_path, 'macros.json'), {'macros': obj_array }) elif type == SecurityContentType.deployments: obj_array = [] for deployment in objects: + deployment.id = str(deployment.id) obj_array.append(deployment.dict(exclude_none=True)) JsonWriter.writeJsonObject(os.path.join(output_path, 'deployments.json'), {'deployments': obj_array }) - diff --git a/contentctl/output/ba_yml_output.py b/contentctl/output/ba_yml_output.py index dd5ba9de..6b316f34 100644 --- a/contentctl/output/ba_yml_output.py +++ b/contentctl/output/ba_yml_output.py @@ -1,9 +1,12 @@ import os import re +from urllib.parse import urlparse + from contentctl.output.yml_writer import YmlWriter from contentctl.objects.enums import SecurityContentType from contentctl.output.finding_report_writer import FindingReportObject +from contentctl.objects.unit_test_old import UnitTestOld class BAYmlOutput(): @@ -26,31 +29,70 @@ def writeObjects(self, objects: list, output_path: str, type: SecurityContentTyp else: file_path = os.path.join(output_path, 'srs', file_name) + # add research object + RESEARCH_SITE_BASE = 'https://research.splunk.com/' + research_site_url = RESEARCH_SITE_BASE + obj.source + "/" + obj.id + "/" + obj.tags.research_site_url = research_site_url + + # add ocsf schema tag + obj.tags.event_schema = 'ocsf' + body = FindingReportObject.writeFindingReport(obj) + if obj.test: + test_dict = { + "name": obj.name + " Unit Test", + "tests": [obj.test.dict()] + } + test_dict["tests"][0]["name"] = obj.name + for count in range(len(test_dict["tests"][0]["attack_data"])): + a = 
urlparse(test_dict["tests"][0]["attack_data"][count]["data"]) + test_dict["tests"][0]["attack_data"][count]["file_name"] = os.path.basename(a.path) + test = UnitTestOld.parse_obj(test_dict) + + obj.test = test + + # create annotations object + obj.tags.annotations = { + "analytic_story": obj.tags.analytic_story, + "cis20": obj.tags.cis20, + "kill_chain_phases": obj.tags.kill_chain_phases, + "mitre_attack_id": obj.tags.mitre_attack_id, + "nist": obj.tags.nist + } + + obj.runtime = "SPL-DSP" + # remove unncessary fields YmlWriter.writeYmlFile(file_path, obj.dict( + exclude_none=True, include = { "name": True, "id": True, "version": True, + "status": True, "description": True, "search": True, "how_to_implement": True, "known_false_positives": True, "references": True, + "runtime": True, "tags": { - "analytic_story": True, - "cis20" : True, - "nist": True, - "kill_chain_phases": True, - "mitre_attack_id": True, + #"analytic_story": True, + #"cis20" : True, + #"nist": True, + #"kill_chain_phases": True, + "annotations": True, + "mappings": True, + #"mitre_attack_id": True, "risk_severity": True, "risk_score": True, "security_domain": True, - "required_fields": True + "required_fields": True, + "research_site_url": True, + "event_schema": True }, "test": { @@ -77,7 +119,7 @@ def writeObjects(self, objects: list, output_path: str, type: SecurityContentTyp # Add Finding Report Object with open(file_path, 'r') as file: - data = file.read().replace('--body--', body) + data = file.read().replace('--finding_report--', body) f = open(file_path, "w") f.write(data) diff --git a/contentctl/output/conf_output.py b/contentctl/output/conf_output.py index aaac72d3..4115a5fd 100644 --- a/contentctl/output/conf_output.py +++ b/contentctl/output/conf_output.py @@ -24,7 +24,7 @@ class ConfOutput: def __init__(self, input_path: str, config: Config): self.input_path = input_path self.config = config - self.output_path = pathlib.Path(self.config.build.path_root) /self.config.build.name + self.output_path = pathlib.Path(os.path.join(self.input_path, self.config.build.path_root)) /self.config.build.name self.output_path.mkdir(parents=True, exist_ok=True) template_splunk_app_path = os.path.join(os.path.dirname(__file__), 'templates/splunk_app') shutil.copytree(template_splunk_app_path, self.output_path, dirs_exist_ok=True) @@ -39,11 +39,13 @@ def writeHeaders(self) -> None: ConfWriter.writeConfFileHeader(self.output_path/'default/transforms.conf', self.config) ConfWriter.writeConfFileHeader(self.output_path/'default/workflow_actions.conf', self.config) ConfWriter.writeConfFileHeader(self.output_path/'default/app.conf', self.config) - + ConfWriter.writeConfFileHeader(self.output_path/'default/content-version.conf', self.config) def writeAppConf(self): ConfWriter.writeConfFile(self.output_path/"default"/"app.conf", "app.conf.j2", self.config, [self.config.build] ) + ConfWriter.writeConfFile(self.output_path/"default"/"content-version.conf", "content-version.j2", self.config, [self.config.build] ) + ConfWriter.writeConfFile(self.output_path/"app.manifest", "app.manifest.j2", self.config, [self.config.build] ) def writeObjects(self, objects: list, type: SecurityContentType = None) -> None: if type == SecurityContentType.detections: @@ -110,14 +112,30 @@ def writeObjects(self, objects: list, type: SecurityContentType = None) -> None: 'transforms.j2', self.config, objects) - + #import code + #code.interact(local=locals()) if self.input_path is None: raise(Exception(f"input_path is required for lookups, but received 
[{self.input_path}]")) files = glob.iglob(os.path.join(self.input_path, 'lookups', '*.csv')) - for file in files: - if os.path.isfile(file): - shutil.copy(file, os.path.join(self.output_path, 'lookups')) + lookup_folder = self.output_path/"lookups" + if lookup_folder.exists(): + # Remove it since we want to remove any previous lookups that are not + # currently part of the app + if lookup_folder.is_dir(): + shutil.rmtree(lookup_folder) + else: + lookup_folder.unlink() + + # Make the new folder for the lookups + lookup_folder.mkdir() + + #Copy each lookup into the folder + for lookup_name in files: + lookup_path = pathlib.Path(lookup_name) + if lookup_path.is_file(): + lookup_target_path = self.output_path/"lookups"/lookup_path.name + shutil.copy(lookup_path, lookup_target_path) elif type == SecurityContentType.macros: ConfWriter.writeConfFile(self.output_path/'default/macros.conf', @@ -128,38 +146,38 @@ def writeObjects(self, objects: list, type: SecurityContentType = None) -> None: def packageApp(self) -> None: - input_app_path = pathlib.Path(self.config.build.path_root)/f"{self.config.build.name}" + # input_app_path = pathlib.Path(self.config.build.path_root)/f"{self.config.build.name}" - readme_file = pathlib.Path("README") - if not readme_file.is_file(): - raise Exception("The README file does not exist in this directory. Cannot build app.") - shutil.copyfile(readme_file, input_app_path/readme_file.name) - output_app_expected_name = pathlib.Path(self.config.build.path_root)/f"{self.config.build.name}-{self.config.build.version}.tar.gz" + # readme_file = pathlib.Path("README") + # if not readme_file.is_file(): + # raise Exception("The README file does not exist in this directory. Cannot build app.") + # shutil.copyfile(readme_file, input_app_path/readme_file.name) + output_app_expected_name = pathlib.Path(os.path.join(self.input_path, self.config.build.path_root))/f"{self.config.build.name}-{self.config.build.version}.tar.gz" - try: - import slim - use_slim = True + # try: + # import slim + # use_slim = True - except Exception as e: - print("Failed to import Splunk Packaging Toolkit (slim). slim requires Python<3.10. " - "Packaging app with tar instead. This should still work, but appinspect may catch " - "errors that otherwise would have been flagged by slim.") - use_slim = False + # except Exception as e: + # print("Failed to import Splunk Packaging Toolkit (slim). slim requires Python<3.10. " + # "Packaging app with tar instead. 
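The lookup handling in `ConfOutput.writeObjects` now rebuilds the app's `lookups` folder from scratch, so CSVs removed from the content cannot linger from an earlier build. A standalone sketch of the same steps with hypothetical input and output paths:

```python
import glob
import pathlib
import shutil

# Hypothetical locations, for illustration only
input_path = pathlib.Path("my_security_content")
output_path = pathlib.Path("dist/my_app")

lookup_folder = output_path / "lookups"
if lookup_folder.exists():
    # Drop whatever an earlier build left behind
    if lookup_folder.is_dir():
        shutil.rmtree(lookup_folder)
    else:
        lookup_folder.unlink()
lookup_folder.mkdir(parents=True)

# Copy every lookup CSV shipped with the content into the packaged app
for csv_file in glob.iglob(str(input_path / "lookups" / "*.csv")):
    csv_path = pathlib.Path(csv_file)
    shutil.copy(csv_path, lookup_folder / csv_path.name)
```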
This should still work, but appinspect may catch " + # "errors that otherwise would have been flagged by slim.") + # use_slim = False - if use_slim: - import slim - from slim.utils import SlimLogger - import logging - #In order to avoid significant output, only emit FATAL log messages - SlimLogger.set_level(logging.ERROR) - try: - slim.package(source=input_app_path, output_dir=pathlib.Path(self.config.build.path_root)) - except SystemExit as e: - raise Exception(f"Error building package with slim: {str(e)}") - else: - with tarfile.open(output_app_expected_name, "w:gz") as app_archive: - app_archive.add(self.output_path, arcname=os.path.basename(self.output_path)) + # if use_slim: + # import slim + # from slim.utils import SlimLogger + # import logging + # #In order to avoid significant output, only emit FATAL log messages + # SlimLogger.set_level(logging.ERROR) + # try: + # slim.package(source=input_app_path, output_dir=pathlib.Path(self.config.build.path_root)) + # except SystemExit as e: + # raise Exception(f"Error building package with slim: {str(e)}") + # else: + with tarfile.open(output_app_expected_name, "w:gz") as app_archive: + app_archive.add(self.output_path, arcname=os.path.basename(self.output_path)) @@ -243,5 +261,29 @@ def inspectApp(self)-> None: #back as we read logfile.seek(0) json.dump(j, logfile, indent=3, ) + bad_stuff = ["error", "failure", "manual_check", "warning"] + reports = j.get("reports", []) + if len(reports) != 1: + raise Exception("Expected to find one appinspect report but found 0") + verbose_errors = [] + + for group in reports[0].get("groups", []): + for check in group.get("checks",[]): + if check.get("result","") in bad_stuff: + verbose_errors.append(f"Result: {check.get('result','')} - [{group.get('name','NONAME')}: {check.get('name', 'NONAME')}]") + verbose_errors.sort() + + summary = j.get("summary", None) + if summary is None: + raise Exception("Missing summary from appinspect report") + msgs = [] + for key in bad_stuff: + if summary.get(key,0)>0: + msgs.append(f"{summary.get(key,0)} {key}s") + if len(msgs)>0 or len(verbose_errors): + summary = '\n - '.join(msgs) + details = '\n - '.join(verbose_errors) + raise Exception(f"AppInspect found issue(s) that may prevent automated vetting:\nSummary:\n{summary}\nDetails:\n{details}") + except Exception as e: print(f"Failed to format {appinspect_output}: {str(e)}") \ No newline at end of file diff --git a/contentctl/output/conf_writer.py b/contentctl/output/conf_writer.py index 3b3867fe..da6ba4f0 100644 --- a/contentctl/output/conf_writer.py +++ b/contentctl/output/conf_writer.py @@ -59,7 +59,7 @@ def custom_jinja2_enrichment_filter(string, object): j2_env.filters['custom_jinja2_enrichment_filter'] = custom_jinja2_enrichment_filter template = j2_env.get_template(template_name) - output = template.render(objects=objects, APP_NAME=config.build.name) + output = template.render(objects=objects, APP_NAME=config.build.prefix) output_path.parent.mkdir(parents=True, exist_ok=True) with open(output_path, 'a') as f: output = output.encode('ascii', 'ignore').decode('ascii') diff --git a/contentctl/output/detection_writer.py b/contentctl/output/detection_writer.py new file mode 100644 index 00000000..2f439ca9 --- /dev/null +++ b/contentctl/output/detection_writer.py @@ -0,0 +1,28 @@ + +import yaml + + +class DetectionWriter: + + @staticmethod + def writeYmlFile(file_path : str, obj : dict) -> None: + + new_obj = dict() + new_obj["name"] = obj["name"] + new_obj["id"] = obj["id"] + new_obj["version"] = obj["version"] + 
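`inspectApp` now walks the appinspect JSON report and raises if the summary contains any error, failure, manual_check, or warning results. A sketch of that traversal over an invented report document (the report structure is assumed from the parsing code above; the check names and counts are sample data):

```python
report = {
    "reports": [{
        "groups": [{
            "name": "check_packaging_standards",
            "checks": [
                {"name": "check_that_local_does_not_exist", "result": "success"},
                {"name": "check_for_expansive_permissions", "result": "warning"},
            ],
        }],
    }],
    "summary": {"error": 0, "failure": 0, "manual_check": 0, "warning": 1},
}

BAD_RESULTS = ["error", "failure", "manual_check", "warning"]

details = sorted(
    f"Result: {check.get('result', '')} - [{group.get('name', 'NONAME')}: {check.get('name', 'NONAME')}]"
    for group in report["reports"][0].get("groups", [])
    for check in group.get("checks", [])
    if check.get("result", "") in BAD_RESULTS
)
counts = [f"{report['summary'][key]} {key}s" for key in BAD_RESULTS if report["summary"].get(key, 0) > 0]

if counts or details:
    print("AppInspect found issue(s) that may prevent automated vetting:")
    print("Summary:\n - " + "\n - ".join(counts))
    print("Details:\n - " + "\n - ".join(details))
```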
new_obj["date"] = obj["date"] + new_obj["author"] = obj["author"] + new_obj["type"] = obj["type"] + new_obj["status"] = obj["status"] + new_obj["description"] = obj["description"] + new_obj["data_source"] = obj["data_source"] + new_obj["search"] = obj["search"] + new_obj["how_to_implement"] = obj["how_to_implement"] + new_obj["known_false_positives"] = obj["known_false_positives"] + new_obj["references"] = obj["references"] + new_obj["tags"] = obj["tags"] + new_obj["tests"] = obj["tests"] + + with open(file_path, 'w') as outfile: + yaml.safe_dump(new_obj, outfile, default_flow_style=False, sort_keys=False) \ No newline at end of file diff --git a/contentctl/output/finding_report_writer.py b/contentctl/output/finding_report_writer.py index 9483dd74..ce62f57a 100644 --- a/contentctl/output/finding_report_writer.py +++ b/contentctl/output/finding_report_writer.py @@ -2,39 +2,14 @@ import re from jinja2 import Environment, FileSystemLoader -from contentctl.objects.detection import Detection +from contentctl.objects.ssa_detection import SSADetection from contentctl.objects.constants import * class FindingReportObject(): @staticmethod - def writeFindingReport(detection : Detection) -> None: - - if detection.tags.confidence < 33: - detection.tags.confidence_id = 1 - elif detection.tags.confidence < 66: - detection.tags.confidence_id = 2 - else: - detection.tags.confidence_id = 3 - - detection.tags.context_ids = list() - for context in detection.tags.context: - detection.tags.context_ids.append(SES_CONTEXT_MAPPING[context]) - - if detection.tags.impact < 20: - detection.tags.impact_id = 1 - elif detection.tags.impact < 40: - detection.tags.impact_id = 2 - elif detection.tags.impact < 60: - detection.tags.impact_id = 3 - elif detection.tags.impact < 80: - detection.tags.impact_id = 4 - else: - detection.tags.impact_id = 5 + def writeFindingReport(detection : SSADetection) -> None: - detection.tags.kill_chain_phases_id = list() - for kill_chain_phase in detection.tags.kill_chain_phases: - detection.tags.kill_chain_phases_id.append(SES_KILL_CHAIN_MAPPINGS[kill_chain_phase]) if detection.tags.risk_score < 20: detection.tags.risk_level_id = 0 @@ -52,23 +27,24 @@ def writeFindingReport(detection : Detection) -> None: detection.tags.risk_level_id = 4 detection.tags.risk_level = "Critical" - observable_str = "[" - for i in range(len(detection.tags.observable)): - role_list = [] - for role in detection.tags.observable[i]["role"]: - role_list.append(str(SES_OBSERVABLE_ROLE_MAPPING[role])) - - observable_str = observable_str + 'create_map("name", "' + detection.tags.observable[i]["name"] + '", "role_ids", [' + ",".join(role_list) + '], "type_id", ' + str(SES_OBSERVABLE_TYPE_MAPPING[detection.tags.observable[i]["type"]]) + ', "value", ' + detection.tags.observable[i]["name"] + ')' - if not i == len(detection.tags.observable): - observable_str = observable_str + ', ' - observable_str = observable_str + ']' + evidence_str = "create_map(" + for i in range(len(detection.tags.observable)): + evidence_str = evidence_str + '"' + detection.tags.observable[i]["name"] + '", ' + detection.tags.observable[i]["name"].replace(".", "_") + if not i == (len(detection.tags.observable) - 1): + evidence_str = evidence_str + ', ' + evidence_str = evidence_str + ')' - detection.tags.observable_str = observable_str + detection.tags.evidence_str = evidence_str + + if "actor.user.name" in detection.tags.required_fields: + actor_user_name = "actor_user_name" + else: + actor_user_name = "\"Unknown\"" j2_env = Environment( 
loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')), trim_blocks=True) template = j2_env.get_template('finding_report.j2') - body = template.render(detection=detection) + body = template.render(detection=detection, attack_tactics_id_mapping=SES_ATTACK_TACTICS_ID_MAPPING, actor_user_name=actor_user_name) return body diff --git a/contentctl/output/json_writer.py b/contentctl/output/json_writer.py index 726ca794..0ae387c2 100644 --- a/contentctl/output/json_writer.py +++ b/contentctl/output/json_writer.py @@ -5,6 +5,5 @@ class JsonWriter(): @staticmethod def writeJsonObject(file_path : str, obj) -> None: - with open(file_path, 'w') as outfile: json.dump(obj, outfile, ensure_ascii=False) \ No newline at end of file diff --git a/contentctl/output/svg_output.py b/contentctl/output/svg_output.py index f9992f6a..b16cb4b7 100644 --- a/contentctl/output/svg_output.py +++ b/contentctl/output/svg_output.py @@ -1,25 +1,27 @@ import os - +import pathlib from contentctl.objects.enums import SecurityContentType from contentctl.output.jinja_writer import JinjaWriter - +from contentctl.objects.config import Config class SvgOutput(): - def writeObjects(self, objects: list, output_path: str, type: SecurityContentType = None) -> None: + def writeObjects(self, objects: list, path: str, type: SecurityContentType = None) -> None: detections_tmp = objects detection_without_test = 0 - + + output_path = pathlib.Path(path) + detections = [] obj = dict() for detection in detections_tmp: - if not detection.deprecated: + if not detection.status == "deprecated": detections.append(detection) - if not detection.test and not detection.experimental: + if not detection.tests and not detection.experimental: detection_without_test = detection_without_test + 1 diff --git a/contentctl/output/templates/analyticstories_stories.j2 b/contentctl/output/templates/analyticstories_stories.j2 index b6543567..d6fe6a0f 100644 --- a/contentctl/output/templates/analyticstories_stories.j2 +++ b/contentctl/output/templates/analyticstories_stories.j2 @@ -4,7 +4,7 @@ {% for story in objects %} [analytic_story://{{ story.name }}] -category = {{ story.tags.category[0] }} +category = {{ story.tags.category[0].value }} last_updated = {{ story.date }} version = {{ story.version }} references = {{ story.references | tojson }} diff --git a/contentctl/output/templates/app.conf.j2 b/contentctl/output/templates/app.conf.j2 index ea7843c6..e7954172 100644 --- a/contentctl/output/templates/app.conf.j2 +++ b/contentctl/output/templates/app.conf.j2 @@ -1,30 +1,10 @@ - - ## Splunk app configuration file -## For verbose documenation please see: -## https://docs.splunk.com/Documentation/Splunk/9.0.4/Admin/Appconf -{% for conf in objects %} -[author={{ conf.author_name }}] -email = {{ conf.author_email }} -company = {{ conf.author_company }} - -[id] -name = {{ conf.name }} -version = {{ conf.version }} - -[launcher] -author = {{ conf.author_company }} ({{ conf.author_name }}) -version = {{ conf.version }} -description = {{ conf.description }} - -[package] -id = {{ conf.id }} [install] is_configured = false state = enabled state_change_requires_restart = false -build = {{ conf.build }} +build = {{ objects[0].build }} [triggers] reload.analytic_stories = simple @@ -38,11 +18,18 @@ reload.postprocess = simple reload.content-version = simple reload.es_investigations = simple - +[launcher] +author = {{ objects[0].author_company }} +version = {{ objects[0].version }} +description = {{ objects[0].description }} [ui] is_visible = true -label = {{ 
conf.label }} -{% endfor %} +label = {{ objects[0].label }} + +[package] +id = {{ objects[0].id }} + + diff --git a/contentctl/output/templates/app.manifest.j2 b/contentctl/output/templates/app.manifest.j2 new file mode 100644 index 00000000..594f3058 --- /dev/null +++ b/contentctl/output/templates/app.manifest.j2 @@ -0,0 +1,46 @@ +{ + "schemaVersion": "1.0.0", + "info": { + "title": "ES Content Updates", + "id": { + "group": null, + "name": "DA-ESS-ContentUpdate", + "version": "{{ objects[0].version }}" + }, + "author": [ + { + "name": "Splunk Security Research Team", + "email": "research@splunk.com", + "company": "Splunk" + } + ], + "releaseDate": null, + "description": "Explore the Analytic Stories included with ES Content Updates.", + "classification": { + "intendedAudience": null, + "categories": [], + "developmentStatus": null + }, + "commonInformationModels": null, + "license": { + "name": null, + "text": null, + "uri": null + }, + "privacyPolicy": { + "name": null, + "text": null, + "uri": null + }, + "releaseNotes": { + "name": null, + "text": "./README.md", + "uri": null + } + }, + "dependencies": null, + "tasks": null, + "inputGroups": null, + "incompatibleApps": null, + "platformRequirements": null +} \ No newline at end of file diff --git a/contentctl/output/templates/collections.j2 b/contentctl/output/templates/collections.j2 index 349f2177..06e49140 100644 --- a/contentctl/output/templates/collections.j2 +++ b/contentctl/output/templates/collections.j2 @@ -4,5 +4,6 @@ [{{ lookup.name }}] enforceTypes = false replicate = false + {% endif %} {% endfor %} \ No newline at end of file diff --git a/contentctl/output/templates/content-version.j2 b/contentctl/output/templates/content-version.j2 new file mode 100644 index 00000000..922a4066 --- /dev/null +++ b/contentctl/output/templates/content-version.j2 @@ -0,0 +1,2 @@ +[content-version] +version = {{ objects[0].version }} diff --git a/contentctl/output/templates/finding_report.j2 b/contentctl/output/templates/finding_report.j2 index 92e64682..feff0520 100644 --- a/contentctl/output/templates/finding_report.j2 +++ b/contentctl/output/templates/finding_report.j2 @@ -1,11 +1,30 @@ -create_map("category_id", 101, "class_id", 101000, "detection_start_time", start_time, - "detection_end_time", end_time, "device_entities", [create_map("uid", ucast(map_get(input_event, "enrichments.device_entities.device.uid"), "string", null), "type_id", 0)], - "disposition_id", 1, "end_time", end_time, "event_id", 10100001, "event_time", timestamp, - "finding", create_map("confidence", {{ detection.tags.confidence }}, "confidence_id", {{ detection.tags.confidence_id }}, - "context_ids", {{ detection.tags.context_ids }}, "impact", {{ detection.tags.impact }}, "impact_id", {{ detection.tags.impact_id }}, - "kill_chain_phase", {{ detection.tags.kill_chain_phases[0] }}, "kill_chain_phase_id", {{ detection.tags.kill_chain_phases_id[0] }}, - "risk_level", {{ detection.tags.risk_level }}, "risk_level_id", {{ detection.tags.risk_level_id }}, "type_id", 1, "ref_event_uid", event_id), - "message", {{ detection.tags.message }}, "metadata", create_map("log_name", {{ detection.datamodel[0] }}, "version", - "1.0.0"), "observables", {{ detection.tags.observables_str }}, "origin", create_map("product", create_map("name", "Splunk Behavioral Analytics")), - "rule", create_map("name", "{{ detection.name }}", "uid", "{{ detection.id }}", "version", "1"), "start_time", start_time, "time", start_time, - "user_entities", [create_map("uid", ucast(map_get(input_event, 
"enrichments.user_entities.user.uid"),"string", null))]) \ No newline at end of file + + | eval devices = [{"hostname": device_hostname, "type_id": 0, "uuid": device.uuid}], + time = timestamp, + evidence = {{ detection.tags.evidence_str }}, + message = "{{ detection.name }} has been triggered on " + device_hostname + " by " + {{ actor_user_name }} + ".", + users = [{"name": {{ actor_user_name }}, "uid": actor_user.uid}], + activity_id = 1, + cis_csc = [{"control": "CIS 10", "version": 8}], + analytic_stories = {{ detection.tags.analytics_story_str }}, + class_name = "Detection Report", + confidence = {{ detection.tags.confidence }}, + confidence_id = {{ detection.tags.confidence_id }}, + duration = 0, + impact = {{ detection.tags.impact }}, + impact_id = {{ detection.tags.impact_id }}, + kill_chain = {{ detection.tags.kill_chain_phases_str }}, + nist = ["DE.AE"], + risk_level = "{{ detection.tags.risk_level }}", + category_uid = 2, + class_uid = 102001, + risk_level_id = {{ detection.tags.risk_level_id }}, + risk_score = {{ detection.tags.risk_score }}, + severity_id = 0, + rule = {"name": "{{ detection.name }}", "uid": "{{ detection.id }}", "type": "Streaming"}, + metadata = {"customer_uid": metadata.customer_uid, "product": {"name": "Behavior Analytics", "vendor_name": "Splunk"}, "version": "1.0.0-rc.2", "logged_time": time()}, + type_uid = 10200101, + start_time = timestamp, + end_time = timestamp + | fields metadata, rule, activity_id, analytic_stories, cis_csc, category_uid, class_name, class_uid, confidence, confidence_id, devices, duration, time, evidence, impact, impact_id, kill_chain, message, nist, observables, risk_level, risk_level_id, risk_score, severity_id, type_uid, users, start_time, end_time + | into sink; \ No newline at end of file diff --git a/contentctl/output/templates/savedsearches_baselines.j2 b/contentctl/output/templates/savedsearches_baselines.j2 index 75a8fa55..29d59ec3 100644 --- a/contentctl/output/templates/savedsearches_baselines.j2 +++ b/contentctl/output/templates/savedsearches_baselines.j2 @@ -5,34 +5,36 @@ {% for detection in objects %} {% if (detection.type == 'Baseline') %} [{{APP_NAME}} - {{ detection.name }}] -action.{{APP_NAME|lower}}= 0 -action.{{APP_NAME|lower}}.enabled = 1 -action.{{APP_NAME|lower}}.search_type = support -action.{{APP_NAME|lower}}.full_search_name = {{APP_NAME}} - {{ detection.name }} +action.escu = 0 +action.escu.enabled = 1 +action.escu.search_type = support +action.escu.full_search_name = {{APP_NAME}} - {{ detection.name }} description = {{ detection.description }} -action.{{APP_NAME|lower}}.creation_date = {{ detection.date }} -action.{{APP_NAME|lower}}.modification_date = {{ detection.date }} +action.escu.creation_date = {{ detection.date }} +action.escu.modification_date = {{ detection.date }} {% if detection.tags.analytic_story is defined %} -action.{{APP_NAME|lower}}.analytic_story = {{ detection.tags.analytic_story | tojson }} +action.escu.analytic_story = {{ detection.tags.analytic_story | tojson }} {% else %} -action.{{APP_NAME|lower}}.analytic_story = [] +action.escu.analytic_story = [] {% endif %} -action.{{APP_NAME|lower}}.data_models = {{ detection.datamodel | tojson }} - +action.escu.data_models = {{ detection.datamodel | tojson }} +cron_schedule = {{ detection.deployment.scheduling.cron_schedule }} enableSched = 1 - - - +dispatch.earliest_time = {{ detection.deployment.scheduling.earliest_time }} +dispatch.latest_time = {{ detection.deployment.scheduling.latest_time }} +{% if 
detection.deployment.scheduling.schedule_window is defined %} +schedule_window = {{ detection.deployment.scheduling.schedule_window }} +{% endif %} {% if detection.providing_technologies is defined %} -action.{{APP_NAME|lower}}.providing_technologies = {{ detection.providing_technologies | tojson }} +action.escu.providing_technologies = {{ detection.providing_technologies | tojson }} {% else %} -action.{{APP_NAME|lower}}.providing_technologies = [] +action.escu.providing_technologies = [] {% endif %} -action.{{APP_NAME|lower}}.eli5 = {{ detection.description }} +action.escu.eli5 = {{ detection.description }} {% if detection.how_to_implement is defined %} -action.{{APP_NAME|lower}}.how_to_implement = {{ detection.how_to_implement }} +action.escu.how_to_implement = {{ detection.how_to_implement }} {% else %} -action.{{APP_NAME|lower}}.how_to_implement = none +action.escu.how_to_implement = none {% endif %} {% if detection.disabled is defined %} disabled = false diff --git a/contentctl/output/templates/savedsearches_detections.j2 b/contentctl/output/templates/savedsearches_detections.j2 index 6dcf7ecb..2cfb7c77 100644 --- a/contentctl/output/templates/savedsearches_detections.j2 +++ b/contentctl/output/templates/savedsearches_detections.j2 @@ -3,41 +3,41 @@ {% for detection in objects %} {% if (detection.type == 'TTP' or detection.type == 'Anomaly' or detection.type == 'Hunting' or detection.type == 'Correlation') %} [{{APP_NAME}} - {{ detection.name }} - Rule] -action.{{APP_NAME|lower}} = 0 -action.{{APP_NAME|lower}}.enabled = 1 -{% if detection.deprecated %} +action.escu = 0 +action.escu.enabled = 1 +{% if detection.status == "deprecated" %} description = WARNING, this detection has been marked deprecated by the Splunk Threat Research team, this means that it will no longer be maintained or supported. If you have any questions feel free to email us at: research@splunk.com. 
{{ detection.description }} {% else %} description = {{ detection.description }} {% endif %} -action.{{APP_NAME|lower}}.mappings = {{ detection.mappings | tojson }} -action.{{APP_NAME|lower}}.data_models = {{ detection.datamodel | tojson }} -action.{{APP_NAME|lower}}.eli5 = {{ detection.description }} +action.escu.mappings = {{ detection.mappings | tojson }} +action.escu.data_models = {{ detection.datamodel | tojson }} +action.escu.eli5 = {{ detection.description }} {% if detection.how_to_implement is defined %} -action.{{APP_NAME|lower}}.how_to_implement = {{ detection.how_to_implement }} +action.escu.how_to_implement = {{ detection.how_to_implement }} {% else %} -action.{{APP_NAME|lower}}.how_to_implement = none +action.escu.how_to_implement = none {% endif %} {% if detection.known_false_positives is defined %} -action.{{APP_NAME|lower}}.known_false_positives = {{ detection.known_false_positives }} +action.escu.known_false_positives = {{ detection.known_false_positives }} {% else %} -action.{{APP_NAME|lower}}.known_false_positives = None +action.escu.known_false_positives = None {% endif %} -action.{{APP_NAME|lower}}.creation_date = {{ detection.date }} -action.{{APP_NAME|lower}}.modification_date = {{ detection.date }} -action.{{APP_NAME|lower}}.confidence = high -action.{{APP_NAME|lower}}.full_search_name = {{APP_NAME}} - {{ detection.name }} - Rule -action.{{APP_NAME|lower}}.search_type = detection +action.escu.creation_date = {{ detection.date }} +action.escu.modification_date = {{ detection.date }} +action.escu.confidence = high +action.escu.full_search_name = {{APP_NAME}} - {{ detection.name }} - Rule +action.escu.search_type = detection {% if detection.tags.product is defined %} -action.{{APP_NAME|lower}}.product = {{ detection.tags.product | tojson }} +action.escu.product = {{ detection.tags.product | tojson }} {% endif %} {% if detection.providing_technologies is defined %} -action.{{APP_NAME|lower}}.providing_technologies = {{ detection.providing_technologies | tojson }} +action.escu.providing_technologies = {{ detection.providing_technologies | tojson }} {% else %} -action.{{APP_NAME|lower}}.providing_technologies = [] +action.escu.providing_technologies = [] {% endif %} {% if detection.tags.analytic_story is defined %} -action.{{APP_NAME|lower}}.analytic_story = {{ detection.tags.analytic_story | tojson }} +action.escu.analytic_story = {{ detection.tags.analytic_story | tojson }} {% if detection.deployment.rba.enabled is defined %} action.risk = 1 action.risk.param._risk_message = {{ detection.tags.message }} @@ -46,14 +46,16 @@ action.risk.param._risk_score = 0 action.risk.param.verbose = 0 {% endif %} {% else %} -action.{{APP_NAME|lower}}.analytic_story = [] +action.escu.analytic_story = [] {% endif %} cron_schedule = {{ detection.deployment.scheduling.cron_schedule }} dispatch.earliest_time = {{ detection.deployment.scheduling.earliest_time }} dispatch.latest_time = {{ detection.deployment.scheduling.latest_time }} action.correlationsearch.enabled = 1 -{% if detection.deprecated %} +{% if detection.status == "deprecated" %} action.correlationsearch.label = {{APP_NAME}} - Deprecated - {{ detection.name }} - Rule +{% elif detection.type | lower == "correlation" %} +action.correlationsearch.label = {{APP_NAME}} - RIR - {{ detection.name }} - Rule {% else %} action.correlationsearch.label = {{APP_NAME}} - {{ detection.name }} - Rule {% endif %} @@ -68,7 +70,7 @@ action.notable = 1 action.notable.param.nes_fields = {{ detection.nes_fields }} {% endif %} 
action.notable.param.rule_description = {{ detection.deployment.notable.rule_description | custom_jinja2_enrichment_filter(detection) }} -action.notable.param.rule_title = {{ detection.deployment.notable.rule_title | custom_jinja2_enrichment_filter(detection) }} +action.notable.param.rule_title = {% if detection.type | lower == "correlation" %}RBA: {{ detection.deployment.notable.rule_title | custom_jinja2_enrichment_filter(detection) }}{% else %}{{ detection.deployment.notable.rule_title | custom_jinja2_enrichment_filter(detection) }}{% endif +%} action.notable.param.security_domain = {{ detection.tags.security_domain }} action.notable.param.severity = high {% endif %} diff --git a/contentctl/output/templates/savedsearches_investigations.j2 b/contentctl/output/templates/savedsearches_investigations.j2 index ad7659e4..3bf25cb5 100644 --- a/contentctl/output/templates/savedsearches_investigations.j2 +++ b/contentctl/output/templates/savedsearches_investigations.j2 @@ -6,25 +6,25 @@ {% if (detection.type == 'Investigation') %} {% if detection.search is defined %} [{{APP_NAME}} - {{ detection.name }} - Response Task] -action.{{APP_NAME|lower}} = 0 -action.{{APP_NAME|lower}}.enabled = 1 -action.{{APP_NAME|lower}}.search_type = investigative -action.{{APP_NAME|lower}}.full_search_name = {{APP_NAME}} - {{ detection.name }} - Response Task +action.escu = 0 +action.escu.enabled = 1 +action.escu.search_type = investigative +action.escu.full_search_name = {{APP_NAME}} - {{ detection.name }} - Response Task description = {{ detection.description }} -action.{{APP_NAME|lower}}.creation_date = {{ detection.date }} -action.{{APP_NAME|lower}}.modification_date = {{ detection.date }} +action.escu.creation_date = {{ detection.date }} +action.escu.modification_date = {{ detection.date }} {% if detection.tags.analytic_story is defined %} -action.{{APP_NAME|lower}}.analytic_story = {{ detection.tags.analytic_story | tojson }} +action.escu.analytic_story = {{ detection.tags.analytic_story | tojson }} {% else %} -action.{{APP_NAME|lower}}.analytic_story = [] +action.escu.analytic_story = [] {% endif %} -action.{{APP_NAME|lower}}.earliest_time_offset = 3600 -action.{{APP_NAME|lower}}.latest_time_offset = 86400 -action.{{APP_NAME|lower}}.providing_technologies = [] -action.{{APP_NAME|lower}}.data_models = {{ detection.datamodel | tojson }} -action.{{APP_NAME|lower}}.eli5 = {{ detection.description }} -action.{{APP_NAME|lower}}.how_to_implement = none -action.{{APP_NAME|lower}}.known_false_positives = None at this time +action.escu.earliest_time_offset = 3600 +action.escu.latest_time_offset = 86400 +action.escu.providing_technologies = [] +action.escu.data_models = {{ detection.datamodel | tojson }} +action.escu.eli5 = {{ detection.description }} +action.escu.how_to_implement = none +action.escu.known_false_positives = None at this time disabled = true schedule_window = auto is_visible = false diff --git a/contentctl/output/templates/splunk_app/README.md b/contentctl/output/templates/splunk_app/README.md new file mode 100644 index 00000000..d0a87c3f --- /dev/null +++ b/contentctl/output/templates/splunk_app/README.md @@ -0,0 +1,7 @@ +# Splunk ES Content Update + +This subscription service delivers pre-packaged Security Content for use with Splunk Enterprise Security. Subscribers get regular updates to help security practitioners more quickly address ongoing and time-sensitive customer problems and threats. + +Requires Splunk Enterprise Security version 4.5 or greater. 
+
+For more information please visit the [Splunk ES Content Update user documentation](https://docs.splunk.com/Documentation/ESSOC).
\ No newline at end of file
diff --git a/contentctl/output/templates/splunk_app/README/essoc_story_detail.txt b/contentctl/output/templates/splunk_app/README/essoc_story_detail.txt
new file mode 100644
index 00000000..53337528
--- /dev/null
+++ b/contentctl/output/templates/splunk_app/README/essoc_story_detail.txt
@@ -0,0 +1,15 @@
+The Analytic Story Details dashboard renders all the details of the content related to a specific analytic story which
+can be chosen via the drop-down.
+
+Each analytic story has the following attributes associated with it:
+______________________________________________________________________
+
+
+ Analytic Story: The name of the analytic story
+ Description : The description of the analytic story
+ Search Name : The name of the searches belonging to the chosen analytic story
+ Search : The search query which looks for an attack pattern corresponding to the analytic story
+ Search Description: The description of the search query
+ Asset Type: The analytic story specifies what asset in the infrastructure may be compromised
+ Category: The category that the search belongs to (malware, vulnerabilities, best practices, abuse)
+ Kill Chain Phase: The kill chain phase of the attack that the search looks for.
\ No newline at end of file
diff --git a/contentctl/output/templates/splunk_app/README/essoc_summary.txt b/contentctl/output/templates/splunk_app/README/essoc_summary.txt
new file mode 100644
index 00000000..d7dde31e
--- /dev/null
+++ b/contentctl/output/templates/splunk_app/README/essoc_summary.txt
@@ -0,0 +1,24 @@
+The ES_SOC Summary Dashboard provides a summarized view of the analytic story contents of the ES-SOC app.
+The dashboard has the following panels, which give you the following details:
+
+1) Analytic story Summary
+ - Total Analytic Stories : The total number of Analytic stories in the ES-SOC application
+ - Total Searches: The total number of searches in ES-SOC
+ - Searches added last week: Number of searches added to ES-SOC in the last week.
+
+ 2) Analytic story Category: This dashboard panel summarizes the categories of the searches that the ES-SOC app contains. The categories of the analytic stories are as follows:
+ -Malware: These searches detect specific malware behavior for a particular phase of the attack kill chain, e.g. a malware's delivery method via email or a malware's installation behavior via registry key changes.
+ -Vulnerability: These searches detect behavior or a signature of vulnerable software in use. These searches are not designed to replace vulnerability management or scanning systems. The purpose of these searches is to discover a vulnerability through side effects or behaviors.
+ -Abuse: Some actions can be deemed malicious because they are unexpected, violate corporate policy, or are significantly different from the actions of other users, e.g. a USB disk that is seen on multiple systems, a user that uploads excessive files to a cloud service, or a database query that dumps an entire table.
+ -Best Practices: Searches that correspond to specific guidelines from organizations like SANS or OWASP.
+
+ 3) Kill Chain phases: Every analytic story has one or more searches which look for a certain kind of attack pattern/behavior. These searches have an attribute which essentially tells you which kill chain phase the search corresponds to.
+ The numbers on the dashboard represent the number of searches corresponding to each kill chain phase.
+
+ 4) Analytic story table: This table gives the user a comprehensive view of some of the details of the analytic story. Some of the listed attributes are:
+ - Analytic Story : The name of the analytic story
+ - Description: The description of the analytic story
+ - Search names: The names of the searches in each analytic story
+ - Datamodels: The name of the datamodel that the search is querying against.
+ - Technology Examples: This field represents some examples of the technologies required to populate the datamodels (Nessus, Cisco Firewall, etc.)
+ - Kill chain phase: The name of the kill chain phase that the search belongs to
\ No newline at end of file
diff --git a/contentctl/output/templates/splunk_app/README/essoc_usage_dashboard.txt b/contentctl/output/templates/splunk_app/README/essoc_usage_dashboard.txt
new file mode 100644
index 00000000..fcbc8428
--- /dev/null
+++ b/contentctl/output/templates/splunk_app/README/essoc_usage_dashboard.txt
@@ -0,0 +1,51 @@
+#######################
+#ESSOC Usage Dashboard#
+#######################
+
+The ESSOC Usage dashboard is designed to provide high-level insight into the usage of the ES-SOC app. It is suitable for display when providing feedback to the Splunk team or for identifying how the ES-SOC app is being used. This dashboard has two time selectors that work independently: the top time selector determines the search time range for all the single-value panels, and the lower time selector determines the time range for the usage table.
+
+IMPORTANT: The user loading this dashboard must have permission to search the _audit index.
+
+##################
+#Dashboard panels#
+##################
+
+Searches Ran
+
+The total number of searches in ES-SOC that were executed. This number includes scheduled searches and ad hoc searches run from the search bar using the '| savedsearch' syntax.
+
+Unique Searches
+
+The unique/distinct searches executed on the deployment. This is equivalent to the distinct count of searches run in the ES-SOC app.
+
+Most Run
+
+The search in ES-SOC that was executed most often. This count includes scheduled searches and ad hoc searches run from the search bar using the '| savedsearch' syntax.
+
+Ad hoc Searches
+
+The total number of searches run from the search bar using the '| savedsearch' syntax.
+
+Scheduled
+
+The total number of ESSOC searches run that were scheduled.
+
+Most Active User
+
+The user who executed the highest number/count of searches. This calculation includes scheduled searches and ad hoc searches run from the search bar using the '| savedsearch' syntax.
+
+Search Run Time (seconds)
+
+Total run time of all searches executed, in seconds. This calculation includes scheduled searches and ad hoc searches run from the search bar using the '| savedsearch' syntax.
+
+Average Run Time (seconds)
+
+Average run time of all searches executed, in seconds. This calculation includes scheduled searches and ad hoc searches run from the search bar using the '| savedsearch' syntax.
+
+Max Run Time (seconds)
+
+The run time of the longest running search. This calculation includes scheduled searches and ad hoc searches run from the search bar using the '| savedsearch' syntax.
+
+Search summary
+
+This table provides details on each search that was executed in the ESSOC app.
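+
+For reference, any of these detections can be run ad hoc from the search bar with the savedsearch command; the name below is only an illustrative example of the "ESCU - (detection name) - Rule" naming convention used by the app:
+
+| savedsearch "ESCU - Anomalous usage of 7zip - Rule"
+
+Searches started this way are counted as Adhoc usage, while searches triggered by the scheduler are counted as Scheduled.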
\ No newline at end of file diff --git a/contentctl/output/templates/splunk_app/default/analytic_stories.conf b/contentctl/output/templates/splunk_app/default/analytic_stories.conf new file mode 100644 index 00000000..0cfdca34 --- /dev/null +++ b/contentctl/output/templates/splunk_app/default/analytic_stories.conf @@ -0,0 +1,2 @@ +### Deprecated since ESCU UI was deprecated and this conf file is no longer in use +### Using one single file analyticstories.conf that will be used both by ES and ESCU \ No newline at end of file diff --git a/contentctl/output/templates/splunk_app/default/app.conf b/contentctl/output/templates/splunk_app/default/app.conf new file mode 100644 index 00000000..2e303162 --- /dev/null +++ b/contentctl/output/templates/splunk_app/default/app.conf @@ -0,0 +1,31 @@ +## Splunk app configuration file + +[install] +is_configured = false +state = enabled +state_change_requires_restart = false +build = 16367 + +[triggers] +reload.analytic_stories = simple +reload.usage_searches = simple +reload.use_case_library = simple +reload.correlationsearches = simple +reload.analyticstories = simple +reload.governance = simple +reload.managed_configurations = simple +reload.postprocess = simple +reload.content-version = simple +reload.es_investigations = simple + +[launcher] +author = Splunk +version = 4.9.0 +description = Explore the Analytic Stories included with ES Content Updates. + +[ui] +is_visible = true +label = ES Content Updates + +[package] +id = DA-ESS-ContentUpdate diff --git a/contentctl/output/templates/splunk_app/default/commands.conf b/contentctl/output/templates/splunk_app/default/commands.conf new file mode 100644 index 00000000..ad3cbfdf --- /dev/null +++ b/contentctl/output/templates/splunk_app/default/commands.conf @@ -0,0 +1,11 @@ +# deprecated please see gist: https://gist.github.com/d1vious/c4c2aae7fa7d5cbb1f24adc5f6303ac1 +#[dnstwist] +#filename = dnstwist.py +#chunked = true + +# run story functionality has been moved to: https://github.com/splunk/analytic_story_execution' +# [runstory] +# filename = runstory.py +# chunked = true +# is_risky = true + diff --git a/contentctl/output/templates/splunk_app/default/content-version.conf b/contentctl/output/templates/splunk_app/default/content-version.conf new file mode 100644 index 00000000..4bbba5eb --- /dev/null +++ b/contentctl/output/templates/splunk_app/default/content-version.conf @@ -0,0 +1,2 @@ +[content-version] +version = 4.9.0 diff --git a/contentctl/output/templates/splunk_app/default/data/ui/nav/default.xml b/contentctl/output/templates/splunk_app/default/data/ui/nav/default.xml new file mode 100644 index 00000000..a56d143e --- /dev/null +++ b/contentctl/output/templates/splunk_app/default/data/ui/nav/default.xml @@ -0,0 +1,6 @@ + \ No newline at end of file diff --git a/contentctl/output/templates/splunk_app/default/data/ui/views/escu_summary.xml b/contentctl/output/templates/splunk_app/default/data/ui/views/escu_summary.xml new file mode 100644 index 00000000..0af3fa77 --- /dev/null +++ b/contentctl/output/templates/splunk_app/default/data/ui/views/escu_summary.xml @@ -0,0 +1,193 @@ +
+ + + + Splunk Security Content + + | rest /services/saved/searches splunk_server=local count=0 | search title="ESCU - *" + + + | rest /services/configs/conf-analyticstories splunk_server=local count=0 |search eai:acl.app = "DA-ESS-ContentUpdate" + + + * + * + * + * + + + + +
+ + + +

Explore the Analytic Stories included with Splunk Security via ES Use Case Library or Splunk Security Essentials.

+ +
+
+ + + + Total Analytic Stories + + search title="analytic_story://*" |stats count + + + + + + + + + + + + + + + + + + + + Total Detections + + stats count by action.correlationsearch.label| eventstats sum(count) as total_detection_count| fields total_detection_count + + + + + + + + + + + + + + + + + + + + ESCU App Version + + | rest /services/configs/conf-content-version splunk_server=local count=0 | table version + + + + + + + + + + + + + + + + + + + + + Story Categories + + + | rest /services/configs/conf-analyticstories splunk_server=local count=0 | search eai:acl.app = "DA-ESS-ContentUpdate"| search title="analytic_story://*"| stats count by category + + + $click.value$ + $click.value$ + + + + + + + + + + + Analytic Stories by MITRE Technique ID + + + + | rest /services/saved/searches splunk_server=local count=0 | search title="ESCU - *" +| spath input=action.correlationsearch.annotations path=mitre_attack{} output="MITRE Technique ID" +| spath input=action.correlationsearch.annotations path=analytic_story{} output=story_name + | stats dc(story_name) as "Analytic Stories" by "MITRE Technique ID" + + + + $click.value$ + $click.value$ + + + + + + + + + + All + + now + | rest /services/configs/conf-savedsearches splunk_server=local count=0 +| search action.escu.search_type = detection +| spath input=action.correlationsearch.annotations path=analytic_story{} output="story" +| mvexpand story +| dedup story | fields story + + story + story + * + " + " + * + + + + + + Analytic Story Details + + | rest /services/configs/conf-savedsearches splunk_server=local count=0 +| search action.escu.search_type = detection +| spath input=action.correlationsearch.annotations path=analytic_story{} output="analytic_story" +| spath input=action.correlationsearch.annotations path=mitre_attack{} output="mitre_attack" +| spath input=action.escu.data_models path={} output="Data Models" +| rename title as "Detections" +| join analytic_story + [| rest /services/configs/conf-analyticstories splunk_server=local count=0 + | search title="analytic_story://*" + | eval "analytic_story"=replace(title,"analytic_story://","" ) + ] +| search analytic_story= $story$ +|stats values(Detections) as Detections values(mitre_attack) as "MITRE Technique ID" values(last_updated) as "Last Updated" by analytic_story description| rename analytic_story as "Analytic Story"| rename description as Description| table "Analytic Story" Description Detections "MITRE Technique ID" "Last Updated" + $earliest$ + $latest$ + + + + + + + + + + + + +
+
+
+
\ No newline at end of file diff --git a/contentctl/output/templates/splunk_app/default/data/ui/views/feedback.xml b/contentctl/output/templates/splunk_app/default/data/ui/views/feedback.xml new file mode 100644 index 00000000..3ec519bf --- /dev/null +++ b/contentctl/output/templates/splunk_app/default/data/ui/views/feedback.xml @@ -0,0 +1,13 @@ +
+ + Welcome to Splunk Enterprise Security Content Updates Feedback Center. + + + + Contact us at research@splunk.com to send us support requests, bug reports, or questions directly to the Splunk Security Research Team. +
Please specify your request type and/or the title of any related Analytic Stories.
+ You can also find us in the #security-research room in the Splunk Slack channel
+ +
+
+
diff --git a/contentctl/output/templates/splunk_app/default/distsearch.conf b/contentctl/output/templates/splunk_app/default/distsearch.conf new file mode 100644 index 00000000..23129734 --- /dev/null +++ b/contentctl/output/templates/splunk_app/default/distsearch.conf @@ -0,0 +1,5 @@ +[replicationSettings:refineConf] +replicate.analytic_stories = false + +[replicationBlacklist] +excludeESCU = apps[/\\]DA-ESS-ContentUpdate[/\\]lookups[/\\]... diff --git a/contentctl/output/templates/splunk_app/default/usage_searches.conf b/contentctl/output/templates/splunk_app/default/usage_searches.conf new file mode 100644 index 00000000..0c8aa32c --- /dev/null +++ b/contentctl/output/templates/splunk_app/default/usage_searches.conf @@ -0,0 +1,73 @@ +[escu-metrics-usage] +action.email.useNSSubject = 1 +alert.digest_mode = True +alert.suppress = 0 +alert.track = 0 +auto_summarize.dispatch.earliest_time = -1d@h +dispatchAs = user +search = index=_audit sourcetype="audittrail" \ +"ESCU - "\ +`comment("Find all the search names in the audittrail.")`\ +| stats count(search) by search savedsearch_name user\ +| eval usage=(if(savedsearch_name=="","Adhoc","Scheduled")) \ +`comment("If the savedsearch_name field in the audittrail is empty, the search was run adhoc. Otherwise it was run as a scheduled search")`\ +| rex field=search "\"(?.*)\""\ +`comment("Extract the name of the search from the search string")`\ +| table savedsearch_name count(search) usage user | join savedsearch_name max=0 type=left [search sourcetype="manifests" | spath searches{} | mvexpand searches{} | spath input=searches{} | table category search_name | rename search_name as savedsearch_name | dedup savedsearch_name] | search category=* + +[escu-metrics-search] +action.email.useNSSubject = 1 +alert.suppress = 0 +alert.track = 0 +auto_summarize.dispatch.earliest_time = -1d@h +enableSched = 1 +cron_schedule = 0 0 * * * +dispatch.earliest_time = -4h@h +dispatch.latest_time = -1h@h +search = index=_audit action=search | transaction search_id maxspan=3m | search ESCU | stats sum(total_run_time) avg(total_run_time) max(total_run_time) sum(result_count) + +[escu-metrics-search-events] +action.email.useNSSubject = 1 +alert.digest_mode = True +alert.suppress = 0 +alert.track = 0 +auto_summarize.dispatch.earliest_time = -1d@h +cron_schedule = 0 0 * * * +enableSched = 1 +dispatch.earliest_time = -4h@h +dispatch.latest_time = -1h@h +search = [search index=_audit sourcetype="audittrail" \"ESCU NOT "index=_audit" | where search !="" | dedup search_id | rex field=search "\"(?.*)\"" | rex field=_raw "user=(?[a-zA-Z0-9_\-]+)" | eval usage=if(savedsearch_name!="", "scheduled", "adhoc") | eval savedsearch_name=if(savedsearch_name != "", savedsearch_name, search_name) | table savedsearch_name search_id user _time usage | outputlookup escu_search_id.csv | table search_id] index=_audit total_run_time event_count result_count NOT "index=_audit" | lookup escu_search_id.csv search_id | stats count(savedsearch_name) AS search_count avg(total_run_time) AS search_avg_run_time sum(total_run_time) AS search_total_run_time sum(result_count) AS search_total_results earliest(_time) AS firsts latest(_time) AS lasts by savedsearch_name user usage| eval first_run=strftime(firsts, "%B %d %Y") | eval last_run=strftime(lasts, "%B %d %Y") + +[escu-metrics-search-longest-runtime] +action.email.useNSSubject = 1 +alert.digest_mode = True +alert.suppress = 0 +alert.track = 0 +auto_summarize.dispatch.earliest_time = -1d@h +enableSched = 1 +cron_schedule = 0 0 * * * +disabled = 1 
+dispatch.earliest_time = -4h@h +dispatch.latest_time = -1h@h +search = index=_* ESCU [search index=_* action=search latest=-2h earliest=-1d| transaction search_id maxspan=3m | search ESCU | stats values(total_run_time) AS run by search_id | sort -run | head 1| table search_id] | table search search_id + +[escu-metrics-usage-search] +action.email.useNSSubject = 1 +alert.digest_mode = True +alert.suppress = 0 +alert.track = 0 +auto_summarize.dispatch.earliest_time = -1d@h +cron_schedule = 0 0 * * * +dispatch.earliest_time = -4h@h +dispatch.latest_time = -1h@h +enableSched = 1 +dispatchAs = user +search = index=_audit sourcetype="audittrail" \ +"ESCU - "\ +`comment("Find all the search names in the audittrail. Ignore the last few minutes so we can exclude this search's text from the result.")`\ +| stats count(search) by search savedsearch_name user\ +| eval usage=(if(savedsearch_name=="","Adhoc","Scheduled")) \ +`comment("If the savedsearch_name field in the audittrail is empty, the search was run adhoc. Otherwise it was run as a scheduled search")`\ +| rex field=search "\"(?.*)\""\ +`comment("Extract the name of the search from the search string")`\ +| table savedsearch_name count(search) usage user | join savedsearch_name max=0 type=left [search sourcetype="manifests" | spath searches{} | mvexpand searches{} | spath input=searches{} | table category search_name | rename search_name as savedsearch_name | dedup savedsearch_name] | search category=* diff --git a/contentctl/output/templates/splunk_app/default/use_case_library.conf b/contentctl/output/templates/splunk_app/default/use_case_library.conf new file mode 100644 index 00000000..0cfdca34 --- /dev/null +++ b/contentctl/output/templates/splunk_app/default/use_case_library.conf @@ -0,0 +1,2 @@ +### Deprecated since ESCU UI was deprecated and this conf file is no longer in use +### Using one single file analyticstories.conf that will be used both by ES and ESCU \ No newline at end of file diff --git a/contentctl/output/templates/splunk_app/metadata/default.meta b/contentctl/output/templates/splunk_app/metadata/default.meta index f9364dd1..b9b933bf 100644 --- a/contentctl/output/templates/splunk_app/metadata/default.meta +++ b/contentctl/output/templates/splunk_app/metadata/default.meta @@ -1,6 +1,23 @@ +## shared Application-level permissions [] access = read : [ * ], write : [ admin ] export = system [savedsearches] -owner = admin \ No newline at end of file +owner = admin + +## Correlation Searches +[correlationsearches] +access = read : [ * ], write : [ * ] + +[governance] +access = read : [ * ], write : [ * ] + +## Managed Configurations +[managed_configurations] +access = read : [ * ], write : [ * ] + +## Postprocess +[postprocess] +access = read : [ * ], write : [ * ] + diff --git a/contentctl/output/templates/splunk_app/static/appIcon.png b/contentctl/output/templates/splunk_app/static/appIcon.png index b9406627..593ed9af 100644 Binary files a/contentctl/output/templates/splunk_app/static/appIcon.png and b/contentctl/output/templates/splunk_app/static/appIcon.png differ diff --git a/contentctl/output/templates/splunk_app/static/appIconAlt.png b/contentctl/output/templates/splunk_app/static/appIconAlt.png new file mode 100644 index 00000000..8035698d Binary files /dev/null and b/contentctl/output/templates/splunk_app/static/appIconAlt.png differ diff --git a/contentctl/output/templates/splunk_app/static/appIconAlt_2x.png b/contentctl/output/templates/splunk_app/static/appIconAlt_2x.png new file mode 100644 index 00000000..94e6caea 
Binary files /dev/null and b/contentctl/output/templates/splunk_app/static/appIconAlt_2x.png differ diff --git a/contentctl/output/templates/splunk_app/static/appIcon_2x.png b/contentctl/output/templates/splunk_app/static/appIcon_2x.png index 541fd2de..351da438 100644 Binary files a/contentctl/output/templates/splunk_app/static/appIcon_2x.png and b/contentctl/output/templates/splunk_app/static/appIcon_2x.png differ diff --git a/contentctl/output/yml_output.py b/contentctl/output/yml_output.py new file mode 100644 index 00000000..93eae5dc --- /dev/null +++ b/contentctl/output/yml_output.py @@ -0,0 +1,66 @@ +import os + +from contentctl.output.detection_writer import DetectionWriter +from contentctl.objects.detection import Detection + + +class YmlOutput(): + + + def writeDetections(self, objects: list, output_path : str) -> None: + for obj in objects: + file_path = obj.file_path + obj.id = str(obj.id) + + DetectionWriter.writeYmlFile(os.path.join(output_path, file_path), obj.dict( + exclude_none=True, + include = + { + "name": True, + "id": True, + "version": True, + "date": True, + "author": True, + "type": True, + "status": True, + "description": True, + "data_source": True, + "search": True, + "how_to_implement": True, + "known_false_positives": True, + "references": True, + "tags": + { + "analytic_story": True, + "asset_type": True, + "atomic_guid": True, + "confidence": True, + "impact": True, + "drilldown_search": True, + "mappings": True, + "message": True, + "mitre_attack_id": True, + "kill_chain_phases:": True, + "observable": True, + "product": True, + "required_fields": True, + "risk_score": True, + "security_domain": True + }, + "tests": + { + '__all__': + { + "name": True, + "attack_data": { + '__all__': + { + "data": True, + "source": True, + "sourcetype": True + } + } + } + } + } + )) \ No newline at end of file diff --git a/contentctl/output/yml_writer.py b/contentctl/output/yml_writer.py index ca2e3292..09d8e311 100644 --- a/contentctl/output/yml_writer.py +++ b/contentctl/output/yml_writer.py @@ -8,4 +8,4 @@ class YmlWriter: def writeYmlFile(file_path : str, obj : dict) -> None: with open(file_path, 'w') as outfile: - yaml.dump(obj, outfile, default_flow_style=False, sort_keys=False) \ No newline at end of file + yaml.safe_dump(obj, outfile, default_flow_style=False, sort_keys=False) \ No newline at end of file diff --git a/contentctl/templates/deployments/00_default_anomaly.yml b/contentctl/templates/deployments/00_default_anomaly.yml new file mode 100644 index 00000000..4ed7d3da --- /dev/null +++ b/contentctl/templates/deployments/00_default_anomaly.yml @@ -0,0 +1,16 @@ +name: ESCU Default Configuration Anomaly +id: a9e210c6-9f50-4f8b-b60e-71bb26e4f216 +date: '2021-12-21' +author: Patrick Bareiss +description: This configuration file applies to all detections of type anomaly. + These detections will use Risk Based Alerting. +scheduling: + cron_schedule: 0 * * * * + earliest_time: -70m@m + latest_time: -10m@m + schedule_window: auto +alert_action: + rba: + enabled: 'true' +tags: + type: Anomaly diff --git a/contentctl/templates/deployments/00_default_baseline.yml b/contentctl/templates/deployments/00_default_baseline.yml new file mode 100644 index 00000000..b9d5b21c --- /dev/null +++ b/contentctl/templates/deployments/00_default_baseline.yml @@ -0,0 +1,12 @@ +name: ESCU Default Configuration Baseline +id: 0f7ee854-1aad-4bef-89c5-5c402b488510 +date: '2021-12-21' +author: Patrick Bareiss +description: This configuration file applies to all detections of type baseline. 
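+# Baselines are scheduled as support searches: they run once a day (see cron_schedule below) and define no alert_action block.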
+scheduling: + cron_schedule: 10 0 * * * + earliest_time: -1450m@m + latest_time: -10m@m + schedule_window: auto +tags: + type: Baseline diff --git a/contentctl/templates/deployments/00_default_correlation.yml b/contentctl/templates/deployments/00_default_correlation.yml new file mode 100644 index 00000000..6667ddcc --- /dev/null +++ b/contentctl/templates/deployments/00_default_correlation.yml @@ -0,0 +1,20 @@ +name: ESCU Default Configuration Correlation +id: 36ba498c-46e8-4b62-8bde-67e984a40fb4 +date: '2021-12-21' +author: Patrick Bareiss +description: This configuration file applies to all detections of type Correlation. + These correlations will generate Notable Events. +scheduling: + cron_schedule: 0 * * * * + earliest_time: -70m@m + latest_time: -10m@m + schedule_window: auto +alert_action: + notable: + rule_description: '%description%' + rule_title: '%name%' + nes_fields: + - user + - dest +tags: + type: 'Correlation' diff --git a/contentctl/templates/deployments/00_default_hunting.yml b/contentctl/templates/deployments/00_default_hunting.yml new file mode 100644 index 00000000..bd562c39 --- /dev/null +++ b/contentctl/templates/deployments/00_default_hunting.yml @@ -0,0 +1,12 @@ +name: ESCU Default Configuration Hunting +id: cc5895e8-3420-4ab7-af38-cf87a28f9c3b +date: '2021-12-21' +author: Patrick Bareiss +description: This configuration file applies to all detections of type hunting. +scheduling: + cron_schedule: 0 * * * * + earliest_time: -70m@m + latest_time: -10m@m + schedule_window: auto +tags: + type: Hunting diff --git a/contentctl/templates/deployments/00_default_ttp.yml b/contentctl/templates/deployments/00_default_ttp.yml new file mode 100644 index 00000000..513bffeb --- /dev/null +++ b/contentctl/templates/deployments/00_default_ttp.yml @@ -0,0 +1,22 @@ +name: ESCU Default Configuration TTP +id: b81cd059-a3e8-4c03-96ca-e168c50ff70b +date: '2021-12-21' +author: Patrick Bareiss +description: This configuration file applies to all detections of type TTP. + These detections will use Risk Based Alerting and generate Notable Events. 
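+# The alert_action block below makes every TTP detection generate a Notable Event (templated rule_title/rule_description, with user and dest as nes_fields) and enables Risk Based Alerting.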
+scheduling: + cron_schedule: 0 * * * * + earliest_time: -70m@m + latest_time: -10m@m + schedule_window: auto +alert_action: + notable: + rule_description: '%description%' + rule_title: '%name%' + nes_fields: + - user + - dest + rba: + enabled: 'true' +tags: + type: TTP diff --git a/contentctl/templates/detections/anomalous_usage_of_7zip.yml b/contentctl/templates/detections/anomalous_usage_of_7zip.yml index 82380985..5b464f3e 100644 --- a/contentctl/templates/detections/anomalous_usage_of_7zip.yml +++ b/contentctl/templates/detections/anomalous_usage_of_7zip.yml @@ -18,7 +18,7 @@ search: '| tstats `security_content_summariesonly` count min(_time) as firstTime as lastTime from datamodel=Endpoint.Processes where Processes.parent_process_name IN ("rundll32.exe", "dllhost.exe") Processes.process_name=*7z* by Processes.dest Processes.user Processes.parent_process Processes.process_name Processes.process - Processes.process_id Processes.parent_process_id | `drop_dm_object_name(Processes)` + Processes.process_id Processes.parent_process_id Processes.parent_process_name | `drop_dm_object_name(Processes)` | `security_content_ctime(firstTime)` | `security_content_ctime(lastTime)`| `anomalous_usage_of_7zip_filter`' how_to_implement: To successfully implement this search you need to be ingesting information on process that include the name of the process responsible for the changes from diff --git a/contentctl/templates/detections/anomalous_usage_of_7zip_test_fail.yml b/contentctl/templates/detections/anomalous_usage_of_7zip_test_fail.yml deleted file mode 100644 index 2c53907e..00000000 --- a/contentctl/templates/detections/anomalous_usage_of_7zip_test_fail.yml +++ /dev/null @@ -1,87 +0,0 @@ -name: Anomalous usage of 7zip Test Fail -id: 9364ee8e-a39a-11eb-8f1d-acde48001124 -version: 1 -date: '2021-04-22' -author: Michael Haag, Teoderick Contreras, Splunk -status: production -type: Anomaly -description: The following detection identifies a 7z.exe spawned from `Rundll32.exe` - or `Dllhost.exe`. It is assumed that the adversary has brought in `7z.exe` and `7z.dll`. - It has been observed where an adversary will rename `7z.exe`. Additional coverage - may be required to identify the behavior of renamed instances of `7z.exe`. During - triage, identify the source of injection into `Rundll32.exe` or `Dllhost.exe`. Capture - any files written to disk and analyze as needed. Review parallel processes for additional - behaviors. Typically, archiving files will result in exfiltration. -data_source: -- Sysmon Event ID 1 -#Note that the search field below is intentionally incorrect. -# CORRECT: Processes.process_name=*7z* -# INCORRECT: Processes.process_name=*INCORRECT_PROCESS_NAME* -search: '| tstats `security_content_summariesonly` count min(_time) as firstTime max(_time) - as lastTime from datamodel=Endpoint.Processes where Processes.parent_process_name - IN ("rundll32.exe", "dllhost.exe") Processes.process_name=*INCORRECT_PROCESS_NAME* by Processes.dest - Processes.user Processes.parent_process Processes.process_name Processes.process - Processes.process_id Processes.parent_process_id | `drop_dm_object_name(Processes)` - | `security_content_ctime(firstTime)` | `security_content_ctime(lastTime)`| `anomalous_usage_of_7zip_filter`' -how_to_implement: To successfully implement this search you need to be ingesting information - on process that include the name of the process responsible for the changes from - your endpoints into the `Endpoint` datamodel in the `Processes` node. 
-known_false_positives: False positives should be limited as this behavior is not normal - for `rundll32.exe` or `dllhost.exe` to spawn and run 7zip. -references: -- https://attack.mitre.org/techniques/T1560/001/ -- https://www.microsoft.com/security/blog/2021/01/20/deep-dive-into-the-solorigate-second-stage-activation-from-sunburst-to-teardrop-and-raindrop/ -- https://thedfirreport.com/2021/01/31/bazar-no-ryuk/ -tags: - analytic_story: - - Cobalt Strike - - NOBELIUM Group - asset_type: Endpoint - confidence: 80 - impact: 80 - message: An instance of $parent_process_name$ spawning $process_name$ was identified - on endpoint $dest$ by user $user$. This behavior is indicative of suspicious loading - of 7zip. - mitre_attack_id: - - T1560.001 - - T1560 - observable: - - name: user - type: User - role: - - Victim - - name: dest - type: Hostname - role: - - Victim - - name: parent_process_name - type: Process - role: - - Parent Process - - name: process_name - type: Process - role: - - Child Process - product: - - Splunk Enterprise - - Splunk Enterprise Security - - Splunk Cloud - required_fields: - - _time - - Processes.process_name - - Processes.process - - Processes.dest - - Processes.user - - Processes.parent_process_name - - Processes.process_name - - Processes.parent_process - - Processes.process_id - - Processes.parent_process_id - risk_score: 64 - security_domain: endpoint -tests: -- name: True Positive Test - attack_data: - - data: https://media.githubusercontent.com/media/splunk/attack_data/master/datasets/attack_techniques/T1560.001/archive_utility/windows-sysmon.log - source: XmlWinEventLog:Microsoft-Windows-Sysmon/Operational - sourcetype: xmlwineventlog \ No newline at end of file diff --git a/contentctl/templates/detections/anomalous_usage_of_7zip_validation_fail.yml b/contentctl/templates/detections/anomalous_usage_of_7zip_validation_fail.yml deleted file mode 100644 index 974ea946..00000000 --- a/contentctl/templates/detections/anomalous_usage_of_7zip_validation_fail.yml +++ /dev/null @@ -1,85 +0,0 @@ -name: Anomalous usage of 7zip Validation Fail -id: 9364ee8e-a39a-11eb-8f1d-acde48001123 -version: 1 -date: '2021-04-22' -author: Michael Haag, Teoderick Contreras, Splunk -status: production -type: Anomaly -description: The following detection identifies a 7z.exe spawned from `Rundll32.exe` - or `Dllhost.exe`. It is assumed that the adversary has brought in `7z.exe` and `7z.dll`. - It has been observed where an adversary will rename `7z.exe`. Additional coverage - may be required to identify the behavior of renamed instances of `7z.exe`. During - triage, identify the source of injection into `Rundll32.exe` or `Dllhost.exe`. Capture - any files written to disk and analyze as needed. Review parallel processes for additional - behaviors. Typically, archiving files will result in exfiltration. 
-data_source: -- Sysmon Event ID 1 -#Note that the search field below is INTENTIONALLY Misspelled -searchh: '| tstats `security_content_summariesonly` count min(_time) as firstTime max(_time) - as lastTime from datamodel=Endpoint.Processes where Processes.parent_process_name - IN ("rundll32.exe", "dllhost.exe") Processes.process_name=*7z* by Processes.dest - Processes.user Processes.parent_process Processes.process_name Processes.process - Processes.process_id Processes.parent_process_id | `drop_dm_object_name(Processes)` - | `security_content_ctime(firstTime)` | `security_content_ctime(lastTime)`| `anomalous_usage_of_7zip_filter`' -how_to_implement: To successfully implement this search you need to be ingesting information - on process that include the name of the process responsible for the changes from - your endpoints into the `Endpoint` datamodel in the `Processes` node. -known_false_positives: False positives should be limited as this behavior is not normal - for `rundll32.exe` or `dllhost.exe` to spawn and run 7zip. -references: -- https://attack.mitre.org/techniques/T1560/001/ -- https://www.microsoft.com/security/blog/2021/01/20/deep-dive-into-the-solorigate-second-stage-activation-from-sunburst-to-teardrop-and-raindrop/ -- https://thedfirreport.com/2021/01/31/bazar-no-ryuk/ -tags: - analytic_story: - - Cobalt Strike - - NOBELIUM Group - asset_type: Endpoint - confidence: 80 - impact: 80 - message: An instance of $parent_process_name$ spawning $process_name$ was identified - on endpoint $dest$ by user $user$. This behavior is indicative of suspicious loading - of 7zip. - mitre_attack_id: - - T1560.001 - - T1560 - observable: - - name: user - type: User - role: - - Victim - - name: dest - type: Hostname - role: - - Victim - - name: parent_process_name - type: Process - role: - - Parent Process - - name: process_name - type: Process - role: - - Child Process - product: - - Splunk Enterprise - - Splunk Enterprise Security - - Splunk Cloud - required_fields: - - _time - - Processes.process_name - - Processes.process - - Processes.dest - - Processes.user - - Processes.parent_process_name - - Processes.process_name - - Processes.parent_process - - Processes.process_id - - Processes.parent_process_id - risk_score: 64 - security_domain: endpoint -tests: -- name: True Positive Test - attack_data: - - data: https://media.githubusercontent.com/media/splunk/attack_data/master/datasets/attack_techniques/T1560.001/archive_utility/windows-sysmon.log - source: XmlWinEventLog:Microsoft-Windows-Sysmon/Operational - sourcetype: xmlwineventlog \ No newline at end of file diff --git a/poetry.lock b/poetry.lock deleted file mode 100644 index ffb03566..00000000 --- a/poetry.lock +++ /dev/null @@ -1,835 +0,0 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. 
- -[[package]] -name = "antlr4-python3-runtime" -version = "4.9.3" -description = "ANTLR 4.9.3 runtime for Python 3.7" -optional = false -python-versions = "*" -files = [ - {file = "antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b"}, -] - -[[package]] -name = "attackcti" -version = "0.3.9" -description = "MITRE ATTACK CTI Python Libary" -optional = false -python-versions = "*" -files = [ - {file = "attackcti-0.3.9-py3-none-any.whl", hash = "sha256:b283e3273cf242d6606c123b14cb22f0a59478d4eb301cdfd4dd99220caf71b3"}, - {file = "attackcti-0.3.9.tar.gz", hash = "sha256:eea2cf951889bd4f9c5357d27759f40eaf3a235b18e97c226cfc01f98200a41c"}, -] - -[package.dependencies] -stix2 = "*" -taxii2-client = "*" - -[[package]] -name = "bottle" -version = "0.12.25" -description = "Fast and simple WSGI-framework for small web-applications." -optional = false -python-versions = "*" -files = [ - {file = "bottle-0.12.25-py3-none-any.whl", hash = "sha256:d6f15f9d422670b7c073d63bd8d287b135388da187a0f3e3c19293626ce034ea"}, - {file = "bottle-0.12.25.tar.gz", hash = "sha256:e1a9c94970ae6d710b3fb4526294dfeb86f2cb4a81eff3a4b98dc40fb0e5e021"}, -] - -[[package]] -name = "certifi" -version = "2023.7.22" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.6" -files = [ - {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, - {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, -] - -[[package]] -name = "charset-normalizer" -version = "3.2.0" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-optional = false -python-versions = ">=3.7.0" -files = [ - {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"}, - {file = 
"charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592"}, - {file = 
"charset_normalizer-3.2.0-cp37-cp37m-win32.whl", hash = "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-win32.whl", hash = "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-win32.whl", hash = "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80"}, - {file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"}, -] - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "decorator" -version = "5.1.1" -description = "Decorators for Humans" -optional = false -python-versions = ">=3.5" -files = [ - {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, - {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, -] - -[[package]] -name = "docker" -version = "6.1.3" -description = "A Python library for the Docker Engine API." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "docker-6.1.3-py3-none-any.whl", hash = "sha256:aecd2277b8bf8e506e484f6ab7aec39abe0038e29fa4a6d3ba86c3fe01844ed9"}, - {file = "docker-6.1.3.tar.gz", hash = "sha256:aa6d17830045ba5ef0168d5eaa34d37beeb113948c413affe1d5991fc11f9a20"}, -] - -[package.dependencies] -packaging = ">=14.0" -pywin32 = {version = ">=304", markers = "sys_platform == \"win32\""} -requests = ">=2.26.0" -urllib3 = ">=1.26.0" -websocket-client = ">=0.32.0" - -[package.extras] -ssh = ["paramiko (>=2.4.3)"] - -[[package]] -name = "future" -version = "0.18.3" -description = "Clean single-source support for Python 3 and 2" -optional = false -python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "future-0.18.3.tar.gz", hash = "sha256:34a17436ed1e96697a86f9de3d15a3b0be01d8bc8de9c1dffd59fb8234ed5307"}, -] - -[[package]] -name = "gitdb" -version = "4.0.10" -description = "Git Object Database" -optional = false -python-versions = ">=3.7" -files = [ - {file = "gitdb-4.0.10-py3-none-any.whl", hash = "sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7"}, - {file = "gitdb-4.0.10.tar.gz", hash = "sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a"}, -] - -[package.dependencies] -smmap = ">=3.0.1,<6" - -[[package]] -name = "gitpython" -version = "3.1.32" -description = "GitPython is a Python library used to interact with Git repositories" -optional = false -python-versions = ">=3.7" -files = [ - {file = "GitPython-3.1.32-py3-none-any.whl", hash = "sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f"}, - {file = "GitPython-3.1.32.tar.gz", hash = "sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6"}, -] - -[package.dependencies] -gitdb = ">=4.0.1,<5" - -[[package]] -name = "idna" -version = "3.4" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.5" -files = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, -] - -[[package]] -name = "jinja2" -version = "3.1.2" -description = "A very fast and expressive template engine." -optional = false -python-versions = ">=3.7" -files = [ - {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, - {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, -] - -[package.dependencies] -MarkupSafe = ">=2.0" - -[package.extras] -i18n = ["Babel (>=2.7)"] - -[[package]] -name = "markupsafe" -version = "2.1.3" -description = "Safely add untrusted strings to HTML/XML markup." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, - {file = 
"MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, - {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, -] - -[[package]] -name = "packaging" -version = "23.1" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.7" -files = [ - {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, - {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, -] - -[[package]] -name = "prompt-toolkit" -version = "3.0.39" -description = "Library for building powerful interactive command lines in Python" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "prompt_toolkit-3.0.39-py3-none-any.whl", hash = "sha256:9dffbe1d8acf91e3de75f3b544e4842382fc06c6babe903ac9acb74dc6e08d88"}, - {file = "prompt_toolkit-3.0.39.tar.gz", hash = "sha256:04505ade687dc26dc4284b1ad19a83be2f2afe83e7a828ace0c72f3a1df72aac"}, -] - -[package.dependencies] -wcwidth = "*" - -[[package]] -name = "pycvesearch" -version = "1.2" -description = "Python API for CVE search." -optional = false -python-versions = ">=3.8,<4.0" -files = [ - {file = "pycvesearch-1.2-py3-none-any.whl", hash = "sha256:8aada6de946f7a86875ab9fc4fb7d0fe8c189181867f89e77bd3ff5f8b529fec"}, - {file = "pycvesearch-1.2.tar.gz", hash = "sha256:ec5cea5541b9902ce89d870645396d5d7321257b5d7b1abe5999786c8656a9dc"}, -] - -[package.dependencies] -requests = ">=2.28.1,<3.0.0" - -[[package]] -name = "pydantic" -version = "1.10.12" -description = "Data validation and settings management using python type hints" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pydantic-1.10.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a1fcb59f2f355ec350073af41d927bf83a63b50e640f4dbaa01053a28b7a7718"}, - {file = "pydantic-1.10.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b7ccf02d7eb340b216ec33e53a3a629856afe1c6e0ef91d84a4e6f2fb2ca70fe"}, - {file = "pydantic-1.10.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fb2aa3ab3728d950bcc885a2e9eff6c8fc40bc0b7bb434e555c215491bcf48b"}, - {file = "pydantic-1.10.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:771735dc43cf8383959dc9b90aa281f0b6092321ca98677c5fb6125a6f56d58d"}, - {file = "pydantic-1.10.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ca48477862372ac3770969b9d75f1bf66131d386dba79506c46d75e6b48c1e09"}, - {file = "pydantic-1.10.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a5e7add47a5b5a40c49b3036d464e3c7802f8ae0d1e66035ea16aa5b7a3923ed"}, - {file = "pydantic-1.10.12-cp310-cp310-win_amd64.whl", hash = "sha256:e4129b528c6baa99a429f97ce733fff478ec955513630e61b49804b6cf9b224a"}, - {file = "pydantic-1.10.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:b0d191db0f92dfcb1dec210ca244fdae5cbe918c6050b342d619c09d31eea0cc"}, - {file = "pydantic-1.10.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:795e34e6cc065f8f498c89b894a3c6da294a936ee71e644e4bd44de048af1405"}, - {file = "pydantic-1.10.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69328e15cfda2c392da4e713443c7dbffa1505bc9d566e71e55abe14c97ddc62"}, - {file = "pydantic-1.10.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2031de0967c279df0d8a1c72b4ffc411ecd06bac607a212892757db7462fc494"}, - {file = "pydantic-1.10.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ba5b2e6fe6ca2b7e013398bc7d7b170e21cce322d266ffcd57cca313e54fb246"}, - {file = "pydantic-1.10.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2a7bac939fa326db1ab741c9d7f44c565a1d1e80908b3797f7f81a4f86bc8d33"}, - {file = "pydantic-1.10.12-cp311-cp311-win_amd64.whl", hash = "sha256:87afda5539d5140cb8ba9e8b8c8865cb5b1463924d38490d73d3ccfd80896b3f"}, - {file = "pydantic-1.10.12-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:549a8e3d81df0a85226963611950b12d2d334f214436a19537b2efed61b7639a"}, - {file = "pydantic-1.10.12-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:598da88dfa127b666852bef6d0d796573a8cf5009ffd62104094a4fe39599565"}, - {file = "pydantic-1.10.12-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba5c4a8552bff16c61882db58544116d021d0b31ee7c66958d14cf386a5b5350"}, - {file = "pydantic-1.10.12-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c79e6a11a07da7374f46970410b41d5e266f7f38f6a17a9c4823db80dadf4303"}, - {file = "pydantic-1.10.12-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab26038b8375581dc832a63c948f261ae0aa21f1d34c1293469f135fa92972a5"}, - {file = "pydantic-1.10.12-cp37-cp37m-win_amd64.whl", hash = "sha256:e0a16d274b588767602b7646fa05af2782576a6cf1022f4ba74cbb4db66f6ca8"}, - {file = "pydantic-1.10.12-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6a9dfa722316f4acf4460afdf5d41d5246a80e249c7ff475c43a3a1e9d75cf62"}, - {file = "pydantic-1.10.12-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a73f489aebd0c2121ed974054cb2759af8a9f747de120acd2c3394cf84176ccb"}, - {file = "pydantic-1.10.12-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b30bcb8cbfccfcf02acb8f1a261143fab622831d9c0989707e0e659f77a18e0"}, - {file = "pydantic-1.10.12-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fcfb5296d7877af406ba1547dfde9943b1256d8928732267e2653c26938cd9c"}, - {file = "pydantic-1.10.12-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2f9a6fab5f82ada41d56b0602606a5506aab165ca54e52bc4545028382ef1c5d"}, - {file = "pydantic-1.10.12-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dea7adcc33d5d105896401a1f37d56b47d443a2b2605ff8a969a0ed5543f7e33"}, - {file = "pydantic-1.10.12-cp38-cp38-win_amd64.whl", hash = "sha256:1eb2085c13bce1612da8537b2d90f549c8cbb05c67e8f22854e201bde5d98a47"}, - {file = "pydantic-1.10.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ef6c96b2baa2100ec91a4b428f80d8f28a3c9e53568219b6c298c1125572ebc6"}, - {file = "pydantic-1.10.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c076be61cd0177a8433c0adcb03475baf4ee91edf5a4e550161ad57fc90f523"}, - {file = "pydantic-1.10.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d5a58feb9a39f481eda4d5ca220aa8b9d4f21a41274760b9bc66bfd72595b86"}, - {file = 
"pydantic-1.10.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5f805d2d5d0a41633651a73fa4ecdd0b3d7a49de4ec3fadf062fe16501ddbf1"}, - {file = "pydantic-1.10.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1289c180abd4bd4555bb927c42ee42abc3aee02b0fb2d1223fb7c6e5bef87dbe"}, - {file = "pydantic-1.10.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5d1197e462e0364906cbc19681605cb7c036f2475c899b6f296104ad42b9f5fb"}, - {file = "pydantic-1.10.12-cp39-cp39-win_amd64.whl", hash = "sha256:fdbdd1d630195689f325c9ef1a12900524dceb503b00a987663ff4f58669b93d"}, - {file = "pydantic-1.10.12-py3-none-any.whl", hash = "sha256:b749a43aa51e32839c9d71dc67eb1e4221bb04af1033a32e3923d46f9effa942"}, - {file = "pydantic-1.10.12.tar.gz", hash = "sha256:0fe8a415cea8f340e7a9af9c54fc71a649b43e8ca3cc732986116b3cb135d303"}, -] - -[package.dependencies] -typing-extensions = ">=4.2.0" - -[package.extras] -dotenv = ["python-dotenv (>=0.10.4)"] -email = ["email-validator (>=1.0.3)"] - -[[package]] -name = "pytz" -version = "2023.3" -description = "World timezone definitions, modern and historical" -optional = false -python-versions = "*" -files = [ - {file = "pytz-2023.3-py2.py3-none-any.whl", hash = "sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb"}, - {file = "pytz-2023.3.tar.gz", hash = "sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588"}, -] - -[[package]] -name = "pywin32" -version = "306" -description = "Python for Window Extensions" -optional = false -python-versions = "*" -files = [ - {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, - {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, - {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, - {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, - {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, - {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, - {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, - {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, - {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, - {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, - {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, - {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, - {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, - {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, -] - -[[package]] -name = "pyyaml" -version = "6.0.1" -description = "YAML parser and emitter for Python" -optional = 
false -python-versions = ">=3.6" -files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = 
"PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, -] - -[[package]] -name = "questionary" -version = "1.10.0" -description = "Python library to build pretty command line user prompts ⭐️" -optional = false -python-versions = ">=3.6,<4.0" -files = [ - {file = "questionary-1.10.0-py3-none-any.whl", hash = "sha256:fecfcc8cca110fda9d561cb83f1e97ecbb93c613ff857f655818839dac74ce90"}, - {file = "questionary-1.10.0.tar.gz", hash = "sha256:600d3aefecce26d48d97eee936fdb66e4bc27f934c3ab6dd1e292c4f43946d90"}, -] - -[package.dependencies] -prompt_toolkit = ">=2.0,<4.0" - -[package.extras] -docs = ["Sphinx (>=3.3,<4.0)", "sphinx-autobuild (>=2020.9.1,<2021.0.0)", "sphinx-autodoc-typehints (>=1.11.1,<2.0.0)", "sphinx-copybutton (>=0.3.1,<0.4.0)", "sphinx-rtd-theme (>=0.5.0,<0.6.0)"] - -[[package]] -name = "requests" -version = "2.31.0" -description = "Python HTTP for Humans." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "semantic-version" -version = "2.10.0" -description = "A library implementing the 'SemVer' scheme." -optional = false -python-versions = ">=2.7" -files = [ - {file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"}, - {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"}, -] - -[package.extras] -dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"] -doc = ["Sphinx", "sphinx-rtd-theme"] - -[[package]] -name = "simplejson" -version = "3.19.1" -description = "Simple, fast, extensible JSON encoder/decoder for Python" -optional = false -python-versions = ">=2.5, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "simplejson-3.19.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:412e58997a30c5deb8cab5858b8e2e5b40ca007079f7010ee74565cc13d19665"}, - {file = "simplejson-3.19.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e765b1f47293dedf77946f0427e03ee45def2862edacd8868c6cf9ab97c8afbd"}, - {file = "simplejson-3.19.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:3231100edee292da78948fa0a77dee4e5a94a0a60bcba9ed7a9dc77f4d4bb11e"}, - {file = "simplejson-3.19.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:081ea6305b3b5e84ae7417e7f45956db5ea3872ec497a584ec86c3260cda049e"}, - {file = "simplejson-3.19.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:f253edf694ce836631b350d758d00a8c4011243d58318fbfbe0dd54a6a839ab4"}, - {file = "simplejson-3.19.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:5db86bb82034e055257c8e45228ca3dbce85e38d7bfa84fa7b2838e032a3219c"}, - {file = "simplejson-3.19.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:69a8b10a4f81548bc1e06ded0c4a6c9042c0be0d947c53c1ed89703f7e613950"}, - {file = "simplejson-3.19.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:58ee5e24d6863b22194020eb62673cf8cc69945fcad6b283919490f6e359f7c5"}, - {file = "simplejson-3.19.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:73d0904c2471f317386d4ae5c665b16b5c50ab4f3ee7fd3d3b7651e564ad74b1"}, - {file = "simplejson-3.19.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:66d780047c31ff316ee305c3f7550f352d87257c756413632303fc59fef19eac"}, - {file = "simplejson-3.19.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cd4d50a27b065447c9c399f0bf0a993bd0e6308db8bbbfbc3ea03b41c145775a"}, - {file = "simplejson-3.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c16ec6a67a5f66ab004190829eeede01c633936375edcad7cbf06d3241e5865"}, - {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17a963e8dd4d81061cc05b627677c1f6a12e81345111fbdc5708c9f088d752c9"}, - {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:7e78d79b10aa92f40f54178ada2b635c960d24fc6141856b926d82f67e56d169"}, - {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad071cd84a636195f35fa71de2186d717db775f94f985232775794d09f8d9061"}, - {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e7c70f19405e5f99168077b785fe15fcb5f9b3c0b70b0b5c2757ce294922c8c"}, - {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:54fca2b26bcd1c403146fd9461d1da76199442297160721b1d63def2a1b17799"}, - {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:48600a6e0032bed17c20319d91775f1797d39953ccfd68c27f83c8d7fc3b32cb"}, - {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:93f5ac30607157a0b2579af59a065bcfaa7fadeb4875bf927a8f8b6739c8d910"}, - {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b79642a599740603ca86cf9df54f57a2013c47e1dd4dd2ae4769af0a6816900"}, - {file = "simplejson-3.19.1-cp310-cp310-win32.whl", hash = "sha256:d9f2c27f18a0b94107d57294aab3d06d6046ea843ed4a45cae8bd45756749f3a"}, - {file = "simplejson-3.19.1-cp310-cp310-win_amd64.whl", hash = "sha256:5673d27806085d2a413b3be5f85fad6fca4b7ffd31cfe510bbe65eea52fff571"}, - {file = "simplejson-3.19.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:79c748aa61fd8098d0472e776743de20fae2686edb80a24f0f6593a77f74fe86"}, - {file = "simplejson-3.19.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:390f4a8ca61d90bcf806c3ad644e05fa5890f5b9a72abdd4ca8430cdc1e386fa"}, - {file = "simplejson-3.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d61482b5d18181e6bb4810b4a6a24c63a490c3a20e9fbd7876639653e2b30a1a"}, - {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2541fdb7467ef9bfad1f55b6c52e8ea52b3ce4a0027d37aff094190a955daa9d"}, - {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46133bc7dd45c9953e6ee4852e3de3d5a9a4a03b068bd238935a5c72f0a1ce34"}, - {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f96def94576f857abf58e031ce881b5a3fc25cbec64b2bc4824824a8a4367af9"}, - {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f14ecca970d825df0d29d5c6736ff27999ee7bdf5510e807f7ad8845f7760ce"}, - {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:66389b6b6ee46a94a493a933a26008a1bae0cfadeca176933e7ff6556c0ce998"}, - {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:22b867205cd258050c2625325fdd9a65f917a5aff22a23387e245ecae4098e78"}, - {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c39fa911e4302eb79c804b221ddec775c3da08833c0a9120041dd322789824de"}, - {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:65dafe413b15e8895ad42e49210b74a955c9ae65564952b0243a18fb35b986cc"}, - {file = "simplejson-3.19.1-cp311-cp311-win32.whl", hash = "sha256:f05d05d99fce5537d8f7a0af6417a9afa9af3a6c4bb1ba7359c53b6257625fcb"}, - {file = "simplejson-3.19.1-cp311-cp311-win_amd64.whl", hash = "sha256:b46aaf0332a8a9c965310058cf3487d705bf672641d2c43a835625b326689cf4"}, - {file = "simplejson-3.19.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:b438e5eaa474365f4faaeeef1ec3e8d5b4e7030706e3e3d6b5bee6049732e0e6"}, - {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa9d614a612ad02492f704fbac636f666fa89295a5d22b4facf2d665fc3b5ea9"}, - {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46e89f58e4bed107626edce1cf098da3664a336d01fc78fddcfb1f397f553d44"}, - {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96ade243fb6f3b57e7bd3b71e90c190cd0f93ec5dce6bf38734a73a2e5fa274f"}, - {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed18728b90758d171f0c66c475c24a443ede815cf3f1a91e907b0db0ebc6e508"}, - {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:6a561320485017ddfc21bd2ed5de2d70184f754f1c9b1947c55f8e2b0163a268"}, - {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:2098811cd241429c08b7fc5c9e41fcc3f59f27c2e8d1da2ccdcf6c8e340ab507"}, - {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:8f8d179393e6f0cf6c7c950576892ea6acbcea0a320838c61968ac7046f59228"}, - {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:eff87c68058374e45225089e4538c26329a13499bc0104b52b77f8428eed36b2"}, - {file = "simplejson-3.19.1-cp36-cp36m-win32.whl", hash = "sha256:d300773b93eed82f6da138fd1d081dc96fbe53d96000a85e41460fe07c8d8b33"}, - {file = "simplejson-3.19.1-cp36-cp36m-win_amd64.whl", hash = "sha256:37724c634f93e5caaca04458f267836eb9505d897ab3947b52f33b191bf344f3"}, - {file = "simplejson-3.19.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:74bf802debe68627227ddb665c067eb8c73aa68b2476369237adf55c1161b728"}, - {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70128fb92932524c89f373e17221cf9535d7d0c63794955cc3cd5868e19f5d38"}, - {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8090e75653ea7db75bc21fa5f7bcf5f7bdf64ea258cbbac45c7065f6324f1b50"}, - {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a755f7bfc8adcb94887710dc70cc12a69a454120c6adcc6f251c3f7b46ee6aac"}, - {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ccb2c1877bc9b25bc4f4687169caa925ffda605d7569c40e8e95186e9a5e58b"}, - {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:919bc5aa4d8094cf8f1371ea9119e5d952f741dc4162810ab714aec948a23fe5"}, - {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e333c5b62e93949f5ac27e6758ba53ef6ee4f93e36cc977fe2e3df85c02f6dc4"}, - {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3a4480e348000d89cf501b5606415f4d328484bbb431146c2971123d49fd8430"}, - {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:cb502cde018e93e75dc8fc7bb2d93477ce4f3ac10369f48866c61b5e031db1fd"}, - {file = "simplejson-3.19.1-cp37-cp37m-win32.whl", hash = "sha256:f41915a4e1f059dfad614b187bc06021fefb5fc5255bfe63abf8247d2f7a646a"}, - {file = "simplejson-3.19.1-cp37-cp37m-win_amd64.whl", hash = "sha256:3844305bc33d52c4975da07f75b480e17af3558c0d13085eaa6cc2f32882ccf7"}, - {file = 
"simplejson-3.19.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:1cb19eacb77adc5a9720244d8d0b5507421d117c7ed4f2f9461424a1829e0ceb"}, - {file = "simplejson-3.19.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:926957b278de22797bfc2f004b15297013843b595b3cd7ecd9e37ccb5fad0b72"}, - {file = "simplejson-3.19.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b0e9a5e66969f7a47dc500e3dba8edc3b45d4eb31efb855c8647700a3493dd8a"}, - {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79d46e7e33c3a4ef853a1307b2032cfb7220e1a079d0c65488fbd7118f44935a"}, - {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:344a5093b71c1b370968d0fbd14d55c9413cb6f0355fdefeb4a322d602d21776"}, - {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23fbb7b46d44ed7cbcda689295862851105c7594ae5875dce2a70eeaa498ff86"}, - {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d3025e7e9ddb48813aec2974e1a7e68e63eac911dd5e0a9568775de107ac79a"}, - {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:87b190e6ceec286219bd6b6f13547ca433f977d4600b4e81739e9ac23b5b9ba9"}, - {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dc935d8322ba9bc7b84f99f40f111809b0473df167bf5b93b89fb719d2c4892b"}, - {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3b652579c21af73879d99c8072c31476788c8c26b5565687fd9db154070d852a"}, - {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6aa7ca03f25b23b01629b1c7f78e1cd826a66bfb8809f8977a3635be2ec48f1a"}, - {file = "simplejson-3.19.1-cp38-cp38-win32.whl", hash = "sha256:08be5a241fdf67a8e05ac7edbd49b07b638ebe4846b560673e196b2a25c94b92"}, - {file = "simplejson-3.19.1-cp38-cp38-win_amd64.whl", hash = "sha256:ca56a6c8c8236d6fe19abb67ef08d76f3c3f46712c49a3b6a5352b6e43e8855f"}, - {file = "simplejson-3.19.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6424d8229ba62e5dbbc377908cfee9b2edf25abd63b855c21f12ac596cd18e41"}, - {file = "simplejson-3.19.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:547ea86ca408a6735335c881a2e6208851027f5bfd678d8f2c92a0f02c7e7330"}, - {file = "simplejson-3.19.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:889328873c35cb0b2b4c83cbb83ec52efee5a05e75002e2c0c46c4e42790e83c"}, - {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44cdb4e544134f305b033ad79ae5c6b9a32e7c58b46d9f55a64e2a883fbbba01"}, - {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc2b3f06430cbd4fac0dae5b2974d2bf14f71b415fb6de017f498950da8159b1"}, - {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d125e754d26c0298715bdc3f8a03a0658ecbe72330be247f4b328d229d8cf67f"}, - {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:476c8033abed7b1fd8db62a7600bf18501ce701c1a71179e4ce04ac92c1c5c3c"}, - {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:199a0bcd792811c252d71e3eabb3d4a132b3e85e43ebd93bfd053d5b59a7e78b"}, - {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_i686.whl", hash = 
"sha256:a79b439a6a77649bb8e2f2644e6c9cc0adb720fc55bed63546edea86e1d5c6c8"}, - {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:203412745fed916fc04566ecef3f2b6c872b52f1e7fb3a6a84451b800fb508c1"}, - {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ca922c61d87b4c38f37aa706520328ffe22d7ac1553ef1cadc73f053a673553"}, - {file = "simplejson-3.19.1-cp39-cp39-win32.whl", hash = "sha256:3e0902c278243d6f7223ba3e6c5738614c971fd9a887fff8feaa8dcf7249c8d4"}, - {file = "simplejson-3.19.1-cp39-cp39-win_amd64.whl", hash = "sha256:d396b610e77b0c438846607cd56418bfc194973b9886550a98fd6724e8c6cfec"}, - {file = "simplejson-3.19.1-py3-none-any.whl", hash = "sha256:4710806eb75e87919b858af0cba4ffedc01b463edc3982ded7b55143f39e41e1"}, - {file = "simplejson-3.19.1.tar.gz", hash = "sha256:6277f60848a7d8319d27d2be767a7546bc965535b28070e310b3a9af90604a4c"}, -] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "smmap" -version = "5.0.0" -description = "A pure Python implementation of a sliding window memory map manager" -optional = false -python-versions = ">=3.6" -files = [ - {file = "smmap-5.0.0-py3-none-any.whl", hash = "sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94"}, - {file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"}, -] - -[[package]] -name = "splunk-packaging-toolkit" -version = "1.0.1" -description = "Splunk Packaging Toolkit" -optional = false -python-versions = "*" -files = [ - {file = "splunk-packaging-toolkit-1.0.1.tar.gz", hash = "sha256:7302af724718256108a6b78ed66c4c0066672223e58881e67897c87eddaf7388"}, -] - -[package.dependencies] -future = ">=0.18.2" -semantic_version = ">=2.5.0" - -[[package]] -name = "splunk-sdk" -version = "1.7.4" -description = "The Splunk Software Development Kit for Python." -optional = false -python-versions = "*" -files = [ - {file = "splunk-sdk-1.7.4.tar.gz", hash = "sha256:8f3f149e3a0daf7526ed36882c109e4ec8080e417efe25d23f4578e86d38b9f2"}, -] - -[[package]] -name = "stix2" -version = "3.0.1" -description = "Produce and consume STIX 2 JSON content" -optional = false -python-versions = ">=3.6" -files = [ - {file = "stix2-3.0.1-py2.py3-none-any.whl", hash = "sha256:827acf0b5b319c1b857c9db0d54907bb438b2b32312d236c891a305ad49b0ba2"}, - {file = "stix2-3.0.1.tar.gz", hash = "sha256:2a2718dc3451c84c709990b2ca220cc39c75ed23e0864d7e8d8190a9365b0cbf"}, -] - -[package.dependencies] -pytz = "*" -requests = "*" -simplejson = "*" -stix2-patterns = ">=1.2.0" - -[package.extras] -semantic = ["haversine", "rapidfuzz"] -taxii = ["taxii2-client (>=2.3.0)"] - -[[package]] -name = "stix2-patterns" -version = "2.0.0" -description = "Validate STIX 2 Patterns." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "stix2-patterns-2.0.0.tar.gz", hash = "sha256:07750c5a5af2c758e9d2aa4dde9d8e04bcd162ac2a9b0b4c4de4481d443efa08"}, - {file = "stix2_patterns-2.0.0-py2.py3-none-any.whl", hash = "sha256:ca4d68b2db42ed99794a418388769d2676ca828e9cac0b8629e73cd3f68f6458"}, -] - -[package.dependencies] -antlr4-python3-runtime = ">=4.9.0,<4.10.0" -six = "*" - -[package.extras] -dev = ["bumpversion", "check-manifest", "coverage", "pre-commit", "pytest", "pytest-cov", "sphinx", "sphinx-prompt", "tox"] -docs = ["sphinx", "sphinx-prompt"] -test = ["coverage", "pytest", "pytest-cov"] - -[[package]] -name = "taxii2-client" -version = "2.3.0" -description = "TAXII 2 Client Library" -optional = false -python-versions = "*" -files = [ - {file = "taxii2-client-2.3.0.tar.gz", hash = "sha256:fb3bf895e2eaff3cd08bb7aad75c9d30682ffc00b9f3add77de3a67dc6b895a3"}, - {file = "taxii2_client-2.3.0-py2.py3-none-any.whl", hash = "sha256:b4212b8a8bab170cd5dc386ca3ea36bc44b53932f1da30db150abeef00bce7b9"}, -] - -[package.dependencies] -pytz = "*" -requests = "*" -six = "*" - -[package.extras] -docs = ["sphinx", "sphinx-prompt"] -test = ["coverage", "pytest", "pytest-cov", "responses", "tox"] - -[[package]] -name = "tqdm" -version = "4.65.0" -description = "Fast, Extensible Progress Meter" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tqdm-4.65.0-py3-none-any.whl", hash = "sha256:c4f53a17fe37e132815abceec022631be8ffe1b9381c2e6e30aa70edc99e9671"}, - {file = "tqdm-4.65.0.tar.gz", hash = "sha256:1871fb68a86b8fb3b59ca4cdd3dcccbc7e6d613eeed31f4c332531977b89beb5"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[package.extras] -dev = ["py-make (>=0.1.0)", "twine", "wheel"] -notebook = ["ipywidgets (>=6)"] -slack = ["slack-sdk"] -telegram = ["requests"] - -[[package]] -name = "typing-extensions" -version = "4.7.1" -description = "Backported and Experimental Type Hints for Python 3.7+" -optional = false -python-versions = ">=3.7" -files = [ - {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, - {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, -] - -[[package]] -name = "urllib3" -version = "2.0.4" -description = "HTTP library with thread-safe connection pooling, file post, and more." -optional = false -python-versions = ">=3.7" -files = [ - {file = "urllib3-2.0.4-py3-none-any.whl", hash = "sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4"}, - {file = "urllib3-2.0.4.tar.gz", hash = "sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "validators" -version = "0.20.0" -description = "Python Data Validation for Humans™." 
-optional = false -python-versions = ">=3.4" -files = [ - {file = "validators-0.20.0.tar.gz", hash = "sha256:24148ce4e64100a2d5e267233e23e7afeb55316b47d30faae7eb6e7292bc226a"}, -] - -[package.dependencies] -decorator = ">=3.4.0" - -[package.extras] -test = ["flake8 (>=2.4.0)", "isort (>=4.2.2)", "pytest (>=2.2.3)"] - -[[package]] -name = "wcwidth" -version = "0.2.6" -description = "Measures the displayed width of unicode strings in a terminal" -optional = false -python-versions = "*" -files = [ - {file = "wcwidth-0.2.6-py2.py3-none-any.whl", hash = "sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e"}, - {file = "wcwidth-0.2.6.tar.gz", hash = "sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0"}, -] - -[[package]] -name = "websocket-client" -version = "1.6.1" -description = "WebSocket client for Python with low level API options" -optional = false -python-versions = ">=3.7" -files = [ - {file = "websocket-client-1.6.1.tar.gz", hash = "sha256:c951af98631d24f8df89ab1019fc365f2227c0892f12fd150e935607c79dd0dd"}, - {file = "websocket_client-1.6.1-py3-none-any.whl", hash = "sha256:f1f9f2ad5291f0225a49efad77abf9e700b6fef553900623060dad6e26503b9d"}, -] - -[package.extras] -docs = ["Sphinx (>=3.4)", "sphinx-rtd-theme (>=0.5)"] -optional = ["python-socks", "wsaccel"] -test = ["websockets"] - -[[package]] -name = "xmltodict" -version = "0.13.0" -description = "Makes working with XML feel like you are working with JSON" -optional = false -python-versions = ">=3.4" -files = [ - {file = "xmltodict-0.13.0-py2.py3-none-any.whl", hash = "sha256:aa89e8fd76320154a40d19a0df04a4695fb9dc5ba977cbb68ab3e4eb225e7852"}, - {file = "xmltodict-0.13.0.tar.gz", hash = "sha256:341595a488e3e01a85a9d8911d8912fd922ede5fecc4dce437eb4b6c8d037e56"}, -] - -[metadata] -lock-version = "2.0" -python-versions = "^3.9" -content-hash = "1a552c0b23de4a3391a2b8d6b190492e1e1a75a92647d0f7772735c98d8ca6e0" diff --git a/pyproject.toml b/pyproject.toml index 86c6ae31..e607ebbc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,7 +27,8 @@ semantic-version = "^2.10.0" bottle = "^0.12.23" tqdm = "^4.65.0" #splunk-appinspect = "^2.36.0" -splunk-packaging-toolkit = "^1.0.1" +pysigma = "^0.10.5" +pysigma-backend-splunk = "^1.0.3" [tool.poetry.dev-dependencies]
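The pyproject.toml hunk above swaps splunk-packaging-toolkit for pysigma and pysigma-backend-splunk, the libraries that back the new Sigma-to-Splunk conversion path introduced elsewhere in this diff. As a rough illustration of what those two dependencies provide, here is a minimal sketch (not code from this repository; the example rule and the pipeline-less SplunkBackend() setup are illustrative assumptions) that converts one Sigma rule into an SPL search:

# Minimal sketch of the pySigma + pysigma-backend-splunk workflow implied by the
# new dependencies. The rule content below is illustrative, not from this repo.
from sigma.collection import SigmaCollection
from sigma.backends.splunk import SplunkBackend

rule_yaml = """
title: Suspicious whoami Execution
status: test
logsource:
    category: process_creation
    product: windows
detection:
    selection:
        Image|endswith: '\\whoami.exe'
    condition: selection
"""

# Parse the Sigma rule and convert it; convert() yields one query string per rule.
collection = SigmaCollection.from_yaml(rule_yaml)
backend = SplunkBackend()
for query in backend.convert(collection):
    print(query)

In the conversion action added earlier in this diff, queries like these would presumably be wrapped into contentctl detection YAML rather than printed directly.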