Commit 48f8598

Branch was auto-updated.

srv-rr-gh-researchbt committed Oct 4, 2023
2 parents 6a65276 + f7dcab2 commit 48f8598
Showing 350 changed files with 6,252 additions and 5,949 deletions.
@@ -99,7 +99,7 @@ def createSecurityContent(self, type: SecurityContentType) -> list[Tuple[pathlib



-            if detection.status == "production":
+            if detection.status in ["production","validation"]:
                 self.output_dto.detections.append(detection)
         else:
             raise(Exception(f"Unsupported content type: [{type}]"))
@@ -75,6 +75,7 @@ class Detection(BaseModel, SecurityContentObject):
     nes_fields: str = None
     providing_technologies: list = None
     runtime: str = None
+    internalVersion: str = None

     # @validator('name')v
     # def name_max_length(cls, v, values):
@@ -139,6 +140,17 @@ def name_max_length(cls, values):
         if len(values["name"]) > 67:
             raise ValueError("name is longer than 67 chars: " + values["name"])
         return values

+    @root_validator
+    def validation_for_ba_only(cls, values):
+        # Ensure that only a BA detection can have status: validation
+        if values["status"] == "validation":
+            if "ssa_" not in values["file_path"]:
+                raise ValueError(f"The following is NOT an ssa_ detection, but has 'status: {values['status']}', which may ONLY be used for ssa_ detections: {values['file_path']}")
+            else:
+                # This is an ssa_ validation detection
+                pass
+        return values


     @root_validator
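For illustration, a minimal sketch of how the new validator behaves, using hypothetical file paths (not from this commit):

    # Hypothetical inputs to validation_for_ba_only:
    values = {"status": "validation", "file_path": "ssa_detections/endpoint/ssa_example.yml"}
    # "ssa_" in file_path -> accepted; values are returned unchanged.

    values = {"status": "validation", "file_path": "detections/endpoint/example.yml"}
    # "ssa_" not in file_path -> raises ValueError.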
@@ -40,7 +40,9 @@ class DetectionTags(BaseModel):
     risk_level: str = None
     observable_str: str = None
     evidence_str: str = None
+    analytics_story_str: str = None
     kill_chain_phases_id: list = None
+    kill_chain_phases_str: str = None
     research_site_url: str = None
     event_schema: str = None
     mappings: list = None
@@ -59,3 +59,4 @@ class DetectionStatus(enum.Enum):
     production = "production"
     deprecated = "deprecated"
     experimental = "experimental"
+    validation = "validation"
@@ -10,6 +10,37 @@ class FindingReportObject():
     @staticmethod
     def writeFindingReport(detection : Detection) -> None:

+        if detection.tags.confidence < 33:
+            detection.tags.confidence_id = 1
+        elif detection.tags.confidence < 66:
+            detection.tags.confidence_id = 2
+        else:
+            detection.tags.confidence_id = 3
+
+        if detection.tags.impact < 20:
+            detection.tags.impact_id = 1
+        elif detection.tags.impact < 40:
+            detection.tags.impact_id = 2
+        elif detection.tags.impact < 60:
+            detection.tags.impact_id = 3
+        elif detection.tags.impact < 80:
+            detection.tags.impact_id = 4
+        else:
+            detection.tags.impact_id = 5
+
+        detection.tags.kill_chain_phases_id = dict()
+        for kill_chain_phase in detection.tags.kill_chain_phases:
+            detection.tags.kill_chain_phases_id[kill_chain_phase] = SES_KILL_CHAIN_MAPPINGS[kill_chain_phase]
+
+        kill_chain_phase_str = "["
+        i = 0
+        for kill_chain_phase in detection.tags.kill_chain_phases_id.keys():
+            kill_chain_phase_str = kill_chain_phase_str + '{"phase": "' + kill_chain_phase + '", "phase_id": ' + str(detection.tags.kill_chain_phases_id[kill_chain_phase]) + "}"
+            if not i == (len(detection.tags.kill_chain_phases_id.keys()) - 1):
+                kill_chain_phase_str = kill_chain_phase_str + ', '
+            i = i + 1
+        kill_chain_phase_str = kill_chain_phase_str + ']'
+        detection.tags.kill_chain_phases_str = kill_chain_phase_str
+
         if detection.tags.risk_score < 20:
             detection.tags.risk_level_id = 0
@@ -27,15 +58,23 @@ def writeFindingReport(detection : Detection) -> None:
             detection.tags.risk_level_id = 4
             detection.tags.risk_level = "Critical"

-        evidence_str = "create_map("
+        evidence_str = "{"
         for i in range(len(detection.tags.observable)):
-            evidence_str = evidence_str + '"' + detection.tags.observable[i]["name"] + '", ' + detection.tags.observable[i]["name"].replace(".", "_")
+            evidence_str = evidence_str + '"' + detection.tags.observable[i]["name"] + '": ' + detection.tags.observable[i]["name"].replace(".", "_")
             if not i == (len(detection.tags.observable) - 1):
                 evidence_str = evidence_str + ', '
-        evidence_str = evidence_str + ')'
+        evidence_str = evidence_str + '}'

         detection.tags.evidence_str = evidence_str

+        analytics_story_str = "["
+        for i in range(len(detection.tags.analytic_story)):
+            analytics_story_str = analytics_story_str + '"' + detection.tags.analytic_story[i] + '"'
+            if not i == (len(detection.tags.analytic_story) - 1):
+                analytics_story_str = analytics_story_str + ', '
+        analytics_story_str = analytics_story_str + ']'
+        detection.tags.analytics_story_str = analytics_story_str
+
         if "actor.user.name" in detection.tags.required_fields:
             actor_user_name = "actor_user_name"
         else:
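The kill-chain and analytic-story strings built above are JSON literals, so an equivalent sketch with json.dumps (example inputs assumed; the SES_KILL_CHAIN_MAPPINGS values here are illustrative, not taken from this commit) is:

    import json

    kill_chain_phases = ["Exploitation", "Installation"]              # assumed input
    SES_KILL_CHAIN_MAPPINGS = {"Exploitation": 4, "Installation": 5}  # illustrative ids

    kill_chain_phases_str = json.dumps(
        [{"phase": p, "phase_id": SES_KILL_CHAIN_MAPPINGS[p]} for p in kill_chain_phases]
    )
    # '[{"phase": "Exploitation", "phase_id": 4}, {"phase": "Installation", "phase_id": 5}]'

    analytics_story_str = json.dumps(["Suspicious Okta Activity", "Okta MFA Exhaustion"])
    # '["Suspicious Okta Activity", "Okta MFA Exhaustion"]'

Note that evidence_str is deliberately not JSON — its values are bare field references — so it is still built by hand. As a worked example of the new bucketing, confidence 80 maps to confidence_id 3 (>= 66) and impact 70 to impact_id 4 (60 <= impact < 80).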
@@ -66,7 +66,8 @@ def writeObjects(self, objects: list, output_path: str, type: SecurityContentTyp
"nist": obj.tags.nist
}

obj.runtime = "SPL-DSP"
obj.runtime = "SPL2"
obj.internalVersion = 2

# remove unncessary fields
YmlWriter.writeYmlFile(file_path, obj.dict(
@@ -84,6 +85,7 @@ def writeObjects(self, objects: list, output_path: str, type: SecurityContentTyp
"known_false_positives": True,
"references": True,
"runtime": True,
"internalVersion": True,
"tags":
{
#"analytic_story": True,
@@ -1,28 +1,30 @@

-| eval body=create_map(
-    "devices", [
-        create_map(
-            "hostname", device_hostname, "type_id", 0, "uuid", ucast(map_get(device,"uuid"), "string", null)
-        )
-    ],
-    "time", timestamp,
-    "evidence", {{ detection.tags.evidence_str }},
-    "message", concat("{{ detection.name }} has been triggered on ", device_hostname, " by ", {{ actor_user_name }}, "."),
-    "users", [
-        create_map(
-            "name", {{ actor_user_name }}, "uid", ucast(map_get(actor_user,"uid"), "string", null)
-        )
-    ],
-    "activity_id", 1,
-    "category_uid", 2,
-    "class_uid", 102001,
-    "risk_level_id", {{ detection.tags.risk_level_id }},
-    "risk_score", {{ detection.tags.risk_score }},
-    "severity_id", 0,
-    "rule", create_map("name", "{{ detection.name }}", "uid", "{{ detection.id }}", "type", "Streaming"),
-    "metadata", create_map("customer_uid", ucast(map_get(metadata,"customer_uid"), "string", null), "product", create_map("name", "Behavior Analytics", "vendor_name", "Splunk"), "version", "1.0.0-rc.2", "logged_time", time()),
-    "type_uid", 10200101,
-    "start_time", timestamp,
-    "end_time", timestamp
-)
-| into write_ba_finding_events();
+| eval devices = [{"hostname": device_hostname, "type_id": 0, "uuid": device.uuid}],
+    time = timestamp,
+    evidence = {{ detection.tags.evidence_str }},
+    message = "{{ detection.name }} has been triggered on " + device_hostname + " by " + {{ actor_user_name }} + ".",
+    users = [{"name": {{ actor_user_name }}, "uid": actor_user.uid}],
+    activity_id = 1,
+    cis_csc = [{"control": "CIS 10", "version": 8}],
+    analytic_stories = {{ detection.tags.analytics_story_str }},
+    class_name = "Detection Report",
+    confidence = {{ detection.tags.confidence }},
+    confidence_id = {{ detection.tags.confidence_id }},
+    duration = 0,
+    impact = {{ detection.tags.impact }},
+    impact_id = {{ detection.tags.impact_id }},
+    kill_chain = {{ detection.tags.kill_chain_phases_str }},
+    nist = ["DE.AE"],
+    risk_level = "{{ detection.tags.risk_level }}",
+    category_uid = 2,
+    class_uid = 102001,
+    risk_level_id = {{ detection.tags.risk_level_id }},
+    risk_score = {{ detection.tags.risk_score }},
+    severity_id = 0,
+    rule = {"name": "{{ detection.name }}", "uid": "{{ detection.id }}", "type": "Streaming"},
+    metadata = {"customer_uid": metadata.customer_uid, "product": {"name": "Behavior Analytics", "vendor_name": "Splunk"}, "version": "1.0.0-rc.2", "logged_time": time()},
+    type_uid = 10200101,
+    start_time = timestamp,
+    end_time = timestamp
+| fields metadata, rule, activity_id, analytic_stories, cis_csc, category_uid, class_name, class_uid, confidence, confidence_id, devices, duration, time, evidence, impact, impact_id, kill_chain, message, nist, observables, risk_level, risk_level_id, risk_score, severity_id, type_uid, users, start_time, end_time
+| into sink;
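For reference, a sketch of how one line of this template renders, assuming these template files are filled in with Jinja2 and using hypothetical detection values:

    from jinja2 import Template  # assumption: the builder renders these templates with Jinja2

    line = Template('message = "{{ detection.name }} has been triggered on " + device_hostname'
                    ' + " by " + {{ actor_user_name }} + ".",')
    print(line.render(detection={"name": "Example Detection"}, actor_user_name="actor_user_name"))
    # message = "Example Detection has been triggered on " + device_hostname + " by " + actor_user_name + ".",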
@@ -33,7 +33,7 @@ class SplunkBABackend(TextQueryBackend):
     wildcard_single : ClassVar[str] = "%"
     add_escaped : ClassVar[str] = "\\"

-    re_expression : ClassVar[str] = "match_regex({field}, /(?i){regex}/)=true"
+    re_expression : ClassVar[str] = "match({field}, /(?i){regex}/)=true"
     re_escape_char : ClassVar[str] = ""
     re_escape : ClassVar[Tuple[str]] = ('"',)

@@ -64,7 +64,7 @@ class SplunkBABackend(TextQueryBackend):
     deferred_separator : ClassVar[str] = " OR "
     deferred_only_query : ClassVar[str] = "*"

-    wildcard_match_expression : ClassVar[Optional[str]] = "like({field}, {value})"
+    wildcard_match_expression : ClassVar[Optional[str]] = "{field} LIKE {value}"


def __init__(self, processing_pipeline: Optional["sigma.processing.pipeline.ProcessingPipeline"] = None, collect_errors: bool = False, min_time : str = "-30d", max_time : str = "now", detection : Detection = None, field_mapping: dict = None, **kwargs):
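Rendered against a hypothetical field and pattern, the two changed expressions produce, for example:

    # re_expression with field=process_name, regex=powershell.* (illustrative):
    #   before: match_regex(process_name, /(?i)powershell.*/)=true
    #   after:  match(process_name, /(?i)powershell.*/)=true
    # wildcard_match_expression with field=process_name, value="%powershell%":
    #   before: like(process_name, "%powershell%")
    #   after:  process_name LIKE "%powershell%"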
@@ -88,41 +88,36 @@ def finalize_query_data_model(self, rule: SigmaRule, query: str, index: int, sta
         # fields_input_parsing = fields_input_parsing + ', '

         detection_str = """
-| from read_ba_enriched_events()
-| eval timestamp = ucast(map_get(input_event,"time"),"long", null)
-| eval metadata = ucast(map_get(input_event, "metadata"),"map<string, any>", null)
-| eval metadata_uid = ucast(map_get(metadata, "uid"),"string", null)
+$main = from source
+| eval timestamp = time
+| eval metadata_uid = metadata.uid
 """.replace("\n", " ")

         parsed_fields = []

         for field in self.field_mapping["mapping"].keys():
             mapped_field = self.field_mapping["mapping"][field]
-            parent = 'input_event'
+            parent = 'parent'
             i = 1
             values = mapped_field.split('.')
             for val in values:
-                if parent == "input_event":
-                    new_val = val
+                if parent == "parent":
+                    parent = val
+                    continue
                 else:
                     new_val = parent + '_' + val
                 if new_val in parsed_fields:
                     parent = new_val
                     i = i + 1
                     continue
-                if i == len(values):
-                    parser_str = '| eval ' + new_val + '' + '=ucast(map_get(' + parent + ',"' + val + '"), "string", null) '
-                else:
-                    parser_str = '| eval ' + new_val + '' + '=ucast(map_get(' + parent + ',"' + val + '"), "map<string, any>", null) '
+                parser_str = '| eval ' + new_val + ' = ' + parent + '.' + val + ' '
                 detection_str = detection_str + parser_str
                 parsed_fields.append(new_val)
                 parent = new_val
                 i = i + 1

         detection_str = detection_str + "| where " + query
+        detection_str = detection_str.replace("\\\\\\\\", "\\\\")


         return detection_str

def finalize_output_data_model(self, queries: List[str]) -> List[str]:
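Traced by hand, the rewritten loop expands a dotted field mapping into chained evals; a sketch for the mapped field actor.user.name (the field also referenced by the template logic above):

    # mapped_field = "actor.user.name" -> values = ["actor", "user", "name"]
    #   val = "actor": first segment; parent becomes "actor", continue
    #   val = "user":  emits  | eval actor_user = actor.user
    #   val = "name":  emits  | eval actor_user_name = actor_user.name
    # The removed code emitted ucast/map_get chains for the same walk, e.g.:
    #   | eval actor_user_name=ucast(map_get(actor_user,"name"), "string", null)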
14 changes: 3 additions & 11 deletions detections/application/okta_risk_threshold_exceeded.yml
@@ -8,9 +8,7 @@ type: Correlation
 description:
   The following correlation will take risk associated with the content
   from "Suspicious Okta Activity" and "Okta MFA Exhaustion" analytic stories and
-  tally it up. Once it hits the threshold of 100 (may be changed), it will trigger
-  an anomaly. As needed, reduce or raise the risk scores assocaited with the anomaly
-  and TTP analytics tagged to these two stories.
+  tally it up. Once it hits the threshold of 100 (can be changed), it will trigger a notable. As needed, reduce or raise the risk scores associated with the anomaly and TTP analytics tagged to these two analytic stories.
 data_source: []
 search:
   '| tstats `security_content_summariesonly` sum(All_Risk.calculated_risk_score) as risk_score,
@@ -44,19 +42,13 @@ tags:
   asset_type: Infrastructure
   confidence: 80
   impact: 70
-  message:
-    Risk threshold exceeded for $risk_object_type$=$risk_object$ related to
-    Okta events.
+  message: Risk score $risk_score$ threshold exceeded for $risk_object$ related to Okta events.
   mitre_attack_id:
   - T1078
   - T1110
   observable:
   - name: risk_object
-    type: Other
-    role:
-    - Victim
-  - name: risk_object_type
-    type: Other
+    type: Hostname
     role:
     - Victim
   product:
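As a worked example of the threshold logic (hypothetical scores): two risk events of 56 and 48 attributed to the same risk object sum to 104, crossing the threshold of 100 and raising the notable, while a single 80-point event would not.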
@@ -0,0 +1,58 @@
+name: Splunk Absolute Path Traversal Using runshellscript
+id: 356bd3fe-f59b-4f64-baa1-51495411b7ad
+version: 1
+date: '2023-09-05'
+author: Rod Soto
+status: production
+type: Hunting
+data_source: []
+description: In Splunk Enterprise versions lower than 8.2.12, 9.0.6, and 9.1.1, an attacker can exploit an absolute path traversal to execute arbitrary code that is located on a separate disk.
+search: >
+  `splunk_python` *runshellscript*
+  | eval log_split=split(_raw, "runshellscript: ")
+  | eval array_raw = mvindex(log_split,1)
+  | eval data_cleaned=replace(replace(replace(array_raw,"\[",""),"\]",""),"'","")
+  | eval array_indices=split(data_cleaned,",")
+  | eval runshellscript_args_count=mvcount(array_indices)
+  | where runshellscript_args_count = 10
+  | eval interpreter=mvindex(array_indices,0)
+  | eval targetScript=mvindex(array_indices,1)
+  | eval targetScript != "*C:*"
+  | stats count min(_time) as firstTime max(_time) as lastTime by splunk_server interpreter targetScript
+  | `security_content_ctime(firstTime)`| `security_content_ctime(lastTime)`
+  | `splunk_absolute_path_traversal_using_runshellscript_filter`
+how_to_implement: Must have access to internal indexes. Only applies to Splunk on Windows versions.
+known_false_positives: The runshellscript command can be used for benign purposes. Analysts will have to review the searches and determine maliciousness, especially by looking at the targeted script.
+references:
+- https://advisory.splunk.com/advisories/SVD-2023-0806
+tags:
+  analytic_story:
+  - Splunk Vulnerabilities
+  asset_type: endpoint
+  confidence: 70
+  impact: 50
+  message: Possible attack against splunk_server $splunk_server$ through abuse of the runshellscript command
+  mitre_attack_id:
+  - T1083
+  cve:
+  - CVE-2023-40597
+  observable:
+  - name: splunk_server
+    type: Hostname
+    role:
+    - Victim
+  product:
+  - Splunk Enterprise
+  - Splunk Enterprise Security
+  - Splunk Cloud
+  risk_score: 35
+  required_fields:
+  - search
+  security_domain: endpoint
+tests:
+- name: True Positive Test
+  attack_data:
+  - data: https://media.githubusercontent.com/media/splunk/attack_data/master/datasets/attack_techniques/T1083/splunk/splunk_absolute_path_traversal_using_runshellscript_splunk_python.log
+    source: python.log
+    sourcetype: splunk_python
+    custom_index: _internal
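The eval chain in this search is string surgery on the logged argument list; a plain-Python sketch of the same parsing, against a hypothetical log line (format assumed from the search logic):

    raw = "... runshellscript: ['python', '/disk2/evil.py', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10']"

    array_raw = raw.split("runshellscript: ")[1]                  # mvindex(log_split,1)
    data_cleaned = array_raw.replace("[", "").replace("]", "").replace("'", "")
    array_indices = data_cleaned.split(",")
    if len(array_indices) == 10:                                  # runshellscript_args_count = 10
        interpreter = array_indices[0]                            # 'python'
        target_script = array_indices[1].strip()                  # '/disk2/evil.py'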
@@ -14,7 +14,7 @@ description: In Splunk Enterprise versions below 8.1.13, 8.2.10, and 9.0.4, a cr
   this vulnerability.
 data_source: []
 search: '`splunkda` uri_path="/en-US/splunkd/__raw/services/ssg/kvstore_client" method="GET"
-  delete_field_value="spacebridge_server" status="200" | table splunk_server status
+  delete_field_value="spacebridge_server" status="200" | table splunk_server status
   uri delete_field_value method post_data | `splunk_csrf_in_the_ssg_kvstore_client_endpoint_filter`'
 how_to_implement: Requires access to internal index.
 known_false_positives: This hunting search only applies to the affected versions and
@@ -31,7 +31,7 @@ tags:
   cve:
   - CVE-2023-22942
   impact: 50
-  message: Potential CSRF exploitation attempt from $host$
+  message: Potential CSRF exploitation attempt from $splunk_server$
   mitre_attack_id:
   - T1189
   observable: