From e8183585ed4675e59b3a702601fec1be0f9f2872 Mon Sep 17 00:00:00 2001 From: research bot Date: Wed, 15 May 2024 15:41:49 +0000 Subject: [PATCH] Updating Github with Content from ESCU - v4.31.1 --- contentctl | 2 +- contentctl.yml | 2 +- dist/DA-ESS-ContentUpdate/app.manifest | 4 +- .../default/analyticstories.conf | 1586 ++++++------- dist/DA-ESS-ContentUpdate/default/app.conf | 6 +- .../default/collections.conf | 2 +- .../default/content-version.conf | 4 +- ...l_backup_logs_for_host___response_task.xml | 2 +- ...tes_activity_by_src_ip___response_task.xml | 2 +- ...ity_hub_alerts_by_dest___response_task.xml | 2 +- ...ivities_by_accesskeyid___response_task.xml | 2 +- ...user_activities_by_arn___response_task.xml | 2 +- ...rk_acl_details_from_id___response_task.xml | 2 +- ...details_via_resourceid___response_task.xml | 2 +- ...details_via_bucketname___response_task.xml | 2 +- ...tes_activity_by_src_ip___response_task.xml | 2 +- ...aws_activity_from_city___response_task.xml | 2 +- ..._activity_from_country___response_task.xml | 2 +- ...tivity_from_ip_address___response_task.xml | 2 +- ...s_activity_from_region___response_task.xml | 2 +- ...ckup_logs_for_endpoint___response_task.xml | 2 +- ...cate_logs_for_a_domain___response_task.xml | 2 +- ...ver_history_for_a_host___response_task.xml | 2 +- ..._get_dns_traffic_ratio___response_task.xml | 2 +- ..._details_by_instanceid___response_task.xml | 2 +- ...get_ec2_launch_details___response_task.xml | 2 +- ...h_panel_get_email_info___response_task.xml | 2 +- ...s_from_specific_sender___response_task.xml | 2 +- ...rence_of_a_mac_address___response_task.xml | 2 +- ...story_of_email_sources___response_task.xml | 2 +- ...fications_for_endpoint___response_task.xml | 2 +- ...modifications_for_user___response_task.xml | 2 +- ...el_get_notable_history___response_task.xml | 2 +- ...et_parent_process_info___response_task.xml | 2 +- ..._process_file_activity___response_task.xml | 2 +- ...panel_get_process_info___response_task.xml | 2 +- ...tion_for_port_activity___response_task.xml | 2 +- ...le_for_the_dns_traffic___response_task.xml | 2 +- ..._wmi_activity_for_host___response_task.xml | 2 +- ...rmation_via_session_id___response_task.xml | 2 +- ...vities_via_region_name___response_task.xml | 2 +- ...tivities_by_user_field___response_task.xml | 2 +- ..._multiple_destinations___response_task.xml | 2 +- ...rk_traffic_from_src_ip___response_task.xml | 2 +- ...e_okta_activity_by_app___response_task.xml | 2 +- ...pass_the_hash_attempts___response_task.xml | 2 +- ...ss_the_ticket_attempts___response_task.xml | 2 +- ...e_previous_unseen_user___response_task.xml | 2 +- ...esktop_authentications___response_task.xml | 2 +- ...strings_in_http_header___response_task.xml | 2 +- ...ser_activities_in_okta___response_task.xml | 2 +- ...ate_web_posts_from_src___response_task.xml | 2 +- .../default/es_investigations.conf | 172 +- dist/DA-ESS-ContentUpdate/default/macros.conf | 2 +- .../default/savedsearches.conf | 1994 ++++++++--------- .../default/transforms.conf | 2 +- .../default/workflow_actions.conf | 2 +- dist/api/stories.json | 2 +- dist/api/version.json | 2 +- 59 files changed, 1936 insertions(+), 1936 deletions(-) diff --git a/contentctl b/contentctl index 3505a8fcd1..a169fee8d7 160000 --- a/contentctl +++ b/contentctl @@ -1 +1 @@ -Subproject commit 3505a8fcd1e84d67eba21626714e023f779238e5 +Subproject commit a169fee8d71ba8f299cd96acb566d2991f123b26 diff --git a/contentctl.yml b/contentctl.yml index df5b79f170..e901ee5ca7 100644 --- a/contentctl.yml +++ b/contentctl.yml 
@@ -3,7 +3,7 @@ app: uid: 3449 title: ES Content Updates appid: DA-ESS-ContentUpdate - version: 4.31.0 + version: 4.31.1 description: Explore the Analytic Stories included with ES Content Updates. prefix: ESCU label: ESCU diff --git a/dist/DA-ESS-ContentUpdate/app.manifest b/dist/DA-ESS-ContentUpdate/app.manifest index 8a190f7d1f..ae1919a026 100644 --- a/dist/DA-ESS-ContentUpdate/app.manifest +++ b/dist/DA-ESS-ContentUpdate/app.manifest @@ -5,7 +5,7 @@ "id": { "group": null, "name": "DA-ESS-ContentUpdate", - "version": "4.31.0" + "version": "4.31.1" }, "author": [ { @@ -14,7 +14,7 @@ "company": "Splunk" } ], - "releaseDate": "2024-05-10", + "releaseDate": "2024-05-15", "description": "Explore the Analytic Stories included with ES Content Updates.", "classification": { "intendedAudience": null, diff --git a/dist/DA-ESS-ContentUpdate/default/analyticstories.conf b/dist/DA-ESS-ContentUpdate/default/analyticstories.conf index 0598c511a7..71a03a29af 100644 --- a/dist/DA-ESS-ContentUpdate/default/analyticstories.conf +++ b/dist/DA-ESS-ContentUpdate/default/analyticstories.conf @@ -1,7 +1,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:36 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# @@ -33,8 +33,8 @@ type = detection asset_type = Endpoint confidence = medium explanation = Attackers often use spaces as a means to obfuscate an attachment's file extension. This search looks for messages with email attachments that have many spaces within the file names. -how_to_implement = You need to ingest data from emails. Specifically, the sender's address and the file names of any attachments must be mapped to the Email data model. The threshold ratio is set to 10%, but this value can be configured to suit each environment.\ -**Splunk Phantom Playbook Integration**\ +how_to_implement = You need to ingest data from emails. Specifically, the sender's address and the file names of any attachments must be mapped to the Email data model. The threshold ratio is set to 10%, but this value can be configured to suit each environment. \ +**Splunk Phantom Playbook Integration** \ If Splunk Phantom is also configured in your environment, a playbook called "Suspicious Email Attachment Investigate and Delete" can be configured to run when any results are found by this detection search. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/` and add the correct hostname to the "Phantom Instance" field in the Adaptive Response Actions when configuring this detection search. The notable event will be sent to Phantom and the playbook will gather further information about the file attachment and its network behaviors. If Phantom finds malicious behavior and an analyst approves of the results, the email will be deleted from the user's inbox. annotations = {"cis20": ["CIS 13"], "nist": ["DE.AE"]} known_false_positives = None at this time @@ -114,15 +114,15 @@ providing_technologies = ["Okta"] type = detection asset_type = Okta Tenant confidence = medium -explanation = The following analytic identifies variations in client-based values for source and response events to identify suspicious request behavior. The detection is enhanced if the org is evaluating behavior conditions in sign-on policies using Okta Behavior Detection. 
NOTE: This detection requires the use of Okta Identity Engine (OIE) and will not function on Okta Classic.\ -For each Okta Verify Push challenge, the following two events are recorded in Okta System Log\ -Source of Push (Sign-In)\ -eventType eq \"system.push.send_factor_verify_push\"\ -User Push Response (Okta Verify client)\ -eventType eq "user.authentication.auth_via_mfa" AND debugContext.debugData.factor eq "OKTA_VERIFY_PUSH"\ -In sequence, the logic for the analytic -\ -* Groups by SessionID and retrieves any system.push.send_factor_verify_push events (the source of the push) and user.authentication.auth_via_mfa events where the factor is OKTA_VERIFY_PUSH - (the user response to the push)\ -* Counts the total number of push events, successful authentication events, and any push sources where the client is a new device. * Creates a ratio of successful sign-ins to pushes.\ +explanation = The following analytic identifies variations in client-based values for source and response events to identify suspicious request behavior. The detection is enhanced if the org is evaluating behavior conditions in sign-on policies using Okta Behavior Detection. NOTE: This detection requires the use of Okta Identity Engine (OIE) and will not function on Okta Classic. \ +For each Okta Verify Push challenge, the following two events are recorded in Okta System Log \ +Source of Push (Sign-In) \ +eventType eq \"system.push.send_factor_verify_push\" \ +User Push Response (Okta Verify client) \ +eventType eq "user.authentication.auth_via_mfa" AND debugContext.debugData.factor eq "OKTA_VERIFY_PUSH" \ +In sequence, the logic for the analytic - \ +* Groups by SessionID and retrieves any system.push.send_factor_verify_push events (the source of the push) and user.authentication.auth_via_mfa events where the factor is OKTA_VERIFY_PUSH - (the user response to the push) \ +* Counts the total number of push events, successful authentication events, and any push sources where the client is a new device. * Creates a ratio of successful sign-ins to pushes. \ * If the ratio (currently tuned aggressively) indicates push spam, or if a user has rejected a push, the detection proceeds to evaluate whether there is more than one IP address used during the session (session roaming) and the presence of both a new IP and new device during the session. how_to_implement = The analytic leverages Okta OktaIm2 logs to be ingested using the Splunk Add-on for Okta Identity Cloud (https://splunkbase.splunk.com/app/6553). annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1621"], "nist": ["DE.CM"]} @@ -163,10 +163,10 @@ providing_technologies = ["Okta"] type = detection asset_type = Okta Tenant confidence = medium -explanation = The following analytic identifies multiple failed app requests in an attempt to identify the reuse a stolen web session cookie. The logic of the analytic is as follows: * Retrieves policy evaluation and SSO details in events that contain the Application requested\ -* Formats target fields so we can aggregate specifically on Applications (AppInstances)\ -* Groups by User, Session and IP\ -* Creates a ratio of successful SSO events to total MFA challenges related to Application Sign On Policies\ +explanation = The following analytic identifies multiple failed app requests in an attempt to identify the reuse a stolen web session cookie. 
The logic of the analytic is as follows: * Retrieves policy evaluation and SSO details in events that contain the Application requested \ +* Formats target fields so we can aggregate specifically on Applications (AppInstances) \ +* Groups by User, Session and IP \ +* Creates a ratio of successful SSO events to total MFA challenges related to Application Sign On Policies \ * Alerts when more than half of app sign on events are unsuccessful, and challenges were unsatisfied for more than three apps. how_to_implement = This analytic is specific to Okta and requires Okta:im2 logs to be ingested. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1550.004", "T1538"], "nist": ["DE.AE"]} @@ -247,9 +247,9 @@ providing_technologies = ["Okta"] type = detection asset_type = Okta Tenant confidence = medium -explanation = The following analytic looks for one or more policy evaluation events in which multiple client values (IP, User Agent, etc.) change associated to the same Device Token for a specific user. A detection opportunity arises when an adversary attempts to reuse a stolen web session cookie.\ -* Retrieves policy evaluation events from successful authentication events.\ -* Aggregates/Groups by Device Token and User, providing the first policy evaluation event in the search window.\ +explanation = The following analytic looks for one or more policy evaluation events in which multiple client values (IP, User Agent, etc.) change associated to the same Device Token for a specific user. A detection opportunity arises when an adversary attempts to reuse a stolen web session cookie. \ +* Retrieves policy evaluation events from successful authentication events. \ +* Aggregates/Groups by Device Token and User, providing the first policy evaluation event in the search window. \ * It checks for the presence of more than one IP and whether there are multiple OS or browsers for each User/Device Token combination. how_to_implement = This detection utilizes logs from Okta Identity Management (IM) environments. It requires the ingestion of OktaIm2 logs through the Splunk Add-on for Okta Identity Cloud (https://splunkbase.splunk.com/app/6553). annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1539"], "nist": ["DE.AE"]} @@ -831,8 +831,8 @@ type = detection asset_type = Endpoint confidence = medium explanation = The following analytic detects emails that contain attachments with suspicious file extensions. Detecting and responding to emails with suspicious attachments can mitigate the risks associated with phishing and malware attacks, thereby protecting the organization's data and systems from potential harm. The detection is made by using a Splunk query that searches for emails in the datamodel=Email where the filename of the attachment is not empty. The analytic uses the tstats command to summarize the count, first time, and last time of the emails that meet the criteria. It groups the results by the source user, file name, and message ID of the email. The detection is important because it indicates potential phishing or malware delivery attempts in which an attacker attempts to deliver malicious content through email attachments, which can lead to data breaches, malware infections, or unauthorized access to sensitive information. Next steps include reviewing the identified emails and attachments and analyzing the source user, file name, and message ID to determine if they are legitimate or malicious. 
Additionally, you must inspect any relevant on-disk artifacts associated with the attachments and investigate any concurrent processes to identify the source of the attack. -how_to_implement = You need to ingest data from emails. Specifically, the sender's address and the file names of any attachments must be mapped to the Email data model.\ -**Splunk Phantom Playbook Integration**\ +how_to_implement = You need to ingest data from emails. Specifically, the sender's address and the file names of any attachments must be mapped to the Email data model. \ +**Splunk Phantom Playbook Integration** \ If Splunk Phantom is also configured in your environment, a Playbook called "Suspicious Email Attachment Investigate and Delete" can be configured to run when any results are found by this detection search. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/`, and add the correct hostname to the "Phantom Instance" field in the Adaptive Response Actions when configuring this detection search. The notable event will be sent to Phantom and the playbook will gather further information about the file attachment and its network behaviors. If Phantom finds malicious behavior and an analyst approves of the results, the email will be deleted from the user's inbox.' annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Delivery"], "mitre_attack": ["T1566.001", "T1566"], "nist": ["DE.AE"]} known_false_positives = None identified @@ -1352,7 +1352,7 @@ providing_technologies = ["Amazon Web Services - Cloudtrail"] type = detection asset_type = EC2 Snapshot confidence = medium -explanation = The following analytic detects API calls made to an S3 bucket when bucket replication services are enabled. S3 bucket replication is a feature offered by Amazon Web Services (AWS) that allows you to automatically and asynchronously copy data from one S3 bucket to another in the same or different region.\ +explanation = The following analytic detects API calls made to an S3 bucket when bucket replication services are enabled. S3 bucket replication is a feature offered by Amazon Web Services (AWS) that allows you to automatically and asynchronously copy data from one S3 bucket to another in the same or different region. \ S3 bucket replication can also be used for cross-account replication, where data is replicated from a source bucket owned by one AWS account to a destination bucket owned by a different AWS account. how_to_implement = You must install splunk AWS add on and Splunk App for AWS. This search works with AWS CloudTrail logs. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Actions on Objectives"], "mitre_attack": ["T1537"], "nist": ["DE.CM"]} @@ -1803,7 +1803,7 @@ providing_technologies = ["Azure AD", "Entra ID"] type = detection asset_type = Azure Active Directory confidence = medium -explanation = The following analytic identifies one source Ip failing to authenticate with 30 unique valid users within 5 minutes. This behavior could represent an adversary performing a Password Spraying attack against an Azure Active Directory tenant to obtain initial access or elevate privileges. Error Code 50126 represents an invalid password. This logic can be used for real time security monitoring as well as threat hunting exercises.\ +explanation = The following analytic identifies one source Ip failing to authenticate with 30 unique valid users within 5 minutes. 
This behavior could represent an adversary performing a Password Spraying attack against an Azure Active Directory tenant to obtain initial access or elevate privileges. Error Code 50126 represents an invalid password. This logic can be used for real time security monitoring as well as threat hunting exercises. \ Azure AD tenants can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold if needed. how_to_implement = You must install the latest version of Splunk Add-on for Microsoft Cloud Services from Splunkbase (https://splunkbase.splunk.com/app/3110/#/details). You must be ingesting Azure Active Directory events into your Splunk environment through an EventHub. This analytic was written to be used with the azure:monitor:aad sourcetype leveraging the Signin log category. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Weaponization"], "mitre_attack": ["T1586", "T1586.003", "T1110", "T1110.003", "T1110.004"], "nist": ["DE.AE"]} @@ -2004,8 +2004,8 @@ providing_technologies = ["Azure AD", "Entra ID"] type = detection asset_type = Azure Active Directory confidence = medium -explanation = The following analytic identifies one source Ip failing to authenticate with multiple valid users. This behavior could represent an adversary performing a Password Spraying attack against an Azure Active Directory tenant to obtain initial access or elevate privileges. Error Code 50126 represents an invalid password.\ -The detection calculates the standard deviation for source Ip and leverages the 3-sigma statistical rule to identify an unusual number of failed authentication attempts. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises.\ +explanation = The following analytic identifies one source IP failing to authenticate with multiple valid users. This behavior could represent an adversary performing a Password Spraying attack against an Azure Active Directory tenant to obtain initial access or elevate privileges. Error Code 50126 represents an invalid password. \ +The detection calculates the standard deviation for source IP and leverages the 3-sigma statistical rule to identify an unusual number of failed authentication attempts. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises. \ While looking for anomalies using statistical methods like the standard deviation can have benefits, we also recommend using threshold-based detections to complement coverage. A similar analytic following the threshold model is `Azure AD Multiple Users Failing To Authenticate From Ip`. how_to_implement = You must install the latest version of Splunk Add-on for Microsoft Cloud Services from Splunkbase (https://splunkbase.splunk.com/app/3110/#/details). You must be ingesting Azure Active Directory events into your Splunk environment through an EventHub. This analytic was written to be used with the azure:monitor:aad sourcetype leveraging the Signin log category.
annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Weaponization"], "mitre_attack": ["T1586", "T1586.003", "T1110", "T1110.003", "T1110.004"], "nist": ["DE.AE"]} @@ -2169,7 +2169,7 @@ confidence = medium explanation = This search looks for cloud provisioning activities from previously unseen cities. Provisioning activities are defined broadly as any event that runs or creates something. how_to_implement = You must be ingesting your cloud infrastructure logs from your cloud provider. You should run the baseline search `Previously Seen Cloud Provisioning Activity Sources - Initial` to build the initial table of source IP address, geographic locations, and times. You must also enable the second baseline search `Previously Seen Cloud Provisioning Activity Sources - Update` to keep this table up to date and to age out old data. You can adjust the time window for this search by updating the `previously_unseen_cloud_provisioning_activity_window` macro. You can also provide additional filtering for this search by customizing the `cloud_provisioning_activity_from_previously_unseen_city_filter` macro. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", "Exploitation", "Installation"], "mitre_attack": ["T1078"], "nist": ["DE.AE"]} -known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise.\ +known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new IP address is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your country, there should be few false positives. If you are located in countries where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. providing_technologies = null @@ -2180,7 +2180,7 @@ confidence = medium explanation = This search looks for cloud provisioning activities from previously unseen countries. Provisioning activities are defined broadly as any event that runs or creates something. how_to_implement = You must be ingesting your cloud infrastructure logs from your cloud provider. You should run the baseline search `Previously Seen Cloud Provisioning Activity Sources - Initial` to build the initial table of source IP address, geographic locations, and times. You must also enable the second baseline search `Previously Seen Cloud Provisioning Activity Sources - Update` to keep this table up to date and to age out old data. You can adjust the time window for this search by updating the `previously_unseen_cloud_provisioning_activity_window` macro. You can also provide additional filtering for this search by customizing the `cloud_provisioning_activity_from_previously_unseen_country_filter` macro. 
annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", "Exploitation", "Installation"], "mitre_attack": ["T1078"], "nist": ["DE.AE"]} -known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise.\ +known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new IP address is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your country, there should be few false positives. If you are located in countries where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. providing_technologies = null @@ -2191,7 +2191,7 @@ confidence = medium explanation = This search looks for cloud provisioning activities from previously unseen IP addresses. Provisioning activities are defined broadly as any event that runs or creates something. how_to_implement = You must be ingesting your cloud infrastructure logs from your cloud provider. You should run the baseline search `Previously Seen Cloud Provisioning Activity Sources - Initial` to build the initial table of source IP address, geographic locations, and times. You must also enable the second baseline search `Previously Seen Cloud Provisioning Activity Sources - Update` to keep this table up to date and to age out old data. You can adjust the time window for this search by updating the `previously_unseen_cloud_provisioning_activity_window` macro. You can also provide additional filtering for this search by customizing the `cloud_provisioning_activity_from_previously_unseen_ip_address_filter` macro. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", "Exploitation", "Installation"], "mitre_attack": ["T1078"], "nist": ["DE.AE"]} -known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise.\ +known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new IP address is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your country, there should be few false positives. 
If you are located in countries where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. providing_technologies = null @@ -2202,7 +2202,7 @@ confidence = medium explanation = This search looks for cloud provisioning activities from previously unseen regions. Provisioning activities are defined broadly as any event that runs or creates something. how_to_implement = You must be ingesting your cloud infrastructure logs from your cloud provider. You should run the baseline search `Previously Seen Cloud Provisioning Activity Sources - Initial` to build the initial table of source IP address, geographic locations, and times. You must also enable the second baseline search `Previously Seen Cloud Provisioning Activity Sources - Update` to keep this table up to date and to age out old data. You can adjust the time window for this search by updating the `previously_unseen_cloud_provisioning_activity_window` macro. You can also provide additional filtering for this search by customizing the `cloud_provisioning_activity_from_previously_unseen_region_filter` macro. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", "Exploitation", "Installation"], "mitre_attack": ["T1078"], "nist": ["DE.AE"]} -known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise.\ +known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new IP address is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your country, there should be few false positives. If you are located in countries where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. providing_technologies = null @@ -2210,9 +2210,9 @@ providing_technologies = null type = detection asset_type = Cloud Instance confidence = medium -explanation = The following analytic identifies users who are unsually modifying security group in your cloud enriovnment,focusing on actions such as modifications, deletions, or creations performed by users over 30-minute intervals. Analyzing patterns of modifications to security groups can help in identifying anomalous behavior that may indicate a compromised account or an insider threat.\ -The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. 
This logic can be used for real time security monitoring as well as threat hunting exercises.\ -This detection will only trigger on all user and service accounts that have created/modified/deleted a security group .\ +explanation = The following analytic identifies users who are unusually modifying security groups in your cloud environment, focusing on actions such as modifications, deletions, or creations performed by users over 30-minute intervals. Analyzing patterns of modifications to security groups can help in identifying anomalous behavior that may indicate a compromised account or an insider threat. \ +The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises. \ +This detection will only trigger on all user and service accounts that have created/modified/deleted a security group. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source ip and values of the security objects affected. how_to_implement = This search requires the Cloud infrastructure logs such as AWS Cloudtrail, GCP Pubsub Message logs, Azure Audit logs to be ingested into an accelerated Change datamodel. It is also recommended that users can try different combinations of the `bucket` span time and outlier conditions to better suit their environment. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1578.005"], "nist": ["DE.AE"]} @@ -2624,10 +2624,10 @@ type = detection asset_type = Kubernetes confidence = medium explanation = This detection detects inbound network traffic volume anomalies from processes running within containerised workloads. Anomalies are provided with context identifying the Kubernetes cluster, the workload name, and the type of anomaly. This detection leverages Network performance Monitoring metrics harvested using an OTEL collector, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection compares the tcp.bytes, tcp.new_sockets, tcp.packets, udp.bytes, udp.packets metrics for destination (receiving) workload process pairs over the last 1 hour, with the average of those metrics for those pairs over the last 30 days in order to detect any anomalously high inbound network activity. Anomalies in inbound network traffic may suggest that the container is receiving unexpected or unauthorized data, potentially indicative of a breach, a vulnerability exploitation attempt, an attempt to overload the service, or propagation of malware. Successful compromise of a containerised application resulting in the ability to upload data can result in installation of command and control software or other malware, data integrity damage, container escape, and further compromise of the environment.
-how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default:\ -* Name sim_npm_metrics_to_metrics_index\ -* Org ID \ -* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E')\ +how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default: \ +* Name sim_npm_metrics_to_metrics_index \ +* Org ID \ +* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E') \ * Metric Resolution 10000 annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} known_false_positives = unknown @@ -2638,17 +2638,17 @@ type = detection asset_type = Kubernetes confidence = medium explanation = This analytic identifies high Inbound or Outbound Network IO anomalies in a Kubernetes container. It uses process metrics from an OTEL collector and Kubelet Stats Receiver, and data from Splunk Observability cloud via the Splunk Infrastructure Monitoring Add-on. A lookup table containing average and standard deviation for network IO is used to evaluate anomalies for each container. An event is generated if the anomaly persists over a 1 hour period. These anomalies may indicate security threats such as data exfiltration, command and control communication, service disruptions, or unauthorized data transfers. They can compromise the confidentiality, availability, and integrity of applications and data, necessitating rapid detection and response. Anomalous network utilization may suggest a compromised container, potentially leading to data breaches, service outages, financial losses, and reputational damage. -how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. 
(ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} known_false_positives = unknown @@ -2659,17 +2659,17 @@ type = detection asset_type = Kubernetes confidence = medium explanation = This analytic identifies changes in network communication behavior in a Kubernetes container by examining inbound to outbound network IO ratios. It uses process metrics from an OTEL collector and Kubelet Stats Receiver, and data from Splunk Observability cloud via the Splunk Infrastructure Monitoring Add-on. A lookup table containing average and standard deviation for network IO is used to evaluate anomalies for each container. An event is generated if the anomaly persists over a 1 hour period. These anomalies may indicate security threats such as data exfiltration, command and control communication, or compromised container behavior. 
They can compromise the confidentiality, availability, and integrity of applications and data, necessitating rapid detection and response. Anomalous network utilization may suggest a compromised container, potentially leading to data breaches, service outages, and unauthorized access within the Kubernetes cluster. -how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. 
\ * Run the Search Baseline Of Kubernetes Container Network IO Ratio annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} known_false_positives = unknown @@ -2680,10 +2680,10 @@ type = detection asset_type = Kubernetes confidence = medium explanation = This detection detects outbound network traffic volume anomalies from processes running within containerised workloads. Anomalies are provided with context identifying the Kubernetes cluster, the workload name, and the type of anomaly. This detection leverages Network performance Monitoring metrics harvested using an OTEL collector, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection compares the tcp.bytes, tcp.new_sockets, tcp.packets, udp.bytes, udp.packets metrics for source (transmitting) workload process pairs over the last 1 hour, with the average of those metrics for those pairs over the last 30 days in order to detect any anomalously high outbound network activity. Anomalously high outbound network traffic from a process running in a container is a potential indication of data exfiltration, or an indication that the process has been modified. Anomalously high outbound network activity from a process running within a container suggests a potential compromise, which may lead to unauthorized data exfiltration, communication with malicious entities, or the propagation of malware to external systems. The compromised container could also serve as a pivot point for further attacks within the containerized environment. -how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token.
Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default: \ +* Name sim_npm_metrics_to_metrics_index \ +* Org ID \ +* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E') \ * Metric Resolution 10000 annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} known_false_positives = unknown @@ -2694,10 +2694,10 @@ type = detection asset_type = Kubernetes confidence = medium explanation = This detection detects network traffic volume anomalies between workloads in a microservices hosted application, or between a workload and the outside world if the workload is shown as (unknown). This detection leverages Network performance Monitoring metrics harvested using an OTEL collector, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on (https://splunkbase.splunk.com/app/5247). This detection compares the tcp.bytes, tcp.new_sockets, tcp.packets, udp.bytes, udp.packets metrics between workloads over the last 1 hour, with the average of those metrics over the last 30 days in order to detect any anonymously high inbound or outbound network activity. Unexpected spikes in network traffic may signify unauthorized data transfers, or abnormal behavior within the microservices ecosystem. Such activity might signify data exfiltration, unauthorized lateral movement, within the microservices environment. If a bad actor is responsible for this traffic they could compromise additional services or extract sensitive data, potentially leading to data breaches. -how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default:\ -* Name sim_npm_metrics_to_metrics_index\ -* Org ID \ -* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E')\ +how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. 
Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default: \ +* Name sim_npm_metrics_to_metrics_index \ +* Org ID \ +* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E') \ * Metric Resolution 10000 annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} known_false_positives = unknown @@ -2758,10 +2758,10 @@ type = detection asset_type = Kubernetes confidence = medium explanation = This analytic detects TCP communication between a newly seen source and destination workload pair. This is done to identify changes in network behavior between workloads in a kubernetes cluster. This detection leverages Network performance Monitoring metrics harvested using an OTEL collector, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection compares network activity between workloads over the last 1 hour, with those over the last 30 days in order to detect newly seen inter workload communication. Newly seen network connections in a microservices based app indicate a change in behavior which could indicate potential security threats or anomalies. Distributed applications typically have common established network connection topologies, and new connections are often either an indication of a change in the application or an active threat. Unauthorized connections may enable the attacker to infiltrate the applications ecosystem, potentially leading to data breaches, manipulation of sensitive information, or disruption of critical services. Bad actors may exploit these connections to gain access, escalate privileges, move laterally within the microservices, or introduce malicious code or payloads, putting the applications integrity, availability, and confidentiality at risk. -how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default:\ -* Name sim_npm_metrics_to_metrics_index\ -* Org ID \ -* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E')\ +how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. 
Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default: \ +* Name sim_npm_metrics_to_metrics_index \ +* Org ID \ +* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E') \ * Metric Resolution 10000 annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} known_false_positives = unknown @@ -2772,10 +2772,10 @@ type = detection asset_type = Kubernetes confidence = medium explanation = This analytic detects UDP communication between a newly seen source and destination workload pair. This is done to identify changes in network behavior between workloads in a kubernetes cluster. This detection leverages Network performance Monitoring metrics harvested using an OTEL collector, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection compares network activity between workloads over the last 1 hour, with those over the last 30 days in order to detect newly seen inter workload communication. Newly seen network connections in a microservices based app indicate a change in behavior which could indicate potential security threats or anomalies. Distributed applications typically have common established network connection topologies, and new connections are often either an indication of a change in the application or an active threat. Unauthorized connections may enable the attacker to infiltrate the applications ecosystem, potentially leading to data breaches, manipulation of sensitive information, or disruption of critical services. Bad actors may exploit these connections to gain access, escalate privileges, move laterally within the microservices, or introduce malicious code or payloads, putting the applications integrity, availability, and confidentiality at risk. -how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. 
Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default:\ -* Name sim_npm_metrics_to_metrics_index\ -* Org ID \ -* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E')\ +how_to_implement = To gather NPM metrics, deploy the Open Telemetry Collector to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default: \ +* Name sim_npm_metrics_to_metrics_index \ +* Org ID \ +* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E') \ * Metric Resolution 10000 annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} known_false_positives = unknown @@ -2836,17 +2836,17 @@ type = detection asset_type = Kubernetes confidence = medium explanation = The following analytic identifies containerised workloads that have been created using a previously unseen image. This detection leverages process metrics harvested using an OTEL collector and Kubernetes cluster receiver, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection uses the k8s.container.ready metric to compare the container image names seen in the last 1 hour with those seen in the 30 days prior to that 1 hour, and alerts if a new container image is detected. When a container in a Kubernetes cluster is created using a previously unseen image, it raises potential security risks and unknown variables. Unfamiliar container images could contain vulnerabilities, malware, or misconfigurations that pose threats to the cluster's integrity and the applications it hosts. The absence of prior knowledge about the image makes it difficult to assess its trustworthiness, track its lineage, or verify its compliance with security policies. The potential security impact of a container created using a compromised image is significant. Compromised containers can potentially introduce malware, backdoors, or other malicious code into the containerized application, leading to data breaches, service disruptions, and unauthorized access within the Kubernetes cluster. A compromised image can serve as a foothold for lateral movement and privilege escalation, potentially compromising other containers, pods, or nodes in the cluster.
Additionally, it may enable the actor to exfiltrate sensitive data, manipulate configurations, or execute arbitrary code, posing risks to the confidentiality, availability, and integrity of applications and data hosted within the cluster -how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} known_false_positives = unknown @@ -2857,17 +2857,17 @@ type = detection asset_type = Kubernetes confidence = medium explanation = This analytic detects newly seen process within the Kubernetes scope on a master or worker node. 
This detection leverages process metrics harvested using an OTEL collector and hostmetrics receiever, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection compares the processes seen for each node over the previous 1 hour with those over the previous 30 days up until the previous 1 hour. The specific metric used by this detection is process.memory.utilization. Newly seen processes on a Kubernetes worker node are concerning as they may represent security risks and anomalies that could be related to unauthorized activity. New processes may be introduced in an attempt to compromise the node or gain control of the Kubernetes cluster. By detecting these processes, they can be investigated, and correlated with other anomalous activity for that host. Newly seen processes may be part of an attacker's strategy to compromise the node, gain unauthorized access, and subsequently extend their control to the entire Kubernetes cluster. These processes could facilitate activities such as data exfiltration, privilege escalation, denial-of-service attacks, or the introduction of malware and backdoors, putting sensitive data, applications, and the entire infrastructure at risk. The consequences may include data breaches, service disruptions, financial losses, and reputational damage, underscoring the need to identify anomalous process and associate them with any concurrent risk activity. -how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. 
Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} known_false_positives = unknown @@ -2878,17 +2878,17 @@ type = detection asset_type = Kubernetes confidence = medium explanation = This analytic detects processes running within the same scope as Kubernetes that have been run from a newly seen path. This detection leverages process metrics harvested using an OTEL collector and hostmetrics receiever, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection compares the processes seen for each node over the previous 1 hour with those over the previous 30 days up until the previous 1 hour, and alerts if the path for that process was not seen over the previous 30 days. The specific metric used by this detection is process.memory.utilization. Processes running from a newly seen path can signify potential security risks and anomalies. A process executing from an unfamiliar file path may indicate unauthorized changes to the file system, a compromised node, or the introduction of malicious software. If the presence of a process running from a newly seen file path on a Kubernetes node indicates malicious activity, the security implications could be severe. It suggests that an attacker has potentially compromised the node, allowing them to execute unauthorized processes and potentially gain control over critical resources. This could lead to further exploitation, data exfiltration, privilege escalation, or the introduction of malware and backdoors within the Kubernetes cluster. -how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. 
Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} known_false_positives = unknown @@ -2899,17 +2899,17 @@ type = detection asset_type = Kubernetes confidence = medium explanation = This analytic identifies high resource utilization anomalies in Kubernetes processes. It uses process metrics from an OTEL collector and hostmetrics receiver, fetched from Splunk Observability cloud via the Splunk Infrastructure Monitoring Add-on. The detection uses a lookup table with average and standard deviation values for various process metrics to identify anomalies. High resource utilization can indicate security threats or operational issues, such as cryptojacking, unauthorized data exfiltration, or compromised containers. These anomalies can disrupt services, exhaust resources, increase costs, and allow attackers to evade detection or maintain access. 
-how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} known_false_positives = unknown @@ -2920,17 +2920,17 @@ type = detection asset_type = Kubernetes confidence = medium explanation = This analytic detects anomalously changes in the ratio between specific process resources on a Kubernetes node, based on the past behavior for each process running in the Kubernetes scope on that node. 
This detection leverages process metrics harvested using an OTEL collector and hostmetrics receiver, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection also leverages a lookup table that contains average and standard deviation for the cpu:disk operations, cpu:mem, cpu:thread count, disk operations:thread count, and mem:disk operations ratios. This is used to indicate an anomalous change in resource ratios that indicate the workload has changed behavior irrespective of load. Changes in the relationship between utilization of different resources can indicate a change in behavior of the monitored process, which can indicate a potentially compromised application. Deviations in resource ratios, such as memory-to-CPU or CPU-to-disk utilization, may signify compromised processes, malicious activity, or misconfigurations that could pose risks. A change in process behavior could signify a potential security breach within the Kubernetes environment, where an attacker may have compromised a process either on the node or running within a container. -how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. 
\ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} known_false_positives = unknown @@ -2961,17 +2961,17 @@ type = detection asset_type = Kubernetes confidence = medium explanation = This analytic identifies shell activity within the Kubernetes privilege scope on a worker node, returning a list of shell processes regardless of CPU resource consumption. It uses process metrics from an OTEL collector hostmetrics receiver, pulled from Splunk Observability cloud via the Splunk Infrastructure Monitoring Add-on. Metrics used are process.cpu.utilization and process.memory.utilization. Shell processes can indicate unauthorized or suspicious activity, posing a security threat. Shell access to worker nodes can provide attackers an entry point to compromise the node and the entire Kubernetes cluster. Monitoring and detecting shell processes is crucial for anomaly identification, security policy enforcement, and breach mitigation. Unauthorized shell processes on a Kubernetes worker node can severely compromise the cluster's security and integrity. Such access can lead to data theft, service disruption, privilege escalation, lateral movement, and further attacks within the cluster. It may also enable attackers to manipulate configurations, deploy malicious containers, and execute arbitrary code, posing a severe risk to the confidentiality, availability, and integrity of applications and sensitive data. -how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. 
Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} known_false_positives = unknown @@ -2982,17 +2982,17 @@ type = detection asset_type = Kubernetes confidence = medium explanation = This analytic identifies shell activity within the Kubernetes privilege scope on a worker node. It returns shell processes only if they're consuming CPU resources. The detection uses process metrics from an OTEL collector hostmetrics receiver, pulled from Splunk Observability cloud via the Splunk Infrastructure Monitoring Add-on. The metrics used are process.cpu.utilization and process.memory.utilization. Shell processes can indicate unauthorized activity, posing a security threat. Attackers could compromise the node and the entire Kubernetes cluster via shell access to worker nodes. Monitoring shell processes is crucial for anomaly detection, policy enforcement, and breach mitigation. Unauthorized shell processes on a Kubernetes worker node could severely impact the cluster's security and integrity. Attackers could gain full control over the host's resources and file system, compromising all hosted workloads and data. 
This access could lead to data theft, service disruption, privilege escalation, lateral movement, and further attacks within the cluster. Attackers could also manipulate configurations, deploy malicious containers, and execute arbitrary code, severely risking the confidentiality, availability, and integrity of applications and sensitive data. A rapid and comprehensive incident response is required to mitigate and recover from such a breach. -how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. 
\ * Run the Search Baseline Of Kubernetes Container Network IO Ratio annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} known_false_positives = unknown @@ -3535,7 +3535,7 @@ confidence = medium explanation = This search looks for AWS provisioning activities from previously unseen cities. Provisioning activities are defined broadly as any event that begins with "Run" or "Create." This search is deprecated and have been translated to use the latest Change Datamodel. how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. This search works best when you run the "Previously Seen AWS Provisioning Activity Sources" support search once to create a history of previously seen locations that have provisioned AWS resources. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1535"], "nist": ["DE.AE"]} -known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise.\ +known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new city is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your city, there should be few false positives. If you are located in countries where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. providing_technologies = ["Amazon Web Services - Cloudtrail"] @@ -3546,7 +3546,7 @@ confidence = medium explanation = This search looks for AWS provisioning activities from previously unseen countries. Provisioning activities are defined broadly as any event that begins with "Run" or "Create." This search is deprecated and have been translated to use the latest Change Datamodel. how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. This search works best when you run the "Previously Seen AWS Provisioning Activity Sources" support search once to create a history of previously seen locations that have provisioned AWS resources. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1535"], "nist": ["DE.AE"]} -known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching over plus what is stored in the cache feature. 
But while there are really no \"false positives\" in a traditional sense, there is definitely lots of noise.\ +known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching over plus what is stored in the cache feature. But while there are really no \"false positives\" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new country is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your country, there should be few false positives. If you are located in countries where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. providing_technologies = ["Amazon Web Services - Cloudtrail"] @@ -3557,7 +3557,7 @@ confidence = medium explanation = This search looks for AWS provisioning activities from previously unseen IP addresses. Provisioning activities are defined broadly as any event that begins with "Run" or "Create." This search is deprecated and have been translated to use the latest Change Datamodel. how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. This search works best when you run the "Previously Seen AWS Provisioning Activity Sources" support search once to create a history of previously seen locations that have provisioned AWS resources. annotations = {"cis20": ["CIS 10"], "nist": ["DE.AE"]} -known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise.\ +known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new IP address is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your country, there should be few false positives. If you are located in countries where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. providing_technologies = ["Amazon Web Services - Cloudtrail"] @@ -3568,7 +3568,7 @@ confidence = medium explanation = This search looks for AWS provisioning activities from previously unseen regions. Region in this context is similar to a state in the United States. Provisioning activities are defined broadly as any event that begins with "Run" or "Create." This search is deprecated and have been translated to use the latest Change Datamodel. 
how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. This search works best when you run the "Previously Seen AWS Provisioning Activity Sources" support search once to create a history of previously seen locations that have provisioned AWS resources. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1535"], "nist": ["DE.AE"]} -known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise.\ +known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new region is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your region, there should be few false positives. If you are located in regions where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. providing_technologies = ["Amazon Web Services - Cloudtrail"] @@ -3587,9 +3587,9 @@ type = detection asset_type = Endpoint confidence = medium explanation = This search allows you to identify the endpoints that have connected to more than five DNS servers and made DNS Queries over the time frame of the search. -how_to_implement = This search requires that DNS data is being ingested and populating the `Network_Resolution` data model. This data can come from DNS logs or from solutions that parse network traffic for this data, such as Splunk Stream or Bro.\ -This search produces fields (`dest_count`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry):\ -* **Label:** Distinct DNS Connections, **Field:** dest_count\ +how_to_implement = This search requires that DNS data is being ingested and populating the `Network_Resolution` data model. This data can come from DNS logs or from solutions that parse network traffic for this data, such as Splunk Stream or Bro. \ +This search produces fields (`dest_count`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. 
To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry): \ +* **Label:** Distinct DNS Connections, **Field:** dest_count \ Detailed documentation on how to create a new field within Incident Review may be found here: `https://docs.splunk.com/Documentation/ES/5.3.0/Admin/Customizenotables#Add_a_field_to_the_notable_event_details` annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Actions on Objectives"], "mitre_attack": ["T1048.003"], "nist": ["DE.CM"]} known_false_positives = It's possible that an enterprise has more than five DNS servers that are configured in a round-robin rotation. Please customize the search, as appropriate. @@ -3640,11 +3640,11 @@ type = detection asset_type = AWS Instance confidence = medium explanation = This search looks for AWS CloudTrail events where a user logged into the AWS account, is making API calls and has not enabled Multi Factor authentication. Multi factor authentication adds a layer of security by forcing the users to type a unique authentication code from an approved authentication device when they access AWS websites or services. AWS Best Practices recommend that you enable MFA for privileged IAM users. -how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. Leverage the support search `Create a list of approved AWS service accounts`: run it once every 30 days to create a list of service accounts and validate them.\ -This search produces fields (`eventName`,`userIdentity.type`,`userIdentity.arn`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry):\ -* **Label:** AWS Event Name, **Field:** eventName\ -* **Label:** AWS User ARN, **Field:** userIdentity.arn\ -* **Label:** AWS User Type, **Field:** userIdentity.type\ +how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. Leverage the support search `Create a list of approved AWS service accounts`: run it once every 30 days to create a list of service accounts and validate them. \ +This search produces fields (`eventName`,`userIdentity.type`,`userIdentity.arn`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. 
To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry): \ +* **Label:** AWS Event Name, **Field:** eventName \ +* **Label:** AWS User ARN, **Field:** userIdentity.arn \ +* **Label:** AWS User Type, **Field:** userIdentity.type \ Detailed documentation on how to create a new field within Incident Review may be found here: `https://docs.splunk.com/Documentation/ES/5.3.0/Admin/Customizenotables#Add_a_field_to_the_notable_event_details` annotations = {"cis20": ["CIS 13"], "nist": ["DE.AE"]} known_false_positives = Many service accounts configured within an AWS infrastructure do not have multi factor authentication enabled. Please ignore the service accounts, if triggered and instead add them to the aws_service_accounts.csv file to fine tune the detection. It is also possible that the search detects users in your environment using Single Sign-On systems, since the MFA is not handled by AWS. @@ -3655,11 +3655,11 @@ type = detection asset_type = AWS Instance confidence = medium explanation = This search looks for successful AWS CloudTrail activity by user accounts that are not listed in the identity table or `aws_service_accounts.csv`. It returns event names and count, as well as the first and last time a specific user or service is detected, grouped by users. Deprecated because managing this list can be quite hard. -how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. You must also populate the `identity_lookup_expanded` lookup shipped with the Asset and Identity framework to be able to look up users in your identity table in Enterprise Security (ES). Leverage the support search called "Create a list of approved AWS service accounts": run it once every 30 days to create and validate a list of service accounts.\ -This search produces fields (`eventName`,`firstTime`,`lastTime`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry):\ -* **Label:** AWS Event Name, **Field:** eventName\ -* **Label:** First Time, **Field:** firstTime\ -* **Label:** Last Time, **Field:** lastTime\ +how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. You must also populate the `identity_lookup_expanded` lookup shipped with the Asset and Identity framework to be able to look up users in your identity table in Enterprise Security (ES). Leverage the support search called "Create a list of approved AWS service accounts": run it once every 30 days to create and validate a list of service accounts. \ +This search produces fields (`eventName`,`firstTime`,`lastTime`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. 
To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry): \ +* **Label:** AWS Event Name, **Field:** eventName \ +* **Label:** First Time, **Field:** firstTime \ +* **Label:** Last Time, **Field:** lastTime \ Detailed documentation on how to create a new field within Incident Review may be found here: `https://docs.splunk.com/Documentation/ES/5.3.0/Admin/Customizenotables#Add_a_field_to_the_notable_event_details` annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", "Exploitation", "Installation"], "mitre_attack": ["T1078.004"], "nist": ["DE.AE"]} known_false_positives = It's likely that you'll find activity detected by users/service accounts that are not listed in the `identity_lookup_expanded` or ` aws_service_accounts.csv` file. If the user is a legitimate service account, update the `aws_service_accounts.csv` table with that entry. @@ -3670,9 +3670,9 @@ type = detection asset_type = Endpoint confidence = medium explanation = This search looks for DNS requests for phishing domains that are leveraging EvilGinx tools to mimic websites. -how_to_implement = You need to ingest data from your DNS logs in the Network_Resolution datamodel. Specifically you must ingest the domain that is being queried and the IP of the host originating the request. Ideally, you should also be ingesting the answer to the query and the query type. This approach allows you to also create your own localized passive DNS capability which can aid you in future investigations. You will have to add legitimate domain names to the `legit_domains.csv` file shipped with the app.\ -**Splunk>Phantom Playbook Integration**\ -If Splunk>Phantom is also configured in your environment, a Playbook called `Lets Encrypt Domain Investigate` can be configured to run when any results are found by this detection search. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/`, add the correct hostname to the "Phantom Instance" field in the Adaptive Response Actions when configuring this detection search, and set the corresponding Playbook to active.\ +how_to_implement = You need to ingest data from your DNS logs in the Network_Resolution datamodel. Specifically you must ingest the domain that is being queried and the IP of the host originating the request. Ideally, you should also be ingesting the answer to the query and the query type. This approach allows you to also create your own localized passive DNS capability which can aid you in future investigations. You will have to add legitimate domain names to the `legit_domains.csv` file shipped with the app. \ +**Splunk>Phantom Playbook Integration** \ +If Splunk>Phantom is also configured in your environment, a Playbook called `Lets Encrypt Domain Investigate` can be configured to run when any results are found by this detection search. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/`, add the correct hostname to the "Phantom Instance" field in the Adaptive Response Actions when configuring this detection search, and set the corresponding Playbook to active. 
\ (Playbook link:`https://my.phantom.us/4.2/playbook/lets-encrypt-domain-investigate/`) annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Delivery"], "mitre_attack": ["T1566.003"], "nist": ["DE.CM"]} known_false_positives = If a known good domain is not listed in the legit_domains.csv file, then the search could give you false positives. Please update that lookup file to filter out DNS requests to legitimate domains. @@ -3733,11 +3733,11 @@ type = detection asset_type = AWS Instance confidence = medium explanation = This search will detect users creating spikes of API activity in your AWS environment. It will also update the cache file that factors in the latest data. This search is deprecated and has been translated to use the latest Change Datamodel. -how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. You can modify `dataPointThreshold` and `deviationThreshold` to better fit your environment. The `dataPointThreshold` variable is the minimum number of data points required to have a statistically significant amount of data to determine. The `deviationThreshold` variable is the number of standard deviations away from the mean that the value must be to be considered a spike.\ -This search produces fields (`eventName`,`numberOfApiCalls`,`uniqueApisCalled`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry):\ -* **Label:** AWS Event Name, **Field:** eventName\ -* **Label:** Number of API Calls, **Field:** numberOfApiCalls\ -* **Label:** Unique API Calls, **Field:** uniqueApisCalled\ +how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. You can modify `dataPointThreshold` and `deviationThreshold` to better fit your environment. The `dataPointThreshold` variable is the minimum number of data points required to have a statistically significant amount of data to make a determination. The `deviationThreshold` variable is the number of standard deviations away from the mean that the value must be to be considered a spike. \ +This search produces fields (`eventName`,`numberOfApiCalls`,`uniqueApisCalled`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry): \ +* **Label:** AWS Event Name, **Field:** eventName \ +* **Label:** Number of API Calls, **Field:** numberOfApiCalls \ +* **Label:** Unique API Calls, **Field:** uniqueApisCalled \ Detailed documentation on how to create a new field within Incident Review may be found here: `https://docs.splunk.com/Documentation/ES/5.3.0/Admin/Customizenotables#Add_a_field_to_the_notable_event_details` annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Delivery", "Exploitation", "Installation"], "mitre_attack": ["T1078.004"], "nist": ["DE.AE"]} known_false_positives = None.
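The `dataPointThreshold` and `deviationThreshold` settings referenced in the hunk above amount to a simple standard-deviation spike test. The following is a minimal Python sketch of that idea, assuming per-user hourly API-call counts are already available; the function name, default thresholds, and sample values are illustrative only and are not taken from the shipped search.

    from statistics import mean, stdev

    def is_spike(history, latest, data_point_threshold=10, deviation_threshold=3):
        # Require a minimum number of data points before making a call,
        # mirroring the role of dataPointThreshold described above.
        if len(history) < data_point_threshold:
            return False
        avg = mean(history)
        sd = stdev(history)
        # deviationThreshold: how many standard deviations above the mean
        # the latest value must sit to be considered a spike.
        return sd > 0 and latest > avg + deviation_threshold * sd

    # Example: hourly API-call counts for one user, followed by a sudden burst.
    print(is_spike([12, 9, 14, 11, 10, 13, 12, 9, 11, 10], 55))  # True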
@@ -3778,9 +3778,9 @@ type = detection asset_type = Endpoint confidence = medium explanation = This search looks for web connections to dynamic DNS providers. -how_to_implement = This search requires you to be ingesting web-traffic logs. You can obtain these logs from indexing data from a web proxy or by using a network-traffic-analysis tool, such as Bro or Splunk Stream. The web data model must contain the URL being requested, the IP address of the host initiating the request, and the destination IP. This search also leverages a lookup file, `dynamic_dns_providers_default.csv`, which contains a non-exhaustive list of dynamic DNS providers. Consider periodically updating this local lookup file with new domains.\ -This search produces fields (`isDynDNS`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry):\ -* **Label:** IsDynamicDNS, **Field:** isDynDNS\ +how_to_implement = This search requires you to be ingesting web-traffic logs. You can obtain these logs from indexing data from a web proxy or by using a network-traffic-analysis tool, such as Bro or Splunk Stream. The web data model must contain the URL being requested, the IP address of the host initiating the request, and the destination IP. This search also leverages a lookup file, `dynamic_dns_providers_default.csv`, which contains a non-exhaustive list of dynamic DNS providers. Consider periodically updating this local lookup file with new domains. \ +This search produces fields (`isDynDNS`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry): \ +* **Label:** IsDynamicDNS, **Field:** isDynDNS \ Detailed documentation on how to create a new field within Incident Review may be found here: `https://docs.splunk.com/Documentation/ES/5.3.0/Admin/Customizenotables#Add_a_field_to_the_notable_event_details` Deprecated because duplicate. annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Command and Control"], "mitre_attack": ["T1071.001"], "nist": ["DE.CM"]} known_false_positives = It is possible that list of dynamic DNS providers is outdated and/or that the URL being requested is legitimate. @@ -3790,7 +3790,7 @@ providing_technologies = null type = detection asset_type = Endpoint confidence = medium -explanation = This search is used to detect DNS tunneling, by calculating the sum of the length of DNS queries and DNS answers. The search also filters out potential false positives by filtering out queries made to internal systems and the queries originating from internal DNS, Web, and Email servers. Endpoints using DNS as a method of transmission for data exfiltration, Command And Control, or evasion of security controls can often be detected by noting an unusually large volume of DNS traffic.\ +explanation = This search is used to detect DNS tunneling, by calculating the sum of the length of DNS queries and DNS answers. 
The search also filters out potential false positives by excluding queries made to internal systems and queries originating from internal DNS, Web, and Email servers. Endpoints using DNS as a method of transmission for data exfiltration, Command And Control, or evasion of security controls can often be detected by noting an unusually large volume of DNS traffic. \ NOTE: Deprecated because an existing detection does the same. This detection is replaced with two other variations: if you are using MLTK, you can use the search `ESCU - DNS Query Length Outliers - MLTK - Rule`, or use the standard deviation version `ESCU - DNS Query Length With High Standard Deviation - Rule` as an alternative. how_to_implement = To successfully implement this search, we must ensure that DNS data is being ingested and mapped to the appropriate fields in the Network_Resolution data model. Fields like src_category are automatically provided by the Assets and Identity Framework shipped with Splunk Enterprise Security. You will need to ensure you are using the Assets and Identity Framework and populating the src_category field. You will also need to enable the `cim_corporate_web_domain_search()` macro which will essentially filter out the DNS queries made to the corporate web domains to reduce alert fatigue. annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Actions on Objectives"], "mitre_attack": ["T1048.003"], "nist": ["DE.CM"]} @@ -3812,9 +3812,9 @@ type = detection asset_type = Endpoint confidence = medium explanation = The search takes the DNS records and their answers from the discovered_dns_records lookup and finds whether any records have changed by searching DNS responses from the Network_Resolution datamodel across the last day.
-how_to_implement = To successfully implement this search you will need to ensure that DNS data is populating the `Network_Resolution` data model. It also requires that the `discover_dns_record` lookup table be populated by the included support search "Discover DNS record".\
-**Splunk>Phantom Playbook Integration**\
-If Splunk>Phantom is also configured in your environment, a Playbook called "DNS Hijack Enrichment" can be configured to run when any results are found by this detection search. The playbook takes in the DNS record changed and uses Geoip, whois, Censys and PassiveTotal to detect if DNS issuers changed. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/`, add the correct hostname to the \"Phantom Instance\" field in the Adaptive Response Actions when configuring this detection search, and set the corresponding Playbook to active.\
+how_to_implement = To successfully implement this search you will need to ensure that DNS data is populating the `Network_Resolution` data model. It also requires that the `discover_dns_record` lookup table be populated by the included support search "Discover DNS record". \
+**Splunk>Phantom Playbook Integration** \
+If Splunk>Phantom is also configured in your environment, a Playbook called "DNS Hijack Enrichment" can be configured to run when any results are found by this detection search. The playbook takes in the DNS record changed and uses Geoip, whois, Censys and PassiveTotal to detect if DNS issuers changed.
To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/`, add the correct hostname to the \"Phantom Instance\" field in the Adaptive Response Actions when configuring this detection search, and set the corresponding Playbook to active. \ (Playbook Link:`https://my.phantom.us/4.2/playbook/dns-hijack-enrichment/`) annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Command and Control"], "mitre_attack": ["T1071.004"], "nist": ["DE.CM"]} known_false_positives = Legitimate DNS changes can be detected in this search. Investigate, verify and update the list of provided current answers for the domains in question as appropriate. @@ -3824,7 +3824,7 @@ providing_technologies = null type = detection asset_type = Endpoint confidence = medium -explanation = Detect a renamed instance of procdump.exe dumping the lsass process. This query looks for both -mm and -ma usage. -mm will produce a mini dump file and -ma will write a dump file with all process memory. Both are highly suspect and should be reviewed. Modify the query as needed.\ +explanation = Detect a renamed instance of procdump.exe dumping the lsass process. This query looks for both -mm and -ma usage. -mm will produce a mini dump file and -ma will write a dump file with all process memory. Both are highly suspect and should be reviewed. Modify the query as needed. \ During triage, confirm this is procdump.exe executing. If it is the first time a Sysinternals utility has been ran, it is possible there will be a -accepteula on the command line. Review other endpoint data sources for cross process (injection) into lsass.exe. how_to_implement = To successfully implement this search you need to be ingesting information on process that include the name of the process responsible for the changes from your endpoints into the `Endpoint` datamodel in the `Processes` node. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1003.001"], "nist": ["DE.AE"]} @@ -4935,7 +4935,7 @@ providing_technologies = ["Microsoft Sysmon"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies the use of default or publicly known named pipes used with Cobalt Strike. A named pipe is a named, one-way or duplex pipe for communication between the pipe server and one or more pipe clients. Cobalt Strike uses named pipes in many ways and has default values used with the Artifact Kit and Malleable C2 Profiles. The following query assists with identifying these default named pipes. Each EDR product presents named pipes a little different. Consider taking the values and generating a query based on the product of choice.\ +explanation = The following analytic identifies the use of default or publicly known named pipes used with Cobalt Strike. A named pipe is a named, one-way or duplex pipe for communication between the pipe server and one or more pipe clients. Cobalt Strike uses named pipes in many ways and has default values used with the Artifact Kit and Malleable C2 Profiles. The following query assists with identifying these default named pipes. Each EDR product presents named pipes a little different. Consider taking the values and generating a query based on the product of choice. \ Upon triage, review the process performing the named pipe. If it is explorer.exe, It is possible it was injected into by another process. Review recent parallel processes to identify suspicious patterns or behaviors. 
A parallel process may have a network connection, review and follow the connection back to identify any file modifications. how_to_implement = To successfully implement this search, you need to be ingesting logs with the process name, parent process, and command-line executions from your endpoints. If you are using Sysmon, you must have at least version 6.0.4 of the Sysmon TA. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1055"], "nist": ["DE.CM"]} @@ -5236,9 +5236,9 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies `copy` or `[System.IO.File]::Copy` being used to capture the SAM, SYSTEM or SECURITY hives identified in script block. This will catch the most basic use cases for credentials being taken for offline cracking.\ +explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies `copy` or `[System.IO.File]::Copy` being used to capture the SAM, SYSTEM or SECURITY hives identified in script block. This will catch the most basic use cases for credentials being taken for offline cracking. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1003.002", "T1003"], "nist": ["DE.CM"]} @@ -5259,9 +5259,9 @@ providing_technologies = ["Microsoft Sysmon"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies the common PowerShell stager used by PowerShell-Empire. Each stager that may use PowerShell all uses the same pattern. The initial HTTP will be base64 encoded and use `system.net.webclient`. Note that some obfuscation may evade the analytic.\ +explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. 
\ + \ +This analytic identifies the common PowerShell stager used by PowerShell-Empire. Each stager that may use PowerShell all uses the same pattern. The initial HTTP will be base64 encoded and use `system.net.webclient`. Note that some obfuscation may evade the analytic. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1059", "T1059.001"], "nist": ["DE.CM"]} @@ -5273,8 +5273,8 @@ type = detection asset_type = Windows confidence = medium explanation = This search identifies endpoints that have caused a relatively high number of account lockouts in a short period. -how_to_implement = You must ingest your Windows security event logs in the `Change` datamodel under the nodename is `Account_Management`, for this search to execute successfully. Please consider updating the cron schedule and the count of lockouts you want to monitor, according to your environment.\ -**Splunk>Phantom Playbook Integration** If Splunk>Phantom is also configured in your environment, a Playbook called "Excessive Account Lockouts Enrichment and Response" can be configured to run when any results are found by this detection search. The Playbook executes the Contextual and Investigative searches in this Story, conducts additional information gathering on Windows endpoints, and takes a response action to shut down the affected endpoint. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/`, add the correct hostname to the "Phantom Instance" field in the Adaptive Response Actions when configuring this detection search, and set the corresponding Playbook to active.\ +how_to_implement = You must ingest your Windows security event logs in the `Change` datamodel under the nodename is `Account_Management`, for this search to execute successfully. Please consider updating the cron schedule and the count of lockouts you want to monitor, according to your environment. \ +**Splunk>Phantom Playbook Integration** If Splunk>Phantom is also configured in your environment, a Playbook called "Excessive Account Lockouts Enrichment and Response" can be configured to run when any results are found by this detection search. The Playbook executes the Contextual and Investigative searches in this Story, conducts additional information gathering on Windows endpoints, and takes a response action to shut down the affected endpoint. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/`, add the correct hostname to the "Phantom Instance" field in the Adaptive Response Actions when configuring this detection search, and set the corresponding Playbook to active. 
\ (Playbook Link:`https://my.phantom.us/4.1/playbook/excessive-account-lockouts-enrichment-and-response/`) annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", "Exploitation", "Installation"], "mitre_attack": ["T1078", "T1078.002"], "nist": ["DE.AE"]} known_false_positives = It's possible that a widely used system, such as a kiosk, could cause a large number of account lockouts. @@ -5344,9 +5344,9 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium
-explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all.\
-\
-This analytic identifies common Mimikatz functions that may be identified in the script block, including `mimikatz`. This will catch the most basic use cases for Pass the Ticket, Pass the Hash and `-DumprCreds`.\
+explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \
+ \
+This analytic identifies common Mimikatz functions that may be identified in the script block, including `mimikatz`. This will catch the most basic use cases for Pass the Ticket, Pass the Hash and `-DumprCreds`. \
During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1003", "T1059.001"], "nist": ["DE.CM"]} @@ -5527,7 +5527,7 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium
-explanation = Adversaries may abuse Regsvr32.exe to proxy execution of malicious code. Regsvr32.exe is a command-line program used to register and unregister object linking and embedding controls, including dynamic link libraries (DLLs), on Windows systems. Regsvr32.exe is also a Microsoft signed binary.This variation of the technique is often referred to as a "Squiblydoo" attack.\
+explanation = Adversaries may abuse Regsvr32.exe to proxy execution of malicious code. Regsvr32.exe is a command-line program used to register and unregister object linking and embedding controls, including dynamic link libraries (DLLs), on Windows systems. Regsvr32.exe is also a Microsoft signed binary. This variation of the technique is often referred to as a "Squiblydoo" attack. \
Upon investigating, look for network connections to remote destinations (internal or external). Be cautious to modify the query to look for "scrobj.dll"; the ".dll" is not required to load scrobj. "scrobj.dll" will be loaded by "regsvr32.exe" upon execution.
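For reference, a minimal `tstats` sketch of the Regsvr32 pattern described above against CIM-mapped process data might look like the following; it is an illustrative starting point rather than the shipped detection, and the wildcard on scrobj reflects the note above that the ".dll" extension is not required.
  | tstats summariesonly=false count min(_time) as firstTime max(_time) as lastTime from datamodel=Endpoint.Processes where Processes.process_name=regsvr32.exe Processes.process="*scrobj*" by Processes.dest Processes.user Processes.parent_process_name Processes.process_name Processes.process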
how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1218", "T1218.010"], "nist": ["DE.CM"]} @@ -5728,11 +5728,11 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies the use of WMI Event Subscription to establish persistence or perform privilege escalation. WMI can be used to install event filters, providers, consumers, and bindings that execute code when a defined event occurs. WMI subscription execution is proxied by the WMI Provider Host process (WmiPrvSe.exe) and thus may result in elevated SYSTEM privileges. This analytic is restricted by commonly added process execution and a path. If the volume is low enough, remove the values and flag on any new subscriptions.\ -All event subscriptions have three components\ -1. Filter - WQL Query for the events we want. EventID equals 19\ -1. Consumer - An action to take upon triggering the filter. EventID equals 20\ -1. Binding - Registers a filter to a consumer. EventID equals 21\ +explanation = The following analytic identifies the use of WMI Event Subscription to establish persistence or perform privilege escalation. WMI can be used to install event filters, providers, consumers, and bindings that execute code when a defined event occurs. WMI subscription execution is proxied by the WMI Provider Host process (WmiPrvSe.exe) and thus may result in elevated SYSTEM privileges. This analytic is restricted by commonly added process execution and a path. If the volume is low enough, remove the values and flag on any new subscriptions. \ +All event subscriptions have three components \ +1. Filter - WQL Query for the events we want. EventID equals 19 \ +1. Consumer - An action to take upon triggering the filter. EventID equals 20 \ +1. Binding - Registers a filter to a consumer. EventID equals 21 \ Monitor for the creation of new WMI EventFilter, EventConsumer, and FilterToConsumerBinding. It may be pertinent to review all 3 to identify the flow of execution. In addition, EventCode 4104 may assist with any other PowerShell script usage that registered the subscription. how_to_implement = To successfully implement this search, you need to be ingesting logs with that provide WMI Event Subscription from your endpoints. If you are using Sysmon, you must have at least version 6.0.4 of the Sysmon TA and have enabled EventID 19, 20 and 21. Tune and filter known good to limit the volume. 
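A minimal sketch of the Sysmon-based WMI subscription monitoring described above could look like the following; the source value assumes the Splunk Add-on for Sysmon default XML Windows Event Log input, and the grouping fields are illustrative rather than the shipped search.
  source="XmlWinEventLog:Microsoft-Windows-Sysmon/Operational" EventCode IN (19, 20, 21)
  | stats count min(_time) as firstTime max(_time) as lastTime by Computer, User, EventCode
  | sort - count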
annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1546.003", "T1546"], "nist": ["DE.CM"]} @@ -5933,7 +5933,7 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify the execution of the `Get-DomainUser` commandlet with specific parameters. `Get-DomainUser` is part of PowerView, a PowerShell tool used to perform enumeration on Windows Active Directory networks. As the name suggests, `Get-DomainUser` is used to identify domain users and combining it with `-PreauthNotRequired` allows adversaries to discover domain accounts with Kerberos Pre Authentication disabled.\ +explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify the execution of the `Get-DomainUser` commandlet with specific parameters. `Get-DomainUser` is part of PowerView, a PowerShell tool used to perform enumeration on Windows Active Directory networks. As the name suggests, `Get-DomainUser` is used to identify domain users and combining it with `-PreauthNotRequired` allows adversaries to discover domain accounts with Kerberos Pre Authentication disabled. \ Red Teams and adversaries alike use may leverage PowerView to enumerate these accounts and attempt to crack their passwords offline. how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1558", "T1558.004"], "nist": ["DE.CM"]} @@ -6184,11 +6184,11 @@ providing_technologies = ["Microsoft Sysmon"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies "dsquery.exe" execution with arguments looking for `TrustedDomain` query directly on the command-line. This is typically indicative of an Administrator or adversary perform domain trust discovery. Note that this query does not identify any other variations of "Dsquery.exe" usage.\ -Within this detection, it is assumed `dsquery.exe` is not moved or renamed.\ -The search will return the first time and last time these command-line arguments were used for these executions, as well as the target system, the user, process "dsquery.exe" and its parent process.\ -DSQuery.exe is natively found in `C:\Windows\system32` and `C:\Windows\syswow64` and only on Server operating system.\ -The following DLL(s) are loaded when DSQuery.exe is launched `dsquery.dll`. If found loaded by another process, it is possible dsquery is running within that process context in memory.\ +explanation = The following analytic identifies "dsquery.exe" execution with arguments looking for `TrustedDomain` query directly on the command-line. This is typically indicative of an Administrator or adversary perform domain trust discovery. Note that this query does not identify any other variations of "Dsquery.exe" usage. \ +Within this detection, it is assumed `dsquery.exe` is not moved or renamed. \ +The search will return the first time and last time these command-line arguments were used for these executions, as well as the target system, the user, process "dsquery.exe" and its parent process. 
\ +DSQuery.exe is natively found in `C:\Windows\system32` and `C:\Windows\syswow64` and only on Server operating system. \ +The following DLL(s) are loaded when DSQuery.exe is launched `dsquery.dll`. If found loaded by another process, it is possible dsquery is running within that process context in memory. \ In addition to trust discovery, review parallel processes for additional behaviors performed. Identify the parent process and capture any files (batch files, for example) being used. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1482"], "nist": ["DE.CM"]} @@ -6209,7 +6209,7 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = Detect procdump.exe dumping the lsass process. This query looks for both -mm and -ma usage. -mm will produce a mini dump file and -ma will write a dump file with all process memory. Both are highly suspect and should be reviewed. This query does not monitor for the internal name (original_file_name=procdump) of the PE or look for procdump64.exe. Modify the query as needed.\ +explanation = Detect procdump.exe dumping the lsass process. This query looks for both -mm and -ma usage. -mm will produce a mini dump file and -ma will write a dump file with all process memory. Both are highly suspect and should be reviewed. This query does not monitor for the internal name (original_file_name=procdump) of the PE or look for procdump64.exe. Modify the query as needed. \ During triage, confirm this is procdump.exe executing. If it is the first time a Sysinternals utility has been ran, it is possible there will be a -accepteula on the command line. Review other endpoint data sources for cross process (injection) into lsass.exe. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. 
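As a concrete starting point for the procdump behavior described above, a minimal `tstats` sketch over CIM-mapped process data is shown below; like the description, it keys on procdump.exe with -mm or -ma against lsass and does not account for renamed binaries or procdump64.exe.
  | tstats summariesonly=false count min(_time) as firstTime max(_time) as lastTime from datamodel=Endpoint.Processes where Processes.process_name=procdump.exe (Processes.process="* -mm *" OR Processes.process="* -ma *") Processes.process="*lsass*" by Processes.dest Processes.user Processes.parent_process_name Processes.process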
annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1003.001", "T1003"], "nist": ["DE.CM"]} @@ -6440,12 +6440,12 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = This analytic identifies suspicious behavior related to ProxyShell against on-premise Microsoft Exchange servers. This analytic has been replaced by GUID d436f9e7-0ee7-4a47-864b-6dea2c4e2752 which utilizes the Web Datamodel.\ -Modification of this analytic is requried to ensure fields are mapped accordingly.\ -\ -A suspicious event will have `PowerShell`, the method `POST` and `autodiscover.json`. This is indicative of accessing PowerShell on the back end of Exchange with SSRF.\ -\ -An event will look similar to `POST /autodiscover/autodiscover.json a=dsxvu@fnsso.flq/powershell/?X-Rps-CAT=VgEAVAdXaW5kb3d...` (abbreviated)\ +explanation = This analytic identifies suspicious behavior related to ProxyShell against on-premise Microsoft Exchange servers. This analytic has been replaced by GUID d436f9e7-0ee7-4a47-864b-6dea2c4e2752 which utilizes the Web Datamodel. \ +Modification of this analytic is requried to ensure fields are mapped accordingly. \ + \ +A suspicious event will have `PowerShell`, the method `POST` and `autodiscover.json`. This is indicative of accessing PowerShell on the back end of Exchange with SSRF. \ + \ +An event will look similar to `POST /autodiscover/autodiscover.json a=dsxvu@fnsso.flq/powershell/?X-Rps-CAT=VgEAVAdXaW5kb3d...` (abbreviated) \ Review the source attempting to perform this activity against your environment. In addition, review PowerShell logs and access recently granted to Exchange roles. how_to_implement = The following analytic requires on-premise Exchange to be logging to Splunk using the TA - https://splunkbase.splunk.com/app/3225. Ensure logs are parsed correctly, or tune the analytic for your environment. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", "Installation"], "mitre_attack": ["T1190", "T1133"], "nist": ["DE.CM"]} @@ -6456,10 +6456,10 @@ providing_technologies = null type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies the usage of Exchange PowerShell modules that were recently used for a proof of concept related to ProxyShell. Adversaries may abuse a limited set of PwSh Modules related to Exchange once gained access via ProxyShell or ProxyNotShell.\ -Inherently, the usage of the modules is not malicious, but reviewing parallel processes, and user, of the session will assist with determining the intent.\ -Module - New-MailboxExportRequest will begin the process of exporting contents of a primary mailbox or archive to a .pst file.\ -Module - New-managementroleassignment can assign a management role to a management role group, management role assignment policy, user, or universal security group (USG).\ +explanation = The following analytic identifies the usage of Exchange PowerShell modules that were recently used for a proof of concept related to ProxyShell. Adversaries may abuse a limited set of PwSh Modules related to Exchange once gained access via ProxyShell or ProxyNotShell. \ +Inherently, the usage of the modules is not malicious, but reviewing parallel processes, and user, of the session will assist with determining the intent. 
\ +Module - New-MailboxExportRequest will begin the process of exporting contents of a primary mailbox or archive to a .pst file. \ +Module - New-managementroleassignment can assign a management role to a management role group, management role assignment policy, user, or universal security group (USG). \ Module - New-MailboxSearch cmdlet to create a mailbox search and either get an estimate of search results, place search results on In-Place Hold or copy them to a Discovery mailbox. You can also place all contents in a mailbox on hold by not specifying a search query, which accomplishes similar results as Litigation Hold. \ Module - Get-Recipient cmdlet to view existing recipient objects in your organization. This cmdlet returns all mail-enabled objects (for example, mailboxes, mail users, mail contacts, and distribution groups). how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1059", "T1059.001"], "nist": ["DE.CM"]} @@ -6560,10 +6560,10 @@ providing_technologies = null type = detection asset_type = Endpoint confidence = medium -explanation = Fodhelper.exe has a known UAC bypass as it attempts to look for specific registry keys upon execution, that do not exist. Therefore, an attacker can write its malicious commands in these registry keys to be executed by fodhelper.exe with the highest privilege.\ -* `HKCU:\Software\Classes\ms-settings\shell\open\command`\ -* `HKCU:\Software\Classes\ms-settings\shell\open\command\DelegateExecute`\ -* `HKCU:\Software\Classes\ms-settings\shell\open\command\(default)`\ +explanation = Fodhelper.exe has a known UAC bypass as it attempts to look for specific registry keys upon execution, that do not exist. Therefore, an attacker can write its malicious commands in these registry keys to be executed by fodhelper.exe with the highest privilege. \ +* `HKCU:\Software\Classes\ms-settings\shell\open\command` \ +* `HKCU:\Software\Classes\ms-settings\shell\open\command\DelegateExecute` \ +* `HKCU:\Software\Classes\ms-settings\shell\open\command\(default)` \ Upon triage, fodhelper.exe will have a child process and read access will occur on the registry keys. Isolate the endpoint and review parallel processes for additional behavior. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. 
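To illustrate the registry side of the fodhelper.exe behavior, a minimal sketch against the CIM Endpoint.Registry dataset could flag writes beneath the ms-settings handler path listed above; the path wildcard and grouping fields are assumptions, not the shipped search.
  | tstats summariesonly=false count min(_time) as firstTime max(_time) as lastTime from datamodel=Endpoint.Registry where Registry.registry_path="*\\ms-settings\\shell\\open\\command*" by Registry.dest Registry.user Registry.registry_path Registry.registry_value_name Registry.registry_value_data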
annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1112", "T1548.002", "T1548"], "nist": ["DE.CM"]} @@ -6674,9 +6674,9 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies Get-DomainTrust from PowerView in order to gather domain trust information.\ +explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies Get-DomainTrust from PowerView in order to gather domain trust information. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1482"], "nist": ["DE.CM"]} @@ -6717,9 +6717,9 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies Get-ForestTrust from PowerSploit in order to gather domain trust information.\ +explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies Get-ForestTrust from PowerSploit in order to gather domain trust information. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. 
annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1482", "T1059.001"], "nist": ["DE.CM"]} @@ -6740,9 +6740,9 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies the usage of `Get-WMIObject Win32_Group`, which is typically used as a way to identify groups on the endpoint. Typically, by itself, is not malicious but may raise suspicion based on time of day, endpoint and username.\ +explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies the usage of `Get-WMIObject Win32_Group`, which is typically used as a way to identify groups on the endpoint. Typically, by itself, is not malicious but may raise suspicion based on time of day, endpoint and username. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1069", "T1069.001"], "nist": ["DE.AE"]} @@ -7303,7 +7303,7 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic detects the creation of suspicious configuration files, /etc/at.allow or /etc/at.deny, in Linux. These files are commonly abused by malware, adversaries, or red teamers to establish persistence on compromised hosts. The configuration files determine which users are allowed to execute the "at" application, which is used for scheduling tasks in Linux. Attackers can add their user or a compromised username to these files to execute malicious code using "at." It's important to consider potential false positives as administrators or network operators may create these files for legitimate automation purposes. Adjust the filter macros to minimize false positives.\ +explanation = The following analytic detects the creation of suspicious configuration files, /etc/at.allow or /etc/at.deny, in Linux. These files are commonly abused by malware, adversaries, or red teamers to establish persistence on compromised hosts. The configuration files determine which users are allowed to execute the "at" application, which is used for scheduling tasks in Linux. Attackers can add their user or a compromised username to these files to execute malicious code using "at." 
It's important to consider potential false positives as administrators or network operators may create these files for legitimate automation purposes. Adjust the filter macros to minimize false positives. \ Identifying the creation of these configuration files is valuable for a SOC as it indicates potential unauthorized activities or an attacker attempting to establish persistence. If a true positive is found, further investigation is necessary to examine the contents of the created configuration file and determine the source of creation. The impact of a true positive can vary but could result in unauthorized execution of malicious code, data theft, or other detrimental consequences. Analysts should review the file path, creation time, and associated processes to assess the extent of the attack and initiate appropriate response actions. how_to_implement = To successfully implement this search, you need to be ingesting logs with the file name, file path, and process_guid executions from your endpoints into the Endpoint datamodel. If you are using Sysmon, you can use the Add-on for Linux Sysmon from Splunkbase. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.003", "T1053"], "nist": ["DE.AE"]} @@ -7314,8 +7314,8 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic detects the execution of the "At" application in Linux, which can be used by attackers to create persistence entries on a compromised host. The "At" application can be used for automation purposes by administrators or network operators, so the filter macros should be updated to remove false positives. If a true positive is found, it suggests an attacker is trying to maintain access to the environment or potentially deliver additional malicious payloads, leading to data theft, ransomware, or other damaging outcomes. To implement this analytic, ensure you are ingesting logs with the required fields from your endpoints into the Endpoint datamodel. When a true positive is detected, it suggests that an attacker is attempting to establish persistence or deliver additional malicious payloads by leveraging the "At" application. This behavior can lead to data theft, ransomware attacks, or other damaging outcomes.\ -During triage, the SOC analyst should review the context surrounding the execution of the "At" application. This includes identifying the user, the parent process responsible for invoking the application, and the specific command-line arguments used. It is important to consider whether the execution is expected behavior by an administrator or network operator for legitimate automation purposes.\ +explanation = The following analytic detects the execution of the "At" application in Linux, which can be used by attackers to create persistence entries on a compromised host. The "At" application can be used for automation purposes by administrators or network operators, so the filter macros should be updated to remove false positives. If a true positive is found, it suggests an attacker is trying to maintain access to the environment or potentially deliver additional malicious payloads, leading to data theft, ransomware, or other damaging outcomes. To implement this analytic, ensure you are ingesting logs with the required fields from your endpoints into the Endpoint datamodel. 
When a true positive is detected, it suggests that an attacker is attempting to establish persistence or deliver additional malicious payloads by leveraging the "At" application. This behavior can lead to data theft, ransomware attacks, or other damaging outcomes. \ +During triage, the SOC analyst should review the context surrounding the execution of the "At" application. This includes identifying the user, the parent process responsible for invoking the application, and the specific command-line arguments used. It is important to consider whether the execution is expected behavior by an administrator or network operator for legitimate automation purposes. \ The presence of "At" application execution may indicate an attacker's attempt to maintain unauthorized access to the environment. Immediate investigation and response are necessary to mitigate further risks, identify the attacker's intentions, and prevent potential harm to the organization. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.002", "T1053"], "nist": ["DE.AE"]} @@ -7556,9 +7556,9 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic detects the suspicious editing of cron jobs in Linux via the crontab command-line parameter. This tactic could be used by adversaries or malware to schedule execution of their malicious code, potentially leading to system compromise or unauthorized persistent access. It pinpoints this activity by monitoring command-line executions involving 'crontab' and the edit parameter (-e).\ -Recognizing such activity is vital for a SOC as cron job manipulations might signal unauthorized persistence attempts or scheduled malicious actions, potentially resulting in substantial harm. A true positive signifies an active threat, with implications ranging from unauthorized access to broader network compromise.\ -To implement this analytic, logs capturing process name, parent process, and command-line executions from your endpoints must be ingested.\ +explanation = The following analytic detects the suspicious editing of cron jobs in Linux via the crontab command-line parameter. This tactic could be used by adversaries or malware to schedule execution of their malicious code, potentially leading to system compromise or unauthorized persistent access. It pinpoints this activity by monitoring command-line executions involving 'crontab' and the edit parameter (-e). \ +Recognizing such activity is vital for a SOC as cron job manipulations might signal unauthorized persistence attempts or scheduled malicious actions, potentially resulting in substantial harm. 
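A minimal sketch of the "At" execution detection described above, assuming CIM-mapped Linux process events in Endpoint.Processes (for example via the Add-on for Linux Sysmon), could look like this; including atd alongside at is an assumption, and the filter macro tuning mentioned above is omitted for brevity.
  | tstats summariesonly=false count min(_time) as firstTime max(_time) as lastTime from datamodel=Endpoint.Processes where Processes.process_name IN ("at", "atd") by Processes.dest Processes.user Processes.parent_process_name Processes.process_name Processes.process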
A true positive signifies an active threat, with implications ranging from unauthorized access to broader network compromise. \ +To implement this analytic, logs capturing process name, parent process, and command-line executions from your endpoints must be ingested. \ Known false positives could stem from valid administrative tasks or automation processes using crontab. To reduce these, fine-tune the filter macros according to the benign activities within your environment. These adjustments ensure legitimate actions aren't mistaken for threats, allowing analysts to focus on genuine potential risks. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.003", "T1053"], "nist": ["DE.AE"]} @@ -7929,7 +7929,7 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic is designed to identify suspicious command lines that may append user entries to either /etc/at.allow or /etc/at.deny. These files can be exploited by malicious actors for persistence on a compromised Linux host by altering permissions for scheduled tasks using the at command.\ +explanation = The following analytic is designed to identify suspicious command lines that may append user entries to either /etc/at.allow or /etc/at.deny. These files can be exploited by malicious actors for persistence on a compromised Linux host by altering permissions for scheduled tasks using the at command. \ In this context, an attacker can create a user or add an existing user to these configuration files to execute their malicious code through scheduled tasks. The detection of such anomalous behavior can serve as an effective indicator warranting further investigation to validate if the activity is indeed malicious or a false positive. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. 
annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.002", "T1053"], "nist": ["DE.AE"]} @@ -7950,8 +7950,8 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic is designed to detect potential tampering with cronjob files on a Linux system. It specifically searches for command lines that may be used to append code to existing cronjob files, a technique often employed by adversaries, malware, and red teamers for persistence or privilege escalation. Altering existing or sometimes normal cronjob script files allows malicious code to be executed automatically.\ -The analytic operates by monitoring logs for specific process names, parent processes, and command-line executions from your endpoints. It specifically checks for any 'echo' command which modifies files in directories commonly associated with cron jobs such as '/etc/cron*', '/var/spool/cron/', and '/etc/anacrontab'. If such activity is detected, an alert is triggered.\ +explanation = The following analytic is designed to detect potential tampering with cronjob files on a Linux system. It specifically searches for command lines that may be used to append code to existing cronjob files, a technique often employed by adversaries, malware, and red teamers for persistence or privilege escalation. Altering existing or sometimes normal cronjob script files allows malicious code to be executed automatically. \ +The analytic operates by monitoring logs for specific process names, parent processes, and command-line executions from your endpoints. It specifically checks for any 'echo' command which modifies files in directories commonly associated with cron jobs such as '/etc/cron*', '/var/spool/cron/', and '/etc/anacrontab'. If such activity is detected, an alert is triggered. \ This behavior is worth identifying for a SOC because malicious cron jobs can lead to system compromises and unauthorized data access, impacting business operations and data integrity. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.003", "T1053"], "nist": ["DE.AE"]} @@ -7962,9 +7962,9 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic detects potential unauthorized modifications to Linux cronjobs using text editors like "nano", "vi" or "vim". It identifies this behavior by tracking command-line executions that interact with paths related to cronjob configuration, a common Linux scheduling utility. 
Cronjob files may be manipulated by attackers for privilege escalation or persistent access, making such changes critical to monitor.\ The identified behavior is significant for a Security Operations Center (SOC) as it could indicate an ongoing attempt at establishing persistent access or privilege escalation, leading to data breaches, system compromise, or other malicious activities.\ -In case of a true positive, the impact could be severe. An attacker with escalated privileges or persistent access could carry out damaging actions, such as data theft, sabotage, or further network penetration.\ -To implement this analytic, ensure ingestion of logs tracking process name, parent process, and command-line executions from your endpoints. Utilize the Add-on for Linux Sysmon from Splunkbase if you're using Sysmon.\ +explanation = The following analytic detects potential unauthorized modifications to Linux cronjobs using text editors like "nano", "vi" or "vim". It identifies this behavior by tracking command-line executions that interact with paths related to cronjob configuration, a common Linux scheduling utility. Cronjob files may be manipulated by attackers for privilege escalation or persistent access, making such changes critical to monitor.\ The identified behavior is significant for a Security Operations Center (SOC) as it could indicate an ongoing attempt at establishing persistent access or privilege escalation, leading to data breaches, system compromise, or other malicious activities. \ +In case of a true positive, the impact could be severe. An attacker with escalated privileges or persistent access could carry out damaging actions, such as data theft, sabotage, or further network penetration. \ +To implement this analytic, ensure ingestion of logs tracking process name, parent process, and command-line executions from your endpoints. Utilize the Add-on for Linux Sysmon from Splunkbase if you're using Sysmon. \ Known false positives include legitimate administrative tasks, as these commands may also be used for benign purposes. Careful tuning and filtering based on known benign activity in your environment can minimize these instances. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.003", "T1053"], "nist": ["DE.AE"]} @@ -8035,7 +8035,7 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic is designed to detect suspicious file creation within the systemd timer directory on Linux platforms. Systemd is a system and service manager for Linux, similar to the combination of wininit.exe and services.exe on Windows. 
This process initializes a Linux system and starts defined services in unit files. Malicious actors, such as adversaries, malware, or red teamers, can exploit this feature by embedding a systemd service file for persistence on the targeted or compromised host.\ +explanation = The following analytic is designed to detect suspicious file creation within the systemd timer directory on Linux platforms. Systemd is a system and service manager for Linux, similar to the combination of wininit.exe and services.exe on Windows. This process initializes a Linux system and starts defined services in unit files. Malicious actors, such as adversaries, malware, or red teamers, can exploit this feature by embedding a systemd service file for persistence on the targeted or compromised host. \ The analytic works by monitoring logs with file name, file path, and process GUID data from your endpoints. If a .service file is created in certain systemd directories, the analytic triggers an alert. This behavior is significant for a Security Operations Center (SOC) as it may indicate a persistent threat within the network, with a potential impact of system compromise or data exfiltration. how_to_implement = To successfully implement this search, you need to be ingesting logs with the file name, file path, and process_guid executions from your endpoints. If you are using Sysmon, you can use the Add-on for Linux Sysmon from Splunkbase. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.006", "T1053"], "nist": ["DE.AE"]} @@ -8046,8 +8046,8 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic detects the restarting or re-enabling of services in the Linux platform. It focuses on the use of the systemctl or service tools for executing these actions. Adversaries may leverage this technique to repeatedly execute malicious payloads as a form of persistence. Linux hosts typically start services during boot to perform background system functions. However, administrators may also create legitimate services for specific tools or applications as part of task automation. In such cases, it is recommended to verify the service path of the registered script or executable and identify the creator of the service for further validation.\ -It's important to be aware that this analytic may generate false positives as administrators or network operators may use the same command-line for legitimate automation purposes. Filter macros should be updated accordingly to minimize false positives.\ +explanation = The following analytic detects the restarting or re-enabling of services in the Linux platform. It focuses on the use of the systemctl or service tools for executing these actions. Adversaries may leverage this technique to repeatedly execute malicious payloads as a form of persistence. Linux hosts typically start services during boot to perform background system functions. However, administrators may also create legitimate services for specific tools or applications as part of task automation. In such cases, it is recommended to verify the service path of the registered script or executable and identify the creator of the service for further validation. \ +It's important to be aware that this analytic may generate false positives as administrators or network operators may use the same command-line for legitimate automation purposes. 
Filter macros should be updated accordingly to minimize false positives. \ Identifying restarted or re-enabled services is valuable for a SOC as it can indicate potential malicious activities attempting to maintain persistence or execute unauthorized actions on Linux systems. By detecting and investigating these events, security analysts can respond promptly to mitigate risks and prevent further compromise. The impact of a true positive can range from unauthorized access to data destruction or other damaging outcomes. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.006", "T1053"], "nist": ["DE.AE"]} @@ -8338,9 +8338,9 @@ providing_technologies = null type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies the use of the EncodedCommand PowerShell parameter. This is typically used by Administrators to run complex scripts, but commonly used by adversaries to hide their code.\ -The analytic identifies all variations of EncodedCommand, as PowerShell allows the ability to shorten the parameter. For example enc, enco, encod and so forth. In addition, through our research it was identified that PowerShell will interpret different command switch types beyond the hyphen. We have added endash, emdash, horizontal bar, and forward slash.\ -During triage, review parallel events to determine legitimacy. Tune as needed based on admin scripts in use.\ +explanation = The following analytic identifies the use of the EncodedCommand PowerShell parameter. This is typically used by Administrators to run complex scripts, but commonly used by adversaries to hide their code. \ +The analytic identifies all variations of EncodedCommand, as PowerShell allows the ability to shorten the parameter. For example enc, enco, encod and so forth. In addition, through our research it was identified that PowerShell will interpret different command switch types beyond the hyphen. We have added endash, emdash, horizontal bar, and forward slash. \ +During triage, review parallel events to determine legitimacy. Tune as needed based on admin scripts in use. \ Alternatively, may use regex per matching here https://regexr.com/662ov. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. 
The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1027"], "nist": ["DE.AE"]} @@ -8481,12 +8481,12 @@ providing_technologies = ["Microsoft Sysmon"] type = detection asset_type = Endpoint confidence = medium -explanation = The following hunting analytic identifies `msi.dll` being loaded by a binary not located in `system32`, `syswow64`, `winsxs` or `windows` paths. This behavior is most recently related to InstallerFileTakeOver, or CVE-2021-41379, and DLL side-loading. CVE-2021-41379 requires a binary to be dropped and `msi.dll` to be loaded by it. To Successful exploitation of this issue happens in four parts\ -\ -1. Generation of an MSI that will trigger bad behavior.\ -1. Preparing a directory for MSI installation.\ -1. Inducing an error state.\ -1. Racing to introduce a junction and a symlink to trick msiexec.exe to modify the attacker specified file.\ +explanation = The following hunting analytic identifies `msi.dll` being loaded by a binary not located in `system32`, `syswow64`, `winsxs` or `windows` paths. This behavior is most recently related to InstallerFileTakeOver, or CVE-2021-41379, and DLL side-loading. CVE-2021-41379 requires a binary to be dropped and `msi.dll` to be loaded by it. Successful exploitation of this issue happens in four parts \ + \ +1. Generation of an MSI that will trigger bad behavior. \ +1. Preparing a directory for MSI installation. \ +1. Inducing an error state. \ +1. Racing to introduce a junction and a symlink to trick msiexec.exe to modify the attacker specified file. \ In addition, `msi.dll` has been abused in DLL side-loading attacks by being loaded by non-system binaries. how_to_implement = To successfully implement this search, you need to be ingesting logs with the process name and imageloaded executions from your endpoints. If you are using Sysmon, you must have at least version 6.0.4 of the Sysmon TA. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1574.002", "T1574"], "nist": ["DE.AE"]} @@ -8637,8 +8637,8 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = Monitor for signs that Ntdsutil is being used to Extract Active Directory database - NTDS.dit, typically used for offline password cracking. It may be used in normal circumstances with no command line arguments or shorthand variations of more common arguments. Ntdsutil.exe is typically seen run on a Windows Server. Typical command used to dump ntds.dit\ -ntdsutil "ac i ntds" "ifm" "create full C:\Temp" q q\ +explanation = Monitor for signs that Ntdsutil is being used to extract the Active Directory database - NTDS.dit, typically used for offline password cracking. It may be used in normal circumstances with no command line arguments or shorthand variations of more common arguments. Ntdsutil.exe is typically seen run on a Windows Server. Typical command used to dump ntds.dit \ +ntdsutil "ac i ntds" "ifm" "create full C:\Temp" q q \ This technique uses "Install from Media" (IFM), which will extract a copy of the Active Directory database. A successful export of the Active Directory database will yield a file modification named ntds.dit to the destination.
how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1003.003", "T1003"], "nist": ["DE.CM"]} @@ -8849,10 +8849,10 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes Windows Event Code 5145, "A network share object was checked to see whether client can be granted desired access". During our research into PetitPotam, CVE-2021-36942, we identified the ocurrence of this event on the target host with specific values.\ -To enable 5145 events via Group Policy - Computer Configuration->Polices->Windows Settings->Security Settings->Advanced Audit Policy Configuration. Expand this node, go to Object Access (Audit Polices->Object Access), then select the Setting Audit Detailed File Share Audit\ -It is possible this is not enabled by default and may need to be reviewed and enabled.\ -\ +explanation = The following analytic utilizes Windows Event Code 5145, "A network share object was checked to see whether client can be granted desired access". During our research into PetitPotam, CVE-2021-36942, we identified the occurrence of this event on the target host with specific values. \ +To enable 5145 events via Group Policy - Computer Configuration->Policies->Windows Settings->Security Settings->Advanced Audit Policy Configuration. Expand this node, go to Object Access (Audit Policies->Object Access), then select the Setting Audit Detailed File Share Audit \ +It is possible this is not enabled by default and may need to be reviewed and enabled. \ + \ During triage, review parallel security events to identify further suspicious activity. how_to_implement = Windows Event Code 5145 is required to utilize this analytic and it may not be enabled in most environments. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1187"], "nist": ["DE.CM"]} @@ -8893,7 +8893,7 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic is designed to identify possible lateral movement attacks that involve the spawning of a PowerShell process as a child or grandchild process of commonly abused processes. These processes include services.exe, wmiprsve.exe, svchost.exe, wsmprovhost.exe, and mmc.exe.\ +explanation = The following analytic is designed to identify possible lateral movement attacks that involve the spawning of a PowerShell process as a child or grandchild process of commonly abused processes. These processes include services.exe, wmiprsve.exe, svchost.exe, wsmprovhost.exe, and mmc.exe.
\ Such behavior is indicative of legitimate Windows features such as the Service Control Manager, Windows Management Instrumentation, Task Scheduler, Windows Remote Management, and the DCOM protocol being abused to start a process on a remote endpoint. This behavior is often seen during lateral movement techniques where adversaries or red teams abuse these services for lateral movement and remote code execution. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1021", "T1021.003", "T1021.006", "T1047", "T1053.005", "T1543.003", "T1059.001", "T1218.014"], "nist": ["DE.CM"]} @@ -8974,9 +8974,9 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies specific PowerShell modules typically used to enumerate an organizations domain or users.\ +explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies specific PowerShell modules typically used to enumerate an organization's domain or users. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1059", "T1059.001"], "nist": ["DE.CM"]} @@ -9017,10 +9017,10 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs.
Dependent upon volume, enable no critical endpoints or all.\ -\ -This analytic identifies `GetProcAddress` in the script block. This is not normal to be used by most PowerShell scripts and is typically unsafe/malicious. Many attack toolkits use GetProcAddress to obtain code execution.\ -In use, `$var_gpa = $var_unsafe_native_methods.GetMethod(GetProcAddress` and later referenced/executed elsewhere.\ +explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies `GetProcAddress` in the script block. This is not normal to be used by most PowerShell scripts and is typically unsafe/malicious. Many attack toolkits use GetProcAddress to obtain code execution. \ +In use, `$var_gpa = $var_unsafe_native_methods.GetMethod(GetProcAddress` and later referenced/executed elsewhere. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1059", "T1055", "T1059.001"], "nist": ["DE.CM"]} @@ -9031,11 +9031,11 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies `FromBase64String` within the script block. A typical malicious instance will include additional code.\ -Command example - `[Byte[]]$var_code = [System.Convert]::FromBase64String(38uqIyMjQ6rG....`\ -\ +explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies `FromBase64String` within the script block. A typical malicious instance will include additional code. \ +Command example - `[Byte[]]$var_code = [System.Convert]::FromBase64String(38uqIyMjQ6rG....` \ + \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell.
annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1059", "T1027", "T1059.001"], "nist": ["DE.CM"]} @@ -9056,9 +9056,9 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies PowerShell cmdlet - `get-localgroup` being ran. Typically, by itself, is not malicious but may raise suspicion based on time of day, endpoint and username.\ +explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies PowerShell cmdlet - `get-localgroup` being ran. Typically, by itself, is not malicious but may raise suspicion based on time of day, endpoint and username. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1069", "T1069.001"], "nist": ["DE.AE"]} @@ -9089,9 +9089,9 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies "MSF.Powershell","MSF.Powershell.Meterpreter","MSF.Powershell.Meterpreter.Kiwi","MSF.Powershell.Meterpreter.Transport" being used. This behavior is related to when a Meterpreter session is started and the operator runs load_kiwi.\ +explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies "MSF.Powershell","MSF.Powershell.Meterpreter","MSF.Powershell.Meterpreter.Kiwi","MSF.Powershell.Meterpreter.Transport" being used. This behavior is related to when a Meterpreter session is started and the operator runs load_kiwi. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. 
Review the entire logged PowerShell script block. how_to_implement = The following analytic requires PowerShell operational logs to be imported. Modify the powershell macro as needed to match the sourcetype or add index. This analytic is specific to 4104, or PowerShell Script Block Logging. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1059", "T1059.001"], "nist": ["DE.CM"]} @@ -9102,9 +9102,9 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all.\ -\ -This analytic identifies the use of PowerShell loading .net assembly via reflection. This is commonly found in malicious PowerShell usage, including Empire and Cobalt Strike. In addition, the `load(` value may be modifed by removing `(` and it will identify more events to review.\ +explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies the use of PowerShell loading .net assembly via reflection. This is commonly found in malicious PowerShell usage, including Empire and Cobalt Strike. In addition, the `load(` value may be modified by removing `(` and it will identify more events to review. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1059", "T1059.001"], "nist": ["DE.CM"]} @@ -9235,9 +9235,9 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies new printer drivers being load by utilizing the Windows PrintService operational logs, EventCode 316. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare.\ -\ -Within the proof of concept code, the following event will occur - "Printer driver 1234 for Windows x64 Version-3 was added or updated. Files:- UNIDRV.DLL, kernelbase.dll, evil.dll. No user action is required."\ +explanation = The following analytic identifies new printer drivers being loaded by utilizing the Windows PrintService operational logs, EventCode 316. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare. \ + \ +Within the proof of concept code, the following event will occur - "Printer driver 1234 for Windows x64 Version-3 was added or updated. Files:- UNIDRV.DLL, kernelbase.dll, evil.dll.
No user action is required." \ During triage, isolate the endpoint and review for source of exploitation. Capture any additional file modification events and review the source of where the exploitation began. how_to_implement = You will need to ensure PrintService Admin and Operational logs are being logged to Splunk from critical or all systems. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1547.012", "T1547"], "nist": ["DE.CM"]} @@ -9248,9 +9248,9 @@ providing_technologies = null type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies driver load errors utilizing the Windows PrintService Admin logs. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare.\ -Within the proof of concept code, the following error will occur - "The print spooler failed to load a plug-in module C:\Windows\system32\spool\DRIVERS\x64\3\meterpreter.dll, error code 0x45A. See the event user data for context information."\ -The analytic is based on file path and failure to load the plug-in.\ +explanation = The following analytic identifies driver load errors utilizing the Windows PrintService Admin logs. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare. \ +Within the proof of concept code, the following error will occur - "The print spooler failed to load a plug-in module C:\Windows\system32\spool\DRIVERS\x64\3\meterpreter.dll, error code 0x45A. See the event user data for context information." \ +The analytic is based on file path and failure to load the plug-in. \ During triage, isolate the endpoint and review for source of exploitation. Capture any additional file modification events. how_to_implement = You will need to ensure PrintService Admin and Operational logs are being logged to Splunk from critical or all systems. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1547.012", "T1547"], "nist": ["DE.CM"]} @@ -9831,9 +9831,9 @@ providing_technologies = ["Microsoft Sysmon"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic detects the registration of suspicious tasks on Windows using the Windows Security EventCode 4698, "A scheduled task was created." It specifically looks for tasks registered through schtasks.exe or TaskService that have command arguments containing the string "HTTP." This behavior is often associated with malware or attacks that utilize Living off the Land binaries (lolbins) to download additional files or payloads to the compromised machine.\ -The search returns information about the task, such as the task name, command, author, enabled status, hidden status, and arguments. Upon triage, it is important to identify the source of the scheduled task, whether it was registered through schtasks.exe or TaskService. Review the details of the created task and the command to be executed. Capture relevant artifacts on disk and examine them. Additionally, identify any parallel processes occurring within the same timeframe to determine the source of the attack.\ -Implementing this analytic requires ingesting logs with information about task schedules, specifically Windows Security Log EventCode 4698, from your endpoints. 
It is recommended to tune and filter known instances of task schedules used in your environment to minimize false positives.\ +explanation = The following analytic detects the registration of suspicious tasks on Windows using the Windows Security EventCode 4698, "A scheduled task was created." It specifically looks for tasks registered through schtasks.exe or TaskService that have command arguments containing the string "HTTP." This behavior is often associated with malware or attacks that utilize Living off the Land binaries (lolbins) to download additional files or payloads to the compromised machine. \ +The search returns information about the task, such as the task name, command, author, enabled status, hidden status, and arguments. Upon triage, it is important to identify the source of the scheduled task, whether it was registered through schtasks.exe or TaskService. Review the details of the created task and the command to be executed. Capture relevant artifacts on disk and examine them. Additionally, identify any parallel processes occurring within the same timeframe to determine the source of the attack. \ +Implementing this analytic requires ingesting logs with information about task schedules, specifically Windows Security Log EventCode 4698, from your endpoints. It is recommended to tune and filter known instances of task schedules used in your environment to minimize false positives. \ Detecting the registration of suspicious tasks with HTTP command arguments is valuable for a SOC as it indicates potential malicious activity or an attempt to establish persistence on the system. If a true positive is found, further investigation is warranted to analyze the nature and purpose of the scheduled task, identify any downloaded files or payloads, and mitigate the associated risks. The impact of a true positive can vary but may include data exfiltration, malware propagation, or unauthorized access to sensitive information. how_to_implement = To successfully implement this search, you need to be ingesting logs with the task schedule (Exa. Security Log EventCode 4698) endpoints. Tune and filter known instances of Task schedule used in your environment. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053"], "nist": ["DE.CM"]} @@ -9844,9 +9844,9 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic detects the creation of suspicious tasks in Windows, specifically tasks using the rundll32 command. It's implemented using Windows Security EventCode 4698 for A scheduled task was created, and looks for tasks executed either via schtasks.exe or TaskService. This behavior is worth identifying as it is commonly used by malware, such as TrickBot, that leverages rundll32 to execute its downloader.\ -If a true positive is found, it suggests an attacker is trying to persist within the environment or potentially deliver additional malicious payloads, leading to data theft, ransomware, or other damaging outcomes.\ -To implement this analytic, ensure you are ingesting logs with task schedule information from your endpoints. Be aware of potential false positives - legitimate uses of Task Scheduler in your environment may cause benign activities to be flagged.\ +explanation = The following analytic detects the creation of suspicious tasks in Windows, specifically tasks using the rundll32 command. 
It's implemented using Windows Security EventCode 4698 for A scheduled task was created, and looks for tasks executed either via schtasks.exe or TaskService. This behavior is worth identifying as it is commonly used by malware, such as TrickBot, that leverages rundll32 to execute its downloader. \ +If a true positive is found, it suggests an attacker is trying to persist within the environment or potentially deliver additional malicious payloads, leading to data theft, ransomware, or other damaging outcomes. \ +To implement this analytic, ensure you are ingesting logs with task schedule information from your endpoints. Be aware of potential false positives - legitimate uses of Task Scheduler in your environment may cause benign activities to be flagged. \ Upon triage, review the scheduled task's source and the command to be executed. Capture and inspect any relevant on-disk artifacts, and look for concurrent processes to identify the attack source. This approach helps analysts detect potential threats earlier and mitigate the risks. how_to_implement = To successfully implement this search, you need to be ingesting logs with the task schedule (Exa. Security Log EventCode 4698) endpoints. Tune and filter known instances of Task schedule used in your environment. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053"], "nist": ["DE.CM"]} @@ -9857,7 +9857,7 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic detects the creation of suspicious tasks on a remote Windows endpoint using the at.exe command with command-line arguments. This technique is commonly used by red teams and adversaries for lateral movement and remote code execution. The at.exe binary leverages the deprecated AT protocol, which may still work on previous versions of Windows. Attackers can enable this protocol on demand by modifying a system registry key. It is important to consider potential false positives. While administrators may create scheduled tasks on remote systems, this activity is typically limited to a small set of hosts or users.\ +explanation = The following analytic detects the creation of suspicious tasks on a remote Windows endpoint using the at.exe command with command-line arguments. This technique is commonly used by red teams and adversaries for lateral movement and remote code execution. The at.exe binary leverages the deprecated AT protocol, which may still work on previous versions of Windows. Attackers can enable this protocol on demand by modifying a system registry key. It is important to consider potential false positives. While administrators may create scheduled tasks on remote systems, this activity is typically limited to a small set of hosts or users. \ Identifying the creation of scheduled tasks on remote endpoints is crucial for a Security Operations Center (SOC) because it indicates potential unauthorized activity or an attacker attempting to establish persistence or execute malicious code. The impact of a true positive can be significant, leading to unauthorized access, data theft, or other damaging outcomes. During triage, investigate the source and purpose of the scheduled task, inspect relevant on-disk artifacts, and analyze concurrent processes to identify the extent of the attack and take appropriate response actions. 
how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053", "T1053.002"], "nist": ["DE.CM"]} @@ -9978,10 +9978,10 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies `powershell.exe` usage, using Script Block Logging EventCode 4104, related to querying the domain for Service Principle Names. typically, this is a precursor activity related to kerberoasting or the silver ticket attack.\ -What is a ServicePrincipleName?\ -A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name.\ -The following analytic identifies the use of KerberosRequestorSecurityToken class within the script block. Using .NET System.IdentityModel.Tokens.KerberosRequestorSecurityToken class in PowerShell is the equivelant of using setspn.exe.\ +explanation = The following analytic identifies `powershell.exe` usage, using Script Block Logging EventCode 4104, related to querying the domain for Service Principal Names. Typically, this is a precursor activity related to kerberoasting or the silver ticket attack. \ +What is a ServicePrincipalName? \ +A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name. \ +The following analytic identifies the use of the KerberosRequestorSecurityToken class within the script block. Using the .NET System.IdentityModel.Tokens.KerberosRequestorSecurityToken class in PowerShell is the equivalent of using setspn.exe. \ During triage, review parallel processes for further suspicious activity. how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1558.003"], "nist": ["DE.CM"]} @@ -9992,13 +9992,13 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies `setspn.exe` usage related to querying the domain for Service Principle Names.
typically, this is a precursor activity related to kerberoasting or the silver ticket attack.\ -What is a ServicePrincipleName?\ -A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name.\ -Example usage includes the following\ -* setspn -T offense -Q */* 1. setspn -T attackrange.local -F -Q MSSQLSvc/* 1. setspn -Q */* > allspns.txt 1. setspn -q\ -Values\ -* -F = perform queries at the forest, rather than domain level 1. -T = perform query on the specified domain or forest (when -F is also used) 1. -Q = query for existence of SPN\ +explanation = The following analytic identifies `setspn.exe` usage related to querying the domain for Service Principal Names. Typically, this is a precursor activity related to kerberoasting or the silver ticket attack. \ +What is a ServicePrincipalName? \ +A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name. \ +Example usage includes the following \ +* setspn -T offense -Q */* 1. setspn -T attackrange.local -F -Q MSSQLSvc/* 1. setspn -Q */* > allspns.txt 1. setspn -q \ +Values \ +* -F = perform queries at the forest, rather than domain level 1. -T = perform query on the specified domain or forest (when -F is also used) 1. -Q = query for existence of SPN \ During triage, review parallel processes for further suspicious activity. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1558.003"], "nist": ["DE.CM"]} @@ -10059,9 +10059,9 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes Windows Security EventCode 4698, "A scheduled task was created," and EventCode 4699, "A scheduled task was deleted," to identify scheduled tasks that are created and deleted within a short time frame of less than 30 seconds. This behavior is indicative of a potential lateral movement attack where the Task Scheduler is abused to achieve code execution. Both red teams and adversaries may exploit the Task Scheduler for lateral movement and remote code execution.\ -To implement this analytic, ensure that you are ingesting Windows Security Event Logs with EventCode 4698 enabled.
Additionally, the Windows TA (Technology Add-on) is required to parse and extract the necessary information from the logs.\ -It's important to note that while uncommon, legitimate applications may create and delete scheduled tasks within a short duration. Analysts should filter the results based on the specific context and environment to reduce false positives.\ +explanation = The following analytic utilizes Windows Security EventCode 4698, "A scheduled task was created," and EventCode 4699, "A scheduled task was deleted," to identify scheduled tasks that are created and deleted within a short time frame of less than 30 seconds. This behavior is indicative of a potential lateral movement attack where the Task Scheduler is abused to achieve code execution. Both red teams and adversaries may exploit the Task Scheduler for lateral movement and remote code execution. \ +To implement this analytic, ensure that you are ingesting Windows Security Event Logs with EventCode 4698 enabled. Additionally, the Windows TA (Technology Add-on) is required to parse and extract the necessary information from the logs. \ +It's important to note that while uncommon, legitimate applications may create and delete scheduled tasks within a short duration. Analysts should filter the results based on the specific context and environment to reduce false positives. \ Identifying short-lived scheduled tasks is valuable for a SOC as it can indicate malicious activities attempting to move laterally or execute unauthorized code on Windows systems. By detecting and investigating these events, security analysts can respond promptly to prevent further compromise and mitigate potential risks. The impact of a true positive could range from unauthorized access to data exfiltration or the execution of malicious payloads. how_to_implement = To successfully implement this search, you need to be ingesting Windows Security Event Logs with 4698 EventCode enabled. The Windows TA is also required. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.005"], "nist": ["DE.CM"]} @@ -10312,7 +10312,7 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = This search, detects execution of suspicious bash commands from various commonly leveraged bash scripts like (AutoSUID, LinEnum, LinPeas) to perform discovery of possible paths of privilege execution, password files, vulnerable directories, executables and file permissions on a Linux host.\ +explanation = This search, detects execution of suspicious bash commands from various commonly leveraged bash scripts like (AutoSUID, LinEnum, LinPeas) to perform discovery of possible paths of privilege execution, password files, vulnerable directories, executables and file permissions on a Linux host. \ The search logic specifically looks for high number of distinct commands run in a short period of time. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. 
The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1059.004"], "nist": ["DE.CM"]} @@ -10393,13 +10393,13 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a properly list (.plist) file. In the instance of Silver Sparrow, the following commands were executed:\ -* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist\ +explanation = The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a property list (.plist) file. In the instance of Silver Sparrow, the following commands were executed: \ +* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist \ Upon triage, capture the property list file being written to disk and review for further indicators. Contain the endpoint and triage further. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1543.001", "T1543"], "nist": ["DE.CM"]} @@ -10410,13 +10410,13 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a properly list (.plist) file.
In the instance of Silver Sparrow, the following commands were executed:\ -* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist\ +explanation = The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a property list (.plist) file. In the instance of Silver Sparrow, the following commands were executed: \ +* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist \ Upon triage, capture the property list file being written to disk and review for further indicators. Contain the endpoint and triage further. how_to_implement = OSQuery must be installed and configured to pick up process events (info at https://osquery.io) as well as using the Splunk OSQuery Add-on https://splunkbase.splunk.com/app/4402. Modify the macro and validate fields are correct. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1543.001", "T1543"], "nist": ["DE.CM"]} @@ -10527,7 +10527,7 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic, "Suspicious Scheduled Task from Public Directory", detects the registration of scheduled tasks aimed to execute a binary or script from public directories, a behavior often associated with malware deployment. It utilizes the Sysmon Event ID 1 data source, searching for instances where schtasks.exe is connected with the directories users\public, \programdata\, or \windows\temp and involves the /create command.\ +explanation = The following analytic, "Suspicious Scheduled Task from Public Directory", detects the registration of scheduled tasks aimed to execute a binary or script from public directories, a behavior often associated with malware deployment. It utilizes the Sysmon Event ID 1 data source, searching for instances where schtasks.exe is connected with the directories users\public, \programdata\, or \windows\temp and involves the /create command. \ The registration of such scheduled tasks in public directories could suggest that an attacker is trying to maintain persistence or execute malicious scripts. If confirmed as a true positive, this could lead to data compromise, unauthorized access, and potential lateral movement within the network. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed.
To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.005", "T1053"], "nist": ["DE.AE"]} @@ -10628,8 +10628,8 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = This search looks for system processes that typically execute from `C:\Windows\System32\` or `C:\Windows\SysWOW64`. This may indicate a malicious process that is trying to hide as a legitimate process.\ -This detection utilizes a lookup that is deduped `system32` and `syswow64` directories from Server 2016 and Windows 10.\ +explanation = This search looks for system processes that typically execute from `C:\Windows\System32\` or `C:\Windows\SysWOW64`. This may indicate a malicious process that is trying to hide as a legitimate process. \ +This detection utilizes a lookup that is deduped `system32` and `syswow64` directories from Server 2016 and Windows 10. \ During triage, review the parallel processes - what process moved the native Windows binary? identify any artifacts on disk and review. If a remote destination is contacted, what is the reputation? how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1036", "T1036.003"], "nist": ["DE.AE"]} @@ -10730,9 +10730,9 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies the behavior of AMSI being tampered with. Implemented natively in many frameworks, the command will look similar to `SEtValuE($Null,(New-OBJEct COLlECtionS.GenerIC.HAshSEt{[StrINg]))}$ReF=[ReF].AsSeMbLY.GeTTyPe("System.Management.Automation.Amsi"+"Utils")` taken from Powershell-Empire.\ +explanation = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. 
Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies the behavior of AMSI being tampered with. Implemented natively in many frameworks, the command will look similar to `SEtValuE($Null,(New-OBJEct COLlECtionS.GenerIC.HAshSEt{[StrINg]))}$ReF=[ReF].AsSeMbLY.GeTTyPe("System.Management.Automation.Amsi"+"Utils")` taken from Powershell-Empire. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1562", "T1059.001", "T1059"], "nist": ["DE.CM"]} @@ -10753,7 +10753,7 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following hunting analytic leverages Kerberos Event 4769, A Kerberos service ticket was requested, to identify a potential kerberoasting attack against Active Directory networks. Kerberoasting allows an adversary to request kerberos tickets for domain accounts typically used as service accounts and attempt to crack them offline allowing them to obtain privileged access to the domain.\ +explanation = The following hunting analytic leverages Kerberos Event 4769, A Kerberos service ticket was requested, to identify a potential kerberoasting attack against Active Directory networks. Kerberoasting allows an adversary to request kerberos tickets for domain accounts typically used as service accounts and attempt to crack them offline allowing them to obtain privileged access to the domain. \ The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number service ticket requests. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. how_to_implement = To successfully implement this search, you need to be ingesting Domain Controller and Kerberos events. The Advanced Security Audit policy setting `Audit Kerberos Authentication Service` within `Account Logon` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1558", "T1558.003"], "nist": ["DE.AE"]} @@ -11644,8 +11644,8 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies the use of Windows Curl.exe downloading a file to a suspicious location.\ --O or --output is used when a file is to be downloaded and placed in a specified location.\ +explanation = The following analytic identifies the use of Windows Curl.exe downloading a file to a suspicious location. \ +-O or --output is used when a file is to be downloaded and placed in a specified location. \ During triage, review parallel processes for further behavior. In addition, identify if the download was successful. 
If a file was downloaded, capture and analyze it. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Command and Control"], "mitre_attack": ["T1105"], "nist": ["DE.CM"]} @@ -11656,13 +11656,13 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies the use of Windows Curl.exe uploading a file to a remote destination.\ -`-T` or `--upload-file` is used when a file is to be uploaded to a remotge destination.\ -\ -`-d` or `--data` POST is the HTTP method that was invented to send data to a receiving web application, and it is, for example, how most common HTML forms on the web work.\ -\ -HTTP multipart formposts are done with `-F`, but this appears to not be compatible with the Windows version of Curl. Will update if identified adversary tradecraft.\ -\ +explanation = The following analytic identifies the use of Windows Curl.exe uploading a file to a remote destination. \ +`-T` or `--upload-file` is used when a file is to be uploaded to a remote destination. \ + \ +`-d` or `--data` sends an HTTP POST, the method that was invented to send data to a receiving web application, and it is, for example, how most common HTML forms on the web work. \ + \ +HTTP multipart form posts are done with `-F`, but this appears not to be compatible with the Windows version of Curl. This will be updated if adversary tradecraft is identified. \ + \ Adversaries may use one of the three methods based on the remote destination and what they are attempting to upload (zip vs txt). During triage, review parallel processes for further behavior. In addition, identify if the upload was successful in network logs. If a file was uploaded, isolate the endpoint and review. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process.
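For reference, a minimal SPL sketch of the curl.exe download/upload command-line pattern described in the two explanations above might look like the following. This is not the shipped ESCU search; it assumes a CIM-compliant mapping to the Endpoint.Processes data model, and the argument wildcards are illustrative and should be tuned to your environment.
    | tstats count min(_time) as firstTime max(_time) as lastTime
        from datamodel=Endpoint.Processes
        where Processes.process_name=curl.exe
        (Processes.process="* -O *" OR Processes.process="*--output*" OR Processes.process="* -T *" OR Processes.process="*--upload-file*" OR Processes.process="*--data *")
        by Processes.dest Processes.user Processes.parent_process_name Processes.process_name Processes.process
    | rename Processes.* as *
Splitting the download switches (-O/--output) and the upload switches (-T/--upload-file/--data) into separate searches, as the shipped content does, keeps triage context cleaner; they are combined here only to keep the sketch short.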
annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Command and Control"], "mitre_attack": ["T1105"], "nist": ["DE.CM"]} @@ -11753,7 +11753,7 @@ providing_technologies = ["Microsoft Defender"] type = detection asset_type = Endpoint confidence = medium -explanation = This hunting analytic targets a range of security events from Microsoft Defender, focusing on the Exploit Guard and Attack Surface Reduction (ASR) features. It monitors specific Event IDs - Event IDs 1121 and 1126 indicate active blocking of unauthorized operations or dangerous network connections, whereas Event IDs 1122 and 1125 represent audit logs for similar activities. Event ID 1129 shows user overrides on blocked operations. For ASR-related activities, Event IDs 1131 and 1133 signal blocked operations, while 1132 and 1134 are audit logs. Event ID 5007 alerts on configuration changes, possibly indicating security breaches.\ +explanation = This hunting analytic targets a range of security events from Microsoft Defender, focusing on the Exploit Guard and Attack Surface Reduction (ASR) features. It monitors specific Event IDs - Event IDs 1121 and 1126 indicate active blocking of unauthorized operations or dangerous network connections, whereas Event IDs 1122 and 1125 represent audit logs for similar activities. Event ID 1129 shows user overrides on blocked operations. For ASR-related activities, Event IDs 1131 and 1133 signal blocked operations, while 1132 and 1134 are audit logs. Event ID 5007 alerts on configuration changes, possibly indicating security breaches. \ Additionally, the analytic utilizes a lookup to correlate ASR rule GUIDs with their descriptive names, enhancing understanding of the context behind these security alerts. This includes rules for blocking vulnerable drivers, restricting actions of Adobe Reader and Office applications, and protecting against various malware and unauthorized system changes. This comprehensive approach aids in assessing policy enforcement and potential security risks. how_to_implement = The following analytic requires collection of Windows Defender Operational logs in either XML or multi-line. To collect, setup a new input for the Windows Defender Operational logs. In addition, it does require a lookup that maps the ID to ASR Rule name. Note that Audit and block Event IDs have different fields, therefore the analytic will need to be modified for each type of event. The analytic can be modified to look for specific ASR rules, or to look for specific Event IDs. EventID 5007 is a change in the registry, and may be a false positive. This can be removed from the search if desired. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", "Installation"], "mitre_attack": ["T1566.001", "T1566.002", "T1059"], "nist": ["DE.AE"]} @@ -12744,9 +12744,9 @@ providing_technologies = ["Microsoft Sysmon"] type = detection asset_type = Endpoint confidence = medium -explanation = This analytic identifies instances where the Windows InstallUtil.exe binary loads `vaultcli.dll` and `Samlib.dll`. This technique can be employed to execute code that bypasses application control and captures credentials using tools like Mimikatz.\ -When `InstallUtil.exe` is used maliciously, it typically specifies the path to an executable on the filesystem. It is important to observe the parent process in such cases. 
Suspicious activity often involves being spawned from non-standard processes such as `Cmd.exe`, `PowerShell.exe`, or `Explorer.exe`.\ -Conversely, when used by developers, it is usually accompanied by multiple command-line switches/arguments and originates from Visual Studio.\ +explanation = This analytic identifies instances where the Windows InstallUtil.exe binary loads `vaultcli.dll` and `Samlib.dll`. This technique can be employed to execute code that bypasses application control and captures credentials using tools like Mimikatz. \ +When `InstallUtil.exe` is used maliciously, it typically specifies the path to an executable on the filesystem. It is important to observe the parent process in such cases. Suspicious activity often involves being spawned from non-standard processes such as `Cmd.exe`, `PowerShell.exe`, or `Explorer.exe`. \ +Conversely, when used by developers, it is usually accompanied by multiple command-line switches/arguments and originates from Visual Studio. \ During triage, review any resulting network connections, file modifications, and concurrent processes. Capture any artifacts for further review. how_to_implement = To successfully implement this search, you need to be ingesting logs with the process name, parent process, and module loads from your endpoints. If you are using Sysmon, you must have at least version 6.0.4 of the Sysmon TA. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1218.004", "T1218"], "nist": ["DE.CM"]} @@ -12767,9 +12767,9 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies the Windows InstallUtil.exe binary making a remote network connection. This technique may be used to download and execute code while bypassing application control.\ -When `InstallUtil.exe` is used in a malicous manner, the path to an executable on the filesystem is typically specified. Take note of the parent process. In a suspicious instance, this will be spawned from a non-standard process like `Cmd.exe`, `PowerShell.exe` or `Explorer.exe`.\ -If used by a developer, typically this will be found with multiple command-line switches/arguments and spawn from Visual Studio.\ +explanation = The following analytic identifies the Windows InstallUtil.exe binary making a remote network connection. This technique may be used to download and execute code while bypassing application control. \ +When `InstallUtil.exe` is used in a malicious manner, the path to an executable on the filesystem is typically specified. Take note of the parent process. In a suspicious instance, this will be spawned from a non-standard process like `Cmd.exe`, `PowerShell.exe` or `Explorer.exe`. \ +If used by a developer, typically this will be found with multiple command-line switches/arguments and spawn from Visual Studio. \ During triage, review resulting network connections, file modifications, and parallel processes. Capture any artifacts and review further. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions.
These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1218.004", "T1218"], "nist": ["DE.CM"]} @@ -12780,10 +12780,10 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies the Windows InstallUtil.exe binary. This will execute code while bypassing application control using the `/u` (uninstall) switch.\ -InstallUtil uses the functions install and uninstall within the System.Configuration.Install namespace to process .net assembly. Install function requires admin privileges, however, uninstall function can be run as an unprivileged user.\ -When `InstallUtil.exe` is used in a malicous manner, the path to an executable on the filesystem is typically specified. Take note of the parent process. In a suspicious instance, this will be spawned from a non-standard process like `Cmd.exe`, `PowerShell.exe` or `Explorer.exe`.\ -If used by a developer, typically this will be found with multiple command-line switches/arguments and spawn from Visual Studio.\ +explanation = The following analytic identifies the Windows InstallUtil.exe binary. This will execute code while bypassing application control using the `/u` (uninstall) switch. \ +InstallUtil uses the install and uninstall functions within the System.Configuration.Install namespace to process .NET assemblies. The install function requires admin privileges; however, the uninstall function can be run as an unprivileged user. \ +When `InstallUtil.exe` is used in a malicious manner, the path to an executable on the filesystem is typically specified. Take note of the parent process. In a suspicious instance, this will be spawned from a non-standard process like `Cmd.exe`, `PowerShell.exe` or `Explorer.exe`. \ +If used by a developer, typically this will be found with multiple command-line switches/arguments and spawn from Visual Studio. \ During triage, review resulting network connections, file modifications, and parallel processes. Capture any artifacts and review further. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process.
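As a rough illustration of the InstallUtil behavior described above (the `/u` switch and suspicious parent processes), a hedged SPL sketch against Endpoint.Processes could look like the following. It is not the shipped ESCU detection; the `/u` wildcard and the parent-process exclusions are examples only and should be adjusted for your environment.
    | tstats count min(_time) as firstTime max(_time) as lastTime
        from datamodel=Endpoint.Processes
        where Processes.process_name=installutil.exe
        (Processes.process="*/u*" OR Processes.process="*uninstall*")
        by Processes.dest Processes.user Processes.parent_process_name Processes.process_name Processes.process
    | rename Processes.* as *
    | search NOT parent_process_name IN ("devenv.exe", "msbuild.exe")
Excluding developer tooling in the final filter is one way to reduce the Visual Studio false positives called out in the explanation; parent-process allow-listing is a judgment call per environment.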
annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1218.004", "T1218"], "nist": ["DE.CM"]} @@ -12794,10 +12794,10 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies the Windows InstallUtil.exe binary making a remote network connection. This technique may be used to download and execute code while bypassing application control using the `/u` (uninstall) switch.\ -InstallUtil uses the functions install and uninstall within the System.Configuration.Install namespace to process .net assembly. Install function requires admin privileges, however, uninstall function can be run as an unprivileged user.\ -When `InstallUtil.exe` is used in a malicous manner, the path to an executable on the filesystem is typically specified. Take note of the parent process. In a suspicious instance, this will be spawned from a non-standard process like `Cmd.exe`, `PowerShell.exe` or `Explorer.exe`.\ -If used by a developer, typically this will be found with multiple command-line switches/arguments and spawn from Visual Studio.\ +explanation = The following analytic identifies the Windows InstallUtil.exe binary making a remote network connection. This technique may be used to download and execute code while bypassing application control using the `/u` (uninstall) switch. \ +InstallUtil uses the functions install and uninstall within the System.Configuration.Install namespace to process .net assembly. Install function requires admin privileges, however, uninstall function can be run as an unprivileged user. \ +When `InstallUtil.exe` is used in a malicous manner, the path to an executable on the filesystem is typically specified. Take note of the parent process. In a suspicious instance, this will be spawned from a non-standard process like `Cmd.exe`, `PowerShell.exe` or `Explorer.exe`. \ +If used by a developer, typically this will be found with multiple command-line switches/arguments and spawn from Visual Studio. \ During triage review resulting network connections, file modifications, and parallel processes. Capture any artifacts and review further. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1218.004", "T1218"], "nist": ["DE.CM"]} @@ -12808,9 +12808,9 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies the Windows InstallUtil.exe binary passing a HTTP request on the command-line. 
This technique may be used to download and execute code while bypassing application control.\ -When `InstallUtil.exe` is used in a malicous manner, the path to an executable on the filesystem is typically specified. Take note of the parent process. In a suspicious instance, this will be spawned from a non-standard process like `Cmd.exe`, `PowerShell.exe` or `Explorer.exe`.\ -If used by a developer, typically this will be found with multiple command-line switches/arguments and spawn from Visual Studio.\ +explanation = The following analytic identifies the Windows InstallUtil.exe binary passing an HTTP request on the command-line. This technique may be used to download and execute code while bypassing application control. \ +When `InstallUtil.exe` is used in a malicious manner, the path to an executable on the filesystem is typically specified. Take note of the parent process. In a suspicious instance, this will be spawned from a non-standard process like `Cmd.exe`, `PowerShell.exe` or `Explorer.exe`. \ +If used by a developer, typically this will be found with multiple command-line switches/arguments and spawn from Visual Studio. \ During triage, review resulting network connections, file modifications, and parallel processes. Capture any artifacts and review further. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1218.004", "T1218"], "nist": ["DE.CM"]} @@ -12881,7 +12881,7 @@ providing_technologies = null type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic leverages Event ID 4769, `A Kerberos service ticket was requested`, to identify more than 30 computer service ticket requests from one source. When a domain joined endpoint connects to other remote endpoint, it will first request a Kerberos Service Ticket with the computer name as the Service Name. A user requesting a large number of computer service tickets for different endpoints could represent malicious behavior like lateral movement, malware staging, reconnaissance, etc.\ +explanation = The following analytic leverages Event ID 4769, `A Kerberos service ticket was requested`, to identify more than 30 computer service ticket requests from one source. When a domain-joined endpoint connects to another remote endpoint, it will first request a Kerberos Service Ticket with the computer name as the Service Name. A user requesting a large number of computer service tickets for different endpoints could represent malicious behavior like lateral movement, malware staging, reconnaissance, etc. \ Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold as needed.
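To make the counting logic concrete, a simplified sketch of the Event 4769 analytic might look like the following. It is not the shipped ESCU search; the source and field names (IpAddress, ServiceName, TargetUserName) assume XML-rendered Windows Security logs and may differ depending on the add-on, and the threshold of 30 is the same arbitrary starting point mentioned above.
    source=XmlWinEventLog:Security EventCode=4769 ServiceName="*$"
    | bucket span=5m _time
    | stats dc(ServiceName) as unique_computer_services values(ServiceName) as requested_services by _time, IpAddress, TargetUserName
    | where unique_computer_services > 30
Filtering ServiceName on a trailing `$` restricts the count to computer accounts, which is what makes an unusually high number of distinct targets suggestive of lateral movement or staging.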
how_to_implement = To successfully implement this search, you need to be ingesting Domain Controller and Kerberos events. The Advanced Security Audit policy setting `Audit Kerberos Authentication Service` within `Account Logon` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", "Exploitation", "Installation"], "mitre_attack": ["T1135", "T1078"], "nist": ["DE.AE"]} @@ -13402,8 +13402,8 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = This detection identifies instances of the Windows utility `mshta.exe` being used to write files to world-writable directories, a technique commonly leveraged by adversaries to execute malicious scripts or payloads. Starting from 26 February 2024, APT29 has been observed distributing phishing attachments that lead to the download and execution of the ROOTSAW dropper via a compromised website. The ROOTSAW payload, utilizing obfuscated JavaScript, downloads a file named `invite.txt` to the `C:\Windows\Tasks` directory. This file is then decoded and decompressed to execute a malicious payload, often leveraging legitimate Windows binaries for malicious purposes, as seen with `SqlDumper.exe` in this campaign. \\ -The analytic is designed to detect the initial file write operation by `mshta.exe` to directories that are typically writable by any user, such as `C:\Windows\Tasks`, `C:\Windows\Temp`, and others. This behavior is indicative of an attempt to establish persistence, execute code, or both, as part of a multi-stage infection process. The detection focuses on the use of `mshta.exe` to write to these locations, which is a deviation from the utility's legitimate use cases and thus serves as a strong indicator of compromise (IoC). \\ +explanation = This detection identifies instances of the Windows utility `mshta.exe` being used to write files to world-writable directories, a technique commonly leveraged by adversaries to execute malicious scripts or payloads. Starting from 26 February 2024, APT29 has been observed distributing phishing attachments that lead to the download and execution of the ROOTSAW dropper via a compromised website. The ROOTSAW payload, utilizing obfuscated JavaScript, downloads a file named `invite.txt` to the `C:\Windows\Tasks` directory. This file is then decoded and decompressed to execute a malicious payload, often leveraging legitimate Windows binaries for malicious purposes, as seen with `SqlDumper.exe` in this campaign. \ \ +The analytic is designed to detect the initial file write operation by `mshta.exe` to directories that are typically writable by any user, such as `C:\Windows\Tasks`, `C:\Windows\Temp`, and others. This behavior is indicative of an attempt to establish persistence, execute code, or both, as part of a multi-stage infection process. The detection focuses on the use of `mshta.exe` to write to these locations, which is a deviation from the utility's legitimate use cases and thus serves as a strong indicator of compromise (IoC). \ \ The ROOTSAW campaign associated with APT29 utilizes a sophisticated obfuscation technique and leverages multiple stages of payloads, ultimately leading to the execution of the WINELOADER malware. This detection aims to catch the early stages of such attacks, enabling defenders to respond before full compromise occurs. how_to_implement = The analytic is designed to be run against Sysmon event logs collected from endpoints. 
The analytic requires the Sysmon event logs to be ingested into Splunk. The search focuses on EventCode 11 where the Image is `mshta.exe` and the TargetFilename is within world-writable directories such as `C:\Windows\Tasks`, `C:\Windows\Temp`, and others. The detection is designed to catch the initial file write operation by `mshta.exe` to these locations, which is indicative of an attempt to establish persistence or execute malicious code. The analytic can be modified to include additional world-writable directories as needed. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1218.005"], "nist": ["DE.CM"]} @@ -13524,8 +13524,8 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies one source endpoint failing to authenticate with 30 unique disabled domain users using the Kerberos protocol within 5 minutes. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using Kerberos to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. As attackers progress in a breach, mistakes will be made. In certain scenarios, adversaries may execute a password spraying attack against disabled users. Event 4768 is generated every time the Key Distribution Center issues a Kerberos Ticket Granting Ticket (TGT). Failure code `0x12` stands for `clients credentials have been revoked` (account disabled, expired or locked out).\ -This logic can be used for real time security monitoring as well as threat hunting exercises. This detection will only trigger on domain controllers, not on member servers or workstations.\ +explanation = The following analytic identifies one source endpoint failing to authenticate with 30 unique disabled domain users using the Kerberos protocol within 5 minutes. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using Kerberos to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. As attackers progress in a breach, mistakes will be made. In certain scenarios, adversaries may execute a password spraying attack against disabled users. Event 4768 is generated every time the Key Distribution Center issues a Kerberos Ticket Granting Ticket (TGT). Failure code `0x12` stands for `clients credentials have been revoked` (account disabled, expired or locked out). \ +This logic can be used for real time security monitoring as well as threat hunting exercises. This detection will only trigger on domain controllers, not on member servers or workstations. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source ip and attempted user accounts. how_to_implement = To successfully implement this search, you need to be ingesting Domain Controller and Kerberos events. The Advanced Security Audit policy setting `Audit Kerberos Authentication Service` within `Account Logon` needs to be enabled. 
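A minimal sketch of the 5-minute, 30-user counting logic described in the explanation above is shown here. It is not the shipped ESCU search; it assumes XML-rendered Security logs where the Kerberos failure code surfaces as Status and the requesting host as IpAddress (these names vary by add-on and may appear as Result_Code or similar), and the thresholds are the same arbitrary values discussed above.
    source=XmlWinEventLog:Security EventCode=4768 Status=0x12 TargetUserName!="*$"
    | bucket span=5m _time
    | stats dc(TargetUserName) as unique_accounts values(TargetUserName) as tried_accounts by _time, IpAddress
    | where unique_accounts > 30
Swapping the Status filter (0x12 for disabled accounts, 0x6 for non-existent accounts) yields the sibling detections described in the following stanzas.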
annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.CM"]} @@ -13536,8 +13536,8 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies one source endpoint failing to authenticate with 30 unique invalid domain users using the Kerberos protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using Kerberos to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. As attackers progress in a breach, mistakes will be made. In certain scenarios, adversaries may execute a password spraying attack using an invalid list of users. Event 4768 is generated every time the Key Distribution Center issues a Kerberos Ticket Granting Ticket (TGT). Failure code 0x6 stands for `client not found in Kerberos database` (the attempted user is not a valid domain user).\ -This logic can be used for real time security monitoring as well as threat hunting exercises. This detection will only trigger on domain controllers, not on member servers or workstations.\ +explanation = The following analytic identifies one source endpoint failing to authenticate with 30 unique invalid domain users using the Kerberos protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using Kerberos to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. As attackers progress in a breach, mistakes will be made. In certain scenarios, adversaries may execute a password spraying attack using an invalid list of users. Event 4768 is generated every time the Key Distribution Center issues a Kerberos Ticket Granting Ticket (TGT). Failure code 0x6 stands for `client not found in Kerberos database` (the attempted user is not a valid domain user). \ +This logic can be used for real time security monitoring as well as threat hunting exercises. This detection will only trigger on domain controllers, not on member servers or workstations. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source ip and attempted user accounts. how_to_implement = To successfully implement this search, you need to be ingesting Domain Controller and Kerberos events. The Advanced Security Audit policy setting `Audit Kerberos Authentication Service` within `Account Logon` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.CM"]} @@ -13548,8 +13548,8 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies one source endpoint failing to authenticate with 30 unique invalid users using the NTLM protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using NTLM to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. 
Users should test this detection and customize the arbitrary threshold when needed. As attackers progress in a breach, mistakes will be made. In certain scenarios, adversaries may execute a password spraying attack using an invalid list of users. Event 4776 is generated on the computer that is authoritative for the provided credentials. For domain accounts, the domain controller is authoritative. For local accounts, the local computer is authoritative. Error code 0xC0000064 stands for `The username you typed does not exist` (the attempted user is a legitimate domain user).\ -This logic can be used for real time security monitoring as well as threat hunting exercises. This detection will only trigger on domain controllers, not on member servers or workstations.\ +explanation = The following analytic identifies one source endpoint failing to authenticate with 30 unique invalid users using the NTLM protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using NTLM to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. As attackers progress in a breach, mistakes will be made. In certain scenarios, adversaries may execute a password spraying attack using an invalid list of users. Event 4776 is generated on the computer that is authoritative for the provided credentials. For domain accounts, the domain controller is authoritative. For local accounts, the local computer is authoritative. Error code 0xC0000064 stands for `The username you typed does not exist` (the attempted user is not a valid domain user). \ +This logic can be used for real time security monitoring as well as threat hunting exercises. This detection will only trigger on domain controllers, not on member servers or workstations. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source workstation name and attempted user accounts. how_to_implement = To successfully implement this search, you need to be ingesting Domain Controller events. The Advanced Security Audit policy setting `Audit Credential Validation` within `Account Logon` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.CM"]} @@ -13560,8 +13560,8 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies a source user failing to authenticate with 30 unique users using explicit credentials on a host. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. Event 4648 is generated when a process attempts an account logon by explicitly specifying that accounts credentials. This event generates on domain controllers, member servers, and workstations.\ -This logic can be used for real time security monitoring as well as threat hunting exercises.
This detection will trigger on the potenfially malicious host, perhaps controlled via a trojan or operated by an insider threat, from where a password spraying attack is being executed.\ +explanation = The following analytic identifies a source user failing to authenticate with 30 unique users using explicit credentials on a host. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. Event 4648 is generated when a process attempts an account logon by explicitly specifying that account's credentials. This event generates on domain controllers, member servers, and workstations. \ +This logic can be used for real time security monitoring as well as threat hunting exercises. This detection will trigger on the potentially malicious host, perhaps controlled via a trojan or operated by an insider threat, from where a password spraying attack is being executed. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source account, attempted user accounts and the endpoint where the behavior was identified. how_to_implement = To successfully implement this search, you need to be ingesting Windows Event Logs from domain controllers as well as member servers and workstations. The Advanced Security Audit policy setting `Audit Logon` within `Logon/Logoff` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.CM"]} @@ -13572,8 +13572,8 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies one source endpoint failing to authenticate with 30 unique valid users using the NTLM protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using NTLM to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. Event 4776 is generated on the computer that is authoritative for the provided credentials. For domain accounts, the domain controller is authoritative. For local accounts, the local computer is authoritative. Error code 0xC000006A means: misspelled or bad password (the attempted user is a legitimate domain user).\ -This logic can be used for real time security monitoring as well as threat hunting exercises. This detection will only trigger on domain controllers, not on member servers or workstations.\ +explanation = The following analytic identifies one source endpoint failing to authenticate with 30 unique valid users using the NTLM protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using NTLM to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. Event 4776 is generated on the computer that is authoritative for the provided credentials. For domain accounts, the domain controller is authoritative.
For local accounts, the local computer is authoritative. Error code 0xC000006A means: misspelled or bad password (the attempted user is a legitimate domain user). \ +This logic can be used for real time security monitoring as well as threat hunting exercises. This detection will only trigger on domain controllers, not on member servers or workstations. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source workstation name and attempted user accounts. how_to_implement = To successfully implement this search, you need to be ingesting Domain Controller events. The Advanced Security Audit policy setting `Audit Credential Validation` within `Account Logon` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.CM"]} @@ -13584,8 +13584,8 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies a source process name failing to authenticate with 30 uniquer users. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. Event 4625 generates on domain controllers, member servers, and workstations when an account fails to logon. Logon Type 2 describes an iteractive logon attempt.\ -This logic can be used for real time security monitoring as well as threat hunting exercises. This detection will trigger on the potenfially malicious host, perhaps controlled via a trojan or operated by an insider threat, from where a password spraying attack is being executed. This could be a domain controller as well as a member server or workstation.\ +explanation = The following analytic identifies a source process name failing to authenticate with 30 unique users. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. Event 4625 generates on domain controllers, member servers, and workstations when an account fails to log on. Logon Type 2 describes an interactive logon attempt. \ +This logic can be used for real time security monitoring as well as threat hunting exercises. This detection will trigger on the potentially malicious host, perhaps controlled via a trojan or operated by an insider threat, from where a password spraying attack is being executed. This could be a domain controller as well as a member server or workstation. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source process name, source account and attempted user accounts. how_to_implement = To successfully implement this search, you need to be ingesting Windows Event Logs from domain controllers as well as member servers and workstations. The Advanced Security Audit policy setting `Audit Logon` within `Logon/Logoff` needs to be enabled.
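For illustration, a hedged sketch of the Event 4625 / Logon Type 2 variant could look like the following. Field names such as LogonType, ProcessName, SubjectUserName, and Computer assume XML-rendered Security logs and may differ with other ingestion paths, and the 30-account threshold mirrors the arbitrary threshold discussed above; this is a sketch, not the shipped ESCU search.
    source=XmlWinEventLog:Security EventCode=4625 LogonType=2
    | bucket span=5m _time
    | stats dc(TargetUserName) as unique_accounts values(TargetUserName) as tried_accounts by _time, Computer, ProcessName, SubjectUserName
    | where unique_accounts > 30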
annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.CM"]} @@ -13596,8 +13596,8 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies one source endpoint failing to authenticate with 30 unique users using the Kerberos protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using Kerberos to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. Event 4771 is generated when the Key Distribution Center fails to issue a Kerberos Ticket Granting Ticket (TGT). Failure code 0x18 stands for `wrong password provided` (the attempted user is a legitimate domain user).\ -This logic can be used for real time security monitoring as well as threat hunting exercises. This detection will only trigger on domain controllers, not on member servers or workstations.\ +explanation = The following analytic identifies one source endpoint failing to authenticate with 30 unique users using the Kerberos protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using Kerberos to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. Event 4771 is generated when the Key Distribution Center fails to issue a Kerberos Ticket Granting Ticket (TGT). Failure code 0x18 stands for `wrong password provided` (the attempted user is a legitimate domain user). \ +This logic can be used for real time security monitoring as well as threat hunting exercises. This detection will only trigger on domain controllers, not on member servers or workstations. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source ip and attempted user accounts. how_to_implement = To successfully implement this search, you need to be ingesting Domain Controller and Kerberos events. The Advanced Security Audit policy setting `Audit Kerberos Authentication Service` within `Account Logon` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.CM"]} @@ -13608,8 +13608,8 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies a source host failing to authenticate against a remote host with 30 unique users. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. Event 4625 documents each and every failed attempt to logon to the local computer. This event generates on domain controllers, member servers, and workstations. Logon Type 3 describes an remote authentication attempt.\ -This logic can be used for real time security monitoring as well as threat hunting exercises. 
This detection will trigger on the host that is the target of the password spraying attack. This could be a domain controller as well as a member server or workstation.\ +explanation = The following analytic identifies a source host failing to authenticate against a remote host with 30 unique users. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment to obtain initial access or elevate privileges. Active Directory environments can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold when needed. Event 4625 documents each and every failed attempt to log on to the local computer. This event generates on domain controllers, member servers, and workstations. Logon Type 3 describes a remote authentication attempt. \ +This logic can be used for real time security monitoring as well as threat hunting exercises. This detection will trigger on the host that is the target of the password spraying attack. This could be a domain controller as well as a member server or workstation. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source process name, source account and attempted user accounts. how_to_implement = To successfully implement this search, you need to be ingesting Windows Event Logs from domain controllers as well as member servers and workstations. The Advanced Security Audit policy setting `Audit Logon` within `Logon/Logoff` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.CM"]} @@ -13790,11 +13790,11 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic is an enhanced version of two previous analytics that identifies common GrantedAccess permission requests and CallTrace DLLs in order to detect credential dumping.\ -GrantedAccess is the requested permissions by the SourceImage into the TargetImage.\ -\ -CallTrace Stack trace of where open process is called. Included is the DLL and the relative virtual address of the functions in the call stack right before the open process call.\ -dbgcore.dll or dbghelp.dll are two core Windows debug DLLs that have minidump functions which provide a way for applications to produce crashdump files that contain a useful subset of the entire process context.\ +explanation = The following analytic is an enhanced version of two previous analytics that identifies common GrantedAccess permission requests and CallTrace DLLs in order to detect credential dumping. \ +GrantedAccess is the set of permissions requested by the SourceImage against the TargetImage. \ + \ +CallTrace is the stack trace of where the open process call is made. Included is the DLL and the relative virtual address of the functions in the call stack right before the open process call. \ +dbgcore.dll or dbghelp.dll are two core Windows debug DLLs that have minidump functions which provide a way for applications to produce crashdump files that contain a useful subset of the entire process context. \ The idea behind using ntdll.dll is to blend in by using native api of ntdll.dll. For example in sekurlsa module there are many ntdll exported api, like RtlCopyMemory, used to execute this module which is related to lsass dumping.
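A simplified sketch of the GrantedAccess/CallTrace logic described above is shown here. It assumes Sysmon operational logs ingested as XmlWinEventLog, and the GrantedAccess values and DLL wildcards listed are illustrative examples rather than the full set used by the shipped detection; the Computer field stands in for whatever destination field your add-on maps.
    source=XmlWinEventLog:Microsoft-Windows-Sysmon/Operational EventCode=10 TargetImage="*\\lsass.exe"
    (GrantedAccess=0x1010 OR GrantedAccess=0x1410)
    (CallTrace="*dbgcore.dll*" OR CallTrace="*dbghelp.dll*" OR CallTrace="*ntdll.dll*")
    | stats count min(_time) as firstTime max(_time) as lastTime by Computer, SourceImage, TargetImage, GrantedAccess, CallTrace
Keying on both the access mask and the debug/ntdll modules in the call trace is what narrows generic lsass handle requests down to the minidump-style access patterns the explanation describes.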
how_to_implement = To successfully implement this search, you need to be ingesting logs with the process name, parent process, and command-line executions from your endpoints. If you are using Sysmon, you must have at least version 6.0.4 of the Sysmon TA. Enabling EventCode 10 TargetProcess lsass.exe is required. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1003.001", "T1003"], "nist": ["DE.CM"]} @@ -14265,7 +14265,7 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies a process attempting to delete a scheduled task SD (Security Descriptor) from within the registry path of that task. This may occur from a non-standard process running and may not come from reg.exe. This particular behavior will remove the actual Task Name from the Task Scheduler GUI and from the command-line query - schtasks.exe /query. In addition, in order to perform this action, the user context will need to be SYSTEM.\ +explanation = The following analytic identifies a process attempting to delete a scheduled task SD (Security Descriptor) from within the registry path of that task. This may occur from a non-standard process running and may not come from reg.exe. This particular behavior will remove the actual Task Name from the Task Scheduler GUI and from the command-line query - schtasks.exe /query. In addition, in order to perform this action, the user context will need to be SYSTEM. \ Identifying the deletion of a scheduled task's Security Descriptor from the registry is significant for a SOC as it may indicate malicious activity attempting to remove evidence of a scheduled task, potentially for defense evasion purposes. If a true positive is detected, it suggests an attacker with privileged access attempting to remove traces of their activities, which can have a significant impact on the security and functionality of affected systems. Immediate investigation and response are required to mitigate further risks and preserve the integrity of the environment. how_to_implement = To successfully implement this search you need to be ingesting information on process that include the name of the process responsible for the changes from your endpoints into the `Endpoint` datamodel in the `Registry` node. In addition, confirm the latest CIM App 4.20 or higher is installed and the latest TA for the endpoint product. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.005", "T1562"], "nist": ["DE.AE"]} @@ -14456,7 +14456,7 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic detects the creation of suspicious scheduled tasks in Windows, specifically tasks created using schtasks.exe with the -create flag and an XML parameter in the command-line. This technique is commonly employed by threat actors, adversaries, and red teamers to establish persistence or achieve privilege escalation on targeted hosts. Notably, malware like Trickbot and Winter-Vivern have been observed using XML files to create scheduled tasks. Monitoring and investigating this activity is crucial to mitigate potential security risks. It is important to be aware that scripts or administrators may trigger this analytic, leading to potential false positives. 
To minimize false positives, adjust the filter based on the parent process or application.\ +explanation = The following analytic detects the creation of suspicious scheduled tasks in Windows, specifically tasks created using schtasks.exe with the -create flag and an XML parameter in the command-line. This technique is commonly employed by threat actors, adversaries, and red teamers to establish persistence or achieve privilege escalation on targeted hosts. Notably, malware like Trickbot and Winter-Vivern have been observed using XML files to create scheduled tasks. Monitoring and investigating this activity is crucial to mitigate potential security risks. It is important to be aware that scripts or administrators may trigger this analytic, leading to potential false positives. To minimize false positives, adjust the filter based on the parent process or application. \ When a true positive is detected, it suggests an attacker's attempt to gain persistence or execute additional malicious payloads, potentially resulting in data theft, ransomware, or other damaging outcomes. During triage, review the source of the scheduled task, the command to be executed, and capture any relevant on-disk artifacts. Analyze concurrent processes to identify the source of the attack. This analytic enables analysts to detect and respond to potential threats early, mitigating the associated risks effectively. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.005", "T1053"], "nist": ["DE.CM"]} @@ -14477,7 +14477,7 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic detects the creation of a new task with the highest execution privilege via Schtasks.exe. This tactic is often observed in AsyncRAT attacks, where the scheduled task is used for persistence and privilege escalation. AsyncRAT sets up a scheduled task with parameters '/rl' and 'highest', triggering this technique. It's a strong indicator of potential malware or adversaries seeking to establish persistence and escalate privileges through scheduled tasks. This is crucial for a Security Operations Center (SOC) as it can prevent unauthorized system access and potential data breaches.\ +explanation = The following analytic detects the creation of a new task with the highest execution privilege via Schtasks.exe. This tactic is often observed in AsyncRAT attacks, where the scheduled task is used for persistence and privilege escalation. AsyncRAT sets up a scheduled task with parameters '/rl' and 'highest', triggering this technique. 
It's a strong indicator of potential malware or adversaries seeking to establish persistence and escalate privileges through scheduled tasks. This is crucial for a Security Operations Center (SOC) as it can prevent unauthorized system access and potential data breaches. \ The analytic works by monitoring logs for process name, parent process, and command-line executions. In the presence of the '*/rl ' and ' highest *' commands in a schtasks.exe process, an alert is triggered. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053", "T1053.005"], "nist": ["DE.CM"]} @@ -15108,9 +15108,9 @@ providing_technologies = ["Microsoft Sysmon"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies one source endpoint failing to authenticate with multiple disabled domain users using the Kerberos protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using Kerberos to obtain initial access or elevate privileges. As attackers progress in a breach, mistakes will be made. In certain scenarios, adversaries may execute a password spraying attack against disabled users. Event 4768 is generated every time the Key Distribution Center issues a Kerberos Ticket Granting Ticket (TGT). Failure code `0x12` stands for `clients credentials have been revoked` (account disabled, expired or locked out).\ -The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises.\ -This detection will only trigger on domain controllers, not on member servers or workstations.\ +explanation = The following analytic identifies one source endpoint failing to authenticate with multiple disabled domain users using the Kerberos protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using Kerberos to obtain initial access or elevate privileges. As attackers progress in a breach, mistakes will be made. In certain scenarios, adversaries may execute a password spraying attack against disabled users. Event 4768 is generated every time the Key Distribution Center issues a Kerberos Ticket Granting Ticket (TGT). Failure code `0x12` stands for `clients credentials have been revoked` (account disabled, expired or locked out). \ +The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. 
To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises. \ +This detection will only trigger on domain controllers, not on member servers or workstations. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source ip and attempted user accounts. how_to_implement = To successfully implement this search, you need to be ingesting Domain Controller and Kerberos events. The Advanced Security Audit policy setting `Audit Kerberos Authentication Service` within `Account Logon` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.AE"]} @@ -15121,9 +15121,9 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies one source endpoint failing to authenticate with multiple invalid domain users using the Kerberos protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using Kerberos to obtain initial access or elevate privileges. As attackers progress in a breach, mistakes will be made. In certain scenarios, adversaries may execute a password spraying attack using an invalid list of users. Event 4768 is generated every time the Key Distribution Center issues a Kerberos Ticket Granting Ticket (TGT). Failure code 0x6 stands for `client not found in Kerberos database` (the attempted user is not a valid domain user).\ -The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises.\ -This detection will only trigger on domain controllers, not on member servers or workstations.\ +explanation = The following analytic identifies one source endpoint failing to authenticate with multiple invalid domain users using the Kerberos protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using Kerberos to obtain initial access or elevate privileges. As attackers progress in a breach, mistakes will be made. In certain scenarios, adversaries may execute a password spraying attack using an invalid list of users. Event 4768 is generated every time the Key Distribution Center issues a Kerberos Ticket Granting Ticket (TGT). Failure code 0x6 stands for `client not found in Kerberos database` (the attempted user is not a valid domain user). \ +The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises. \ +This detection will only trigger on domain controllers, not on member servers or workstations. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source ip and attempted user accounts. 
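For readers who want the statistical logic spelled out, the following is a minimal SPL sketch of the bucket / 3-sigma approach described in the explanations above, written against Windows Security EventCode 4768 with failure code 0x12. The `wineventlog_security` macro, the field names (src_ip, user, Result_Code), the 10-minute span, and the floor of 10 unique accounts are illustrative assumptions, not the exact logic shipped in the ESCU searches.
  `wineventlog_security` EventCode=4768 Result_Code=0x12
  | bucket span=10m _time
  | stats dc(user) AS unique_accounts values(user) AS tried_accounts BY _time, src_ip
  | eventstats avg(unique_accounts) AS comp_avg stdev(unique_accounts) AS comp_std BY src_ip
  | eval upperBound = (comp_avg + comp_std * 3)
  | where unique_accounts > 10 AND unique_accounts >= upperBound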
how_to_implement = To successfully implement this search, you need to be ingesting Domain Controller and Kerberos events. The Advanced Security Audit policy setting `Audit Kerberos Authentication Service` within `Account Logon` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.AE"]} @@ -15134,9 +15134,9 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies one source endpoint failing to authenticate with multiple invalid users using the NTLM protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using NTLM to obtain initial access or elevate privileges. As attackers progress in a breach, mistakes will be made. In certain scenarios, adversaries may execute a password spraying attack using an invalid list of users. Event 4776 is generated on the computer that is authoritative for the provided credentials. For domain accounts, the domain controller is authoritative. For local accounts, the local computer is authoritative. Error code 0xC0000064 stands for `The username you typed does not exist` (the attempted user is a legitimate domain user).\ -The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises.\ -This detection will only trigger on domain controllers, not on member servers or workstations.\ +explanation = The following analytic identifies one source endpoint failing to authenticate with multiple invalid users using the NTLM protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using NTLM to obtain initial access or elevate privileges. As attackers progress in a breach, mistakes will be made. In certain scenarios, adversaries may execute a password spraying attack using an invalid list of users. Event 4776 is generated on the computer that is authoritative for the provided credentials. For domain accounts, the domain controller is authoritative. For local accounts, the local computer is authoritative. Error code 0xC0000064 stands for `The username you typed does not exist` (the attempted user is a legitimate domain user). \ +The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises. \ +This detection will only trigger on domain controllers, not on member servers or workstations. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source workstation name and attempted user accounts. how_to_implement = To successfully implement this search, you need to be ingesting Domain Controller events. The Advanced Security Audit policy setting `Audit Credential Validation' within `Account Logon` needs to be enabled. 
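As a hedged illustration of the audit-policy prerequisites repeated in the how_to_implement entries above, the relevant Advanced Security Audit subcategories can be enabled from an elevated prompt with auditpol; the subcategory names below assume an English-language Windows installation, and a GPO under Advanced Audit Policy Configuration achieves the same result at scale.
  auditpol /set /subcategory:"Kerberos Authentication Service" /success:enable /failure:enable
  auditpol /set /subcategory:"Credential Validation" /success:enable /failure:enable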
annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.AE"]} @@ -15147,9 +15147,9 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies a source user failing to authenticate with multiple users using explicit credentials on a host. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment to obtain initial access or elevate privileges. Event 4648 is generated when a process attempts an account logon by explicitly specifying that accounts credentials. This event generates on domain controllers, member servers, and workstations.\ -The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises.\ -This detection will trigger on the potenfially malicious host, perhaps controlled via a trojan or operated by an insider threat, from where a password spraying attack is being executed.\ +explanation = The following analytic identifies a source user failing to authenticate with multiple users using explicit credentials on a host. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment to obtain initial access or elevate privileges. Event 4648 is generated when a process attempts an account logon by explicitly specifying that account's credentials. This event generates on domain controllers, member servers, and workstations. \ +The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises. \ +This detection will trigger on the potentially malicious host, perhaps controlled via a trojan or operated by an insider threat, from where a password spraying attack is being executed. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source account, attempted user accounts and the endpoint where the behavior was identified. how_to_implement = To successfully implement this search, you need to be ingesting Windows Event Logs from domain controllers as well as member servers and workstations. The Advanced Security Audit policy setting `Audit Logon` within `Logon/Logoff` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.AE"]} @@ -15160,9 +15160,9 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies one source endpoint failing to authenticate with multiple valid users using the Kerberos protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using Kerberos to obtain initial access or elevate privileges.
Event 4771 is generated when the Key Distribution Center fails to issue a Kerberos Ticket Granting Ticket (TGT). Failure code 0x18 stands for `wrong password provided` (the attempted user is a legitimate domain user).\ -The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises.\ -This detection will only trigger on domain controllers, not on member servers or workstations.\ +explanation = The following analytic identifies one source endpoint failing to authenticate with multiple valid users using the Kerberos protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using Kerberos to obtain initial access or elevate privileges. Event 4771 is generated when the Key Distribution Center fails to issue a Kerberos Ticket Granting Ticket (TGT). Failure code 0x18 stands for `wrong password provided` (the attempted user is a legitimate domain user). \ +The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises. \ +This detection will only trigger on domain controllers, not on member servers or workstations. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source ip and attempted user accounts. how_to_implement = To successfully implement this search, you need to be ingesting Domain Controller and Kerberos events. The Advanced Security Audit policy setting `Audit Kerberos Authentication Service` within `Account Logon` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.AE"]} @@ -15173,9 +15173,9 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies a source process name failing to authenticate with multiple users. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment to obtain initial access or elevate privileges. Event 4625 generates on domain controllers, member servers, and workstations when an account fails to logon. Logon Type 2 describes an iteractive logon attempt.\ -The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises.\ -This detection will trigger on the potenfially malicious host, perhaps controlled via a trojan or operated by an insider threat, from where a password spraying attack is being executed. 
This could be a domain controller as well as a member server or workstation.\ +explanation = The following analytic identifies a source process name failing to authenticate with multiple users. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment to obtain initial access or elevate privileges. Event 4625 generates on domain controllers, member servers, and workstations when an account fails to logon. Logon Type 2 describes an interactive logon attempt. \ +The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises. \ +This detection will trigger on the potentially malicious host, perhaps controlled via a trojan or operated by an insider threat, from where a password spraying attack is being executed. This could be a domain controller as well as a member server or workstation. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source process name, source account and attempted user accounts. how_to_implement = To successfully implement this search, you need to be ingesting Windows Event Logs from domain controllers as well as member servers and workstations. The Advanced Security Audit policy setting `Audit Logon` within `Logon/Logoff` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.AE"]} @@ -15186,9 +15186,9 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies one source endpoint failing to authenticate with multiple valid users using the NTLM protocol. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment using NTLM to obtain initial access or elevate privileges. Event 4776 is generated on the computer that is authoritative for the provided credentials. For domain accounts, the domain controller is authoritative. For local accounts, the local computer is authoritative.
Error code 0xC000006A means: misspelled or bad password (the attempted user is a legitimate domain user). \ +The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises. \ +This detection will only trigger on domain controllers, not on member servers or workstations. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source workstation name and attempted user accounts. how_to_implement = To successfully implement this search, you need to be ingesting Domain Controller events. The Advanced Security Audit policy setting `Audit Credential Validation` within `Account Logon` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.AE"]} @@ -15199,9 +15199,9 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic identifies a source host failing to authenticate against a remote host with multiple users. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment to obtain initial access or elevate privileges. Event 4625 documents each and every failed attempt to logon to the local computer. This event generates on domain controllers, member servers, and workstations. Logon Type 3 describes an remote authentication attempt.\ -The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises.\ -This detection will trigger on the host that is the target of the password spraying attack. This could be a domain controller as well as a member server or workstation.\ +explanation = The following analytic identifies a source host failing to authenticate against a remote host with multiple users. This behavior could represent an adversary performing a Password Spraying attack against an Active Directory environment to obtain initial access or elevate privileges. Event 4625 documents each and every failed attempt to logon to the local computer. This event generates on domain controllers, member servers, and workstations. Logon Type 3 describes an remote authentication attempt. \ +The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises. \ +This detection will trigger on the host that is the target of the password spraying attack. This could be a domain controller as well as a member server or workstation. \ The analytics returned fields allow analysts to investigate the event further by providing fields like source process name, source account and attempted user accounts. 
how_to_implement = To successfully implement this search, you need to be ingesting Windows Event Logs from domain controllers as as well as member servers and workstations. The Advanced Security Audit policy setting `Audit Logon` within `Logon/Logoff` needs to be enabled. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1110.003", "T1110"], "nist": ["DE.AE"]} @@ -15262,8 +15262,8 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic is designed to detect anomalous behavior associated with the BlackLotus Campaign, a sophisticated bootkit attack reported by ESET and further investigated in a blog by Microsoft, which provided hunting queries for security analysts. The primary focus of this analytic is to identify instances of Winlogon.exe, a critical Windows process, connecting to public IP space, which is indicative of potential malicious activity.\ The BlackLotus Campaign is a bootkit-based attack that compromises system integrity by infecting the Master Boot Record (MBR) and Volume Boot Record (VBR). This malware variant can bypass traditional security measures, load before the operating system, and maintain persistence on the target system.\ -Winlogon.exe is a critical Windows process responsible for managing user logon and logoff processes. Under normal circumstances, Winlogon.exe should not be connecting to public IP addresses. However, if it does, it may indicate that the process has been compromised as part of the BlackLotus Campaign or another malicious operation.\ +explanation = The following analytic is designed to detect anomalous behavior associated with the BlackLotus Campaign, a sophisticated bootkit attack reported by ESET and further investigated in a blog by Microsoft, which provided hunting queries for security analysts. The primary focus of this analytic is to identify instances of Winlogon.exe, a critical Windows process, connecting to public IP space, which is indicative of potential malicious activity.\ The BlackLotus Campaign is a bootkit-based attack that compromises system integrity by infecting the Master Boot Record (MBR) and Volume Boot Record (VBR). This malware variant can bypass traditional security measures, load before the operating system, and maintain persistence on the target system. \ +Winlogon.exe is a critical Windows process responsible for managing user logon and logoff processes. Under normal circumstances, Winlogon.exe should not be connecting to public IP addresses. However, if it does, it may indicate that the process has been compromised as part of the BlackLotus Campaign or another malicious operation. \ This analytic monitors network connections made by Winlogon.exe and triggers an alert if it detects connections to public IP space. By identifying such anomalous behavior, security analysts can investigate further and respond swiftly to potential threats. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. 
The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1542.003"], "nist": ["DE.AE"]} @@ -15304,9 +15304,9 @@ providing_technologies = ["Carbon Black Response", "CrowdStrike Falcon", "Micros type = detection asset_type = Endpoint confidence = medium -explanation = The following query utilizes Windows Security EventCode 4698, indicating 'a scheduled task was created', to identify potentially suspicious tasks. These tasks may be registered on Windows through either schtasks.exe or TaskService, and are set up to execute a command with a native Windows shell such as PowerShell, Cmd, Wscript, or Cscript.\ -The search will return the initial and final times the task was registered, along with details like the 'Command' set to be executed, 'Task Name', 'Author', whether it's 'Enabled', and if it is 'Hidden'.\ -Schtasks.exe is typically found in C:\Windows\system32 and C:\Windows\syswow64. The DLL 'taskschd.dll' is loaded when either schtasks.exe or TaskService is launched. If this DLL is found loaded by another process, it's possible that a scheduled task is being registered within the context of that process in memory.\ +explanation = The following query utilizes Windows Security EventCode 4698, indicating 'a scheduled task was created', to identify potentially suspicious tasks. These tasks may be registered on Windows through either schtasks.exe or TaskService, and are set up to execute a command with a native Windows shell such as PowerShell, Cmd, Wscript, or Cscript. \ +The search will return the initial and final times the task was registered, along with details like the 'Command' set to be executed, 'Task Name', 'Author', whether it's 'Enabled', and if it is 'Hidden'. \ +Schtasks.exe is typically found in C:\Windows\system32 and C:\Windows\syswow64. The DLL 'taskschd.dll' is loaded when either schtasks.exe or TaskService is launched. If this DLL is found loaded by another process, it's possible that a scheduled task is being registered within the context of that process in memory. \ During triage, it's essential to identify the source of the scheduled task. Was it registered via schtasks.exe or TaskService? Review the job that was created and the command set to be executed. It's also recommended to capture and review any artifacts on disk, and identify any parallel processes within the same timeframe to locate the source. how_to_implement = To successfully implement this search, you need to be ingesting Windows Security Event Logs with 4698 EventCode enabled. The Windows TA is also required. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.005", "T1053"], "nist": ["DE.CM"]} @@ -15317,9 +15317,9 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following analytic utilizes Windows Security EventCode 4698, which indicates the creation of a scheduled task on a Windows system. 
The purpose of this query is to identify suspicious tasks that have been registered using either schtasks.exe or TaskService and involve executing a command from a user-writable file path.\ -When this analytic is triggered, it provides information such as the first and last registration time of the task, the command to be executed, the task name, author, and whether it is set as hidden or not. It is worth noting that schtasks.exe is commonly located in C:\Windows\system32 and C:\Windows\syswow64, and it loads the taskschd.dll DLL when launched. If this DLL is loaded by another process, it suggests that a scheduled task may be registered within that process's context in memory.\ -During the triage process, it is essential to identify the source of the scheduled task creation, whether it was initiated through schtasks.exe or TaskService. The analyst should review the task that was created, including the command to be executed. Additionally, any artifacts on disk related to the task should be captured and analyzed. It is also recommended to identify any parallel processes that occurred within the same timeframe to determine the source of the task creation.\ +explanation = The following analytic utilizes Windows Security EventCode 4698, which indicates the creation of a scheduled task on a Windows system. The purpose of this query is to identify suspicious tasks that have been registered using either schtasks.exe or TaskService and involve executing a command from a user-writable file path. \ +When this analytic is triggered, it provides information such as the first and last registration time of the task, the command to be executed, the task name, author, and whether it is set as hidden or not. It is worth noting that schtasks.exe is commonly located in C:\Windows\system32 and C:\Windows\syswow64, and it loads the taskschd.dll DLL when launched. If this DLL is loaded by another process, it suggests that a scheduled task may be registered within that process's context in memory. \ +During the triage process, it is essential to identify the source of the scheduled task creation, whether it was initiated through schtasks.exe or TaskService. The analyst should review the task that was created, including the command to be executed. Additionally, any artifacts on disk related to the task should be captured and analyzed. It is also recommended to identify any parallel processes that occurred within the same timeframe to determine the source of the task creation. \ By conducting this triage process, security analysts can gain insights into potentially malicious or suspicious scheduled tasks, helping them identify the source and assess the impact of the task. This analytic is valuable for a Security Operations Center (SOC) as it can detect unauthorized or suspicious activity that could indicate an attacker's attempt to establish persistence or execute unauthorized commands on the system. how_to_implement = To successfully implement this search, you need to be ingesting Windows Security Event Logs with 4698 EventCode enabled. The Windows TA is also required.
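Since several of the detections above depend on EventCode 4698 reaching Splunk, a minimal inputs.conf sketch for the collecting forwarder is shown here. The renderXml value is an assumption and should match whatever sourcetype your Windows TA configuration and these searches expect; note also that 4698 is only emitted when the 'Other Object Access Events' audit subcategory is enabled on the endpoint.
  [WinEventLog://Security]
  disabled = 0
  renderXml = true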
annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.005", "T1053"], "nist": ["DE.CM"]} @@ -15330,10 +15330,10 @@ providing_technologies = ["Microsoft Windows"] type = detection asset_type = Endpoint confidence = medium -explanation = The following hunting analytic aims to identify suspicious tasks that have been registered and executed in Windows using EventID 200 (action run) and 201 (action completed) from the Windows Task Scheduler logs. This analytic helps detect evasive techniques used to register tasks on Windows systems. It is recommended to filter the results based on the ActionName field by specifying specific paths that are not commonly used in your environment.\ -After implementing this analytic, it is important to review parallel events related to the scheduled tasks. EventID 106 will be generated when a new task is created, but it does not necessarily mean that the task has been executed. Analysts should capture any files on disk associated with the task and perform further analysis.\ -To implement this analytic, Task Scheduler logs must be collected. This can be done by adding a stanza for [WinEventLog://Microsoft-Windows-TaskScheduler/Operational] in the inputs.conf file and setting renderXml=false. It is worth noting that not translating the logs into XML may require specific extraction of items from the Message field.\ -False positives are expected with this analytic, so it is important to filter the results based on the paths or specific keywords of interest in the ActionName field to reduce noise.\ +explanation = The following hunting analytic aims to identify suspicious tasks that have been registered and executed in Windows using EventID 200 (action run) and 201 (action completed) from the Windows Task Scheduler logs. This analytic helps detect evasive techniques used to register tasks on Windows systems. It is recommended to filter the results based on the ActionName field by specifying specific paths that are not commonly used in your environment. \ +After implementing this analytic, it is important to review parallel events related to the scheduled tasks. EventID 106 will be generated when a new task is created, but it does not necessarily mean that the task has been executed. Analysts should capture any files on disk associated with the task and perform further analysis. \ +To implement this analytic, Task Scheduler logs must be collected. This can be done by adding a stanza for [WinEventLog://Microsoft-Windows-TaskScheduler/Operational] in the inputs.conf file and setting renderXml=false. It is worth noting that not translating the logs into XML may require specific extraction of items from the Message field. \ +False positives are expected with this analytic, so it is important to filter the results based on the paths or specific keywords of interest in the ActionName field to reduce noise. \ Identifying and analyzing scheduled tasks that have been executed is crucial for a Security Operations Center (SOC) as it helps detect potentially malicious or unauthorized activities on Windows systems. By capturing and investigating the associated events, analysts can uncover signs of persistence mechanisms, unauthorized code execution, or suspicious behaviors. The impact of a true positive could range from unauthorized access to data exfiltration or the execution of malicious payloads. how_to_implement = Task Scheduler logs are required to be collected. 
Enable logging with inputs.conf by adding a stanza for [WinEventLog://Microsoft-Windows-TaskScheduler/Operational] and renderXml=false. Note, not translating it in XML may require a proper extraction of specific items in the Message. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.005"], "nist": ["DE.AE"]} @@ -15414,11 +15414,11 @@ providing_technologies = null type = detection asset_type = Endpoint confidence = medium -explanation = This analytic looks for the creation of WMI permanent event subscriptions. The following analytic identifies the use of WMI Event Subscription to establish persistence or perform privilege escalation. WMI can be used to install event filters, providers, consumers, and bindings that execute code when a defined event occurs. WMI subscription execution is proxied by the WMI Provider Host process (WmiPrvSe.exe) and thus may result in elevated SYSTEM privileges. This analytic is restricted by commonly added process execution and a path. If the volume is low enough, remove the values and flag on any new subscriptions.\ -All event subscriptions have three components\ -1. Filter - WQL Query for the events we want. EventID = 19\ -1. Consumer - An action to take upon triggering the filter. EventID = 20\ -1. Binding - Registers a filter to a consumer. EventID = 21\ +explanation = This analytic looks for the creation of WMI permanent event subscriptions. The following analytic identifies the use of WMI Event Subscription to establish persistence or perform privilege escalation. WMI can be used to install event filters, providers, consumers, and bindings that execute code when a defined event occurs. WMI subscription execution is proxied by the WMI Provider Host process (WmiPrvSe.exe) and thus may result in elevated SYSTEM privileges. This analytic is restricted by commonly added process execution and a path. If the volume is low enough, remove the values and flag on any new subscriptions. \ +All event subscriptions have three components \ +1. Filter - WQL Query for the events we want. EventID = 19 \ +1. Consumer - An action to take upon triggering the filter. EventID = 20 \ +1. Binding - Registers a filter to a consumer. EventID = 21 \ Monitor for the creation of new WMI EventFilter, EventConsumer, and FilterToConsumerBinding. It may be pertinent to review all 3 to identify the flow of execution. In addition, EventCode 4104 may assist with any other PowerShell script usage that registered the subscription. how_to_implement = To successfully implement this search, you must be collecting Sysmon data using Sysmon version 6.1 or greater and have Sysmon configured to generate alerts for WMI activity (eventID= 19, 20, 21). In addition, you must have at least version 6.0.4 of the Sysmon TA installed to properly parse the fields. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1546.003", "T1546"], "nist": ["DE.CM"]} @@ -15449,8 +15449,8 @@ providing_technologies = null type = detection asset_type = Endpoint confidence = medium -explanation = The following hunting analytic identifies the use of `wmic.exe` enumerating local groups on the endpoint.\ -Typically, by itself, is not malicious but may raise suspicion based on time of day, endpoint and username.\ +explanation = The following hunting analytic identifies the use of `wmic.exe` enumerating local groups on the endpoint. 
\ +Typically, by itself, is not malicious but may raise suspicion based on time of day, endpoint and username. \ During triage, review parallel processes and identify any further suspicious behavior. how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1069", "T1069.001"], "nist": ["DE.AE"]} @@ -15552,14 +15552,14 @@ type = detection asset_type = Endpoint confidence = medium explanation = The following analytic uses a pre trained deep learning model to detect Domain Generation Algorithm (DGA) generated domains. The model is trained independently and is then made available for download. One of the prominent indicators of a domain being DGA generated is if the domain name consists of unusual character sequences or concatenated dictionary words. Adversaries often use clever techniques to obfuscate machine generated domain names as human generated. Predicting DGA generated domain names requires analysis and building a model based on carefully chosen features. The deep learning model we have developed uses the domain name to analyze patterns of character sequences along with carefully chosen custom features to predict if a domain is DGA generated. The model takes a domain name consisting of second-level and top-level domain names as input and outputs a dga_score. Higher the dga_score, the more likely the input domain is a DGA domain. The threshold for flagging a domain as DGA is set at 0.5. -how_to_implement = Steps to deploy DGA detection model into Splunk App DSDL.\ This detection depends on the Splunk app for Data Science and Deep Learning which can be found here - https://splunkbase.splunk.com/app/4607/ and the Network Resolution datamodel which can be found here - https://splunkbase.splunk.com/app/1621/. The detection uses a pre-trained deep learning model that needs to be deployed in DSDL app. Follow the steps for deployment here - https://github.com/splunk/security_content/wiki/How-to-deploy-pre-trained-Deep-Learning-models-for-ESCU. * Download the artifacts .tar.gz file from the link `https://seal.splunkresearch.com/pretrained_dga_model_dsdl.tar.gz`\ -* Download the pretrained_dga_model_dsdl.ipynb Jupyter notebook from `https://github.com/splunk/security_content/notebooks`\ -* Login to the Jupyter Lab for pretrained_dga_model_dsdl container. 
This container should be listed on Containers page for DSDL app.\ -* Below steps need to be followed inside Jupyter lab\ -* Upload the pretrained_dga_model_dsdl.tar.gz file into `app/model/data` path using the upload option in the jupyter notebook.\ -* Untar the artifact `pretrained_dga_model_dsdl.tar.gz` using `tar -xf app/model/data/pretrained_dga_model_dsdl.tar.gz -C app/model/data`\ -* Upload `pretrained_dga_model_dsdl.pynb` into Jupyter lab notebooks folder using the upload option in Jupyter lab\ -* Save the notebook using the save option in jupyter notebook.\ +how_to_implement = Steps to deploy DGA detection model into Splunk App DSDL.\ This detection depends on the Splunk app for Data Science and Deep Learning which can be found here - https://splunkbase.splunk.com/app/4607/ and the Network Resolution datamodel which can be found here - https://splunkbase.splunk.com/app/1621/. The detection uses a pre-trained deep learning model that needs to be deployed in DSDL app. Follow the steps for deployment here - https://github.com/splunk/security_content/wiki/How-to-deploy-pre-trained-Deep-Learning-models-for-ESCU. * Download the artifacts .tar.gz file from the link `https://seal.splunkresearch.com/pretrained_dga_model_dsdl.tar.gz` \ +* Download the pretrained_dga_model_dsdl.ipynb Jupyter notebook from `https://github.com/splunk/security_content/notebooks` \ +* Login to the Jupyter Lab for pretrained_dga_model_dsdl container. This container should be listed on Containers page for DSDL app. \ +* Below steps need to be followed inside Jupyter lab \ +* Upload the pretrained_dga_model_dsdl.tar.gz file into `app/model/data` path using the upload option in the jupyter notebook. \ +* Untar the artifact `pretrained_dga_model_dsdl.tar.gz` using `tar -xf app/model/data/pretrained_dga_model_dsdl.tar.gz -C app/model/data` \ +* Upload `pretrained_dga_model_dsdl.ipynb` into Jupyter lab notebooks folder using the upload option in Jupyter lab \ +* Save the notebook using the save option in jupyter notebook.\ * Upload `pretrained_dga_model_dsdl.json` into `notebooks/data` folder. annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Command and Control"], "mitre_attack": ["T1568.002"], "nist": ["DE.AE"]} known_false_positives = False positives may be present if domain name is similar to dga generated domains. @@ -15570,14 +15570,14 @@ type = detection asset_type = Endpoint confidence = medium explanation = The following analytic uses a pre-trained deep learning model to detect DNS data exfiltration. The model is trained on the data we collected and is inferred on live data. This detection detects low throughput DNS Tunneling (data exfiltration) using features computed from past events between the same src and domain. The search uses macros from URL ToolBox app to generate features used by the model. The model is a deep learning model that accepts a DNS request as input along with a few custom features to generate a pred_is_exfiltration_proba score. The higher the pred_is_exfiltration_proba, the more likely the DNS request is data exfiltration. The threshold for flagging a request as DNS exfiltration is set at 0.5. -how_to_implement = Steps to deploy detect DNS data exfiltration model into Splunk App DSDL. This detection depends on the Splunk app for Data Science and Deep Learning which can be found here - https://splunkbase.splunk.com/app/4607/ and the Network Resolution datamodel which can be found here - https://splunkbase.splunk.com/app/1621/.
The detection uses a pre-trained deep learning model that needs to be deployed in DSDL app. Follow the steps for deployment here - `https://github.com/splunk/security_content/wiki/How-to-deploy-pre-trained-Deep-Learning-models-for-ESCU`.\ -* Download the `artifacts .tar.gz` file from the link - https://seal.splunkresearch.com/detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.tar.gz Download the `detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.ipynb` Jupyter notebook from https://github.com/splunk/security_content/notebooks\ -* Login to the Jupyter Lab assigned for detect_dns_data_exfiltration_using_pretrained_model_in_dsdl container. This container should be listed on Containers page for DSDL app.\ -* Below steps need to be followed inside Jupyter lab\ -* Upload the detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.tar.gz file into `app/model/data` path using the upload option in the jupyter notebook.\ -* Untar the artifact detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.tar.gz using `tar -xf app/model/data/detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl.tar.gz -C app/model/data`\ -* Upload detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.pynb into Jupyter lab notebooks folder using the upload option in Jupyter lab\ -* Save the notebook using the save option in jupyter notebook.\ +how_to_implement = Steps to deploy detect DNS data exfiltration model into Splunk App DSDL. This detection depends on the Splunk app for Data Science and Deep Learning which can be found here - https://splunkbase.splunk.com/app/4607/ and the Network Resolution datamodel which can be found here - https://splunkbase.splunk.com/app/1621/. The detection uses a pre-trained deep learning model that needs to be deployed in DSDL app. Follow the steps for deployment here - `https://github.com/splunk/security_content/wiki/How-to-deploy-pre-trained-Deep-Learning-models-for-ESCU`. \ +* Download the `artifacts .tar.gz` file from the link - https://seal.splunkresearch.com/detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.tar.gz Download the `detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.ipynb` Jupyter notebook from https://github.com/splunk/security_content/notebooks \ +* Login to the Jupyter Lab assigned for detect_dns_data_exfiltration_using_pretrained_model_in_dsdl container. This container should be listed on Containers page for DSDL app. \ +* Below steps need to be followed inside Jupyter lab \ +* Upload the detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.tar.gz file into `app/model/data` path using the upload option in the jupyter notebook. \ +* Untar the artifact detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.tar.gz using `tar -xf app/model/data/detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.tar.gz -C app/model/data` \ +* Upload `detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.ipynb` into Jupyter lab notebooks folder using the upload option in Jupyter lab \ +* Save the notebook using the save option in jupyter notebook. \ * Upload `detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.json` into `notebooks/data` folder. annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Actions on Objectives"], "mitre_attack": ["T1048.003"], "nist": ["DE.AE"]} known_false_positives = False positives may be present if DNS data exfiltration requests look very similar to benign DNS requests.
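If a terminal is available inside the DSDL Jupyter Lab container, the download-and-untar steps listed above can also be performed from a shell. This is only a sketch under that assumption (the documented workflow uses the Jupyter upload button, and outbound internet access from the container is not guaranteed):
  # fetch the published artifact into the container's model data path
  wget -O app/model/data/detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.tar.gz https://seal.splunkresearch.com/detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.tar.gz
  # unpack it in place, mirroring the tar command in the steps above
  tar -xf app/model/data/detect_dns_data_exfiltration_using_pretrained_model_in_dsdl.tar.gz -C app/model/data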
@@ -15588,11 +15588,11 @@ type = detection asset_type = Endpoint confidence = medium explanation = Malicious actors often abuse legitimate Dynamic DNS services to host malicious payloads or interactive Command And Control nodes. Attackers will automate domain resolution changes by routing dynamic domains to countless IP addresses to circumvent firewall blocks, block lists as well as frustrate a network defenders analytic and investigative processes. This search will look for DNS queries made from within your infrastructure to suspicious dynamic domains. -how_to_implement = First, you'll need to ingest data from your DNS operations. This can be done by ingesting logs from your server or data, collected passively by Splunk Stream or a similar solution. Specifically, data that contains the domain that is being queried and the IP of the host originating the request must be populating the `Network_Resolution` data model. This search also leverages a lookup file, `dynamic_dns_providers_default.csv`, which contains a non-exhaustive list of Dynamic DNS providers. Please consider updating the local lookup periodically by adding new domains to the list of `dynamic_dns_providers_local.csv`.\ -This search produces fields (query, answer, isDynDNS) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable event. To see the additional metadata, add the following fields, if not already present, to Incident Review. Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry):\ -* **Label:** DNS Query, **Field:** query\ -* **Label:** DNS Answer, **Field:** answer\ -* **Label:** IsDynamicDNS, **Field:** isDynDNS\ +how_to_implement = First, you'll need to ingest data from your DNS operations. This can be done by ingesting logs from your server or data, collected passively by Splunk Stream or a similar solution. Specifically, data that contains the domain that is being queried and the IP of the host originating the request must be populating the `Network_Resolution` data model. This search also leverages a lookup file, `dynamic_dns_providers_default.csv`, which contains a non-exhaustive list of Dynamic DNS providers. Please consider updating the local lookup periodically by adding new domains to the list of `dynamic_dns_providers_local.csv`. \ +This search produces fields (query, answer, isDynDNS) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable event. To see the additional metadata, add the following fields, if not already present, to Incident Review. Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry): \ +* **Label:** DNS Query, **Field:** query \ +* **Label:** DNS Answer, **Field:** answer \ +* **Label:** IsDynamicDNS, **Field:** isDynDNS \ Detailed documentation on how to create a new field within Incident Review may be found here: `https://docs.splunk.com/Documentation/ES/5.3.0/Admin/Customizenotables#Add_a_field_to_the_notable_event_details` annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Delivery"], "mitre_attack": ["T1189"], "nist": ["DE.CM"]} known_false_positives = Some users and applications may leverage Dynamic DNS to reach out to some domains on the Internet since dynamic DNS by itself is not malicious, however this activity must be verified. 
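To make the Network_Resolution plus lookup approach above concrete, here is a rough SPL sketch; the lookup input and output field names (dynamic_dns_domains, isDynDNS) and the wildcard matching behaviour are assumptions for illustration, not the exact definition of the shipped lookup files.
  | tstats summariesonly=true count min(_time) AS firstTime max(_time) AS lastTime from datamodel=Network_Resolution by DNS.src DNS.query DNS.answer
  | rename DNS.* AS *
  | lookup dynamic_dns_providers_default.csv dynamic_dns_domains AS query OUTPUTNEW isDynDNS
  | search isDynDNS=True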
@@ -15703,15 +15703,15 @@ type = detection asset_type = Endpoint confidence = medium explanation = The following analytic uses a pre trained deep learning model to detect suspicious DNS TXT records. The model is trained independently and is then made available for download. The DNS TXT records are categorized into commonly identified types like email, verification, http using regular expressions https://www.tide-project.nl/blog/wtmc2020/. The TXT records that do not match regular expressions for well known types are labeled as 1 for "unknown/suspicious" and otherwise 0 for "not suspicious". The deep learning model we have developed uses DNS TXT responses to analyze patterns of character sequences to predict if a DNS TXT is suspicious or not. The higher the pred_is_unknown_proba, the more likely the DNS TXT record is suspicious. The threshold for flagging a domain as suspicious is set at 0.5. -how_to_implement = Steps to deploy detect suspicious DNS TXT records model into Splunk App DSDL. This detection depends on the Splunk app for Data Science and Deep Learning which can be found here - `https://splunkbase.splunk.com/app/4607/` and the Network Resolution datamodel which can be found here - `https://splunkbase.splunk.com/app/1621/`. The detection uses a pre-trained deep learning model that needs to be deployed in DSDL app. Follow the steps for deployment here - `https://github.com/splunk/security_content/wiki/How-to-deploy-pre-trained-Deep-Learning-models-for-ESCU`.\ -* Download the `artifacts .tar.gz` file from the link - `https://seal.splunkresearch.com/detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl.tar.gz`.\ -* Download the `detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl.ipynb` Jupyter notebook from `https://github.com/splunk/security_content/notebooks`.\ -* Login to the Jupyter Lab assigned for `detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl` container. This container should be listed on Containers page for DSDL app.\ -* Below steps need to be followed inside Jupyter lab.\ -* Upload the `detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl.tar.gz` file into `app/model/data` path using the upload option in the jupyter notebook.\ -* Untar the artifact `detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl.tar.gz` using `tar -xf app/model/data/detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl.tar.gz -C app/model/data`.\ -* Upload detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl.ipynb` into Jupyter lab notebooks folder using the upload option in Jupyter lab.\ -* Save the notebook using the save option in Jupyter notebook.\ +how_to_implement = Steps to deploy detect suspicious DNS TXT records model into Splunk App DSDL. This detection depends on the Splunk app for Data Science and Deep Learning which can be found here - `https://splunkbase.splunk.com/app/4607/` and the Network Resolution datamodel which can be found here - `https://splunkbase.splunk.com/app/1621/`. The detection uses a pre-trained deep learning model that needs to be deployed in DSDL app. Follow the steps for deployment here - `https://github.com/splunk/security_content/wiki/How-to-deploy-pre-trained-Deep-Learning-models-for-ESCU`. \ +* Download the `artifacts .tar.gz` file from the link - `https://seal.splunkresearch.com/detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl.tar.gz`. 
\
+* Download the `detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl.ipynb` Jupyter notebook from `https://github.com/splunk/security_content/notebooks`. \
+* Log in to the Jupyter Lab assigned to the `detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl` container. This container should be listed on the Containers page of the DSDL app. \
+* The following steps need to be performed inside Jupyter Lab. \
+* Upload the `detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl.tar.gz` file into the `app/model/data` path using the upload option in the Jupyter notebook. \
+* Untar the artifact `detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl.tar.gz` using `tar -xf app/model/data/detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl.tar.gz -C app/model/data`. \
+* Upload `detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl.ipynb` into the Jupyter Lab notebooks folder using the upload option in Jupyter Lab. \
+* Save the notebook using the save option in the Jupyter notebook. \
* Upload `detect_suspicious_dns_txt_records_using_pretrained_model_in_dsdl.json` into `notebooks/data` folder.
annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Command and Control"], "mitre_attack": ["T1568.002"], "nist": ["DE.AE"]}
known_false_positives = False positives may be present if DNS TXT record contents are similar to benign DNS TXT record contents.
@@ -15772,11 +15772,11 @@ type = detection
asset_type = Endpoint
confidence = medium
explanation = This search allows you to identify DNS requests that are unusually large for the record type being requested in your environment.
-how_to_implement = To successfully implement this search, you will need to ensure that DNS data is populating the Network_Resolution data model. In addition, the Machine Learning Toolkit (MLTK) version 4.2 or greater must be installed on your search heads, along with any required dependencies. Finally, the support search "Baseline of DNS Query Length - MLTK" must be executed before this detection search, because it builds a machine-learning (ML) model over the historical data used by this search. It is important that this search is run in the same app context as the associated support search, so that the model created by the support search is available for use. You should periodically re-run the support search to rebuild the model with the latest data available in your environment.\
-This search produces fields (`query`,`query_length`,`count`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry):\
-* **Label:** DNS Query, **Field:** query\
-* **Label:** DNS Query Length, **Field:** query_length\
-* **Label:** Number of events, **Field:** count\
+how_to_implement = To successfully implement this search, you will need to ensure that DNS data is populating the Network_Resolution data model. In addition, the Machine Learning Toolkit (MLTK) version 4.2 or greater must be installed on your search heads, along with any required dependencies. Finally, the support search "Baseline of DNS Query Length - MLTK" must be executed before this detection search, because it builds a machine-learning (ML) model over the historical data used by this search.
It is important that this search is run in the same app context as the associated support search, so that the model created by the support search is available for use. You should periodically re-run the support search to rebuild the model with the latest data available in your environment. \
+This search produces fields (`query`,`query_length`,`count`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry): \
+* **Label:** DNS Query, **Field:** query \
+* **Label:** DNS Query Length, **Field:** query_length \
+* **Label:** Number of events, **Field:** count \
Detailed documentation on how to create a new field within Incident Review may be found here: `https://docs.splunk.com/Documentation/ES/5.3.0/Admin/Customizenotables#Add_a_field_to_the_notable_event_details`
annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Command and Control"], "mitre_attack": ["T1071.004", "T1071"], "nist": ["DE.AE"]}
known_false_positives = If you are seeing more results than desired, you may consider reducing the value for threshold in the search. You should also periodically re-run the support search to re-build the ML model on the latest data.
@@ -15937,9 +15937,9 @@ type = detection
asset_type = Endpoint
confidence = medium
explanation = This search uses the Machine Learning Toolkit (MLTK) to identify spikes in the number of Server Message Block (SMB) connections.
-how_to_implement = To successfully implement this search, you will need to ensure that DNS data is populating the Network_Traffic data model. In addition, the latest version of Machine Learning Toolkit (MLTK) must be installed on your search heads, along with any required dependencies. Finally, the support search "Baseline of SMB Traffic - MLTK" must be executed before this detection search, because it builds a machine-learning (ML) model over the historical data used by this search. It is important that this search is run in the same app context as the associated support search, so that the model created by the support search is available for use. You should periodically re-run the support search to rebuild the model with the latest data available in your environment.\
-This search produces a field (Number of events,count) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. This field contributes additional context to the notable. To see the additional metadata, add the following field, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry):\
-* **Label:** Number of events, **Field:** count\
+how_to_implement = To successfully implement this search, you will need to ensure that network traffic data is populating the Network_Traffic data model. In addition, the latest version of Machine Learning Toolkit (MLTK) must be installed on your search heads, along with any required dependencies. Finally, the support search "Baseline of SMB Traffic - MLTK" must be executed before this detection search, because it builds a machine-learning (ML) model over the historical data used by this search.
It is important that this search is run in the same app context as the associated support search, so that the model created by the support search is available for use. You should periodically re-run the support search to rebuild the model with the latest data available in your environment. \
+This search produces a field (Number of events, count) that is not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. This field contributes additional context to the notable. To see the additional metadata, add the following field, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry): \
+* **Label:** Number of events, **Field:** count \
Detailed documentation on how to create a new field within Incident Review is found here: `https://docs.splunk.com/Documentation/ES/5.3.0/Admin/Customizenotables#Add_a_field_to_the_notable_event_details`
annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1021.002", "T1021"], "nist": ["DE.AE"]}
known_false_positives = If you are seeing more results than desired, you may consider reducing the value of the threshold in the search. You should also periodically re-run the support search to re-build the ML model on the latest data. Please update the `smb_traffic_spike_mltk_filter` macro to filter out false positive results.
@@ -16039,8 +16039,8 @@ providing_technologies = null
type = detection
asset_type = Network
confidence = medium
-explanation = The following analytic detects potential exploitation of the critical Adobe ColdFusion vulnerability, CVE-2023-26360. This flaw, rooted in the deserialization of untrusted data, enables Unauthenticated Arbitrary File Read. Exploitation often targets specific ColdFusion paths, especially related to CKEditor's file manager.\
-Our analytic pinpoints exploitation by monitoring web requests to the "/cf_scripts/scripts/ajax/ckeditor/*" path. This focus helps differentiate malicious activity from standard ColdFusion traffic. For SOCs, detecting such attempts is vital given the vulnerability's CVSS score of 9.8, signaling its severity. Successful exploitation can lead to unauthorized data access, further attacks, or severe operational disruptions.\
+explanation = The following analytic detects potential exploitation of the critical Adobe ColdFusion vulnerability, CVE-2023-26360. This flaw, rooted in the deserialization of untrusted data, enables Unauthenticated Arbitrary File Read. Exploitation often targets specific ColdFusion paths, especially related to CKEditor's file manager. \
+Our analytic pinpoints exploitation by monitoring web requests to the "/cf_scripts/scripts/ajax/ckeditor/*" path. This focus helps differentiate malicious activity from standard ColdFusion traffic. For SOCs, detecting such attempts is vital given the vulnerability's CVSS score of 9.8, signaling its severity. Successful exploitation can lead to unauthorized data access, further attacks, or severe operational disruptions. \
If a true positive arises, it indicates an active breach attempt, potentially causing data theft, operational disruption, or reputational damage. In essence, this analytic provides a targeted approach to identify attempts exploiting a high-risk ColdFusion vulnerability. While false positives may occur from legitimate accesses, any alerts should be treated as high-priority, warranting immediate investigation to ensure security.
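To illustrate the path-based focus described above, a minimal SPL sketch over the Web data model could look like the following. This is an illustrative approximation only (the field names are standard CIM, and filtering macros and thresholds are omitted); the shipped ESCU search may differ.
| tstats count min(_time) as firstTime max(_time) as lastTime from datamodel=Web where Web.url="/cf_scripts/scripts/ajax/ckeditor/*" by Web.src Web.dest Web.url Web.http_method Web.status
| rename Web.* as *
| convert ctime(firstTime) ctime(lastTime)
Any matches should be reviewed for the requesting source, the HTTP status returned, and whether the destination is an unpatched ColdFusion host.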
how_to_implement = This detection requires the Web datamodel to be populated from a supported Technology Add-On like Splunk for Apache, Splunk for Nginx, or Splunk for Palo Alto. annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Delivery"], "mitre_attack": ["T1190"], "nist": ["DE.CM"]} @@ -16061,9 +16061,9 @@ providing_technologies = null type = detection asset_type = Web Server confidence = medium -explanation = The following analytic detects attempts to exploit the Citrix Bleed vulnerability, which can lead to the leaking of session tokens. The vulnerability, identified as CVE-2023-4966, pertains to sensitive information disclosure in NetScaler ADC and NetScaler Gateway when set up as various server configurations. The analytic specifically searches for HTTP requests with a 200 status code targeting the /oauth/idp/.well-known/openid-configuration URL endpoint. By parsing web traffic and filtering based on the aforementioned criteria along with specific user agent details, HTTP method, source and destination IPs, and the sourcetype, the analytic aims to identify potentially malicious requests that fit the profile of this exploit.\ -This behavior is essential for a Security Operations Center (SOC) to identify because if successfully exploited, attackers can gain unauthorized access, leading to a potential breach or further malicious activities within the organization's network. As the Citrix Bleed vulnerability can disclose session tokens, a successful exploit can allow attackers to impersonate legitimate users, bypassing authentication mechanisms and accessing sensitive data or systems.\ -If a true positive is confirmed, it implies that an attacker is actively exploiting the vulnerability within the organization's environment. This could lead to severe consequences, including unauthorized data access, further propagation within the network, and potential disruptions or exfiltration of critical information.\ +explanation = The following analytic detects attempts to exploit the Citrix Bleed vulnerability, which can lead to the leaking of session tokens. The vulnerability, identified as CVE-2023-4966, pertains to sensitive information disclosure in NetScaler ADC and NetScaler Gateway when set up as various server configurations. The analytic specifically searches for HTTP requests with a 200 status code targeting the /oauth/idp/.well-known/openid-configuration URL endpoint. By parsing web traffic and filtering based on the aforementioned criteria along with specific user agent details, HTTP method, source and destination IPs, and the sourcetype, the analytic aims to identify potentially malicious requests that fit the profile of this exploit. \ +This behavior is essential for a Security Operations Center (SOC) to identify because if successfully exploited, attackers can gain unauthorized access, leading to a potential breach or further malicious activities within the organization's network. As the Citrix Bleed vulnerability can disclose session tokens, a successful exploit can allow attackers to impersonate legitimate users, bypassing authentication mechanisms and accessing sensitive data or systems. \ +If a true positive is confirmed, it implies that an attacker is actively exploiting the vulnerability within the organization's environment. This could lead to severe consequences, including unauthorized data access, further propagation within the network, and potential disruptions or exfiltration of critical information. 
\ Upon flagging such activity, it's crucial for analysts to swiftly validate the alert, assess the nature and extent of the exposure, and implement necessary measures to mitigate the threat. Reviewing the details such as user agent, source, and destination IP can help in understanding the context and intent of the attack. While it's imperative to patch vulnerable systems to prevent this exploitation, early detection through this analytic provides a valuable layer of defense, enabling timely response to thwart potential breaches. how_to_implement = This detection requires the Web datamodel to be populated from a supported Technology Add-On like Splunk for Apache, Splunk for Nginx, or Splunk for Palo Alto. We recommend hunting in the environment first to understand the scope of the issue and then deploying this detection to monitor for future exploitation attempts. Limit or restrict to Citrix devices only if possible. annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Delivery"], "mitre_attack": ["T1190"], "nist": ["DE.CM"]} @@ -16074,9 +16074,9 @@ providing_technologies = null type = detection asset_type = Network confidence = medium -explanation = This analytic is designed to assist in hunting for potential exploitation attempts against Citrix ADC in relation to CVE-2023-3519. This vulnerability, identified within Citrix ADC and NetScaler Gateway, appears to be linked with SAML processing components, with an overflow issue allowing for possible memory corruption. Preliminary findings indicate that for the exploit to be viable, SAML has to be enabled. The analytic targets POST requests to certain web endpoints which have been associated with the exploitation process.\ -Given the specific nature of the vulnerability, upon deploying this analytic it is recommended to filter and narrow the focus towards your ADC assets to reduce potential noise and improve the signal of the analytic. Please note that the exploitation of this vulnerability has been reported in the wild, therefore monitoring for potential signs of exploitation should be considered high priority.\ -The search query provided examines web data for POST requests made to specific URLs associated with the exploitation of this vulnerability. It aggregates and presents data to highlight potential exploitation attempts, taking into account elements like user agent, HTTP method, URL length, source, and destination.\ +explanation = This analytic is designed to assist in hunting for potential exploitation attempts against Citrix ADC in relation to CVE-2023-3519. This vulnerability, identified within Citrix ADC and NetScaler Gateway, appears to be linked with SAML processing components, with an overflow issue allowing for possible memory corruption. Preliminary findings indicate that for the exploit to be viable, SAML has to be enabled. The analytic targets POST requests to certain web endpoints which have been associated with the exploitation process. \ +Given the specific nature of the vulnerability, upon deploying this analytic it is recommended to filter and narrow the focus towards your ADC assets to reduce potential noise and improve the signal of the analytic. Please note that the exploitation of this vulnerability has been reported in the wild, therefore monitoring for potential signs of exploitation should be considered high priority. \ +The search query provided examines web data for POST requests made to specific URLs associated with the exploitation of this vulnerability. 
It aggregates and presents data to highlight potential exploitation attempts, taking into account elements like user agent, HTTP method, URL length, source, and destination. \ Please be aware that this analytic is based on current understanding of the vulnerability, and adjustments may be required as more information becomes available. how_to_implement = This detection requires the Web datamodel to be populated from a supported Technology Add-On like Splunk for Apache, Splunk for Nginx, or Splunk for Palo Alto. annotations = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery"], "mitre_attack": ["T1190"], "nist": ["DE.AE"]} @@ -16087,9 +16087,9 @@ providing_technologies = null type = detection asset_type = Network confidence = medium -explanation = The following analytic detects a potentially malicious file upload attempt to Documentum, an enterprise content management platform, via specific suspicious URLs and the HTTP POST method. This detection occurs through pattern recognition within the datamodel=Web, focusing on URL patterns that follow "/documentum/upload.aspx?parentid=", "/documentum/upload.aspx?filename=", "/documentum/upload.aspx?uploadId=*", combined with the HTTP POST method, indicative of a file upload attempt.\ -This behavior is significant for a Security Operations Center (SOC) to identify, as it can signify a potential attack vector. Malicious actors might use this method to upload a harmful script or other exploitable content to Documentum, thereby establishing a foothold in the environment, spreading malware, or enabling further exploitation.\ -The impact of this behavior, if a true positive, can be quite significant. An attacker could compromise the Documentum application, manipulate or steal sensitive content, and potentially gain unauthorized access to other system resources. An intrusion of this nature could disrupt business operations, result in data breaches, and even damage the organization's reputation.\ +explanation = The following analytic detects a potentially malicious file upload attempt to Documentum, an enterprise content management platform, via specific suspicious URLs and the HTTP POST method. This detection occurs through pattern recognition within the datamodel=Web, focusing on URL patterns that follow "/documentum/upload.aspx?parentid=", "/documentum/upload.aspx?filename=", "/documentum/upload.aspx?uploadId=*", combined with the HTTP POST method, indicative of a file upload attempt. \ +This behavior is significant for a Security Operations Center (SOC) to identify, as it can signify a potential attack vector. Malicious actors might use this method to upload a harmful script or other exploitable content to Documentum, thereby establishing a foothold in the environment, spreading malware, or enabling further exploitation. \ +The impact of this behavior, if a true positive, can be quite significant. An attacker could compromise the Documentum application, manipulate or steal sensitive content, and potentially gain unauthorized access to other system resources. An intrusion of this nature could disrupt business operations, result in data breaches, and even damage the organization's reputation. \ However, it's important to note that false positives may occur. For example, legitimate but uncommon file uploads might match these URL patterns. It's crucial to verify any alerts generated by this analytic to ensure accurate threat detection. 
This analytic provides critical insights into potential attack attempts and assists in maintaining the integrity and security of enterprise content management systems like Documentum.
how_to_implement = Dependent upon the placement of the ShareFile application, ensure the latest Technology Add-On is enabled. This detection requires the Web datamodel to be populated from a supported Technology Add-On like Suricata, Splunk for Apache, Splunk for Nginx, or Splunk for Palo Alto. The ShareFile application is IIS based, therefore ingesting IIS logs and reviewing for the same pattern would identify this activity, successful or not.
annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Delivery"], "mitre_attack": ["T1190"], "nist": ["DE.AE"]}
@@ -16220,10 +16220,10 @@ providing_technologies = null
type = detection
asset_type = Network
confidence = medium
-explanation = CVE-2022-40684 is a Fortinet appliance auth bypass that is actively being exploited and a POC is released publicy. The POC adds a SSH key to the appliance. Note that the exploit can be used with any HTTP method (GET, POST, PUT, DELETE, etc). The REST API request failing is not an indication that an attacker was unsuccessful. Horizon3 was able to modify the admin SSH keys though a REST API request that reportedly failed. The collection /api/v2/ endpoints can be used to configure the system and modify the administrator user. Any logs found that meet the above conditions and also have a URL containing /api/v2/ should be cause for concern. Further investigation of any matching log entries can reveal any damage an attack has done. Additionally, an attacker may perform the following actions to further compromise a system Modify the admin SSH key to enable the attacker to login to the compromised system.\
-Add new local users.\
-Update networking configurations to reroute traffic.\
-Download the system configuration.\
+explanation = CVE-2022-40684 is a Fortinet appliance auth bypass that is actively being exploited and a POC has been released publicly. The POC adds an SSH key to the appliance. Note that the exploit can be used with any HTTP method (GET, POST, PUT, DELETE, etc). The REST API request failing is not an indication that an attacker was unsuccessful. Horizon3 was able to modify the admin SSH keys through a REST API request that reportedly failed. The collection /api/v2/ endpoints can be used to configure the system and modify the administrator user. Any logs found that meet the above conditions and also have a URL containing /api/v2/ should be cause for concern. Further investigation of any matching log entries can reveal any damage an attack has done. Additionally, an attacker may perform the following actions to further compromise a system: Modify the admin SSH key to enable the attacker to log in to the compromised system. \
+Add new local users. \
+Update networking configurations to reroute traffic. \
+Download the system configuration. \
annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Delivery", "Installation"], "mitre_attack": ["T1190", "T1133"], "nist": ["DE.CM"]} @@ -16234,16 +16234,16 @@ providing_technologies = null type = detection asset_type = Web Server confidence = medium -explanation = The following hunting query assists with quickly assessing CVE-2021-44228, or Log4Shell, activity mapped to the Web Datamodel. This is a combination query attempting to identify, score and dashboard. Because the Log4Shell vulnerability requires the string to be in the logs, this will work to identify the activity anywhere in the HTTP headers using _raw. Modify the first line to use the same pattern matching against other log sources. Scoring is based on a simple rubric of 0-5. 5 being the best match, and less than 5 meant to identify additional patterns that will equate to a higher total score.\ -The first jndi match identifies the standard pattern of `{jndi:`\ -jndi_fastmatch is meant to identify any jndi in the logs. The score is set low and is meant to be the "base" score used later.\ -jndi_proto is a protocol match that identifies `jndi` and one of `ldap, ldaps, rmi, dns, nis, iiop, corba, nds, http, https.`\ -all_match is a very well written regex by https://gist.github.com/Schvenn that identifies nearly all patterns of this attack behavior.\ -env works to identify environment variables in the header, meant to capture `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `env`.\ -uri_detect is string match looking for the common uri paths currently being scanned/abused in the wild.\ -keywords matches on enumerated values that, like `$ctx:loginId`, that may be found in the header used by the adversary.\ -lookup matching is meant to catch some basic obfuscation that has been identified using upper, lower and date.\ -Scoring will then occur based on any findings. The base score is meant to be 2 , created by jndi_fastmatch. Everything else is meant to increase that score.\ +explanation = The following hunting query assists with quickly assessing CVE-2021-44228, or Log4Shell, activity mapped to the Web Datamodel. This is a combination query attempting to identify, score and dashboard. Because the Log4Shell vulnerability requires the string to be in the logs, this will work to identify the activity anywhere in the HTTP headers using _raw. Modify the first line to use the same pattern matching against other log sources. Scoring is based on a simple rubric of 0-5. 5 being the best match, and less than 5 meant to identify additional patterns that will equate to a higher total score. \ +The first jndi match identifies the standard pattern of `{jndi:` \ +jndi_fastmatch is meant to identify any jndi in the logs. The score is set low and is meant to be the "base" score used later. \ +jndi_proto is a protocol match that identifies `jndi` and one of `ldap, ldaps, rmi, dns, nis, iiop, corba, nds, http, https.` \ +all_match is a very well written regex by https://gist.github.com/Schvenn that identifies nearly all patterns of this attack behavior. \ +env works to identify environment variables in the header, meant to capture `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `env`. \ +uri_detect is string match looking for the common uri paths currently being scanned/abused in the wild. \ +keywords matches on enumerated values that, like `$ctx:loginId`, that may be found in the header used by the adversary. \ +lookup matching is meant to catch some basic obfuscation that has been identified using upper, lower and date. 
\
+Scoring will then occur based on any findings. The base score is meant to be 2, created by jndi_fastmatch. Everything else is meant to increase that score. \
Finally, a simple table is created to show the scoring and the _raw field. Sort based on score or columns of interest.
how_to_implement = Out of the box, the Web datamodel is required to be pre-filled. However, testing was performed against raw httpd access logs. Change the first line to any dataset to run the regexes against.
annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Delivery", "Installation"], "mitre_attack": ["T1190", "T1133"], "nist": ["DE.AE"]}
@@ -16474,9 +16474,9 @@ providing_technologies = null
type = detection
asset_type = Web Server
confidence = medium
-explanation = The following analytic is designed to detect potential exploitation attempts against VMWare vRealize Network Insight that align with the characteristics of CVE-2023-20887. This specific vulnerability is a critical security flaw that, if exploited, could allow an attacker to execute arbitrary code on the affected system.\
-The analytic operates by monitoring web traffic, specifically HTTP POST requests, directed towards a specific URL endpoint ("/saas./resttosaasservlet"). This endpoint is known to be vulnerable and is a common target for attackers exploiting this vulnerability.\
-The behavior this analytic detects is the sending of HTTP POST requests to the vulnerable endpoint. This is a significant indicator of an attempted exploit as it is the primary method used to trigger the vulnerability. The analytic detects this behavior by analyzing web traffic data and identifying HTTP POST requests directed at the vulnerable endpoint.\
+explanation = The following analytic is designed to detect potential exploitation attempts against VMWare vRealize Network Insight that align with the characteristics of CVE-2023-20887. This specific vulnerability is a critical security flaw that, if exploited, could allow an attacker to execute arbitrary code on the affected system. \
+The analytic operates by monitoring web traffic, specifically HTTP POST requests, directed towards a specific URL endpoint ("/saas./resttosaasservlet"). This endpoint is known to be vulnerable and is a common target for attackers exploiting this vulnerability. \
+The behavior this analytic detects is the sending of HTTP POST requests to the vulnerable endpoint. This is a significant indicator of an attempted exploit as it is the primary method used to trigger the vulnerability. The analytic detects this behavior by analyzing web traffic data and identifying HTTP POST requests directed at the vulnerable endpoint. \
annotations = {"cis20": ["CIS 13"], "kill_chain_phases": ["Delivery", "Exploitation", "Installation"], "mitre_attack": ["T1133", "T1190", "T1210", "T1068"], "nist": ["DE.CM"]} @@ -16739,7 +16739,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Mauricio Velazco"}] spec_version = 3 searches = ["ESCU - AdsiSearcher Account Discovery - Rule", "ESCU - Domain Account Discovery with Dsquery - Rule", "ESCU - Domain Account Discovery With Net App - Rule", "ESCU - Domain Account Discovery with Wmic - Rule", "ESCU - Domain Controller Discovery with Nltest - Rule", "ESCU - Domain Controller Discovery with Wmic - Rule", "ESCU - Domain Group Discovery with Adsisearcher - Rule", "ESCU - Domain Group Discovery With Dsquery - Rule", "ESCU - Domain Group Discovery With Net - Rule", "ESCU - Domain Group Discovery With Wmic - Rule", "ESCU - DSQuery Domain Discovery - Rule", "ESCU - Elevated Group Discovery With Net - Rule", "ESCU - Elevated Group Discovery with PowerView - Rule", "ESCU - Elevated Group Discovery With Wmic - Rule", "ESCU - Get ADDefaultDomainPasswordPolicy with Powershell - Rule", "ESCU - Get ADDefaultDomainPasswordPolicy with Powershell Script Block - Rule", "ESCU - Get ADUser with PowerShell - Rule", "ESCU - Get ADUser with PowerShell Script Block - Rule", "ESCU - Get ADUserResultantPasswordPolicy with Powershell - Rule", "ESCU - Get ADUserResultantPasswordPolicy with Powershell Script Block - Rule", "ESCU - Get DomainPolicy with Powershell - Rule", "ESCU - Get DomainPolicy with Powershell Script Block - Rule", "ESCU - Get-DomainTrust with PowerShell - Rule", "ESCU - Get-DomainTrust with PowerShell Script Block - Rule", "ESCU - Get DomainUser with PowerShell - Rule", "ESCU - Get DomainUser with PowerShell Script Block - Rule", "ESCU - Get-ForestTrust with PowerShell - Rule", "ESCU - Get-ForestTrust with PowerShell Script Block - Rule", "ESCU - Get WMIObject Group Discovery - Rule", "ESCU - Get WMIObject Group Discovery with Script Block Logging - Rule", "ESCU - GetAdComputer with PowerShell - Rule", "ESCU - GetAdComputer with PowerShell Script Block - Rule", "ESCU - GetAdGroup with PowerShell - Rule", "ESCU - GetAdGroup with PowerShell Script Block - Rule", "ESCU - GetCurrent User with PowerShell - Rule", "ESCU - GetCurrent User with PowerShell Script Block - Rule", "ESCU - GetDomainComputer with PowerShell - Rule", "ESCU - GetDomainComputer with PowerShell Script Block - Rule", "ESCU - GetDomainController with PowerShell - Rule", "ESCU - GetDomainController with PowerShell Script Block - Rule", "ESCU - GetDomainGroup with PowerShell - Rule", "ESCU - GetDomainGroup with PowerShell Script Block - Rule", "ESCU - GetLocalUser with PowerShell - Rule", "ESCU - GetLocalUser with PowerShell Script Block - Rule", "ESCU - GetNetTcpconnection with PowerShell - Rule", "ESCU - GetNetTcpconnection with PowerShell Script Block - Rule", "ESCU - GetWmiObject Ds Computer with PowerShell - Rule", "ESCU - GetWmiObject Ds Computer with PowerShell Script Block - Rule", "ESCU - GetWmiObject Ds Group with PowerShell - Rule", "ESCU - GetWmiObject Ds Group with PowerShell Script Block - Rule", "ESCU - GetWmiObject DS User with PowerShell - Rule", "ESCU - GetWmiObject DS User with PowerShell Script Block - Rule", "ESCU - GetWmiObject User Account with PowerShell - Rule", "ESCU - GetWmiObject User Account with PowerShell Script Block - Rule", "ESCU - Local Account Discovery with Net - Rule", "ESCU - Local Account Discovery With Wmic - Rule", "ESCU - Net Localgroup Discovery - Rule", "ESCU - Network 
Connection Discovery With Arp - Rule", "ESCU - Network Connection Discovery With Net - Rule", "ESCU - Network Connection Discovery With Netstat - Rule", "ESCU - Network Discovery Using Route Windows App - Rule", "ESCU - NLTest Domain Trust Discovery - Rule", "ESCU - Password Policy Discovery with Net - Rule", "ESCU - PowerShell Get LocalGroup Discovery - Rule", "ESCU - Powershell Get LocalGroup Discovery with Script Block Logging - Rule", "ESCU - Remote System Discovery with Adsisearcher - Rule", "ESCU - Remote System Discovery with Dsquery - Rule", "ESCU - Remote System Discovery with Net - Rule", "ESCU - Remote System Discovery with Wmic - Rule", "ESCU - ServicePrincipalNames Discovery with PowerShell - Rule", "ESCU - ServicePrincipalNames Discovery with SetSPN - Rule", "ESCU - System User Discovery With Query - Rule", "ESCU - System User Discovery With Whoami - Rule", "ESCU - User Discovery With Env Vars PowerShell - Rule", "ESCU - User Discovery With Env Vars PowerShell Script Block - Rule", "ESCU - Windows AD Abnormal Object Access Activity - Rule", "ESCU - Windows AD Privileged Object Access Activity - Rule", "ESCU - Windows File Share Discovery With Powerview - Rule", "ESCU - Windows Find Domain Organizational Units with GetDomainOU - Rule", "ESCU - Windows Find Interesting ACL with FindInterestingDomainAcl - Rule", "ESCU - Windows Forest Discovery with GetForestDomain - Rule", "ESCU - Windows Get Local Admin with FindLocalAdminAccess - Rule", "ESCU - Windows Hidden Schedule Task Settings - Rule", "ESCU - Windows Lateral Tool Transfer RemCom - Rule", "ESCU - Windows Linked Policies In ADSI Discovery - Rule", "ESCU - Windows PowerView AD Access Control List Enumeration - Rule", "ESCU - Windows Root Domain linked policies Discovery - Rule", "ESCU - Windows Service Create RemComSvc - Rule", "ESCU - Windows Suspect Process With Authentication Traffic - Rule", "ESCU - Wmic Group Discovery - Rule"]
description = Monitor for activities and techniques associated with Discovery and Reconnaissance within Active Directory environments.
-narrative = Discovery consists of techniques an adversay uses to gain knowledge about an internal environment or network. These techniques provide adversaries with situational awareness and allows them to have the necessary information before deciding how to act or who/what to target next.\
+narrative = Discovery consists of techniques an adversary uses to gain knowledge about an internal environment or network. These techniques provide adversaries with situational awareness and allow them to have the necessary information before deciding how to act or who/what to target next. \
[analytic_story://Active Directory Kerberos Attacks] @@ -16751,7 +16751,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Mauricio Velazco"}] spec_version = 3 searches = ["ESCU - Disabled Kerberos Pre-Authentication Discovery With Get-ADUser - Rule", "ESCU - Disabled Kerberos Pre-Authentication Discovery With PowerView - Rule", "ESCU - Kerberoasting spn request with RC4 encryption - Rule", "ESCU - Kerberos Pre-Authentication Flag Disabled in UserAccountControl - Rule", "ESCU - Kerberos Pre-Authentication Flag Disabled with PowerShell - Rule", "ESCU - Kerberos Service Ticket Request Using RC4 Encryption - Rule", "ESCU - Kerberos TGT Request Using RC4 Encryption - Rule", "ESCU - Kerberos User Enumeration - Rule", "ESCU - Mimikatz PassTheTicket CommandLine Parameters - Rule", "ESCU - PetitPotam Suspicious Kerberos TGT Request - Rule", "ESCU - Rubeus Command Line Parameters - Rule", "ESCU - Rubeus Kerberos Ticket Exports Through Winlogon Access - Rule", "ESCU - ServicePrincipalNames Discovery with PowerShell - Rule", "ESCU - ServicePrincipalNames Discovery with SetSPN - Rule", "ESCU - Suspicious Kerberos Service Ticket Request - Rule", "ESCU - Suspicious Ticket Granting Ticket Request - Rule", "ESCU - Unknown Process Using The Kerberos Protocol - Rule", "ESCU - Unusual Number of Computer Service Tickets Requested - Rule", "ESCU - Unusual Number of Kerberos Service Tickets Requested - Rule", "ESCU - Windows Computer Account Created by Computer Account - Rule", "ESCU - Windows Computer Account Requesting Kerberos Ticket - Rule", "ESCU - Windows Computer Account With SPN - Rule", "ESCU - Windows Domain Admin Impersonation Indicator - Rule", "ESCU - Windows Get-AdComputer Unconstrained Delegation Discovery - Rule", "ESCU - Windows Kerberos Local Successful Logon - Rule", "ESCU - Windows Multiple Disabled Users Failed To Authenticate Wth Kerberos - Rule", "ESCU - Windows Multiple Invalid Users Fail To Authenticate Using Kerberos - Rule", "ESCU - Windows Multiple Users Failed To Authenticate Using Kerberos - Rule", "ESCU - Windows PowerView Constrained Delegation Discovery - Rule", "ESCU - Windows PowerView Kerberos Service Ticket Request - Rule", "ESCU - Windows PowerView SPN Discovery - Rule", "ESCU - Windows PowerView Unconstrained Delegation Discovery - Rule", "ESCU - Windows Unusual Count Of Disabled Users Failed Auth Using Kerberos - Rule", "ESCU - Windows Unusual Count Of Invalid Users Fail To Auth Using Kerberos - Rule", "ESCU - Windows Unusual Count Of Users Failed To Auth Using Kerberos - Rule"] description = Monitor for activities and techniques associated with Kerberos based attacks within with Active Directory environments. -narrative = Kerberos, initially named after Cerberus, the three-headed dog in Greek mythology, is a network authentication protocol that allows computers and users to prove their identity through a trusted third-party. This trusted third-party issues Kerberos tickets using symmetric encryption to allow users access to services and network resources based on their privilege level. Kerberos is the default authentication protocol used on Windows Active Directory networks since the introduction of Windows Server 2003. 
With Kerberos being the backbone of Windows authentication, it is commonly abused by adversaries across the different phases of a breach including initial access, privilege escalation, defense evasion, credential access, lateral movement, etc.\ +narrative = Kerberos, initially named after Cerberus, the three-headed dog in Greek mythology, is a network authentication protocol that allows computers and users to prove their identity through a trusted third-party. This trusted third-party issues Kerberos tickets using symmetric encryption to allow users access to services and network resources based on their privilege level. Kerberos is the default authentication protocol used on Windows Active Directory networks since the introduction of Windows Server 2003. With Kerberos being the backbone of Windows authentication, it is commonly abused by adversaries across the different phases of a breach including initial access, privilege escalation, defense evasion, credential access, lateral movement, etc. \ This Analytic Story groups detection use cases in which the Kerberos protocol is abused. Defenders can leverage these analytics to detect and hunt for adversaries engaging in Kerberos based attacks. [analytic_story://Active Directory Lateral Movement] @@ -16763,10 +16763,10 @@ maintainers = [{"company": "Mauricio Velazco Splunk", "email": "-", "name": "Dav spec_version = 3 searches = ["ESCU - Detect Activity Related to Pass the Hash Attacks - Rule", "ESCU - Active Directory Lateral Movement Identified - Rule", "ESCU - Detect PsExec With accepteula Flag - Rule", "ESCU - Detect Renamed PSExec - Rule", "ESCU - Executable File Written in Administrative SMB Share - Rule", "ESCU - Impacket Lateral Movement Commandline Parameters - Rule", "ESCU - Impacket Lateral Movement smbexec CommandLine Parameters - Rule", "ESCU - Impacket Lateral Movement WMIExec Commandline Parameters - Rule", "ESCU - Interactive Session on Remote Endpoint with PowerShell - Rule", "ESCU - Mmc LOLBAS Execution Process Spawn - Rule", "ESCU - Possible Lateral Movement PowerShell Spawn - Rule", "ESCU - PowerShell Invoke CIMMethod CIMSession - Rule", "ESCU - PowerShell Start or Stop Service - Rule", "ESCU - Randomly Generated Scheduled Task Name - Rule", "ESCU - Randomly Generated Windows Service Name - Rule", "ESCU - Remote Desktop Process Running On System - Rule", "ESCU - Remote Process Instantiation via DCOM and PowerShell - Rule", "ESCU - Remote Process Instantiation via DCOM and PowerShell Script Block - Rule", "ESCU - Remote Process Instantiation via WinRM and PowerShell - Rule", "ESCU - Remote Process Instantiation via WinRM and PowerShell Script Block - Rule", "ESCU - Remote Process Instantiation via WinRM and Winrs - Rule", "ESCU - Remote Process Instantiation via WMI - Rule", "ESCU - Remote Process Instantiation via WMI and PowerShell - Rule", "ESCU - Remote Process Instantiation via WMI and PowerShell Script Block - Rule", "ESCU - Scheduled Task Creation on Remote Endpoint using At - Rule", "ESCU - Scheduled Task Initiation on Remote Endpoint - Rule", "ESCU - Schtasks scheduling job on remote system - Rule", "ESCU - Services LOLBAS Execution Process Spawn - Rule", "ESCU - Short Lived Scheduled Task - Rule", "ESCU - Short Lived Windows Accounts - Rule", "ESCU - Svchost LOLBAS Execution Process Spawn - Rule", "ESCU - Unusual Number of Computer Service Tickets Requested - Rule", "ESCU - Unusual Number of Remote Endpoint Authentication Events - Rule", "ESCU - Windows Administrative Shares Accessed On Multiple Hosts - Rule", 
"ESCU - Windows Enable Win32 ScheduledJob via Registry - Rule", "ESCU - Windows Large Number of Computer Service Tickets Requested - Rule", "ESCU - Windows Local Administrator Credential Stuffing - Rule", "ESCU - Windows PowerShell Get CIMInstance Remote Computer - Rule", "ESCU - Windows PowerShell WMI Win32 ScheduledJob - Rule", "ESCU - Windows Rapid Authentication On Multiple Hosts - Rule", "ESCU - Windows RDP Connection Successful - Rule", "ESCU - Windows Remote Create Service - Rule", "ESCU - Windows Service Create with Tscon - Rule", "ESCU - Windows Service Created with Suspicious Service Path - Rule", "ESCU - Windows Service Created Within Public Path - Rule", "ESCU - Windows Service Creation on Remote Endpoint - Rule", "ESCU - Windows Service Creation Using Registry Entry - Rule", "ESCU - Windows Service Initiation on Remote Endpoint - Rule", "ESCU - Windows Special Privileged Logon On Multiple Hosts - Rule", "ESCU - WinEvent Scheduled Task Created Within Public Path - Rule", "ESCU - Wmiprsve LOLBAS Execution Process Spawn - Rule", "ESCU - Wsmprovhost LOLBAS Execution Process Spawn - Rule", "ESCU - Remote Desktop Network Traffic - Rule", "ESCU - Investigate Successful Remote Desktop Authentications - Response Task"] description = Detect and investigate tactics, techniques, and procedures around how attackers move laterally within an Active Directory environment. Since lateral movement is often a necessary step in a breach, it is important for cyber defenders to deploy detection coverage. -narrative = Once attackers gain a foothold within an enterprise, they will seek to expand their accesses and leverage techniques that facilitate lateral movement. Attackers will often spend quite a bit of time and effort moving laterally. Because lateral movement renders an attacker the most vulnerable to detection, it's an excellent focus for detection and investigation.\ -Indications of lateral movement in an Active Directory network can include the abuse of system utilities (such as `psexec.exe`), unauthorized use of remote desktop services, `file/admin$` shares, WMI, PowerShell, Service Control Manager, the DCOM protocol, WinRM or the abuse of scheduled tasks. Organizations must be extra vigilant in detecting lateral movement techniques and look for suspicious activity in and around high-value strategic network assets, such as Active Directory, which are often considered the primary target or "crown jewels" to a persistent threat actor.\ -An adversary can use lateral movement for multiple purposes, including remote execution of tools, pivoting to additional systems, obtaining access to specific information or files, access to additional credentials, exfiltrating data, or delivering a secondary effect. Adversaries may use legitimate credentials alongside inherent network and operating-system functionality to remotely connect to other systems and remain under the radar of network defenders.\ -If there is evidence of lateral movement, it is imperative for analysts to collect evidence of the associated offending hosts. For example, an attacker might leverage host A to gain access to host B. From there, the attacker may try to move laterally to host C. In this example, the analyst should gather as much information as possible from all three hosts.\ +narrative = Once attackers gain a foothold within an enterprise, they will seek to expand their accesses and leverage techniques that facilitate lateral movement. Attackers will often spend quite a bit of time and effort moving laterally. 
Because lateral movement renders an attacker the most vulnerable to detection, it's an excellent focus for detection and investigation. \ +Indications of lateral movement in an Active Directory network can include the abuse of system utilities (such as `psexec.exe`), unauthorized use of remote desktop services, `file/admin$` shares, WMI, PowerShell, Service Control Manager, the DCOM protocol, WinRM or the abuse of scheduled tasks. Organizations must be extra vigilant in detecting lateral movement techniques and look for suspicious activity in and around high-value strategic network assets, such as Active Directory, which are often considered the primary target or "crown jewels" to a persistent threat actor. \ +An adversary can use lateral movement for multiple purposes, including remote execution of tools, pivoting to additional systems, obtaining access to specific information or files, access to additional credentials, exfiltrating data, or delivering a secondary effect. Adversaries may use legitimate credentials alongside inherent network and operating-system functionality to remotely connect to other systems and remain under the radar of network defenders. \ +If there is evidence of lateral movement, it is imperative for analysts to collect evidence of the associated offending hosts. For example, an attacker might leverage host A to gain access to host B. From there, the attacker may try to move laterally to host C. In this example, the analyst should gather as much information as possible from all three hosts. \ It is also important to collect authentication logs for each host, to ensure that the offending accounts are well-documented. Analysts should account for all processes to ensure that the attackers did not install unauthorized software. 
[analytic_story://Active Directory Password Spraying] @@ -16778,8 +16778,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Mauricio Velazco"}] spec_version = 3 searches = ["ESCU - Detect Excessive Account Lockouts From Endpoint - Rule", "ESCU - Detect Excessive User Account Lockouts - Rule", "ESCU - Windows Create Local Account - Rule", "ESCU - Windows Multiple Disabled Users Failed To Authenticate Wth Kerberos - Rule", "ESCU - Windows Multiple Invalid Users Fail To Authenticate Using Kerberos - Rule", "ESCU - Windows Multiple Invalid Users Failed To Authenticate Using NTLM - Rule", "ESCU - Windows Multiple Users Fail To Authenticate Wth ExplicitCredentials - Rule", "ESCU - Windows Multiple Users Failed To Authenticate From Host Using NTLM - Rule", "ESCU - Windows Multiple Users Failed To Authenticate From Process - Rule", "ESCU - Windows Multiple Users Failed To Authenticate Using Kerberos - Rule", "ESCU - Windows Multiple Users Remotely Failed To Authenticate From Host - Rule", "ESCU - Windows Unusual Count Of Disabled Users Failed Auth Using Kerberos - Rule", "ESCU - Windows Unusual Count Of Invalid Users Fail To Auth Using Kerberos - Rule", "ESCU - Windows Unusual Count Of Invalid Users Failed To Auth Using NTLM - Rule", "ESCU - Windows Unusual Count Of Users Fail To Auth Wth ExplicitCredentials - Rule", "ESCU - Windows Unusual Count Of Users Failed To Auth Using Kerberos - Rule", "ESCU - Windows Unusual Count Of Users Failed To Authenticate From Process - Rule", "ESCU - Windows Unusual Count Of Users Failed To Authenticate Using NTLM - Rule", "ESCU - Windows Unusual Count Of Users Remotely Failed To Auth From Host - Rule"] description = Monitor for activities and techniques associated with Password Spraying attacks within Active Directory environments. -narrative = In a password spraying attack, adversaries leverage one or a small list of commonly used / popular passwords against a large volume of usernames to acquire valid account credentials. Unlike a Brute Force attack that targets a specific user or small group of users with a large number of passwords, password spraying follows the opposite aproach and increases the chances of obtaining valid credentials while avoiding account lockouts. This allows adversaries to remain undetected if the target organization does not have the proper monitoring and detection controls in place.\ -Password Spraying can be leveraged by adversaries across different stages in an attack. It can be used to obtain an iniial access to an environment but can also be used to escalate privileges when access has been already achieved. In some scenarios, this technique capitalizes on a security policy most organizations implement, password rotation. As enterprise users change their passwords, it is possible some pick predictable, seasonal passwords such as `$CompanyNameWinter`, `Summer2021`, etc.\ +narrative = In a password spraying attack, adversaries leverage one or a small list of commonly used / popular passwords against a large volume of usernames to acquire valid account credentials. Unlike a Brute Force attack that targets a specific user or small group of users with a large number of passwords, password spraying follows the opposite aproach and increases the chances of obtaining valid credentials while avoiding account lockouts. This allows adversaries to remain undetected if the target organization does not have the proper monitoring and detection controls in place. 
\
+Password Spraying can be leveraged by adversaries across different stages in an attack. It can be used to obtain initial access to an environment but can also be used to escalate privileges when access has already been achieved. In some scenarios, this technique capitalizes on a security policy most organizations implement: password rotation. As enterprise users change their passwords, it is possible some pick predictable, seasonal passwords such as `$CompanyNameWinter`, `Summer2021`, etc. \
Specifically, this Analytic Story is focused on detecting possible Password Spraying attacks against Active Directory environments leveraging Windows Event Logs in the `Account Logon` and `Logon/Logoff` Advanced Audit Policy categories. It presents 16 detection analytics which can aid defenders in identifying instances where one source user, source host or source process attempts to authenticate against a target or targets using a high or statistically unusual number of unique users. A user, host or process attempting to authenticate with multiple users is not common behavior for legitimate systems and should be monitored by security teams. Possible false positive scenarios include but are not limited to vulnerability scanners, remote administration tools, multi-user systems and misconfigured systems. These should be easily spotted when first implementing the detection and added to an allow list or lookup table. The presented detections can also be used in Threat Hunting exercises.

[analytic_story://Active Directory Privilege Escalation]
@@ -16791,8 +16791,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Mauricio Velazco"}]
spec_version = 3
searches = ["ESCU - Active Directory Privilege Escalation Identified - Rule", "ESCU - Kerberos Service Ticket Request Using RC4 Encryption - Rule", "ESCU - Rubeus Command Line Parameters - Rule", "ESCU - ServicePrincipalNames Discovery with PowerShell - Rule", "ESCU - ServicePrincipalNames Discovery with SetSPN - Rule", "ESCU - Suspicious Computer Account Name Change - Rule", "ESCU - Suspicious Kerberos Service Ticket Request - Rule", "ESCU - Suspicious Ticket Granting Ticket Request - Rule", "ESCU - Unusual Number of Computer Service Tickets Requested - Rule", "ESCU - Unusual Number of Remote Endpoint Authentication Events - Rule", "ESCU - Windows Administrative Shares Accessed On Multiple Hosts - Rule", "ESCU - Windows Admon Default Group Policy Object Modified - Rule", "ESCU - Windows Admon Group Policy Object Created - Rule", "ESCU - Windows Default Group Policy Object Modified - Rule", "ESCU - Windows Default Group Policy Object Modified with GPME - Rule", "ESCU - Windows DnsAdmins New Member Added - Rule", "ESCU - Windows Domain Admin Impersonation Indicator - Rule", "ESCU - Windows File Share Discovery With Powerview - Rule", "ESCU - Windows Findstr GPP Discovery - Rule", "ESCU - Windows Group Policy Object Created - Rule", "ESCU - Windows Large Number of Computer Service Tickets Requested - Rule", "ESCU - Windows Local Administrator Credential Stuffing - Rule", "ESCU - Windows PowerSploit GPP Discovery - Rule", "ESCU - Windows PowerView AD Access Control List Enumeration - Rule", "ESCU - Windows Rapid Authentication On Multiple Hosts - Rule", "ESCU - Windows Special Privileged Logon On Multiple Hosts - Rule"]
description = Monitor for activities and techniques associated with Privilege Escalation attacks within Active Directory environments.
-narrative = Privilege Escalation consists of techniques that adversaries use to gain higher-level permissions on a system or network. Adversaries can often enter and explore a network with unprivileged access but require elevated permissions to follow through on their objectives. Common approaches are to take advantage of system weaknesses, misconfigurations, and vulnerabilities.\ -Active Directory is a central component of most enterprise networks, providing authentication and authorization services for users, computers, and other resources. It stores sensitive information such as passwords, user accounts, and security policies, and is therefore a high-value target for attackers. Privilege escalation attacks in Active Directory typically involve exploiting vulnerabilities or misconfigurations across the network to gain elevated privileges, such as Domain Administrator access. Once an attacker has escalated their privileges and taken full control of a domain, they can easily move laterally throughout the network, access sensitive data, and carry out further attacks. Security teams should monitor for privilege escalation attacks in Active Directory to identify a breach before attackers achieve operational success.\ +narrative = Privilege Escalation consists of techniques that adversaries use to gain higher-level permissions on a system or network. Adversaries can often enter and explore a network with unprivileged access but require elevated permissions to follow through on their objectives. Common approaches are to take advantage of system weaknesses, misconfigurations, and vulnerabilities. \ +Active Directory is a central component of most enterprise networks, providing authentication and authorization services for users, computers, and other resources. It stores sensitive information such as passwords, user accounts, and security policies, and is therefore a high-value target for attackers. Privilege escalation attacks in Active Directory typically involve exploiting vulnerabilities or misconfigurations across the network to gain elevated privileges, such as Domain Administrator access. Once an attacker has escalated their privileges and taken full control of a domain, they can easily move laterally throughout the network, access sensitive data, and carry out further attacks. Security teams should monitor for privilege escalation attacks in Active Directory to identify a breach before attackers achieve operational success. \ The following analytic story groups detection opportunities that seek to identify an adversary attempting to escalate privileges in an Active Directory network. [analytic_story://Adobe ColdFusion Arbitrary Code Execution CVE-2023-29298 CVE-2023-26360] @@ -16804,7 +16804,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Adobe ColdFusion Access Control Bypass - Rule", "ESCU - Adobe ColdFusion Unauthenticated Arbitrary File Read - Rule"] description = In July 2023, a significant vulnerability, CVE-2023-29298, affecting Adobe ColdFusion was uncovered by Rapid7, shedding light on an access control bypass mechanism. This vulnerability allows attackers to access sensitive ColdFusion Administrator endpoints by exploiting a flaw in the URL path validation. Disturbingly, this flaw can be chained with another critical vulnerability, CVE-2023-26360, which has been actively exploited. The latter enables unauthorized arbitrary code execution and file reading. 
Adobe has promptly addressed these vulnerabilities, but the intricacies and potential ramifications of their combination underscore the importance of immediate action by organizations. With active exploitation in the wild and the ability to bypass established security measures, the situation is alarming. Organizations are urged to apply the updates provided by Adobe immediately, considering the active threat landscape and the severe implications of these chained vulnerabilities. -narrative = Adobe ColdFusion, a prominent application server, has been thrust into the cybersecurity spotlight due to two intertwined vulnerabilities. The first, CVE-2023-29298, identified by Rapid7 in July 2023, pertains to an access control bypass in ColdFusion's security mechanisms. This flaw allows attackers to access protected ColdFusion Administrator endpoints simply by manipulating the URL path, specifically by inserting an additional forward slash. Compounding the threat is the revelation that CVE-2023-29298 can be chained with CVE-2023-26360, another severe ColdFusion vulnerability. This latter vulnerability, which has seen active exploitation, permits unauthorized attackers to execute arbitrary code or read arbitrary files on the affected system. In practice, an attacker could exploit the access control bypass to access sensitive ColdFusion endpoints and subsequently exploit the arbitrary code execution vulnerability, broadening their control and access over the targeted system. The consequences of these vulnerabilities are manifold. Attackers can potentially login to the ColdFusion Administrator with known credentials, bruteforce their way in, leak sensitive information, or exploit other vulnerabilities in the exposed CFM and CFC files. This combination of vulnerabilities significantly heightens the risk profile for organizations using the affected versions of Adobe ColdFusion. Addressing the urgency, Adobe released fixes for these vulnerabilities in July 2023, urging organizations to update to ColdFusion 2023 GA build, ColdFusion 2021 Update 7, and ColdFusion 2018 Update 17. However, Rapid7's disclosure highlights a potential incomplete fix, suggesting that organizations should remain vigilant and proactive in their security measures.\ +narrative = Adobe ColdFusion, a prominent application server, has been thrust into the cybersecurity spotlight due to two intertwined vulnerabilities. The first, CVE-2023-29298, identified by Rapid7 in July 2023, pertains to an access control bypass in ColdFusion's security mechanisms. This flaw allows attackers to access protected ColdFusion Administrator endpoints simply by manipulating the URL path, specifically by inserting an additional forward slash. Compounding the threat is the revelation that CVE-2023-29298 can be chained with CVE-2023-26360, another severe ColdFusion vulnerability. This latter vulnerability, which has seen active exploitation, permits unauthorized attackers to execute arbitrary code or read arbitrary files on the affected system. In practice, an attacker could exploit the access control bypass to access sensitive ColdFusion endpoints and subsequently exploit the arbitrary code execution vulnerability, broadening their control and access over the targeted system. The consequences of these vulnerabilities are manifold. Attackers can potentially login to the ColdFusion Administrator with known credentials, bruteforce their way in, leak sensitive information, or exploit other vulnerabilities in the exposed CFM and CFC files. 
This combination of vulnerabilities significantly heightens the risk profile for organizations using the affected versions of Adobe ColdFusion. Addressing the urgency, Adobe released fixes for these vulnerabilities in July 2023, urging organizations to update to ColdFusion 2023 GA build, ColdFusion 2021 Update 7, and ColdFusion 2018 Update 17. However, Rapid7's disclosure highlights a potential incomplete fix, suggesting that organizations should remain vigilant and proactive in their security measures. \ In conclusion, the discovery of these vulnerabilities and their potential to be exploited in tandem presents a significant security challenge. Organizations using Adobe ColdFusion must prioritize the application of security updates, monitor their systems closely for signs of intrusion, and remain updated on any further developments related to these vulnerabilities. [analytic_story://AgentTesla] @@ -16838,19 +16838,19 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Rico Valdez"}] spec_version = 3 searches = ["ESCU - Suspicious Java Classes - Rule", "ESCU - Web Servers Executing Suspicious Processes - Rule", "ESCU - Unusually Long Content-Type Length - Rule", "ESCU - Get Notable History - Response Task", "ESCU - Investigate Suspicious Strings in HTTP Header - Response Task", "ESCU - Investigate Web POSTs From src - Response Task"] description = Detect and investigate activities--such as unusually long `Content-Type` length, suspicious java classes and web servers executing suspicious processes--consistent with attempts to exploit Apache Struts vulnerabilities. -narrative = In March of 2017, a remote code-execution vulnerability in the Jakarta Multipart parser in Apache Struts, a widely used open-source framework for creating Java web applications, was disclosed and assigned to CVE-2017-5638. About two months later, hackers exploited the flaw to carry out the world's 5th largest data breach. The target, credit giant Equifax, told investigators that it had become aware of the vulnerability two months before the attack.\ -The exploit involved manipulating the `Content-Type HTTP` header to execute commands embedded in the header.\ -This Analytic Story contains two different searches that help to identify activity that may be related to this issue. The first search looks for characteristics of the `Content-Type` header consistent with attempts to exploit the vulnerability. This should be a relatively pertinent indicator, as the `Content-Type` header is generally consistent and does not have a large degree of variation.\ -The second search looks for the execution of various commands typically entered on the command shell when an attacker first lands on a system. These commands are not generally executed on web servers during the course of day-to-day operation, but they may be used when the system is undergoing maintenance or troubleshooting.\ -First, it is helpful is to understand how often the notable event is generated, as well as the commonalities in some of these events. This may help determine whether this is a common occurrence that is of a lesser concern or a rare event that may require more extensive investigation. It can also help to understand whether the issue is restricted to a single user or system or is broader in scope.\ -When looking at the target of the behavior illustrated by the event, you should note the sensitivity of the user and or/system to help determine the potential impact. 
It is also helpful to see what other events involving the target have occurred in the recent past. This can help tie different events together and give further situational awareness regarding the target.\ -Various types of information for external systems should be reviewed and (potentially) collected if the incident is, indeed, judged to be malicious. Information like this can be useful in generating your own threat intelligence to create alerts in the future.\ -Looking at the country, responsible party, and fully qualified domain names associated with the external IP address--as well as the registration information associated with those domain names, if they are frequently visited by others--can help you answer the question of "who," in regard to the external system. Answering that can help qualify the event and may serve useful for tracking. In addition, there are various sources that can provide some reputation information on the IP address or domain name, which can assist in determining if the event is malicious in nature. Finally, determining whether or not there are other events associated with the IP address may help connect some dots or show other events that should be brought into scope.\ -Gathering various data elements on the system of interest can sometimes help quickly determine that something suspicious may be happening. Some of these items include determining who else may have recently logged into the system, whether any unusual scheduled tasks exist, whether the system is communicating on suspicious ports, whether there are modifications to sensitive registry keys, and whether there are any known vulnerabilities on the system. This information can often highlight other activity commonly seen in attack scenarios or give more information about how the system may have been targeted.\ -hen a specific service or application is targeted, it is often helpful to know the associated version to help determine whether or not it is vulnerable to a specific exploit.\ -hen it is suspected there is an attack targeting a web server, it is helpful to look at some of the behavior of the web service to see if there is evidence that the service has been compromised. Some indications of this might be network connections to external resources, the web service spawning child processes that are not associated with typical behavior, and whether the service wrote any files that might be malicious in nature.\ -In the event that a suspicious file is found, we can review more information about it to help determine if it is, in fact, malicious. Identifying the file type, any processes that have the file open, what processes created and/or modified the file, and the number of systems that may have this file can help to determine if the file is malicious. Also, determining the file hash and checking it against reputation sources, such as VirusTotal, can sometimes quickly help determine whether it is malicious in nature.\ -Often, a simple inspection of a suspect process name and path can tell you if the system has been compromised. For example, if `svchost.exe` is found running from a location other than `C:\Windows\System32`, it is likely something malicious designed to hide in plain sight when simply reviewing process names. 
Similarly, if the process itself seems legitimate, but the parent process is running from the temporary browser cache, there may be activity initiated via a compromised website the user visited.\ +narrative = In March of 2017, a remote code-execution vulnerability in the Jakarta Multipart parser in Apache Struts, a widely used open-source framework for creating Java web applications, was disclosed and assigned to CVE-2017-5638. About two months later, hackers exploited the flaw to carry out the world's 5th largest data breach. The target, credit giant Equifax, told investigators that it had become aware of the vulnerability two months before the attack. \ +The exploit involved manipulating the `Content-Type` HTTP header to execute commands embedded in the header. \ +This Analytic Story contains two different searches that help to identify activity that may be related to this issue. The first search looks for characteristics of the `Content-Type` header consistent with attempts to exploit the vulnerability. This should be a relatively pertinent indicator, as the `Content-Type` header is generally consistent and does not have a large degree of variation. \ +The second search looks for the execution of various commands typically entered on the command shell when an attacker first lands on a system. These commands are not generally executed on web servers during the course of day-to-day operation, but they may be used when the system is undergoing maintenance or troubleshooting. \ +First, it is helpful to understand how often the notable event is generated, as well as the commonalities in some of these events. This may help determine whether this is a common occurrence that is of a lesser concern or a rare event that may require more extensive investigation. It can also help to understand whether the issue is restricted to a single user or system or is broader in scope. \ +When looking at the target of the behavior illustrated by the event, you should note the sensitivity of the user and/or system to help determine the potential impact. It is also helpful to see what other events involving the target have occurred in the recent past. This can help tie different events together and give further situational awareness regarding the target. \ +Various types of information for external systems should be reviewed and (potentially) collected if the incident is, indeed, judged to be malicious. Information like this can be useful in generating your own threat intelligence to create alerts in the future. \ +Looking at the country, responsible party, and fully qualified domain names associated with the external IP address--as well as the registration information associated with those domain names, if they are frequently visited by others--can help you answer the question of "who," in regard to the external system. Answering that can help qualify the event and may prove useful for tracking. In addition, there are various sources that can provide some reputation information on the IP address or domain name, which can assist in determining if the event is malicious in nature. Finally, determining whether or not there are other events associated with the IP address may help connect some dots or show other events that should be brought into scope. \ +Gathering various data elements on the system of interest can sometimes help quickly determine that something suspicious may be happening.
Some of these items include determining who else may have recently logged into the system, whether any unusual scheduled tasks exist, whether the system is communicating on suspicious ports, whether there are modifications to sensitive registry keys, and whether there are any known vulnerabilities on the system. This information can often highlight other activity commonly seen in attack scenarios or give more information about how the system may have been targeted. \ +When a specific service or application is targeted, it is often helpful to know the associated version to help determine whether or not it is vulnerable to a specific exploit. \ +When it is suspected there is an attack targeting a web server, it is helpful to look at some of the behavior of the web service to see if there is evidence that the service has been compromised. Some indications of this might be network connections to external resources, the web service spawning child processes that are not associated with typical behavior, and whether the service wrote any files that might be malicious in nature. \ +In the event that a suspicious file is found, we can review more information about it to help determine if it is, in fact, malicious. Identifying the file type, any processes that have the file open, what processes created and/or modified the file, and the number of systems that may have this file can help to determine if the file is malicious. Also, determining the file hash and checking it against reputation sources, such as VirusTotal, can sometimes quickly help determine whether it is malicious in nature. \ +Often, a simple inspection of a suspect process name and path can tell you if the system has been compromised. For example, if `svchost.exe` is found running from a location other than `C:\Windows\System32`, it is likely something malicious designed to hide in plain sight when simply reviewing process names. Similarly, if the process itself seems legitimate, but the parent process is running from the temporary browser cache, there may be activity initiated via a compromised website the user visited. \ +It can also be very helpful to examine various behaviors of the process of interest or the parent of the process that is of interest. For example, if it turns out that the process of interest is malicious, it would be good to see if the parent to that process spawned other processes that might also be worth further scrutiny. If a process is suspect, reviewing the network connections made around the time of the event and/or if the process spawned any child processes could be helpful in determining whether it is malicious or executing a malicious script. [analytic_story://APT29 Diplomatic Deceptions with WINELOADER] @@ -16917,8 +16917,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "David Dorsey"}] spec_version = 3 searches = ["ESCU - aws detect attach to role policy - Rule", "ESCU - aws detect permanent key creation - Rule", "ESCU - aws detect role creation - Rule", "ESCU - aws detect sts assume role abuse - Rule", "ESCU - aws detect sts get session token abuse - Rule", "ESCU - AWS Investigate User Activities By AccessKeyId - Response Task", "ESCU - Get Notable History - Response Task"] description = Track when a user assumes an IAM role in another AWS account to obtain cross-account access to services and resources in that account. Accessing new roles could be an indication of malicious activity.
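As a loose sketch of the cross-account monitoring this description refers to, assuming CloudTrail data onboarded with the aws:cloudtrail sourcetype (field names follow the raw CloudTrail JSON), one can summarize which identities assume which roles and then compare the pairs against a baseline of previously seen combinations; the search below only performs the summarizing step and is not the shipped `aws detect sts assume role abuse` rule.
sourcetype=aws:cloudtrail eventName=AssumeRole eventSource="sts.amazonaws.com"
| rename requestParameters.roleArn as assumed_role, userIdentity.arn as requesting_identity
| stats earliest(_time) as first_seen latest(_time) as last_seen count by requesting_identity, assumed_role, recipientAccountId
A pairing of requesting identity and role that has never appeared in the baseline, especially toward a different recipientAccountId, is the kind of anomaly the narrative below describes.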
-narrative = Amazon Web Services (AWS) admins manage access to AWS resources and services across the enterprise using AWS's Identity and Access Management (IAM) functionality. IAM provides the ability to create and manage AWS users, groups, and roles-each with their own unique set of privileges and defined access to specific resources (such as EC2 instances, the AWS Management Console, API, or the command-line interface). Unlike conventional (human) users, IAM roles are assumable by anyone in the organization. They provide users with dynamically created temporary security credentials that expire within a set time period.\ -Herein lies the rub. In between the time between when the temporary credentials are issued and when they expire is a period of opportunity, where a user could leverage the temporary credentials to wreak havoc-spin up or remove instances, create new users, elevate privileges, and other malicious activities-throughout the environment.\ +narrative = Amazon Web Services (AWS) admins manage access to AWS resources and services across the enterprise using AWS's Identity and Access Management (IAM) functionality. IAM provides the ability to create and manage AWS users, groups, and roles, each with its own unique set of privileges and defined access to specific resources (such as EC2 instances, the AWS Management Console, API, or the command-line interface). Unlike conventional (human) users, IAM roles are assumable by anyone in the organization. They provide users with dynamically created temporary security credentials that expire within a set time period. \ +Herein lies the rub. The window between when the temporary credentials are issued and when they expire is a period of opportunity, during which a user could leverage the temporary credentials to wreak havoc throughout the environment: spinning up or removing instances, creating new users, elevating privileges, and carrying out other malicious activities. \ This Analytic Story includes searches that will help you monitor your AWS CloudTrail logs for evidence of suspicious cross-account activity. For example, while accessing multiple AWS accounts and roles may be perfectly valid behavior, it may be suspicious when an account requests privileges of an account it has not accessed in the past. After identifying suspicious activities, you can use the provided investigative searches to help you probe more deeply. [analytic_story://AWS Defense Evasion] @@ -16941,7 +16941,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Bhavin Patel"}] spec_version = 3 searches = ["ESCU - ASL AWS CreateAccessKey - Rule", "ESCU - ASL AWS IAM Delete Policy - Rule", "ESCU - ASL AWS Password Policy Changes - Rule", "ESCU - AWS Create Policy Version to allow all resources - Rule", "ESCU - AWS CreateAccessKey - Rule", "ESCU - AWS CreateLoginProfile - Rule", "ESCU - AWS IAM Assume Role Policy Brute Force - Rule", "ESCU - AWS IAM Delete Policy - Rule", "ESCU - AWS IAM Failure Group Deletion - Rule", "ESCU - AWS IAM Successful Group Deletion - Rule", "ESCU - AWS Password Policy Changes - Rule", "ESCU - AWS SetDefaultPolicyVersion - Rule", "ESCU - AWS UpdateLoginProfile - Rule"] description = This analytic story contains detections that query your AWS CloudTrail for activities related to privilege escalation. -narrative = Amazon Web Services provides a neat feature called Identity and Access Management (IAM) that enables organizations to manage various AWS services and resources in a secure way.
All IAM users have roles, groups and policies associated with them which governs and sets permissions to allow a user to access specific restrictions.\ +narrative = Amazon Web Services provides a neat feature called Identity and Access Management (IAM) that enables organizations to manage various AWS services and resources in a secure way. All IAM users have roles, groups and policies associated with them, which govern and set the permissions that define what a user is allowed to access. \ However, if these IAM policies are misconfigured and have specific combinations of weak permissions, they can allow attackers to escalate their privileges and further compromise the organization. Rhino Security Labs have published comprehensive blogs detailing various AWS escalation methods. Using this as inspiration, Splunk's research team wants to highlight how these attack vectors look in AWS CloudTrail logs and provide you with detection queries to uncover these potentially malicious events via this Analytic Story. [analytic_story://AWS Identity and Access Management Account Takeover] @@ -16986,9 +16986,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Bhavin Patel"}] spec_version = 3 searches = ["ESCU - ASL AWS Excessive Security Scanning - Rule", "ESCU - AWS Excessive Security Scanning - Rule", "ESCU - Detect API activity from users without MFA - Rule", "ESCU - Detect AWS API Activities From Unapproved Accounts - Rule", "ESCU - Detect new API calls from user roles - Rule", "ESCU - Detect Spike in AWS API Activity - Rule", "ESCU - Detect Spike in Security Group Activity - Rule", "ESCU - Get Notable History - Response Task", "ESCU - Investigate AWS User Activities by user field - Response Task"] description = Detect and investigate dormant user accounts for your AWS environment that have become active again. Because inactive and ad-hoc accounts are common attack targets, it's critical to enable governance within your environment. -narrative = It seems obvious that it is critical to monitor and control the users who have access to your cloud infrastructure. Nevertheless, it's all too common for enterprises to lose track of ad-hoc accounts, leaving their servers vulnerable to attack. In fact, this was the very oversight that led to Tesla's cryptojacking attack in February, 2018.\ -In addition to compromising the security of your data, when bad actors leverage your compute resources, it can incur monumental costs, since you will be billed for any new EC2 instances and increased bandwidth usage.\ -Fortunately, you can leverage Amazon Web Services (AWS) CloudTrail--a tool that helps you enable governance, compliance, and risk auditing of your AWS account--to give you increased visibility into your user and resource activity by recording AWS Management Console actions and API calls. You can identify which users and accounts called AWS, the source IP address from which the calls were made, and when the calls occurred.\ +narrative = It seems obvious that it is critical to monitor and control the users who have access to your cloud infrastructure. Nevertheless, it's all too common for enterprises to lose track of ad-hoc accounts, leaving their servers vulnerable to attack. In fact, this was the very oversight that led to Tesla's cryptojacking attack in February 2018. \ +In addition to compromising the security of your data, when bad actors leverage your compute resources, it can incur monumental costs, since you will be billed for any new EC2 instances and increased bandwidth usage.
\ +Fortunately, you can leverage Amazon Web Services (AWS) CloudTrail--a tool that helps you enable governance, compliance, and risk auditing of your AWS account--to give you increased visibility into your user and resource activity by recording AWS Management Console actions and API calls. You can identify which users and accounts called AWS, the source IP address from which the calls were made, and when the calls occurred. \ The detection searches in this Analytic Story are designed to help you uncover AWS API activities from users not listed in the identity table, as well as similar activities from disabled accounts. [analytic_story://Azorult] @@ -17033,9 +17033,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Mauricio Velazco"}] spec_version = 3 searches = ["ESCU - Azure AD Admin Consent Bypassed by Service Principal - Rule", "ESCU - Azure AD Application Administrator Role Assigned - Rule", "ESCU - Azure AD Global Administrator Role Assigned - Rule", "ESCU - Azure AD PIM Role Assigned - Rule", "ESCU - Azure AD PIM Role Assignment Activated - Rule", "ESCU - Azure AD Privileged Authentication Administrator Role Assigned - Rule", "ESCU - Azure AD Privileged Role Assigned to Service Principal - Rule", "ESCU - Azure AD Service Principal New Client Credentials - Rule", "ESCU - Azure AD Service Principal Owner Added - Rule"] description = Monitor for activities and techniques associated with Privilege Escalation attacks within Azure Active Directory tenants. -narrative = Privilege Escalation consists of techniques that adversaries use to gain higher-level permissions on a system or network. Adversaries can often enter and explore a network with unprivileged access but require elevated permissions to follow through on their objectives. Common approaches are to take advantage of system weaknesses, misconfigurations or vulnerabilities.\ -Azure Active Directory (Azure AD) is Microsofts enterprise cloud-based identity and access management (IAM) service. Azure AD is the backbone of most of Azure services like Office 365 and Microsoft Teams. It can sync with on-premise Active Directory environments and provide authentication to other cloud-based systems via the OAuth protocol. According to Microsoft, Azure AD manages more than 1.2 billion identities and processes over 8 billion authentications per day.\ -Privilege escalation attacks in Azure AD typically involve abusing misconfigurations to gain elevated privileges, such as Global Administrator access. Once an attacker has escalated their privileges and taken full control of a tenant, they may abuse every service that leverages Azure AD including moving laterally to Azure virtual machines to access sensitive data and carry out further attacks. Security teams should monitor for privilege escalation attacks in Azure Active Directory to identify breaches before attackers achieve operational success.\ +narrative = Privilege Escalation consists of techniques that adversaries use to gain higher-level permissions on a system or network. Adversaries can often enter and explore a network with unprivileged access but require elevated permissions to follow through on their objectives. Common approaches are to take advantage of system weaknesses, misconfigurations or vulnerabilities. \ +Azure Active Directory (Azure AD) is Microsoft's enterprise cloud-based identity and access management (IAM) service. Azure AD is the backbone of most Azure services like Office 365 and Microsoft Teams.
It can sync with on-premises Active Directory environments and provide authentication to other cloud-based systems via the OAuth protocol. According to Microsoft, Azure AD manages more than 1.2 billion identities and processes over 8 billion authentications per day. \ +Privilege escalation attacks in Azure AD typically involve abusing misconfigurations to gain elevated privileges, such as Global Administrator access. Once an attacker has escalated their privileges and taken full control of a tenant, they may abuse every service that leverages Azure AD, including moving laterally to Azure virtual machines to access sensitive data and carry out further attacks. Security teams should monitor for privilege escalation attacks in Azure Active Directory to identify breaches before attackers achieve operational success. \ The following analytic story groups detection opportunities that seek to identify an adversary attempting to escalate privileges in Azure AD tenants. [analytic_story://Baron Samedit CVE-2021-3156] @@ -17113,8 +17113,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "David Dorsey"}] spec_version = 3 searches = ["ESCU - Monitor Email For Brand Abuse - Rule", "ESCU - Monitor DNS For Brand Abuse - Rule", "ESCU - Monitor Web Traffic For Brand Abuse - Rule", "ESCU - Get Email Info - Response Task", "ESCU - Get Emails From Specific Sender - Response Task", "ESCU - Get Notable History - Response Task", "ESCU - Get Process Responsible For The DNS Traffic - Response Task"] description = Detect and investigate activity that may indicate that an adversary is using faux domains to mislead users into interacting with malicious infrastructure. Monitor DNS, email, and web traffic for permutations of your brand name. -narrative = While you can educate your users and customers about the risks and threats posed by typosquatting, phishing, and corporate espionage, human error is a persistent fact of life. Of course, your adversaries are all too aware of this reality and will happily leverage it for nefarious purposes whenever possible3phishing with lookalike addresses, embedding faux command-and-control domains in malware, and hosting malicious content on domains that closely mimic your corporate servers. This is where brand monitoring comes in.\ -You can use our adaptation of `DNSTwist`, together with the support searches in this Analytic Story, to generate permutations of specified brands and external domains. Splunk can monitor email, DNS requests, and web traffic for these permutations and provide you with early warnings and situational awareness--powerful elements of an effective defense.\ +narrative = While you can educate your users and customers about the risks and threats posed by typosquatting, phishing, and corporate espionage, human error is a persistent fact of life. Of course, your adversaries are all too aware of this reality and will happily leverage it for nefarious purposes whenever possible: phishing with lookalike addresses, embedding faux command-and-control domains in malware, and hosting malicious content on domains that closely mimic your corporate servers. This is where brand monitoring comes in. \ +You can use our adaptation of `DNSTwist`, together with the support searches in this Analytic Story, to generate permutations of specified brands and external domains. Splunk can monitor email, DNS requests, and web traffic for these permutations and provide you with early warnings and situational awareness--powerful elements of an effective defense.
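As a rough illustration of the DNS leg of the brand monitoring described above, assuming a `DNSTwist`-style permutation list has been loaded into a lookup, matching resolved queries against it could look like the sketch below; the index, the brand_permutations lookup and its domain field, and the query/src field names are hypothetical placeholders rather than the ESCU support searches, and any DNS source normalized to the Network_Resolution data model provides equivalent fields.
index=dns query=*
| lookup brand_permutations domain AS query OUTPUT domain AS matched_permutation
| where isnotnull(matched_permutation)
| stats count min(_time) as first_seen by src, query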
\ Notable events will include IP addresses, URLs, and user data. Drilling down can provide you with even more actionable intelligence, including likely geographic information, contextual searches to help you scope the problem, and investigative searches. [analytic_story://Brute Ratel C4] @@ -17236,9 +17236,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Citrix ADC Exploitation CVE-2023-3519 - Rule"] description = The CVE-2023-3519 vulnerability in NetScaler (formerly Citrix) Application Delivery Controller (ADC) and NetScaler Gateway has been exploited by threat actors, as detailed in a recent advisory. The unauthenticated remote code execution vulnerability was utilized as a zero-day to establish a webshell on a non-production environment NetScaler ADC appliance within a critical infrastructure organization. This facilitated the execution of discovery on the victim's active directory and the collection and exfiltration of data. The advisory offers a comprehensive examination of the threat actors' tactics, techniques, and procedures (TTPs), alongside recommended detection methods and incident response guidelines. Immediate patch application from Citrix and the use of the detection guidance in the advisory is strongly recommended for critical infrastructure organizations to mitigate system compromises. -narrative = Recent advisories have highlighted the exploitation of CVE-2023-3519, a critical vulnerability in Citrix's NetScaler Application Delivery Controller (ADC) and NetScaler Gateway. In June 2023, threat actors utilized this vulnerability to implant a webshell on a NetScaler ADC appliance within a critical infrastructure organization's non-production environment. This action granted them the ability to perform active directory discovery, data collection, and exfiltration. Notably, attempts for lateral movement to a domain controller were obstructed by network-segmentation controls.\ -The compromised organization reported the breach, leading Citrix to issue a patch on July 18, 2023. Multiple advisories have since outlined the threat actors' tactics, techniques, and procedures (TTPs), including their initial access, persistence, privilege escalation, defense evasion, credential access, discovery, collection, command and control, and impact. These advisories also provide detection methods and recommend incident response measures.\ -The threat actors executed several activities during their attack, such as uploading a TGZ file with a generic webshell, discovery script, and setuid binary on the ADC appliance; conducting SMB scanning on the subnet; using the webshell for active directory enumeration and data exfiltration; and accessing NetScaler configuration files and decryption keys. They also decrypted an active directory credential, queried the active directory for various information, encrypted collected data, exfiltrated it as an image file, and attempted to erase their artifacts. Despite these actions, further discovery and lateral movement were impeded due to the organization's network-segmentation controls. \\ +narrative = Recent advisories have highlighted the exploitation of CVE-2023-3519, a critical vulnerability in Citrix's NetScaler Application Delivery Controller (ADC) and NetScaler Gateway. In June 2023, threat actors utilized this vulnerability to implant a webshell on a NetScaler ADC appliance within a critical infrastructure organization's non-production environment. 
This action granted them the ability to perform active directory discovery, data collection, and exfiltration. Notably, attempts at lateral movement to a domain controller were obstructed by network-segmentation controls. \ +The compromised organization reported the breach, leading Citrix to issue a patch on July 18, 2023. Multiple advisories have since outlined the threat actors' tactics, techniques, and procedures (TTPs), including their initial access, persistence, privilege escalation, defense evasion, credential access, discovery, collection, command and control, and impact. These advisories also provide detection methods and recommend incident response measures. \ +The threat actors executed several activities during their attack, such as uploading a TGZ file with a generic webshell, discovery script, and setuid binary on the ADC appliance; conducting SMB scanning on the subnet; using the webshell for active directory enumeration and data exfiltration; and accessing NetScaler configuration files and decryption keys. They also decrypted an active directory credential, queried the active directory for various information, encrypted collected data, exfiltrated it as an image file, and attempted to erase their artifacts. Despite these actions, further discovery and lateral movement were impeded due to the organization's network-segmentation controls. \ \ Advisories suggest conducting specific checks on the ADC shell interface to detect signs of compromise. If a compromise is detected, organizations should isolate potentially affected hosts, reimage compromised hosts, provide new account credentials, collect and review artifacts, and report the compromise. To mitigate the threat, organizations are advised to promptly install the relevant updates for NetScaler ADC and NetScaler Gateway, adhere to cybersecurity best practices, and apply robust network-segmentation controls on NetScaler appliances and other internet-facing devices. [analytic_story://Citrix ShareFile RCE CVE-2023-24489] @@ -17250,9 +17250,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Detect Webshell Exploit Behavior - Rule", "ESCU - Citrix ShareFile Exploitation CVE-2023-24489 - Rule"] description = A critical vulnerability has been discovered in ShareFile's Storage Zones Controller software (CVE-2023-24489), used by numerous organizations for file sharing and storage. The vulnerability allows unauthenticated arbitrary file upload and remote code execution due to a cryptographic bug in the software's encryption check combined with a lack of authentication. The risk comes from a failing encryption check, allowing potential cybercriminals to upload malicious files to the server. The bug was found in the Documentum Connector's .aspx files. The security risk has a potentially large impact due to the software's wide use and the sensitivity of the stored data. Citrix has released a security update to address this issue. -narrative = The ShareFile Storage Zones Controller is a .NET web application running under IIS, which manages the storage of files in ShareFile's system. It was discovered that this software has a critical vulnerability (CVE-2023-24489) in the file upload functionality provided by the Documentum Connector's .aspx files.
Specifically, the security flaw lies in the encryption check in the file upload process which could be bypassed, allowing for unauthenticated arbitrary file uploads and remote code execution.\ -The application sets the current principal from a session cookie, but if this is missing, the application continues without authentication. The application uses AES encryption, with CBC mode and PKCS#7 padding. A decryption check is in place which returns an error if the decryption fails, but this can be bypassed by supplying a ciphertext that results in valid padding after decryption, thereby not causing an exception.\ -The Documentum Connector's upload.aspx file, when uploading a file, calls the ProcessRawPostedFile function, which allows a path traversal due to improper sanitization of the 'uploadId' parameter. It allows the 'filename' and 'uploadId' parameters to be concatenated, and while the 'filename' parameter is sanitized, the 'uploadId' is not. The 'parentid' parameter is passed in but is also not used.\ +narrative = The ShareFile Storage Zones Controller is a .NET web application running under IIS, which manages the storage of files in ShareFile's system. It was discovered that this software has a critical vulnerability (CVE-2023-24489) in the file upload functionality provided by the Documentum Connector's .aspx files. Specifically, the security flaw lies in the encryption check in the file upload process which could be bypassed, allowing for unauthenticated arbitrary file uploads and remote code execution. \ +The application sets the current principal from a session cookie, but if this is missing, the application continues without authentication. The application uses AES encryption, with CBC mode and PKCS#7 padding. A decryption check is in place which returns an error if the decryption fails, but this can be bypassed by supplying a ciphertext that results in valid padding after decryption, thereby not causing an exception. \ +The Documentum Connector's upload.aspx file, when uploading a file, calls the ProcessRawPostedFile function, which allows a path traversal due to improper sanitization of the 'uploadId' parameter. It allows the 'filename' and 'uploadId' parameters to be concatenated, and while the 'filename' parameter is sanitized, the 'uploadId' is not. The 'parentid' parameter is passed in but is also not used. \ The vulnerability enables an attacker to upload a webshell or any other malicious file, by providing a properly padded encrypted string for the 'parentid' parameter, and specifying the path for the 'uploadId' and the name for the 'filename'. An attacker can achieve remote code execution by requesting the uploaded file. The issue was addressed by Citrix in a recent security update. 
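Alongside the CVE-specific rule, the searches listed for this story include a generic `Detect Webshell Exploit Behavior` rule. A minimal, hedged sketch of that behavioral idea (an IIS worker process spawning an interactive shell after a webshell lands), assuming Sysmon process-creation events are indexed under an illustrative sourcetype, might look like the following; it is not the shipped ESCU search.
sourcetype=XmlWinEventLog EventCode=1 ParentImage="*\\w3wp.exe" (Image="*\\cmd.exe" OR Image="*\\powershell.exe" OR Image="*\\whoami.exe")
| stats count min(_time) as first_seen max(_time) as last_seen by Computer, ParentImage, Image, CommandLine
Requests for recently created .aspx files in the IIS access logs are a complementary signal to pair with this process-lineage check.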
[analytic_story://Clop Ransomware] @@ -17275,9 +17275,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "David Dorsey"}] spec_version = 3 searches = ["ESCU - Abnormally High Number Of Cloud Instances Launched - Rule", "ESCU - Cloud Compute Instance Created By Previously Unseen User - Rule", "ESCU - Cloud Compute Instance Created In Previously Unused Region - Rule", "ESCU - Cloud Compute Instance Created With Previously Unseen Image - Rule", "ESCU - Cloud Compute Instance Created With Previously Unseen Instance Type - Rule", "ESCU - AWS Investigate Security Hub alerts by dest - Response Task", "ESCU - AWS Investigate User Activities By ARN - Response Task", "ESCU - Get EC2 Instance Details by instanceId - Response Task", "ESCU - Get EC2 Launch Details - Response Task", "ESCU - Get Notable History - Response Task", "ESCU - Investigate AWS activities via region name - Response Task"] description = Monitor your cloud compute instances for activities related to cryptojacking/cryptomining. New instances that originate from previously unseen regions, users who launch abnormally high numbers of instances, or compute instances started by previously unseen users are just a few examples of potentially malicious behavior. -narrative = Cryptomining is an intentionally difficult, resource-intensive business. Its complexity was designed into the process to ensure that the number of blocks mined each day would remain steady. So, it's par for the course that ambitious, but unscrupulous, miners make amassing the computing power of large enterprises--a practice known as cryptojacking--a top priority.\ -Cryptojacking has attracted an increasing amount of media attention since its explosion in popularity in the fall of 2017. The attacks have moved from in-browser exploits and mobile phones to enterprise cloud services, such as Amazon Web Services (AWS), Google Cloud Platform (GCP), and Azure. It's difficult to determine exactly how widespread the practice has become, since bad actors continually evolve their ability to escape detection, including employing unlisted endpoints, moderating their CPU usage, and hiding the mining pool's IP address behind a free CDN.\ -When malicious miners appropriate a cloud instance, often spinning up hundreds of new instances, the costs can become astronomical for the account holder. So it is critically important to monitor your systems for suspicious activities that could indicate that your network has been infiltrated.\ +narrative = Cryptomining is an intentionally difficult, resource-intensive business. Its complexity was designed into the process to ensure that the number of blocks mined each day would remain steady. So, it's par for the course that ambitious, but unscrupulous, miners make amassing the computing power of large enterprises--a practice known as cryptojacking--a top priority. \ +Cryptojacking has attracted an increasing amount of media attention since its explosion in popularity in the fall of 2017. The attacks have moved from in-browser exploits and mobile phones to enterprise cloud services, such as Amazon Web Services (AWS), Google Cloud Platform (GCP), and Azure. It's difficult to determine exactly how widespread the practice has become, since bad actors continually evolve their ability to escape detection, including employing unlisted endpoints, moderating their CPU usage, and hiding the mining pool's IP address behind a free CDN. 
\ +When malicious miners appropriate a cloud instance, often spinning up hundreds of new instances, the costs can become astronomical for the account holder. So it is critically important to monitor your systems for suspicious activities that could indicate that your network has been infiltrated. \ This Analytic Story is focused on detecting suspicious new instances in your cloud environment to help prevent cryptominers from gaining a foothold. It contains detection searches that will detect when a previously unused instance type or AMI is used. It also contains support searches to build lookup files to ensure proper execution of the detection searches. [analytic_story://Cloud Federated Credential Abuse] @@ -17300,15 +17300,15 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Anomalous usage of 7zip - Rule", "ESCU - CMD Echo Pipe - Escalation - Rule", "ESCU - Cobalt Strike Named Pipes - Rule", "ESCU - Detect Regsvr32 Application Control Bypass - Rule", "ESCU - DLLHost with no Command Line Arguments with Network - Rule", "ESCU - GPUpdate with no Command Line Arguments with Network - Rule", "ESCU - Rundll32 with no Command Line Arguments with Network - Rule", "ESCU - SearchProtocolHost with no Command Line with Network - Rule", "ESCU - Services Escalate Exe - Rule", "ESCU - Suspicious DLLHost no Command Line Arguments - Rule", "ESCU - Suspicious GPUpdate no Command Line Arguments - Rule", "ESCU - Suspicious microsoft workflow compiler rename - Rule", "ESCU - Suspicious msbuild path - Rule", "ESCU - Suspicious MSBuild Rename - Rule", "ESCU - Suspicious Rundll32 no Command Line Arguments - Rule", "ESCU - Suspicious Rundll32 StartW - Rule", "ESCU - Suspicious SearchProtocolHost no Command Line Arguments - Rule"] description = Cobalt Strike is threat emulation software. Red teams and penetration testers use Cobalt Strike to demonstrate the risk of a breach and evaluate mature security programs. Most recently, Cobalt Strike has become the choice tool by threat groups due to its ease of use and extensibility. -narrative = This Analytic Story supports you to detect Tactics, Techniques and Procedures (TTPs) from Cobalt Strike. Cobalt Strike has many ways to be enhanced by using aggressor scripts, malleable C2 profiles, default attack packages, and much more. For endpoint behavior, Cobalt Strike is most commonly identified via named pipes, spawn to processes, and DLL function names. Many additional variables are provided for in memory operation of the beacon implant. On the network, depending on the malleable C2 profile used, it is near infinite in the amount of ways to conceal the C2 traffic with Cobalt Strike. Not every query may be specific to Cobalt Strike the tool, but the methodologies and techniques used by it.\ -Splunk Threat Research reviewed all publicly available instances of Malleabe C2 Profiles and generated a list of the most commonly used spawnto and pipenames.\ -`Spawnto_x86` and `spawnto_x64` is the process that Cobalt Strike will spawn and injects shellcode into.\ -Pipename sets the named pipe name used in Cobalt Strikes Beacon SMB C2 traffic.\ -With that, new detections were generated focused on these spawnto processes spawning without command line arguments. Similar, the named pipes most commonly used by Cobalt Strike added as a detection. In generating content for Cobalt Strike, the following is considered:\ -- Is it normal for spawnto_ value to have no command line arguments? 
No command line arguments and a network connection?\ -- What is the default, or normal, process lineage for spawnto_ value?\ -- Does the spawnto_ value make network connections?\ -- Is it normal for spawnto_ value to load jscript, vbscript, Amsi.dll, and clr.dll?\ +narrative = This Analytic Story helps you detect Tactics, Techniques and Procedures (TTPs) from Cobalt Strike. Cobalt Strike has many ways to be enhanced by using aggressor scripts, malleable C2 profiles, default attack packages, and much more. For endpoint behavior, Cobalt Strike is most commonly identified via named pipes, spawn to processes, and DLL function names. Many additional variables are provided for in-memory operation of the beacon implant. On the network, depending on the malleable C2 profile used, there is a nearly infinite number of ways to conceal the C2 traffic with Cobalt Strike. Not every query may be specific to Cobalt Strike the tool, but rather to the methodologies and techniques used by it. \ +Splunk Threat Research reviewed all publicly available instances of Malleable C2 Profiles and generated a list of the most commonly used spawnto and pipenames. \ +`Spawnto_x86` and `spawnto_x64` are the processes that Cobalt Strike will spawn and inject shellcode into. \ +Pipename sets the named pipe name used in Cobalt Strike's Beacon SMB C2 traffic. \ +With that, new detections were generated focused on these spawnto processes spawning without command line arguments. Similarly, the named pipes most commonly used by Cobalt Strike were added as detections. In generating content for Cobalt Strike, the following is considered: \ +- Is it normal for spawnto_ value to have no command line arguments? No command line arguments and a network connection? \ +- What is the default, or normal, process lineage for spawnto_ value? \ +- Does the spawnto_ value make network connections? \ +- Is it normal for spawnto_ value to load jscript, vbscript, Amsi.dll, and clr.dll? \ While investigating a detection related to this Analytic Story, keep in mind the parent process, process path, and any file modifications that may occur. Tuning may need to occur to remove any false positives. [analytic_story://ColdRoot MacOS RAT] @@ -17320,8 +17320,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Jose Hernandez"}] spec_version = 3 searches = ["ESCU - Osquery pack - ColdRoot detection - Rule", "ESCU - MacOS - Re-opened Applications - Rule", "ESCU - Processes Tapping Keyboard Events - Rule", "ESCU - Get Notable History - Response Task", "ESCU - Investigate Network Traffic From src ip - Response Task"] description = Leverage searches that allow you to detect and investigate unusual activities that relate to the ColdRoot Remote Access Trojan that affects MacOS. Examples of these activities include changing sensitive binaries in the MacOS sub-system, detecting process names and executables associated with the RAT, detecting when a keyboard tap is installed on a MacOS machine, and more. -narrative = Conventional wisdom holds that Apple's MacOS operating system is significantly less vulnerable to attack than Windows machines. While that point is debatable, it is true that attacks against MacOS systems are much less common. However, this fact does not mean that Macs are impervious to breaches. To the contrary, research has shown that that Mac malware is increasing at an alarming rate. According to AV-test, in 2018, there were 86,865 new MacOS malware variants, up from 27,338 the year before—a 31% increase.
In contrast, the independent research firm found that new Windows malware had increased from 65.17M to 76.86M during that same period, less than half the rate of growth. The bottom line is that while the numbers look a lot smaller than Windows, it's definitely time to take Mac security more seriously.\ -This Analytic Story addresses the ColdRoot remote access trojan (RAT), which was uploaded to Github in 2016, but was still escaping detection by the first quarter of 2018, when a new, more feature-rich variant was discovered masquerading as an Apple audio driver. Among other capabilities, the Pascal-based ColdRoot can heist passwords from users' keychains and remotely control infected machines without detection. In the initial report of his findings, Patrick Wardle, Chief Research Officer for Digita Security, explained that the new ColdRoot RAT could start and kill processes on the breached system, spawn new remote-desktop sessions, take screen captures and assemble them into a live stream of the victim's desktop, and more.\ +narrative = Conventional wisdom holds that Apple's MacOS operating system is significantly less vulnerable to attack than Windows machines. While that point is debatable, it is true that attacks against MacOS systems are much less common. However, this fact does not mean that Macs are impervious to breaches. To the contrary, research has shown that Mac malware is increasing at an alarming rate. According to AV-test, in 2018, there were 86,865 new MacOS malware variants, up from 27,338 the year before—a 31% increase. In contrast, the independent research firm found that new Windows malware had increased from 65.17M to 76.86M during that same period, less than half the rate of growth. The bottom line is that while the numbers look a lot smaller than Windows, it's definitely time to take Mac security more seriously. \ +This Analytic Story addresses the ColdRoot remote access trojan (RAT), which was uploaded to GitHub in 2016, but was still escaping detection by the first quarter of 2018, when a new, more feature-rich variant was discovered masquerading as an Apple audio driver. Among other capabilities, the Pascal-based ColdRoot can heist passwords from users' keychains and remotely control infected machines without detection. In the initial report of his findings, Patrick Wardle, Chief Research Officer for Digita Security, explained that the new ColdRoot RAT could start and kill processes on the breached system, spawn new remote-desktop sessions, take screen captures and assemble them into a live stream of the victim's desktop, and more. \
[analytic_story://Collection and Staging] @@ -17333,8 +17333,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Rico Valdez"}] spec_version = 3 searches = ["ESCU - Email files written outside of the Outlook directory - Rule", "ESCU - Email servers sending high volume traffic to hosts - Rule", "ESCU - Suspicious writes to System Volume Information - Rule", "ESCU - Detect Renamed 7-Zip - Rule", "ESCU - Detect Renamed WinRAR - Rule", "ESCU - Suspicious writes to windows Recycle Bin - Rule", "ESCU - Hosts receiving high volume of network traffic from email server - Rule", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task"] description = Monitor for and investigate activities--such as suspicious writes to the Windows Recycling Bin or email servers sending high amounts of traffic to specific hosts, for example--that may indicate that an adversary is harvesting and exfiltrating sensitive data. -narrative = A common adversary goal is to identify and exfiltrate data of value from a target organization. This data may include email conversations and addresses, confidential company information, links to network design/infrastructure, important dates, and so on.\ -Attacks are composed of three activities: identification, collection, and staging data for exfiltration. Identification typically involves scanning systems and observing user activity. Collection can involve the transfer of large amounts of data from various repositories. Staging/preparation includes moving data to a central location and compressing (and optionally encoding and/or encrypting) it. All of these activities provide opportunities for defenders to identify their presence.\ +narrative = A common adversary goal is to identify and exfiltrate data of value from a target organization. This data may include email conversations and addresses, confidential company information, links to network design/infrastructure, important dates, and so on. \ +Attacks are composed of three activities: identification, collection, and staging data for exfiltration. Identification typically involves scanning systems and observing user activity. Collection can involve the transfer of large amounts of data from various repositories. Staging/preparation includes moving data to a central location and compressing (and optionally encoding and/or encrypting) it. All of these activities provide opportunities for defenders to identify their presence. \ Use the searches to detect and monitor suspicious behavior related to these activities. 
[analytic_story://Command And Control] @@ -17346,7 +17346,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Rico Valdez"}] spec_version = 3 searches = ["ESCU - Detect Spike in blocked Outbound Traffic from your AWS - Rule", "ESCU - Clients Connecting to Multiple DNS Servers - Rule", "ESCU - Detect Long DNS TXT Record Response - Rule", "ESCU - Detection of DNS Tunnels - Rule", "ESCU - DNS Query Requests Resolved by Unauthorized DNS Servers - Rule", "ESCU - Detect Remote Access Software Usage File - Rule", "ESCU - Detect Remote Access Software Usage FileInfo - Rule", "ESCU - Detect Remote Access Software Usage Process - Rule", "ESCU - DNS Exfiltration Using Nslookup App - Rule", "ESCU - Excessive Usage of NSLOOKUP App - Rule", "ESCU - Windows Remote Access Software Hunt - Rule", "ESCU - Detect DGA domains using pretrained model in DSDL - Rule", "ESCU - Detect DNS Data Exfiltration using pretrained model in DSDL - Rule", "ESCU - Detect hosts connecting to dynamic domain providers - Rule", "ESCU - Detect Large Outbound ICMP Packets - Rule", "ESCU - Detect Remote Access Software Usage DNS - Rule", "ESCU - Detect Remote Access Software Usage Traffic - Rule", "ESCU - Detect suspicious DNS TXT records using pretrained model in DSDL - Rule", "ESCU - DNS Query Length Outliers - MLTK - Rule", "ESCU - DNS Query Length With High Standard Deviation - Rule", "ESCU - Excessive DNS Failures - Rule", "ESCU - Multiple Archive Files Http Post Traffic - Rule", "ESCU - Plain HTTP POST Exfiltrated Data - Rule", "ESCU - Prohibited Network Traffic Allowed - Rule", "ESCU - Protocol or Port Mismatch - Rule", "ESCU - TOR Traffic - Rule", "ESCU - Detect Remote Access Software Usage URL - Rule", "ESCU - AWS Investigate User Activities By ARN - Response Task", "ESCU - AWS Network ACL Details from ID - Response Task", "ESCU - AWS Network Interface details via resourceId - Response Task", "ESCU - Get All AWS Activity From IP Address - Response Task", "ESCU - Get DNS Server History for a host - Response Task", "ESCU - Get DNS traffic ratio - Response Task", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task", "ESCU - Get Process Information For Port Activity - Response Task", "ESCU - Get Process Responsible For The DNS Traffic - Response Task"] description = Detect and investigate tactics, techniques, and procedures leveraged by attackers to establish and operate Command And Control channels. Implants installed by attackers on compromised endpoints use these channels to receive instructions and send data back to the malicious operators. -narrative = Threat actors typically architect and implement an infrastructure to use in various ways during the course of their attack campaigns. In some cases, they leverage this infrastructure for scanning and performing reconnaissance activities. In others, they may use this infrastructure to launch actual attacks. One of the most important functions of this infrastructure is to establish servers that will communicate with implants on compromised endpoints. These servers establish a command and control channel that is used to proxy data between the compromised endpoint and the attacker. These channels relay commands from the attacker to the compromised endpoint and the output of those commands back to the attacker.\ +narrative = Threat actors typically architect and implement an infrastructure to use in various ways during the course of their attack campaigns. 
In some cases, they leverage this infrastructure for scanning and performing reconnaissance activities. In others, they may use this infrastructure to launch actual attacks. One of the most important functions of this infrastructure is to establish servers that will communicate with implants on compromised endpoints. These servers establish a command and control channel that is used to proxy data between the compromised endpoint and the attacker. These channels relay commands from the attacker to the compromised endpoint and the output of those commands back to the attacker. \ Because this communication is so critical for an adversary, they often use techniques designed to hide the true nature of the communications. There are many different techniques used to establish and communicate over these channels. This Analytic Story provides searches that look for a variety of the techniques used for these channels, as well as indications that these channels are active, by examining logs associated with border control devices and network-access control lists. [analytic_story://Compromised User Account] @@ -17391,8 +17391,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Rico Valdez"}] spec_version = 3 searches = ["ESCU - Detect Mimikatz Using Loaded Images - Rule", "ESCU - Dump LSASS via procdump Rename - Rule", "ESCU - Unsigned Image Loaded by LSASS - Rule", "ESCU - Access LSASS Memory for Dump Creation - Rule", "ESCU - Attempted Credential Dump From Registry via Reg exe - Rule", "ESCU - Create Remote Thread into LSASS - Rule", "ESCU - Creation of lsass Dump with Taskmgr - Rule", "ESCU - Creation of Shadow Copy - Rule", "ESCU - Creation of Shadow Copy with wmic and powershell - Rule", "ESCU - Credential Dumping via Copy Command from Shadow Copy - Rule", "ESCU - Credential Dumping via Symlink to Shadow Copy - Rule", "ESCU - Detect Copy of ShadowCopy with Script Block Logging - Rule", "ESCU - Detect Credential Dumping through LSASS access - Rule", "ESCU - Dump LSASS via comsvcs DLL - Rule", "ESCU - Dump LSASS via procdump - Rule", "ESCU - Enable WDigest UseLogonCredential Registry - Rule", "ESCU - Esentutl SAM Copy - Rule", "ESCU - Extraction of Registry Hives - Rule", "ESCU - Ntdsutil Export NTDS - Rule", "ESCU - Potential password in username - Rule", "ESCU - SAM Database File Access Attempt - Rule", "ESCU - SecretDumps Offline NTDS Dumping Tool - Rule", "ESCU - Set Default PowerShell Execution Policy To Unrestricted or Bypass - Rule", "ESCU - Windows AD Replication Request Initiated by User Account - Rule", "ESCU - Windows AD Replication Request Initiated from Unsanctioned Location - Rule", "ESCU - Windows Credential Dumping LSASS Memory Createdump - Rule", "ESCU - Windows Hunting System Account Targeting Lsass - Rule", "ESCU - Windows Mimikatz Binary Execution - Rule", "ESCU - Windows Non-System Account Targeting Lsass - Rule", "ESCU - Windows Possible Credential Dumping - Rule", "ESCU - Investigate Failed Logins for Multiple Destinations - Response Task", "ESCU - Investigate Pass the Hash Attempts - Response Task", "ESCU - Investigate Pass the Ticket Attempts - Response Task", "ESCU - Investigate Previous Unseen User - Response Task"] description = Uncover activity consistent with credential dumping, a technique wherein attackers compromise systems and attempt to obtain and exfiltrate passwords. The threat actors use these pilfered credentials to further escalate privileges and spread throughout a target environment. 
The included searches in this Analytic Story are designed to identify attempts at credential dumping. -narrative = Credential dumping—gathering credentials from a target system, often hashed or encrypted—is a common attack technique. Even though the credentials may not be in plain text, an attacker can still exfiltrate the data and set to cracking it offline, on their own systems. The threat actors target a variety of sources to extract them, including the Security Accounts Manager (SAM), Local Security Authority (LSA), NTDS from Domain Controllers, or the Group Policy Preference (GPP) files.\ -Once attackers obtain valid credentials, they use them to move throughout a target network with ease, discovering new systems and identifying assets of interest. Credentials obtained in this manner typically include those of privileged users, which may provide access to more sensitive information and system operations.\ +narrative = Credential dumping—gathering credentials from a target system, often hashed or encrypted—is a common attack technique. Even though the credentials may not be in plain text, an attacker can still exfiltrate the data and set to cracking it offline, on their own systems. The threat actors target a variety of sources to extract them, including the Security Accounts Manager (SAM), Local Security Authority (LSA), NTDS from Domain Controllers, or the Group Policy Preference (GPP) files. \ +Once attackers obtain valid credentials, they use them to move throughout a target network with ease, discovering new systems and identifying assets of interest. Credentials obtained in this manner typically include those of privileged users, which may provide access to more sensitive information and system operations. \ The detection searches in this Analytic Story monitor access to the Local Security Authority Subsystem Service (LSASS) process, the use of shadow copies for credential dumping, and other credential dumping techniques.

[analytic_story://CVE-2022-40684 Fortinet Appliance Auth bypass] @@ -17426,9 +17426,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Confluence CVE-2023-22515 Trigger Vulnerability - Rule", "ESCU - Confluence Data Center and Server Privilege Escalation - Rule", "ESCU - Web Remote ShellServlet Access - Rule"] description = On October 4, 2023, Atlassian disclosed a critical privilege escalation vulnerability, CVE-2023-22515, affecting on-premises instances of Confluence Server and Confluence Data Center. This flaw might allow external attackers to exploit accessible Confluence instances, creating unauthorized Confluence administrator accounts. Indicators suggest the vulnerability is remotely exploitable. The affected versions range from 8.0.0 to 8.5.1, but versions prior to 8.0.0 and Atlassian Cloud sites are unaffected. Atlassian advises customers to update to a fixed version or implement mitigation strategies. Indicators of compromise (IoCs) and mitigation steps, such as blocking access to /setup/* endpoints, are provided. -narrative = Upon Atlassian's disclosure of CVE-2023-22515, there's an immediate need to assess the threat landscape of on-premises Confluence installations. As the vulnerability affects privilege escalation and may be exploited remotely, SIEM solutions should be poised to detect potential threats.\ -By monitoring for specific indicators of compromise, security teams can get ahead of any potential breaches.
Key indicators include unexpected members in the 'confluence-administrator' group, newly created user accounts, and specific HTTP requests to /setup/*.action endpoints. Any unusual spikes or patterns associated with these indicators might signify an ongoing or attempted exploitation.\ -Furthermore, an audit trail of past logs is essential. Analyzing older logs might uncover any unnoticed exploitation, allowing for a post-incident analysis and ensuring affected systems are patched or isolated. An alert mechanism should be established for any access or changes related to /setup/* endpoints.\ +narrative = Upon Atlassian's disclosure of CVE-2023-22515, there's an immediate need to assess the threat landscape of on-premises Confluence installations. As the vulnerability affects privilege escalation and may be exploited remotely, SIEM solutions should be poised to detect potential threats. \ +By monitoring for specific indicators of compromise, security teams can get ahead of any potential breaches. Key indicators include unexpected members in the 'confluence-administrator' group, newly created user accounts, and specific HTTP requests to /setup/*.action endpoints. Any unusual spikes or patterns associated with these indicators might signify an ongoing or attempted exploitation. \ +Furthermore, an audit trail of past logs is essential. Analyzing older logs might uncover any unnoticed exploitation, allowing for a post-incident analysis and ensuring affected systems are patched or isolated. An alert mechanism should be established for any access or changes related to /setup/* endpoints. \ In parallel, updating the affected Confluence Server and Data Center versions to the fixed releases is paramount. If immediate updates aren't feasible, interim mitigation measures, such as blocking external network access to /setup/*, should be implemented, and logs around this activity should be monitored. [analytic_story://CVE-2023-23397 Outlook Elevation of Privilege] @@ -17451,11 +17451,11 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - MSHTML Module Load in Office Product - Rule", "ESCU - Office Document Spawned Child Process To Download - Rule", "ESCU - Office Product Spawn CMD Process - Rule", "ESCU - Office Product Spawning BITSAdmin - Rule", "ESCU - Office Product Spawning CertUtil - Rule", "ESCU - Office Product Spawning MSHTA - Rule", "ESCU - Office Product Spawning Rundll32 with no DLL - Rule", "ESCU - Office Product Spawning Windows Script Host - Rule", "ESCU - Office Product Spawning Wmic - Rule"] description = CVE-2023-36884 is an unpatched zero-day vulnerability affecting Windows and Microsoft Office products. The vulnerability allows for remote code execution through specially crafted Microsoft Office documents, enabling an attacker to operate in the context of the victim. As of now, there are no security updates available. However, users of Microsoft Defender for Office and the "Block all Office applications from creating child processes" Attack Surface Reduction Rule are safeguarded against this exploit. For other users, temporary mitigation can be achieved by adding specific application names to a designated registry key. -narrative = CVE-2023-36884 is a serious security vulnerability that affects a range of Microsoft Office products and Windows systems. 
It is a zero-day flaw, meaning it was already being exploited before Microsoft became aware of it or had a chance to develop a patch.\ -An attacker exploiting this vulnerability would create a Microsoft Office document containing malicious code. This document, when opened by the victim, allows for remote code execution, giving the attacker the ability to run their own code on the victim's machine. This poses a significant risk as the attacker could perform actions like data theft, system damage, or creating backdoors for future access.\ -Currently, there is no security patch available from Microsoft, which makes the issue more critical. Microsoft is working on investigating these vulnerabilities and will likely provide a security update either through their monthly release cycle or an out-of-cycle update, based on the urgency.\ -In the meantime, users of Microsoft Defender for Office and those utilizing the "Block all Office applications from creating child processes" Attack Surface Reduction Rule are protected from attempts to exploit this vulnerability. This is because these protections add an extra layer of security, blocking the malicious code from executing.\ -For users who are not using these protections, Microsoft recommends a workaround by adding specific application names to a particular Windows registry key (HKEY_LOCAL_MACHINE\SOFTWARE\Policies\Microsoft\Internet Explorer\Main\FeatureControl\FEATURE_BLOCK_CROSS_PROTOCOL_FILE_NAVIGATION) with data set as "1". This action aims to mitigate the risk until a permanent fix is available.\ +narrative = CVE-2023-36884 is a serious security vulnerability that affects a range of Microsoft Office products and Windows systems. It is a zero-day flaw, meaning it was already being exploited before Microsoft became aware of it or had a chance to develop a patch. \ +An attacker exploiting this vulnerability would create a Microsoft Office document containing malicious code. This document, when opened by the victim, allows for remote code execution, giving the attacker the ability to run their own code on the victim's machine. This poses a significant risk as the attacker could perform actions like data theft, system damage, or creating backdoors for future access. \ +Currently, there is no security patch available from Microsoft, which makes the issue more critical. Microsoft is working on investigating these vulnerabilities and will likely provide a security update either through their monthly release cycle or an out-of-cycle update, based on the urgency. \ +In the meantime, users of Microsoft Defender for Office and those utilizing the "Block all Office applications from creating child processes" Attack Surface Reduction Rule are protected from attempts to exploit this vulnerability. This is because these protections add an extra layer of security, blocking the malicious code from executing. \ +For users who are not using these protections, Microsoft recommends a workaround by adding specific application names to a particular Windows registry key (HKEY_LOCAL_MACHINE\SOFTWARE\Policies\Microsoft\Internet Explorer\Main\FeatureControl\FEATURE_BLOCK_CROSS_PROTOCOL_FILE_NAVIGATION) with data set as "1". This action aims to mitigate the risk until a permanent fix is available. 
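To make the registry workaround just described auditable, a rough SPL sketch like the one below could surface endpoints where values under FEATURE_BLOCK_CROSS_PROTOCOL_FILE_NAVIGATION are being written, either to confirm the mitigation was applied or to spot tampering. It assumes registry events (for example, Sysmon EventCode 12/13) are CIM-mapped to the Endpoint.Registry data model; this is an illustrative hunt, not one of the packaged ESCU detections.
| tstats count min(_time) as firstTime max(_time) as lastTime from datamodel=Endpoint.Registry
    where Registry.registry_path="*\\FeatureControl\\FEATURE_BLOCK_CROSS_PROTOCOL_FILE_NAVIGATION*"
    by Registry.dest Registry.user Registry.registry_path Registry.registry_value_name Registry.registry_value_data
| rename "Registry.*" as *
| convert ctime(firstTime) ctime(lastTime)
A registry_value_data of 1 for the expected Office application names suggests the mitigation is in place; unexpected value names or data are worth a closer look.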
\ The disclosure of this flaw involved multiple entities including Microsoft Threat Intelligence, Vlad Stolyarov, Clement Lecigne and Bahare Sabouri from Google's Threat Analysis Group (TAG), Paul Rascagneres and Tom Lancaster from Volexity, and the Microsoft Office Product Group Security Team. This collective effort indicates the severity and importance of addressing this issue. [analytic_story://Cyclops Blink] @@ -17489,9 +17489,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - CMD Carry Out String Command Parameter - Rule", "ESCU - Cmdline Tool Not Executed In CMD Shell - Rule", "ESCU - Create local admin accounts using net exe - Rule", "ESCU - Create or delete windows shares using net exe - Rule", "ESCU - Delete ShadowCopy With PowerShell - Rule", "ESCU - Deleting Of Net Users - Rule", "ESCU - Deleting Shadow Copies - Rule", "ESCU - Detect PsExec With accepteula Flag - Rule", "ESCU - Detect Regasm Spawning a Process - Rule", "ESCU - Detect Renamed PSExec - Rule", "ESCU - Executables Or Script Creation In Suspicious Path - Rule", "ESCU - Execution of File with Multiple Extensions - Rule", "ESCU - Non Chrome Process Accessing Chrome Default Dir - Rule", "ESCU - Non Firefox Process Access Firefox Profile Dir - Rule", "ESCU - PowerShell 4104 Hunting - Rule", "ESCU - Powershell Remote Services Add TrustedHost - Rule", "ESCU - Registry Keys Used For Persistence - Rule", "ESCU - Set Default PowerShell Execution Policy To Unrestricted or Bypass - Rule", "ESCU - Suspicious Process File Path - Rule", "ESCU - System Processes Run From Unexpected Locations - Rule", "ESCU - Windows Access Token Manipulation SeDebugPrivilege - Rule", "ESCU - Windows Archive Collected Data via Rar - Rule", "ESCU - Windows AutoIt3 Execution - Rule", "ESCU - Windows CAB File on Disk - Rule", "ESCU - Windows Credentials from Password Stores Chrome Extension Access - Rule", "ESCU - Windows Credentials from Password Stores Chrome LocalState Access - Rule", "ESCU - Windows Credentials from Password Stores Chrome Login Data Access - Rule", "ESCU - Windows Credentials from Password Stores Creation - Rule", "ESCU - Windows Credentials from Password Stores Deletion - Rule", "ESCU - Windows Credentials from Password Stores Query - Rule", "ESCU - Windows Indicator Removal Via Rmdir - Rule", "ESCU - Windows Modify Registry AuthenticationLevelOverride - Rule", "ESCU - Windows Modify Registry DisableRemoteDesktopAntiAlias - Rule", "ESCU - Windows Modify Registry DisableSecuritySettings - Rule", "ESCU - Windows Modify Registry DontShowUI - Rule", "ESCU - Windows Modify Registry ProxyEnable - Rule", "ESCU - Windows Modify Registry ProxyServer - Rule", "ESCU - Windows MSIExec Spawn WinDBG - Rule", "ESCU - Windows System Reboot CommandLine - Rule", "ESCU - Windows System Shutdown CommandLine - Rule", "ESCU - Windows WinDBG Spawning AutoIt3 - Rule"] description = Telekom Security CTI has uncovered a new phishing-driven malware campaign distributing DarkGate malware. This campaign utilizes stolen email threads to trick users into downloading malicious payloads via hyperlinks. An initial false link to Emotet stirred the security community, but deeper analysis confirmed its true identity as DarkGate, with characteristics like AutoIt scripts and a known command-and-control protocol. 
This report by Fabian Marquardt details the intricate infection mechanisms, including MSI and VBS file deliveries, sophisticated evasion techniques, and a robust configuration extraction method surpassing current standards. The single developer behind DarkGate, active on cybercrime forums, has shifted the malware's use from private to a rent-out model, implying an expected rise in its deployment. Researchers have also developed a decryption technique for the DarkGate malware, which aids in static analysis and detection, though it requires careful validation to avoid false positives. -narrative = Telekom Security CTi has recently put a spotlight on the proliferation of DarkGate malware via a sophisticated malspam campaign, initially mistaken for the notorious Emotet malware. The campaign smartly manipulates stolen email conversations, embedding hyperlinks that, once clicked, activate a malware download. Fabian Marquardt's analysis traces the infection's footprint, revealing a dual delivery mechanism through MSI and VBS files. These files, cloaked in legitimate wrappers or obscured with junk code, ultimately download the malware via embedded scripts.\ -Marquardt delves into the AutoIt script-based infection, uncovering the calculated use of compiled scripts and base64-encoded data to disguise the execution of malicious shellcode. The subsequent stages of infection exhibit the malware's capability to evade detection, leveraging memory allocation techniques to bypass security measures. Marquardt also explores the loader's function, which decrypts further malicious payloads by interacting with the script's encoded components.\ -The analytical narrative captures a cross-section of the cybersecurity landscape, reflecting the shift in DarkGate's operational strategy from exclusive use by the developer to a broader dissemination through a Malware-as-a-Service (MaaS) model. This transition suggests an anticipated escalation in DarkGate-related attacks.\ +narrative = Telekom Security CTi has recently put a spotlight on the proliferation of DarkGate malware via a sophisticated malspam campaign, initially mistaken for the notorious Emotet malware. The campaign smartly manipulates stolen email conversations, embedding hyperlinks that, once clicked, activate a malware download. Fabian Marquardt's analysis traces the infection's footprint, revealing a dual delivery mechanism through MSI and VBS files. These files, cloaked in legitimate wrappers or obscured with junk code, ultimately download the malware via embedded scripts. \ +Marquardt delves into the AutoIt script-based infection, uncovering the calculated use of compiled scripts and base64-encoded data to disguise the execution of malicious shellcode. The subsequent stages of infection exhibit the malware's capability to evade detection, leveraging memory allocation techniques to bypass security measures. Marquardt also explores the loader's function, which decrypts further malicious payloads by interacting with the script's encoded components. \ +The analytical narrative captures a cross-section of the cybersecurity landscape, reflecting the shift in DarkGate's operational strategy from exclusive use by the developer to a broader dissemination through a Malware-as-a-Service (MaaS) model. This transition suggests an anticipated escalation in DarkGate-related attacks. 
\ Significantly, the report contributes to cybersecurity defenses by outlining a more effective method for extracting malware configurations, providing the community with the means to anticipate and mitigate the evolving threats posed by this pernicious malware. With the insights gained, researchers and security professionals are better equipped to adapt their strategies, constructing more robust defenses against the sophisticated tactics employed by DarkGate and similar malware strains. [analytic_story://DarkSide Ransomware] @@ -17525,7 +17525,7 @@ maintainers = [{"company": "Shannon Davis, Splunk", "email": "-", "name": "Bhavi spec_version = 3 searches = ["ESCU - AWS AMI Attribute Modification for Exfiltration - Rule", "ESCU - AWS Disable Bucket Versioning - Rule", "ESCU - AWS EC2 Snapshot Shared Externally - Rule", "ESCU - AWS Exfiltration via Anomalous GetObject API Activity - Rule", "ESCU - AWS Exfiltration via Batch Service - Rule", "ESCU - AWS Exfiltration via Bucket Replication - Rule", "ESCU - AWS Exfiltration via DataSync Task - Rule", "ESCU - AWS Exfiltration via EC2 Snapshot - Rule", "ESCU - AWS S3 Exfiltration Behavior Identified - Rule", "ESCU - Gdrive suspicious file sharing - Rule", "ESCU - O365 PST export alert - Rule", "ESCU - O365 Suspicious Admin Email Forwarding - Rule", "ESCU - O365 Suspicious User Email Forwarding - Rule", "ESCU - Detect Certipy File Modifications - Rule", "ESCU - DNS Exfiltration Using Nslookup App - Rule", "ESCU - Excessive Usage of NSLOOKUP App - Rule", "ESCU - Linux Curl Upload File - Rule", "ESCU - Mailsniper Invoke functions - Rule", "ESCU - Detect DGA domains using pretrained model in DSDL - Rule", "ESCU - Detect SNICat SNI Exfiltration - Rule", "ESCU - High Volume of Bytes Out to Url - Rule", "ESCU - Multiple Archive Files Http Post Traffic - Rule", "ESCU - Plain HTTP POST Exfiltrated Data - Rule", "ESCU - Get Notable History - Response Task"] description = Data exfiltration refers to the unauthorized transfer or extraction of sensitive or valuable data from a compromised system or network during a cyber attack. It is a critical phase in many targeted attacks, where adversaries aim to steal confidential information, such as intellectual property, financial records, personal data, or trade secrets. -narrative = This Analytic Story supports you to detect Tactics, Techniques and Procedures (TTPs) leveraged by adversaries to exfiltrate data from your environments. Exfiltration comes in many flavors and its done differently on every environment. Adversaries can collect data over encrypted or non-encrypted channels. They can utilise Command And Control channels that are already in place to exfiltrate data. They can use both standard data transfer protocols such as FTP, SCP, etc to exfiltrate data. Or they can use non-standard protocols such as DNS, ICMP, etc with specially crafted fields to try and circumvent security technologies in place.\ +narrative = This Analytic Story supports you to detect Tactics, Techniques and Procedures (TTPs) leveraged by adversaries to exfiltrate data from your environments. Exfiltration comes in many flavors and its done differently on every environment. Adversaries can collect data over encrypted or non-encrypted channels. They can utilise Command And Control channels that are already in place to exfiltrate data. They can use both standard data transfer protocols such as FTP, SCP, etc to exfiltrate data. 
Or they can use non-standard protocols such as DNS, ICMP, etc with specially crafted fields to try and circumvent security technologies in place. \ Techniques for getting data out of a target network typically include transferring it over their command and control channel or an alternate channel and may also include putting size limits on the transmission. In context of the cloud, this refers to the unauthorized transfer or extraction of sensitive data from cloud-based systems or services. It involves the compromise of cloud infrastructure or accounts to gain access to valuable information stored in the cloud environment. Attackers may employ various techniques, such as exploiting vulnerabilities, stealing login credentials, or using malicious code to exfiltrate data from cloud repositories or services without detection. [analytic_story://Data Protection] @@ -17559,9 +17559,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "David Dorsey"}] spec_version = 3 searches = ["ESCU - Abnormally High AWS Instances Launched by User - Rule", "ESCU - Abnormally High AWS Instances Launched by User - MLTK - Rule", "ESCU - EC2 Instance Started In Previously Unseen Region - Rule", "ESCU - EC2 Instance Started With Previously Unseen AMI - Rule", "ESCU - EC2 Instance Started With Previously Unseen Instance Type - Rule", "ESCU - EC2 Instance Started With Previously Unseen User - Rule", "ESCU - AWS Investigate User Activities By ARN - Response Task", "ESCU - Get EC2 Instance Details by instanceId - Response Task", "ESCU - Get EC2 Launch Details - Response Task", "ESCU - Get Logon Rights Modifications For Endpoint - Response Task", "ESCU - Get Logon Rights Modifications For User - Response Task", "ESCU - Get Notable History - Response Task", "ESCU - Investigate AWS activities via region name - Response Task"] description = Monitor your AWS EC2 instances for activities related to cryptojacking/cryptomining. New instances that originate from previously unseen regions, users who launch abnormally high numbers of instances, or EC2 instances started by previously unseen users are just a few examples of potentially malicious behavior. -narrative = Cryptomining is an intentionally difficult, resource-intensive business. Its complexity was designed into the process to ensure that the number of blocks mined each day would remain steady. So, it's par for the course that ambitious, but unscrupulous, miners make amassing the computing power of large enterprises--a practice known as cryptojacking--a top priority.\ -Cryptojacking has attracted an increasing amount of media attention since its explosion in popularity in the fall of 2017. The attacks have moved from in-browser exploits and mobile phones to enterprise cloud services, such as Amazon Web Services (AWS). It's difficult to determine exactly how widespread the practice has become, since bad actors continually evolve their ability to escape detection, including employing unlisted endpoints, moderating their CPU usage, and hiding the mining pool's IP address behind a free CDN.\ -When malicious miners appropriate a cloud instance, often spinning up hundreds of new instances, the costs can become astronomical for the account holder. So, it is critically important to monitor your systems for suspicious activities that could indicate that your network has been infiltrated.\ +narrative = Cryptomining is an intentionally difficult, resource-intensive business. 
Its complexity was designed into the process to ensure that the number of blocks mined each day would remain steady. So, it's par for the course that ambitious, but unscrupulous, miners make amassing the computing power of large enterprises--a practice known as cryptojacking--a top priority. \ +Cryptojacking has attracted an increasing amount of media attention since its explosion in popularity in the fall of 2017. The attacks have moved from in-browser exploits and mobile phones to enterprise cloud services, such as Amazon Web Services (AWS). It's difficult to determine exactly how widespread the practice has become, since bad actors continually evolve their ability to escape detection, including employing unlisted endpoints, moderating their CPU usage, and hiding the mining pool's IP address behind a free CDN. \ +When malicious miners appropriate a cloud instance, often spinning up hundreds of new instances, the costs can become astronomical for the account holder. So, it is critically important to monitor your systems for suspicious activities that could indicate that your network has been infiltrated. \ This Analytic Story is focused on detecting suspicious new instances in your EC2 environment to help prevent such a disaster. It contains detection searches that will detect when a previously unused instance type or AMI is used. It also contains support searches to build lookup files to ensure proper execution of the detection searches. [analytic_story://AWS Suspicious Provisioning Activities] @@ -17573,7 +17573,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "David Dorsey"}] spec_version = 3 searches = ["ESCU - AWS Cloud Provisioning From Previously Unseen City - Rule", "ESCU - AWS Cloud Provisioning From Previously Unseen Country - Rule", "ESCU - AWS Cloud Provisioning From Previously Unseen IP Address - Rule", "ESCU - AWS Cloud Provisioning From Previously Unseen Region - Rule", "ESCU - AWS Investigate Security Hub alerts by dest - Response Task", "ESCU - AWS Investigate User Activities By ARN - Response Task", "ESCU - Get All AWS Activity From City - Response Task", "ESCU - Get All AWS Activity From Country - Response Task", "ESCU - Get All AWS Activity From IP Address - Response Task", "ESCU - Get All AWS Activity From Region - Response Task"] description = Monitor your AWS provisioning activities for behaviors originating from unfamiliar or unusual locations. These behaviors may indicate that malicious activities are occurring somewhere within your network. -narrative = Because most enterprise AWS activities originate from familiar geographic locations, monitoring for activity from unknown or unusual regions is an important security measure. This indicator can be especially useful in environments where it is impossible to add specific IPs to an allow list because they vary.\ +narrative = Because most enterprise AWS activities originate from familiar geographic locations, monitoring for activity from unknown or unusual regions is an important security measure. This indicator can be especially useful in environments where it is impossible to add specific IPs to an allow list because they vary. \ This Analytic Story was designed to provide you with flexibility in the precision you employ in specifying legitimate geographic regions. It can be as specific as an IP address or a city, or as broad as a region (think state) or an entire country. 
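As a simplified stand-in for the lookup-driven "previously unseen" detections referenced above, the sketch below flags AWS regions whose first EC2 RunInstances call in the last 30 days happened within the past day. It assumes CloudTrail is ingested under sourcetype aws:cloudtrail (the Splunk Add-on for AWS default); the time windows are arbitrary, and the packaged detections with their supporting lookups remain the more robust approach.
sourcetype=aws:cloudtrail eventName=RunInstances earliest=-30d
| stats earliest(_time) as firstSeen latest(_time) as lastSeen count by awsRegion
| where firstSeen >= relative_time(now(), "-1d")
| convert ctime(firstSeen) ctime(lastSeen)
Any region surfaced here has no prior RunInstances history in the baseline window and merits a quick check against expected deployment locations.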
By determining how precise you want your geographical locations to be and monitoring for new locations that haven't previously accessed your environment, you can detect adversaries as they begin to probe your environment. Since there are legitimate reasons for activities from unfamiliar locations, this is not a standalone indicator. Nevertheless, location can be a relevant piece of information that you may wish to investigate further. [analytic_story://Common Phishing Frameworks] @@ -17585,7 +17585,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Splunk Research Team spec_version = 3 searches = ["ESCU - Detect DNS requests to Phishing Sites leveraging EvilGinx2 - Rule", "ESCU - Get Certificate logs for a domain - Response Task"] description = Detect DNS and web requests to fake websites generated by the EvilGinx2 toolkit. These websites are designed to fool unwitting users who have clicked on a malicious link in a phishing email. -narrative = As most people know, these emails use fraudulent domains, [email scraping](https://www.cyberscoop.com/emotet-trojan-phishing-scraping-templates-cofense-geodo/), familiar contact names inserted as senders, and other tactics to lure targets into clicking a malicious link, opening an attachment with a [nefarious payload](https://www.cyberscoop.com/emotet-trojan-phishing-scraping-templates-cofense-geodo/), or entering sensitive personal information that perpetrators may intercept. This attack technique requires a relatively low level of skill and allows adversaries to easily cast a wide net. Because phishing is a technique that relies on human psychology, you will never be able to eliminate this vulnerability 100%. But you can use automated detection to significantly reduce the risks.\ +narrative = As most people know, these emails use fraudulent domains, [email scraping](https://www.cyberscoop.com/emotet-trojan-phishing-scraping-templates-cofense-geodo/), familiar contact names inserted as senders, and other tactics to lure targets into clicking a malicious link, opening an attachment with a [nefarious payload](https://www.cyberscoop.com/emotet-trojan-phishing-scraping-templates-cofense-geodo/), or entering sensitive personal information that perpetrators may intercept. This attack technique requires a relatively low level of skill and allows adversaries to easily cast a wide net. Because phishing is a technique that relies on human psychology, you will never be able to eliminate this vulnerability 100%. But you can use automated detection to significantly reduce the risks. \ This Analytic Story focuses on detecting signs of MiTM attacks enabled by [EvilGinx2](https://github.com/kgretzky/evilginx2), a toolkit that sets up a transparent proxy between the targeted site and the user. In this way, the attacker is able to intercept credentials and two-factor identification tokens. It employs a proxy template to allow a registered domain to impersonate targeted sites, such as Linkedin, Amazon, Okta, Github, Twitter, Instagram, Reddit, Office 365, and others. It can even register SSL certificates and camouflage them via a URL shortener, making them difficult to detect. Searches in this story look for signs of MiTM attacks enabled by EvilGinx2. 
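As a concrete, if simplified, illustration of the EvilGinx2 angle, the sketch below looks for DNS queries that contain a monitored brand string but do not fall under that brand's legitimate domain. It assumes DNS logs are CIM-mapped to the Network_Resolution data model; the brand list and allow-list patterns are placeholders to tune, and the packaged EvilGinx2 detection in this story is more thorough and should be preferred.
| tstats count from datamodel=Network_Resolution by DNS.src DNS.query
| rename "DNS.*" as *
| eval query=lower(query)
| where (like(query, "%linkedin%") OR like(query, "%okta%") OR like(query, "%office365%"))
    AND NOT (like(query, "%.linkedin.com") OR like(query, "%.okta.com") OR like(query, "%.office.com") OR like(query, "%.office365.com"))
Matches are candidate lookalike or proxy domains of the kind EvilGinx2 registers to sit between the victim and the real site.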
[analytic_story://Container Implantation Monitoring and Investigation] @@ -17652,7 +17652,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "David Dorsey"}] spec_version = 3 searches = ["ESCU - Prohibited Software On Endpoint - Rule", "ESCU - Attacker Tools On Endpoint - Rule", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task"] description = Identify and investigate prohibited/unauthorized software or processes that may be concealing malicious behavior within your environment. -narrative = It is critical to identify unauthorized software and processes running on enterprise endpoints and determine whether they are likely to be malicious. This Analytic Story requires the user to populate the Interesting Processes table within Enterprise Security with prohibited processes. An included support search will augment this data, adding information on processes thought to be malicious. This search requires data from endpoint detection-and-response solutions, endpoint data sources (such as Sysmon), or Windows Event Logs--assuming that the Active Directory administrator has enabled process tracking within the System Event Audit Logs.\ +narrative = It is critical to identify unauthorized software and processes running on enterprise endpoints and determine whether they are likely to be malicious. This Analytic Story requires the user to populate the Interesting Processes table within Enterprise Security with prohibited processes. An included support search will augment this data, adding information on processes thought to be malicious. This search requires data from endpoint detection-and-response solutions, endpoint data sources (such as Sysmon), or Windows Event Logs--assuming that the Active Directory administrator has enabled process tracking within the System Event Audit Logs. \ It is important to investigate any software identified as suspicious, in order to understand how it was installed or executed. Analyzing authentication logs or any historic notable events might elicit additional investigative leads of interest. For best results, schedule the search to run every two weeks. [analytic_story://Office 365 Detections] @@ -17697,7 +17697,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "David Dorsey"}] spec_version = 3 searches = ["ESCU - EC2 Instance Modified With Previously Unseen User - Rule", "ESCU - AWS Investigate User Activities By ARN - Response Task", "ESCU - Get EC2 Instance Details by instanceId - Response Task", "ESCU - Get Notable History - Response Task"] description = Identify unusual changes to your AWS EC2 instances that may indicate malicious activity. Modifications to your EC2 instances by previously unseen users is an example of an activity that may warrant further investigation. -narrative = A common attack technique is to infiltrate a cloud instance and make modifications. The adversary can then secure access to your infrastructure or hide their activities. So it's important to stay alert to changes that may indicate that your environment has been compromised.\ +narrative = A common attack technique is to infiltrate a cloud instance and make modifications. The adversary can then secure access to your infrastructure or hide their activities. So it's important to stay alert to changes that may indicate that your environment has been compromised. 
\ Searches within this Analytic Story can help you detect the presence of a threat by monitoring for EC2 instances that have been created or changed--either by users that have never previously performed these activities or by known users who modify or create instances in a way that have not been done before. This story also provides investigative searches that help you go deeper once you detect suspicious behavior. [analytic_story://Web Fraud Detection] @@ -17709,11 +17709,11 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Jim Apger"}] spec_version = 3 searches = ["ESCU - Web Fraud - Account Harvesting - Rule", "ESCU - Web Fraud - Anomalous User Clickspeed - Rule", "ESCU - Web Fraud - Password Sharing Across Accounts - Rule", "ESCU - Get Emails From Specific Sender - Response Task", "ESCU - Get Notable History - Response Task", "ESCU - Get Web Session Information via session id - Response Task"] description = Monitor your environment for activity consistent with common attack techniques bad actors use when attempting to compromise web servers or other web-related assets. -narrative = The Federal Bureau of Investigations (FBI) defines Internet fraud as the use of Internet services or software with Internet access to defraud victims or to otherwise take advantage of them. According to the Bureau, Internet crime schemes are used to steal millions of dollars each year from victims and continue to plague the Internet through various methods. The agency includes phishing scams, data breaches, Denial of Service (DOS) attacks, email account compromise, malware, spoofing, and ransomware in this category.\ -These crimes are not the fraud itself, but rather the attack techniques commonly employed by fraudsters in their pursuit of data that enables them to commit malicious actssuch as obtaining and using stolen credit cards. They represent a serious problem that is steadily increasing and not likely to go away anytime soon.\ -When developing a strategy for preventing fraud in your environment, its important to look across all of your web services for evidence that attackers are abusing enterprise resources to enumerate systems, harvest data for secondary fraudulent activity, or abuse terms of service.This Analytic Story looks for evidence of common Internet attack techniques that could be indicative of web fraud in your environmentincluding account harvesting, anomalous user clickspeed, and password sharing across accounts, to name just a few.\ -The account-harvesting search focuses on web pages used for user-account registration. It detects the creation of a large number of user accounts using the same email domain name, a type of activity frequently seen in advance of a fraud campaign.\ -The anomalous clickspeed search looks for users who are moving through your website at a faster-than-normal speed or with a perfect click cadence (high periodicity or low standard deviation), which could indicate that the user is a script, not an actual human.\ +narrative = The Federal Bureau of Investigations (FBI) defines Internet fraud as the use of Internet services or software with Internet access to defraud victims or to otherwise take advantage of them. According to the Bureau, Internet crime schemes are used to steal millions of dollars each year from victims and continue to plague the Internet through various methods. The agency includes phishing scams, data breaches, Denial of Service (DOS) attacks, email account compromise, malware, spoofing, and ransomware in this category. 
\ +These crimes are not the fraud itself, but rather the attack techniques commonly employed by fraudsters in their pursuit of data that enables them to commit malicious acts, such as obtaining and using stolen credit cards. They represent a serious problem that is steadily increasing and not likely to go away anytime soon. \ +When developing a strategy for preventing fraud in your environment, it's important to look across all of your web services for evidence that attackers are abusing enterprise resources to enumerate systems, harvest data for secondary fraudulent activity, or abuse terms of service. This Analytic Story looks for evidence of common Internet attack techniques that could be indicative of web fraud in your environment, including account harvesting, anomalous user clickspeed, and password sharing across accounts, to name just a few. \ +The account-harvesting search focuses on web pages used for user-account registration. It detects the creation of a large number of user accounts using the same email domain name, a type of activity frequently seen in advance of a fraud campaign. \ +The anomalous clickspeed search looks for users who are moving through your website at a faster-than-normal speed or with a perfect click cadence (high periodicity or low standard deviation), which could indicate that the user is a script, not an actual human. \ Another search detects incidents wherein a single password is used across multiple accounts, which may indicate that a fraudster has infiltrated your environment and embedded a common password within a script.

[analytic_story://Detect Zerologon Attack] @@ -17747,9 +17747,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Rico Valdez"}] spec_version = 3 searches = ["ESCU - First time seen command line argument - Rule", "ESCU - Create local admin accounts using net exe - Rule", "ESCU - Detect New Local Admin account - Rule", "ESCU - Detect PsExec With accepteula Flag - Rule", "ESCU - Detect Renamed PSExec - Rule", "ESCU - Malicious PowerShell Process - Execution Policy Bypass - Rule", "ESCU - Processes launching netsh - Rule", "ESCU - Registry Keys Used For Persistence - Rule", "ESCU - Sc exe Manipulating Windows Services - Rule", "ESCU - Scheduled Task Deleted Or Created via CMD - Rule", "ESCU - Single Letter Process On Endpoint - Rule", "ESCU - Suspicious Reg exe Process - Rule", "ESCU - Detect Outbound SMB Traffic - Rule", "ESCU - SMB Traffic Spike - Rule", "ESCU - SMB Traffic Spike - MLTK - Rule", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process File Activity - Response Task", "ESCU - Get Process Info - Response Task", "ESCU - Get Process Information For Port Activity - Response Task"] description = Monitor for suspicious activities associated with DHS Technical Alert US-CERT TA18-074A. Some of the activities that adversaries used in these compromises included spearphishing attacks, malware, watering-hole domains, and many more. -narrative = The frequency of nation-state cyber attacks has increased significantly over the last decade. Employing numerous tactics and techniques, these attacks continue to escalate in complexity.\ -There is a wide range of motivations for these state-sponsored hacks, including stealing valuable corporate, military, or diplomatic data, all of which could confer advantages in various arenas.
They may also target critical infrastructure.\ -One joint Technical Alert (TA) issued by the Department of Homeland Security and the FBI in mid-March of 2018 attributed some cyber activity targeting utility infrastructure to operatives sponsored by the Russian government. The hackers executed spearphishing attacks, installed malware, employed watering-hole domains, and more. While they caused no physical damage, the attacks provoked fears that a nation-state could turn off water, redirect power, or compromise a nuclear power plant.\ +narrative = The frequency of nation-state cyber attacks has increased significantly over the last decade. Employing numerous tactics and techniques, these attacks continue to escalate in complexity. \ +There is a wide range of motivations for these state-sponsored hacks, including stealing valuable corporate, military, or diplomatic data, all of which could confer advantages in various arenas. They may also target critical infrastructure. \ +One joint Technical Alert (TA) issued by the Department of Homeland Security and the FBI in mid-March of 2018 attributed some cyber activity targeting utility infrastructure to operatives sponsored by the Russian government. The hackers executed spearphishing attacks, installed malware, employed watering-hole domains, and more. While they caused no physical damage, the attacks provoked fears that a nation-state could turn off water, redirect power, or compromise a nuclear power plant. \ Suspicious activities--spikes in SMB traffic, processes that launch netsh (to modify the network configuration), suspicious registry modifications, and many more--may all be events you may wish to investigate further. While the use of these techniques may be an indication that a nation-state actor is attempting to compromise your environment, it is important to note that these techniques are often employed by other groups, as well.

[analytic_story://Disabling Security Tools] @@ -17772,7 +17772,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Bhavin Patel"}] spec_version = 3 searches = ["ESCU - Large Volume of DNS ANY Queries - Rule", "ESCU - Get Notable History - Response Task"] description = DNS poses a serious threat as a Denial of Service (DOS) amplifier if it responds to `ANY` queries. This Analytic Story can help you detect attackers who may be abusing your company's DNS infrastructure to launch amplification attacks, causing Denial of Service to other victims. -narrative = The Domain Name System (DNS) is the protocol used to map domain names to IP addresses. It has been proven to work very well for its intended function. However, if DNS is misconfigured, servers can be abused by attackers to levy amplification or redirection attacks against victims. Because DNS responses to `ANY` queries are so much larger than the queries themselves--and can be made with a UDP packet, which does not require a handshake--attackers can spoof the source address of the packet and cause much more data to be sent to the victim than if they sent the traffic themselves. Responses to `ANY` requests will be larger than normal DNS responses, because the server provides significant details, such as MX records and associated IP addresses. A large volume of this traffic can result in a DOS on the victim's machine.
This misconfiguration leads to two possible victims, the first being the DNS servers participating in an attack and the other being the hosts that are the targets of the DOS attack.\ +narrative = The Domain Name System (DNS) is the protocol used to map domain names to IP addresses. It has been proven to work very well for its intended function. However, if DNS is misconfigured, servers can be abused by attackers to levy amplification or redirection attacks against victims. Because DNS responses to `ANY` queries are so much larger than the queries themselves--and can be made with a UDP packet, which does not require a handshake--attackers can spoof the source address of the packet and cause much more data to be sent to the victim than if they sent the traffic themselves. Responses to `ANY` requests will be larger than normal DNS responses, because the server provides significant details, such as MX records and associated IP addresses. A large volume of this traffic can result in a DOS on the victim's machine. This misconfiguration leads to two possible victims, the first being the DNS servers participating in an attack and the other being the hosts that are the targets of the DOS attack. \ The search in this story can help you detect if attackers are abusing your company's DNS infrastructure to launch DNS amplification attacks causing Denial of Service to other victims.

[analytic_story://DNS Hijacking] @@ -17784,14 +17784,14 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Bhavin Patel"}] spec_version = 3 searches = ["ESCU - Clients Connecting to Multiple DNS Servers - Rule", "ESCU - DNS Query Requests Resolved by Unauthorized DNS Servers - Rule", "ESCU - DNS record changed - Rule", "ESCU - Detect DGA domains using pretrained model in DSDL - Rule", "ESCU - Detect DNS Data Exfiltration using pretrained model in DSDL - Rule", "ESCU - Detect hosts connecting to dynamic domain providers - Rule", "ESCU - Detect suspicious DNS TXT records using pretrained model in DSDL - Rule", "ESCU - Get DNS Server History for a host - Response Task"] description = Secure your environment against DNS hijacks with searches that help you detect and investigate unauthorized changes to DNS records. -narrative = Dubbed the Achilles heel of the Internet (see https://www.f5.com/labs/articles/threat-intelligence/dns-is-still-the-achilles-heel-of-the-internet-25613), DNS plays a critical role in routing web traffic but is notoriously vulnerable to attack. One reason is its distributed nature. It relies on unstructured connections between millions of clients and servers over inherently insecure protocols.\ -The gravity and extent of the importance of securing DNS from attacks is undeniable. The fallout of compromised DNS can be disastrous. Not only can hackers bring down an entire business, they can intercept confidential information, emails, and login credentials, as well.\ -On January 22, 2019, the US Department of Homeland Security's Cybersecurity and Infrastructure Security Agency (CISA) raised awareness of some high-profile DNS hijacking attacks against infrastructure, both in the United States and abroad. It issued Emergency Directive 19-01 (see https://cyber.dhs.gov/ed/19-01/), which summarized the activity and required government agencies to take the following four actions, all within 10 days:\ -1.
For all .gov or other agency-managed domains, audit public DNS records on all authoritative and secondary DNS servers, verify that they resolve to the intended location or report them to CISA.\ -1. Update the passwords for all accounts on systems that can make changes to each agency's DNS records.\ -1. Implement multi-factor authentication (MFA) for all accounts on systems that can make changes to each agency's DNS records or, if impossible, provide CISA with the names of systems, the reasons why MFA cannot be enabled within the required timeline, and an ETA for when it can be enabled.\ -1. CISA will begin regular delivery of newly added certificates to Certificate Transparency (CT) logs for agency domains via the Cyber Hygiene service. Upon receipt, agencies must immediately begin monitoring CT log data for certificates issued that they did not request. If an agency confirms that a certificate was unauthorized, it must report the certificate to the issuing certificate authority and to CISA. Of course, it makes sense to put equivalent actions in place within your environment, as well.\ -In DNS hijacking, the attacker assumes control over an account or makes use of a DNS service exploit to make changes to DNS records. Once they gain access, attackers can substitute their own MX records, name-server records, and addresses, redirecting emails and traffic through their infrastructure, where they can read, copy, or modify information seen. They can also generate valid encryption certificates to help them avoid browser-certificate checks. In one notable attack on the Internet service provider GoDaddy, the hackers altered Sender Policy Framework (SPF) records, a relatively minor change that did not inflict excessive damage but allowed for more effective spam campaigns.\ +narrative = Dubbed the Achilles heel of the Internet (see https://www.f5.com/labs/articles/threat-intelligence/dns-is-still-the-achilles-heel-of-the-internet-25613), DNS plays a critical role in routing web traffic but is notoriously vulnerable to attack. One reason is its distributed nature. It relies on unstructured connections between millions of clients and servers over inherently insecure protocols. \ +The gravity and extent of the importance of securing DNS from attacks is undeniable. The fallout of compromised DNS can be disastrous. Not only can hackers bring down an entire business, they can intercept confidential information, emails, and login credentials, as well. \ +On January 22, 2019, the US Department of Homeland Security's Cybersecurity and Infrastructure Security Agency (CISA) raised awareness of some high-profile DNS hijacking attacks against infrastructure, both in the United States and abroad. It issued Emergency Directive 19-01 (see https://cyber.dhs.gov/ed/19-01/), which summarized the activity and required government agencies to take the following four actions, all within 10 days: \ +1. For all .gov or other agency-managed domains, audit public DNS records on all authoritative and secondary DNS servers, verify that they resolve to the intended location or report them to CISA. \ +1. Update the passwords for all accounts on systems that can make changes to each agency's DNS records. \ +1. Implement multi-factor authentication (MFA) for all accounts on systems that can make changes to each agency's DNS records or, if impossible, provide CISA with the names of systems, the reasons why MFA cannot be enabled within the required timeline, and an ETA for when it can be enabled. \ +1.
CISA will begin regular delivery of newly added certificates to Certificate Transparency (CT) logs for agency domains via the Cyber Hygiene service. Upon receipt, agencies must immediately begin monitoring CT log data for certificates issued that they did not request. If an agency confirms that a certificate was unauthorized, it must report the certificate to the issuing certificate authority and to CISA. Of course, it makes sense to put equivalent actions in place within your environment, as well. \ +In DNS hijacking, the attacker assumes control over an account or makes use of a DNS service exploit to make changes to DNS records. Once they gain access, attackers can substitute their own MX records, name-server records, and addresses, redirecting emails and traffic through their infrastructure, where they can read, copy, or modify information seen. They can also generate valid encryption certificates to help them avoid browser-certificate checks. In one notable attack on the Internet service provider GoDaddy, the hackers altered Sender Policy Framework (SPF) records, a relatively minor change that did not inflict excessive damage but allowed for more effective spam campaigns. \ The searches in this Analytic Story help you detect and investigate activities that may indicate that DNS hijacking has taken place within your environment.

[analytic_story://Domain Trust Discovery] @@ -17836,8 +17836,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Bhavin Patel"}] spec_version = 3 searches = ["ESCU - Email Attachments With Lots Of Spaces - Rule", "ESCU - Suspicious Email Attachment Extensions - Rule", "ESCU - Prohibited Software On Endpoint - Rule", "ESCU - Detect Use of cmd exe to Launch Script Interpreters - Rule", "ESCU - Detection of tools built by NirSoft - Rule", "ESCU - Registry Keys Used For Persistence - Rule", "ESCU - SMB Traffic Spike - Rule", "ESCU - SMB Traffic Spike - MLTK - Rule", "ESCU - Get History Of Email Sources - Response Task", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task", "ESCU - Get Process Information For Port Activity - Response Task"] description = Detect rarely used executables, specific registry paths that may confer malware survivability and persistence, instances where cmd.exe is used to launch script interpreters, and other indicators that the Emotet financial malware has compromised your environment. -narrative = The trojan downloader known as Emotet first surfaced in 2014, when it was discovered targeting the banking industry to steal credentials.
However, according to a joint technical alert (TA) issued by three government agencies (https://www.us-cert.gov/ncas/alerts/TA18-201A), Emotet has evolved far beyond those beginnings to become what a ThreatPost article called a threat-delivery service (see https://threatpost.com/emotet-malware-evolves-beyond-banking-to-threat-delivery-service/134342/). For example, in early 2018, Emotet was found to be using its loader function to spread the Quakbot and Ransomware variants. \ +According to the TA, the malware continues to be among the most costly and destructive malware affecting the private and public sectors. Researchers have linked it to the threat group Mealybug, which has also been on the security community's radar since 2014. \ The searches in this Analytic Story will help you find executables that are rarely used in your environment, specific registry paths that malware often uses to ensure survivability and persistence, instances where cmd.exe is used to launch script interpreters, and other indicators that Emotet or other malware has compromised your environment. [analytic_story://F5 Authentication Bypass with TMUI] @@ -17849,8 +17849,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - F5 TMUI Authentication Bypass - Rule"] description = Research into leading software revealed vulnerabilities in both Apache Tomcat and the F5 BIG-IP suite. Apache's AJP protocol vulnerability, designated CVE-2022-26377, relates to AJP request smuggling. Successful exploitation enables unauthorized system activities. F5 BIG-IP Virtual Edition exhibited a distinct vulnerability, an authentication bypass in the Traffic Management User Interface (TMUI), resulting in system compromise. Assigned CVE-2023-46747, this vulnerability also arose from request smuggling, bearing similarity to CVE-2022-26377. Given the wide adoption of both Apache Tomcat and F5 products, these vulnerabilities present grave risks to organizations. Remediation and vulnerability detection mechanisms are essential to address these threats effectively. -narrative = Both Apache Tomcat's AJP protocol and F5's BIG-IP Virtual Edition have been exposed to critical vulnerabilities. Apache's CVE-2022-26377 pertains to request smuggling by manipulating the "Transfer-Encoding" header. If successfully exploited, this allows attackers to bypass security controls and undertake unauthorized actions.\ -Similarly, F5 BIG-IP unveiled an authentication bypass vulnerability, CVE-2023-46747. Originating from the TMUI, this vulnerability leads to full system compromise. While distinct, it shares characteristics with Apache's vulnerability, primarily rooted in request smuggling. This vulnerability drew from past F5 CVEs, particularly CVE-2020-5902 and CVE-2022-1388, both previously exploited in real-world scenarios. These highlighted vulnerabilities in Apache HTTP and Apache Tomcat services, as well as authentication flaws in the F5 BIG-IP API.\ +narrative = Both Apache Tomcat's AJP protocol and F5's BIG-IP Virtual Edition have been exposed to critical vulnerabilities. Apache's CVE-2022-26377 pertains to request smuggling by manipulating the "Transfer-Encoding" header. If successfully exploited, this allows attackers to bypass security controls and undertake unauthorized actions. \ +Similarly, F5 BIG-IP unveiled an authentication bypass vulnerability, CVE-2023-46747. Originating from the TMUI, this vulnerability leads to full system compromise. 
While distinct, it shares characteristics with Apache's vulnerability, primarily rooted in request smuggling. This vulnerability drew from past F5 CVEs, particularly CVE-2020-5902 and CVE-2022-1388, both previously exploited in real-world scenarios. These highlighted vulnerabilities in Apache HTTP and Apache Tomcat services, as well as authentication flaws in the F5 BIG-IP API. \ Nuclei detection templates offer a proactive solution for identifying and mitigating these vulnerabilities. Integrated into vulnerability management frameworks, these templates notify organizations of potential risks, forming a base for further detection strategies. For detection engineers, understanding these vulnerabilities is crucial. Recognizing the mechanisms and effects of request smuggling, especially in Apache's and F5's context, provides a roadmap to effective detection and response. Prompt detection is a linchpin, potentially stymieing further, more destructive attacks. [analytic_story://F5 BIG-IP Vulnerability CVE-2022-1388] @@ -17939,8 +17939,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Rod Soto"}] spec_version = 3 searches = ["ESCU - GCP Detect gcploit framework - Rule", "ESCU - GCP Detect accounts with high risk roles by project - Rule", "ESCU - GCP Detect high risk permissions by resource and account - Rule", "ESCU - gcp detect oauth token abuse - Rule", "ESCU - Get Notable History - Response Task"] description = Track when a user assumes an IAM role in another GCP account to obtain cross-account access to services and resources in that account. Accessing new roles could be an indication of malicious activity. -narrative = Google Cloud Platform (GCP) admins manage access to GCP resources and services across the enterprise using GCP Identity and Access Management (IAM) functionality. IAM provides the ability to create and manage GCP users, groups, and roles, each with their own unique set of privileges and defined access to specific resources (such as Compute instances, the GCP Management Console, API, or the command-line interface). Unlike conventional (human) users, IAM roles are potentially assumable by anyone in the organization. They provide users with dynamically created temporary security credentials that expire within a set time period.\ -In the window between when the temporary credentials are issued and when they expire, a user could leverage the temporary credentials to wreak havoc throughout the environment: spinning up or removing instances, creating new users, elevating privileges, and carrying out other malicious activities.\ +narrative = Google Cloud Platform (GCP) admins manage access to GCP resources and services across the enterprise using GCP Identity and Access Management (IAM) functionality. IAM provides the ability to create and manage GCP users, groups, and roles, each with their own unique set of privileges and defined access to specific resources (such as Compute instances, the GCP Management Console, API, or the command-line interface). Unlike conventional (human) users, IAM roles are potentially assumable by anyone in the organization. They provide users with dynamically created temporary security credentials that expire within a set time period. 
\ +In the window between when the temporary credentials are issued and when they expire, a user could leverage the temporary credentials to wreak havoc throughout the environment: spinning up or removing instances, creating new users, elevating privileges, and carrying out other malicious activities. \ This Analytic Story includes searches that will help you monitor your GCP Audit logs for evidence of suspicious cross-account activity. For example, while accessing multiple GCP accounts and roles may be perfectly valid behavior, it may be suspicious when an account requests privileges of an account it has not accessed in the past. After identifying suspicious activities, you can use the provided investigative searches to help you probe more deeply. [analytic_story://Graceful Wipe Out Attack] @@ -17963,8 +17963,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Email servers sending high volume traffic to hosts - Rule", "ESCU - Dump LSASS via procdump Rename - Rule", "ESCU - Any Powershell DownloadString - Rule", "ESCU - Detect Exchange Web Shell - Rule", "ESCU - Detect New Local Admin account - Rule", "ESCU - Detect PsExec With accepteula Flag - Rule", "ESCU - Detect Renamed PSExec - Rule", "ESCU - Detect Webshell Exploit Behavior - Rule", "ESCU - Dump LSASS via comsvcs DLL - Rule", "ESCU - Dump LSASS via procdump - Rule", "ESCU - Malicious PowerShell Process - Execution Policy Bypass - Rule", "ESCU - Nishang PowershellTCPOneLine - Rule", "ESCU - Ntdsutil Export NTDS - Rule", "ESCU - PowerShell - Connect To Internet With Hidden Window - Rule", "ESCU - Set Default PowerShell Execution Policy To Unrestricted or Bypass - Rule", "ESCU - W3WP Spawning Shell - Rule"] description = HAFNIUM group was identified by Microsoft as exploiting 4 Microsoft Exchange CVEs in the wild - CVE-2021-26855, CVE-2021-26857, CVE-2021-26858 and CVE-2021-27065. -narrative = On Tuesday, March 2, 2021, Microsoft released a set of security patches for its mail server, Microsoft Exchange. These patches respond to a group of vulnerabilities known to impact Exchange 2013, 2016, and 2019. It is important to note that an Exchange 2010 security update has also been issued, though the CVEs do not reference that version as being vulnerable.\ -While the CVEs do not shed much light on the specifics of the vulnerabilities or exploits, the first vulnerability (CVE-2021-26855) has a remote network attack vector that allows the attacker, a group Microsoft named HAFNIUM, to authenticate as the Exchange server. Three additional vulnerabilities (CVE-2021-26857, CVE-2021-26858, and CVE-2021-27065) were also identified as part of this activity. When chained together along with CVE-2021-26855 for initial access, the attacker would have complete control over the Exchange server. This includes the ability to run code as SYSTEM and write to any path on the server.\ +narrative = On Tuesday, March 2, 2021, Microsoft released a set of security patches for its mail server, Microsoft Exchange. These patches respond to a group of vulnerabilities known to impact Exchange 2013, 2016, and 2019. It is important to note that an Exchange 2010 security update has also been issued, though the CVEs do not reference that version as being vulnerable. 
\ +While the CVEs do not shed much light on the specifics of the vulnerabilities or exploits, the first vulnerability (CVE-2021-26855) has a remote network attack vector that allows the attacker, a group Microsoft named HAFNIUM, to authenticate as the Exchange server. Three additional vulnerabilities (CVE-2021-26857, CVE-2021-26858, and CVE-2021-27065) were also identified as part of this activity. When chained together along with CVE-2021-26855 for initial access, the attacker would have complete control over the Exchange server. This includes the ability to run code as SYSTEM and write to any path on the server. \ The following Splunk detections assist with identifying the HAFNIUM group's tradecraft and methodology. [analytic_story://Hermetic Wiper] @@ -17987,9 +17987,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Rico Valdez"}] spec_version = 3 searches = ["ESCU - First time seen command line argument - Rule", "ESCU - Suspicious File Write - Rule", "ESCU - Create or delete windows shares using net exe - Rule", "ESCU - Remote Desktop Process Running On System - Rule", "ESCU - Detect Outbound SMB Traffic - Rule", "ESCU - DNS Query Length Outliers - MLTK - Rule", "ESCU - DNS Query Length With High Standard Deviation - Rule", "ESCU - Remote Desktop Network Traffic - Rule", "ESCU - SMB Traffic Spike - Rule", "ESCU - SMB Traffic Spike - MLTK - Rule", "ESCU - Get DNS Server History for a host - Response Task", "ESCU - Get DNS traffic ratio - Response Task", "ESCU - Get History Of Email Sources - Response Task", "ESCU - Get Notable History - Response Task", "ESCU - Get Outbound Emails to Hidden Cobra Threat Actors - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task", "ESCU - Get Process Information For Port Activity - Response Task", "ESCU - Get Process Responsible For The DNS Traffic - Response Task", "ESCU - Investigate Successful Remote Desktop Authentications - Response Task"] description = Monitor for and investigate activities, including the creation or deletion of hidden shares and file writes, that may be evidence of infiltration by North Korean government-sponsored cybercriminals. Details of this activity were reported in DHS Report TA-18-149A. -narrative = North Korea's government-sponsored "cyber army" has been slowly building momentum and gaining sophistication over the last 15 years or so. As a result, the group's activity, which the US government refers to as "Hidden Cobra," has surreptitiously crept onto the collective radar as a preeminent global threat.\ -These state-sponsored actors are thought to be responsible for everything from a hack on a South Korean nuclear plant to an attack on Sony in anticipation of its release of the movie "The Interview" at the end of 2014. They're also notorious for cyberespionage. In recent years, the group seems to be focused on financial crimes, such as cryptojacking.\ -In June of 2018, the Department of Homeland Security, together with the FBI and other U.S. government partners, issued Technical Alert (TA-18-149A) to advise the public about two variants of North Korean malware. One variant, dubbed "Joanap," is a multi-stage peer-to-peer botnet that allows North Korean state actors to exfiltrate data, download and execute secondary payloads, and initialize proxy communications. The other variant, "Brambul," is a Windows 32-bit SMB worm that is dropped into a victim network. 
When executed, the malware attempts to spread laterally within a victim's local subnet, connecting via the SMB protocol and initiating brute-force password attacks. It reports details to the Hidden Cobra actors via email, so they can use the information for secondary remote operations.\ +narrative = North Korea's government-sponsored "cyber army" has been slowly building momentum and gaining sophistication over the last 15 years or so. As a result, the group's activity, which the US government refers to as "Hidden Cobra," has surreptitiously crept onto the collective radar as a preeminent global threat. \ +These state-sponsored actors are thought to be responsible for everything from a hack on a South Korean nuclear plant to an attack on Sony in anticipation of its release of the movie "The Interview" at the end of 2014. They're also notorious for cyberespionage. In recent years, the group seems to be focused on financial crimes, such as cryptojacking. \ +In June of 2018, the Department of Homeland Security, together with the FBI and other U.S. government partners, issued Technical Alert (TA-18-149A) to advise the public about two variants of North Korean malware. One variant, dubbed "Joanap," is a multi-stage peer-to-peer botnet that allows North Korean state actors to exfiltrate data, download and execute secondary payloads, and initialize proxy communications. The other variant, "Brambul," is a Windows 32-bit SMB worm that is dropped into a victim network. When executed, the malware attempts to spread laterally within a victim's local subnet, connecting via the SMB protocol and initiating brute-force password attacks. It reports details to the Hidden Cobra actors via email, so they can use the information for secondary remote operations. \ Among other searches in this Analytic Story is a detection search that looks for the creation or deletion of hidden shares, such as "adnim$", which the Hidden Cobra malware creates on the target system. Another looks for the creation of three malicious files associated with the malware. You can also use a search in this story to investigate activity that indicates that malware is sending email back to the attackers. [analytic_story://IcedID] @@ -18012,8 +18012,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Windows Disable Windows Event Logging Disable HTTP Logging - Rule", "ESCU - Windows IIS Components Add New Module - Rule", "ESCU - Windows IIS Components Get-WebGlobalModule Module Query - Rule", "ESCU - Windows IIS Components Module Failed to Load - Rule", "ESCU - Windows IIS Components New Module Added - Rule", "ESCU - Windows PowerShell Add Module to Global Assembly Cache - Rule", "ESCU - Windows PowerShell Disable HTTP Logging - Rule", "ESCU - Windows PowerShell IIS Components WebGlobalModule Usage - Rule", "ESCU - Windows Server Software Component GACUtil Install to GAC - Rule"] description = Adversaries may install malicious components that run on Internet Information Services (IIS) web servers to establish persistence. -narrative = IIS provides several mechanisms to extend the functionality of the web servers. For example, Internet Server Application Programming Interface (ISAPI) extensions and filters can be installed to examine and/or modify incoming and outgoing IIS web requests. Extensions and filters are deployed as DLL files that export three functions - Get{Extension/Filter}Version, Http{Extension/Filter}Proc, and (optionally) Terminate{Extension/Filter}. 
IIS modules may also be installed to extend IIS web servers.\ -Adversaries may install malicious ISAPI extensions and filters to observe and/or modify traffic, execute commands on compromised machines, or proxy command and control traffic. ISAPI extensions and filters may have access to all IIS web requests and responses. For example, an adversary may abuse these mechanisms to modify HTTP responses in order to distribute malicious commands/content to previously compromised hosts.\ +narrative = IIS provides several mechanisms to extend the functionality of the web servers. For example, Internet Server Application Programming Interface (ISAPI) extensions and filters can be installed to examine and/or modify incoming and outgoing IIS web requests. Extensions and filters are deployed as DLL files that export three functions - Get{Extension/Filter}Version, Http{Extension/Filter}Proc, and (optionally) Terminate{Extension/Filter}. IIS modules may also be installed to extend IIS web servers. \ +Adversaries may install malicious ISAPI extensions and filters to observe and/or modify traffic, execute commands on compromised machines, or proxy command and control traffic. ISAPI extensions and filters may have access to all IIS web requests and responses. For example, an adversary may abuse these mechanisms to modify HTTP responses in order to distribute malicious commands/content to previously compromised hosts. \ Adversaries may also install malicious IIS modules to observe and/or modify traffic. IIS 7.0 introduced modules that provide the same unrestricted access to HTTP requests and responses as ISAPI extensions and filters. IIS modules can be written as a DLL that exports RegisterModule, or as a .NET application that interfaces with ASP.NET APIs to access IIS HTTP requests. (reference MITRE) [analytic_story://Industroyer2] @@ -18080,8 +18080,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Ivanti EPMM Remote Unauthenticated API Access CVE-2023-35078 - Rule", "ESCU - Ivanti EPMM Remote Unauthenticated API Access CVE-2023-35082 - Rule"] description = Ivanti, a leading technology company, has disclosed two critical zero-day vulnerabilities in its Endpoint Manager Mobile (EPMM) product, CVE-2023-35078 and CVE-2023-35081. A recent update concerning CVE-2023-35082, closely related to CVE-2023-35078, reveals its impact on more versions of Ivanti's software than initially believed. The former allows unauthenticated attackers to obtain sensitive data, modify servers, and access the API, potentially leading to data breaches or malicious system modifications. Meanwhile, CVE-2023-35081 lets authenticated administrators remotely write arbitrary files to the server. Both vulnerabilities have been exploited in targeted attacks against government ministries and could be used in conjunction. With the presence of PoC code for CVE-2023-35078, the risk of broader exploitation has increased. While initially leveraged in limited attacks, the exploitation is expected to rise, possibly involving state-sponsored actors. Organizations are urged to apply immediate patches and conduct regular system assessments to ensure security. -narrative = Ivanti's Endpoint Manager Mobile (EPMM) product, formerly known as MobileIron Core and extensively utilized by IT teams to manage mobile devices, applications, and content, has been found to harbor several critical vulnerabilities. 
Specifically, CVE-2023-35078 allows remote unauthenticated attackers to access sensitive data and make changes to servers. This flaw has been leveraged in targeted attacks against Norwegian government ministries. In addition, CVE-2023-35081 permits an authenticated attacker with administrative privileges to remotely write arbitrary files to the server.\ -Recently, attention has shifted to CVE-2023-35082, which was initially believed to affect only MobileIron Core 11.2 and below. Subsequent investigations revealed its wider influence, affecting EPMM versions 11.10, 11.9, 11.8, and MobileIron Core 11.7 and earlier. This vulnerability facilitates unauthorized access to the API via the URI path /mifs/asfV3/api/v2/.\ +narrative = Ivanti's Endpoint Manager Mobile (EPMM) product, formerly known as MobileIron Core and extensively utilized by IT teams to manage mobile devices, applications, and content, has been found to harbor several critical vulnerabilities. Specifically, CVE-2023-35078 allows remote unauthenticated attackers to access sensitive data and make changes to servers. This flaw has been leveraged in targeted attacks against Norwegian government ministries. In addition, CVE-2023-35081 permits an authenticated attacker with administrative privileges to remotely write arbitrary files to the server. \ +Recently, attention has shifted to CVE-2023-35082, which was initially believed to affect only MobileIron Core 11.2 and below. Subsequent investigations revealed its wider influence, affecting EPMM versions 11.10, 11.9, 11.8, and MobileIron Core 11.7 and earlier. This vulnerability facilitates unauthorized access to the API via the URI path /mifs/asfV3/api/v2/. \ When combined, these vulnerabilities can be exploited to bypass administrative authentication and access control list (ACL) restrictions, leading to malicious file writing and potential OS command execution. Both have been actively exploited, possibly by state-sponsored actors, prompting urgent advisories from Ivanti and Rapid7, alongside CISA. Given the thousands of potentially vulnerable internet-exposed systems and the presence of PoC code for CVE-2023-35078, the risk of extensive exploitation escalates. The situation is further muddled by Ivanti's 2020 acquisition of MobileIron, which had its known issues. Collectively, these vulnerabilities present a significant risk to organizations utilizing Ivanti's EPMM, emphasizing the need for swift patching, vigilant monitoring, and timely application of fixes to counteract potential threats. [analytic_story://Ivanti Sentry Authentication Bypass CVE-2023-38035] @@ -18093,8 +18093,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Ivanti Sentry Authentication Bypass - Rule"] description = A critical vulnerability, designated as CVE-2023-38035, has been identified in Ivanti Sentry (formerly MobileIron Sentry). It affects all supported versions, including 9.18, 9.17, and 9.16, as well as older versions. The vulnerability allows an unauthenticated attacker to access the System Manager Portal (typically hosted on port 8443) and make configuration changes, potentially executing OS commands as root. However, the risk is low for users who haven't exposed port 8443 online. This flaw is distinct from recently disclosed vulnerabilities in other Ivanti products. It's imperative for organizations to check for unrecognized HTTP requests to /services/* as a potential indicator of compromise. 
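The sketch below is not part of the shipped ESCU content; it is a rough illustration of the hunt that the EPMM and Sentry stories call for, assuming the CIM Web data model is populated by proxy, load-balancer, or appliance access logs that can see traffic to these hosts. The two URI patterns come straight from the text above (/mifs/asfV3/api/v2/ for EPMM and /services/ for Sentry); the leading wildcards are an assumption to accommodate sources that log full URLs rather than bare paths.

    | tstats summariesonly=true count min(_time) as firstTime max(_time) as lastTime
        from datamodel=Web
        where Web.url IN ("*/mifs/asfV3/api/v2/*", "*/services/*")
        by Web.src Web.dest Web.url Web.http_method Web.status
    ``` flatten the data model field prefix for readability ```
    | rename Web.* as *
    | sort - count

Treat the results as leads rather than verdicts: known management stations and monitoring probes will appear here and should be filtered for your environment.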
-narrative = CVE-2023-38035 presents a significant security risk in the Ivanti Sentry administration interface. The vulnerability was identified shortly after another notable vulnerability in Ivanti EPMM (CVE-2023-35078) was discovered being exploited in the wild. The current vulnerability allows a malicious actor, without requiring authentication, to access the System Manager Portal, typically hosted on port 8443. Upon successful exploitation, the attacker can make configuration alterations to both the Sentry system and its underlying OS. The potential damage is significant, enabling the attacker to execute commands on the system with root privileges.\ -While this vulnerability scored high on the CVSS scale, its risk is relatively mitigated for clients who have not exposed port 8443 to the internet. The primary exploitation vector is the System Manager Portal, an administrative interface for Sentry.\ +narrative = CVE-2023-38035 presents a significant security risk in the Ivanti Sentry administration interface. The vulnerability was identified shortly after another notable vulnerability in Ivanti EPMM (CVE-2023-35078) was discovered being exploited in the wild. The current vulnerability allows a malicious actor, without requiring authentication, to access the System Manager Portal, typically hosted on port 8443. Upon successful exploitation, the attacker can make configuration alterations to both the Sentry system and its underlying OS. The potential damage is significant, enabling the attacker to execute commands on the system with root privileges. \ +While this vulnerability scored high on the CVSS scale, its risk is relatively mitigated for clients who have not exposed port 8443 to the internet. The primary exploitation vector is the System Manager Portal, an administrative interface for Sentry. \ As of now, definitive indicators of compromise (IoCs) are elusive. However, any unexpected HTTP requests to the endpoint /services/* could be a red flag. It's worth noting that the exploited endpoint might not be the sole vulnerable point, suggesting other potential gateways for attackers. Ivanti Sentry's system doesn't provide a typical Unix shell, but in the event of a known system breach, the /var/log/tomcat2/ directory contains access logs that may reveal accessed endpoints. Additionally, web interface logs may provide insights into suspicious activities and should be monitored closely. [analytic_story://JBoss Vulnerability] @@ -18106,20 +18106,20 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Bhavin Patel"}] spec_version = 3 searches = ["ESCU - Detect attackers scanning for vulnerable JBoss servers - Rule", "ESCU - Detect malicious requests to exploit JBoss servers - Rule", "ESCU - Get Notable History - Response Task"] description = In March of 2016, adversaries were seen using JexBoss--an open-source utility used for testing and exploiting JBoss application servers. These searches help detect evidence of these attacks, such as network connections to external resources or web services spawning atypical child processes, among others. -narrative = This Analytic Story looks for probing and exploitation attempts targeting JBoss application servers. While the vulnerabilities associated with this story are rather dated, they were leveraged in a spring 2016 campaign in connection with the Samsam ransomware variant. 
Incidents involving this ransomware are unique, in that they begin with attacks against vulnerable services, rather than the phishing or drive-by attacks more common with ransomware. In this case, vulnerable JBoss applications appear to be the target of choice.\ -It is helpful to understand how often a notable event generated by this story occurs, as well as the commonalities between some of these events, both of which may provide clues about whether this is a common occurrence of minimal concern or a rare event that may require more extensive investigation. It may also help to understand whether the issue is restricted to a single user/system or whether it is broader in scope.\ -When looking at the target of the behavior uncovered by the event, you should note the sensitivity of the user and/or system to help determine the potential impact. It is also helpful to identify other recent events involving the target. This can help tie different events together and give further situational awareness regarding the target host.\ -Various types of information for external systems should be reviewed and, potentially, collected if the incident is, indeed, judged to be malicious. This data may be useful for generating your own threat intelligence, so you can create future alerts.\ -The following factors may assist you in determining whether the event is malicious:\ -1. Country of origin\ -1. Responsible party\ -1. Fully qualified domain names associated with the external IP address\ -1. Registration of fully qualified domain names associated with external IP address. Determining whether it is a dynamic domain frequently visited by others and/or how third parties categorize it can also help you qualify and understand the event and possible motivation for the attack. In addition, there are various sources that may provide reputation information on the IP address or domain name, which can assist you in determining whether the event is malicious in nature. Finally, determining whether there are other events associated with the IP address may help connect data points or expose other historic events that might be brought back into scope.\ -Gathering various data on the system of interest can sometimes help quickly determine whether something suspicious is happening. Some of these items include determining who else may have logged into the system recently, whether any unusual scheduled tasks exist, whether the system is communicating on suspicious ports, whether there are modifications to sensitive registry keys, and/or whether there are any known vulnerabilities on the system. This information can often highlight other activity commonly seen in attack scenarios or give more information about how the system may have been targeted.\ -When a specific service or application is targeted, it is often helpful to know the associated version, to help determine whether it is vulnerable to a specific exploit.\ -If you suspect an attack targeting a web server, it is helpful to look at some of the behavior of the web service to see if there is evidence that the service has been compromised. Some indications of this might be network connections to external resources, the web service spawning child processes that are not associated with typical behavior, and whether the service wrote any files that might be malicious in nature.\ -If a suspicious file is found, we can review more information about it to help determine if it is, in fact, malicious. 
Identifying the file type, any processes that opened the file, the processes that may have created and/or modified the file, and how many other systems potentially have this file can help you determine whether the file is malicious. Also, determining the file hash and checking it against reputation sources, such as VirusTotal, can sometimes help you quickly determine if it is malicious in nature.\ -Often, a simple inspection of a suspect process name and path can tell you if the system has been compromised. For example, if svchost.exe is found running from a location other than `C:\Windows\System32`, it is likely something malicious designed to hide in plain sight when simply reviewing process names.\ +narrative = This Analytic Story looks for probing and exploitation attempts targeting JBoss application servers. While the vulnerabilities associated with this story are rather dated, they were leveraged in a spring 2016 campaign in connection with the Samsam ransomware variant. Incidents involving this ransomware are unique, in that they begin with attacks against vulnerable services, rather than the phishing or drive-by attacks more common with ransomware. In this case, vulnerable JBoss applications appear to be the target of choice. \ +It is helpful to understand how often a notable event generated by this story occurs, as well as the commonalities between some of these events, both of which may provide clues about whether this is a common occurrence of minimal concern or a rare event that may require more extensive investigation. It may also help to understand whether the issue is restricted to a single user/system or whether it is broader in scope. \ +When looking at the target of the behavior uncovered by the event, you should note the sensitivity of the user and/or system to help determine the potential impact. It is also helpful to identify other recent events involving the target. This can help tie different events together and give further situational awareness regarding the target host. \ +Various types of information for external systems should be reviewed and, potentially, collected if the incident is, indeed, judged to be malicious. This data may be useful for generating your own threat intelligence, so you can create future alerts. \ +The following factors may assist you in determining whether the event is malicious: \ +1. Country of origin \ +1. Responsible party \ +1. Fully qualified domain names associated with the external IP address \ +1. Registration of fully qualified domain names associated with external IP address. Determining whether it is a dynamic domain frequently visited by others and/or how third parties categorize it can also help you qualify and understand the event and possible motivation for the attack. In addition, there are various sources that may provide reputation information on the IP address or domain name, which can assist you in determining whether the event is malicious in nature. Finally, determining whether there are other events associated with the IP address may help connect data points or expose other historic events that might be brought back into scope. \ +Gathering various data on the system of interest can sometimes help quickly determine whether something suspicious is happening. 
Some of these items include determining who else may have logged into the system recently, whether any unusual scheduled tasks exist, whether the system is communicating on suspicious ports, whether there are modifications to sensitive registry keys, and/or whether there are any known vulnerabilities on the system. This information can often highlight other activity commonly seen in attack scenarios or give more information about how the system may have been targeted. \ When a specific service or application is targeted, it is often helpful to know the associated version, to help determine whether it is vulnerable to a specific exploit. \ If you suspect an attack targeting a web server, it is helpful to look at some of the behavior of the web service to see if there is evidence that the service has been compromised. Some indications of this might be network connections to external resources, the web service spawning child processes that are not associated with typical behavior, and whether the service wrote any files that might be malicious in nature. \ If a suspicious file is found, we can review more information about it to help determine if it is, in fact, malicious. Identifying the file type, any processes that opened the file, the processes that may have created and/or modified the file, and how many other systems potentially have this file can help you determine whether the file is malicious. Also, determining the file hash and checking it against reputation sources, such as VirusTotal, can sometimes help you quickly determine if it is malicious in nature. \ Often, a simple inspection of a suspect process name and path can tell you if the system has been compromised. For example, if svchost.exe is found running from a location other than `C:\Windows\System32`, it is likely something malicious designed to hide in plain sight when simply reviewing process names. \ It can also be helpful to examine various behaviors of the process of interest and of its parent process. For example, if it turns out the process of interest is malicious, it would be good to see whether the parent process spawned other processes that might also warrant further scrutiny. If a process is suspect, a review of the network connections made around the time of the event and noting whether the process has spawned any child processes could be helpful in determining whether it is malicious or executing a malicious script. [analytic_story://Jenkins Server Vulnerabilities] @@ -18164,9 +18164,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Juniper Networks Remote Code Execution Exploit Detection - Rule"] description = Juniper Networks has resolved multiple critical vulnerabilities in the J-Web component of Junos OS on SRX and EX Series devices. These vulnerabilities, when chained together, could allow an unauthenticated, network-based attacker to remotely execute code on the devices. The vulnerabilities affect all versions of Junos OS on SRX and EX Series, but specific fixes have been released to address each vulnerability. Juniper Networks recommends applying the necessary fixes to mitigate potential remote code execution threats. As a workaround, users can disable J-Web or limit access to only trusted hosts. Proof-of-concept (PoC) exploit code has been released, demonstrating the severity of these flaws and the urgency to apply the fixes. 
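The following is not one of the shipped detections, but a hedged hunting sketch for the J-Web issues described here, assuming the CIM Web data model is fed by web, proxy, or WAF logs with visibility into traffic toward the affected SRX/EX devices. The PHPRC string comes from the exploit chain summarized in the narrative below; the /webauth_operation.php endpoint is the J-Web path reported in public PoC write-ups and is an assumption to validate against your own devices.

    | tstats summariesonly=true count min(_time) as firstTime max(_time) as lastTime
        from datamodel=Web
        where Web.url IN ("*PHPRC=*", "*/webauth_operation.php*")
        by Web.src Web.dest Web.url Web.http_method Web.status
    ``` environment-variable names such as PHPRC rarely appear in legitimate URLs ```
    | rename Web.* as *
    | sort - count

Requests that match only the endpoint pattern may be legitimate administration; any hit on the PHPRC pattern warrants immediate review of the device.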
-narrative = Juniper Networks, a networking hardware company, has released an "out-of-cycle" security update to address multiple flaws in the J-Web component of Junos OS that could be combined to achieve remote code execution on susceptible installations. The flaws have a cumulative CVSS rating of 9.8, making them critical in severity. They affect all versions of Junos OS on SRX and EX Series. The J-Web interface allows users to configure, manage, and monitor Junos OS devices. The vulnerabilities include two PHP external variable modification vulnerabilities (CVE-2023-36844 and CVE-2023-36845) and two missing authentications for critical function vulnerabilities (CVE-2023-36846 and CVE-2023-36847). These vulnerabilities could allow an unauthenticated, network-based attacker to control certain important environment variables, cause limited impact to the file system integrity, or upload arbitrary files via J-Web without any authentication.\ -The vulnerabilities have been addressed in specific Junos OS versions for EX Series and SRX Series devices. Users are recommended to apply the necessary fixes to mitigate potential remote code execution threats. As a workaround, Juniper Networks suggests disabling J-Web or limiting access to only trusted hosts.\ -Additionally, a PoC exploit has been released by watchTowr, combining CVE-2023-36846 and CVE-2023-36845 to upload a PHP file containing malicious shellcode and achieve code execution by injecting the PHPRC environment variable to point to a configuration file to load the booby-trapped PHP script. WatchTowr noted that this is an interesting bug chain, utilizing two bugs that would be near-useless in isolation and combining them for a "world-ending" unauthenticated remote code execution.\ +narrative = Juniper Networks, a networking hardware company, has released an "out-of-cycle" security update to address multiple flaws in the J-Web component of Junos OS that could be combined to achieve remote code execution on susceptible installations. The flaws have a cumulative CVSS rating of 9.8, making them critical in severity. They affect all versions of Junos OS on SRX and EX Series. The J-Web interface allows users to configure, manage, and monitor Junos OS devices. The vulnerabilities include two PHP external variable modification vulnerabilities (CVE-2023-36844 and CVE-2023-36845) and two missing authentications for critical function vulnerabilities (CVE-2023-36846 and CVE-2023-36847). These vulnerabilities could allow an unauthenticated, network-based attacker to control certain important environment variables, cause limited impact to the file system integrity, or upload arbitrary files via J-Web without any authentication. \ +The vulnerabilities have been addressed in specific Junos OS versions for EX Series and SRX Series devices. Users are recommended to apply the necessary fixes to mitigate potential remote code execution threats. As a workaround, Juniper Networks suggests disabling J-Web or limiting access to only trusted hosts. \ +Additionally, a PoC exploit has been released by watchTowr, combining CVE-2023-36846 and CVE-2023-36845 to upload a PHP file containing malicious shellcode and achieve code execution by injecting the PHPRC environment variable to point to a configuration file to load the booby-trapped PHP script. WatchTowr noted that this is an interesting bug chain, utilizing two bugs that would be near-useless in isolation and combining them for a "world-ending" unauthenticated remote code execution. 
\ In conclusion, these vulnerabilities pose a significant threat to Juniper SRX and EX Series devices, and it is imperative for users to apply the necessary fixes or implement the recommended workaround to mitigate the potential impact. [analytic_story://Kubernetes Scanning Activity] @@ -18310,17 +18310,17 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "David Dorsey"}] spec_version = 3 searches = ["ESCU - Suspicious Powershell Command-Line Arguments - Rule", "ESCU - Any Powershell DownloadFile - Rule", "ESCU - Any Powershell DownloadString - Rule", "ESCU - Detect Certify With PowerShell Script Block Logging - Rule", "ESCU - Detect Empire with PowerShell Script Block Logging - Rule", "ESCU - Detect Mimikatz With PowerShell Script Block Logging - Rule", "ESCU - GetLocalUser with PowerShell Script Block - Rule", "ESCU - GetWmiObject User Account with PowerShell Script Block - Rule", "ESCU - Malicious Powershell Executed As A Service - Rule", "ESCU - Malicious PowerShell Process - Encoded Command - Rule", "ESCU - Malicious PowerShell Process With Obfuscation Techniques - Rule", "ESCU - Possible Lateral Movement PowerShell Spawn - Rule", "ESCU - PowerShell 4104 Hunting - Rule", "ESCU - PowerShell - Connect To Internet With Hidden Window - Rule", "ESCU - Powershell COM Hijacking InprocServer32 Modification - Rule", "ESCU - Powershell Creating Thread Mutex - Rule", "ESCU - PowerShell Domain Enumeration - Rule", "ESCU - PowerShell Enable PowerShell Remoting - Rule", "ESCU - Powershell Enable SMB1Protocol Feature - Rule", "ESCU - Powershell Execute COM Object - Rule", "ESCU - Powershell Fileless Process Injection via GetProcAddress - Rule", "ESCU - Powershell Fileless Script Contains Base64 Encoded Content - Rule", "ESCU - PowerShell Invoke CIMMethod CIMSession - Rule", "ESCU - PowerShell Loading DotNET into Memory via Reflection - Rule", "ESCU - Powershell Processing Stream Of Data - Rule", "ESCU - PowerShell Script Block With URL Chain - Rule", "ESCU - Powershell Using memory As Backing Store - Rule", "ESCU - PowerShell WebRequest Using Memory Stream - Rule", "ESCU - Recon AVProduct Through Pwh or WMI - Rule", "ESCU - Recon Using WMI Class - Rule", "ESCU - ServicePrincipalNames Discovery with PowerShell - Rule", "ESCU - Set Default PowerShell Execution Policy To Unrestricted or Bypass - Rule", "ESCU - Unloading AMSI via Reflection - Rule", "ESCU - WMI Recon Running Process Or Services - Rule", "ESCU - Get History Of Email Sources - Response Task", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task"] description = Attackers are finding stealthy ways "live off the land," leveraging utilities and tools that come standard on the endpoint--such as PowerShell--to achieve their goals without downloading binary files. These searches can help you detect and investigate PowerShell command-line options that may be indicative of malicious intent. -narrative = The searches in this Analytic Story monitor for parameters often used for malicious purposes. It is helpful to understand how often the notable events generated by this story occur, as well as the commonalities between some of these events. These factors may provide clues about whether this is a common occurrence of minimal concern or a rare event that may require more extensive investigation. 
Likewise, it is important to determine whether the issue is restricted to a single user/system or is broader in scope.\ -The following factors may assist you in determining whether the event is malicious:\ -1. Country of origin\ -1. Responsible party\ -1. Fully qualified domain names associated with the external IP address\ -1. Registration of fully qualified domain names associated with external IP address\ -Determining whether it is a dynamic domain frequently visited by others and/or how third parties categorize it can also help you answer some questions surrounding the attacker and details related to the external system. In addition, there are various sources--such as VirusTotal--that can provide some reputation information on the IP address or domain name, which can assist in determining whether the event is malicious. Finally, determining whether there are other events associated with the IP address may help connect data points or show other events that should be brought into scope.\ -Gathering data on the system of interest can sometimes help you quickly determine whether something suspicious is happening. Some of these items include finding out who else may have recently logged into the system, whether any unusual scheduled tasks exist, whether the system is communicating on suspicious ports, whether there are modifications to sensitive registry keys, and whether there are any known vulnerabilities on the system. This information can often highlight other activity commonly seen in attack scenarios or give more information about how the system may have been targeted.\ -Often, a simple inspection of the process name and path can tell you if the system has been compromised. For example, if `svchost.exe` is found running from a location other than `C:\Windows\System32`, it is likely something malicious designed to hide in plain sight when cursorily reviewing process names. Similarly, if the process itself seems legitimate, but the parent process is running from the temporary browser cache, that could be indicative of activity initiated via a compromised website a user visited.\ -It can also be very helpful to examine various behaviors of the process of interest or the parent of the process of interest. For example, if it turns out the process of interest is malicious, it would be good to see if the parent to that process spawned other processes that might be worth further scrutiny. If a process is suspect, a review of the network connections made in and around the time of the event and/or whether the process spawned any child processes could be helpful, as well.\ -In the event a system is suspected of having been compromised via a malicious website, we suggest reviewing the browsing activity from that system around the time of the event. If categories are given for the URLs visited, that can help you zero in on possible malicious sites.\ +narrative = The searches in this Analytic Story monitor for parameters often used for malicious purposes. It is helpful to understand how often the notable events generated by this story occur, as well as the commonalities between some of these events. These factors may provide clues about whether this is a common occurrence of minimal concern or a rare event that may require more extensive investigation. Likewise, it is important to determine whether the issue is restricted to a single user/system or is broader in scope. \ +The following factors may assist you in determining whether the event is malicious: \ +1. Country of origin \ +1. 
Responsible party \ +1. Fully qualified domain names associated with the external IP address \ +1. Registration of fully qualified domain names associated with external IP address \ +Determining whether it is a dynamic domain frequently visited by others and/or how third parties categorize it can also help you answer some questions surrounding the attacker and details related to the external system. In addition, there are various sources--such as VirusTotal--that can provide some reputation information on the IP address or domain name, which can assist in determining whether the event is malicious. Finally, determining whether there are other events associated with the IP address may help connect data points or show other events that should be brought into scope. \ +Gathering data on the system of interest can sometimes help you quickly determine whether something suspicious is happening. Some of these items include finding out who else may have recently logged into the system, whether any unusual scheduled tasks exist, whether the system is communicating on suspicious ports, whether there are modifications to sensitive registry keys, and whether there are any known vulnerabilities on the system. This information can often highlight other activity commonly seen in attack scenarios or give more information about how the system may have been targeted. \ +Often, a simple inspection of the process name and path can tell you if the system has been compromised. For example, if `svchost.exe` is found running from a location other than `C:\Windows\System32`, it is likely something malicious designed to hide in plain sight when cursorily reviewing process names. Similarly, if the process itself seems legitimate, but the parent process is running from the temporary browser cache, that could be indicative of activity initiated via a compromised website a user visited. \ +It can also be very helpful to examine various behaviors of the process of interest or the parent of the process of interest. For example, if it turns out the process of interest is malicious, it would be good to see if the parent to that process spawned other processes that might be worth further scrutiny. If a process is suspect, a review of the network connections made in and around the time of the event and/or whether the process spawned any child processes could be helpful, as well. \ +In the event a system is suspected of having been compromised via a malicious website, we suggest reviewing the browsing activity from that system around the time of the event. If categories are given for the URLs visited, that can help you zero in on possible malicious sites. \ +Most recently we have added new content related to PowerShell Script Block logging, Windows EventCode 4104. Script block logging presents the deobfuscated and raw script executed on an endpoint. The analytics produced were tested against commonly used attack frameworks - PowerShell-Empire, Cobalt Strike and Covenant. In addition, we sampled publicly available samples that utilize PowerShell and validated coverage. The analytics are here to identify suspicious usage, cmdlets, or script values. 4104 events are enabled via the Windows registry and may generate a large volume of data if enabled globally. Enabling on critical systems or a limited set may be best. During triage of 4104 events, review parallel processes for other processes and commands executed. Identify any file modifications and network communication and review accordingly. 
Fortunately, we get the full script to determine the level of threat identified. [analytic_story://Masquerading - Rename System Utilities] @@ -18332,8 +18332,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Execution of File With Spaces Before Extension - Rule", "ESCU - Suspicious Rundll32 Rename - Rule", "ESCU - Execution of File with Multiple Extensions - Rule", "ESCU - Sdelete Application Execution - Rule", "ESCU - Suspicious microsoft workflow compiler rename - Rule", "ESCU - Suspicious msbuild path - Rule", "ESCU - Suspicious MSBuild Rename - Rule", "ESCU - System Processes Run From Unexpected Locations - Rule", "ESCU - Windows DotNet Binary in Non Standard Path - Rule", "ESCU - Windows InstallUtil in Non Standard Path - Rule"] description = Adversaries may rename legitimate system utilities to try to evade security mechanisms concerning the usage of those utilities. -narrative = Security monitoring and control mechanisms may be in place for system utilities adversaries are capable of abusing. It may be possible to bypass those security mechanisms by renaming the utility prior to utilization (ex: rename rundll32.exe). An alternative case occurs when a legitimate utility is copied or moved to a different directory and renamed to avoid detections based on system utilities executing from non-standard paths.\ -The following content is here to assist with binaries within `system32` or `syswow64` being moved to a new location or an adversary bringing the binary in to execute.\ +narrative = Security monitoring and control mechanisms may be in place for system utilities adversaries are capable of abusing. It may be possible to bypass those security mechanisms by renaming the utility prior to utilization (ex: rename rundll32.exe). An alternative case occurs when a legitimate utility is copied or moved to a different directory and renamed to avoid detections based on system utilities executing from non-standard paths. \ +The following content is here to assist with binaries within `system32` or `syswow64` being moved to a new location or an adversary bringing the binary in to execute. \ There will be false positives as some native Windows processes are moved or run by third-party applications from different paths. If file names are mismatched between the file name on disk and that of the binary's PE metadata, this is a likely indicator that a binary was renamed after it was compiled. Collecting and comparing disk and resource filenames for binaries by looking to see if the InternalName, OriginalFilename, and/or ProductName match what is expected could provide useful leads, but may not always be indicative of malicious activity. Do not focus on the possible names a file could have, but instead on the command-line arguments that are known to be used and are distinct, because that approach will have a better rate of detection (see the illustrative search sketch below). [analytic_story://MetaSploit] @@ -18345,9 +18345,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Powershell Load Module in Meterpreter - Rule", "ESCU - Windows Apache Benchmark Binary - Rule"] description = The following analytic story highlights content related directly to MetaSploit, which may be default configurations attributed to MetaSploit or behaviors of known knowns that are related. 
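As a rough companion to the Masquerading - Rename System Utilities guidance above (and not a replacement for the shipped rules it lists), the sketch below shows the path-based check in SPL, assuming the CIM Endpoint.Processes data model is populated by Sysmon or EDR data. The short list of system binaries is illustrative; extend it to the utilities you care about.

    | tstats summariesonly=true count min(_time) as firstTime max(_time) as lastTime
        from datamodel=Endpoint.Processes
        where Processes.process_name IN ("svchost.exe", "rundll32.exe", "lsass.exe", "services.exe", "smss.exe")
        NOT (Processes.process_path IN ("*\\Windows\\System32*", "*\\Windows\\SysWOW64*"))
        by Processes.dest Processes.user Processes.process_name Processes.process_path Processes.process
    ``` system binaries executing outside System32/SysWOW64 deserve a closer look ```
    | rename Processes.* as *
    | sort - count

If your data source also populates original_file_name (for example, from Sysmon PE metadata), comparing it against process_name in a follow-on where clause covers the renamed-binary case described above.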
-narrative = The Metasploit framework is a very powerful tool which can be used by cybercriminals as well as ethical hackers to probe systematic vulnerabilities on networks and servers. Because it is an open-source framework, it can be easily customized and used with most operating systems.\ -The Metasploit Project was undertaken in 2003 by H.D. Moore for use as a Perl-based portable network tool, with assistance from core developer Matt Miller. It was fully converted to Ruby by 2007, and the license was acquired by Rapid7 in 2009, where it remains as part of the Boston-based company's repertoire of IDS signature development and targeted remote exploit, fuzzing, anti-forensic, and evasion tools.\\ -Portions of these other tools reside within the Metasploit framework, which is built into the Kali Linux OS. Rapid7 has also developed two proprietary OpenCore tools, Metasploit Pro and Metasploit Express.\\ +narrative = The Metasploit framework is a very powerful tool which can be used by cybercriminals as well as ethical hackers to probe systematic vulnerabilities on networks and servers. Because it is an open-source framework, it can be easily customized and used with most operating systems. \ +The Metasploit Project was undertaken in 2003 by H.D. Moore for use as a Perl-based portable network tool, with assistance from core developer Matt Miller. It was fully converted to Ruby by 2007, and the license was acquired by Rapid7 in 2009, where it remains as part of the Boston-based company's repertoire of IDS signature development and targeted remote exploit, fuzzing, anti-forensic, and evasion tools.\ \ +Portions of these other tools reside within the Metasploit framework, which is built into the Kali Linux OS. Rapid7 has also developed two proprietary OpenCore tools, Metasploit Pro and Metasploit Express.\ \ This framework has become the go-to exploit development and mitigation tool. Prior to Metasploit, pen testers had to perform all probes manually by using a variety of tools that may or may not have supported the platform they were testing, writing their own code by hand, and introducing it onto networks manually. Remote testing was virtually unheard of, and that limited a security specialist's reach to the local area and forced companies to spend a fortune on in-house IT or security consultants. (ref. Varonis) [analytic_story://Meterpreter] @@ -18359,8 +18359,8 @@ maintainers = [{"company": "no", "email": "-", "name": "Michael Hart"}] spec_version = 3 searches = ["ESCU - Excessive distinct processes from Windows Temp - Rule", "ESCU - Excessive number of taskhost processes - Rule"] description = Meterpreter provides red teams, pen testers and threat actors interactive access to a compromised host to run commands, upload payloads, download files, and perform other actions. -narrative = This Analytic Story helps you detect Tactics, Techniques and Procedures (TTPs) from Meterpreter. Meterpreter is a Metasploit payload for remote execution that leverages DLL injection to make it extremely difficult to detect. Since the software runs in memory, no new processes are created upon injection. It also leverages encrypted communication channels.\ -Meterpreter enables the operator to remotely run commands on the target machine, upload payloads, download files, dump password hashes, and much more. It is difficult to determine from the forensic evidence what actions the operator performed. 
Splunk Research, however, has observed anomalous behaviors on the compromised hosts that seem to only appear when Meterpreter is executing various commands. With that, we have written new detections targeted to these detections.\ +narrative = This Analytic Story helps you detect Tactics, Techniques and Procedures (TTPs) from Meterpreter. Meterpreter is a Metasploit payload for remote execution that leverages DLL injection to make it extremely difficult to detect. Since the software runs in memory, no new processes are created upon injection. It also leverages encrypted communication channels. \ +Meterpreter enables the operator to remotely run commands on the target machine, upload payloads, download files, dump password hashes, and much more. It is difficult to determine from the forensic evidence what actions the operator performed. Splunk Research, however, has observed anomalous behaviors on the compromised hosts that seem to only appear when Meterpreter is executing various commands. With that, we have written new detections targeted at these behaviors. \ While investigating a detection related to this analytic story, please bear in mind that the detections look for anomalies in system behavior. It will be imperative to look for other signs in the endpoint and network logs for lateral movement, discovery and other actions to confirm that the host was compromised and a remote actor used it to progress on their objectives. [analytic_story://Microsoft MSHTML Remote Code Execution CVE-2021-40444] @@ -18372,7 +18372,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Control Loading from World Writable Directory - Rule", "ESCU - MSHTML Module Load in Office Product - Rule", "ESCU - Office Product Writing cab or inf - Rule", "ESCU - Office Spawning Control - Rule", "ESCU - Rundll32 Control RunDLL Hunt - Rule", "ESCU - Rundll32 Control RunDLL World Writable Directory - Rule"] description = CVE-2021-40444 is a remote code execution vulnerability in MSHTML, recently used to deliver targeted spearphishing documents. -narrative = Microsoft is aware of targeted attacks that attempt to exploit this vulnerability, CVE-2021-40444 by using specially-crafted Microsoft Office documents. MSHTML is a software component used to render web pages on Windows. Although it is 2019s most commonly associated with Internet Explorer, it is also used in other software. CVE-2021-40444 received a CVSS score of 8.8 out of 10. MSHTML is the beating heart of Internet Explorer, the vulnerability also exists in that browser. Although given its limited use, there is little risk of infection by that vector. Microsoft Office applications use the MSHTML component to display web content in Office documents. The attack depends on MSHTML loading a specially crafted ActiveX control when the target opens a malicious Office document. The loaded ActiveX control can then run arbitrary code to infect the system with more malware. At the moment all supported Windows versions are vulnerable. Since there is no patch available yet, Microsoft proposes a few methods to block these attacks.\ +narrative = Microsoft is aware of targeted attacks that attempt to exploit this vulnerability, CVE-2021-40444, by using specially-crafted Microsoft Office documents. MSHTML is a software component used to render web pages on Windows. Although it is most commonly associated with Internet Explorer, it is also used in other software.
CVE-2021-40444 received a CVSS score of 8.8 out of 10. MSHTML is the beating heart of Internet Explorer, so the vulnerability also exists in that browser. Given its limited use, however, there is little risk of infection by that vector. Microsoft Office applications use the MSHTML component to display web content in Office documents. The attack depends on MSHTML loading a specially crafted ActiveX control when the target opens a malicious Office document. The loaded ActiveX control can then run arbitrary code to infect the system with more malware. At the moment all supported Windows versions are vulnerable. Since there is no patch available yet, Microsoft proposes a few methods to block these attacks. \ 1. Disable the installation of all ActiveX controls in Internet Explorer via the registry. Previously-installed ActiveX controls will still run, but no new ones will be added, including malicious ones. Open documents from the Internet in Protected View or Application Guard for Office, both of which prevent the current attack. This is a default setting but it may have been changed. [analytic_story://Microsoft SharePoint Server Elevation of Privilege CVE-2023-29357] @@ -18406,8 +18406,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Rico Valdez"}] spec_version = 3 searches = ["ESCU - No Windows Updates in a time frame - Rule", "ESCU - Get Notable History - Response Task"] description = Monitor your enterprise to ensure that your endpoints are being patched and updated. Adversaries notoriously exploit known vulnerabilities that could be mitigated by applying routine security patches. -narrative = It is a common best practice to ensure that endpoints are being patched and updated in a timely manner, in order to reduce the risk of compromise via a publicly disclosed vulnerability. Timely application of updates/patches is important to eliminate known vulnerabilities that may be exploited by various threat actors.\ -Searches in this analytic story are designed to help analysts monitor endpoints for system patches and/or updates. This helps analysts identify any systems that are not successfully updated in a timely matter.\ +narrative = It is a common best practice to ensure that endpoints are being patched and updated in a timely manner, in order to reduce the risk of compromise via a publicly disclosed vulnerability. Timely application of updates/patches is important to eliminate known vulnerabilities that may be exploited by various threat actors. \ +Searches in this analytic story are designed to help analysts monitor endpoints for system patches and/or updates. This helps analysts identify any systems that are not successfully updated in a timely manner. \ Microsoft releases updates for Windows systems on a monthly cadence. They should be installed as soon as possible after following internal testing and validation procedures. Patches and updates for other systems or applications are typically released as needed. [analytic_story://MOVEit Transfer Critical Vulnerability] @@ -18419,11 +18419,11 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Windows MOVEit Transfer Writing ASPX - Rule"] description = A critical zero-day vulnerability has been discovered in the MOVEit Transfer file transfer software, widely used by businesses and developers worldwide. The vulnerability has been exploited by unknown threat actors to perform mass data theft from organizations.
Progress Software Corporation, the developer of MOVEit, has issued a security advisory urging customers to take immediate action to protect their environments. They recommend blocking external traffic to ports 80 and 445 on the MOVEit server, and to check the c:\MOVEitTransfer\wwwroot\ folder for unusual files. A patch is currently released. -narrative = Hackers have been actively exploiting a zero-day vulnerability found in the MOVEit Transfer software. This software, developed by Progress Software Corporation, a US-based company and its subsidiary Ipswitch, is a managed file transfer solution. It is used by thousands of organizations worldwide, including Chase, Disney, GEICO, and MLB, and by 3.5 million developers. The software allows for secure file transfers between business partners and customers using SFTP, SCP, and HTTP-based uploads.\ -The zero-day vulnerability has been exploited to steal data on a large scale from various organizations. The identity of the threat actors and the exact timeline of the exploitation remains unclear. However, it has been confirmed that multiple organizations have experienced breaches and data theft.\ -In response to this critical situation, Progress released a security advisory warning customers of the vulnerability and providing mitigation strategies while a patch has been released. They urged customers to take immediate action to protect their MOVEit environments. They suggested blocking external traffic to ports 80 and 445 on the MOVEit server and checking the c:\MOVEitTransfer\wwwroot\ folder for unexpected files, including backups or large file downloads.\ -Blocking these ports will prevent external access to the web UI, prevent some MOVEit Automation tasks from working, block APIs, and prevent the Outlook MOVEit plugin from working. However, SFTP and FTP/s protocols can continue to be used for file transfers.\ -There is currently no detailed information about the zero-day vulnerability. But based on the ports blocked and the specific location to check for unusual files, the flaw is likely a web-facing vulnerability.\ +narrative = Hackers have been actively exploiting a zero-day vulnerability found in the MOVEit Transfer software. This software, developed by Progress Software Corporation, a US-based company and its subsidiary Ipswitch, is a managed file transfer solution. It is used by thousands of organizations worldwide, including Chase, Disney, GEICO, and MLB, and by 3.5 million developers. The software allows for secure file transfers between business partners and customers using SFTP, SCP, and HTTP-based uploads. \ +The zero-day vulnerability has been exploited to steal data on a large scale from various organizations. The identity of the threat actors and the exact timeline of the exploitation remains unclear. However, it has been confirmed that multiple organizations have experienced breaches and data theft. \ +In response to this critical situation, Progress released a security advisory warning customers of the vulnerability and providing mitigation strategies while a patch has been released. They urged customers to take immediate action to protect their MOVEit environments. They suggested blocking external traffic to ports 80 and 445 on the MOVEit server and checking the c:\MOVEitTransfer\wwwroot\ folder for unexpected files, including backups or large file downloads. 
\ +Blocking these ports will prevent external access to the web UI, prevent some MOVEit Automation tasks from working, block APIs, and prevent the Outlook MOVEit plugin from working. However, SFTP and FTP/s protocols can continue to be used for file transfers. \ +There is currently no detailed information about the zero-day vulnerability. But based on the ports blocked and the specific location to check for unusual files, the flaw is likely a web-facing vulnerability. \ While Progress has officially confirmed that the vulnerability is being actively exploited, it is clear from several reports that multiple organizations have already had data stolen using this zero-day vulnerability. The exploitation appears very similar to the mass exploitation of a GoAnywhere MFT zero-day in January 2023 and the December 2020 zero-day exploitation of Accellion FTA servers. These were both managed file transfer platforms heavily exploited by the Clop ransomware gang to steal data and extort organizations. [analytic_story://Netsh Abuse] @@ -18435,7 +18435,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Bhavin Patel"}] spec_version = 3 searches = ["ESCU - Processes created by netsh - Rule", "ESCU - Processes launching netsh - Rule", "ESCU - Windows Common Abused Cmd Shell Risk Behavior - Rule", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task"] description = Detect activities and various techniques associated with the abuse of `netsh.exe`, which can disable local firewall settings or set up a remote connection to a host from an infected system. -narrative = It is a common practice for attackers of all types to leverage native Windows tools and functionality to execute commands for malicious reasons. One such tool on Windows OS is `netsh.exe`,a command-line scripting utility that allows you to--either locally or remotely--display or modify the network configuration of a computer that is currently running. `Netsh.exe` can be used to discover and disable local firewall settings. It can also be used to set up a remote connection to a host from an infected system.\ +narrative = It is a common practice for attackers of all types to leverage native Windows tools and functionality to execute commands for malicious reasons. One such tool on Windows OS is `netsh.exe`,a command-line scripting utility that allows you to--either locally or remotely--display or modify the network configuration of a computer that is currently running. `Netsh.exe` can be used to discover and disable local firewall settings. It can also be used to set up a remote connection to a host from an infected system. \ To get started, run the detection search to identify parent processes of `netsh.exe`. [analytic_story://Network Discovery] @@ -18546,9 +18546,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "David Dorsey"}] spec_version = 3 searches = ["ESCU - First time seen command line argument - Rule", "ESCU - First Time Seen Running Windows Service - Rule", "ESCU - Sc exe Manipulating Windows Services - Rule", "ESCU - Get History Of Email Sources - Response Task", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task"] description = Detect activities and various techniques associated with the Orangeworm Attack Group, a group that frequently targets the healthcare industry. 
-narrative = In May of 2018, the attack group Orangeworm was implicated for installing a custom backdoor called Trojan.Kwampirs within large international healthcare corporations in the United States, Europe, and Asia. This malware provides the attackers with remote access to the target system, decrypting and extracting a copy of its main DLL payload from its resource section. Before writing the payload to disk, it inserts a randomly generated string into the middle of the decrypted payload in an attempt to evade hash-based detections.\ -Awareness of the Orangeworm group first surfaced in January, 2015. It has conducted targeted attacks against related industries, as well, such as pharmaceuticals and healthcare IT solution providers.\ -Healthcare may be a promising target, because it is notoriously behind in technology, often using older operating systems and neglecting to patch computers. Even so, the group was able to evade detection for a full three years. Sources say that the malware spread quickly within the target networks, infecting computers used to control medical devices, such as MRI and X-ray machines.\ +narrative = In May of 2018, the attack group Orangeworm was implicated for installing a custom backdoor called Trojan.Kwampirs within large international healthcare corporations in the United States, Europe, and Asia. This malware provides the attackers with remote access to the target system, decrypting and extracting a copy of its main DLL payload from its resource section. Before writing the payload to disk, it inserts a randomly generated string into the middle of the decrypted payload in an attempt to evade hash-based detections. \ +Awareness of the Orangeworm group first surfaced in January, 2015. It has conducted targeted attacks against related industries, as well, such as pharmaceuticals and healthcare IT solution providers. \ +Healthcare may be a promising target, because it is notoriously behind in technology, often using older operating systems and neglecting to patch computers. Even so, the group was able to evade detection for a full three years. Sources say that the malware spread quickly within the target networks, infecting computers used to control medical devices, such as MRI and X-ray machines. \ This Analytic Story is designed to help you detect and investigate suspicious activities that may be indicative of an Orangeworm attack. One detection search looks for command-line arguments. Another monitors for uses of sc.exe, a non-essential Windows file that can manipulate Windows services. One of the investigative searches helps you get more information on web hosts that you suspect have been compromised. [analytic_story://Outlook RCE CVE-2024-21378] @@ -18571,9 +18571,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - PaperCut NG Suspicious Behavior Debug Log - Rule", "ESCU - Windows PaperCut NG Spawn Shell - Rule", "ESCU - PaperCut NG Remote Web Access Attempt - Rule"] description = The FBI has issued a joint advisory concerning the exploitation of a PaperCut MF/NG vulnerability (CVE-2023-27350) by malicious actors, which began in mid-April 2023 and has been ongoing. In early May 2023, a group identifying themselves as the Bl00dy Ransomware Gang targeted vulnerable PaperCut servers within the Education Facilities Subsector. The advisory provides information on detecting exploitation attempts and shares known indicators of compromise (IOCs) associated with the group's activities. 
-narrative = PaperCut MF/NG versions 19 and older have reached their end-of-life, as documented on the End of Life Policy page. Customers using these older versions are advised to purchase an updated license online for PaperCut NG or through their PaperCut Partner for PaperCut MF. For users with a currently supported version (version 20 or later), they can upgrade to any maintenance release version they are licensed for. If upgrading to a security patch is not possible, there are alternative options to enhance security. Users can lock down network access to their server(s) by blocking all inbound traffic from external IPs to the web management port (port 9191 and 9192 by default) and blocking all inbound traffic to the web management portal on the firewall to the server. Additionally, users can apply "Allow list" restrictions under Options > Advanced > Security > Allowed site server IP addresses, setting this to only allow the IP addresses of verified Site Servers on their network.\ -The vulnerabilities CVE-2023-27350 and CVE-2023-27351 have CVSS scores of 9.8 (Critical) and 8.2 (High), respectively. PaperCut and its partner network have activated response teams to assist PaperCut MF and NG customers, with service desks available 24/7 via their support page. The security response team at PaperCut has been working with external security advisors to compile a list of unpatched PaperCut MF/NG servers that have ports open on the public internet. They have been proactively reaching out to potentially exposed customers since Wednesday afternoon (AEST) and are working around the clock through the weekend.\ -The exploit was first detected in the wild on April 18th, 2023, at 03:30 AEST / April 17th, 2023, at 17:30 UTC. The earliest signature of suspicious activity on a customer server potentially linked to this vulnerability dates back to April 14th, 2023, at 01:29 AEST / April 13th, 2023, at 15:29 UTC.\ +narrative = PaperCut MF/NG versions 19 and older have reached their end-of-life, as documented on the End of Life Policy page. Customers using these older versions are advised to purchase an updated license online for PaperCut NG or through their PaperCut Partner for PaperCut MF. For users with a currently supported version (version 20 or later), they can upgrade to any maintenance release version they are licensed for. If upgrading to a security patch is not possible, there are alternative options to enhance security. Users can lock down network access to their server(s) by blocking all inbound traffic from external IPs to the web management port (port 9191 and 9192 by default) and blocking all inbound traffic to the web management portal on the firewall to the server. Additionally, users can apply "Allow list" restrictions under Options > Advanced > Security > Allowed site server IP addresses, setting this to only allow the IP addresses of verified Site Servers on their network. \ +The vulnerabilities CVE-2023-27350 and CVE-2023-27351 have CVSS scores of 9.8 (Critical) and 8.2 (High), respectively. PaperCut and its partner network have activated response teams to assist PaperCut MF and NG customers, with service desks available 24/7 via their support page. The security response team at PaperCut has been working with external security advisors to compile a list of unpatched PaperCut MF/NG servers that have ports open on the public internet. 
They have been proactively reaching out to potentially exposed customers since Wednesday afternoon (AEST) and are working around the clock through the weekend. \ +The exploit was first detected in the wild on April 18th, 2023, at 03:30 AEST / April 17th, 2023, at 17:30 UTC. The earliest signature of suspicious activity on a customer server potentially linked to this vulnerability dates back to April 14th, 2023, at 01:29 AEST / April 13th, 2023, at 15:29 UTC. \ Applying the security fixes should not have any negative impact. Users can follow their usual upgrade procedure to obtain the upgrade. Additional links on the -Check for updates- page (accessed through the Admin interface > About > Version info > Check for updates) allow customers to download fixes for previous major versions that are still supported (e.g., 20.1.7 and 21.2.11) as well as the current version available. PaperCut MF users are advised to follow their regular upgrade process and consult their PaperCut partner or reseller for assistance. [analytic_story://PetitPotam NTLM Relay on Active Directory Certificate Services] @@ -18618,32 +18618,32 @@ maintainers = [{"company": "iDefense", "email": "-", "name": "iDefense Cyber Esp spec_version = 3 searches = ["ESCU - First time seen command line argument - Rule", "ESCU - PowerShell - Connect To Internet With Hidden Window - Rule", "ESCU - Registry Keys Used For Persistence - Rule", "ESCU - Unusually Long Command Line - Rule", "ESCU - Unusually Long Command Line - MLTK - Rule", "ESCU - Get History Of Email Sources - Response Task", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task"] description = Monitor your environment for suspicious behaviors that resemble the techniques employed by the MUDCARP threat group. -narrative = This story was created as a joint effort between iDefense and Splunk.\ -iDefense analysts have recently discovered a Windows executable file that, upon execution, spoofs a decryption tool and then drops a file that appears to be the custom-built javascript backdoor, "Orz," which is associated with the threat actors known as MUDCARP (as well as "temp.Periscope" and "Leviathan"). The file is executed using Wscript.\ -The MUDCARP techniques include the use of the compressed-folders module from Microsoft, zipfldr.dll, with RouteTheCall export to run the malicious process or command. After a successful reboot, the malware is made persistent by a manipulating `[HKEY_CURRENT_USER\SOFTWARE\Microsoft\Windows\CurrentVersion\Run]'help'='c:\\windows\\system32\\rundll32.exe c:\\windows\\system32\\zipfldr.dll,RouteTheCall c:\\programdata\\winapp.exe'`. Though this technique is not exclusive to MUDCARP, it has been spotted in the group's arsenal of advanced techniques seen in the wild.\ -This Analytic Story searches for evidence of tactics, techniques, and procedures (TTPs) that allow for the use of a endpoint detection-and-response (EDR) bypass technique to mask the true parent of a malicious process. It can also be set as a registry key for further sandbox evasion and to allow the malware to launch only after reboot.\ -If behavioral searches included in this story yield positive hits, iDefense recommends conducting IOC searches for the following:\ -1. www.chemscalere[.]com\ -1. chemscalere[.]com\ -1. about.chemscalere[.]com\ -1. autoconfig.chemscalere[.]com\ -1. autodiscover.chemscalere[.]com\ -1. catalog.chemscalere[.]com\ -1. cpanel.chemscalere[.]com\ -1. db.chemscalere[.]com\ -1. 
ftp.chemscalere[.]com\ -1. mail.chemscalere[.]com\ -1. news.chemscalere[.]com\ -1. update.chemscalere[.]com\ -1. webmail.chemscalere[.]com\ -1. www.candlelightparty[.]org\ -1. candlelightparty[.]org\ -1. newapp.freshasianews[.]com\ -In addition, iDefense also recommends that organizations review their environments for activity related to the following hashes:\ -1. cd195ee448a3657b5c2c2d13e9c7a2e2\ -1. b43ad826fe6928245d3c02b648296b43\ -1. 889a9b52566448231f112a5ce9b5dfaf\ -1. b8ec65dab97cdef3cd256cc4753f0c54\ +narrative = This story was created as a joint effort between iDefense and Splunk. \ +iDefense analysts have recently discovered a Windows executable file that, upon execution, spoofs a decryption tool and then drops a file that appears to be the custom-built javascript backdoor, "Orz," which is associated with the threat actors known as MUDCARP (as well as "temp.Periscope" and "Leviathan"). The file is executed using Wscript. \ +The MUDCARP techniques include the use of the compressed-folders module from Microsoft, zipfldr.dll, with RouteTheCall export to run the malicious process or command. After a successful reboot, the malware is made persistent by a manipulating `[HKEY_CURRENT_USER\SOFTWARE\Microsoft\Windows\CurrentVersion\Run]'help'='c:\\windows\\system32\\rundll32.exe c:\\windows\\system32\\zipfldr.dll,RouteTheCall c:\\programdata\\winapp.exe'`. Though this technique is not exclusive to MUDCARP, it has been spotted in the group's arsenal of advanced techniques seen in the wild. \ +This Analytic Story searches for evidence of tactics, techniques, and procedures (TTPs) that allow for the use of a endpoint detection-and-response (EDR) bypass technique to mask the true parent of a malicious process. It can also be set as a registry key for further sandbox evasion and to allow the malware to launch only after reboot. \ +If behavioral searches included in this story yield positive hits, iDefense recommends conducting IOC searches for the following: \ +1. www.chemscalere[.]com \ +1. chemscalere[.]com \ +1. about.chemscalere[.]com \ +1. autoconfig.chemscalere[.]com \ +1. autodiscover.chemscalere[.]com \ +1. catalog.chemscalere[.]com \ +1. cpanel.chemscalere[.]com \ +1. db.chemscalere[.]com \ +1. ftp.chemscalere[.]com \ +1. mail.chemscalere[.]com \ +1. news.chemscalere[.]com \ +1. update.chemscalere[.]com \ +1. webmail.chemscalere[.]com \ +1. www.candlelightparty[.]org \ +1. candlelightparty[.]org \ +1. newapp.freshasianews[.]com \ +In addition, iDefense also recommends that organizations review their environments for activity related to the following hashes: \ +1. cd195ee448a3657b5c2c2d13e9c7a2e2 \ +1. b43ad826fe6928245d3c02b648296b43 \ +1. 889a9b52566448231f112a5ce9b5dfaf \ +1. b8ec65dab97cdef3cd256cc4753f0c54 \ 1. 
04d83cd3813698de28cfbba326d7647c [analytic_story://Prestige Ransomware] @@ -18666,11 +18666,11 @@ maintainers = [{"company": "no", "email": "-", "name": "Splunk Threat Research T spec_version = 3 searches = ["ESCU - Print Spooler Adding A Printer Driver - Rule", "ESCU - Print Spooler Failed to Load a Plug-in - Rule", "ESCU - Rundll32 with no Command Line Arguments with Network - Rule", "ESCU - Spoolsv Spawning Rundll32 - Rule", "ESCU - Spoolsv Suspicious Loaded Modules - Rule", "ESCU - Spoolsv Suspicious Process Access - Rule", "ESCU - Spoolsv Writing a DLL - Rule", "ESCU - Spoolsv Writing a DLL - Sysmon - Rule", "ESCU - Suspicious Rundll32 no Command Line Arguments - Rule"] description = The following analytic story identifies behaviors related PrintNightmare, or CVE-2021-34527 previously known as (CVE-2021-1675), to gain privilege escalation on the vulnerable machine. -narrative = This vulnerability affects the Print Spooler service, enabled by default on Windows systems, and allows adversaries to trick this service into installing a remotely hosted print driver using a low privileged user account. Successful exploitation effectively allows adversaries to execute code in the target system (Remote Code Execution) in the context of the Print Spooler service which runs with the highest privileges (Privilege Escalation).\ -The prerequisites for successful exploitation consist of:\ -1. Print Spooler service enabled on the target system\ -1. Network connectivity to the target system (initial access has been obtained)\ -1. Hash or password for a low privileged user ( or computer ) account.\ +narrative = This vulnerability affects the Print Spooler service, enabled by default on Windows systems, and allows adversaries to trick this service into installing a remotely hosted print driver using a low privileged user account. Successful exploitation effectively allows adversaries to execute code in the target system (Remote Code Execution) in the context of the Print Spooler service which runs with the highest privileges (Privilege Escalation). \ +The prerequisites for successful exploitation consist of: \ +1. Print Spooler service enabled on the target system \ +1. Network connectivity to the target system (initial access has been obtained) \ +1. Hash or password for a low privileged user ( or computer ) account. \ In the most impactful scenario, an attacker would be able to leverage this vulnerability to obtain a SYSTEM shell on a domain controller and so escalate their privileges from a low privileged domain account to full domain access in the target environment as shown below. [analytic_story://Prohibited Traffic Allowed or Protocol Mismatch] @@ -18803,7 +18803,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Bhavin Patel"}] spec_version = 3 searches = ["ESCU - Detect New Login Attempts to Routers - Rule", "ESCU - Detect ARP Poisoning - Rule", "ESCU - Detect IPv6 Network Infrastructure Threats - Rule", "ESCU - Detect Port Security Violation - Rule", "ESCU - Detect Rogue DHCP Server - Rule", "ESCU - Detect Software Download To Network Device - Rule", "ESCU - Detect Traffic Mirroring - Rule", "ESCU - Get Notable History - Response Task"] description = Validate the security configuration of network infrastructure and verify that only authorized users and systems are accessing critical assets. Core routing and switching infrastructure are common strategic targets for attackers. 
-narrative = Networking devices, such as routers and switches, are often overlooked as resources that attackers will leverage to subvert an enterprise. Advanced threats actors have shown a proclivity to target these critical assets as a means to siphon and redirect network traffic, flash backdoored operating systems, and implement cryptographic weakened algorithms to more easily decrypt network traffic.\ +narrative = Networking devices, such as routers and switches, are often overlooked as resources that attackers will leverage to subvert an enterprise. Advanced threats actors have shown a proclivity to target these critical assets as a means to siphon and redirect network traffic, flash backdoored operating systems, and implement cryptographic weakened algorithms to more easily decrypt network traffic. \ This Analytic Story helps you gain a better understanding of how your network devices are interacting with your hosts. By compromising your network devices, attackers can obtain direct access to the company's internal infrastructure— effectively increasing the attack surface and accessing private services/data. [analytic_story://Ryuk Ransomware] @@ -18837,11 +18837,11 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Rico Valdez"}] spec_version = 3 searches = ["ESCU - Prohibited Software On Endpoint - Rule", "ESCU - Attacker Tools On Endpoint - Rule", "ESCU - Batch File Write to System32 - Rule", "ESCU - Common Ransomware Extensions - Rule", "ESCU - Common Ransomware Notes - Rule", "ESCU - Deleting Shadow Copies - Rule", "ESCU - Detect PsExec With accepteula Flag - Rule", "ESCU - Detect Renamed PSExec - Rule", "ESCU - File with Samsam Extension - Rule", "ESCU - Samsam Test File Write - Rule", "ESCU - Spike in File Writes - Rule", "ESCU - Remote Desktop Network Bruteforce - Rule", "ESCU - Remote Desktop Network Traffic - Rule", "ESCU - Detect attackers scanning for vulnerable JBoss servers - Rule", "ESCU - Detect malicious requests to exploit JBoss servers - Rule", "ESCU - Get Backup Logs For Endpoint - Response Task", "ESCU - Get History Of Email Sources - Response Task", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task", "ESCU - Get Process Information For Port Activity - Response Task", "ESCU - Investigate Successful Remote Desktop Authentications - Response Task"] description = Leverage searches that allow you to detect and investigate unusual activities that might relate to the SamSam ransomware, including looking for file writes associated with SamSam, RDP brute force attacks, the presence of files with SamSam ransomware extensions, suspicious psexec use, and more. -narrative = The first version of the SamSam ransomware (a.k.a. Samas or SamsamCrypt) was launched in 2015 by a group of Iranian threat actors. The malicious software has affected and continues to affect thousands of victims and has raised almost $6M in ransom.\ -Although categorized under the heading of ransomware, SamSam campaigns have some importance distinguishing characteristics. Most notable is the fact that conventional ransomware is a numbers game. Perpetrators use a "spray-and-pray" approach with phishing campaigns or other mechanisms, charging a small ransom (typically under $1,000). The goal is to find a large number of victims willing to pay these mini-ransoms, adding up to a lucrative payday. They use relatively simple methods for infecting systems.\ -SamSam attacks are different beasts. 
They have become progressively more targeted and skillful than typical ransomware attacks. First, malicious actors break into a victim's network, surveil it, then run the malware manually. The attacks are tailored to cause maximum damage and the threat actors usually demand amounts in the tens of thousands of dollars.\ -In a typical attack on one large healthcare organization in 2018, the company ended up paying a ransom of four Bitcoins, then worth $56,707. Reports showed that access to the company's files was restored within two hours of paying the sum.\ -According to Sophos, SamSam previously leveraged RDP to gain access to targeted networks via brute force. SamSam is not spread automatically, like other malware. It requires skill because it forces the attacker to adapt their tactics to the individual environment. Next, the actors escalate their privileges to admin level. They scan the networks for worthy targets, using conventional tools, such as PsExec or PaExec, to deploy/execute, quickly encrypting files.\ +narrative = The first version of the SamSam ransomware (a.k.a. Samas or SamsamCrypt) was launched in 2015 by a group of Iranian threat actors. The malicious software has affected and continues to affect thousands of victims and has raised almost $6M in ransom. \ +Although categorized under the heading of ransomware, SamSam campaigns have some important distinguishing characteristics. Most notable is the fact that conventional ransomware is a numbers game. Perpetrators use a "spray-and-pray" approach with phishing campaigns or other mechanisms, charging a small ransom (typically under $1,000). The goal is to find a large number of victims willing to pay these mini-ransoms, adding up to a lucrative payday. They use relatively simple methods for infecting systems. \ +SamSam attacks are different beasts. They have become progressively more targeted and skillful than typical ransomware attacks. First, malicious actors break into a victim's network, surveil it, then run the malware manually. The attacks are tailored to cause maximum damage and the threat actors usually demand amounts in the tens of thousands of dollars. \ +In a typical attack on one large healthcare organization in 2018, the company ended up paying a ransom of four Bitcoins, then worth $56,707. Reports showed that access to the company's files was restored within two hours of paying the sum. \ +According to Sophos, SamSam previously leveraged RDP to gain access to targeted networks via brute force. SamSam is not spread automatically, like other malware. It requires skill because it forces the attacker to adapt their tactics to the individual environment. Next, the actors escalate their privileges to admin level. They scan the networks for worthy targets, using conventional tools, such as PsExec or PaExec, to deploy/execute, quickly encrypting files. \ This Analytic Story includes searches designed to help detect and investigate signs of the SamSam ransomware, such as file writes to system32, writes with tell-tale extensions, batch files written to system32, and evidence of brute-force attacks via RDP.
[analytic_story://Sandworm Tools] @@ -18864,14 +18864,14 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Linux Add Files In Known Crontab Directories - Rule", "ESCU - Linux Adding Crontab Using List Parameter - Rule", "ESCU - Linux At Allow Config File Creation - Rule", "ESCU - Linux At Application Execution - Rule", "ESCU - Linux Edit Cron Table Parameter - Rule", "ESCU - Linux Possible Append Command To At Allow Config File - Rule", "ESCU - Linux Possible Append Cronjob Entry on Existing Cronjob File - Rule", "ESCU - Linux Possible Cronjob Modification With Editor - Rule", "ESCU - Linux Service File Created In Systemd Directory - Rule", "ESCU - Linux Service Restarted - Rule", "ESCU - Linux Service Started Or Enabled - Rule", "ESCU - Possible Lateral Movement PowerShell Spawn - Rule", "ESCU - Randomly Generated Scheduled Task Name - Rule", "ESCU - Schedule Task with HTTP Command Arguments - Rule", "ESCU - Schedule Task with Rundll32 Command Trigger - Rule", "ESCU - Scheduled Task Creation on Remote Endpoint using At - Rule", "ESCU - Scheduled Task Deleted Or Created via CMD - Rule", "ESCU - Scheduled Task Initiation on Remote Endpoint - Rule", "ESCU - Schtasks Run Task On Demand - Rule", "ESCU - Schtasks scheduling job on remote system - Rule", "ESCU - Schtasks used for forcing a reboot - Rule", "ESCU - Short Lived Scheduled Task - Rule", "ESCU - Suspicious Scheduled Task from Public Directory - Rule", "ESCU - Svchost LOLBAS Execution Process Spawn - Rule", "ESCU - Windows Enable Win32 ScheduledJob via Registry - Rule", "ESCU - Windows Hidden Schedule Task Settings - Rule", "ESCU - Windows PowerShell ScheduleTask - Rule", "ESCU - Windows Registry Delete Task SD - Rule", "ESCU - Windows Scheduled Task Created Via XML - Rule", "ESCU - Windows Scheduled Task with Highest Privileges - Rule", "ESCU - Windows Schtasks Create Run As System - Rule", "ESCU - WinEvent Scheduled Task Created to Spawn Shell - Rule", "ESCU - WinEvent Scheduled Task Created Within Public Path - Rule", "ESCU - WinEvent Windows Task Scheduler Event Action Started - Rule"] description = The MITRE ATT&CK technique T1053 refers to Scheduled Task/Job. Adversaries might use task scheduling utilities to execute programs or scripts at a predefined date and time. This method is often used for persistence but can also be used for privilege escalation or to execute tasks under certain conditions. Scheduling tasks can be beneficial for an attacker as it can allow them to execute actions at times when the system is less likely to be monitored actively. Different operating systems have different utilities for task scheduling, for example, Unix-like systems have Cron, while Windows has Scheduled Tasks and At Jobs. -narrative = MITRE ATT&CK technique T1053, labeled "Scheduled Task/Job", is a categorization of methods that adversaries use to execute malicious code by scheduling tasks or jobs on a system. This technique is widely utilized for persistence, privilege escalation, and the remote execution of tasks. The technique is applicable across various environments and platforms, including Windows, Linux, and macOS.\ -The technique consists of multiple sub-techniques, each highlighting a distinct mechanism for scheduling tasks or jobs. 
These sub-techniques include T1053.001 (Scheduled Task), T1053.002 (At for Windows), T1053.003 (Cron), T1053.004 (Launchd), T1053.005 (At for Linux), and T1053.006 (Systemd Timers).\ -Scheduled Task (T1053.001) focuses on adversaries' methods for scheduling tasks on a Windows system to maintain persistence or escalate privileges. These tasks can be set to execute at specified times, in response to particular events, or after a defined time interval.\ -The At command for Windows (T1053.002) enables administrators to schedule tasks on a Windows system. Adversaries may exploit this command to execute programs at system startup or at a predetermined schedule for persistence.\ -Cron (T1053.003) is a built-in job scheduler found in Unix-like operating systems. Adversaries can use cron jobs to execute programs at system startup or on a scheduled basis for persistence.\ -Launchd (T1053.004) is a service management framework present in macOS. Adversaries may utilize launchd to maintain persistence on macOS systems by setting up daemons or agents to execute at specific times or in response to defined events.\ -The At command for Linux (T1053.005) enables administrators to schedule tasks on a Linux system. Adversaries can use this command to execute programs at system startup or on a scheduled basis for persistence.\ -Systemd Timers (T1053.006) offer a means of scheduling tasks on Linux systems using systemd. Adversaries can use systemd timers to execute programs at system startup or on a scheduled basis for persistence.\ +narrative = MITRE ATT&CK technique T1053, labeled "Scheduled Task/Job", is a categorization of methods that adversaries use to execute malicious code by scheduling tasks or jobs on a system. This technique is widely utilized for persistence, privilege escalation, and the remote execution of tasks. The technique is applicable across various environments and platforms, including Windows, Linux, and macOS. \ +The technique consists of multiple sub-techniques, each highlighting a distinct mechanism for scheduling tasks or jobs. These sub-techniques include T1053.001 (Scheduled Task), T1053.002 (At for Windows), T1053.003 (Cron), T1053.004 (Launchd), T1053.005 (At for Linux), and T1053.006 (Systemd Timers). \ +Scheduled Task (T1053.001) focuses on adversaries' methods for scheduling tasks on a Windows system to maintain persistence or escalate privileges. These tasks can be set to execute at specified times, in response to particular events, or after a defined time interval. \ +The At command for Windows (T1053.002) enables administrators to schedule tasks on a Windows system. Adversaries may exploit this command to execute programs at system startup or at a predetermined schedule for persistence. \ +Cron (T1053.003) is a built-in job scheduler found in Unix-like operating systems. Adversaries can use cron jobs to execute programs at system startup or on a scheduled basis for persistence. \ +Launchd (T1053.004) is a service management framework present in macOS. Adversaries may utilize launchd to maintain persistence on macOS systems by setting up daemons or agents to execute at specific times or in response to defined events. \ +The At command for Linux (T1053.005) enables administrators to schedule tasks on a Linux system. Adversaries can use this command to execute programs at system startup or on a scheduled basis for persistence. \ +Systemd Timers (T1053.006) offer a means of scheduling tasks on Linux systems using systemd. 
Adversaries can use systemd timers to execute programs at system startup or on a scheduled basis for persistence. \ Detection and mitigation strategies vary for each sub-technique. For instance, monitoring the creation of scheduled tasks or looking for uncorrelated changes to tasks that do not align with known software or patch cycles can be effective for detecting malicious activity related to this technique. Mitigation strategies may involve restricting permissions and applying application control solutions to prevent adversaries from scheduling tasks. [analytic_story://Signed Binary Proxy Execution InstallUtil] @@ -18883,10 +18883,10 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Windows DotNet Binary in Non Standard Path - Rule", "ESCU - Windows InstallUtil Credential Theft - Rule", "ESCU - Windows InstallUtil in Non Standard Path - Rule", "ESCU - Windows InstallUtil Remote Network Connection - Rule", "ESCU - Windows InstallUtil Uninstall Option - Rule", "ESCU - Windows InstallUtil Uninstall Option with Network - Rule", "ESCU - Windows InstallUtil URL in Command Line - Rule"] description = Adversaries may use InstallUtil to proxy execution of code through a trusted Windows utility. -narrative = InstallUtil is a command-line utility that allows for installation and uninstallation of resources by executing specific installer components specified in .NET binaries. InstallUtil is digitally signed by Microsoft and located in the .NET directories on a Windows system: C:\Windows\Microsoft.NET\Framework\v\InstallUtil.exe and C:\Windows\Microsoft.NET\Framework64\v\InstallUtil.exe.\ -There are multiple ways to instantiate InstallUtil and they are all outlined within Atomic Red Team - https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1218.004/T1218.004.md. Two specific ways may be used and that includes invoking via installer assembly class constructor through .NET and via InstallUtil.exe.\ -Typically, adversaries will utilize the most commonly found way to invoke via InstallUtil Uninstall method.\ -Note that parallel processes, and parent process, play a role in how InstallUtil is being used. In particular, a developer using InstallUtil will spawn from VisualStudio. Adversaries, will spawn from non-standard processes like Explorer.exe, cmd.exe or PowerShell.exe. It's important to review the command-line to identify the DLL being loaded.\ +narrative = InstallUtil is a command-line utility that allows for installation and uninstallation of resources by executing specific installer components specified in .NET binaries. InstallUtil is digitally signed by Microsoft and located in the .NET directories on a Windows system: C:\Windows\Microsoft.NET\Framework\v\InstallUtil.exe and C:\Windows\Microsoft.NET\Framework64\v\InstallUtil.exe. \ +There are multiple ways to instantiate InstallUtil and they are all outlined within Atomic Red Team - https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1218.004/T1218.004.md. Two specific ways may be used and that includes invoking via installer assembly class constructor through .NET and via InstallUtil.exe. \ +Typically, adversaries will utilize the most commonly found way to invoke via InstallUtil Uninstall method. \ +Note that parallel processes, and parent process, play a role in how InstallUtil is being used. In particular, a developer using InstallUtil will spawn from VisualStudio. 
Adversaries, will spawn from non-standard processes like Explorer.exe, cmd.exe or PowerShell.exe. It's important to review the command-line to identify the DLL being loaded. \ Parallel processes may also include csc.exe being used to compile a local `.cs` file. This file will be the input to the output. Developers usually do not build direct on the command shell, therefore this should raise suspicion. [analytic_story://Silver Sparrow] @@ -18931,8 +18931,8 @@ maintainers = [{"company": "Mauricio Velazco, Splunk", "email": "-", "name": "De spec_version = 3 searches = ["ESCU - Registry Keys Used For Persistence - Rule", "ESCU - Windows AD AdminSDHolder ACL Modified - Rule", "ESCU - Windows AD Cross Domain SID History Addition - Rule", "ESCU - Windows AD Domain Controller Audit Policy Disabled - Rule", "ESCU - Windows AD Domain Controller Promotion - Rule", "ESCU - Windows AD Domain Replication ACL Addition - Rule", "ESCU - Windows AD DSRM Account Changes - Rule", "ESCU - Windows AD DSRM Password Reset - Rule", "ESCU - Windows AD Privileged Account SID History Addition - Rule", "ESCU - Windows AD Replication Request Initiated by User Account - Rule", "ESCU - Windows AD Replication Request Initiated from Unsanctioned Location - Rule", "ESCU - Windows AD Same Domain SID History Addition - Rule", "ESCU - Windows AD ServicePrincipalName Added To Domain Account - Rule", "ESCU - Windows AD Short Lived Domain Account ServicePrincipalName - Rule", "ESCU - Windows AD Short Lived Domain Controller SPN Attribute - Rule", "ESCU - Windows AD Short Lived Server Object - Rule", "ESCU - Windows AD SID History Attribute Modified - Rule", "ESCU - Windows Admon Default Group Policy Object Modified - Rule", "ESCU - Windows Admon Group Policy Object Created - Rule", "ESCU - Windows Default Group Policy Object Modified - Rule", "ESCU - Windows Default Group Policy Object Modified with GPME - Rule", "ESCU - Windows Group Policy Object Created - Rule", "ESCU - Windows Security Support Provider Reg Query - Rule", "ESCU - Windows AD Replication Service Traffic - Rule", "ESCU - Windows AD Rogue Domain Controller Network Activity - Rule"] description = Monitor for activities and techniques associated with Windows Active Directory persistence techniques. -narrative = Persistence consists of techniques that adversaries use to keep access to systems across restarts, changed credentials, and other interruptions that could cut off their access. Active Directory is a centralized and hierarchical database that stores information about users, computers, and other resources on a network. It provides secure and efficient management of these resources and enables administrators to enforce security policies and delegate administrative tasks.\ -In 2015 Active Directory security researcher Sean Metcalf published a blog post titled `Sneaky Active Directory Persistence Tricks`. In this blog post, Sean described several methods through which an attacker could persist administrative access on an Active Directory network after having Domain Admin level rights for a short period of time. At the time of writing, 8 years after the initial blog post, most of these techniques are still possible since they abuse legitimate administrative functionality and not software vulnerabilities. 
Security engineers defending Active Directory networks should be aware of these technique available to adversaries post exploitation and deploy both preventive and detective security controls for them.\ +narrative = Persistence consists of techniques that adversaries use to keep access to systems across restarts, changed credentials, and other interruptions that could cut off their access. Active Directory is a centralized and hierarchical database that stores information about users, computers, and other resources on a network. It provides secure and efficient management of these resources and enables administrators to enforce security policies and delegate administrative tasks. \ +In 2015, Active Directory security researcher Sean Metcalf published a blog post titled `Sneaky Active Directory Persistence Tricks`. In this blog post, Sean described several methods through which an attacker could persist administrative access on an Active Directory network after having Domain Admin level rights for a short period of time. At the time of writing, 8 years after the initial blog post, most of these techniques are still possible since they abuse legitimate administrative functionality and not software vulnerabilities. Security engineers defending Active Directory networks should be aware of these techniques available to adversaries post exploitation and deploy both preventive and detective security controls for them. \ This analytic story groups detection opportunities for most of the techniques described in Sean's blog post as well as other high impact attacks against Active Directory networks and Domain Controllers like DCSync and DCShadow. For some of these detection opportunities, it is necessary to enable the required GPOs and SACLs; otherwise the event codes will not trigger. Each detection includes a list of requirements for enabling logging.
[analytic_story://Spearphishing Attachments] @@ -18944,13 +18944,13 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Splunk Research Team spec_version = 3 searches = ["ESCU - Gdrive suspicious file sharing - Rule", "ESCU - Gsuite suspicious calendar invite - Rule", "ESCU - Detect Outlook exe writing a zip file - Rule", "ESCU - Detect RTLO In File Name - Rule", "ESCU - Detect RTLO In Process - Rule", "ESCU - Excel Spawning PowerShell - Rule", "ESCU - Excel Spawning Windows Script Host - Rule", "ESCU - MSHTML Module Load in Office Product - Rule", "ESCU - Office Application Spawn rundll32 process - Rule", "ESCU - Office Document Creating Schedule Task - Rule", "ESCU - Office Document Executing Macro Code - Rule", "ESCU - Office Document Spawned Child Process To Download - Rule", "ESCU - Office Product Spawning BITSAdmin - Rule", "ESCU - Office Product Spawning CertUtil - Rule", "ESCU - Office Product Spawning MSHTA - Rule", "ESCU - Office Product Spawning Rundll32 with no DLL - Rule", "ESCU - Office Product Spawning Windows Script Host - Rule", "ESCU - Office Product Spawning Wmic - Rule", "ESCU - Office Product Writing cab or inf - Rule", "ESCU - Office Spawning Control - Rule", "ESCU - Process Creating LNK file in Suspicious Location - Rule", "ESCU - Windows ConHost with Headless Argument - Rule", "ESCU - Windows ISO LNK File Creation - Rule", "ESCU - Windows Office Product Spawning MSDT - Rule", "ESCU - Windows Phishing PDF File Executes URL Link - Rule", "ESCU - Windows Spearphishing Attachment Connect To None MS Office Domain - Rule", "ESCU - Windows Spearphishing Attachment Onenote Spawn Mshta - Rule", "ESCU - Winword Spawning Cmd - Rule", "ESCU - Winword Spawning PowerShell - Rule", "ESCU - Winword Spawning Windows Script Host - Rule"] description = Detect signs of malicious payloads that may indicate that your environment has been breached via a phishing attack. -narrative = Despite its simplicity, phishing remains the most pervasive and dangerous cyberthreat. In fact, research shows that as many as [91% of all successful attacks](https://digitalguardian.com/blog/91-percent-cyber-attacks-start-phishing-email-heres-how-protect-against-phishing) are initiated via a phishing email.\ -As most people know, these emails use fraudulent domains, [email scraping](https://www.cyberscoop.com/emotet-trojan-phishing-scraping-templates-cofense-geodo/), familiar contact names inserted as senders, and other tactics to lure targets into clicking a malicious link, opening an attachment with a [nefarious payload](https://www.cyberscoop.com/emotet-trojan-phishing-scraping-templates-cofense-geodo/), or entering sensitive personal information that perpetrators may intercept. This attack technique requires a relatively low level of skill and allows adversaries to easily cast a wide net. Worse, because its success relies on the gullibility of humans, it's impossible to completely "automate" it out of your environment. However, you can use ES and ESCU to detect and investigate potentially malicious payloads injected into your environment subsequent to a phishing attack.\ -While any kind of file may contain a malicious payload, some are more likely to be perceived as benign (and thus more often escape notice) by the average victim—especially when the attacker sends an email that seems to be from one of their contacts. An example is Microsoft Office files. 
Most corporate users are familiar with documents with the following suffixes: .doc/.docx (MS Word), .xls/.xlsx (MS Excel), and .ppt/.pptx (MS PowerPoint), so they may click without a second thought, slashing a hole in their organizations' security.\ -Following is a typical series of events, according to an [article by Trend Micro](https://blog.trendmicro.com/trendlabs-security-intelligence/rising-trend-attackers-using-lnk-files-download-malware/):\ -1. Attacker sends a phishing email. Recipient downloads the attached file, which is typically a .docx or .zip file with an embedded .lnk file\ -1. The .lnk file executes a PowerShell script\ -1. Powershell executes a reverse shell, rendering the exploit successful As a side note, adversaries are likely to use a tool like Empire to craft and obfuscate payloads and their post-injection activities, such as [exfiltration, lateral movement, and persistence](https://github.com/EmpireProject/Empire).\ +narrative = Despite its simplicity, phishing remains the most pervasive and dangerous cyberthreat. In fact, research shows that as many as [91% of all successful attacks](https://digitalguardian.com/blog/91-percent-cyber-attacks-start-phishing-email-heres-how-protect-against-phishing) are initiated via a phishing email. \ +As most people know, these emails use fraudulent domains, [email scraping](https://www.cyberscoop.com/emotet-trojan-phishing-scraping-templates-cofense-geodo/), familiar contact names inserted as senders, and other tactics to lure targets into clicking a malicious link, opening an attachment with a [nefarious payload](https://www.cyberscoop.com/emotet-trojan-phishing-scraping-templates-cofense-geodo/), or entering sensitive personal information that perpetrators may intercept. This attack technique requires a relatively low level of skill and allows adversaries to easily cast a wide net. Worse, because its success relies on the gullibility of humans, it's impossible to completely "automate" it out of your environment. However, you can use ES and ESCU to detect and investigate potentially malicious payloads injected into your environment subsequent to a phishing attack. \ +While any kind of file may contain a malicious payload, some are more likely to be perceived as benign (and thus more often escape notice) by the average victim—especially when the attacker sends an email that seems to be from one of their contacts. An example is Microsoft Office files. Most corporate users are familiar with documents with the following suffixes: .doc/.docx (MS Word), .xls/.xlsx (MS Excel), and .ppt/.pptx (MS PowerPoint), so they may click without a second thought, slashing a hole in their organizations' security. \ +Following is a typical series of events, according to an [article by Trend Micro](https://blog.trendmicro.com/trendlabs-security-intelligence/rising-trend-attackers-using-lnk-files-download-malware/): \ +1. Attacker sends a phishing email. Recipient downloads the attached file, which is typically a .docx or .zip file with an embedded .lnk file \ +1. The .lnk file executes a PowerShell script \ +1. Powershell executes a reverse shell, rendering the exploit successful As a side note, adversaries are likely to use a tool like Empire to craft and obfuscate payloads and their post-injection activities, such as [exfiltration, lateral movement, and persistence](https://github.com/EmpireProject/Empire). \ This Analytic Story focuses on detecting signs that a malicious payload has been injected into your environment. 
For example, one search detects outlook.exe writing a .zip file. Another looks for suspicious .lnk files launching processes. [analytic_story://Splunk Vulnerabilities] @@ -18973,12 +18973,12 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Java Writing JSP File - Rule", "ESCU - Linux Java Spawning Shell - Rule", "ESCU - Spring4Shell Payload URL Request - Rule", "ESCU - Web JSP Request via URL - Rule", "ESCU - Web Spring4Shell HTTP Request Class Module - Rule", "ESCU - Web Spring Cloud Function FunctionRouter - Rule"] description = Spring4Shell is the nickname given to a zero-day vulnerability in the Spring Core Framework, a programming and configuration model for Java-based enterprise applications. -narrative = An attacker could exploit Spring4Shell by sending a specially crafted request to a vulnerable server. However, exploitation of Spring4Shell requires certain prerequisites, whereas the original Log4Shell vulnerability affected all versions of Log4j 2 using the default configuration.\ -According to Spring, the following requirements were included in the vulnerability report, however the post cautions that there may be other ways in which this can be exploited so this may not be a complete list of requirements at this time:\ -- Java Development Kit (JDK) 9 or greater\ -- Apache Tomcat as the Servlet container\ -- Packaged as a WAR\ -- spring-webmvc or spring-webflux dependency\ +narrative = An attacker could exploit Spring4Shell by sending a specially crafted request to a vulnerable server. However, exploitation of Spring4Shell requires certain prerequisites, whereas the original Log4Shell vulnerability affected all versions of Log4j 2 using the default configuration. \ +According to Spring, the following requirements were included in the vulnerability report, however the post cautions that there may be other ways in which this can be exploited so this may not be a complete list of requirements at this time: \ +- Java Development Kit (JDK) 9 or greater \ +- Apache Tomcat as the Servlet container \ +- Packaged as a WAR \ +- spring-webmvc or spring-webflux dependency \ [analytic_story://SQL Injection] @@ -18990,7 +18990,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Bhavin Patel"}] spec_version = 3 searches = ["ESCU - SQL Injection with Long URLs - Rule", "ESCU - Get Notable History - Response Task"] description = Use the searches in this Analytic Story to help you detect structured query language (SQL) injection attempts characterized by long URLs that contain malicious parameters. -narrative = It is very common for attackers to inject SQL parameters into vulnerable web applications, which then interpret the malicious SQL statements.\ +narrative = It is very common for attackers to inject SQL parameters into vulnerable web applications, which then interpret the malicious SQL statements. \ This Analytic Story contains a search designed to identify attempts by attackers to leverage this technique to compromise a host and gain a foothold in the target environment. 
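As a rough illustration of the long-URL approach referenced above (a hedged sketch, not the shipped "ESCU - SQL Injection with Long URLs - Rule"), a Web data model search along these lines could surface oversized requests carrying common SQL keywords; the 1024-character threshold and the keyword pattern are assumptions to tune per environment, and the sketch assumes the CIM Web data model is populated:
| tstats count min(_time) as firstTime max(_time) as lastTime from datamodel=Web by Web.src Web.dest Web.url
| rename "Web.*" as *
| eval url_length=len(url)
| where url_length>1024 AND match(url, "(?i)(union(\s|%20)+select|select.+from|or\s+1=1|xp_cmdshell)")
| table firstTime lastTime src dest url url_length count
Hits can then be triaged alongside the "Get Notable History" response task listed in the stanza above.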
[analytic_story://Subvert Trust Controls SIP and Trust Provider Hijacking] @@ -19024,8 +19024,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Bhavin Patel"}] spec_version = 3 searches = ["ESCU - AWS Disable Bucket Versioning - Rule", "ESCU - AWS Exfiltration via Bucket Replication - Rule", "ESCU - AWS Exfiltration via DataSync Task - Rule", "ESCU - Detect New Open S3 buckets - Rule", "ESCU - Detect New Open S3 Buckets over AWS CLI - Rule", "ESCU - Detect S3 access from a new IP - Rule", "ESCU - Detect Spike in S3 Bucket deletion - Rule", "ESCU - AWS Investigate User Activities By ARN - Response Task", "ESCU - AWS S3 Bucket details via bucketName - Response Task", "ESCU - Get All AWS Activity From IP Address - Response Task", "ESCU - Get Notable History - Response Task", "ESCU - Investigate AWS activities via region name - Response Task"] description = Use the searches in this Analytic Story using CloudTrail logs to monitor your AWS S3 buckets for evidence of anomalous activity and suspicious behaviors, such as detecting open S3 buckets and buckets being accessed from a new IP, permission and policy updates to the bucket, and potential misuse of other services leading to data being leaked. -narrative = One of the most common ways that attackers attempt to steal data from S3 is by gaining unauthorized access to S3 buckets and copying or exfiltrating data to external locations.\ -However, suspicious S3 activities can refer to any unusual behavior detected within an Amazon Web Services (AWS) Simple Storage Service (S3) bucket, including unauthorized access, unusual data transfer patterns, and access attempts from unknown IP addresses.\ +narrative = One of the most common ways that attackers attempt to steal data from S3 is by gaining unauthorized access to S3 buckets and copying or exfiltrating data to external locations. \ +However, suspicious S3 activities can refer to any unusual behavior detected within an Amazon Web Services (AWS) Simple Storage Service (S3) bucket, including unauthorized access, unusual data transfer patterns, and access attempts from unknown IP addresses. \ It is important for organizations to regularly monitor S3 activities for suspicious behavior and implement security best practices, such as using access controls, encryption, and strong authentication mechanisms, to protect sensitive data stored within S3 buckets. By staying vigilant and taking proactive measures, organizations can help prevent potential security breaches and minimize the impact of attacks if they do occur.
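As an illustrative starting point for the CloudTrail-based monitoring described above (a hedged sketch, not one of the listed ESCU detections), a search over the Splunk Add-on for AWS CloudTrail data could summarize sensitive S3 control-plane calls by caller and source IP; the sourcetype and the dotted field names (userIdentity.arn, requestParameters.bucketName) are assumptions tied to that add-on's extractions:
sourcetype=aws:cloudtrail eventSource=s3.amazonaws.com eventName IN (PutBucketAcl, PutBucketPolicy, PutBucketVersioning, DeleteBucket)
| stats count min(_time) as firstTime max(_time) as lastTime values(eventName) as eventName by userIdentity.arn sourceIPAddress requestParameters.bucketName
| sort - count
Previously unseen sourceIPAddress values for a given bucket are the kind of signal that the "Detect S3 access from a new IP" rule formalizes.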
[analytic_story://Suspicious AWS Traffic] @@ -19037,9 +19037,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Bhavin Patel"}] spec_version = 3 searches = ["ESCU - Detect Spike in blocked Outbound Traffic from your AWS - Rule", "ESCU - AWS Investigate User Activities By ARN - Response Task", "ESCU - AWS Network ACL Details from ID - Response Task", "ESCU - AWS Network Interface details via resourceId - Response Task", "ESCU - Get All AWS Activity From IP Address - Response Task", "ESCU - Get DNS Server History for a host - Response Task", "ESCU - Get DNS traffic ratio - Response Task", "ESCU - Get Notable History - Response Task", "ESCU - Get Process Info - Response Task", "ESCU - Get Process Information For Port Activity - Response Task", "ESCU - Get Process Responsible For The DNS Traffic - Response Task"] description = Leverage these searches to monitor your AWS network traffic for evidence of anomalous activity and suspicious behaviors, such as a spike in blocked outbound traffic in your virtual private cloud (VPC). -narrative = A virtual private cloud (VPC) is an on-demand managed cloud-computing service that isolates computing resources for each client. Inside the VPC container, the environment resembles a physical network.\ -Amazon's VPC service enables you to launch EC2 instances and leverage other Amazon resources. The traffic that flows in and out of this VPC can be controlled via network access-control rules and security groups. Amazon also has a feature called VPC Flow Logs that enables you to log IP traffic going to and from the network interfaces in your VPC. This data is stored using Amazon CloudWatch Logs.\ -Attackers may abuse the AWS infrastructure with insecure VPCs so they can co-opt AWS resources for command-and-control nodes, data exfiltration, and more. Once an EC2 instance is compromised, an attacker may initiate outbound network connections for malicious reasons. Monitoring these network traffic behaviors is crucial for understanding the type of traffic flowing in and out of your network and to alert you to suspicious activities.\ +narrative = A virtual private cloud (VPC) is an on-demand managed cloud-computing service that isolates computing resources for each client. Inside the VPC container, the environment resembles a physical network. \ +Amazon's VPC service enables you to launch EC2 instances and leverage other Amazon resources. The traffic that flows in and out of this VPC can be controlled via network access-control rules and security groups. Amazon also has a feature called VPC Flow Logs that enables you to log IP traffic going to and from the network interfaces in your VPC. This data is stored using Amazon CloudWatch Logs. \ +Attackers may abuse the AWS infrastructure with insecure VPCs so they can co-opt AWS resources for command-and-control nodes, data exfiltration, and more. Once an EC2 instance is compromised, an attacker may initiate outbound network connections for malicious reasons. Monitoring these network traffic behaviors is crucial for understanding the type of traffic flowing in and out of your network and to alert you to suspicious activities. \ The searches in this Analytic Story will monitor your AWS network traffic for evidence of anomalous activity and suspicious behaviors. 
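As a simplified stand-in for the spike logic described above (not the shipped "Detect Spike in blocked Outbound Traffic from your AWS" rule), the following sketch assumes VPC Flow Logs are ingested via the Splunk Add-on for AWS with sourcetype aws:cloudwatchlogs:vpcflow and a CIM-style action field; the hourly bucketing and the three-standard-deviation threshold are arbitrary starting points:
sourcetype=aws:cloudwatchlogs:vpcflow action=blocked
| bin _time span=1h
| stats count as blocked_flows by _time src_ip
| eventstats avg(blocked_flows) as avg_flows stdev(blocked_flows) as stdev_flows by src_ip
| where blocked_flows > avg_flows + (3 * stdev_flows)
Any instance that trips the threshold can then be pivoted on with the ARN and IP address response tasks listed in the stanza.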
[analytic_story://Suspicious Cloud Authentication Activities] @@ -19051,7 +19051,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Rico Valdez"}] spec_version = 3 searches = ["ESCU - AWS Cross Account Activity From Previously Unseen Account - Rule", "ESCU - Detect AWS Console Login by New User - Rule", "ESCU - Detect AWS Console Login by User from New City - Rule", "ESCU - Detect AWS Console Login by User from New Country - Rule", "ESCU - Detect AWS Console Login by User from New Region - Rule", "ESCU - Get Notable History - Response Task", "ESCU - Investigate AWS User Activities by user field - Response Task"] description = Monitor your cloud authentication events. Searches within this Analytic Story leverage the recent cloud updates to the Authentication data model to help you stay aware of and investigate suspicious login activity. -narrative = It is important to monitor and control who has access to your cloud infrastructure. Detecting suspicious logins will provide good starting points for investigations. Abusive behaviors caused by compromised credentials can lead to direct monetary costs, as you will be billed for any compute activity whether legitimate or otherwise.\ +narrative = It is important to monitor and control who has access to your cloud infrastructure. Detecting suspicious logins will provide good starting points for investigations. Abusive behaviors caused by compromised credentials can lead to direct monetary costs, as you will be billed for any compute activity whether legitimate or otherwise. \ This Analytic Story has data model versions of cloud searches leveraging Authentication data, including those looking for suspicious login activity, and cross-account activity for AWS. [analytic_story://Suspicious Cloud Instance Activities] @@ -19074,7 +19074,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "David Dorsey"}] spec_version = 3 searches = ["ESCU - Cloud Provisioning Activity From Previously Unseen City - Rule", "ESCU - Cloud Provisioning Activity From Previously Unseen Country - Rule", "ESCU - Cloud Provisioning Activity From Previously Unseen IP Address - Rule", "ESCU - Cloud Provisioning Activity From Previously Unseen Region - Rule", "ESCU - Get Notable History - Response Task"] description = Monitor your cloud infrastructure provisioning activities for behaviors originating from unfamiliar or unusual locations. These behaviors may indicate that malicious activities are occurring somewhere within your cloud environment. -narrative = Because most enterprise cloud infrastructure activities originate from familiar geographic locations, monitoring for activity from unknown or unusual regions is an important security measure. This indicator can be especially useful in environments where it is impossible to add specific IPs to an allow list because they vary.\ +narrative = Because most enterprise cloud infrastructure activities originate from familiar geographic locations, monitoring for activity from unknown or unusual regions is an important security measure. This indicator can be especially useful in environments where it is impossible to add specific IPs to an allow list because they vary. \ This Analytic Story was designed to provide you with flexibility in the precision you employ in specifying legitimate geographic regions. It can be as specific as an IP address or a city, or as broad as a region (think state) or an entire country. 
By determining how precise you want your geographical locations to be and monitoring for new locations that haven't previously accessed your environment, you can detect adversaries as they begin to probe your environment. Since there are legitimate reasons for activities from unfamiliar locations, this is not a standalone indicator. Nevertheless, location can be a relevant piece of information that you may wish to investigate further. [analytic_story://Suspicious Cloud User Activities] @@ -19086,7 +19086,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "David Dorsey"}] spec_version = 3 searches = ["ESCU - Abnormally High Number Of Cloud Infrastructure API Calls - Rule", "ESCU - Abnormally High Number Of Cloud Security Group API Calls - Rule", "ESCU - AWS IAM AccessDenied Discovery Events - Rule", "ESCU - AWS Lambda UpdateFunctionCode - Rule", "ESCU - Cloud API Calls From Previously Unseen User Roles - Rule", "ESCU - Cloud Security Groups Modifications by User - Rule", "ESCU - AWS Investigate User Activities By ARN - Response Task"] description = Detect and investigate suspicious activities by users and roles in your cloud environments. -narrative = It seems obvious that it is critical to monitor and control the users who have access to your cloud infrastructure. Nevertheless, it's all too common for enterprises to lose track of ad-hoc accounts, leaving their servers vulnerable to attack. In fact, this was the very oversight that led to Tesla's cryptojacking attack in February, 2018.\ +narrative = It seems obvious that it is critical to monitor and control the users who have access to your cloud infrastructure. Nevertheless, it's all too common for enterprises to lose track of ad-hoc accounts, leaving their servers vulnerable to attack. In fact, this was the very oversight that led to Tesla's cryptojacking attack in February 2018. \ In addition to compromising the security of your data, when bad actors leverage your compute resources, it can incur monumental costs, since you will be billed for any new instances and increased bandwidth usage. [analytic_story://Suspicious Command-Line Executions] @@ -19109,9 +19109,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Detect HTML Help Renamed - Rule", "ESCU - Detect HTML Help Spawn Child Process - Rule", "ESCU - Detect HTML Help URL in Command Line - Rule", "ESCU - Detect HTML Help Using InfoTech Storage Handlers - Rule", "ESCU - Windows System Binary Proxy Execution Compiled HTML File Decompile - Rule"] description = Monitor and detect techniques used by attackers who leverage the HTML Help executable (hh.exe) to execute malicious code. -narrative = Adversaries may abuse Compiled HTML files (.chm) to conceal malicious code. CHM files are commonly distributed as part of the Microsoft HTML Help system. CHM files are compressed compilations of various content such as HTML documents, images, and scripting/web related programming languages such VBA, JScript, Java, and ActiveX. CHM content is displayed using underlying components of the Internet Explorer browser loaded by the HTML Help executable program (hh.exe).\ -HH.exe relies upon hhctrl.ocx to load CHM topics.This will load upon execution of a chm file.\ -During investigation, review all parallel processes and child processes.
It is possible for file modification events to occur and it is best to capture the CHM file and decompile it for further analysis.\ +narrative = Adversaries may abuse Compiled HTML files (.chm) to conceal malicious code. CHM files are commonly distributed as part of the Microsoft HTML Help system. CHM files are compressed compilations of various content such as HTML documents, images, and scripting/web related programming languages such as VBA, JScript, Java, and ActiveX. CHM content is displayed using underlying components of the Internet Explorer browser loaded by the HTML Help executable program (hh.exe). \ +HH.exe relies upon hhctrl.ocx to load CHM topics. This will load upon execution of a CHM file. \ +During investigation, review all parallel processes and child processes. It is possible for file modification events to occur and it is best to capture the CHM file and decompile it for further analysis. \ Upon usage of InfoTech Storage Handlers, ms-its, its, mk, itss.dll will load. [analytic_story://Suspicious DNS Traffic] @@ -19134,10 +19134,10 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Bhavin Patel"}] spec_version = 3 searches = ["ESCU - Email Attachments With Lots Of Spaces - Rule", "ESCU - Monitor Email For Brand Abuse - Rule", "ESCU - Suspicious Email Attachment Extensions - Rule", "ESCU - Suspicious Email - UBA Anomaly - Rule", "ESCU - Get Email Info - Response Task", "ESCU - Get Emails From Specific Sender - Response Task", "ESCU - Get Notable History - Response Task"] description = Email remains one of the primary means for attackers to gain an initial foothold within the modern enterprise. Detect and investigate suspicious emails in your environment with the help of the searches in this Analytic Story. -narrative = It is a common practice for attackers of all types to leverage targeted spearphishing campaigns and mass mailers to deliver weaponized email messages and attachments. Fortunately, there are a number of ways to monitor email data in Splunk to detect suspicious content.\ -Once a phishing message has been detected, the next steps are to answer the following questions:\ -1. Which users have received this or a similar message in the past?\ -1. When did the targeted campaign begin?\ +narrative = It is a common practice for attackers of all types to leverage targeted spearphishing campaigns and mass mailers to deliver weaponized email messages and attachments. Fortunately, there are a number of ways to monitor email data in Splunk to detect suspicious content. \ +Once a phishing message has been detected, the next steps are to answer the following questions: \ +1. Which users have received this or a similar message in the past? \ +1. When did the targeted campaign begin? \ 1. Have any users interacted with the content of the messages (by downloading an attachment or clicking on a malicious URL)? This Analytic Story provides detection searches to identify suspicious emails, as well as contextual and investigative searches to help answer some of these questions.
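To make the first triage question above concrete, a hedged sketch against the CIM Email data model (assumed to be populated by your mail logs; the sender address is a placeholder, and this is not the "Get Emails From Specific Sender" response task itself) could look like:
| tstats count min(_time) as firstTime max(_time) as lastTime values(All_Email.recipient) as recipients from datamodel=Email where All_Email.src_user="suspicious.sender@example.com" by All_Email.src_user All_Email.subject
| rename "All_Email.*" as *
The recipients list answers who else received the message, and firstTime/lastTime bound when the campaign began.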
[analytic_story://Suspicious GCP Storage Activities] @@ -19160,18 +19160,18 @@ maintainers = [{"company": "Michael Haag, Splunk", "email": "-", "name": "Bhavin spec_version = 3 searches = ["ESCU - Detect mshta inline hta execution - Rule", "ESCU - Detect mshta renamed - Rule", "ESCU - Detect MSHTA Url in Command Line - Rule", "ESCU - Detect Prohibited Applications Spawning cmd exe - Rule", "ESCU - Detect Rundll32 Inline HTA Execution - Rule", "ESCU - Registry Keys Used For Persistence - Rule", "ESCU - Suspicious mshta child process - Rule", "ESCU - Suspicious mshta spawn - Rule", "ESCU - Windows MSHTA Writing to World Writable Path - Rule", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task"] description = Monitor and detect techniques used by attackers who leverage the mshta.exe process to execute malicious code. -narrative = One common adversary tactic is to bypass application control solutions via the mshta.exe process, which loads Microsoft HTML applications (mshtml.dll) with the .hta suffix. In these cases, attackers use the trusted Windows utility to proxy execution of malicious files, whether an .hta application, javascript, or VBScript.\ -The searches in this story help you detect and investigate suspicious activity that may indicate that an attacker is leveraging mshta.exe to execute malicious code.\ -Triage\ -Validate execution\ -1. Determine if MSHTA.exe executed. Validate the OriginalFileName of MSHTA.exe and further PE metadata. If executed outside of c:\windows\system32 or c:\windows\syswow64, it should be highly suspect.\ -1. Determine if script code was executed with MSHTA.\ -Situational Awareness\ -The objective of this step is meant to identify suspicious behavioral indicators related to executed of Script code by MSHTA.exe.\ -1. Parent process. Is the parent process a known LOLBin? Is the parent process an Office Application?\ -1. Module loads. Are the known MSHTA.exe modules being loaded by a non-standard application? Is MSHTA loading any suspicious .DLLs?\ -1. Network connections. Any network connections? Review the reputation of the remote IP or domain.\ -Retrieval of script code\ +narrative = One common adversary tactic is to bypass application control solutions via the mshta.exe process, which loads Microsoft HTML applications (mshtml.dll) with the .hta suffix. In these cases, attackers use the trusted Windows utility to proxy execution of malicious files, whether an .hta application, JavaScript, or VBScript. \ +The searches in this story help you detect and investigate suspicious activity that may indicate that an attacker is leveraging mshta.exe to execute malicious code. \ +Triage \ +Validate execution \ +1. Determine if MSHTA.exe executed. Validate the OriginalFileName of MSHTA.exe and further PE metadata. If executed outside of c:\windows\system32 or c:\windows\syswow64, it should be highly suspect. \ +1. Determine if script code was executed with MSHTA. \ +Situational Awareness \ +The objective of this step is to identify suspicious behavioral indicators related to the execution of script code by MSHTA.exe. \ +1. Parent process. Is the parent process a known LOLBin? Is the parent process an Office Application? \ +1. Module loads. Are the known MSHTA.exe modules being loaded by a non-standard application? Is MSHTA loading any suspicious .DLLs? \ +1. Network connections. Any network connections? Review the reputation of the remote IP or domain.
\ +Retrieval of script code \ The objective of this step is to confirm the executed script code is benign or malicious. [analytic_story://Suspicious Okta Activity] @@ -19183,8 +19183,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Rico Valdez"}] spec_version = 3 searches = ["ESCU - Okta IDP Lifecycle Modifications - Rule", "ESCU - Okta Risk Threshold Exceeded - Rule", "ESCU - Okta Suspicious Use of a Session Cookie - Rule", "ESCU - Multiple Okta Users With Invalid Credentials From The Same IP - Rule", "ESCU - Okta Account Locked Out - Rule", "ESCU - Okta Account Lockout Events - Rule", "ESCU - Okta Failed SSO Attempts - Rule", "ESCU - Okta ThreatInsight Login Failure with High Unknown users - Rule", "ESCU - Okta ThreatInsight Suspected PasswordSpray Attack - Rule", "ESCU - Okta Two or More Rejected Okta Pushes - Rule", "ESCU - Investigate Okta Activity by app - Response Task", "ESCU - Investigate Okta Activity by IP Address - Response Task", "ESCU - Investigate User Activities In Okta - Response Task"] description = Monitor your Okta environment for suspicious activities. Due to the Covid outbreak, many users are migrating over to leverage cloud services more and more. Okta is a popular tool to manage multiple users and the web-based applications they need to stay productive. The searches in this story will help monitor your Okta environment for suspicious activities and associated user behaviors. -narrative = Okta is the leading single sign on (SSO) provider, allowing users to authenticate once to Okta, and from there access a variety of web-based applications. These applications are assigned to users and allow administrators to centrally manage which users are allowed to access which applications. It also provides centralized logging to help understand how the applications are used and by whom.\ -While SSO is a major convenience for users, it also provides attackers with an opportunity. If the attacker can gain access to Okta, they can access a variety of applications. As such monitoring the environment is important.\ +narrative = Okta is the leading single sign on (SSO) provider, allowing users to authenticate once to Okta, and from there access a variety of web-based applications. These applications are assigned to users and allow administrators to centrally manage which users are allowed to access which applications. It also provides centralized logging to help understand how the applications are used and by whom. \ +While SSO is a major convenience for users, it also provides attackers with an opportunity. If the attacker can gain access to Okta, they can access a variety of applications. As such monitoring the environment is important. \ With people moving quickly to adopt web-based applications and ways to manage them, many are still struggling to understand how best to monitor these environments. This analytic story provides searches to help monitor this environment, and identify events and activity that warrant further investigation such as credential stuffing or password spraying attacks, and users logging in from multiple locations when travel is disallowed. 
[analytic_story://Suspicious Regsvcs Regasm Activity] @@ -19229,8 +19229,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Bhavin Patel"}] spec_version = 3 searches = ["ESCU - Reg exe used to hide files directories via registry keys - Rule", "ESCU - Remote Registry Key modifications - Rule", "ESCU - Suspicious Changes to File Associations - Rule", "ESCU - Disable UAC Remote Restriction - Rule", "ESCU - Disabling Remote User Account Control - Rule", "ESCU - Monitor Registry Keys for Print Monitors - Rule", "ESCU - Registry Keys for Creating SHIM Databases - Rule", "ESCU - Registry Keys Used For Persistence - Rule", "ESCU - Registry Keys Used For Privilege Escalation - Rule", "ESCU - Windows Mshta Execution In Registry - Rule", "ESCU - Windows Service Creation Using Registry Entry - Rule", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task"] description = Monitor and detect registry changes initiated from remote locations, which can be a sign that an attacker has infiltrated your system. -narrative = Attackers are developing increasingly sophisticated techniques for hijacking target servers, while evading detection. One such technique that has become progressively more common is registry modification.\ -The registry is a key component of the Windows operating system. It has a hierarchical database called "registry" that contains settings, options, and values for executables. Once the threat actor gains access to a machine, they can use reg.exe to modify their account to obtain administrator-level privileges, maintain persistence, and move laterally within the environment.\ +narrative = Attackers are developing increasingly sophisticated techniques for hijacking target servers, while evading detection. One such technique that has become progressively more common is registry modification. \ +The registry is a key component of the Windows operating system. It is a hierarchical database that contains settings, options, and values for executables. Once the threat actor gains access to a machine, they can use reg.exe to modify their account to obtain administrator-level privileges, maintain persistence, and move laterally within the environment. \ The searches in this story are designed to help you detect behaviors associated with manipulation of the Windows registry. [analytic_story://Suspicious WMI Use] @@ -19253,7 +19253,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "David Dorsey"}] spec_version = 3 searches = ["ESCU - Detect Prohibited Applications Spawning cmd exe - Rule", "ESCU - First Time Seen Child Process of Zoom - Rule", "ESCU - Get Process File Activity - Response Task"] description = Attackers are using Zoom as a vector to increase privileges on systems. This story detects new child processes of Zoom and provides investigative actions for this detection. -narrative = Zoom is a leader in modern enterprise video communications and its usage has increased dramatically with a large amount of the population under stay-at-home orders due to the COVID-19 pandemic.
With increased usage has come increased scrutiny and several security flaws have been found with this application on both Windows and macOS systems. \ Current detections focus on finding new child processes of this application on a per host basis. Investigative searches are included to gather information needed during an investigation. [analytic_story://Swift Slicer] @@ -19309,7 +19309,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Suspicious microsoft workflow compiler rename - Rule", "ESCU - Suspicious microsoft workflow compiler usage - Rule"] description = Monitor and detect behaviors used by attackers who leverage trusted developer utilities to execute malicious code. -narrative = Adversaries may take advantage of trusted developer utilities to proxy execution of malicious payloads. There are many utilities used for software development related tasks that can be used to execute code in various forms to assist in development, debugging, and reverse engineering. These utilities may often be signed with legitimate certificates that allow them to execute on a system and proxy execution of malicious code through a trusted process that effectively bypasses application control solutions.\ +narrative = Adversaries may take advantage of trusted developer utilities to proxy execution of malicious payloads. There are many utilities used for software development related tasks that can be used to execute code in various forms to assist in development, debugging, and reverse engineering. These utilities may often be signed with legitimate certificates that allow them to execute on a system and proxy execution of malicious code through a trusted process that effectively bypasses application control solutions. \ The searches in this story help you detect and investigate suspicious activity that may indicate that an adversary is leveraging microsoft.workflow.compiler.exe to execute malicious code. [analytic_story://Trusted Developer Utilities Proxy Execution MSBuild] @@ -19321,19 +19321,19 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - MSBuild Suspicious Spawned By Script Process - Rule", "ESCU - Suspicious msbuild path - Rule", "ESCU - Suspicious MSBuild Rename - Rule", "ESCU - Suspicious MSBuild Spawn - Rule"] description = Monitor and detect techniques used by attackers who leverage the msbuild.exe process to execute malicious code. -narrative = Adversaries may use MSBuild to proxy execution of code through a trusted Windows utility. MSBuild.exe (Microsoft Build Engine) is a software build platform used by Visual Studio and is native to Windows. It handles XML formatted project files that define requirements for loading and building various platforms and configurations.\ -The inline task capability of MSBuild that was introduced in .NET version 4 allows for C# code to be inserted into an XML project file. MSBuild will compile and execute the inline task. MSBuild.exe is a signed Microsoft binary, so when it is used this way it can execute arbitrary code and bypass application control defenses that are configured to allow MSBuild.exe execution.\ -The searches in this story help you detect and investigate suspicious activity that may indicate that an adversary is leveraging msbuild.exe to execute malicious code.\ -Triage\ -Validate execution\ -1. Determine if MSBuild.exe executed. Validate the OriginalFileName of MSBuild.exe and further PE metadata.\ -1. 
Determine if script code was executed with MSBuild.\ -Situational Awareness\ -The objective of this step is meant to identify suspicious behavioral indicators related to executed of Script code by MSBuild.exe.\ -1. Parent process. Is the parent process a known LOLBin? Is the parent process an Office Application?\ -1. Module loads. Are the known MSBuild.exe modules being loaded by a non-standard application? Is MSbuild loading any suspicious .DLLs?\ -1. Network connections. Any network connections? Review the reputation of the remote IP or domain.\ -Retrieval of script code\ +narrative = Adversaries may use MSBuild to proxy execution of code through a trusted Windows utility. MSBuild.exe (Microsoft Build Engine) is a software build platform used by Visual Studio and is native to Windows. It handles XML formatted project files that define requirements for loading and building various platforms and configurations. \ +The inline task capability of MSBuild that was introduced in .NET version 4 allows for C# code to be inserted into an XML project file. MSBuild will compile and execute the inline task. MSBuild.exe is a signed Microsoft binary, so when it is used this way it can execute arbitrary code and bypass application control defenses that are configured to allow MSBuild.exe execution. \ +The searches in this story help you detect and investigate suspicious activity that may indicate that an adversary is leveraging msbuild.exe to execute malicious code. \ +Triage \ +Validate execution \ +1. Determine if MSBuild.exe executed. Validate the OriginalFileName of MSBuild.exe and further PE metadata. \ +1. Determine if script code was executed with MSBuild. \ +Situational Awareness \ +The objective of this step is to identify suspicious behavioral indicators related to the execution of script code by MSBuild.exe. \ +1. Parent process. Is the parent process a known LOLBin? Is the parent process an Office Application? \ +1. Module loads. Are the known MSBuild.exe modules being loaded by a non-standard application? Is MSBuild loading any suspicious .DLLs? \ +1. Network connections. Any network connections? Review the reputation of the remote IP or domain. \ +Retrieval of script code \ The objective of this step is to confirm the executed script code is benign or malicious.
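In the spirit of the "MSBuild Suspicious Spawned By Script Process" detection named above (a sketch, not its exact logic), a tstats search over the Endpoint.Processes data model could flag msbuild.exe launched by script hosts; the parent-process list is an assumption, and the data model is assumed to be fed by Sysmon or EDR process telemetry:
| tstats count min(_time) as firstTime max(_time) as lastTime from datamodel=Endpoint.Processes where Processes.process_name="msbuild.exe" Processes.parent_process_name IN ("wscript.exe", "cscript.exe", "powershell.exe", "mshta.exe") by Processes.dest Processes.user Processes.parent_process_name Processes.process_name Processes.process
| rename "Processes.*" as *
Results feed directly into the triage steps in the narrative: validate the PE metadata, then review parent process, module loads, and network connections.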
[analytic_story://Unusual Processes] @@ -19345,8 +19345,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Bhavin Patel"}] spec_version = 3 searches = ["ESCU - Uncommon Processes On Endpoint - Rule", "ESCU - Attacker Tools On Endpoint - Rule", "ESCU - Detect processes used for System Network Configuration Discovery - Rule", "ESCU - Detect Rare Executables - Rule", "ESCU - Rundll32 Shimcache Flush - Rule", "ESCU - RunDLL Loading DLL By Ordinal - Rule", "ESCU - Suspicious Copy on System32 - Rule", "ESCU - Suspicious Process Executed From Container File - Rule", "ESCU - System Processes Run From Unexpected Locations - Rule", "ESCU - Unusually Long Command Line - Rule", "ESCU - Unusually Long Command Line - MLTK - Rule", "ESCU - Verclsid CLSID Execution - Rule", "ESCU - Windows DotNet Binary in Non Standard Path - Rule", "ESCU - Windows InstallUtil in Non Standard Path - Rule", "ESCU - Windows NirSoft AdvancedRun - Rule", "ESCU - Windows Registry Payload Injection - Rule", "ESCU - Windows Remote Assistance Spawning Process - Rule", "ESCU - WinRM Spawning a Process - Rule", "ESCU - Wscript Or Cscript Suspicious Child Process - Rule", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task"] description = Quickly identify systems running new or unusual processes in your environment that could be indicators of suspicious activity. Processes run from unusual locations, those with conspicuously long command lines, and rare executables are all examples of activities that may warrant deeper investigation. -narrative = Being able to profile a host's processes within your environment can help you more quickly identify processes that seem out of place when compared to the rest of the population of hosts or asset types.\ -This Analytic Story lets you identify processes that are either a) not typically seen running or b) have some sort of suspicious command-line arguments associated with them. This Analytic Story will also help you identify the user running these processes and the associated process activity on the host.\ +narrative = Being able to profile a host's processes within your environment can help you more quickly identify processes that seem out of place when compared to the rest of the population of hosts or asset types. \ +This Analytic Story lets you identify processes that are either a) not typically seen running or b) have some sort of suspicious command-line arguments associated with them. This Analytic Story will also help you identify the user running these processes and the associated process activity on the host. \ In the event an unusual process is identified, it is imperative to better understand how that process was able to execute on the host, when it first executed, and whether other hosts are affected. This extra information may provide clues that can help the analyst further investigate any suspicious activity. [analytic_story://Use of Cleartext Protocols] @@ -19369,10 +19369,10 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - VMWare Aria Operations Exploit Attempt - Rule"] description = CVE-2023-20887 is a critical vulnerability affecting VMware's vRealize Network Insight (also known as VMware Aria Operations for Networks). It allows a remote, unauthenticated attacker to execute arbitrary commands with root privileges via the Apache Thrift RPC interface. 
The exploit, which has a severity score of 9.8, targets an endpoint ("/saas./resttosaasservlet") in the application and delivers a malicious payload designed to create a reverse shell, granting the attacker control over the system. VMware has released an advisory recommending users to update to the latest version to mitigate this threat. -narrative = CVE-2023-20887 is a highly critical vulnerability found in VMware's vRealize Network Insight. This software is widely used for intelligent operations management across physical, virtual, and cloud environments, so a vulnerability in it poses a significant risk to many organizations.\ -This particular vulnerability lies in the application's Apache Thrift RPC interface. The exploit allows an attacker to inject commands that are executed with root privileges, leading to a potential total compromise of the system. The attacker does not need to be authenticated, which further increases the risk posed by this vulnerability.\ -The exploit operates by sending a specially crafted payload to the "/saas./resttosaasservlet" endpoint. This payload contains a reverse shell command, which, when executed, allows the attacker to remotely control the victim's system. This control is obtained at the root level, providing the attacker with the ability to perform any action on the system.\ -What makes this vulnerability particularly dangerous is its high severity score of 9.8, indicating it is a critical threat. It's also noteworthy that the exploitation of this vulnerability leaves specific indicators such as abnormal traffic to the "/saas./resttosaasservlet" endpoint and suspicious ncat commands in network traffic, which can help in its detection.\ +narrative = CVE-2023-20887 is a highly critical vulnerability found in VMware's vRealize Network Insight. This software is widely used for intelligent operations management across physical, virtual, and cloud environments, so a vulnerability in it poses a significant risk to many organizations. \ +This particular vulnerability lies in the application's Apache Thrift RPC interface. The exploit allows an attacker to inject commands that are executed with root privileges, leading to a potential total compromise of the system. The attacker does not need to be authenticated, which further increases the risk posed by this vulnerability. \ +The exploit operates by sending a specially crafted payload to the "/saas./resttosaasservlet" endpoint. This payload contains a reverse shell command, which, when executed, allows the attacker to remotely control the victim's system. This control is obtained at the root level, providing the attacker with the ability to perform any action on the system. \ +What makes this vulnerability particularly dangerous is its high severity score of 9.8, indicating it is a critical threat. It's also noteworthy that the exploitation of this vulnerability leaves specific indicators such as abnormal traffic to the "/saas./resttosaasservlet" endpoint and suspicious ncat commands in network traffic, which can help in its detection. \ VMware has acknowledged the vulnerability and has published a security advisory recommending that users update to the latest version of the software. This update effectively patches the vulnerability and protects systems from this exploit. It's crucial that all users of the affected versions of VMware's vRealize Network Insight promptly apply the update to mitigate the risk posed by CVE-2023-20887. 
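Because the narrative above calls out traffic to the "/saas./resttosaasservlet" endpoint as a primary indicator, a hedged Web data model sketch (not the shipped "VMWare Aria Operations Exploit Attempt" rule; it assumes proxy or web server logs in front of the appliance populate the CIM Web data model) could be:
| tstats count min(_time) as firstTime max(_time) as lastTime from datamodel=Web where Web.url="*/saas./resttosaasservlet*" by Web.src Web.dest Web.url Web.http_method Web.status
| rename "Web.*" as *
Any hit warrants checking the appliance version against the VMware advisory and hunting for follow-on reverse-shell activity such as the ncat commands mentioned above.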
[analytic_story://VMware Server Side Injection and Privilege Escalation] @@ -19395,10 +19395,10 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Teoderick Contreras" spec_version = 3 searches = ["ESCU - Cmdline Tool Not Executed In CMD Shell - Rule", "ESCU - Creation of Shadow Copy - Rule", "ESCU - Creation of Shadow Copy with wmic and powershell - Rule", "ESCU - Detect PsExec With accepteula Flag - Rule", "ESCU - Dump LSASS via comsvcs DLL - Rule", "ESCU - Elevated Group Discovery With Net - Rule", "ESCU - Executables Or Script Creation In Suspicious Path - Rule", "ESCU - Extraction of Registry Hives - Rule", "ESCU - Impacket Lateral Movement Commandline Parameters - Rule", "ESCU - Impacket Lateral Movement smbexec CommandLine Parameters - Rule", "ESCU - Impacket Lateral Movement WMIExec Commandline Parameters - Rule", "ESCU - Malicious PowerShell Process - Encoded Command - Rule", "ESCU - Malicious PowerShell Process - Execution Policy Bypass - Rule", "ESCU - Net Localgroup Discovery - Rule", "ESCU - Network Connection Discovery With Arp - Rule", "ESCU - Network Connection Discovery With Netstat - Rule", "ESCU - Ntdsutil Export NTDS - Rule", "ESCU - Processes launching netsh - Rule", "ESCU - Remote WMI Command Attempt - Rule", "ESCU - Suspicious Copy on System32 - Rule", "ESCU - Suspicious Process File Path - Rule", "ESCU - Windows Common Abused Cmd Shell Risk Behavior - Rule", "ESCU - Windows DNS Gather Network Info - Rule", "ESCU - Windows Ldifde Directory Object Behavior - Rule", "ESCU - Windows Mimikatz Binary Execution - Rule", "ESCU - Windows Multiple Disabled Users Failed To Authenticate Wth Kerberos - Rule", "ESCU - Windows Multiple Invalid Users Fail To Authenticate Using Kerberos - Rule", "ESCU - Windows Multiple Invalid Users Failed To Authenticate Using NTLM - Rule", "ESCU - Windows Multiple Users Fail To Authenticate Wth ExplicitCredentials - Rule", "ESCU - Windows Multiple Users Failed To Authenticate From Host Using NTLM - Rule", "ESCU - Windows Multiple Users Failed To Authenticate From Process - Rule", "ESCU - Windows Multiple Users Failed To Authenticate Using Kerberos - Rule", "ESCU - Windows Multiple Users Remotely Failed To Authenticate From Host - Rule", "ESCU - Windows Proxy Via Netsh - Rule", "ESCU - Windows Proxy Via Registry - Rule", "ESCU - Windows Unusual Count Of Disabled Users Failed Auth Using Kerberos - Rule", "ESCU - Windows Unusual Count Of Invalid Users Fail To Auth Using Kerberos - Rule", "ESCU - Windows Unusual Count Of Invalid Users Failed To Auth Using NTLM - Rule", "ESCU - Windows Unusual Count Of Users Fail To Auth Wth ExplicitCredentials - Rule", "ESCU - Windows Unusual Count Of Users Failed To Auth Using Kerberos - Rule", "ESCU - Windows Unusual Count Of Users Failed To Authenticate From Process - Rule", "ESCU - Windows Unusual Count Of Users Failed To Authenticate Using NTLM - Rule", "ESCU - Windows Unusual Count Of Users Remotely Failed To Auth From Host - Rule", "ESCU - Windows WMI Process Call Create - Rule"] description = This analytic story contains detections that allow security analysts to detect and investigate unusual activities that might relate to the "Volt Typhoon" group targeting critical infrastructure organizations in United States and Guam. The affected organizations include the communications, manufacturing, utility, transportation, construction, maritime, government, information technology, and education sectors. 
This Analytic Story looks for suspicious process execution, LOLBin execution, command-line activity, LSASS dumps, and more. -narrative = Volt Typhoon is a state sponsored group typically focuses on espionage and information gathering. Based on Microsoft Threat Intelligence, This threat actor group puts strong emphasis on stealth in this campaign by relying almost exclusively on living-off-the-land techniques and hands-on-keyboard activity.\ -They issue commands via the command line to: 1. collect data, including credentials from local and network systems,\ -2. put the data into an archive file to stage it for exfiltration, and then\ -3. use the stolen valid credentials to maintain persistence.\ +narrative = Volt Typhoon is a state-sponsored group that typically focuses on espionage and information gathering. Based on Microsoft Threat Intelligence, this threat actor group puts a strong emphasis on stealth in this campaign by relying almost exclusively on living-off-the-land techniques and hands-on-keyboard activity. \ +They issue commands via the command line to: 1. collect data, including credentials from local and network systems, \ +2. put the data into an archive file to stage it for exfiltration, and then \ +3. use the stolen valid credentials to maintain persistence. \ In addition, Volt Typhoon tries to blend into normal network activity by routing traffic through compromised small office and home office (SOHO) network equipment, including routers, firewalls, and VPN hardware. They have also been observed using custom versions of open-source tools to establish a command and control (C2) channel over proxy to further stay under the radar. [analytic_story://Warzone RAT] @@ -19432,8 +19432,8 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - Windows AppLocker Block Events - Rule", "ESCU - Windows AppLocker Execution from Uncommon Locations - Rule", "ESCU - Windows AppLocker Privilege Escalation via Unauthorized Bypass - Rule", "ESCU - Windows AppLocker Rare Application Launch Detection - Rule"] description = Windows AppLocker is a feature that enhances security by allowing administrators to specify which users or groups can run particular applications in their organization based on unique identities of files. This story covers various aspects of monitoring and managing AppLocker policies, including detecting unauthorized software installations, enforcing best practices for software usage, and identifying potential security breaches through advanced threat detection techniques. Through the use of Splunk Enterprise, Splunk Enterprise Security, and Splunk Cloud, organizations can gain insights into AppLocker events, ensuring compliance with corporate security policies and mitigating risks associated with unauthorized applications. -narrative = AppLocker, a built-in Windows security feature, provides organizations with the ability to control application usage across their networks. It enables administrators to define rules based on file names, publishers, and file hashes to allow or deny the execution of applications. This level of control helps in preventing malware and unlicensed software from running, thereby enhancing the security posture of an organization. \\ -Organizations should leverage AppLocker for several reasons. Firstly, it aids in the enforcement of software compliance policies by ensuring that only licensed and approved applications are run on the network.
Secondly, by restricting the execution of unauthorized applications, AppLocker significantly reduces the attack surface, making it harder for attackers to exploit vulnerabilities in unapproved software. Thirdly, AppLocker's ability to log attempts to run unauthorized applications provides valuable insights for security monitoring and incident response activities. This logging capability enables organizations to detect and respond to potential security threats in real time. \\ +narrative = AppLocker, a built-in Windows security feature, provides organizations with the ability to control application usage across their networks. It enables administrators to define rules based on file names, publishers, and file hashes to allow or deny the execution of applications. This level of control helps in preventing malware and unlicensed software from running, thereby enhancing the security posture of an organization. \ \ +Organizations should leverage AppLocker for several reasons. Firstly, it aids in the enforcement of software compliance policies by ensuring that only licensed and approved applications are run on the network. Secondly, by restricting the execution of unauthorized applications, AppLocker significantly reduces the attack surface, making it harder for attackers to exploit vulnerabilities in unapproved software. Thirdly, AppLocker's ability to log attempts to run unauthorized applications provides valuable insights for security monitoring and incident response activities. This logging capability enables organizations to detect and respond to potential security threats in real time. \ \ In summary, AppLocker is a critical security tool that helps organizations manage application usage, enforce compliance policies, and mitigate security risks. By implementing AppLocker policies, organizations can achieve a robust security posture, protecting their assets from unauthorized software and potential cyber threats. [analytic_story://Windows Attack Surface Reduction] @@ -19522,9 +19522,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - System Processes Run From Unexpected Locations - Rule", "ESCU - Windows Process Injection Wermgr Child Process - Rule", "ESCU - WinEvent Scheduled Task Created to Spawn Shell - Rule"] description = In July 2023, CrowdStrike's Falcon Complete managed detection and response (MDR) team uncovered an exploit kit using an unknown vulnerability in the Windows Error Reporting (WER) component. The vulnerability, now identified as CVE-2023-36874, was also independently discovered by Google's Threat Analysis Group. The exploit came to light when suspicious binaries were observed on a European technology system. CrowdStrike's Counter Adversary Operations' analysis revealed a zero-day exploit targeting the WER service, allowing attackers to execute unauthorized code with elevated privileges. The exploit kit seen aimed to spawn a privileged interpreter, displaying the versatility and adaptability of the threat. CrowdStrike has listed some potential indicators of compromise, but these are of low fidelity due to their mutable nature. -narrative = In June 2023, CrowdStrike's Falcon Complete team observed suspicious activities on a European technology entity's system. Multiple binaries were dropped onto the system via Remote Desktop Protocol (RDP), some of which were flagged as potential exploits for a known vulnerability. 
However, a string containing the Russian term for "0day" suggested an unknown vulnerability was at play. Subsequent investigations identified this as a zero-day vulnerability affecting the Windows Error Reporting (WER) component, now known as CVE-2023-36874.\ -The WER service's function is to report software issues on Windows hosts. The exploit centered around manipulating the WER service by redirecting file systems to execute attacker-controlled code with elevated privileges. This was achieved by creating a symbolic link redirection from the C:\ drive to an attacker-controlled directory, and then triggering certain WER functions. Consequently, an unauthorized executable was run instead of the legitimate one, giving the attacker high-level access.\ -The observed exploit kit's primary objective was to initiate a privileged interpreter, such as cmd.exe or powershell_ise.exe. If this couldn't be achieved, a privileged scheduled task was created as an alternative. The exploit kit showcased a range of binaries, some packed and others not, some in C++ and others in pure C. This diversity suggests the knowledge of the vulnerability was likely shared among different developers.\ +narrative = In June 2023, CrowdStrike's Falcon Complete team observed suspicious activities on a European technology entity's system. Multiple binaries were dropped onto the system via Remote Desktop Protocol (RDP), some of which were flagged as potential exploits for a known vulnerability. However, a string containing the Russian term for "0day" suggested an unknown vulnerability was at play. Subsequent investigations identified this as a zero-day vulnerability affecting the Windows Error Reporting (WER) component, now known as CVE-2023-36874. \ +The WER service's function is to report software issues on Windows hosts. The exploit centered around manipulating the WER service by redirecting file systems to execute attacker-controlled code with elevated privileges. This was achieved by creating a symbolic link redirection from the C:\ drive to an attacker-controlled directory, and then triggering certain WER functions. Consequently, an unauthorized executable was run instead of the legitimate one, giving the attacker high-level access. \ +The observed exploit kit's primary objective was to initiate a privileged interpreter, such as cmd.exe or powershell_ise.exe. If this couldn't be achieved, a privileged scheduled task was created as an alternative. The exploit kit showcased a range of binaries, some packed and others not, some in C++ and others in pure C. This diversity suggests the knowledge of the vulnerability was likely shared among different developers. \ CrowdStrike's Counter Adversary Operations, as of now, hasn't linked this activity to any specific threat actor. They've provided potential indicators of compromise, but caution that these are easily changed, indicating the advanced capabilities of the adversaries. 
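Reflecting the behavior described above, where the exploit kit tries to spawn a privileged interpreter, a hedged Endpoint.Processes sketch in the spirit of the "Windows Process Injection Wermgr Child Process" rule (the child-process list is an assumption, and the data model is assumed to be populated by endpoint telemetry) could be:
| tstats count min(_time) as firstTime max(_time) as lastTime from datamodel=Endpoint.Processes where Processes.parent_process_name="wermgr.exe" Processes.process_name IN ("cmd.exe", "powershell.exe", "powershell_ise.exe", "wscript.exe", "cscript.exe") by Processes.dest Processes.user Processes.parent_process_name Processes.process_name Processes.process
| rename "Processes.*" as *
Matches should be correlated with the RDP file-drop activity and scheduled-task creation described in the narrative.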
[analytic_story://Windows File Extension and Association Abuse] @@ -19536,10 +19536,10 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Rico Valdez"}] spec_version = 3 searches = ["ESCU - Execution of File With Spaces Before Extension - Rule", "ESCU - Suspicious Changes to File Associations - Rule", "ESCU - Execution of File with Multiple Extensions - Rule", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task"] description = Detect and investigate suspected abuse of file extensions and Windows file associations. Some of the malicious behaviors involved may include inserting spaces before file extensions or prepending the file extension with a different one, among other techniques. -narrative = Attackers use a variety of techniques to entice users to run malicious code or to persist on an endpoint. One way to accomplish these goals is to leverage file extensions and the mechanism Windows uses to associate files with specific applications.\ -Since its earliest days, Windows has used extensions to identify file types. Users have become familiar with these extensions and their application associations. For example, if users see that a file ends in `.doc` or `.docx`, they will assume that it is a Microsoft Word document and expect that double-clicking will open it using `winword.exe`. The user will typically also presume that the `.docx` file is safe.\ -Attackers take advantage of this expectation by obfuscating the true file extension. They can accomplish this in a couple of ways. One technique involves inserting multiple spaces in the file name before the extension to hide the extension from the GUI, obscuring the true nature of the file. Another approach involves prepending the real extension with a different one. This is especially effective when Windows is configured to "hide extensions for known file types." In this case, the real extension is not displayed, but the prepended one is, leading end users to believe the file is a different type than it actually is.\ -Changing the association between a file extension and an application can allow an attacker to execute arbitrary code. The technique typically involves changing the association for an often-launched file type to associate instead with a malicious program the attacker has dropped on the endpoint. When the end user launches a file that has been manipulated in this way, it will execute the attacker's malware. It will also execute the application the end user expected to run, cleverly obscuring the fact that something suspicious has occurred.\ +narrative = Attackers use a variety of techniques to entice users to run malicious code or to persist on an endpoint. One way to accomplish these goals is to leverage file extensions and the mechanism Windows uses to associate files with specific applications. \ +Since its earliest days, Windows has used extensions to identify file types. Users have become familiar with these extensions and their application associations. For example, if users see that a file ends in `.doc` or `.docx`, they will assume that it is a Microsoft Word document and expect that double-clicking will open it using `winword.exe`. The user will typically also presume that the `.docx` file is safe. \ +Attackers take advantage of this expectation by obfuscating the true file extension. They can accomplish this in a couple of ways. 
One technique involves inserting multiple spaces in the file name before the extension to hide the extension from the GUI, obscuring the true nature of the file. Another approach involves prepending the real extension with a different one. This is especially effective when Windows is configured to "hide extensions for known file types." In this case, the real extension is not displayed, but the prepended one is, leading end users to believe the file is a different type than it actually is. \ +Changing the association between a file extension and an application can allow an attacker to execute arbitrary code. The technique typically involves changing the association for an often-launched file type to associate instead with a malicious program the attacker has dropped on the endpoint. When the end user launches a file that has been manipulated in this way, it will execute the attacker's malware. It will also execute the application the end user expected to run, cleverly obscuring the fact that something suspicious has occurred. \ Run the searches in this story to detect and investigate suspicious behavior that may indicate abuse or manipulation of Windows file extensions and/or associations. [analytic_story://Windows Log Manipulation] @@ -19551,7 +19551,7 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Rico Valdez"}] spec_version = 3 searches = ["ESCU - Deleting Shadow Copies - Rule", "ESCU - Suspicious Event Log Service Behavior - Rule", "ESCU - Suspicious wevtutil Usage - Rule", "ESCU - USN Journal Deletion - Rule", "ESCU - Windows Event Log Cleared - Rule", "ESCU - Get Notable History - Response Task", "ESCU - Get Parent Process Info - Response Task", "ESCU - Get Process Info - Response Task"] description = Adversaries often try to cover their tracks by manipulating Windows logs. Use these searches to help you monitor for suspicious activity surrounding log files--an essential component of an effective defense. -narrative = Because attackers often modify system logs to cover their tracks and/or to thwart the investigative process, log monitoring is an industry-recognized best practice. While there are legitimate reasons to manipulate system logs, it is still worthwhile to keep track of who manipulated the logs, when they manipulated them, and in what way they manipulated them (determining which accesses, tools, or utilities were employed). Even if no malicious activity is detected, the knowledge of an attempt to manipulate system logs may be indicative of a broader security risk that should be thoroughly investigated.\ +narrative = Because attackers often modify system logs to cover their tracks and/or to thwart the investigative process, log monitoring is an industry-recognized best practice. While there are legitimate reasons to manipulate system logs, it is still worthwhile to keep track of who manipulated the logs, when they manipulated them, and in what way they manipulated them (determining which accesses, tools, or utilities were employed). Even if no malicious activity is detected, the knowledge of an attempt to manipulate system logs may be indicative of a broader security risk that should be thoroughly investigated. \ The Analytic Story gives users two different ways to detect manipulation of Windows Event Logs and one way to detect deletion of the Update Sequence Number (USN) Change Journal. The story helps determine the history of the host and the users who have accessed it. 
Finally, the story aids in the investigation by retrieving all the information on the process that caused these events (if the process has been identified). [analytic_story://Windows Persistence Techniques] @@ -19629,9 +19629,9 @@ maintainers = [{"company": "Splunk", "email": "-", "name": "Michael Haag"}] spec_version = 3 searches = ["ESCU - WinRAR Spawning Shell Application - Rule"] description = Group-IB Threat Intelligence unit discovered a zero-day vulnerability, CVE-2023-38831, in WinRAR, a popular compression tool. Cybercriminals exploited this vulnerability to deliver various malware families, including DarkMe and GuLoader, by crafting ZIP archives with spoofed extensions, which were then distributed on trading forums. Once the malware was executed, it allowed cybercriminals to withdraw funds from brokers' accounts. RARLAB was immediately notified about the vulnerability and released a patch. Group-IB recommends users update WinRAR to the latest version, stay informed about cyber threats, be cautious with unknown attachments, enable 2FA, backup data, and follow the principle of least privilege. -narrative = Group-IB Threat Intelligence unit identified a critical zero-day vulnerability, CVE-2023-38831, in WinRAR, a widely used compression tool. This vulnerability was exploited by cybercriminals to craft ZIP archives containing malicious and non-malicious files, distributed on specialized trading forums. The exploit allowed them to spoof file extensions, hiding the launch of malicious scripts within an archive masquerading as a '.jpg', '.txt', or any other file format. When victims opened the specially crafted archive, it executed the malware, leading to unauthorized access to their broker accounts and enabling the cybercriminals to perform illicit financial transactions and withdraw funds.\ +narrative = Group-IB Threat Intelligence unit identified a critical zero-day vulnerability, CVE-2023-38831, in WinRAR, a widely used compression tool. This vulnerability was exploited by cybercriminals to craft ZIP archives containing malicious and non-malicious files, distributed on specialized trading forums. The exploit allowed them to spoof file extensions, hiding the launch of malicious scripts within an archive masquerading as a '.jpg', '.txt', or any other file format. When victims opened the specially crafted archive, it executed the malware, leading to unauthorized access to their broker accounts and enabling the cybercriminals to perform illicit financial transactions and withdraw funds.
\ +The vulnerability was discovered while researching the spread of DarkMe malware, a VisualBasic spy Trojan attributed to the financially motivated group, Evilnum. The malware was distributed alongside other malware families, such as GuLoader and Remcos RAT, via malicious ZIP archives posted on popular trading forums or distributed via file-sharing services. Despite efforts by forum administrators to warn users and disable threat actors' accounts, the cybercriminals continued to spread the malicious files, compromising devices, and leading to financial losses. \ +Group-IB immediately notified RARLAB about the vulnerability, and they promptly responded by issuing a patch. The beta version of the patch was released on July 20, 2023, and the final updated version, WinRAR 6.23, was released on August 2, 2023. Group-IB recommends all users install the latest version of WinRAR to mitigate the risk of exploitation. \ In conclusion, the exploitation of the CVE-2023-38831 vulnerability highlights the constant risks associated with software vulnerabilities and the importance of remaining vigilant, keeping systems updated, and following security guidelines to avoid falling victim to such attacks. Collaboration between security researchers and software developers is essential to quickly identify and fix vulnerabilities, making it harder for cybercriminals to exploit them. [analytic_story://Winter Vivern] diff --git a/dist/DA-ESS-ContentUpdate/default/app.conf b/dist/DA-ESS-ContentUpdate/default/app.conf index 163eccf042..696425ea58 100644 --- a/dist/DA-ESS-ContentUpdate/default/app.conf +++ b/dist/DA-ESS-ContentUpdate/default/app.conf @@ -1,7 +1,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:36 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# @@ -11,7 +11,7 @@ is_configured = false state = enabled state_change_requires_restart = false -build = 20240510180009 +build = 20240515154109 [triggers] reload.analytic_stories = simple @@ -27,7 +27,7 @@ reload.es_investigations = simple [launcher] author = Splunk -version = 4.31.0 +version = 4.31.1 description = Explore the Analytic Stories included with ES Content Updates. 
[ui] diff --git a/dist/DA-ESS-ContentUpdate/default/collections.conf b/dist/DA-ESS-ContentUpdate/default/collections.conf index 2070f9c851..4e7d33021a 100644 --- a/dist/DA-ESS-ContentUpdate/default/collections.conf +++ b/dist/DA-ESS-ContentUpdate/default/collections.conf @@ -1,7 +1,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:36 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/content-version.conf b/dist/DA-ESS-ContentUpdate/default/content-version.conf index fbd862e6f9..9f63141a0f 100644 --- a/dist/DA-ESS-ContentUpdate/default/content-version.conf +++ b/dist/DA-ESS-ContentUpdate/default/content-version.conf @@ -1,9 +1,9 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:36 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# [content-version] -version = 4.31.0 \ No newline at end of file +version = 4.31.1 \ No newline at end of file diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_all_backup_logs_for_host___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_all_backup_logs_for_host___response_task.xml index ba951353c6..957ff37fa9 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_all_backup_logs_for_host___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_all_backup_logs_for_host___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_amazon_eks_kubernetes_activity_by_src_ip___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_amazon_eks_kubernetes_activity_by_src_ip___response_task.xml index f4f0624fc8..bd87f0d965 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_amazon_eks_kubernetes_activity_by_src_ip___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_amazon_eks_kubernetes_activity_by_src_ip___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_investigate_security_hub_alerts_by_dest___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_investigate_security_hub_alerts_by_dest___response_task.xml index ddc6518be0..fbcbdbb8e5 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_investigate_security_hub_alerts_by_dest___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_investigate_security_hub_alerts_by_dest___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # 
https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_investigate_user_activities_by_accesskeyid___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_investigate_user_activities_by_accesskeyid___response_task.xml index c6815e2083..c10c866e82 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_investigate_user_activities_by_accesskeyid___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_investigate_user_activities_by_accesskeyid___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_investigate_user_activities_by_arn___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_investigate_user_activities_by_arn___response_task.xml index 7a824e3945..d97000d482 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_investigate_user_activities_by_arn___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_investigate_user_activities_by_arn___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_network_acl_details_from_id___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_network_acl_details_from_id___response_task.xml index da8793824c..2645f57fec 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_network_acl_details_from_id___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_network_acl_details_from_id___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_network_interface_details_via_resourceid___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_network_interface_details_via_resourceid___response_task.xml index 9e38d4732c..34e9b91901 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_network_interface_details_via_resourceid___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_network_interface_details_via_resourceid___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team 
- Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_s3_bucket_details_via_bucketname___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_s3_bucket_details_via_bucketname___response_task.xml index 7075a06db0..45dcada799 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_s3_bucket_details_via_bucketname___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_aws_s3_bucket_details_via_bucketname___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_gcp_kubernetes_activity_by_src_ip___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_gcp_kubernetes_activity_by_src_ip___response_task.xml index f8f6393f7e..5bf7acc12c 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_gcp_kubernetes_activity_by_src_ip___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_gcp_kubernetes_activity_by_src_ip___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_city___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_city___response_task.xml index c0feb2b9be..a56ea6cf06 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_city___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_city___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_country___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_country___response_task.xml index a8780a5c8f..3ae4ac3e02 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_country___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_country___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_ip_address___response_task.xml 
b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_ip_address___response_task.xml index 450e0cbf25..efc05f9272 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_ip_address___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_ip_address___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_region___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_region___response_task.xml index 3eb8ac0266..3e5557c8e3 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_region___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_all_aws_activity_from_region___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_backup_logs_for_endpoint___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_backup_logs_for_endpoint___response_task.xml index 44c23ccb3c..04cb12ad93 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_backup_logs_for_endpoint___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_backup_logs_for_endpoint___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_certificate_logs_for_a_domain___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_certificate_logs_for_a_domain___response_task.xml index fcca501dff..663c836565 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_certificate_logs_for_a_domain___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_certificate_logs_for_a_domain___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_dns_server_history_for_a_host___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_dns_server_history_for_a_host___response_task.xml index 2a6df5e942..5aaeaf2202 100644 --- 
a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_dns_server_history_for_a_host___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_dns_server_history_for_a_host___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_dns_traffic_ratio___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_dns_traffic_ratio___response_task.xml index 66c218e0d3..4c9a7a3b65 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_dns_traffic_ratio___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_dns_traffic_ratio___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_ec2_instance_details_by_instanceid___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_ec2_instance_details_by_instanceid___response_task.xml index 0f38bde281..c1cd61fe88 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_ec2_instance_details_by_instanceid___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_ec2_instance_details_by_instanceid___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_ec2_launch_details___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_ec2_launch_details___response_task.xml index ac6f3b814c..e87bb852f8 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_ec2_launch_details___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_ec2_launch_details___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_email_info___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_email_info___response_task.xml index d4e6b3446a..b6696f8ade 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_email_info___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_email_info___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 
UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_emails_from_specific_sender___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_emails_from_specific_sender___response_task.xml index 9a787cc314..e80907eff3 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_emails_from_specific_sender___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_emails_from_specific_sender___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_first_occurrence_and_last_occurrence_of_a_mac_address___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_first_occurrence_and_last_occurrence_of_a_mac_address___response_task.xml index 77c799f3ad..de496de989 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_first_occurrence_and_last_occurrence_of_a_mac_address___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_first_occurrence_and_last_occurrence_of_a_mac_address___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_history_of_email_sources___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_history_of_email_sources___response_task.xml index 73698e2867..4a5c5c58bd 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_history_of_email_sources___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_history_of_email_sources___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_logon_rights_modifications_for_endpoint___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_logon_rights_modifications_for_endpoint___response_task.xml index 5e2bbb9b09..6b4f7fd938 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_logon_rights_modifications_for_endpoint___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_logon_rights_modifications_for_endpoint___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git 
a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_logon_rights_modifications_for_user___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_logon_rights_modifications_for_user___response_task.xml index b5989662b6..a37bfd2e7b 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_logon_rights_modifications_for_user___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_logon_rights_modifications_for_user___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_notable_history___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_notable_history___response_task.xml index 2585763d0e..dd559a0ef2 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_notable_history___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_notable_history___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_parent_process_info___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_parent_process_info___response_task.xml index 1a2d4b2d88..5163ab843c 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_parent_process_info___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_parent_process_info___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_file_activity___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_file_activity___response_task.xml index 09be7f6902..c7292fc056 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_file_activity___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_file_activity___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_info___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_info___response_task.xml index 124e655a4e..672d2c4171 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_info___response_task.xml +++ 
b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_info___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_information_for_port_activity___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_information_for_port_activity___response_task.xml index ba5ae93528..15344a59bb 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_information_for_port_activity___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_information_for_port_activity___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_responsible_for_the_dns_traffic___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_responsible_for_the_dns_traffic___response_task.xml index 0f26851121..6e8b1d811b 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_responsible_for_the_dns_traffic___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_process_responsible_for_the_dns_traffic___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_sysmon_wmi_activity_for_host___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_sysmon_wmi_activity_for_host___response_task.xml index 9c56fb79b7..4dc92cf88a 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_sysmon_wmi_activity_for_host___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_sysmon_wmi_activity_for_host___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_web_session_information_via_session_id___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_web_session_information_via_session_id___response_task.xml index ecac9d5521..707a6408ee 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_web_session_information_via_session_id___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_get_web_session_information_via_session_id___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically 
generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_aws_activities_via_region_name___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_aws_activities_via_region_name___response_task.xml index 56a2d6ccd6..f412e02168 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_aws_activities_via_region_name___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_aws_activities_via_region_name___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_aws_user_activities_by_user_field___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_aws_user_activities_by_user_field___response_task.xml index f50a805f8a..5279a06254 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_aws_user_activities_by_user_field___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_aws_user_activities_by_user_field___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_failed_logins_for_multiple_destinations___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_failed_logins_for_multiple_destinations___response_task.xml index 8eff60ffa5..a8a51c4905 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_failed_logins_for_multiple_destinations___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_failed_logins_for_multiple_destinations___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_network_traffic_from_src_ip___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_network_traffic_from_src_ip___response_task.xml index f77c384775..70a3ec86d4 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_network_traffic_from_src_ip___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_network_traffic_from_src_ip___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # 
https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_okta_activity_by_app___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_okta_activity_by_app___response_task.xml index 778464f9d0..84053c97c3 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_okta_activity_by_app___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_okta_activity_by_app___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_pass_the_hash_attempts___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_pass_the_hash_attempts___response_task.xml index e0f8b0f48c..727b7fb996 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_pass_the_hash_attempts___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_pass_the_hash_attempts___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_pass_the_ticket_attempts___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_pass_the_ticket_attempts___response_task.xml index c304d8b64d..c65793bb12 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_pass_the_ticket_attempts___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_pass_the_ticket_attempts___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_previous_unseen_user___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_previous_unseen_user___response_task.xml index fc69f3661f..0ea616bd3e 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_previous_unseen_user___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_previous_unseen_user___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git 
a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_successful_remote_desktop_authentications___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_successful_remote_desktop_authentications___response_task.xml index 155a0baadd..2e15fd826c 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_successful_remote_desktop_authentications___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_successful_remote_desktop_authentications___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_suspicious_strings_in_http_header___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_suspicious_strings_in_http_header___response_task.xml index a4afc17d85..5333ff1459 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_suspicious_strings_in_http_header___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_suspicious_strings_in_http_header___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_user_activities_in_okta___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_user_activities_in_okta___response_task.xml index 7334f251e2..c82059272d 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_user_activities_in_okta___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_user_activities_in_okta___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_web_posts_from_src___response_task.xml b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_web_posts_from_src___response_task.xml index 3f3344c251..5733bb6388 100644 --- a/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_web_posts_from_src___response_task.xml +++ b/dist/DA-ESS-ContentUpdate/default/data/ui/panels/workbench_panel_investigate_web_posts_from_src___response_task.xml @@ -2,7 +2,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:37 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/es_investigations.conf b/dist/DA-ESS-ContentUpdate/default/es_investigations.conf index 
0528915125..4360c1973b 100644 --- a/dist/DA-ESS-ContentUpdate/default/es_investigations.conf +++ b/dist/DA-ESS-ContentUpdate/default/es_investigations.conf @@ -1,7 +1,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:36 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# @@ -19,7 +19,7 @@ tokens = {\ "value": "asset",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_amazon_eks_kubernetes_activity_by_src_ip___response_task] @@ -35,7 +35,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_aws_investigate_security_hub_alerts_by_dest___response_task] @@ -51,7 +51,7 @@ tokens = {\ "value": "asset",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_aws_investigate_user_activities_by_accesskeyid___response_task] @@ -67,7 +67,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_aws_investigate_user_activities_by_arn___response_task] @@ -75,15 +75,15 @@ label = AWS Investigate User Activities By ARN description = This search lists all the logged CloudTrail activities by a specific user ARN and will create a table containing the source of the user, the region of the activity, the name and type of the event, the action taken, and all the user's identity information. disabled = 0 tokens = {\ - "user": {\ - "valuePrefix": "\"",\ - "valueSuffix": "\"",\ - "delimiter": " OR user=",\ - "valueType": "primitive",\ - "value": "identity",\ - "default": "null"\ + "user": {\ + "valuePrefix": "\"",\ + "valueSuffix": "\"",\ + "delimiter": " OR user=",\ + "valueType": "primitive",\ + "value": "identity",\ + "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_aws_network_acl_details_from_id___response_task] @@ -99,7 +99,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_aws_network_interface_details_via_resourceid___response_task] @@ -115,7 +115,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_aws_s3_bucket_details_via_bucketname___response_task] @@ -131,7 +131,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_gcp_kubernetes_activity_by_src_ip___response_task] @@ -147,7 +147,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_all_aws_activity_from_city___response_task] @@ -163,7 +163,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_all_aws_activity_from_country___response_task] @@ -179,7 +179,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_all_aws_activity_from_ip_address___response_task] @@ -195,7 +195,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_all_aws_activity_from_region___response_task] @@ -211,7 +211,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_backup_logs_for_endpoint___response_task] @@ -227,7 +227,7 @@ tokens = {\ "value": "asset",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_certificate_logs_for_a_domain___response_task] @@ -243,7 +243,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_dns_server_history_for_a_host___response_task] @@ -259,7 +259,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ 
[panel://workbench_panel_get_dns_traffic_ratio___response_task] @@ -275,7 +275,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_ec2_instance_details_by_instanceid___response_task] @@ -291,7 +291,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_ec2_launch_details___response_task] @@ -307,7 +307,7 @@ tokens = {\ "value": "asset",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_email_info___response_task] @@ -323,7 +323,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_emails_from_specific_sender___response_task] @@ -339,7 +339,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_first_occurrence_and_last_occurrence_of_a_mac_address___response_task] @@ -355,7 +355,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_history_of_email_sources___response_task] @@ -371,7 +371,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_logon_rights_modifications_for_endpoint___response_task] @@ -387,7 +387,7 @@ tokens = {\ "value": "asset",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_logon_rights_modifications_for_user___response_task] @@ -395,15 +395,15 @@ label = Get Logon Rights Modifications For User description = This search allows you to retrieve any modifications to logon rights for a specific user account. disabled = 0 tokens = {\ - "user": {\ - "valuePrefix": "\"",\ - "valueSuffix": "\"",\ - "delimiter": " OR user=",\ - "valueType": "primitive",\ - "value": "identity",\ - "default": "null"\ + "user": {\ + "valuePrefix": "\"",\ + "valueSuffix": "\"",\ + "delimiter": " OR user=",\ + "valueType": "primitive",\ + "value": "identity",\ + "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_notable_history___response_task] @@ -419,7 +419,7 @@ tokens = {\ "value": "asset",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_parent_process_info___response_task] @@ -434,7 +434,7 @@ tokens = {\ "valueType": "primitive",\ "value": "file",\ "default": "null"\ - }\, + },\ "dest": {\ "valuePrefix": "\"",\ "valueSuffix": "\"",\ @@ -443,7 +443,7 @@ tokens = {\ "value": "asset",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_process_file_activity___response_task] @@ -458,7 +458,7 @@ tokens = {\ "valueType": "primitive",\ "value": "asset",\ "default": "null"\ - }\, + },\ "process_name": {\ "valuePrefix": "\"",\ "valueSuffix": "\"",\ @@ -467,7 +467,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_process_info___response_task] @@ -482,7 +482,7 @@ tokens = {\ "valueType": "primitive",\ "value": "file",\ "default": "null"\ - }\, + },\ "dest": {\ "valuePrefix": "\"",\ "valueSuffix": "\"",\ @@ -491,7 +491,7 @@ tokens = {\ "value": "asset",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_process_information_for_port_activity___response_task] @@ -506,7 +506,7 @@ tokens = {\ "valueType": "primitive",\ "value": "asset",\ "default": "null"\ - }\, + },\ "dest_port": {\ "valuePrefix": "\"",\ "valueSuffix": "\"",\ @@ -515,7 +515,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_process_responsible_for_the_dns_traffic___response_task] @@ -531,7 +531,7 @@ tokens = {\ "value": "asset",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_sysmon_wmi_activity_for_host___response_task] @@ -547,7 +547,7 @@ 
tokens = {\ "value": "asset",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_get_web_session_information_via_session_id___response_task] @@ -563,7 +563,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_investigate_aws_activities_via_region_name___response_task] @@ -579,7 +579,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_investigate_aws_user_activities_by_user_field___response_task] @@ -587,15 +587,15 @@ label = Investigate AWS User Activities by user field description = This search lists all the logged CloudTrail activities by a specific user and will create a table containing the source of the user, the region of the activity, the name and type of the event, the action taken, and the user's identity information. disabled = 0 tokens = {\ - "user": {\ - "valuePrefix": "\"",\ - "valueSuffix": "\"",\ - "delimiter": " OR user=",\ - "valueType": "primitive",\ - "value": "identity",\ - "default": "null"\ + "user": {\ + "valuePrefix": "\"",\ + "valueSuffix": "\"",\ + "delimiter": " OR user=",\ + "valueType": "primitive",\ + "value": "identity",\ + "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_investigate_failed_logins_for_multiple_destinations___response_task] @@ -603,15 +603,15 @@ label = Investigate Failed Logins for Multiple Destinations description = This search returns failed logins to multiple destinations by user. disabled = 0 tokens = {\ - "user": {\ - "valuePrefix": "\"",\ - "valueSuffix": "\"",\ - "delimiter": " OR user=",\ - "valueType": "primitive",\ - "value": "identity",\ - "default": "null"\ + "user": {\ + "valuePrefix": "\"",\ + "valueSuffix": "\"",\ + "delimiter": " OR user=",\ + "valueType": "primitive",\ + "value": "identity",\ + "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_investigate_network_traffic_from_src_ip___response_task] @@ -627,7 +627,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_investigate_okta_activity_by_app___response_task] @@ -643,7 +643,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_investigate_pass_the_hash_attempts___response_task] @@ -659,7 +659,7 @@ tokens = {\ "value": "asset",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_investigate_pass_the_ticket_attempts___response_task] @@ -675,7 +675,7 @@ tokens = {\ "value": "asset",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_investigate_previous_unseen_user___response_task] @@ -691,7 +691,7 @@ tokens = {\ "value": "asset",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_investigate_successful_remote_desktop_authentications___response_task] @@ -707,7 +707,7 @@ tokens = {\ "value": "asset",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_investigate_suspicious_strings_in_http_header___response_task] @@ -722,7 +722,7 @@ tokens = {\ "valueType": "primitive",\ "value": "file",\ "default": "null"\ - }\, + },\ "dest_ip": {\ "valuePrefix": "\"",\ "valueSuffix": "\"",\ @@ -731,7 +731,7 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_investigate_user_activities_in_okta___response_task] @@ -739,15 +739,15 @@ label = Investigate User Activities In Okta description = This search returns all okta events by a specific user disabled = 0 tokens = {\ - "user": {\ - "valuePrefix": "\"",\ - "valueSuffix": "\"",\ - "delimiter": " OR user=",\ - "valueType": "primitive",\ - "value": "identity",\ - "default": "null"\ + "user": {\ + "valuePrefix": 
"\"",\ + "valueSuffix": "\"",\ + "delimiter": " OR user=",\ + "valueType": "primitive",\ + "value": "identity",\ + "default": "null"\ }\ - }\ +}\ [panel://workbench_panel_investigate_web_posts_from_src___response_task] @@ -763,6 +763,6 @@ tokens = {\ "value": "file",\ "default": "null"\ }\ - }\ +}\ diff --git a/dist/DA-ESS-ContentUpdate/default/macros.conf b/dist/DA-ESS-ContentUpdate/default/macros.conf index f963b2f73b..6f90e082c0 100644 --- a/dist/DA-ESS-ContentUpdate/default/macros.conf +++ b/dist/DA-ESS-ContentUpdate/default/macros.conf @@ -1,7 +1,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:36 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# diff --git a/dist/DA-ESS-ContentUpdate/default/savedsearches.conf b/dist/DA-ESS-ContentUpdate/default/savedsearches.conf index cf7d6d03ab..e79d5bacbe 100644 --- a/dist/DA-ESS-ContentUpdate/default/savedsearches.conf +++ b/dist/DA-ESS-ContentUpdate/default/savedsearches.conf @@ -1,7 +1,7 @@ ############# # Automatically generated by 'contentctl build' from # https://github.com/splunk/contentctl -# On Date: 2024-05-10T18:00:36 UTC +# On Date: 2024-05-15T15:41:46 UTC # Author: Splunk Threat Research Team - Splunk # Contact: research@splunk.com ############# @@ -102,8 +102,8 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "nist": ["DE.AE"]} action.escu.data_models = ["Email"] action.escu.eli5 = Attackers often use spaces as a means to obfuscate an attachment's file extension. This search looks for messages with email attachments that have many spaces within the file names. -action.escu.how_to_implement = You need to ingest data from emails. Specifically, the sender's address and the file names of any attachments must be mapped to the Email data model. The threshold ratio is set to 10%, but this value can be configured to suit each environment.\ -**Splunk Phantom Playbook Integration**\ +action.escu.how_to_implement = You need to ingest data from emails. Specifically, the sender's address and the file names of any attachments must be mapped to the Email data model. The threshold ratio is set to 10%, but this value can be configured to suit each environment. \ +**Splunk Phantom Playbook Integration** \ If Splunk Phantom is also configured in your environment, a playbook called "Suspicious Email Attachment Investigate and Delete" can be configured to run when any results are found by this detection search. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/` and add the correct hostname to the "Phantom Instance" field in the Adaptive Response Actions when configuring this detection search. The notable event will be sent to Phantom and the playbook will gather further information about the file attachment and its network behaviors. If Phantom finds malicious behavior and an analyst approves of the results, the email will be deleted from the user's inbox. 
action.escu.known_false_positives = None at this time action.escu.creation_date = 2023-04-14 @@ -436,27 +436,27 @@ search = `okta` eventType=system.push.send_factor_verify_push OR ((legacyEventTy [ESCU - Okta Mismatch Between Source and Response for Verify Push Request - Rule] action.escu = 0 action.escu.enabled = 1 -description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splunk Threat Research Team. This means that the detection has been manually tested but we do not have the associated attack data to perform automated testing or cannot share this attack dataset due to its sensitive nature. If you have any questions feel free to email us at: research@splunk.com. The following analytic identifies variations in client-based values for source and response events to identify suspicious request behavior. The detection is enhanced if the org is evaluating behavior conditions in sign-on policies using Okta Behavior Detection. NOTE: This detection requires the use of Okta Identity Engine (OIE) and will not function on Okta Classic.\ -For each Okta Verify Push challenge, the following two events are recorded in Okta System Log\ -Source of Push (Sign-In)\ -eventType eq \"system.push.send_factor_verify_push\"\ -User Push Response (Okta Verify client)\ -eventType eq "user.authentication.auth_via_mfa" AND debugContext.debugData.factor eq "OKTA_VERIFY_PUSH"\ -In sequence, the logic for the analytic -\ -* Groups by SessionID and retrieves any system.push.send_factor_verify_push events (the source of the push) and user.authentication.auth_via_mfa events where the factor is OKTA_VERIFY_PUSH - (the user response to the push)\ -* Counts the total number of push events, successful authentication events, and any push sources where the client is a new device. * Creates a ratio of successful sign-ins to pushes.\ +description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splunk Threat Research Team. This means that the detection has been manually tested but we do not have the associated attack data to perform automated testing or cannot share this attack dataset due to its sensitive nature. If you have any questions feel free to email us at: research@splunk.com. The following analytic identifies variations in client-based values for source and response events to identify suspicious request behavior. The detection is enhanced if the org is evaluating behavior conditions in sign-on policies using Okta Behavior Detection. NOTE: This detection requires the use of Okta Identity Engine (OIE) and will not function on Okta Classic. \ +For each Okta Verify Push challenge, the following two events are recorded in Okta System Log \ +Source of Push (Sign-In) \ +eventType eq \"system.push.send_factor_verify_push\" \ +User Push Response (Okta Verify client) \ +eventType eq "user.authentication.auth_via_mfa" AND debugContext.debugData.factor eq "OKTA_VERIFY_PUSH" \ +In sequence, the logic for the analytic - \ +* Groups by SessionID and retrieves any system.push.send_factor_verify_push events (the source of the push) and user.authentication.auth_via_mfa events where the factor is OKTA_VERIFY_PUSH - (the user response to the push) \ +* Counts the total number of push events, successful authentication events, and any push sources where the client is a new device. * Creates a ratio of successful sign-ins to pushes. 
\ * If the ratio (currently tuned aggressively) indicates push spam, or if a user has rejected a push, the detection proceeds to evaluate whether there is more than one IP address used during the session (session roaming) and the presence of both a new IP and new device during the session. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1621"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic identifies variations in client-based values for source and response events to identify suspicious request behavior. The detection is enhanced if the org is evaluating behavior conditions in sign-on policies using Okta Behavior Detection. NOTE: This detection requires the use of Okta Identity Engine (OIE) and will not function on Okta Classic.\ -For each Okta Verify Push challenge, the following two events are recorded in Okta System Log\ -Source of Push (Sign-In)\ -eventType eq \"system.push.send_factor_verify_push\"\ -User Push Response (Okta Verify client)\ -eventType eq "user.authentication.auth_via_mfa" AND debugContext.debugData.factor eq "OKTA_VERIFY_PUSH"\ -In sequence, the logic for the analytic -\ -* Groups by SessionID and retrieves any system.push.send_factor_verify_push events (the source of the push) and user.authentication.auth_via_mfa events where the factor is OKTA_VERIFY_PUSH - (the user response to the push)\ -* Counts the total number of push events, successful authentication events, and any push sources where the client is a new device. * Creates a ratio of successful sign-ins to pushes.\ +action.escu.eli5 = The following analytic identifies variations in client-based values for source and response events to identify suspicious request behavior. The detection is enhanced if the org is evaluating behavior conditions in sign-on policies using Okta Behavior Detection. NOTE: This detection requires the use of Okta Identity Engine (OIE) and will not function on Okta Classic. \ +For each Okta Verify Push challenge, the following two events are recorded in Okta System Log \ +Source of Push (Sign-In) \ +eventType eq \"system.push.send_factor_verify_push\" \ +User Push Response (Okta Verify client) \ +eventType eq "user.authentication.auth_via_mfa" AND debugContext.debugData.factor eq "OKTA_VERIFY_PUSH" \ +In sequence, the logic for the analytic - \ +* Groups by SessionID and retrieves any system.push.send_factor_verify_push events (the source of the push) and user.authentication.auth_via_mfa events where the factor is OKTA_VERIFY_PUSH - (the user response to the push) \ +* Counts the total number of push events, successful authentication events, and any push sources where the client is a new device. * Creates a ratio of successful sign-ins to pushes. \ * If the ratio (currently tuned aggressively) indicates push spam, or if a user has rejected a push, the detection proceeds to evaluate whether there is more than one IP address used during the session (session roaming) and the presence of both a new IP and new device during the session. action.escu.how_to_implement = The analytic leverages Okta OktaIm2 logs to be ingested using the Splunk Add-on for Okta Identity Cloud (https://splunkbase.splunk.com/app/6553). action.escu.known_false_positives = False positives may be present based on organization size and configuration of Okta. Monitor, tune and filter as needed. 
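A hedged sketch of the grouping and ratio logic outlined above is included here for readers unfamiliar with the shape of such a search; it reuses the `okta` macro already present in these searches, but the session, user, and client field paths plus the 0.5 ratio cut-off are assumptions, not the shipped analytic.

    `okta` eventType=system.push.send_factor_verify_push OR (eventType=user.authentication.auth_via_mfa debugContext.debugData.factor=OKTA_VERIFY_PUSH)
    | stats count(eval(eventType="system.push.send_factor_verify_push")) as pushes, count(eval(eventType="user.authentication.auth_via_mfa")) as verified, dc(client.ipAddress) as session_ips by authenticationContext.externalSessionId, user
    | eval success_ratio = verified / pushes
    | where success_ratio < 0.5 AND session_ips > 1

The stats clause groups by session and user, the eval builds the sign-in-to-push ratio, and the final where keeps only sessions that combine a poor ratio with more than one client IP, mirroring the session-roaming condition described above.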
@@ -483,15 +483,15 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "8085b79 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic identifies variations in client-based values for source and response events to identify suspicious request behavior. The detection is enhanced if the org is evaluating behavior conditions in sign-on policies using Okta Behavior Detection. NOTE: This detection requires the use of Okta Identity Engine (OIE) and will not function on Okta Classic.\ -For each Okta Verify Push challenge, the following two events are recorded in Okta System Log\ -Source of Push (Sign-In)\ -eventType eq \"system.push.send_factor_verify_push\"\ -User Push Response (Okta Verify client)\ -eventType eq "user.authentication.auth_via_mfa" AND debugContext.debugData.factor eq "OKTA_VERIFY_PUSH"\ -In sequence, the logic for the analytic -\ -* Groups by SessionID and retrieves any system.push.send_factor_verify_push events (the source of the push) and user.authentication.auth_via_mfa events where the factor is OKTA_VERIFY_PUSH - (the user response to the push)\ -* Counts the total number of push events, successful authentication events, and any push sources where the client is a new device. * Creates a ratio of successful sign-ins to pushes.\ +action.notable.param.rule_description = The following analytic identifies variations in client-based values for source and response events to identify suspicious request behavior. The detection is enhanced if the org is evaluating behavior conditions in sign-on policies using Okta Behavior Detection. NOTE: This detection requires the use of Okta Identity Engine (OIE) and will not function on Okta Classic. \ +For each Okta Verify Push challenge, the following two events are recorded in Okta System Log \ +Source of Push (Sign-In) \ +eventType eq \"system.push.send_factor_verify_push\" \ +User Push Response (Okta Verify client) \ +eventType eq "user.authentication.auth_via_mfa" AND debugContext.debugData.factor eq "OKTA_VERIFY_PUSH" \ +In sequence, the logic for the analytic - \ +* Groups by SessionID and retrieves any system.push.send_factor_verify_push events (the source of the push) and user.authentication.auth_via_mfa events where the factor is OKTA_VERIFY_PUSH - (the user response to the push) \ +* Counts the total number of push events, successful authentication events, and any push sources where the client is a new device. * Creates a ratio of successful sign-ins to pushes. \ * If the ratio (currently tuned aggressively) indicates push spam, or if a user has rejected a push, the detection proceeds to evaluate whether there is more than one IP address used during the session (session roaming) and the presence of both a new IP and new device during the session. action.notable.param.rule_title = Okta Mismatch Between Source and Response for Verify Push Request action.notable.param.security_domain = access @@ -639,17 +639,17 @@ search = `okta` eventType=user.authentication.auth_via_mfa outcome.result=FAILU [ESCU - Okta Multiple Failed Requests to Access Applications - Rule] action.escu = 0 action.escu.enabled = 1 -description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splunk Threat Research Team. This means that the detection has been manually tested but we do not have the associated attack data to perform automated testing or cannot share this attack dataset due to its sensitive nature. 
If you have any questions feel free to email us at: research@splunk.com. The following analytic identifies multiple failed app requests in an attempt to identify the reuse a stolen web session cookie. The logic of the analytic is as follows: * Retrieves policy evaluation and SSO details in events that contain the Application requested\ -* Formats target fields so we can aggregate specifically on Applications (AppInstances)\ -* Groups by User, Session and IP\ -* Creates a ratio of successful SSO events to total MFA challenges related to Application Sign On Policies\ +description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splunk Threat Research Team. This means that the detection has been manually tested but we do not have the associated attack data to perform automated testing or cannot share this attack dataset due to its sensitive nature. If you have any questions feel free to email us at: research@splunk.com. The following analytic identifies multiple failed app requests in an attempt to identify the reuse a stolen web session cookie. The logic of the analytic is as follows: * Retrieves policy evaluation and SSO details in events that contain the Application requested \ +* Formats target fields so we can aggregate specifically on Applications (AppInstances) \ +* Groups by User, Session and IP \ +* Creates a ratio of successful SSO events to total MFA challenges related to Application Sign On Policies \ * Alerts when more than half of app sign on events are unsuccessful, and challenges were unsatisfied for more than three apps. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1550.004", "T1538"], "nist": ["DE.AE"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic identifies multiple failed app requests in an attempt to identify the reuse a stolen web session cookie. The logic of the analytic is as follows: * Retrieves policy evaluation and SSO details in events that contain the Application requested\ -* Formats target fields so we can aggregate specifically on Applications (AppInstances)\ -* Groups by User, Session and IP\ -* Creates a ratio of successful SSO events to total MFA challenges related to Application Sign On Policies\ +action.escu.eli5 = The following analytic identifies multiple failed app requests in an attempt to identify the reuse a stolen web session cookie. The logic of the analytic is as follows: * Retrieves policy evaluation and SSO details in events that contain the Application requested \ +* Formats target fields so we can aggregate specifically on Applications (AppInstances) \ +* Groups by User, Session and IP \ +* Creates a ratio of successful SSO events to total MFA challenges related to Application Sign On Policies \ * Alerts when more than half of app sign on events are unsuccessful, and challenges were unsatisfied for more than three apps. action.escu.how_to_implement = This analytic is specific to Okta and requires Okta:im2 logs to be ingested. action.escu.known_false_positives = False positives may be present based on organization size and configuration of Okta. @@ -995,15 +995,15 @@ search = `okta` eventType=user.account.report_suspicious_activity_by_enduser | s [ESCU - Okta Suspicious Use of a Session Cookie - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic looks for one or more policy evaluation events in which multiple client values (IP, User Agent, etc.) change associated to the same Device Token for a specific user. 
A detection opportunity arises when an adversary attempts to reuse a stolen web session cookie.\ -* Retrieves policy evaluation events from successful authentication events.\ -* Aggregates/Groups by Device Token and User, providing the first policy evaluation event in the search window.\ +description = The following analytic looks for one or more policy evaluation events in which multiple client values (IP, User Agent, etc.) change associated to the same Device Token for a specific user. A detection opportunity arises when an adversary attempts to reuse a stolen web session cookie. \ +* Retrieves policy evaluation events from successful authentication events. \ +* Aggregates/Groups by Device Token and User, providing the first policy evaluation event in the search window. \ * It checks for the presence of more than one IP and whether there are multiple OS or browsers for each User/Device Token combination. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1539"], "nist": ["DE.AE"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic looks for one or more policy evaluation events in which multiple client values (IP, User Agent, etc.) change associated to the same Device Token for a specific user. A detection opportunity arises when an adversary attempts to reuse a stolen web session cookie.\ -* Retrieves policy evaluation events from successful authentication events.\ -* Aggregates/Groups by Device Token and User, providing the first policy evaluation event in the search window.\ +action.escu.eli5 = The following analytic looks for one or more policy evaluation events in which multiple client values (IP, User Agent, etc.) change associated to the same Device Token for a specific user. A detection opportunity arises when an adversary attempts to reuse a stolen web session cookie. \ +* Retrieves policy evaluation events from successful authentication events. \ +* Aggregates/Groups by Device Token and User, providing the first policy evaluation event in the search window. \ * It checks for the presence of more than one IP and whether there are multiple OS or browsers for each User/Device Token combination. action.escu.how_to_implement = This detection utilizes logs from Okta Identity Management (IM) environments. It requires the ingestion of OktaIm2 logs through the Splunk Add-on for Okta Identity Cloud (https://splunkbase.splunk.com/app/6553). action.escu.known_false_positives = False positives may occur, depending on the organization's size and the configuration of Okta. @@ -3326,8 +3326,8 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Delivery"], "mitre_attack": ["T1566.001", "T1566"], "nist": ["DE.AE"]} action.escu.data_models = ["Email"] action.escu.eli5 = The following analytic detects emails that contain attachments with suspicious file extensions. Detecting and responding to emails with suspicious attachments can mitigate the risks associated with phishing and malware attacks, thereby protecting the organization's data and systems from potential harm. The detection is made by using a Splunk query that searches for emails in the datamodel=Email where the filename of the attachment is not empty. The analytic uses the tstats command to summarize the count, first time, and last time of the emails that meet the criteria. It groups the results by the source user, file name, and message ID of the email. 
The detection is important because it indicates potential phishing or malware delivery attempts in which an attacker attempts to deliver malicious content through email attachments, which can lead to data breaches, malware infections, or unauthorized access to sensitive information. Next steps include reviewing the identified emails and attachments and analyzing the source user, file name, and message ID to determine if they are legitimate or malicious. Additionally, you must inspect any relevant on-disk artifacts associated with the attachments and investigate any concurrent processes to identify the source of the attack. -action.escu.how_to_implement = You need to ingest data from emails. Specifically, the sender's address and the file names of any attachments must be mapped to the Email data model.\ -**Splunk Phantom Playbook Integration**\ +action.escu.how_to_implement = You need to ingest data from emails. Specifically, the sender's address and the file names of any attachments must be mapped to the Email data model. \ +**Splunk Phantom Playbook Integration** \ If Splunk Phantom is also configured in your environment, a Playbook called "Suspicious Email Attachment Investigate and Delete" can be configured to run when any results are found by this detection search. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/`, and add the correct hostname to the "Phantom Instance" field in the Adaptive Response Actions when configuring this detection search. The notable event will be sent to Phantom and the playbook will gather further information about the file attachment and its network behaviors. If Phantom finds malicious behavior and an analyst approves of the results, the email will be deleted from the user's inbox.' action.escu.known_false_positives = None identified action.escu.creation_date = 2023-04-14 @@ -5512,11 +5512,11 @@ search = `cloudtrail` eventName = JobCreated | stats count min(_time) as firstT [ESCU - AWS Exfiltration via Bucket Replication - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic detects API calls made to an S3 bucket when bucket replication services are enabled. S3 bucket replication is a feature offered by Amazon Web Services (AWS) that allows you to automatically and asynchronously copy data from one S3 bucket to another in the same or different region.\ +description = The following analytic detects API calls made to an S3 bucket when bucket replication services are enabled. S3 bucket replication is a feature offered by Amazon Web Services (AWS) that allows you to automatically and asynchronously copy data from one S3 bucket to another in the same or different region. \ S3 bucket replication can also be used for cross-account replication, where data is replicated from a source bucket owned by one AWS account to a destination bucket owned by a different AWS account. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Actions on Objectives"], "mitre_attack": ["T1537"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic detects API calls made to an S3 bucket when bucket replication services are enabled. S3 bucket replication is a feature offered by Amazon Web Services (AWS) that allows you to automatically and asynchronously copy data from one S3 bucket to another in the same or different region.\ +action.escu.eli5 = The following analytic detects API calls made to an S3 bucket when bucket replication services are enabled. 
S3 bucket replication is a feature offered by Amazon Web Services (AWS) that allows you to automatically and asynchronously copy data from one S3 bucket to another in the same or different region. \ S3 bucket replication can also be used for cross-account replication, where data is replicated from a source bucket owned by one AWS account to a destination bucket owned by a different AWS account. action.escu.how_to_implement = You must install splunk AWS add on and Splunk App for AWS. This search works with AWS CloudTrail logs. action.escu.known_false_positives = It is possible that an AWS admin has legitimately implemented data replication to ensure data availability and improve data protection/backup strategies. @@ -5543,7 +5543,7 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "eeb432d schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic detects API calls made to an S3 bucket when bucket replication services are enabled. S3 bucket replication is a feature offered by Amazon Web Services (AWS) that allows you to automatically and asynchronously copy data from one S3 bucket to another in the same or different region.\ +action.notable.param.rule_description = The following analytic detects API calls made to an S3 bucket when bucket replication services are enabled. S3 bucket replication is a feature offered by Amazon Web Services (AWS) that allows you to automatically and asynchronously copy data from one S3 bucket to another in the same or different region. \ S3 bucket replication can also be used for cross-account replication, where data is replicated from a source bucket owned by one AWS account to a destination bucket owned by a different AWS account. action.notable.param.rule_title = AWS Exfiltration via Bucket Replication action.notable.param.security_domain = threat @@ -7492,11 +7492,11 @@ search = `azure_monitor_aad` operationName="Add service principal" properties.i [ESCU - Azure AD Multiple Users Failing To Authenticate From Ip - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic identifies one source Ip failing to authenticate with 30 unique valid users within 5 minutes. This behavior could represent an adversary performing a Password Spraying attack against an Azure Active Directory tenant to obtain initial access or elevate privileges. Error Code 50126 represents an invalid password. This logic can be used for real time security monitoring as well as threat hunting exercises.\ +description = The following analytic identifies one source Ip failing to authenticate with 30 unique valid users within 5 minutes. This behavior could represent an adversary performing a Password Spraying attack against an Azure Active Directory tenant to obtain initial access or elevate privileges. Error Code 50126 represents an invalid password. This logic can be used for real time security monitoring as well as threat hunting exercises. \ Azure AD tenants can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold if needed. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Weaponization"], "mitre_attack": ["T1586", "T1586.003", "T1110", "T1110.003", "T1110.004"], "nist": ["DE.AE"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic identifies one source Ip failing to authenticate with 30 unique valid users within 5 minutes. 
This behavior could represent an adversary performing a Password Spraying attack against an Azure Active Directory tenant to obtain initial access or elevate privileges. Error Code 50126 represents an invalid password. This logic can be used for real time security monitoring as well as threat hunting exercises.\ +action.escu.eli5 = The following analytic identifies one source Ip failing to authenticate with 30 unique valid users within 5 minutes. This behavior could represent an adversary performing a Password Spraying attack against an Azure Active Directory tenant to obtain initial access or elevate privileges. Error Code 50126 represents an invalid password. This logic can be used for real time security monitoring as well as threat hunting exercises. \ Azure AD tenants can be very different depending on the organization. Users should test this detection and customize the arbitrary threshold if needed. action.escu.how_to_implement = You must install the latest version of Splunk Add-on for Microsoft Cloud Services from Splunkbase (https://splunkbase.splunk.com/app/3110/#/details). You must be ingesting Azure Active Directory events into your Splunk environment through an EventHub. This analytic was written to be used with the azure:monitor:aad sourcetype leveraging the Signin log category. action.escu.known_false_positives = A source Ip failing to authenticate with multiple users is not a common for legitimate behavior. @@ -8428,13 +8428,13 @@ search = `azure_monitor_aad` operationName="Consent to application" | eval new_f [ESCU - Azure AD Unusual Number of Failed Authentications From Ip - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic identifies one source Ip failing to authenticate with multiple valid users. This behavior could represent an adversary performing a Password Spraying attack against an Azure Active Directory tenant to obtain initial access or elevate privileges. Error Code 50126 represents an invalid password.\ -The detection calculates the standard deviation for source Ip and leverages the 3-sigma statistical rule to identify an unusual number of failed authentication attempts. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises.\ +description = The following analytic identifies one source Ip failing to authenticate with multiple valid users. This behavior could represent an adversary performing a Password Spraying attack against an Azure Active Directory tenant to obtain initial access or elevate privileges. Error Code 50126 represents an invalid password. \ +The detection calculates the standard deviation for source Ip and leverages the 3-sigma statistical rule to identify an unusual number of failed authentication attempts. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises. \ While looking for anomalies using statistical methods like the standard deviation can have benefits, we also recommend using threshold-based detections to complement coverage. A similar analytic following the threshold model is `Azure AD Multiple Users Failing To Authenticate From Ip`. 
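Since the description above calls out the `bucket` span and the `upperBound` calculation as the main tuning points, a minimal 3-sigma sketch is added here; it uses the `azure_monitor_aad` macro already referenced by these searches, while the 10-minute span, the error-code filter, and the field names are assumptions rather than the shipped search.

    `azure_monitor_aad` category=SignInLogs properties.status.errorCode=50126
    | bucket span=10m _time
    | stats dc(properties.userPrincipalName) as failed_users by src_ip, _time
    | eventstats avg(failed_users) as avg_users, stdev(failed_users) as stdev_users by src_ip
    | eval upperBound = avg_users + (stdev_users * 3)
    | where failed_users > upperBound

Widening the `bucket` span or raising the multiplier in `upperBound` makes the detection less sensitive; the threshold-based companion analytic mentioned above remains the simpler alternative when a fixed cut-off is preferred.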
action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Weaponization"], "mitre_attack": ["T1586", "T1586.003", "T1110", "T1110.003", "T1110.004"], "nist": ["DE.AE"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic identifies one source Ip failing to authenticate with multiple valid users. This behavior could represent an adversary performing a Password Spraying attack against an Azure Active Directory tenant to obtain initial access or elevate privileges. Error Code 50126 represents an invalid password.\ -The detection calculates the standard deviation for source Ip and leverages the 3-sigma statistical rule to identify an unusual number of failed authentication attempts. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises.\ +action.escu.eli5 = The following analytic identifies one source Ip failing to authenticate with multiple valid users. This behavior could represent an adversary performing a Password Spraying attack against an Azure Active Directory tenant to obtain initial access or elevate privileges. Error Code 50126 represents an invalid password. \ +The detection calculates the standard deviation for source Ip and leverages the 3-sigma statistical rule to identify an unusual number of failed authentication attempts. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises. \ While looking for anomalies using statistical methods like the standard deviation can have benefits, we also recommend using threshold-based detections to complement coverage. A similar analytic following the threshold model is `Azure AD Multiple Users Failing To Authenticate From Ip`. action.escu.how_to_implement = You must install the latest version of Splunk Add-on for Microsoft Cloud Services from Splunkbase (https://splunkbase.splunk.com/app/3110/#/details). You must be ingesting Azure Active Directory events into your Splunk environment through an EventHub. This analytic was written to be used with the azure:monitor:aad sourcetype leveraging the Signin log category. action.escu.known_false_positives = A source Ip failing to authenticate with multiple users is not a common for legitimate behavior. @@ -9135,7 +9135,7 @@ action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", " action.escu.data_models = ["Change"] action.escu.eli5 = This search looks for cloud provisioning activities from previously unseen cities. Provisioning activities are defined broadly as any event that runs or creates something. action.escu.how_to_implement = You must be ingesting your cloud infrastructure logs from your cloud provider. You should run the baseline search `Previously Seen Cloud Provisioning Activity Sources - Initial` to build the initial table of source IP address, geographic locations, and times. You must also enable the second baseline search `Previously Seen Cloud Provisioning Activity Sources - Update` to keep this table up to date and to age out old data. You can adjust the time window for this search by updating the `previously_unseen_cloud_provisioning_activity_window` macro. 
You can also provide additional filtering for this search by customizing the `cloud_provisioning_activity_from_previously_unseen_city_filter` macro. -action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise.\ +action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new IP address is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your country, there should be few false positives. If you are located in countries where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. action.escu.creation_date = 2020-10-09 action.escu.modification_date = 2020-10-09 @@ -9177,7 +9177,7 @@ action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", " action.escu.data_models = ["Change"] action.escu.eli5 = This search looks for cloud provisioning activities from previously unseen countries. Provisioning activities are defined broadly as any event that runs or creates something. action.escu.how_to_implement = You must be ingesting your cloud infrastructure logs from your cloud provider. You should run the baseline search `Previously Seen Cloud Provisioning Activity Sources - Initial` to build the initial table of source IP address, geographic locations, and times. You must also enable the second baseline search `Previously Seen Cloud Provisioning Activity Sources - Update` to keep this table up to date and to age out old data. You can adjust the time window for this search by updating the `previously_unseen_cloud_provisioning_activity_window` macro. You can also provide additional filtering for this search by customizing the `cloud_provisioning_activity_from_previously_unseen_country_filter` macro. -action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise.\ +action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new IP address is seen in the **GeoIP** database for any kind of provisioning activity. 
If you typically do all provisioning from tools inside of your country, there should be few false positives. If you are located in countries where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. action.escu.creation_date = 2020-10-09 action.escu.modification_date = 2020-10-09 @@ -9219,7 +9219,7 @@ action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", " action.escu.data_models = ["Change"] action.escu.eli5 = This search looks for cloud provisioning activities from previously unseen IP addresses. Provisioning activities are defined broadly as any event that runs or creates something. action.escu.how_to_implement = You must be ingesting your cloud infrastructure logs from your cloud provider. You should run the baseline search `Previously Seen Cloud Provisioning Activity Sources - Initial` to build the initial table of source IP address, geographic locations, and times. You must also enable the second baseline search `Previously Seen Cloud Provisioning Activity Sources - Update` to keep this table up to date and to age out old data. You can adjust the time window for this search by updating the `previously_unseen_cloud_provisioning_activity_window` macro. You can also provide additional filtering for this search by customizing the `cloud_provisioning_activity_from_previously_unseen_ip_address_filter` macro. -action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise.\ +action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new IP address is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your country, there should be few false positives. If you are located in countries where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. action.escu.creation_date = 2020-08-16 action.escu.modification_date = 2020-08-16 @@ -9261,7 +9261,7 @@ action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", " action.escu.data_models = ["Change"] action.escu.eli5 = This search looks for cloud provisioning activities from previously unseen regions. Provisioning activities are defined broadly as any event that runs or creates something. action.escu.how_to_implement = You must be ingesting your cloud infrastructure logs from your cloud provider. You should run the baseline search `Previously Seen Cloud Provisioning Activity Sources - Initial` to build the initial table of source IP address, geographic locations, and times. 
You must also enable the second baseline search `Previously Seen Cloud Provisioning Activity Sources - Update` to keep this table up to date and to age out old data. You can adjust the time window for this search by updating the `previously_unseen_cloud_provisioning_activity_window` macro. You can also provide additional filtering for this search by customizing the `cloud_provisioning_activity_from_previously_unseen_region_filter` macro. -action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise.\ +action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new IP address is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your country, there should be few false positives. If you are located in countries where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. action.escu.creation_date = 2020-08-16 action.escu.modification_date = 2020-08-16 @@ -9298,15 +9298,15 @@ search = | tstats earliest(_time) as firstTime, latest(_time) as lastTime from d [ESCU - Cloud Security Groups Modifications by User - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic identifies users who are unsually modifying security group in your cloud enriovnment,focusing on actions such as modifications, deletions, or creations performed by users over 30-minute intervals. Analyzing patterns of modifications to security groups can help in identifying anomalous behavior that may indicate a compromised account or an insider threat.\ -The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises.\ -This detection will only trigger on all user and service accounts that have created/modified/deleted a security group .\ +description = The following analytic identifies users who are unsually modifying security group in your cloud enriovnment,focusing on actions such as modifications, deletions, or creations performed by users over 30-minute intervals. Analyzing patterns of modifications to security groups can help in identifying anomalous behavior that may indicate a compromised account or an insider threat. \ +The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. 
To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises. \ +This detection will only trigger on all user and service accounts that have created/modified/deleted a security group . \ The analytics returned fields allow analysts to investigate the event further by providing fields like source ip and values of the security objects affected. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1578.005"], "nist": ["DE.AE"]} action.escu.data_models = ["Change"] -action.escu.eli5 = The following analytic identifies users who are unsually modifying security group in your cloud enriovnment,focusing on actions such as modifications, deletions, or creations performed by users over 30-minute intervals. Analyzing patterns of modifications to security groups can help in identifying anomalous behavior that may indicate a compromised account or an insider threat.\ -The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises.\ -This detection will only trigger on all user and service accounts that have created/modified/deleted a security group .\ +action.escu.eli5 = The following analytic identifies users who are unsually modifying security group in your cloud enriovnment,focusing on actions such as modifications, deletions, or creations performed by users over 30-minute intervals. Analyzing patterns of modifications to security groups can help in identifying anomalous behavior that may indicate a compromised account or an insider threat. \ +The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of users. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. This logic can be used for real time security monitoring as well as threat hunting exercises. \ +This detection will only trigger on all user and service accounts that have created/modified/deleted a security group . \ The analytics returned fields allow analysts to investigate the event further by providing fields like source ip and values of the security objects affected. action.escu.how_to_implement = This search requries the Cloud infrastructure logs such as AWS Cloudtrail, GCP Pubsub Message logs, Azure Audit logs to be ingested into an accelerated Change datamodel. It is also recommended that users can try different combinations of the `bucket` span time and outlier conditions to better suit with their environment. action.escu.known_false_positives = It is possible that legitimate user/admin may modify a number of security groups @@ -10997,10 +10997,10 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This detection detects inbound network traffic volume anomalies from processes running within containerised workloads. 
Anomalies are provided with context identifying the Kubernetes cluster, the workload name, and the type of anomaly.This detection leverages Network performance Monitoring metrics harvested using an OTEL collector, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection compares the tcp.bytes, tcp.new_sockets, tcp.packets, udp.bytes, udp.packets metrics for destination (receiving) workload process pairs over the last 1 hour, with the average of those metrics for those pairs over the last 30 days in order to detect any anonymously high inbound network activity. Anomalies in inbound network traffic may suggest that the container is receiving unexpected or unauthorized data, potentially indicative of a breach, a vulnerability exploitation attempt, an attempt to overload the service, or propagation of malware. Successful compromise of a containerised application resulting in the ability to upload data, can result in installation of command and control software or other malware, data integrity damage, container escape, and further compromise of the environment. Additionally this kind of activity may result in resource contention, performance degradation and disruption to the normal operation of the environment. -action.escu.how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default:\ -* Name sim_npm_metrics_to_metrics_index\ -* Org ID \ -* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E')\ +action.escu.how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. 
Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default: \ +* Name sim_npm_metrics_to_metrics_index \ +* Org ID \ +* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E') \ * Metric Resolution 10000 action.escu.known_false_positives = unknown action.escu.creation_date = 2024-01-10 @@ -11042,17 +11042,17 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This analytic identifies high Inbound or Outbound Network IO anomalies in a Kubernetes container. It uses process metrics from an OTEL collector and Kubelet Stats Receiver, and data from Splunk Observability cloud via the Splunk Infrastructure Monitoring Add-on. A lookup table containing average and standard deviation for network IO is used to evaluate anomalies for each container. An event is generated if the anomaly persists over a 1 hour period. These anomalies may indicate security threats such as data exfiltration, command and control communication, service disruptions, or unauthorized data transfers. They can compromise the confidentiality, availability, and integrity of applications and data, necessitating rapid detection and response. Anomalous network utilization may suggest a compromised container, potentially leading to data breaches, service outages, financial losses, and reputational damage. -action.escu.how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +action.escu.how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. 
\ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio action.escu.known_false_positives = unknown action.escu.creation_date = 2023-12-19 @@ -11094,17 +11094,17 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This analytic identifies changes in network communication behavior in a Kubernetes container by examining inbound to outbound network IO ratios. It uses process metrics from an OTEL collector and Kubelet Stats Receiver, and data from Splunk Observability cloud via the Splunk Infrastructure Monitoring Add-on. A lookup table containing average and standard deviation for network IO is used to evaluate anomalies for each container. An event is generated if the anomaly persists over a 1 hour period. These anomalies may indicate security threats such as data exfiltration, command and control communication, or compromised container behavior. They can compromise the confidentiality, availability, and integrity of applications and data, necessitating rapid detection and response. Anomalous network utilization may suggest a compromised container, potentially leading to data breaches, service outages, and unauthorized access within the Kubernetes cluster. -action.escu.how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. 
Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +action.escu.how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio action.escu.known_false_positives = unknown action.escu.creation_date = 2023-12-19 @@ -11146,10 +11146,10 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This detection detects outbound network traffic volume anomalies from processes running within containerised workloads. Anomalies are provided with context identifying the Kubernetes cluster, the workload name, and the type of anomaly. This detection leverages Network performance Monitoring metrics harvested using an OTEL collector, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection compares the tcp.bytes, tcp.new_sockets, tcp.packets, udp.bytes, udp.packets metrics for source (transmitting) workload process pairs over the last 1 hout, with the average of those metrics for those pairs over the last 30 days in order to detect any anonymously high outbound network activity. 
Anonymously high outbound network traffic from a process running in a container is a potential indication of data exfiltration, or an indication that the process has been modified. Anomalously high outbound network activity from a process running within a container suggests the potential compromise, which may lead to unauthorized data exfiltration, communication with malicious entities, or the propagation of malware to external systems. The compromised container could also serve as a pivot point for further attacks within the containerized environment. -action.escu.how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default:\ -* Name sim_npm_metrics_to_metrics_index\ -* Org ID \ -* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E')\ +action.escu.how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default: \ +* Name sim_npm_metrics_to_metrics_index \ +* Org ID \ +* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E') \ * Metric Resolution 10000 action.escu.known_false_positives = unknown action.escu.creation_date = 2024-01-10 @@ -11191,10 +11191,10 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This detection detects network traffic volume anomalies between workloads in a microservices hosted application, or between a workload and the outside world if the workload is shown as (unknown). This detection leverages Network performance Monitoring metrics harvested using an OTEL collector, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on (https://splunkbase.splunk.com/app/5247). 
This detection compares the tcp.bytes, tcp.new_sockets, tcp.packets, udp.bytes, udp.packets metrics between workloads over the last 1 hour, with the average of those metrics over the last 30 days in order to detect any anonymously high inbound or outbound network activity. Unexpected spikes in network traffic may signify unauthorized data transfers, or abnormal behavior within the microservices ecosystem. Such activity might signify data exfiltration, unauthorized lateral movement, within the microservices environment. If a bad actor is responsible for this traffic they could compromise additional services or extract sensitive data, potentially leading to data breaches. -action.escu.how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default:\ -* Name sim_npm_metrics_to_metrics_index\ -* Org ID \ -* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E')\ +action.escu.how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default: \ +* Name sim_npm_metrics_to_metrics_index \ +* Org ID \ +* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E') \ * Metric Resolution 10000 action.escu.known_false_positives = unknown action.escu.creation_date = 2024-01-10 @@ -11441,10 +11441,10 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This analytic detects TCP communication between a newly seen source and destination workload pair. This is done to identify changes in network behavior between workloads in a kubernetes cluster. This detection leverages Network performance Monitoring metrics harvested using an OTEL collector, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). 
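The volume check described in the eli5 text above (the last hour's tcp/udp byte and packet counts for a workload pair, measured against that pair's trailing 30-day average) amounts to a per-pair deviation test. The sketch below is only an illustration of that idea, not the shipped ESCU search; the pair keys, the hourly aggregation, and the 3-sigma cut-off are assumptions.

```python
from statistics import mean, stdev

def anomalous_pairs(last_hour, history, sigmas=3.0):
    """Flag workload pairs whose traffic in the last hour deviates from their
    30-day behaviour by more than `sigmas` standard deviations.

    last_hour -- {(src, dest): bytes sent in the last hour}
    history   -- {(src, dest): [hourly byte counts over the trailing 30 days]}
    """
    flagged = {}
    for pair, current in last_hour.items():
        samples = history.get(pair, [])
        if len(samples) < 2:                 # not enough history for a baseline
            continue
        avg, sd = mean(samples), stdev(samples)
        if sd and abs(current - avg) > sigmas * sd:
            flagged[pair] = {"current": current, "avg": round(avg, 1)}
    return flagged

# toy data: a pair that normally moves ~1 KB/hour suddenly sends 250 KB
baseline = {("cartservice", "redis"): [950.0, 1050.0] * 360}
print(anomalous_pairs({("cartservice", "redis"): 250_000.0}, baseline))
```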
This detection compares network activity between workloads over the last 1 hour, with those over the last 30 days in order to detect newly seen inter workload communication. Newly seen network connections in a microservices based app indicate a change in behavior which could indicate potential security threats or anomalies. Distributed applications typically have common established network connection topologies, and new connections are often either an indication of a change in the application or an active threat. Unauthorized connections may enable the attacker to infiltrate the applications ecosystem, potentially leading to data breaches, manipulation of sensitive information, or disruption of critical services. Bad actors may exploit these connections to gain access, escalate privileges, move laterally within the microservices, or introduce malicious code or payloads, putting the applications integrity, availability, and confidentiality at risk. -action.escu.how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default:\ -* Name sim_npm_metrics_to_metrics_index\ -* Org ID \ -* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E')\ +action.escu.how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default: \ +* Name sim_npm_metrics_to_metrics_index \ +* Org ID \ +* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E') \ * Metric Resolution 10000 action.escu.known_false_positives = unknown action.escu.creation_date = 2024-01-10 @@ -11486,10 +11486,10 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This analytic detects UDP communication between a newly seen source and destination workload pair. This is done to identify changes in network behavior between workloads in a kubernetes cluster. 
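The "newly seen" comparison that this detection and its UDP counterpart describe (pairs observed in the last hour versus pairs observed over the prior 30 days) is, at its core, a set difference. A minimal sketch, with invented workload names:

```python
def newly_seen_pairs(last_hour_pairs, baseline_pairs):
    """Return (src, dest) workload pairs seen in the last hour that never
    appeared during the 30-day baseline window."""
    return set(last_hour_pairs) - set(baseline_pairs)

baseline = {("frontend", "cartservice"), ("cartservice", "redis")}
recent = {("frontend", "cartservice"), ("frontend", "external-ip")}
print(newly_seen_pairs(recent, baseline))   # {('frontend', 'external-ip')}
```

The same shape of comparison underpins the "previously unseen image", "newly seen process", and "newly seen path" detections later in this file.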
This detection leverages Network performance Monitoring metrics harvested using an OTEL collector, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection compares network activity between workloads over the last 1 hour, with those over the last 30 days in order to detect newly seen inter workload communication. Newly seen network connections in a microservices based app indicate a change in behavior which could indicate potential security threats or anomalies. Distributed applications typically have common established network connection topologies, and new connections are often either an indication of a change in the application or an active threat. Unauthorized connections may enable the attacker to infiltrate the applications ecosystem, potentially leading to data breaches, manipulation of sensitive information, or disruption of critical services. Bad actors may exploit these connections to gain access, escalate privileges, move laterally within the microservices, or introduce malicious code or payloads, putting the applications integrity, availability, and confidentiality at risk. -action.escu.how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default:\ -* Name sim_npm_metrics_to_metrics_index\ -* Org ID \ -* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E')\ +action.escu.how_to_implement = To gather NPM metrics the Open Telemetry to the Kubernetes Cluster and enable Network Performance Monitoring according to instructions found in Splunk Docs https://docs.splunk.com/observability/en/infrastructure/network-explorer/network-explorer-setup.html#network-explorer-setup In order to access those metrics from within Splunk Enterprise and ES, the Splunk Infrastructure Monitoring add-on must be installed and configured on a Splunk Search Head. Once installed, first configure the add-on with your O11y Cloud Org ID and Access Token. 
Lastly set up the add-on to ingest metrics from O11y cloud using the following settings, and any other settings left at default: \ +* Name sim_npm_metrics_to_metrics_index \ +* Org ID \ +* Signal Flow Program data('tcp.packets').publish(label='A'); data('tcp.bytes').publish(label='B'); data('tcp.new_sockets').publish(label='C'); data('udp.packets').publish(label='D'); data('udp.bytes').publish(label='E') \ * Metric Resolution 10000 action.escu.known_false_positives = unknown action.escu.creation_date = 2024-01-10 @@ -11748,17 +11748,17 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = The following analytic identifies containerised workloads that have been created using a previously unseen image. This detection leverages process metrics harvested using an OTEL collector and kubernetes cluster receiver, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection uses the k8s.container.ready metric to compare the container image names seen in the last 1 hour with those seen in the 30 days prior to those 1 hour, and alerts if a new container image is detected. When a container in a Kubernetes cluster created using a previously unseen image it raises potential security risks and unknown variables. Unfamiliar container images could contain vulnerabilities, malware, or misconfigurations that pose threats to the cluster's integrity and the applications it hosts. The absence of prior knowledge about the image makes it difficult to assess its trustworthiness, track its lineage, or verify its compliance with security policies. The potential security impact of a container created using a compromised image is significant. Compromised containers can potentially introduce malware, backdoors, or other malicious code into the containerized application, leading to data breaches, service disruptions, and unauthorized access within the Kubernetes cluster. A compromised image can serve as a foothold for lateral movement and privilege escalation, potentially compromising other containers, pods, or nodes in the cluster. Additionally, it may enable the actor to exfiltrate sensitive data, manipulate configurations, or execute arbitrary code, posing risks to the confidentiality, availability, and integrity of applications and data hosted within the cluster -action.escu.how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. 
Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +action.escu.how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio action.escu.known_false_positives = unknown action.escu.creation_date = 2023-12-18 @@ -11800,17 +11800,17 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This analytic detects newly seen process within the Kubernetes scope on a master or worker node. This detection leverages process metrics harvested using an OTEL collector and hostmetrics receiever, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection compares the processes seen for each node over the previous 1 hour with those over the previous 30 days up until the previous 1 hour. The specific metric used by this detection is process.memory.utilization. Newly seen processes on a Kubernetes worker node are concerning as they may represent security risks and anomalies that could be related to unauthorized activity. New processes may be introduced in an attempt to compromise the node or gain control of the Kubernetes cluster. 
By detecting these processes, they can be investigated, and correlated with other anomalous activity for that host. Newly seen processes may be part of an attacker's strategy to compromise the node, gain unauthorized access, and subsequently extend their control to the entire Kubernetes cluster. These processes could facilitate activities such as data exfiltration, privilege escalation, denial-of-service attacks, or the introduction of malware and backdoors, putting sensitive data, applications, and the entire infrastructure at risk. The consequences may include data breaches, service disruptions, financial losses, and reputational damage, underscoring the need to identify anomalous process and associate them with any concurrent risk activity. -action.escu.how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +action.escu.how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. 
\ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio action.escu.known_false_positives = unknown action.escu.creation_date = 2023-12-18 @@ -11852,17 +11852,17 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This analytic detects processes running within the same scope as Kubernetes that have been run from a newly seen path. This detection leverages process metrics harvested using an OTEL collector and hostmetrics receiever, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection compares the processes seen for each node over the previous 1 hour with those over the previous 30 days up until the previous 1 hour, and alerts if the path for that process was not seen over the previous 30 days. The specific metric used by this detection is process.memory.utilization. Processes running from a newly seen path can signify potential security risks and anomalies. A process executing from an unfamiliar file path may indicate unauthorized changes to the file system, a compromised node, or the introduction of malicious software. If the presence of a process running from a newly seen file path on a Kubernetes node indicates malicious activity, the security implications could be severe. It suggests that an attacker has potentially compromised the node, allowing them to execute unauthorized processes and potentially gain control over critical resources. This could lead to further exploitation, data exfiltration, privilege escalation, or the introduction of malware and backdoors within the Kubernetes cluster. -action.escu.how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. 
Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +action.escu.how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio action.escu.known_false_positives = unknown action.escu.creation_date = 2023-12-18 @@ -11904,17 +11904,17 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This analytic identifies high resource utilization anomalies in Kubernetes processes. It uses process metrics from an OTEL collector and hostmetrics receiver, fetched from Splunk Observability cloud via the Splunk Infrastructure Monitoring Add-on. The detection uses a lookup table with average and standard deviation values for various process metrics to identify anomalies. High resource utilization can indicate security threats or operational issues, such as cryptojacking, unauthorized data exfiltration, or compromised containers. These anomalies can disrupt services, exhaust resources, increase costs, and allow attackers to evade detection or maintain access. 
-action.escu.how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +action.escu.how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio action.escu.known_false_positives = unknown action.escu.creation_date = 2023-12-18 @@ -11956,17 +11956,17 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This analytic detects anomalously changes in the ratio between specific process resources on a Kubernetes node, based on the past behavior for each process running in the Kubernetes scope on that node. 
This detection leverages process metrics harvested using an OTEL collector and hostmetrics receiver, and is pulled from Splunk Observability cloud using the Splunk Infrastructure Monitoring Add-on. (https://splunkbase.splunk.com/app/5247). This detection also leverages a lookup table that contains average and standard deviation for the cpu:disk operations, cpu:mem, cpu:thread count, disk operations:thread count, and mem:disk operations ratios. This is used to indicate an anomalous change in resource ratios that indicate the workload has changed behavior irrespective of load. Changes in the relationship between utilization of different resources can indicate a change in behavior of the monitored process, which can indicate a potentially compromised application. Deviations in resource ratios, such as memory-to-CPU or CPU-to-disk utilization, may signify compromised processes, malicious activity, or misconfigurations that could pose risks. A change in process behavior could signify a potential security breach within the Kubernetes environment, where an attacker may have compromised a process either on the node or running within a container. -action.escu.how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +action.escu.how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. 
\ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio action.escu.known_false_positives = unknown action.escu.creation_date = 2023-12-18 @@ -12096,17 +12096,17 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This analytic identifies shell activity within the Kubernetes privilege scope on a worker node, returning a list of shell processes regardless of CPU resource consumption. It uses process metrics from an OTEL collector hostmetrics receiver, pulled from Splunk Observability cloud via the Splunk Infrastructure Monitoring Add-on. Metrics used are process.cpu.utilization and process.memory.utilization. Shell processes can indicate unauthorized or suspicious activity, posing a security threat. Shell access to worker nodes can provide attackers an entry point to compromise the node and the entire Kubernetes cluster. Monitoring and detecting shell processes is crucial for anomaly identification, security policy enforcement, and breach mitigation. Unauthorized shell processes on a Kubernetes worker node can severely compromise the cluster's security and integrity. Such access can lead to data theft, service disruption, privilege escalation, lateral movement, and further attacks within the cluster. It may also enable attackers to manipulate configurations, deploy malicious containers, and execute arbitrary code, posing a severe risk to the confidentiality, availability, and integrity of applications and sensitive data. -action.escu.how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. 
Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +action.escu.how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. \ * Run the Search Baseline Of Kubernetes Container Network IO Ratio action.escu.known_false_positives = unknown action.escu.creation_date = 2023-12-18 @@ -12148,17 +12148,17 @@ description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splu action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1204"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This analytic identifies shell activity within the Kubernetes privilege scope on a worker node. It returns shell processes only if they're consuming CPU resources. The detection uses process metrics from an OTEL collector hostmetrics receiver, pulled from Splunk Observability cloud via the Splunk Infrastructure Monitoring Add-on. The metrics used are process.cpu.utilization and process.memory.utilization. Shell processes can indicate unauthorized activity, posing a security threat. Attackers could compromise the node and the entire Kubernetes cluster via shell access to worker nodes. Monitoring shell processes is crucial for anomaly detection, policy enforcement, and breach mitigation. Unauthorized shell processes on a Kubernetes worker node could severely impact the cluster's security and integrity. 
Attackers could gain full control over the host's resources and file system, compromising all hosted workloads and data. This access could lead to data theft, service disruption, privilege escalation, lateral movement, and further attacks within the cluster. Attackers could also manipulate configurations, deploy malicious containers, and execute arbitrary code, severely risking the confidentiality, availability, and integrity of applications and sensitive data. A rapid and comprehensive incident response is required to mitigate and recover from such a breach. -action.escu.how_to_implement = To implement this detection, follow these steps:\ -* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster.\ -* Enable the hostmetrics/process receiver in the OTEL configuration.\ -* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled.\ -* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247)\ -* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token.\ -* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index".\ -* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID.\ -* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K')\ -* Set the Metric Resolution to 10000.\ -* Leave all other settings at their default values.\ +action.escu.how_to_implement = To implement this detection, follow these steps: \ +* Deploy the OpenTelemetry Collector (OTEL) to your Kubernetes cluster. \ +* Enable the hostmetrics/process receiver in the OTEL configuration. \ +* Ensure that the process metrics, specifically Process.cpu.utilization and process.memory.utilization, are enabled. \ +* Install the Splunk Infrastructure Monitoring (SIM) add-on. (ref: https://splunkbase.splunk.com/app/5247) \ +* Configure the SIM add-on with your Observability Cloud Organization ID and Access Token. \ +* Set up the SIM modular input to ingest Process Metrics. Name this input "sim_process_metrics_to_metrics_index". \ +* In the SIM configuration, set the Organization ID to your Observability Cloud Organization ID. \ +* Set the Signal Flow Program to the following: data('process.threads').publish(label='A'); data('process.cpu.utilization').publish(label='B'); data('process.cpu.time').publish(label='C'); data('process.disk.io').publish(label='D'); data('process.memory.usage').publish(label='E'); data('process.memory.virtual').publish(label='F'); data('process.memory.utilization').publish(label='G'); data('process.cpu.utilization').publish(label='H'); data('process.disk.operations').publish(label='I'); data('process.handles').publish(label='J'); data('process.threads').publish(label='K') \ +* Set the Metric Resolution to 10000. \ +* Leave all other settings at their default values. 
\ * Run the Search Baseline Of Kubernetes Container Network IO Ratio action.escu.known_false_positives = unknown action.escu.creation_date = 2023-12-18 @@ -14610,7 +14610,7 @@ action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation action.escu.data_models = [] action.escu.eli5 = This search looks for AWS provisioning activities from previously unseen cities. Provisioning activities are defined broadly as any event that begins with "Run" or "Create." This search is deprecated and has been translated to use the latest Change Datamodel. action.escu.how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. This search works best when you run the "Previously Seen AWS Provisioning Activity Sources" support search once to create a history of previously seen locations that have provisioned AWS resources. -action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise.\ +action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new city is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your city, there should be few false positives. If you are located in countries where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. action.escu.creation_date = 2018-03-16 action.escu.modification_date = 2018-03-16 @@ -14652,7 +14652,7 @@ action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation action.escu.data_models = [] action.escu.eli5 = This search looks for AWS provisioning activities from previously unseen countries. Provisioning activities are defined broadly as any event that begins with "Run" or "Create." This search is deprecated and has been translated to use the latest Change Datamodel. action.escu.how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. This search works best when you run the "Previously Seen AWS Provisioning Activity Sources" support search once to create a history of previously seen locations that have provisioned AWS resources. -action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching over plus what is stored in the cache feature.
But while there are really no \"false positives\" in a traditional sense, there is definitely lots of noise.\ +action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching over plus what is stored in the cache feature. But while there are really no \"false positives\" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new country is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your country, there should be few false positives. If you are located in countries where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. action.escu.creation_date = 2018-03-16 action.escu.modification_date = 2018-03-16 @@ -14694,7 +14694,7 @@ action.escu.mappings = {"cis20": ["CIS 10"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This search looks for AWS provisioning activities from previously unseen IP addresses. Provisioning activities are defined broadly as any event that begins with "Run" or "Create." This search is deprecated and has been translated to use the latest Change Datamodel. action.escu.how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. This search works best when you run the "Previously Seen AWS Provisioning Activity Sources" support search once to create a history of previously seen locations that have provisioned AWS resources. -action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise.\ +action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new IP address is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your country, there should be few false positives. If you are located in countries where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. action.escu.creation_date = 2018-03-16 action.escu.modification_date = 2018-03-16 @@ -14736,7 +14736,7 @@ action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation action.escu.data_models = [] action.escu.eli5 = This search looks for AWS provisioning activities from previously unseen regions. Region in this context is similar to a state in the United States.
Provisioning activities are defined broadly as any event that begins with "Run" or "Create." This search is deprecated and has been translated to use the latest Change Datamodel. action.escu.how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. This search works best when you run the "Previously Seen AWS Provisioning Activity Sources" support search once to create a history of previously seen locations that have provisioned AWS resources. -action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise.\ +action.escu.known_false_positives = This is a strictly behavioral search, so we define "false positive" slightly differently. Every time this fires, it will accurately reflect the first occurrence in the time period you're searching within, plus what is stored in the cache feature. But while there are really no "false positives" in a traditional sense, there is definitely lots of noise. \ This search will fire any time a new region is seen in the **GeoIP** database for any kind of provisioning activity. If you typically do all provisioning from tools inside of your region, there should be few false positives. If you are located in regions where the free version of **MaxMind GeoIP** that ships by default with Splunk has weak resolution (particularly small countries in less economically powerful regions), this may be much less valuable to you. action.escu.creation_date = 2018-03-16 action.escu.modification_date = 2018-03-16 @@ -14813,9 +14813,9 @@ description = **WARNING**, this detection has been marked **DEPRECATED** by the action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Actions on Objectives"], "mitre_attack": ["T1048.003"], "nist": ["DE.CM"]} action.escu.data_models = ["Network_Resolution"] action.escu.eli5 = This search allows you to identify the endpoints that have connected to more than five DNS servers and made DNS Queries over the time frame of the search. -action.escu.how_to_implement = This search requires that DNS data is being ingested and populating the `Network_Resolution` data model. This data can come from DNS logs or from solutions that parse network traffic for this data, such as Splunk Stream or Bro.\ -This search produces fields (`dest_count`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry):\ -* **Label:** Distinct DNS Connections, **Field:** dest_count\ +action.escu.how_to_implement = This search requires that DNS data is being ingested and populating the `Network_Resolution` data model. This data can come from DNS logs or from solutions that parse network traffic for this data, such as Splunk Stream or Bro. \ +This search produces fields (`dest_count`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised.
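The known_false_positives notes for the provisioning detections above all hinge on the same "previously seen plus cache" behaviour: the first occurrence of a city, country, region, or IP address inside the cache window fires once, then goes quiet. A small sketch of that caching idea, with an assumed 30-day retention:

```python
import time

class FirstSeenCache:
    """Remember provisioning sources already observed and report only values
    not seen within the retention window."""

    def __init__(self, retention_days=30):
        self.retention = retention_days * 86400
        self.seen = {}                       # value -> last time it was observed

    def is_new(self, value, now=None):
        now = time.time() if now is None else now
        last = self.seen.get(value)
        self.seen[value] = now
        return last is None or (now - last) > self.retention

cache = FirstSeenCache()
print(cache.is_new("Frankfurt"))   # True  -> would surface as a notable
print(cache.is_new("Frankfurt"))   # False -> already cached
```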
These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry): \ +* **Label:** Distinct DNS Connections, **Field:** dest_count \ Detailed documentation on how to create a new field within Incident Review may be found here: `https://docs.splunk.com/Documentation/ES/5.3.0/Admin/Customizenotables#Add_a_field_to_the_notable_event_details` action.escu.known_false_positives = It's possible that an enterprise has more than five DNS servers that are configured in a round-robin rotation. Please customize the search, as appropriate. action.escu.creation_date = 2020-07-21 @@ -15024,11 +15024,11 @@ description = **WARNING**, this detection has been marked **DEPRECATED** by the action.escu.mappings = {"cis20": ["CIS 13"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This search looks for AWS CloudTrail events where a user logged into the AWS account, is making API calls and has not enabled Multi Factor authentication. Multi factor authentication adds a layer of security by forcing the users to type a unique authentication code from an approved authentication device when they access AWS websites or services. AWS Best Practices recommend that you enable MFA for privileged IAM users. -action.escu.how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. Leverage the support search `Create a list of approved AWS service accounts`: run it once every 30 days to create a list of service accounts and validate them.\ -This search produces fields (`eventName`,`userIdentity.type`,`userIdentity.arn`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry):\ -* **Label:** AWS Event Name, **Field:** eventName\ -* **Label:** AWS User ARN, **Field:** userIdentity.arn\ -* **Label:** AWS User Type, **Field:** userIdentity.type\ +action.escu.how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. Leverage the support search `Create a list of approved AWS service accounts`: run it once every 30 days to create a list of service accounts and validate them. \ +This search produces fields (`eventName`,`userIdentity.type`,`userIdentity.arn`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. 
To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry): \ +* **Label:** AWS Event Name, **Field:** eventName \ +* **Label:** AWS User ARN, **Field:** userIdentity.arn \ +* **Label:** AWS User Type, **Field:** userIdentity.type \ Detailed documentation on how to create a new field within Incident Review may be found here: `https://docs.splunk.com/Documentation/ES/5.3.0/Admin/Customizenotables#Add_a_field_to_the_notable_event_details` action.escu.known_false_positives = Many service accounts configured within an AWS infrastructure do not have multi factor authentication enabled. Please ignore the service accounts, if triggered and instead add them to the aws_service_accounts.csv file to fine tune the detection. It is also possible that the search detects users in your environment using Single Sign-On systems, since the MFA is not handled by AWS. action.escu.creation_date = 2018-05-17 @@ -15065,11 +15065,11 @@ description = **WARNING**, this detection has been marked **DEPRECATED** by the action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", "Exploitation", "Installation"], "mitre_attack": ["T1078.004"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This search looks for successful AWS CloudTrail activity by user accounts that are not listed in the identity table or `aws_service_accounts.csv`. It returns event names and count, as well as the first and last time a specific user or service is detected, grouped by users. Deprecated because managing this list can be quite hard. -action.escu.how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. You must also populate the `identity_lookup_expanded` lookup shipped with the Asset and Identity framework to be able to look up users in your identity table in Enterprise Security (ES). Leverage the support search called "Create a list of approved AWS service accounts": run it once every 30 days to create and validate a list of service accounts.\ -This search produces fields (`eventName`,`firstTime`,`lastTime`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry):\ -* **Label:** AWS Event Name, **Field:** eventName\ -* **Label:** First Time, **Field:** firstTime\ -* **Label:** Last Time, **Field:** lastTime\ +action.escu.how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. You must also populate the `identity_lookup_expanded` lookup shipped with the Asset and Identity framework to be able to look up users in your identity table in Enterprise Security (ES). Leverage the support search called "Create a list of approved AWS service accounts": run it once every 30 days to create and validate a list of service accounts. \ +This search produces fields (`eventName`,`firstTime`,`lastTime`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. 
These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry): \ +* **Label:** AWS Event Name, **Field:** eventName \ +* **Label:** First Time, **Field:** firstTime \ +* **Label:** Last Time, **Field:** lastTime \ Detailed documentation on how to create a new field within Incident Review may be found here: `https://docs.splunk.com/Documentation/ES/5.3.0/Admin/Customizenotables#Add_a_field_to_the_notable_event_details` action.escu.known_false_positives = It's likely that you'll find activity detected by users/service accounts that are not listed in the `identity_lookup_expanded` or ` aws_service_accounts.csv` file. If the user is a legitimate service account, update the `aws_service_accounts.csv` table with that entry. action.escu.creation_date = 2020-07-21 @@ -15106,9 +15106,9 @@ description = **WARNING**, this detection has been marked **DEPRECATED** by the action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Delivery"], "mitre_attack": ["T1566.003"], "nist": ["DE.CM"]} action.escu.data_models = ["Network_Resolution", "Web"] action.escu.eli5 = This search looks for DNS requests for phishing domains that are leveraging EvilGinx tools to mimic websites. -action.escu.how_to_implement = You need to ingest data from your DNS logs in the Network_Resolution datamodel. Specifically you must ingest the domain that is being queried and the IP of the host originating the request. Ideally, you should also be ingesting the answer to the query and the query type. This approach allows you to also create your own localized passive DNS capability which can aid you in future investigations. You will have to add legitimate domain names to the `legit_domains.csv` file shipped with the app.\ -**Splunk>Phantom Playbook Integration**\ -If Splunk>Phantom is also configured in your environment, a Playbook called `Lets Encrypt Domain Investigate` can be configured to run when any results are found by this detection search. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/`, add the correct hostname to the "Phantom Instance" field in the Adaptive Response Actions when configuring this detection search, and set the corresponding Playbook to active.\ +action.escu.how_to_implement = You need to ingest data from your DNS logs in the Network_Resolution datamodel. Specifically you must ingest the domain that is being queried and the IP of the host originating the request. Ideally, you should also be ingesting the answer to the query and the query type. This approach allows you to also create your own localized passive DNS capability which can aid you in future investigations. You will have to add legitimate domain names to the `legit_domains.csv` file shipped with the app. \ +**Splunk>Phantom Playbook Integration** \ +If Splunk>Phantom is also configured in your environment, a Playbook called `Lets Encrypt Domain Investigate` can be configured to run when any results are found by this detection search. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/`, add the correct hostname to the "Phantom Instance" field in the Adaptive Response Actions when configuring this detection search, and set the corresponding Playbook to active. 
\ (Playbook link:`https://my.phantom.us/4.2/playbook/lets-encrypt-domain-investigate/`) action.escu.known_false_positives = If a known good domain is not listed in the legit_domains.csv file, then the search could give you false postives. Please update that lookup file to filter out DNS requests to legitimate domains. action.escu.creation_date = 2020-07-21 @@ -15374,11 +15374,11 @@ description = **WARNING**, this detection has been marked **DEPRECATED** by the action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Delivery", "Exploitation", "Installation"], "mitre_attack": ["T1078.004"], "nist": ["DE.AE"]} action.escu.data_models = [] action.escu.eli5 = This search will detect users creating spikes of API activity in your AWS environment. It will also update the cache file that factors in the latest data. This search is deprecated and have been translated to use the latest Change Datamodel. -action.escu.how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. You can modify `dataPointThreshold` and `deviationThreshold` to better fit your environment. The `dataPointThreshold` variable is the minimum number of data points required to have a statistically significant amount of data to determine. The `deviationThreshold` variable is the number of standard deviations away from the mean that the value must be to be considered a spike.\ -This search produces fields (`eventName`,`numberOfApiCalls`,`uniqueApisCalled`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry):\ -* **Label:** AWS Event Name, **Field:** eventName\ -* **Label:** Number of API Calls, **Field:** numberOfApiCalls\ -* **Label:** Unique API Calls, **Field:** uniqueApisCalled\ +action.escu.how_to_implement = You must install the AWS App for Splunk (version 5.1.0 or later) and Splunk Add-on for AWS (version 4.4.0 or later), then configure your AWS CloudTrail inputs. You can modify `dataPointThreshold` and `deviationThreshold` to better fit your environment. The `dataPointThreshold` variable is the minimum number of data points required to have a statistically significant amount of data to determine. The `deviationThreshold` variable is the number of standard deviations away from the mean that the value must be to be considered a spike. \ +This search produces fields (`eventName`,`numberOfApiCalls`,`uniqueApisCalled`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. 
To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry): \ +* **Label:** AWS Event Name, **Field:** eventName \ +* **Label:** Number of API Calls, **Field:** numberOfApiCalls \ +* **Label:** Unique API Calls, **Field:** uniqueApisCalled \ Detailed documentation on how to create a new field within Incident Review may be found here: `https://docs.splunk.com/Documentation/ES/5.3.0/Admin/Customizenotables#Add_a_field_to_the_notable_event_details` action.escu.known_false_positives = None. action.escu.creation_date = 2020-07-21 @@ -15549,9 +15549,9 @@ description = **WARNING**, this detection has been marked **DEPRECATED** by the action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Command and Control"], "mitre_attack": ["T1071.001"], "nist": ["DE.CM"]} action.escu.data_models = ["Web"] action.escu.eli5 = This search looks for web connections to dynamic DNS providers. -action.escu.how_to_implement = This search requires you to be ingesting web-traffic logs. You can obtain these logs from indexing data from a web proxy or by using a network-traffic-analysis tool, such as Bro or Splunk Stream. The web data model must contain the URL being requested, the IP address of the host initiating the request, and the destination IP. This search also leverages a lookup file, `dynamic_dns_providers_default.csv`, which contains a non-exhaustive list of dynamic DNS providers. Consider periodically updating this local lookup file with new domains.\ -This search produces fields (`isDynDNS`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry):\ -* **Label:** IsDynamicDNS, **Field:** isDynDNS\ +action.escu.how_to_implement = This search requires you to be ingesting web-traffic logs. You can obtain these logs from indexing data from a web proxy or by using a network-traffic-analysis tool, such as Bro or Splunk Stream. The web data model must contain the URL being requested, the IP address of the host initiating the request, and the destination IP. This search also leverages a lookup file, `dynamic_dns_providers_default.csv`, which contains a non-exhaustive list of dynamic DNS providers. Consider periodically updating this local lookup file with new domains. \ +This search produces fields (`isDynDNS`) that are not yet supported by ES Incident Review and therefore cannot be viewed when a notable event is raised. These fields contribute additional context to the notable. To see the additional metadata, add the following fields, if not already present, to Incident Review - Event Attributes (Configure > Incident Management > Incident Review Settings > Add New Entry): \ +* **Label:** IsDynamicDNS, **Field:** isDynDNS \ Detailed documentation on how to create a new field within Incident Review may be found here: `https://docs.splunk.com/Documentation/ES/5.3.0/Admin/Customizenotables#Add_a_field_to_the_notable_event_details` Deprecated because duplicate. action.escu.known_false_positives = It is possible that list of dynamic DNS providers is outdated and/or that the URL being requested is legitimate. 
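As a rough illustration of the dynamic DNS detection described above, the sketch below shows one way to pull web requests from the Web data model and flag URLs whose domain appears in a local copy of the provider list. This is not the shipped ESCU search; the lookup name dynamic_dns_providers_local and its dynamic_dns_domains/isDynDNS fields are assumptions for the example, so substitute the lookup definition that fronts `dynamic_dns_providers_default.csv` in your environment.
| tstats `security_content_summariesonly` count min(_time) as firstTime max(_time) as lastTime from datamodel=Web where Web.url=* by Web.src Web.dest Web.url
| `drop_dm_object_name("Web")`
| eval domain=replace(url, "^(?:https?://)?([^/:]+).*$", "\1")
| lookup dynamic_dns_providers_local dynamic_dns_domains as domain OUTPUT isDynDNS
| where isnotnull(isDynDNS)
If the match is too noisy at first, ending the same pipeline in a stats count by src, domain makes it easy to review which providers actually appear before enabling notable events.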
action.escu.creation_date = 2020-07-21 @@ -15595,11 +15595,11 @@ search = | tstats `security_content_summariesonly` count values(Web.url) as url [ESCU - Detection of DNS Tunnels - Rule] action.escu = 0 action.escu.enabled = 1 -description = **WARNING**, this detection has been marked **DEPRECATED** by the Splunk Threat Research Team. This means that it will no longer be maintained or supported. If you have any questions feel free to email us at: research@splunk.com. This search is used to detect DNS tunneling, by calculating the sum of the length of DNS queries and DNS answers. The search also filters out potential false positives by filtering out queries made to internal systems and the queries originating from internal DNS, Web, and Email servers. Endpoints using DNS as a method of transmission for data exfiltration, Command And Control, or evasion of security controls can often be detected by noting an unusually large volume of DNS traffic.\ +description = **WARNING**, this detection has been marked **DEPRECATED** by the Splunk Threat Research Team. This means that it will no longer be maintained or supported. If you have any questions feel free to email us at: research@splunk.com. This search is used to detect DNS tunneling, by calculating the sum of the length of DNS queries and DNS answers. The search also filters out potential false positives by filtering out queries made to internal systems and the queries originating from internal DNS, Web, and Email servers. Endpoints using DNS as a method of transmission for data exfiltration, Command And Control, or evasion of security controls can often be detected by noting an unusually large volume of DNS traffic. \ NOTE:Deprecated because existing detection is doing the same. This detection is replaced with two other variations, if you are using MLTK then you can use this search `ESCU - DNS Query Length Outliers - MLTK - Rule` or use the standard deviation version `ESCU - DNS Query Length With High Standard Deviation - Rule`, as an alternantive. action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Actions on Objectives"], "mitre_attack": ["T1048.003"], "nist": ["DE.CM"]} action.escu.data_models = ["Network_Resolution"] -action.escu.eli5 = This search is used to detect DNS tunneling, by calculating the sum of the length of DNS queries and DNS answers. The search also filters out potential false positives by filtering out queries made to internal systems and the queries originating from internal DNS, Web, and Email servers. Endpoints using DNS as a method of transmission for data exfiltration, Command And Control, or evasion of security controls can often be detected by noting an unusually large volume of DNS traffic.\ +action.escu.eli5 = This search is used to detect DNS tunneling, by calculating the sum of the length of DNS queries and DNS answers. The search also filters out potential false positives by filtering out queries made to internal systems and the queries originating from internal DNS, Web, and Email servers. Endpoints using DNS as a method of transmission for data exfiltration, Command And Control, or evasion of security controls can often be detected by noting an unusually large volume of DNS traffic. \ NOTE:Deprecated because existing detection is doing the same. 
This detection is replaced with two other variations, if you are using MLTK then you can use this search `ESCU - DNS Query Length Outliers - MLTK - Rule` or use the standard deviation version `ESCU - DNS Query Length With High Standard Deviation - Rule`, as an alternantive. action.escu.how_to_implement = To successfully implement this search, we must ensure that DNS data is being ingested and mapped to the appropriate fields in the Network_Resolution data model. Fields like src_category are automatically provided by the Assets and Identity Framework shipped with Splunk Enterprise Security. You will need to ensure you are using the Assets and Identity Framework and populating the src_category field. You will also need to enable the `cim_corporate_web_domain_search()` macro which will essentially filter out the DNS queries made to the corporate web domains to reduce alert fatigue. action.escu.known_false_positives = It's possible that normal DNS traffic will exhibit this behavior. If an alert is generated, please investigate and validate as appropriate. The threshold can also be modified to better suit your environment. @@ -15626,7 +15626,7 @@ action.correlationsearch.metadata = {"deprecated": "1", "detection_id": "104658f schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = This search is used to detect DNS tunneling, by calculating the sum of the length of DNS queries and DNS answers. The search also filters out potential false positives by filtering out queries made to internal systems and the queries originating from internal DNS, Web, and Email servers. Endpoints using DNS as a method of transmission for data exfiltration, Command And Control, or evasion of security controls can often be detected by noting an unusually large volume of DNS traffic.\ +action.notable.param.rule_description = This search is used to detect DNS tunneling, by calculating the sum of the length of DNS queries and DNS answers. The search also filters out potential false positives by filtering out queries made to internal systems and the queries originating from internal DNS, Web, and Email servers. Endpoints using DNS as a method of transmission for data exfiltration, Command And Control, or evasion of security controls can often be detected by noting an unusually large volume of DNS traffic. \ NOTE:Deprecated because existing detection is doing the same. This detection is replaced with two other variations, if you are using MLTK then you can use this search `ESCU - DNS Query Length Outliers - MLTK - Rule` or use the standard deviation version `ESCU - DNS Query Length With High Standard Deviation - Rule`, as an alternantive. action.notable.param.rule_title = Detection of DNS Tunnels action.notable.param.security_domain = network @@ -15696,9 +15696,9 @@ description = **WARNING**, this detection has been marked **DEPRECATED** by the action.escu.mappings = {"cis20": ["CIS 13"], "kill_chain_phases": ["Command and Control"], "mitre_attack": ["T1071.004"], "nist": ["DE.CM"]} action.escu.data_models = ["Network_Resolution"] action.escu.eli5 = The search takes the DNS records and their answers results of the discovered_dns_records lookup and finds if any records have changed by searching DNS response from the Network_Resolution datamodel across the last day. -action.escu.how_to_implement = To successfully implement this search you will need to ensure that DNS data is populating the `Network_Resolution` data model. 
It also requires that the `discover_dns_record` lookup table be populated by the included support search "Discover DNS record".\ -**Splunk>Phantom Playbook Integration**\ -If Splunk>Phantom is also configured in your environment, a Playbook called "DNS Hijack Enrichment" can be configured to run when any results are found by this detection search. The playbook takes in the DNS record changed and uses Geoip, whois, Censys and PassiveTotal to detect if DNS issuers changed. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/`, add the correct hostname to the \"Phantom Instance\" field in the Adaptive Response Actions when configuring this detection search, and set the corresponding Playbook to active.\ +action.escu.how_to_implement = To successfully implement this search you will need to ensure that DNS data is populating the `Network_Resolution` data model. It also requires that the `discover_dns_record` lookup table be populated by the included support search "Discover DNS record". \ +**Splunk>Phantom Playbook Integration** \ +If Splunk>Phantom is also configured in your environment, a Playbook called "DNS Hijack Enrichment" can be configured to run when any results are found by this detection search. The playbook takes in the DNS record changed and uses Geoip, whois, Censys and PassiveTotal to detect if DNS issuers changed. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/`, add the correct hostname to the \"Phantom Instance\" field in the Adaptive Response Actions when configuring this detection search, and set the corresponding Playbook to active. \ (Playbook Link:`https://my.phantom.us/4.2/playbook/dns-hijack-enrichment/`) action.escu.known_false_positives = Legitimate DNS changes can be detected in this search. Investigate, verify and update the list of provided current answers for the domains in question as appropriate. action.escu.creation_date = 2020-07-21 @@ -15742,11 +15742,11 @@ search = | inputlookup discovered_dns_records | rename answer as discovered_answ [ESCU - Dump LSASS via procdump Rename - Rule] action.escu = 0 action.escu.enabled = 1 -description = **WARNING**, this detection has been marked **DEPRECATED** by the Splunk Threat Research Team. This means that it will no longer be maintained or supported. If you have any questions feel free to email us at: research@splunk.com. Detect a renamed instance of procdump.exe dumping the lsass process. This query looks for both -mm and -ma usage. -mm will produce a mini dump file and -ma will write a dump file with all process memory. Both are highly suspect and should be reviewed. Modify the query as needed.\ +description = **WARNING**, this detection has been marked **DEPRECATED** by the Splunk Threat Research Team. This means that it will no longer be maintained or supported. If you have any questions feel free to email us at: research@splunk.com. Detect a renamed instance of procdump.exe dumping the lsass process. This query looks for both -mm and -ma usage. -mm will produce a mini dump file and -ma will write a dump file with all process memory. Both are highly suspect and should be reviewed. Modify the query as needed. \ During triage, confirm this is procdump.exe executing. If it is the first time a Sysinternals utility has been ran, it is possible there will be a -accepteula on the command line. Review other endpoint data sources for cross process (injection) into lsass.exe. 
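For reference, a minimal sketch of the renamed-procdump idea against the Endpoint data model might look like the following. It assumes EDR or Sysmon data is already CIM-mapped into Endpoint.Processes, and it keys on the -ma/-mm flags plus lsass in the command line while excluding the legitimate ProcDump binary names; it captures the gist of the detection rather than the exact shipped search.
| tstats `security_content_summariesonly` count min(_time) as firstTime max(_time) as lastTime from datamodel=Endpoint.Processes where Processes.process="*lsass*" (Processes.process="*-ma*" OR Processes.process="*-mm*") NOT (Processes.process_name="procdump.exe" OR Processes.process_name="procdump64.exe") by Processes.dest Processes.user Processes.parent_process_name Processes.process_name Processes.process
| `drop_dm_object_name("Processes")`
| `security_content_ctime(firstTime)`
| `security_content_ctime(lastTime)`
The flag patterns are deliberately loose and may need tightening (for example, requiring a space before the switch) to cut down on unrelated command lines.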
action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1003.001"], "nist": ["DE.AE"]} action.escu.data_models = [] -action.escu.eli5 = Detect a renamed instance of procdump.exe dumping the lsass process. This query looks for both -mm and -ma usage. -mm will produce a mini dump file and -ma will write a dump file with all process memory. Both are highly suspect and should be reviewed. Modify the query as needed.\ +action.escu.eli5 = Detect a renamed instance of procdump.exe dumping the lsass process. This query looks for both -mm and -ma usage. -mm will produce a mini dump file and -ma will write a dump file with all process memory. Both are highly suspect and should be reviewed. Modify the query as needed. \ During triage, confirm this is procdump.exe executing. If it is the first time a Sysinternals utility has been ran, it is possible there will be a -accepteula on the command line. Review other endpoint data sources for cross process (injection) into lsass.exe. action.escu.how_to_implement = To successfully implement this search you need to be ingesting information on process that include the name of the process responsible for the changes from your endpoints into the `Endpoint` datamodel in the `Processes` node. action.escu.known_false_positives = None identified. @@ -20487,11 +20487,11 @@ search = `sysmon` EventCode=7 ImageLoaded IN ("*\\CMLUA.dll", "*\\CMSTPLUA.dll" [ESCU - Cobalt Strike Named Pipes - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic identifies the use of default or publicly known named pipes used with Cobalt Strike. A named pipe is a named, one-way or duplex pipe for communication between the pipe server and one or more pipe clients. Cobalt Strike uses named pipes in many ways and has default values used with the Artifact Kit and Malleable C2 Profiles. The following query assists with identifying these default named pipes. Each EDR product presents named pipes a little different. Consider taking the values and generating a query based on the product of choice.\ +description = The following analytic identifies the use of default or publicly known named pipes used with Cobalt Strike. A named pipe is a named, one-way or duplex pipe for communication between the pipe server and one or more pipe clients. Cobalt Strike uses named pipes in many ways and has default values used with the Artifact Kit and Malleable C2 Profiles. The following query assists with identifying these default named pipes. Each EDR product presents named pipes a little different. Consider taking the values and generating a query based on the product of choice. \ Upon triage, review the process performing the named pipe. If it is explorer.exe, It is possible it was injected into by another process. Review recent parallel processes to identify suspicious patterns or behaviors. A parallel process may have a network connection, review and follow the connection back to identify any file modifications. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1055"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic identifies the use of default or publicly known named pipes used with Cobalt Strike. A named pipe is a named, one-way or duplex pipe for communication between the pipe server and one or more pipe clients. Cobalt Strike uses named pipes in many ways and has default values used with the Artifact Kit and Malleable C2 Profiles. 
The following query assists with identifying these default named pipes. Each EDR product presents named pipes a little different. Consider taking the values and generating a query based on the product of choice.\ +action.escu.eli5 = The following analytic identifies the use of default or publicly known named pipes used with Cobalt Strike. A named pipe is a named, one-way or duplex pipe for communication between the pipe server and one or more pipe clients. Cobalt Strike uses named pipes in many ways and has default values used with the Artifact Kit and Malleable C2 Profiles. The following query assists with identifying these default named pipes. Each EDR product presents named pipes a little different. Consider taking the values and generating a query based on the product of choice. \ Upon triage, review the process performing the named pipe. If it is explorer.exe, It is possible it was injected into by another process. Review recent parallel processes to identify suspicious patterns or behaviors. A parallel process may have a network connection, review and follow the connection back to identify any file modifications. action.escu.how_to_implement = To successfully implement this search, you need to be ingesting logs with the process name, parent process, and command-line executions from your endpoints. If you are using Sysmon, you must have at least version 6.0.4 of the Sysmon TA. action.escu.known_false_positives = The idea of using named pipes with Cobalt Strike is to blend in. Therefore, some of the named pipes identified and added may cause false positives. Filter by process name or pipe name to reduce false positives. @@ -20518,7 +20518,7 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "5876d42 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic identifies the use of default or publicly known named pipes used with Cobalt Strike. A named pipe is a named, one-way or duplex pipe for communication between the pipe server and one or more pipe clients. Cobalt Strike uses named pipes in many ways and has default values used with the Artifact Kit and Malleable C2 Profiles. The following query assists with identifying these default named pipes. Each EDR product presents named pipes a little different. Consider taking the values and generating a query based on the product of choice.\ +action.notable.param.rule_description = The following analytic identifies the use of default or publicly known named pipes used with Cobalt Strike. A named pipe is a named, one-way or duplex pipe for communication between the pipe server and one or more pipe clients. Cobalt Strike uses named pipes in many ways and has default values used with the Artifact Kit and Malleable C2 Profiles. The following query assists with identifying these default named pipes. Each EDR product presents named pipes a little different. Consider taking the values and generating a query based on the product of choice. \ Upon triage, review the process performing the named pipe. If it is explorer.exe, It is possible it was injected into by another process. Review recent parallel processes to identify suspicious patterns or behaviors. A parallel process may have a network connection, review and follow the connection back to identify any file modifications. 
action.notable.param.rule_title = Cobalt Strike Named Pipes action.notable.param.security_domain = endpoint @@ -21856,15 +21856,15 @@ search = `wineventlog_security` EventCode=4624 OR EventCode=4742 TargetUserName= [ESCU - Detect Copy of ShadowCopy with Script Block Logging - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies `copy` or `[System.IO.File]::Copy` being used to capture the SAM, SYSTEM or SECURITY hives identified in script block. This will catch the most basic use cases for credentials being taken for offline cracking.\ +description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies `copy` or `[System.IO.File]::Copy` being used to capture the SAM, SYSTEM or SECURITY hives identified in script block. This will catch the most basic use cases for credentials being taken for offline cracking. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1003.002", "T1003"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies `copy` or `[System.IO.File]::Copy` being used to capture the SAM, SYSTEM or SECURITY hives identified in script block. This will catch the most basic use cases for credentials being taken for offline cracking.\ +action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies `copy` or `[System.IO.File]::Copy` being used to capture the SAM, SYSTEM or SECURITY hives identified in script block. This will catch the most basic use cases for credentials being taken for offline cracking. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. 
Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. action.escu.known_false_positives = Limited false positives as the scope is limited to SAM, SYSTEM and SECURITY hives. @@ -21891,9 +21891,9 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "9251299 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies `copy` or `[System.IO.File]::Copy` being used to capture the SAM, SYSTEM or SECURITY hives identified in script block. This will catch the most basic use cases for credentials being taken for offline cracking.\ +action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies `copy` or `[System.IO.File]::Copy` being used to capture the SAM, SYSTEM or SECURITY hives identified in script block. This will catch the most basic use cases for credentials being taken for offline cracking. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.notable.param.rule_title = Detect Copy of ShadowCopy with Script Block Logging action.notable.param.security_domain = endpoint @@ -21959,15 +21959,15 @@ search = `sysmon` EventCode=10 TargetImage=*lsass.exe (GrantedAccess=0x1010 OR G [ESCU - Detect Empire with PowerShell Script Block Logging - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies the common PowerShell stager used by PowerShell-Empire. Each stager that may use PowerShell all uses the same pattern. The initial HTTP will be base64 encoded and use `system.net.webclient`. Note that some obfuscation may evade the analytic.\ +description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies the common PowerShell stager used by PowerShell-Empire. Each stager that may use PowerShell all uses the same pattern. The initial HTTP will be base64 encoded and use `system.net.webclient`. Note that some obfuscation may evade the analytic. 
\ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1059", "T1059.001"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies the common PowerShell stager used by PowerShell-Empire. Each stager that may use PowerShell all uses the same pattern. The initial HTTP will be base64 encoded and use `system.net.webclient`. Note that some obfuscation may evade the analytic.\ +action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies the common PowerShell stager used by PowerShell-Empire. Each stager that may use PowerShell all uses the same pattern. The initial HTTP will be base64 encoded and use `system.net.webclient`. Note that some obfuscation may evade the analytic. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. action.escu.known_false_positives = False positives may only pertain to it not being related to Empire, but another framework. Filter as needed if any applications use the same pattern. @@ -21994,9 +21994,9 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "bc1dc6b schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies the common PowerShell stager used by PowerShell-Empire. Each stager that may use PowerShell all uses the same pattern. The initial HTTP will be base64 encoded and use `system.net.webclient`. Note that some obfuscation may evade the analytic.\ +action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. 
\ + \ +This analytic identifies the common PowerShell stager used by PowerShell-Empire. Each stager that may use PowerShell all uses the same pattern. The initial HTTP will be base64 encoded and use `system.net.webclient`. Note that some obfuscation may evade the analytic. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.notable.param.rule_title = Detect Empire with PowerShell Script Block Logging action.notable.param.security_domain = endpoint @@ -22019,8 +22019,8 @@ description = This search identifies endpoints that have caused a relatively hig action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", "Exploitation", "Installation"], "mitre_attack": ["T1078", "T1078.002"], "nist": ["DE.AE"]} action.escu.data_models = ["Change"] action.escu.eli5 = This search identifies endpoints that have caused a relatively high number of account lockouts in a short period. -action.escu.how_to_implement = You must ingest your Windows security event logs in the `Change` datamodel under the nodename is `Account_Management`, for this search to execute successfully. Please consider updating the cron schedule and the count of lockouts you want to monitor, according to your environment.\ -**Splunk>Phantom Playbook Integration** If Splunk>Phantom is also configured in your environment, a Playbook called "Excessive Account Lockouts Enrichment and Response" can be configured to run when any results are found by this detection search. The Playbook executes the Contextual and Investigative searches in this Story, conducts additional information gathering on Windows endpoints, and takes a response action to shut down the affected endpoint. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/`, add the correct hostname to the "Phantom Instance" field in the Adaptive Response Actions when configuring this detection search, and set the corresponding Playbook to active.\ +action.escu.how_to_implement = You must ingest your Windows security event logs in the `Change` datamodel under the nodename `Account_Management` for this search to execute successfully. Please consider updating the cron schedule and the count of lockouts you want to monitor, according to your environment. \ +**Splunk>Phantom Playbook Integration** If Splunk>Phantom is also configured in your environment, a Playbook called "Excessive Account Lockouts Enrichment and Response" can be configured to run when any results are found by this detection search. The Playbook executes the Contextual and Investigative searches in this Story, conducts additional information gathering on Windows endpoints, and takes a response action to shut down the affected endpoint. To use this integration, install the Phantom App for Splunk `https://splunkbase.splunk.com/app/3411/`, add the correct hostname to the "Phantom Instance" field in the Adaptive Response Actions when configuring this detection search, and set the corresponding Playbook to active. \ Playbook Link:`https://my.phantom.us/4.1/playbook/excessive-account-lockouts-enrichment-and-response/`) action.escu.known_false_positives = It's possible that a widely used system, such as a kiosk, could cause a large number of account lockouts.
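A simplified version of the lockout logic described above, assuming Windows security events are already CIM-mapped into the Change data model, could look like the sketch below. The result="*lock*" filter and the threshold of 5 are placeholders to adjust, and this is not the exact shipped search.
| tstats `security_content_summariesonly` count min(_time) as firstTime max(_time) as lastTime values(All_Changes.user) as user from datamodel=Change.All_Changes where nodename=All_Changes.Account_Management All_Changes.result="*lock*" by All_Changes.dest
| `drop_dm_object_name("All_Changes")`
| where count > 5
Scheduling it over a short window (for example, hourly over the last hour) is what turns the raw count into the "short period" the description calls for.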
action.escu.creation_date = 2024-03-19 @@ -22323,15 +22323,15 @@ search = | tstats `security_content_summariesonly` count min(_time) as firstTime [ESCU - Detect Mimikatz With PowerShell Script Block Logging - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all.\ -\ -This analytic identifies common Mimikatz functions that may be identified in the script block, including `mimikatz`. This will catch the most basic use cases for Pass the Ticket, Pass the Hash and `-DumprCreds`.\ +description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies common Mimikatz functions that may be identified in the script block, including `mimikatz`. This will catch the most basic use cases for Pass the Ticket, Pass the Hash and `-DumprCreds`. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1003", "T1059.001"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all.\ -\ -This analytic identifies common Mimikatz functions that may be identified in the script block, including `mimikatz`. This will catch the most basic use cases for Pass the Ticket, Pass the Hash and `-DumprCreds`.\ +action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies common Mimikatz functions that may be identified in the script block, including `mimikatz`. This will catch the most basic use cases for Pass the Ticket, Pass the Hash and `-DumprCreds`. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints.
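For orientation, a stripped-down 4104 search in the spirit of this detection is sketched below. The `powershell` macro and the ScriptBlockText field assume the standard Microsoft-Windows-PowerShell/Operational XML logs, and the keyword list is illustrative, not the complete set shipped with ESCU.
`powershell` EventCode=4104 (ScriptBlockText="*mimikatz*" OR ScriptBlockText="*sekurlsa*" OR ScriptBlockText="*DumpCreds*")
| stats count min(_time) as firstTime max(_time) as lastTime by dest, ScriptBlockText
| `security_content_ctime(firstTime)`
| `security_content_ctime(lastTime)`
Keeping the full ScriptBlockText in the output supports the triage step of reviewing the entire logged script block rather than just the matched keyword.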
action.escu.known_false_positives = False positives should be limited as the commands being identifies are quite specific to EventCode 4104 and Mimikatz. Filter as needed. @@ -22358,9 +22358,9 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "8148c29 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all.\ -\ -This analytic identifies common Mimikatz functions that may be identified in the script block, including `mimikatz`. This will catch the most basic use cases for Pass the Ticket, Pass the Hash and `-DumprCreds`.\ +action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all. \ + \ +This analytic identifies common Mimikatz functions that may be identified in the script block, including `mimikatz`. This will catch the most basic use cases for Pass the Ticket, Pass the Hash and `-DumprCreds`. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.notable.param.rule_title = Detect Mimikatz With PowerShell Script Block Logging action.notable.param.security_domain = endpoint @@ -23150,11 +23150,11 @@ search = | tstats `security_content_summariesonly` count FROM datamodel=Endpoint [ESCU - Detect Regsvr32 Application Control Bypass - Rule] action.escu = 0 action.escu.enabled = 1 -description = Adversaries may abuse Regsvr32.exe to proxy execution of malicious code. Regsvr32.exe is a command-line program used to register and unregister object linking and embedding controls, including dynamic link libraries (DLLs), on Windows systems. Regsvr32.exe is also a Microsoft signed binary.This variation of the technique is often referred to as a "Squiblydoo" attack.\ +description = Adversaries may abuse Regsvr32.exe to proxy execution of malicious code. Regsvr32.exe is a command-line program used to register and unregister object linking and embedding controls, including dynamic link libraries (DLLs), on Windows systems. Regsvr32.exe is also a Microsoft signed binary.This variation of the technique is often referred to as a "Squiblydoo" attack. \ Upon investigating, look for network connections to remote destinations (internal or external). Be cautious to modify the query to look for "scrobj.dll", the ".dll" is not required to load scrobj. "scrobj.dll" will be loaded by "regsvr32.exe" upon execution. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1218", "T1218.010"], "nist": ["DE.CM"]} action.escu.data_models = ["Endpoint"] -action.escu.eli5 = Adversaries may abuse Regsvr32.exe to proxy execution of malicious code. 
Regsvr32.exe is a command-line program used to register and unregister object linking and embedding controls, including dynamic link libraries (DLLs), on Windows systems. Regsvr32.exe is also a Microsoft signed binary.This variation of the technique is often referred to as a "Squiblydoo" attack.\ +action.escu.eli5 = Adversaries may abuse Regsvr32.exe to proxy execution of malicious code. Regsvr32.exe is a command-line program used to register and unregister object linking and embedding controls, including dynamic link libraries (DLLs), on Windows systems. Regsvr32.exe is also a Microsoft signed binary.This variation of the technique is often referred to as a "Squiblydoo" attack. \ Upon investigating, look for network connections to remote destinations (internal or external). Be cautious to modify the query to look for "scrobj.dll", the ".dll" is not required to load scrobj. "scrobj.dll" will be loaded by "regsvr32.exe" upon execution. action.escu.how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. action.escu.known_false_positives = Limited false positives related to third party software registering .DLL's. @@ -23181,7 +23181,7 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "070e9b8 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = Adversaries may abuse Regsvr32.exe to proxy execution of malicious code. Regsvr32.exe is a command-line program used to register and unregister object linking and embedding controls, including dynamic link libraries (DLLs), on Windows systems. Regsvr32.exe is also a Microsoft signed binary.This variation of the technique is often referred to as a "Squiblydoo" attack.\ +action.notable.param.rule_description = Adversaries may abuse Regsvr32.exe to proxy execution of malicious code. Regsvr32.exe is a command-line program used to register and unregister object linking and embedding controls, including dynamic link libraries (DLLs), on Windows systems. Regsvr32.exe is also a Microsoft signed binary.This variation of the technique is often referred to as a "Squiblydoo" attack. \ Upon investigating, look for network connections to remote destinations (internal or external). Be cautious to modify the query to look for "scrobj.dll", the ".dll" is not required to load scrobj. "scrobj.dll" will be loaded by "regsvr32.exe" upon execution. action.notable.param.rule_title = Detect Regsvr32 Application Control Bypass action.notable.param.security_domain = endpoint @@ -24025,19 +24025,19 @@ search = | tstats `security_content_summariesonly` count max(_time) as lastTime, [ESCU - Detect WMI Event Subscription Persistence - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic identifies the use of WMI Event Subscription to establish persistence or perform privilege escalation. 
WMI can be used to install event filters, providers, consumers, and bindings that execute code when a defined event occurs. WMI subscription execution is proxied by the WMI Provider Host process (WmiPrvSe.exe) and thus may result in elevated SYSTEM privileges. This analytic is restricted by commonly added process execution and a path. If the volume is low enough, remove the values and flag on any new subscriptions.\ -All event subscriptions have three components\ -1. Filter - WQL Query for the events we want. EventID equals 19\ -1. Consumer - An action to take upon triggering the filter. EventID equals 20\ -1. Binding - Registers a filter to a consumer. EventID equals 21\ +description = The following analytic identifies the use of WMI Event Subscription to establish persistence or perform privilege escalation. WMI can be used to install event filters, providers, consumers, and bindings that execute code when a defined event occurs. WMI subscription execution is proxied by the WMI Provider Host process (WmiPrvSe.exe) and thus may result in elevated SYSTEM privileges. This analytic is restricted by commonly added process execution and a path. If the volume is low enough, remove the values and flag on any new subscriptions. \ +All event subscriptions have three components \ +1. Filter - WQL Query for the events we want. EventID equals 19 \ +1. Consumer - An action to take upon triggering the filter. EventID equals 20 \ +1. Binding - Registers a filter to a consumer. EventID equals 21 \ Monitor for the creation of new WMI EventFilter, EventConsumer, and FilterToConsumerBinding. It may be pertinent to review all 3 to identify the flow of execution. In addition, EventCode 4104 may assist with any other PowerShell script usage that registered the subscription. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1546.003", "T1546"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic identifies the use of WMI Event Subscription to establish persistence or perform privilege escalation. WMI can be used to install event filters, providers, consumers, and bindings that execute code when a defined event occurs. WMI subscription execution is proxied by the WMI Provider Host process (WmiPrvSe.exe) and thus may result in elevated SYSTEM privileges. This analytic is restricted by commonly added process execution and a path. If the volume is low enough, remove the values and flag on any new subscriptions.\ -All event subscriptions have three components\ -1. Filter - WQL Query for the events we want. EventID equals 19\ -1. Consumer - An action to take upon triggering the filter. EventID equals 20\ -1. Binding - Registers a filter to a consumer. EventID equals 21\ +action.escu.eli5 = The following analytic identifies the use of WMI Event Subscription to establish persistence or perform privilege escalation. WMI can be used to install event filters, providers, consumers, and bindings that execute code when a defined event occurs. WMI subscription execution is proxied by the WMI Provider Host process (WmiPrvSe.exe) and thus may result in elevated SYSTEM privileges. This analytic is restricted by commonly added process execution and a path. If the volume is low enough, remove the values and flag on any new subscriptions. \ +All event subscriptions have three components \ +1. Filter - WQL Query for the events we want. EventID equals 19 \ +1. Consumer - An action to take upon triggering the filter. 
EventID equals 20 \ +1. Binding - Registers a filter to a consumer. EventID equals 21 \ Monitor for the creation of new WMI EventFilter, EventConsumer, and FilterToConsumerBinding. It may be pertinent to review all 3 to identify the flow of execution. In addition, EventCode 4104 may assist with any other PowerShell script usage that registered the subscription. action.escu.how_to_implement = To successfully implement this search, you need to be ingesting logs with that provide WMI Event Subscription from your endpoints. If you are using Sysmon, you must have at least version 6.0.4 of the Sysmon TA and have enabled EventID 19, 20 and 21. Tune and filter known good to limit the volume. action.escu.known_false_positives = It is possible some applications will create a consumer and may be required to be filtered. For tuning, add any additional LOLBin's for further depth of coverage. @@ -24064,11 +24064,11 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "01d9a0c schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic identifies the use of WMI Event Subscription to establish persistence or perform privilege escalation. WMI can be used to install event filters, providers, consumers, and bindings that execute code when a defined event occurs. WMI subscription execution is proxied by the WMI Provider Host process (WmiPrvSe.exe) and thus may result in elevated SYSTEM privileges. This analytic is restricted by commonly added process execution and a path. If the volume is low enough, remove the values and flag on any new subscriptions.\ -All event subscriptions have three components\ -1. Filter - WQL Query for the events we want. EventID equals 19\ -1. Consumer - An action to take upon triggering the filter. EventID equals 20\ -1. Binding - Registers a filter to a consumer. EventID equals 21\ +action.notable.param.rule_description = The following analytic identifies the use of WMI Event Subscription to establish persistence or perform privilege escalation. WMI can be used to install event filters, providers, consumers, and bindings that execute code when a defined event occurs. WMI subscription execution is proxied by the WMI Provider Host process (WmiPrvSe.exe) and thus may result in elevated SYSTEM privileges. This analytic is restricted by commonly added process execution and a path. If the volume is low enough, remove the values and flag on any new subscriptions. \ +All event subscriptions have three components \ +1. Filter - WQL Query for the events we want. EventID equals 19 \ +1. Consumer - An action to take upon triggering the filter. EventID equals 20 \ +1. Binding - Registers a filter to a consumer. EventID equals 21 \ Monitor for the creation of new WMI EventFilter, EventConsumer, and FilterToConsumerBinding. It may be pertinent to review all 3 to identify the flow of execution. In addition, EventCode 4104 may assist with any other PowerShell script usage that registered the subscription. 
action.notable.param.rule_title = Detect WMI Event Subscription Persistence action.notable.param.security_domain = endpoint @@ -24974,11 +24974,11 @@ search = `powershell` EventCode=4104 (ScriptBlockText = "*Get-ADUser*" AND Scri [ESCU - Disabled Kerberos Pre-Authentication Discovery With PowerView - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify the execution of the `Get-DomainUser` commandlet with specific parameters. `Get-DomainUser` is part of PowerView, a PowerShell tool used to perform enumeration on Windows Active Directory networks. As the name suggests, `Get-DomainUser` is used to identify domain users and combining it with `-PreauthNotRequired` allows adversaries to discover domain accounts with Kerberos Pre Authentication disabled.\ +description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify the execution of the `Get-DomainUser` commandlet with specific parameters. `Get-DomainUser` is part of PowerView, a PowerShell tool used to perform enumeration on Windows Active Directory networks. As the name suggests, `Get-DomainUser` is used to identify domain users and combining it with `-PreauthNotRequired` allows adversaries to discover domain accounts with Kerberos Pre Authentication disabled. \ Red Teams and adversaries alike use may leverage PowerView to enumerate these accounts and attempt to crack their passwords offline. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1558", "T1558.004"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify the execution of the `Get-DomainUser` commandlet with specific parameters. `Get-DomainUser` is part of PowerView, a PowerShell tool used to perform enumeration on Windows Active Directory networks. As the name suggests, `Get-DomainUser` is used to identify domain users and combining it with `-PreauthNotRequired` allows adversaries to discover domain accounts with Kerberos Pre Authentication disabled.\ +action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify the execution of the `Get-DomainUser` commandlet with specific parameters. `Get-DomainUser` is part of PowerView, a PowerShell tool used to perform enumeration on Windows Active Directory networks. As the name suggests, `Get-DomainUser` is used to identify domain users and combining it with `-PreauthNotRequired` allows adversaries to discover domain accounts with Kerberos Pre Authentication disabled. \ Red Teams and adversaries alike use may leverage PowerView to enumerate these accounts and attempt to crack their passwords offline. action.escu.how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. 
action.escu.known_false_positives = Administrators or power users may use PowerView for troubleshooting @@ -25005,7 +25005,7 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "b0b34e2 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify the execution of the `Get-DomainUser` commandlet with specific parameters. `Get-DomainUser` is part of PowerView, a PowerShell tool used to perform enumeration on Windows Active Directory networks. As the name suggests, `Get-DomainUser` is used to identify domain users and combining it with `-PreauthNotRequired` allows adversaries to discover domain accounts with Kerberos Pre Authentication disabled.\ +action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify the execution of the `Get-DomainUser` commandlet with specific parameters. `Get-DomainUser` is part of PowerView, a PowerShell tool used to perform enumeration on Windows Active Directory networks. As the name suggests, `Get-DomainUser` is used to identify domain users and combining it with `-PreauthNotRequired` allows adversaries to discover domain accounts with Kerberos Pre Authentication disabled. \ Red Teams and adversaries alike use may leverage PowerView to enumerate these accounts and attempt to crack their passwords offline. action.notable.param.rule_title = Disabled Kerberos Pre-Authentication Discovery With PowerView action.notable.param.security_domain = endpoint @@ -26080,19 +26080,19 @@ search = `sysmon` EventCode= 11 TargetFilename = "*\\license.dat" AND (TargetFi [ESCU - DSQuery Domain Discovery - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic identifies "dsquery.exe" execution with arguments looking for `TrustedDomain` query directly on the command-line. This is typically indicative of an Administrator or adversary perform domain trust discovery. Note that this query does not identify any other variations of "Dsquery.exe" usage.\ -Within this detection, it is assumed `dsquery.exe` is not moved or renamed.\ -The search will return the first time and last time these command-line arguments were used for these executions, as well as the target system, the user, process "dsquery.exe" and its parent process.\ -DSQuery.exe is natively found in `C:\Windows\system32` and `C:\Windows\syswow64` and only on Server operating system.\ -The following DLL(s) are loaded when DSQuery.exe is launched `dsquery.dll`. If found loaded by another process, it is possible dsquery is running within that process context in memory.\ +description = The following analytic identifies "dsquery.exe" execution with arguments looking for `TrustedDomain` query directly on the command-line. This is typically indicative of an Administrator or adversary perform domain trust discovery. Note that this query does not identify any other variations of "Dsquery.exe" usage. \ +Within this detection, it is assumed `dsquery.exe` is not moved or renamed. \ +The search will return the first time and last time these command-line arguments were used for these executions, as well as the target system, the user, process "dsquery.exe" and its parent process. \ +DSQuery.exe is natively found in `C:\Windows\system32` and `C:\Windows\syswow64` and only on Server operating system. 
\ +The following DLL(s) are loaded when DSQuery.exe is launched `dsquery.dll`. If found loaded by another process, it is possible dsquery is running within that process context in memory. \ In addition to trust discovery, review parallel processes for additional behaviors performed. Identify the parent process and capture any files (batch files, for example) being used. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1482"], "nist": ["DE.CM"]} action.escu.data_models = ["Endpoint"] -action.escu.eli5 = The following analytic identifies "dsquery.exe" execution with arguments looking for `TrustedDomain` query directly on the command-line. This is typically indicative of an Administrator or adversary perform domain trust discovery. Note that this query does not identify any other variations of "Dsquery.exe" usage.\ -Within this detection, it is assumed `dsquery.exe` is not moved or renamed.\ -The search will return the first time and last time these command-line arguments were used for these executions, as well as the target system, the user, process "dsquery.exe" and its parent process.\ -DSQuery.exe is natively found in `C:\Windows\system32` and `C:\Windows\syswow64` and only on Server operating system.\ -The following DLL(s) are loaded when DSQuery.exe is launched `dsquery.dll`. If found loaded by another process, it is possible dsquery is running within that process context in memory.\ +action.escu.eli5 = The following analytic identifies "dsquery.exe" execution with arguments looking for `TrustedDomain` query directly on the command-line. This is typically indicative of an Administrator or adversary perform domain trust discovery. Note that this query does not identify any other variations of "Dsquery.exe" usage. \ +Within this detection, it is assumed `dsquery.exe` is not moved or renamed. \ +The search will return the first time and last time these command-line arguments were used for these executions, as well as the target system, the user, process "dsquery.exe" and its parent process. \ +DSQuery.exe is natively found in `C:\Windows\system32` and `C:\Windows\syswow64` and only on Server operating system. \ +The following DLL(s) are loaded when DSQuery.exe is launched `dsquery.dll`. If found loaded by another process, it is possible dsquery is running within that process context in memory. \ In addition to trust discovery, review parallel processes for additional behaviors performed. Identify the parent process and capture any files (batch files, for example) being used. action.escu.how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. action.escu.known_false_positives = Limited false positives. If there is a true false positive, filter based on command-line or parent process. 
@@ -26119,11 +26119,11 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "cc31603 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic identifies "dsquery.exe" execution with arguments looking for `TrustedDomain` query directly on the command-line. This is typically indicative of an Administrator or adversary perform domain trust discovery. Note that this query does not identify any other variations of "Dsquery.exe" usage.\ -Within this detection, it is assumed `dsquery.exe` is not moved or renamed.\ -The search will return the first time and last time these command-line arguments were used for these executions, as well as the target system, the user, process "dsquery.exe" and its parent process.\ -DSQuery.exe is natively found in `C:\Windows\system32` and `C:\Windows\syswow64` and only on Server operating system.\ -The following DLL(s) are loaded when DSQuery.exe is launched `dsquery.dll`. If found loaded by another process, it is possible dsquery is running within that process context in memory.\ +action.notable.param.rule_description = The following analytic identifies "dsquery.exe" execution with arguments looking for `TrustedDomain` query directly on the command-line. This is typically indicative of an Administrator or adversary perform domain trust discovery. Note that this query does not identify any other variations of "Dsquery.exe" usage. \ +Within this detection, it is assumed `dsquery.exe` is not moved or renamed. \ +The search will return the first time and last time these command-line arguments were used for these executions, as well as the target system, the user, process "dsquery.exe" and its parent process. \ +DSQuery.exe is natively found in `C:\Windows\system32` and `C:\Windows\syswow64` and only on Server operating system. \ +The following DLL(s) are loaded when DSQuery.exe is launched `dsquery.dll`. If found loaded by another process, it is possible dsquery is running within that process context in memory. \ In addition to trust discovery, review parallel processes for additional behaviors performed. Identify the parent process and capture any files (batch files, for example) being used. action.notable.param.rule_title = DSQuery Domain Discovery action.notable.param.security_domain = endpoint @@ -26189,11 +26189,11 @@ search = | tstats `security_content_summariesonly` count min(_time) as firstTime [ESCU - Dump LSASS via procdump - Rule] action.escu = 0 action.escu.enabled = 1 -description = Detect procdump.exe dumping the lsass process. This query looks for both -mm and -ma usage. -mm will produce a mini dump file and -ma will write a dump file with all process memory. Both are highly suspect and should be reviewed. This query does not monitor for the internal name (original_file_name=procdump) of the PE or look for procdump64.exe. Modify the query as needed.\ +description = Detect procdump.exe dumping the lsass process. This query looks for both -mm and -ma usage. -mm will produce a mini dump file and -ma will write a dump file with all process memory. Both are highly suspect and should be reviewed. This query does not monitor for the internal name (original_file_name=procdump) of the PE or look for procdump64.exe. Modify the query as needed. \ During triage, confirm this is procdump.exe executing. If it is the first time a Sysinternals utility has been ran, it is possible there will be a -accepteula on the command line. 
Review other endpoint data sources for cross process (injection) into lsass.exe. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1003.001", "T1003"], "nist": ["DE.CM"]} action.escu.data_models = ["Endpoint"] -action.escu.eli5 = Detect procdump.exe dumping the lsass process. This query looks for both -mm and -ma usage. -mm will produce a mini dump file and -ma will write a dump file with all process memory. Both are highly suspect and should be reviewed. This query does not monitor for the internal name (original_file_name=procdump) of the PE or look for procdump64.exe. Modify the query as needed.\ +action.escu.eli5 = Detect procdump.exe dumping the lsass process. This query looks for both -mm and -ma usage. -mm will produce a mini dump file and -ma will write a dump file with all process memory. Both are highly suspect and should be reviewed. This query does not monitor for the internal name (original_file_name=procdump) of the PE or look for procdump64.exe. Modify the query as needed. \ During triage, confirm this is procdump.exe executing. If it is the first time a Sysinternals utility has been ran, it is possible there will be a -accepteula on the command line. Review other endpoint data sources for cross process (injection) into lsass.exe. action.escu.how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. action.escu.known_false_positives = None identified. @@ -26220,7 +26220,7 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "3742ebf schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = Detect procdump.exe dumping the lsass process. This query looks for both -mm and -ma usage. -mm will produce a mini dump file and -ma will write a dump file with all process memory. Both are highly suspect and should be reviewed. This query does not monitor for the internal name (original_file_name=procdump) of the PE or look for procdump64.exe. Modify the query as needed.\ +action.notable.param.rule_description = Detect procdump.exe dumping the lsass process. This query looks for both -mm and -ma usage. -mm will produce a mini dump file and -ma will write a dump file with all process memory. Both are highly suspect and should be reviewed. This query does not monitor for the internal name (original_file_name=procdump) of the PE or look for procdump64.exe. Modify the query as needed. \ During triage, confirm this is procdump.exe executing. If it is the first time a Sysinternals utility has been ran, it is possible there will be a -accepteula on the command line. Review other endpoint data sources for cross process (injection) into lsass.exe. 
action.notable.param.rule_title = Dump LSASS via procdump action.notable.param.security_domain = endpoint @@ -27191,21 +27191,21 @@ search = | tstats `security_content_summariesonly` values(Processes.process) as [ESCU - Exchange PowerShell Abuse via SSRF - Rule] action.escu = 0 action.escu.enabled = 1 -description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splunk Threat Research Team. This means that the detection has been manually tested but we do not have the associated attack data to perform automated testing or cannot share this attack dataset due to its sensitive nature. If you have any questions feel free to email us at: research@splunk.com. This analytic identifies suspicious behavior related to ProxyShell against on-premise Microsoft Exchange servers. This analytic has been replaced by GUID d436f9e7-0ee7-4a47-864b-6dea2c4e2752 which utilizes the Web Datamodel.\ -Modification of this analytic is requried to ensure fields are mapped accordingly.\ -\ -A suspicious event will have `PowerShell`, the method `POST` and `autodiscover.json`. This is indicative of accessing PowerShell on the back end of Exchange with SSRF.\ -\ -An event will look similar to `POST /autodiscover/autodiscover.json a=dsxvu@fnsso.flq/powershell/?X-Rps-CAT=VgEAVAdXaW5kb3d...` (abbreviated)\ +description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splunk Threat Research Team. This means that the detection has been manually tested but we do not have the associated attack data to perform automated testing or cannot share this attack dataset due to its sensitive nature. If you have any questions feel free to email us at: research@splunk.com. This analytic identifies suspicious behavior related to ProxyShell against on-premise Microsoft Exchange servers. This analytic has been replaced by GUID d436f9e7-0ee7-4a47-864b-6dea2c4e2752 which utilizes the Web Datamodel. \ +Modification of this analytic is requried to ensure fields are mapped accordingly. \ + \ +A suspicious event will have `PowerShell`, the method `POST` and `autodiscover.json`. This is indicative of accessing PowerShell on the back end of Exchange with SSRF. \ + \ +An event will look similar to `POST /autodiscover/autodiscover.json a=dsxvu@fnsso.flq/powershell/?X-Rps-CAT=VgEAVAdXaW5kb3d...` (abbreviated) \ Review the source attempting to perform this activity against your environment. In addition, review PowerShell logs and access recently granted to Exchange roles. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Delivery", "Installation"], "mitre_attack": ["T1190", "T1133"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = This analytic identifies suspicious behavior related to ProxyShell against on-premise Microsoft Exchange servers. This analytic has been replaced by GUID d436f9e7-0ee7-4a47-864b-6dea2c4e2752 which utilizes the Web Datamodel.\ -Modification of this analytic is requried to ensure fields are mapped accordingly.\ -\ -A suspicious event will have `PowerShell`, the method `POST` and `autodiscover.json`. This is indicative of accessing PowerShell on the back end of Exchange with SSRF.\ -\ -An event will look similar to `POST /autodiscover/autodiscover.json a=dsxvu@fnsso.flq/powershell/?X-Rps-CAT=VgEAVAdXaW5kb3d...` (abbreviated)\ +action.escu.eli5 = This analytic identifies suspicious behavior related to ProxyShell against on-premise Microsoft Exchange servers. 
This analytic has been replaced by GUID d436f9e7-0ee7-4a47-864b-6dea2c4e2752 which utilizes the Web Datamodel. \ +Modification of this analytic is requried to ensure fields are mapped accordingly. \ + \ +A suspicious event will have `PowerShell`, the method `POST` and `autodiscover.json`. This is indicative of accessing PowerShell on the back end of Exchange with SSRF. \ + \ +An event will look similar to `POST /autodiscover/autodiscover.json a=dsxvu@fnsso.flq/powershell/?X-Rps-CAT=VgEAVAdXaW5kb3d...` (abbreviated) \ Review the source attempting to perform this activity against your environment. In addition, review PowerShell logs and access recently granted to Exchange roles. action.escu.how_to_implement = The following analytic requires on-premise Exchange to be logging to Splunk using the TA - https://splunkbase.splunk.com/app/3225. Ensure logs are parsed correctly, or tune the analytic for your environment. action.escu.known_false_positives = Limited false positives, however, tune as needed. @@ -27232,12 +27232,12 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "29228ab schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = This analytic identifies suspicious behavior related to ProxyShell against on-premise Microsoft Exchange servers. This analytic has been replaced by GUID d436f9e7-0ee7-4a47-864b-6dea2c4e2752 which utilizes the Web Datamodel.\ -Modification of this analytic is requried to ensure fields are mapped accordingly.\ -\ -A suspicious event will have `PowerShell`, the method `POST` and `autodiscover.json`. This is indicative of accessing PowerShell on the back end of Exchange with SSRF.\ -\ -An event will look similar to `POST /autodiscover/autodiscover.json a=dsxvu@fnsso.flq/powershell/?X-Rps-CAT=VgEAVAdXaW5kb3d...` (abbreviated)\ +action.notable.param.rule_description = This analytic identifies suspicious behavior related to ProxyShell against on-premise Microsoft Exchange servers. This analytic has been replaced by GUID d436f9e7-0ee7-4a47-864b-6dea2c4e2752 which utilizes the Web Datamodel. \ +Modification of this analytic is requried to ensure fields are mapped accordingly. \ + \ +A suspicious event will have `PowerShell`, the method `POST` and `autodiscover.json`. This is indicative of accessing PowerShell on the back end of Exchange with SSRF. \ + \ +An event will look similar to `POST /autodiscover/autodiscover.json a=dsxvu@fnsso.flq/powershell/?X-Rps-CAT=VgEAVAdXaW5kb3d...` (abbreviated) \ Review the source attempting to perform this activity against your environment. In addition, review PowerShell logs and access recently granted to Exchange roles. action.notable.param.rule_title = Exchange PowerShell Abuse via SSRF action.notable.param.security_domain = endpoint @@ -27256,17 +27256,17 @@ search = `exchange` c_uri="*//autodiscover*" cs_uri_query="*PowerShell*" cs_meth [ESCU - Exchange PowerShell Module Usage - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic identifies the usage of Exchange PowerShell modules that were recently used for a proof of concept related to ProxyShell. 
Adversaries may abuse a limited set of PwSh Modules related to Exchange once gained access via ProxyShell or ProxyNotShell.\ -Inherently, the usage of the modules is not malicious, but reviewing parallel processes, and user, of the session will assist with determining the intent.\ -Module - New-MailboxExportRequest will begin the process of exporting contents of a primary mailbox or archive to a .pst file.\ -Module - New-managementroleassignment can assign a management role to a management role group, management role assignment policy, user, or universal security group (USG).\ +description = The following analytic identifies the usage of Exchange PowerShell modules that were recently used for a proof of concept related to ProxyShell. Adversaries may abuse a limited set of PwSh Modules related to Exchange once gained access via ProxyShell or ProxyNotShell. \ +Inherently, the usage of the modules is not malicious, but reviewing parallel processes, and user, of the session will assist with determining the intent. \ +Module - New-MailboxExportRequest will begin the process of exporting contents of a primary mailbox or archive to a .pst file. \ +Module - New-managementroleassignment can assign a management role to a management role group, management role assignment policy, user, or universal security group (USG). \ Module - New-MailboxSearch cmdlet to create a mailbox search and either get an estimate of search results, place search results on In-Place Hold or copy them to a Discovery mailbox. You can also place all contents in a mailbox on hold by not specifying a search query, which accomplishes similar results as Litigation Hold. \ Module - Get-Recipient cmdlet to view existing recipient objects in your organization. This cmdlet returns all mail-enabled objects (for example, mailboxes, mail users, mail contacts, and distribution groups). action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1059", "T1059.001"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic identifies the usage of Exchange PowerShell modules that were recently used for a proof of concept related to ProxyShell. Adversaries may abuse a limited set of PwSh Modules related to Exchange once gained access via ProxyShell or ProxyNotShell.\ -Inherently, the usage of the modules is not malicious, but reviewing parallel processes, and user, of the session will assist with determining the intent.\ -Module - New-MailboxExportRequest will begin the process of exporting contents of a primary mailbox or archive to a .pst file.\ -Module - New-managementroleassignment can assign a management role to a management role group, management role assignment policy, user, or universal security group (USG).\ +action.escu.eli5 = The following analytic identifies the usage of Exchange PowerShell modules that were recently used for a proof of concept related to ProxyShell. Adversaries may abuse a limited set of PwSh Modules related to Exchange once gained access via ProxyShell or ProxyNotShell. \ +Inherently, the usage of the modules is not malicious, but reviewing parallel processes, and user, of the session will assist with determining the intent. \ +Module - New-MailboxExportRequest will begin the process of exporting contents of a primary mailbox or archive to a .pst file. \ +Module - New-managementroleassignment can assign a management role to a management role group, management role assignment policy, user, or universal security group (USG). 
\ Module - New-MailboxSearch cmdlet to create a mailbox search and either get an estimate of search results, place search results on In-Place Hold or copy them to a Discovery mailbox. You can also place all contents in a mailbox on hold by not specifying a search query, which accomplishes similar results as Litigation Hold. \ Module - Get-Recipient cmdlet to view existing recipient objects in your organization. This cmdlet returns all mail-enabled objects (for example, mailboxes, mail users, mail contacts, and distribution groups). action.escu.how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. action.escu.known_false_positives = Administrators or power users may use this PowerShell commandlet for troubleshooting. @@ -27293,10 +27293,10 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "2d10095 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic identifies the usage of Exchange PowerShell modules that were recently used for a proof of concept related to ProxyShell. Adversaries may abuse a limited set of PwSh Modules related to Exchange once gained access via ProxyShell or ProxyNotShell.\ -Inherently, the usage of the modules is not malicious, but reviewing parallel processes, and user, of the session will assist with determining the intent.\ -Module - New-MailboxExportRequest will begin the process of exporting contents of a primary mailbox or archive to a .pst file.\ -Module - New-managementroleassignment can assign a management role to a management role group, management role assignment policy, user, or universal security group (USG).\ +action.notable.param.rule_description = The following analytic identifies the usage of Exchange PowerShell modules that were recently used for a proof of concept related to ProxyShell. Adversaries may abuse a limited set of PwSh Modules related to Exchange once gained access via ProxyShell or ProxyNotShell. \ +Inherently, the usage of the modules is not malicious, but reviewing parallel processes, and user, of the session will assist with determining the intent. \ +Module - New-MailboxExportRequest will begin the process of exporting contents of a primary mailbox or archive to a .pst file. \ +Module - New-managementroleassignment can assign a management role to a management role group, management role assignment policy, user, or universal security group (USG). \ Module - New-MailboxSearch cmdlet to create a mailbox search and either get an estimate of search results, place search results on In-Place Hold or copy them to a Discovery mailbox. You can also place all contents in a mailbox on hold by not specifying a search query, which accomplishes similar results as Litigation Hold. \ Module - Get-Recipient cmdlet to view existing recipient objects in your organization. This cmdlet returns all mail-enabled objects (for example, mailboxes, mail users, mail contacts, and distribution groups). action.notable.param.rule_title = Exchange PowerShell Module Usage action.notable.param.security_domain = endpoint @@ -27714,17 +27714,17 @@ search = `wineventlog_system` EventCode=7036 | rex field=Message "The (?Polices->Windows Settings->Security Settings->Advanced Audit Policy Configuration. 
Expand this node, go to Object Access (Audit Polices->Object Access), then select the Setting Audit Detailed File Share Audit\ -It is possible this is not enabled by default and may need to be reviewed and enabled.\ -\ +description = The following analytic utilizes Windows Event Code 5145, "A network share object was checked to see whether client can be granted desired access". During our research into PetitPotam, CVE-2021-36942, we identified the ocurrence of this event on the target host with specific values. \ +To enable 5145 events via Group Policy - Computer Configuration->Polices->Windows Settings->Security Settings->Advanced Audit Policy Configuration. Expand this node, go to Object Access (Audit Polices->Object Access), then select the Setting Audit Detailed File Share Audit \ +It is possible this is not enabled by default and may need to be reviewed and enabled. \ + \ During triage, review parallel security events to identify further suspicious activity. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1187"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic utilizes Windows Event Code 5145, "A network share object was checked to see whether client can be granted desired access". During our research into PetitPotam, CVE-2021-36942, we identified the ocurrence of this event on the target host with specific values.\ -To enable 5145 events via Group Policy - Computer Configuration->Polices->Windows Settings->Security Settings->Advanced Audit Policy Configuration. Expand this node, go to Object Access (Audit Polices->Object Access), then select the Setting Audit Detailed File Share Audit\ -It is possible this is not enabled by default and may need to be reviewed and enabled.\ -\ +action.escu.eli5 = The following analytic utilizes Windows Event Code 5145, "A network share object was checked to see whether client can be granted desired access". During our research into PetitPotam, CVE-2021-36942, we identified the ocurrence of this event on the target host with specific values. \ +To enable 5145 events via Group Policy - Computer Configuration->Polices->Windows Settings->Security Settings->Advanced Audit Policy Configuration. Expand this node, go to Object Access (Audit Polices->Object Access), then select the Setting Audit Detailed File Share Audit \ +It is possible this is not enabled by default and may need to be reviewed and enabled. \ + \ During triage, review parallel security events to identify further suspicious activity. action.escu.how_to_implement = Windows Event Code 5145 is required to utilize this analytic and it may not be enabled in most environments. action.escu.known_false_positives = False positives have been limited when the Anonymous Logon is used for Account Name. @@ -37463,10 +37463,10 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "95b8061 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic utilizes Windows Event Code 5145, "A network share object was checked to see whether client can be granted desired access". During our research into PetitPotam, CVE-2021-36942, we identified the ocurrence of this event on the target host with specific values.\ -To enable 5145 events via Group Policy - Computer Configuration->Polices->Windows Settings->Security Settings->Advanced Audit Policy Configuration. 
Expand this node, go to Object Access (Audit Polices->Object Access), then select the Setting Audit Detailed File Share Audit\ -It is possible this is not enabled by default and may need to be reviewed and enabled.\ -\ +action.notable.param.rule_description = The following analytic utilizes Windows Event Code 5145, "A network share object was checked to see whether client can be granted desired access". During our research into PetitPotam, CVE-2021-36942, we identified the ocurrence of this event on the target host with specific values. \ +To enable 5145 events via Group Policy - Computer Configuration->Polices->Windows Settings->Security Settings->Advanced Audit Policy Configuration. Expand this node, go to Object Access (Audit Polices->Object Access), then select the Setting Audit Detailed File Share Audit \ +It is possible this is not enabled by default and may need to be reviewed and enabled. \ + \ During triage, review parallel security events to identify further suspicious activity. action.notable.param.rule_title = PetitPotam Network Share Access Request action.notable.param.security_domain = endpoint @@ -37609,11 +37609,11 @@ search = | tstats `security_content_summariesonly` count min(_time) as firstTime [ESCU - Possible Lateral Movement PowerShell Spawn - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic is designed to identify possible lateral movement attacks that involve the spawning of a PowerShell process as a child or grandchild process of commonly abused processes. These processes include services.exe, wmiprsve.exe, svchost.exe, wsmprovhost.exe, and mmc.exe.\ +description = The following analytic is designed to identify possible lateral movement attacks that involve the spawning of a PowerShell process as a child or grandchild process of commonly abused processes. These processes include services.exe, wmiprsve.exe, svchost.exe, wsmprovhost.exe, and mmc.exe. \ Such behavior is indicative of legitimate Windows features such as the Service Control Manager, Windows Management Instrumentation, Task Scheduler, Windows Remote Management, and the DCOM protocol being abused to start a process on a remote endpoint. This behavior is often seen during lateral movement techniques where adversaries or red teams abuse these services for lateral movement and remote code execution. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1021", "T1021.003", "T1021.006", "T1047", "T1053.005", "T1543.003", "T1059.001", "T1218.014"], "nist": ["DE.CM"]} action.escu.data_models = ["Endpoint"] -action.escu.eli5 = The following analytic is designed to identify possible lateral movement attacks that involve the spawning of a PowerShell process as a child or grandchild process of commonly abused processes. These processes include services.exe, wmiprsve.exe, svchost.exe, wsmprovhost.exe, and mmc.exe.\ +action.escu.eli5 = The following analytic is designed to identify possible lateral movement attacks that involve the spawning of a PowerShell process as a child or grandchild process of commonly abused processes. These processes include services.exe, wmiprsve.exe, svchost.exe, wsmprovhost.exe, and mmc.exe. \ Such behavior is indicative of legitimate Windows features such as the Service Control Manager, Windows Management Instrumentation, Task Scheduler, Windows Remote Management, and the DCOM protocol being abused to start a process on a remote endpoint. 
This behavior is often seen during lateral movement techniques where adversaries or red teams abuse these services for lateral movement and remote code execution. action.escu.how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. action.escu.known_false_positives = Legitimate applications may spawn PowerShell as a child process of the the identified processes. Filter as needed. @@ -37640,7 +37640,7 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "cb909b3 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic is designed to identify possible lateral movement attacks that involve the spawning of a PowerShell process as a child or grandchild process of commonly abused processes. These processes include services.exe, wmiprsve.exe, svchost.exe, wsmprovhost.exe, and mmc.exe.\ +action.notable.param.rule_description = The following analytic is designed to identify possible lateral movement attacks that involve the spawning of a PowerShell process as a child or grandchild process of commonly abused processes. These processes include services.exe, wmiprsve.exe, svchost.exe, wsmprovhost.exe, and mmc.exe. \ Such behavior is indicative of legitimate Windows features such as the Service Control Manager, Windows Management Instrumentation, Task Scheduler, Windows Remote Management, and the DCOM protocol being abused to start a process on a remote endpoint. This behavior is often seen during lateral movement techniques where adversaries or red teams abuse these services for lateral movement and remote code execution. action.notable.param.rule_title = Possible Lateral Movement PowerShell Spawn action.notable.param.security_domain = endpoint @@ -37949,15 +37949,15 @@ search = | tstats `security_content_summariesonly` count min(_time) as firstTime [ESCU - PowerShell Domain Enumeration - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies specific PowerShell modules typically used to enumerate an organizations domain or users.\ +description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. 
\ + \ +This analytic identifies specific PowerShell modules typically used to enumerate an organizations domain or users. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1059", "T1059.001"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies specific PowerShell modules typically used to enumerate an organizations domain or users.\ +action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies specific PowerShell modules typically used to enumerate an organizations domain or users. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. action.escu.known_false_positives = It is possible there will be false positives, filter as needed. @@ -37984,9 +37984,9 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "e1866ce schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies specific PowerShell modules typically used to enumerate an organizations domain or users.\ +action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies specific PowerShell modules typically used to enumerate an organizations domain or users. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. 
action.notable.param.rule_title = PowerShell Domain Enumeration action.notable.param.security_domain = endpoint @@ -38140,17 +38140,17 @@ search = `powershell` EventCode=4104 ScriptBlockText = "*CreateInstance([type]:: [ESCU - Powershell Fileless Process Injection via GetProcAddress - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all.\ -\ -This analytic identifies `GetProcAddress` in the script block. This is not normal to be used by most PowerShell scripts and is typically unsafe/malicious. Many attack toolkits use GetProcAddress to obtain code execution.\ -In use, `$var_gpa = $var_unsafe_native_methods.GetMethod(GetProcAddress` and later referenced/executed elsewhere.\ +description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all. \ + \ +This analytic identifies `GetProcAddress` in the script block. This is not normal to be used by most PowerShell scripts and is typically unsafe/malicious. Many attack toolkits use GetProcAddress to obtain code execution. \ +In use, `$var_gpa = $var_unsafe_native_methods.GetMethod(GetProcAddress` and later referenced/executed elsewhere. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1059", "T1055", "T1059.001"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all.\ -\ -This analytic identifies `GetProcAddress` in the script block. This is not normal to be used by most PowerShell scripts and is typically unsafe/malicious. Many attack toolkits use GetProcAddress to obtain code execution.\ -In use, `$var_gpa = $var_unsafe_native_methods.GetMethod(GetProcAddress` and later referenced/executed elsewhere.\ +action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all. \ + \ +This analytic identifies `GetProcAddress` in the script block. This is not normal to be used by most PowerShell scripts and is typically unsafe/malicious. Many attack toolkits use GetProcAddress to obtain code execution. \ +In use, `$var_gpa = $var_unsafe_native_methods.GetMethod(GetProcAddress` and later referenced/executed elsewhere. 
\ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. action.escu.known_false_positives = Limited false positives. Filter as needed. @@ -38177,10 +38177,10 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "a26d9db schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all.\ -\ -This analytic identifies `GetProcAddress` in the script block. This is not normal to be used by most PowerShell scripts and is typically unsafe/malicious. Many attack toolkits use GetProcAddress to obtain code execution.\ -In use, `$var_gpa = $var_unsafe_native_methods.GetMethod(GetProcAddress` and later referenced/executed elsewhere.\ +action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all. \ + \ +This analytic identifies `GetProcAddress` in the script block. This is not normal to be used by most PowerShell scripts and is typically unsafe/malicious. Many attack toolkits use GetProcAddress to obtain code execution. \ +In use, `$var_gpa = $var_unsafe_native_methods.GetMethod(GetProcAddress` and later referenced/executed elsewhere. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.notable.param.rule_title = Powershell Fileless Process Injection via GetProcAddress action.notable.param.security_domain = endpoint @@ -38199,19 +38199,19 @@ search = `powershell` EventCode=4104 ScriptBlockText=*getprocaddress* | stats co [ESCU - Powershell Fileless Script Contains Base64 Encoded Content - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies `FromBase64String` within the script block. A typical malicious instance will include additional code.\ -Command example - `[Byte[]]$var_code = [System.Convert]::FromBase64String(38uqIyMjQ6rG....`\ -\ +description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. 
Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies `FromBase64String` within the script block. A typical malicious instance will include additional code. \ +Command example - `[Byte[]]$var_code = [System.Convert]::FromBase64String(38uqIyMjQ6rG....` \ + \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1059", "T1027", "T1059.001"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies `FromBase64String` within the script block. A typical malicious instance will include additional code.\ -Command example - `[Byte[]]$var_code = [System.Convert]::FromBase64String(38uqIyMjQ6rG....`\ -\ +action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies `FromBase64String` within the script block. A typical malicious instance will include additional code. \ +Command example - `[Byte[]]$var_code = [System.Convert]::FromBase64String(38uqIyMjQ6rG....` \ + \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. action.escu.known_false_positives = False positives should be limited. Filter as needed. @@ -38238,11 +38238,11 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "8acbc04 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies `FromBase64String` within the script block. 
A typical malicious instance will include additional code.\ -Command example - `[Byte[]]$var_code = [System.Convert]::FromBase64String(38uqIyMjQ6rG....`\ -\ +action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies `FromBase64String` within the script block. A typical malicious instance will include additional code. \ +Command example - `[Byte[]]$var_code = [System.Convert]::FromBase64String(38uqIyMjQ6rG....` \ + \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.notable.param.rule_title = Powershell Fileless Script Contains Base64 Encoded Content action.notable.param.security_domain = endpoint @@ -38297,15 +38297,15 @@ search = | tstats `security_content_summariesonly` count min(_time) as firstTime [ESCU - Powershell Get LocalGroup Discovery with Script Block Logging - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies PowerShell cmdlet - `get-localgroup` being ran. Typically, by itself, is not malicious but may raise suspicion based on time of day, endpoint and username.\ +description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies PowerShell cmdlet - `get-localgroup` being ran. Typically, by itself, is not malicious but may raise suspicion based on time of day, endpoint and username. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1069", "T1069.001"], "nist": ["DE.AE"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies PowerShell cmdlet - `get-localgroup` being ran. Typically, by itself, is not malicious but may raise suspicion based on time of day, endpoint and username.\ +action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. 
Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies PowerShell cmdlet - `get-localgroup` being ran. Typically, by itself, is not malicious but may raise suspicion based on time of day, endpoint and username. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. action.escu.known_false_positives = False positives may be present. Tune as needed. @@ -38427,15 +38427,15 @@ search = `powershell` EventCode=4104 ScriptBlockText IN ("*invoke-wmiexec*") | s [ESCU - Powershell Load Module in Meterpreter - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies "MSF.Powershell","MSF.Powershell.Meterpreter","MSF.Powershell.Meterpreter.Kiwi","MSF.Powershell.Meterpreter.Transport" being used. This behavior is related to when a Meterpreter session is started and the operator runs load_kiwi.\ +description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies "MSF.Powershell","MSF.Powershell.Meterpreter","MSF.Powershell.Meterpreter.Kiwi","MSF.Powershell.Meterpreter.Transport" being used. This behavior is related to when a Meterpreter session is started and the operator runs load_kiwi. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1059", "T1059.001"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies "MSF.Powershell","MSF.Powershell.Meterpreter","MSF.Powershell.Meterpreter.Kiwi","MSF.Powershell.Meterpreter.Transport" being used. This behavior is related to when a Meterpreter session is started and the operator runs load_kiwi.\ +action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. 
Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies "MSF.Powershell","MSF.Powershell.Meterpreter","MSF.Powershell.Meterpreter.Kiwi","MSF.Powershell.Meterpreter.Transport" being used. This behavior is related to when a Meterpreter session is started and the operator runs load_kiwi. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.how_to_implement = The following analytic requires PowerShell operational logs to be imported. Modify the powershell macro as needed to match the sourcetype or add index. This analytic is specific to 4104, or PowerShell Script Block Logging. action.escu.known_false_positives = False positives should be very limited as this is strict to MetaSploit behavior. @@ -38462,9 +38462,9 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "d5905da schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies "MSF.Powershell","MSF.Powershell.Meterpreter","MSF.Powershell.Meterpreter.Kiwi","MSF.Powershell.Meterpreter.Transport" being used. This behavior is related to when a Meterpreter session is started and the operator runs load_kiwi.\ +action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies "MSF.Powershell","MSF.Powershell.Meterpreter","MSF.Powershell.Meterpreter.Kiwi","MSF.Powershell.Meterpreter.Transport" being used. This behavior is related to when a Meterpreter session is started and the operator runs load_kiwi. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.notable.param.rule_title = Powershell Load Module in Meterpreter action.notable.param.security_domain = endpoint @@ -38483,15 +38483,15 @@ search = `powershell` EventCode=4104 ScriptBlockText IN ("*MSF.Powershell*","*MS [ESCU - PowerShell Loading DotNET into Memory via Reflection - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all.\ -\ -This analytic identifies the use of PowerShell loading .net assembly via reflection. 
This is commonly found in malicious PowerShell usage, including Empire and Cobalt Strike. In addition, the `load(` value may be modifed by removing `(` and it will identify more events to review.\ +description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all. \ + \ +This analytic identifies the use of PowerShell loading .net assembly via reflection. This is commonly found in malicious PowerShell usage, including Empire and Cobalt Strike. In addition, the `load(` value may be modifed by removing `(` and it will identify more events to review. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1059", "T1059.001"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all.\ -\ -This analytic identifies the use of PowerShell loading .net assembly via reflection. This is commonly found in malicious PowerShell usage, including Empire and Cobalt Strike. In addition, the `load(` value may be modifed by removing `(` and it will identify more events to review.\ +action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all. \ + \ +This analytic identifies the use of PowerShell loading .net assembly via reflection. This is commonly found in malicious PowerShell usage, including Empire and Cobalt Strike. In addition, the `load(` value may be modifed by removing `(` and it will identify more events to review. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. action.escu.known_false_positives = False positives should be limited as day to day scripts do not use this method. @@ -38518,9 +38518,9 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "85bc3f3 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. 
Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all.\ -\ -This analytic identifies the use of PowerShell loading .net assembly via reflection. This is commonly found in malicious PowerShell usage, including Empire and Cobalt Strike. In addition, the `load(` value may be modifed by removing `(` and it will identify more events to review.\ +action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable no critical endpoints or all. \ + \ +This analytic identifies the use of PowerShell loading .net assembly via reflection. This is commonly found in malicious PowerShell usage, including Empire and Cobalt Strike. In addition, the `load(` value may be modifed by removing `(` and it will identify more events to review. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.notable.param.rule_title = PowerShell Loading DotNET into Memory via Reflection action.notable.param.security_domain = endpoint @@ -39097,15 +39097,15 @@ search = | tstats `security_content_summariesonly` count min(_time) as firstTim [ESCU - Print Spooler Adding A Printer Driver - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic identifies new printer drivers being load by utilizing the Windows PrintService operational logs, EventCode 316. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare.\ -\ -Within the proof of concept code, the following event will occur - "Printer driver 1234 for Windows x64 Version-3 was added or updated. Files:- UNIDRV.DLL, kernelbase.dll, evil.dll. No user action is required."\ +description = The following analytic identifies new printer drivers being load by utilizing the Windows PrintService operational logs, EventCode 316. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare. \ + \ +Within the proof of concept code, the following event will occur - "Printer driver 1234 for Windows x64 Version-3 was added or updated. Files:- UNIDRV.DLL, kernelbase.dll, evil.dll. No user action is required." \ During triage, isolate the endpoint and review for source of exploitation. Capture any additional file modification events and review the source of where the exploitation began. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1547.012", "T1547"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic identifies new printer drivers being load by utilizing the Windows PrintService operational logs, EventCode 316. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare.\ -\ -Within the proof of concept code, the following event will occur - "Printer driver 1234 for Windows x64 Version-3 was added or updated. Files:- UNIDRV.DLL, kernelbase.dll, evil.dll. 
No user action is required."\ +action.escu.eli5 = The following analytic identifies new printer drivers being load by utilizing the Windows PrintService operational logs, EventCode 316. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare. \ + \ +Within the proof of concept code, the following event will occur - "Printer driver 1234 for Windows x64 Version-3 was added or updated. Files:- UNIDRV.DLL, kernelbase.dll, evil.dll. No user action is required." \ During triage, isolate the endpoint and review for source of exploitation. Capture any additional file modification events and review the source of where the exploitation began. action.escu.how_to_implement = You will need to ensure PrintService Admin and Operational logs are being logged to Splunk from critical or all systems. action.escu.known_false_positives = Unknown. This may require filtering. @@ -39132,9 +39132,9 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "313681a schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic identifies new printer drivers being load by utilizing the Windows PrintService operational logs, EventCode 316. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare.\ -\ -Within the proof of concept code, the following event will occur - "Printer driver 1234 for Windows x64 Version-3 was added or updated. Files:- UNIDRV.DLL, kernelbase.dll, evil.dll. No user action is required."\ +action.notable.param.rule_description = The following analytic identifies new printer drivers being load by utilizing the Windows PrintService operational logs, EventCode 316. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare. \ + \ +Within the proof of concept code, the following event will occur - "Printer driver 1234 for Windows x64 Version-3 was added or updated. Files:- UNIDRV.DLL, kernelbase.dll, evil.dll. No user action is required." \ During triage, isolate the endpoint and review for source of exploitation. Capture any additional file modification events and review the source of where the exploitation began. action.notable.param.rule_title = Print Spooler Adding A Printer Driver action.notable.param.security_domain = endpoint @@ -39153,15 +39153,15 @@ search = `printservice` EventCode=316 category = "Adding a printer driver" Messa [ESCU - Print Spooler Failed to Load a Plug-in - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic identifies driver load errors utilizing the Windows PrintService Admin logs. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare.\ -Within the proof of concept code, the following error will occur - "The print spooler failed to load a plug-in module C:\Windows\system32\spool\DRIVERS\x64\3\meterpreter.dll, error code 0x45A. See the event user data for context information."\ -The analytic is based on file path and failure to load the plug-in.\ +description = The following analytic identifies driver load errors utilizing the Windows PrintService Admin logs. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare. \ +Within the proof of concept code, the following error will occur - "The print spooler failed to load a plug-in module C:\Windows\system32\spool\DRIVERS\x64\3\meterpreter.dll, error code 0x45A. 
See the event user data for context information." \ +The analytic is based on file path and failure to load the plug-in. \ During triage, isolate the endpoint and review for source of exploitation. Capture any additional file modification events. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1547.012", "T1547"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic identifies driver load errors utilizing the Windows PrintService Admin logs. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare.\ -Within the proof of concept code, the following error will occur - "The print spooler failed to load a plug-in module C:\Windows\system32\spool\DRIVERS\x64\3\meterpreter.dll, error code 0x45A. See the event user data for context information."\ -The analytic is based on file path and failure to load the plug-in.\ +action.escu.eli5 = The following analytic identifies driver load errors utilizing the Windows PrintService Admin logs. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare. \ +Within the proof of concept code, the following error will occur - "The print spooler failed to load a plug-in module C:\Windows\system32\spool\DRIVERS\x64\3\meterpreter.dll, error code 0x45A. See the event user data for context information." \ +The analytic is based on file path and failure to load the plug-in. \ During triage, isolate the endpoint and review for source of exploitation. Capture any additional file modification events. action.escu.how_to_implement = You will need to ensure PrintService Admin and Operational logs are being logged to Splunk from critical or all systems. action.escu.known_false_positives = False positives are unknown and filtering may be required. @@ -39188,9 +39188,9 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "1adc954 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic identifies driver load errors utilizing the Windows PrintService Admin logs. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare.\ -Within the proof of concept code, the following error will occur - "The print spooler failed to load a plug-in module C:\Windows\system32\spool\DRIVERS\x64\3\meterpreter.dll, error code 0x45A. See the event user data for context information."\ -The analytic is based on file path and failure to load the plug-in.\ +action.notable.param.rule_description = The following analytic identifies driver load errors utilizing the Windows PrintService Admin logs. This was identified during our testing of CVE-2021-34527 previously (CVE-2021-1675) or PrintNightmare. \ +Within the proof of concept code, the following error will occur - "The print spooler failed to load a plug-in module C:\Windows\system32\spool\DRIVERS\x64\3\meterpreter.dll, error code 0x45A. See the event user data for context information." \ +The analytic is based on file path and failure to load the plug-in. \ During triage, isolate the endpoint and review for source of exploitation. Capture any additional file modification events. 
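Illustrative aside, not part of the generated stanza above: the search body for this rule is not visible in this hunk, so the following is only a minimal SPL sketch of the kind of query the description implies. It assumes the `printservice` macro used by the neighbouring "Print Spooler Adding A Printer Driver" rule, assumes the PrintService Admin channel records the plug-in load failure as EventCode 808, and treats the field names (dest, plugin_dll, error_code) as placeholders rather than confirmed extractions.

    `printservice` EventCode=808 plugin_dll="*\\spool\\DRIVERS\\*"
    | stats count min(_time) as firstTime max(_time) as lastTime by dest, plugin_dll, error_code
    | convert ctime(firstTime) ctime(lastTime)

Any hit would be triaged as the description suggests: isolate the endpoint and review surrounding file modification events for the dropped DLL.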
action.notable.param.rule_title = Print Spooler Failed to Load a Plug-in action.notable.param.security_domain = endpoint @@ -41747,15 +41747,15 @@ search = `sysmon` EventCode=11 TargetFilename = "*\\Windows\\SchCache\\*" Targe [ESCU - Schedule Task with HTTP Command Arguments - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic detects the registration of suspicious tasks on Windows using the Windows Security EventCode 4698, "A scheduled task was created." It specifically looks for tasks registered through schtasks.exe or TaskService that have command arguments containing the string "HTTP." This behavior is often associated with malware or attacks that utilize Living off the Land binaries (lolbins) to download additional files or payloads to the compromised machine.\ -The search returns information about the task, such as the task name, command, author, enabled status, hidden status, and arguments. Upon triage, it is important to identify the source of the scheduled task, whether it was registered through schtasks.exe or TaskService. Review the details of the created task and the command to be executed. Capture relevant artifacts on disk and examine them. Additionally, identify any parallel processes occurring within the same timeframe to determine the source of the attack.\ -Implementing this analytic requires ingesting logs with information about task schedules, specifically Windows Security Log EventCode 4698, from your endpoints. It is recommended to tune and filter known instances of task schedules used in your environment to minimize false positives.\ +description = The following analytic detects the registration of suspicious tasks on Windows using the Windows Security EventCode 4698, "A scheduled task was created." It specifically looks for tasks registered through schtasks.exe or TaskService that have command arguments containing the string "HTTP." This behavior is often associated with malware or attacks that utilize Living off the Land binaries (lolbins) to download additional files or payloads to the compromised machine. \ +The search returns information about the task, such as the task name, command, author, enabled status, hidden status, and arguments. Upon triage, it is important to identify the source of the scheduled task, whether it was registered through schtasks.exe or TaskService. Review the details of the created task and the command to be executed. Capture relevant artifacts on disk and examine them. Additionally, identify any parallel processes occurring within the same timeframe to determine the source of the attack. \ +Implementing this analytic requires ingesting logs with information about task schedules, specifically Windows Security Log EventCode 4698, from your endpoints. It is recommended to tune and filter known instances of task schedules used in your environment to minimize false positives. \ Detecting the registration of suspicious tasks with HTTP command arguments is valuable for a SOC as it indicates potential malicious activity or an attempt to establish persistence on the system. If a true positive is found, further investigation is warranted to analyze the nature and purpose of the scheduled task, identify any downloaded files or payloads, and mitigate the associated risks. The impact of a true positive can vary but may include data exfiltration, malware propagation, or unauthorized access to sensitive information. 
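Illustrative aside, not part of the generated stanza: a minimal SPL sketch of the detection this description summarises, modelled on the `wineventlog_security` EventCode=4698 search fragment that appears later in this file. The extracted field names (Task_Name, Command, Arguments, Author, Enabled, Hidden, dest) are assumptions inferred from the description's wording, not confirmed extractions.

    `wineventlog_security` EventCode=4698
    | xmlkv Message
    | search Arguments="*http*"
    | stats count min(_time) as firstTime max(_time) as lastTime by dest, Task_Name, Command, Arguments, Author, Enabled, Hidden
    | convert ctime(firstTime) ctime(lastTime)

Tightening the Arguments filter (for example to "*http://*" and "*https://*") trades some recall for fewer benign hits from tasks that merely mention HTTP in a path or comment.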
action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic detects the registration of suspicious tasks on Windows using the Windows Security EventCode 4698, "A scheduled task was created." It specifically looks for tasks registered through schtasks.exe or TaskService that have command arguments containing the string "HTTP." This behavior is often associated with malware or attacks that utilize Living off the Land binaries (lolbins) to download additional files or payloads to the compromised machine.\ -The search returns information about the task, such as the task name, command, author, enabled status, hidden status, and arguments. Upon triage, it is important to identify the source of the scheduled task, whether it was registered through schtasks.exe or TaskService. Review the details of the created task and the command to be executed. Capture relevant artifacts on disk and examine them. Additionally, identify any parallel processes occurring within the same timeframe to determine the source of the attack.\ -Implementing this analytic requires ingesting logs with information about task schedules, specifically Windows Security Log EventCode 4698, from your endpoints. It is recommended to tune and filter known instances of task schedules used in your environment to minimize false positives.\ +action.escu.eli5 = The following analytic detects the registration of suspicious tasks on Windows using the Windows Security EventCode 4698, "A scheduled task was created." It specifically looks for tasks registered through schtasks.exe or TaskService that have command arguments containing the string "HTTP." This behavior is often associated with malware or attacks that utilize Living off the Land binaries (lolbins) to download additional files or payloads to the compromised machine. \ +The search returns information about the task, such as the task name, command, author, enabled status, hidden status, and arguments. Upon triage, it is important to identify the source of the scheduled task, whether it was registered through schtasks.exe or TaskService. Review the details of the created task and the command to be executed. Capture relevant artifacts on disk and examine them. Additionally, identify any parallel processes occurring within the same timeframe to determine the source of the attack. \ +Implementing this analytic requires ingesting logs with information about task schedules, specifically Windows Security Log EventCode 4698, from your endpoints. It is recommended to tune and filter known instances of task schedules used in your environment to minimize false positives. \ Detecting the registration of suspicious tasks with HTTP command arguments is valuable for a SOC as it indicates potential malicious activity or an attempt to establish persistence on the system. If a true positive is found, further investigation is warranted to analyze the nature and purpose of the scheduled task, identify any downloaded files or payloads, and mitigate the associated risks. The impact of a true positive can vary but may include data exfiltration, malware propagation, or unauthorized access to sensitive information. action.escu.how_to_implement = To successfully implement this search, you need to be ingesting logs with the task schedule (Exa. Security Log EventCode 4698) endpoints. Tune and filter known instances of Task schedule used in your environment. 
action.escu.known_false_positives = unknown @@ -41782,9 +41782,9 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "523c268 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic detects the registration of suspicious tasks on Windows using the Windows Security EventCode 4698, "A scheduled task was created." It specifically looks for tasks registered through schtasks.exe or TaskService that have command arguments containing the string "HTTP." This behavior is often associated with malware or attacks that utilize Living off the Land binaries (lolbins) to download additional files or payloads to the compromised machine.\ -The search returns information about the task, such as the task name, command, author, enabled status, hidden status, and arguments. Upon triage, it is important to identify the source of the scheduled task, whether it was registered through schtasks.exe or TaskService. Review the details of the created task and the command to be executed. Capture relevant artifacts on disk and examine them. Additionally, identify any parallel processes occurring within the same timeframe to determine the source of the attack.\ -Implementing this analytic requires ingesting logs with information about task schedules, specifically Windows Security Log EventCode 4698, from your endpoints. It is recommended to tune and filter known instances of task schedules used in your environment to minimize false positives.\ +action.notable.param.rule_description = The following analytic detects the registration of suspicious tasks on Windows using the Windows Security EventCode 4698, "A scheduled task was created." It specifically looks for tasks registered through schtasks.exe or TaskService that have command arguments containing the string "HTTP." This behavior is often associated with malware or attacks that utilize Living off the Land binaries (lolbins) to download additional files or payloads to the compromised machine. \ +The search returns information about the task, such as the task name, command, author, enabled status, hidden status, and arguments. Upon triage, it is important to identify the source of the scheduled task, whether it was registered through schtasks.exe or TaskService. Review the details of the created task and the command to be executed. Capture relevant artifacts on disk and examine them. Additionally, identify any parallel processes occurring within the same timeframe to determine the source of the attack. \ +Implementing this analytic requires ingesting logs with information about task schedules, specifically Windows Security Log EventCode 4698, from your endpoints. It is recommended to tune and filter known instances of task schedules used in your environment to minimize false positives. \ Detecting the registration of suspicious tasks with HTTP command arguments is valuable for a SOC as it indicates potential malicious activity or an attempt to establish persistence on the system. If a true positive is found, further investigation is warranted to analyze the nature and purpose of the scheduled task, identify any downloaded files or payloads, and mitigate the associated risks. The impact of a true positive can vary but may include data exfiltration, malware propagation, or unauthorized access to sensitive information. 
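Illustrative aside, not part of the generated stanza: the triage guidance above repeatedly asks for a review of parallel processes on the affected endpoint. A minimal sketch of that pivot, using the `security_content_summariesonly` macro and the Endpoint.Processes data model referenced elsewhere in this file; "suspect-host" is a placeholder, and the search should be scoped to the time window around the task-creation event:

    | tstats `security_content_summariesonly` count min(_time) as firstTime max(_time) as lastTime
      from datamodel=Endpoint.Processes
      where Processes.dest="suspect-host"
      by Processes.user Processes.parent_process_name Processes.process_name Processes.process
    | convert ctime(firstTime) ctime(lastTime)

Sorting by firstTime and comparing against the 4698 event's timestamp makes it easier to spot which parent process registered the task.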
action.notable.param.rule_title = Schedule Task with HTTP Command Arguments action.notable.param.security_domain = endpoint @@ -41803,15 +41803,15 @@ search = `wineventlog_security` EventCode=4698 | xmlkv Message| search Arguments [ESCU - Schedule Task with Rundll32 Command Trigger - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic detects the creation of suspicious tasks in Windows, specifically tasks using the rundll32 command. It's implemented using Windows Security EventCode 4698 for A scheduled task was created, and looks for tasks executed either via schtasks.exe or TaskService. This behavior is worth identifying as it is commonly used by malware, such as TrickBot, that leverages rundll32 to execute its downloader.\ -If a true positive is found, it suggests an attacker is trying to persist within the environment or potentially deliver additional malicious payloads, leading to data theft, ransomware, or other damaging outcomes.\ -To implement this analytic, ensure you are ingesting logs with task schedule information from your endpoints. Be aware of potential false positives - legitimate uses of Task Scheduler in your environment may cause benign activities to be flagged.\ +description = The following analytic detects the creation of suspicious tasks in Windows, specifically tasks using the rundll32 command. It's implemented using Windows Security EventCode 4698 for A scheduled task was created, and looks for tasks executed either via schtasks.exe or TaskService. This behavior is worth identifying as it is commonly used by malware, such as TrickBot, that leverages rundll32 to execute its downloader. \ +If a true positive is found, it suggests an attacker is trying to persist within the environment or potentially deliver additional malicious payloads, leading to data theft, ransomware, or other damaging outcomes. \ +To implement this analytic, ensure you are ingesting logs with task schedule information from your endpoints. Be aware of potential false positives - legitimate uses of Task Scheduler in your environment may cause benign activities to be flagged. \ Upon triage, review the scheduled task's source and the command to be executed. Capture and inspect any relevant on-disk artifacts, and look for concurrent processes to identify the attack source. This approach helps analysts detect potential threats earlier and mitigate the risks. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic detects the creation of suspicious tasks in Windows, specifically tasks using the rundll32 command. It's implemented using Windows Security EventCode 4698 for A scheduled task was created, and looks for tasks executed either via schtasks.exe or TaskService. This behavior is worth identifying as it is commonly used by malware, such as TrickBot, that leverages rundll32 to execute its downloader.\ -If a true positive is found, it suggests an attacker is trying to persist within the environment or potentially deliver additional malicious payloads, leading to data theft, ransomware, or other damaging outcomes.\ -To implement this analytic, ensure you are ingesting logs with task schedule information from your endpoints. 
Be aware of potential false positives - legitimate uses of Task Scheduler in your environment may cause benign activities to be flagged.\ +action.escu.eli5 = The following analytic detects the creation of suspicious tasks in Windows, specifically tasks using the rundll32 command. It's implemented using Windows Security EventCode 4698 for A scheduled task was created, and looks for tasks executed either via schtasks.exe or TaskService. This behavior is worth identifying as it is commonly used by malware, such as TrickBot, that leverages rundll32 to execute its downloader. \ +If a true positive is found, it suggests an attacker is trying to persist within the environment or potentially deliver additional malicious payloads, leading to data theft, ransomware, or other damaging outcomes. \ +To implement this analytic, ensure you are ingesting logs with task schedule information from your endpoints. Be aware of potential false positives - legitimate uses of Task Scheduler in your environment may cause benign activities to be flagged. \ Upon triage, review the scheduled task's source and the command to be executed. Capture and inspect any relevant on-disk artifacts, and look for concurrent processes to identify the attack source. This approach helps analysts detect potential threats earlier and mitigate the risks. action.escu.how_to_implement = To successfully implement this search, you need to be ingesting logs with the task schedule (Exa. Security Log EventCode 4698) endpoints. Tune and filter known instances of Task schedule used in your environment. action.escu.known_false_positives = unknown @@ -41838,9 +41838,9 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "75b00fd schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic detects the creation of suspicious tasks in Windows, specifically tasks using the rundll32 command. It's implemented using Windows Security EventCode 4698 for A scheduled task was created, and looks for tasks executed either via schtasks.exe or TaskService. This behavior is worth identifying as it is commonly used by malware, such as TrickBot, that leverages rundll32 to execute its downloader.\ -If a true positive is found, it suggests an attacker is trying to persist within the environment or potentially deliver additional malicious payloads, leading to data theft, ransomware, or other damaging outcomes.\ -To implement this analytic, ensure you are ingesting logs with task schedule information from your endpoints. Be aware of potential false positives - legitimate uses of Task Scheduler in your environment may cause benign activities to be flagged.\ +action.notable.param.rule_description = The following analytic detects the creation of suspicious tasks in Windows, specifically tasks using the rundll32 command. It's implemented using Windows Security EventCode 4698 for A scheduled task was created, and looks for tasks executed either via schtasks.exe or TaskService. This behavior is worth identifying as it is commonly used by malware, such as TrickBot, that leverages rundll32 to execute its downloader. \ +If a true positive is found, it suggests an attacker is trying to persist within the environment or potentially deliver additional malicious payloads, leading to data theft, ransomware, or other damaging outcomes. \ +To implement this analytic, ensure you are ingesting logs with task schedule information from your endpoints. 
Be aware of potential false positives - legitimate uses of Task Scheduler in your environment may cause benign activities to be flagged. \ Upon triage, review the scheduled task's source and the command to be executed. Capture and inspect any relevant on-disk artifacts, and look for concurrent processes to identify the attack source. This approach helps analysts detect potential threats earlier and mitigate the risks. action.notable.param.rule_title = Schedule Task with Rundll32 Command Trigger action.notable.param.security_domain = endpoint @@ -41859,11 +41859,11 @@ search = `wineventlog_security` EventCode=4698 | xmlkv Message | search Command [ESCU - Scheduled Task Creation on Remote Endpoint using At - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic detects the creation of suspicious tasks on a remote Windows endpoint using the at.exe command with command-line arguments. This technique is commonly used by red teams and adversaries for lateral movement and remote code execution. The at.exe binary leverages the deprecated AT protocol, which may still work on previous versions of Windows. Attackers can enable this protocol on demand by modifying a system registry key. It is important to consider potential false positives. While administrators may create scheduled tasks on remote systems, this activity is typically limited to a small set of hosts or users.\ +description = The following analytic detects the creation of suspicious tasks on a remote Windows endpoint using the at.exe command with command-line arguments. This technique is commonly used by red teams and adversaries for lateral movement and remote code execution. The at.exe binary leverages the deprecated AT protocol, which may still work on previous versions of Windows. Attackers can enable this protocol on demand by modifying a system registry key. It is important to consider potential false positives. While administrators may create scheduled tasks on remote systems, this activity is typically limited to a small set of hosts or users. \ Identifying the creation of scheduled tasks on remote endpoints is crucial for a Security Operations Center (SOC) because it indicates potential unauthorized activity or an attacker attempting to establish persistence or execute malicious code. The impact of a true positive can be significant, leading to unauthorized access, data theft, or other damaging outcomes. During triage, investigate the source and purpose of the scheduled task, inspect relevant on-disk artifacts, and analyze concurrent processes to identify the extent of the attack and take appropriate response actions. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053", "T1053.002"], "nist": ["DE.CM"]} action.escu.data_models = ["Endpoint"] -action.escu.eli5 = The following analytic detects the creation of suspicious tasks on a remote Windows endpoint using the at.exe command with command-line arguments. This technique is commonly used by red teams and adversaries for lateral movement and remote code execution. The at.exe binary leverages the deprecated AT protocol, which may still work on previous versions of Windows. Attackers can enable this protocol on demand by modifying a system registry key. It is important to consider potential false positives. 
While administrators may create scheduled tasks on remote systems, this activity is typically limited to a small set of hosts or users.\ +action.escu.eli5 = The following analytic detects the creation of suspicious tasks on a remote Windows endpoint using the at.exe command with command-line arguments. This technique is commonly used by red teams and adversaries for lateral movement and remote code execution. The at.exe binary leverages the deprecated AT protocol, which may still work on previous versions of Windows. Attackers can enable this protocol on demand by modifying a system registry key. It is important to consider potential false positives. While administrators may create scheduled tasks on remote systems, this activity is typically limited to a small set of hosts or users. \ Identifying the creation of scheduled tasks on remote endpoints is crucial for a Security Operations Center (SOC) because it indicates potential unauthorized activity or an attacker attempting to establish persistence or execute malicious code. The impact of a true positive can be significant, leading to unauthorized access, data theft, or other damaging outcomes. During triage, investigate the source and purpose of the scheduled task, inspect relevant on-disk artifacts, and analyze concurrent processes to identify the extent of the attack and take appropriate response actions. action.escu.how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. action.escu.known_false_positives = Administrators may create scheduled tasks on remote systems, but this activity is usually limited to a small set of hosts or users. @@ -41890,7 +41890,7 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "4be5485 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic detects the creation of suspicious tasks on a remote Windows endpoint using the at.exe command with command-line arguments. This technique is commonly used by red teams and adversaries for lateral movement and remote code execution. The at.exe binary leverages the deprecated AT protocol, which may still work on previous versions of Windows. Attackers can enable this protocol on demand by modifying a system registry key. It is important to consider potential false positives. While administrators may create scheduled tasks on remote systems, this activity is typically limited to a small set of hosts or users.\ +action.notable.param.rule_description = The following analytic detects the creation of suspicious tasks on a remote Windows endpoint using the at.exe command with command-line arguments. This technique is commonly used by red teams and adversaries for lateral movement and remote code execution. 
The at.exe binary leverages the deprecated AT protocol, which may still work on previous versions of Windows. Attackers can enable this protocol on demand by modifying a system registry key. It is important to consider potential false positives. While administrators may create scheduled tasks on remote systems, this activity is typically limited to a small set of hosts or users. \ Identifying the creation of scheduled tasks on remote endpoints is crucial for a Security Operations Center (SOC) because it indicates potential unauthorized activity or an attacker attempting to establish persistence or execute malicious code. The impact of a true positive can be significant, leading to unauthorized access, data theft, or other damaging outcomes. During triage, investigate the source and purpose of the scheduled task, inspect relevant on-disk artifacts, and analyze concurrent processes to identify the extent of the attack and take appropriate response actions. action.notable.param.rule_title = Scheduled Task Creation on Remote Endpoint using At action.notable.param.security_domain = endpoint @@ -42426,17 +42426,17 @@ search = | tstats `security_content_summariesonly` count min(_time) as firstTime [ESCU - ServicePrincipalNames Discovery with PowerShell - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic identifies `powershell.exe` usage, using Script Block Logging EventCode 4104, related to querying the domain for Service Principle Names. typically, this is a precursor activity related to kerberoasting or the silver ticket attack.\ -What is a ServicePrincipleName?\ -A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name.\ -The following analytic identifies the use of KerberosRequestorSecurityToken class within the script block. Using .NET System.IdentityModel.Tokens.KerberosRequestorSecurityToken class in PowerShell is the equivelant of using setspn.exe.\ +description = The following analytic identifies `powershell.exe` usage, using Script Block Logging EventCode 4104, related to querying the domain for Service Principle Names. typically, this is a precursor activity related to kerberoasting or the silver ticket attack. \ +What is a ServicePrincipleName? \ +A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name. \ +The following analytic identifies the use of KerberosRequestorSecurityToken class within the script block. Using .NET System.IdentityModel.Tokens.KerberosRequestorSecurityToken class in PowerShell is the equivelant of using setspn.exe. \ During triage, review parallel processes for further suspicious activity. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1558.003"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic identifies `powershell.exe` usage, using Script Block Logging EventCode 4104, related to querying the domain for Service Principle Names. 
typically, this is a precursor activity related to kerberoasting or the silver ticket attack.\ -What is a ServicePrincipleName?\ -A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name.\ -The following analytic identifies the use of KerberosRequestorSecurityToken class within the script block. Using .NET System.IdentityModel.Tokens.KerberosRequestorSecurityToken class in PowerShell is the equivelant of using setspn.exe.\ +action.escu.eli5 = The following analytic identifies `powershell.exe` usage, using Script Block Logging EventCode 4104, related to querying the domain for Service Principle Names. typically, this is a precursor activity related to kerberoasting or the silver ticket attack. \ +What is a ServicePrincipleName? \ +A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name. \ +The following analytic identifies the use of KerberosRequestorSecurityToken class within the script block. Using .NET System.IdentityModel.Tokens.KerberosRequestorSecurityToken class in PowerShell is the equivelant of using setspn.exe. \ During triage, review parallel processes for further suspicious activity. action.escu.how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. action.escu.known_false_positives = False positives should be limited, however filter as needed. @@ -42463,10 +42463,10 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "1324306 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic identifies `powershell.exe` usage, using Script Block Logging EventCode 4104, related to querying the domain for Service Principle Names. typically, this is a precursor activity related to kerberoasting or the silver ticket attack.\ -What is a ServicePrincipleName?\ -A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name.\ -The following analytic identifies the use of KerberosRequestorSecurityToken class within the script block. Using .NET System.IdentityModel.Tokens.KerberosRequestorSecurityToken class in PowerShell is the equivelant of using setspn.exe.\ +action.notable.param.rule_description = The following analytic identifies `powershell.exe` usage, using Script Block Logging EventCode 4104, related to querying the domain for Service Principle Names. typically, this is a precursor activity related to kerberoasting or the silver ticket attack. \ +What is a ServicePrincipleName? \ +A service principal name (SPN) is a unique identifier of a service instance. 
SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name. \ +The following analytic identifies the use of KerberosRequestorSecurityToken class within the script block. Using .NET System.IdentityModel.Tokens.KerberosRequestorSecurityToken class in PowerShell is the equivelant of using setspn.exe. \ During triage, review parallel processes for further suspicious activity. action.notable.param.rule_title = ServicePrincipalNames Discovery with PowerShell action.notable.param.security_domain = endpoint @@ -42485,23 +42485,23 @@ search = `powershell` EventCode=4104 ScriptBlockText="*KerberosRequestorSecurity [ESCU - ServicePrincipalNames Discovery with SetSPN - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic identifies `setspn.exe` usage related to querying the domain for Service Principle Names. typically, this is a precursor activity related to kerberoasting or the silver ticket attack.\ -What is a ServicePrincipleName?\ -A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name.\ -Example usage includes the following\ -* setspn -T offense -Q */* 1. setspn -T attackrange.local -F -Q MSSQLSvc/* 1. setspn -Q */* > allspns.txt 1. setspn -q\ -Values\ -* -F = perform queries at the forest, rather than domain level 1. -T = perform query on the specified domain or forest (when -F is also used) 1. -Q = query for existence of SPN\ +description = The following analytic identifies `setspn.exe` usage related to querying the domain for Service Principle Names. typically, this is a precursor activity related to kerberoasting or the silver ticket attack. \ +What is a ServicePrincipleName? \ +A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name. \ +Example usage includes the following \ +* setspn -T offense -Q */* 1. setspn -T attackrange.local -F -Q MSSQLSvc/* 1. setspn -Q */* > allspns.txt 1. setspn -q \ +Values \ +* -F = perform queries at the forest, rather than domain level 1. -T = perform query on the specified domain or forest (when -F is also used) 1. -Q = query for existence of SPN \ During triage, review parallel processes for further suspicious activity. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1558.003"], "nist": ["DE.CM"]} action.escu.data_models = ["Endpoint"] -action.escu.eli5 = The following analytic identifies `setspn.exe` usage related to querying the domain for Service Principle Names. typically, this is a precursor activity related to kerberoasting or the silver ticket attack.\ -What is a ServicePrincipleName?\ -A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. 
This allows a client application to request that the service authenticate an account even if the client does not have the account name.\ -Example usage includes the following\ -* setspn -T offense -Q */* 1. setspn -T attackrange.local -F -Q MSSQLSvc/* 1. setspn -Q */* > allspns.txt 1. setspn -q\ -Values\ -* -F = perform queries at the forest, rather than domain level 1. -T = perform query on the specified domain or forest (when -F is also used) 1. -Q = query for existence of SPN\ +action.escu.eli5 = The following analytic identifies `setspn.exe` usage related to querying the domain for Service Principle Names. typically, this is a precursor activity related to kerberoasting or the silver ticket attack. \ +What is a ServicePrincipleName? \ +A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name. \ +Example usage includes the following \ +* setspn -T offense -Q */* 1. setspn -T attackrange.local -F -Q MSSQLSvc/* 1. setspn -Q */* > allspns.txt 1. setspn -q \ +Values \ +* -F = perform queries at the forest, rather than domain level 1. -T = perform query on the specified domain or forest (when -F is also used) 1. -Q = query for existence of SPN \ During triage, review parallel processes for further suspicious activity. action.escu.how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. action.escu.known_false_positives = False positives may be caused by Administrators resetting SPNs or querying for SPNs. Filter as needed. @@ -42528,13 +42528,13 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "ae8b3ef schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic identifies `setspn.exe` usage related to querying the domain for Service Principle Names. typically, this is a precursor activity related to kerberoasting or the silver ticket attack.\ -What is a ServicePrincipleName?\ -A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name.\ -Example usage includes the following\ -* setspn -T offense -Q */* 1. setspn -T attackrange.local -F -Q MSSQLSvc/* 1. setspn -Q */* > allspns.txt 1. setspn -q\ -Values\ -* -F = perform queries at the forest, rather than domain level 1. -T = perform query on the specified domain or forest (when -F is also used) 1. 
-Q = query for existence of SPN\ +action.notable.param.rule_description = The following analytic identifies `setspn.exe` usage related to querying the domain for Service Principle Names. typically, this is a precursor activity related to kerberoasting or the silver ticket attack. \ +What is a ServicePrincipleName? \ +A service principal name (SPN) is a unique identifier of a service instance. SPNs are used by Kerberos authentication to associate a service instance with a service logon account. This allows a client application to request that the service authenticate an account even if the client does not have the account name. \ +Example usage includes the following \ +* setspn -T offense -Q */* 1. setspn -T attackrange.local -F -Q MSSQLSvc/* 1. setspn -Q */* > allspns.txt 1. setspn -q \ +Values \ +* -F = perform queries at the forest, rather than domain level 1. -T = perform query on the specified domain or forest (when -F is also used) 1. -Q = query for existence of SPN \ During triage, review parallel processes for further suspicious activity. action.notable.param.rule_title = ServicePrincipalNames Discovery with SetSPN action.notable.param.security_domain = endpoint @@ -42788,15 +42788,15 @@ search = | tstats `security_content_summariesonly` values(Processes.process) as [ESCU - Short Lived Scheduled Task - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic utilizes Windows Security EventCode 4698, "A scheduled task was created," and EventCode 4699, "A scheduled task was deleted," to identify scheduled tasks that are created and deleted within a short time frame of less than 30 seconds. This behavior is indicative of a potential lateral movement attack where the Task Scheduler is abused to achieve code execution. Both red teams and adversaries may exploit the Task Scheduler for lateral movement and remote code execution.\ -To implement this analytic, ensure that you are ingesting Windows Security Event Logs with EventCode 4698 enabled. Additionally, the Windows TA (Technology Add-on) is required to parse and extract the necessary information from the logs.\ -It's important to note that while uncommon, legitimate applications may create and delete scheduled tasks within a short duration. Analysts should filter the results based on the specific context and environment to reduce false positives.\ +description = The following analytic utilizes Windows Security EventCode 4698, "A scheduled task was created," and EventCode 4699, "A scheduled task was deleted," to identify scheduled tasks that are created and deleted within a short time frame of less than 30 seconds. This behavior is indicative of a potential lateral movement attack where the Task Scheduler is abused to achieve code execution. Both red teams and adversaries may exploit the Task Scheduler for lateral movement and remote code execution. \ +To implement this analytic, ensure that you are ingesting Windows Security Event Logs with EventCode 4698 enabled. Additionally, the Windows TA (Technology Add-on) is required to parse and extract the necessary information from the logs. \ +It's important to note that while uncommon, legitimate applications may create and delete scheduled tasks within a short duration. Analysts should filter the results based on the specific context and environment to reduce false positives. \ Identifying short-lived scheduled tasks is valuable for a SOC as it can indicate malicious activities attempting to move laterally or execute unauthorized code on Windows systems. 
By detecting and investigating these events, security analysts can respond promptly to prevent further compromise and mitigate potential risks. The impact of a true positive could range from unauthorized access to data exfiltration or the execution of malicious payloads. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.005"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic utilizes Windows Security EventCode 4698, "A scheduled task was created," and EventCode 4699, "A scheduled task was deleted," to identify scheduled tasks that are created and deleted within a short time frame of less than 30 seconds. This behavior is indicative of a potential lateral movement attack where the Task Scheduler is abused to achieve code execution. Both red teams and adversaries may exploit the Task Scheduler for lateral movement and remote code execution.\ -To implement this analytic, ensure that you are ingesting Windows Security Event Logs with EventCode 4698 enabled. Additionally, the Windows TA (Technology Add-on) is required to parse and extract the necessary information from the logs.\ -It's important to note that while uncommon, legitimate applications may create and delete scheduled tasks within a short duration. Analysts should filter the results based on the specific context and environment to reduce false positives.\ +action.escu.eli5 = The following analytic utilizes Windows Security EventCode 4698, "A scheduled task was created," and EventCode 4699, "A scheduled task was deleted," to identify scheduled tasks that are created and deleted within a short time frame of less than 30 seconds. This behavior is indicative of a potential lateral movement attack where the Task Scheduler is abused to achieve code execution. Both red teams and adversaries may exploit the Task Scheduler for lateral movement and remote code execution. \ +To implement this analytic, ensure that you are ingesting Windows Security Event Logs with EventCode 4698 enabled. Additionally, the Windows TA (Technology Add-on) is required to parse and extract the necessary information from the logs. \ +It's important to note that while uncommon, legitimate applications may create and delete scheduled tasks within a short duration. Analysts should filter the results based on the specific context and environment to reduce false positives. \ Identifying short-lived scheduled tasks is valuable for a SOC as it can indicate malicious activities attempting to move laterally or execute unauthorized code on Windows systems. By detecting and investigating these events, security analysts can respond promptly to prevent further compromise and mitigate potential risks. The impact of a true positive could range from unauthorized access to data exfiltration or the execution of malicious payloads. action.escu.how_to_implement = To successfully implement this search, you need to be ingesting Windows Security Event Logs with 4698 EventCode enabled. The Windows TA is also required. action.escu.known_false_positives = Although uncommon, legitimate applications may create and delete a Scheduled Task within 30 seconds. Filter as needed. 
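As a rough, hedged illustration of the create-then-delete correlation described above (a minimal sketch, not the shipped ESCU search), the pairing can be expressed by grouping EventCode 4698 and 4699 by task name and flagging pairs that occur within 30 seconds. It reuses the `wineventlog_security` macro from this package and assumes the Windows TA extracts a TaskName field and a dest field from the 4698/4699 events; adjust both to your environment.
`wineventlog_security` EventCode IN (4698, 4699)
| stats min(_time) as firstTime max(_time) as lastTime dc(EventCode) as event_codes count by dest TaskName
| where event_codes=2 AND (lastTime - firstTime) <= 30
| eval lifetime_seconds=lastTime - firstTime
A short lifetime alone is not proof of abuse; compare the task command line and the creating account before escalating.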
@@ -42823,9 +42823,9 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "6fa3141 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic utilizes Windows Security EventCode 4698, "A scheduled task was created," and EventCode 4699, "A scheduled task was deleted," to identify scheduled tasks that are created and deleted within a short time frame of less than 30 seconds. This behavior is indicative of a potential lateral movement attack where the Task Scheduler is abused to achieve code execution. Both red teams and adversaries may exploit the Task Scheduler for lateral movement and remote code execution.\ -To implement this analytic, ensure that you are ingesting Windows Security Event Logs with EventCode 4698 enabled. Additionally, the Windows TA (Technology Add-on) is required to parse and extract the necessary information from the logs.\ -It's important to note that while uncommon, legitimate applications may create and delete scheduled tasks within a short duration. Analysts should filter the results based on the specific context and environment to reduce false positives.\ +action.notable.param.rule_description = The following analytic utilizes Windows Security EventCode 4698, "A scheduled task was created," and EventCode 4699, "A scheduled task was deleted," to identify scheduled tasks that are created and deleted within a short time frame of less than 30 seconds. This behavior is indicative of a potential lateral movement attack where the Task Scheduler is abused to achieve code execution. Both red teams and adversaries may exploit the Task Scheduler for lateral movement and remote code execution. \ +To implement this analytic, ensure that you are ingesting Windows Security Event Logs with EventCode 4698 enabled. Additionally, the Windows TA (Technology Add-on) is required to parse and extract the necessary information from the logs. \ +It's important to note that while uncommon, legitimate applications may create and delete scheduled tasks within a short duration. Analysts should filter the results based on the specific context and environment to reduce false positives. \ Identifying short-lived scheduled tasks is valuable for a SOC as it can indicate malicious activities attempting to move laterally or execute unauthorized code on Windows systems. By detecting and investigating these events, security analysts can respond promptly to prevent further compromise and mitigate potential risks. The impact of a true positive could range from unauthorized access to data exfiltration or the execution of malicious payloads. 
action.notable.param.rule_title = Short Lived Scheduled Task action.notable.param.security_domain = endpoint @@ -43951,11 +43951,11 @@ search = `wineventlog_security` EventCode=4769 | eval isSuspicious = if(lower(S [ESCU - Suspicious Linux Discovery Commands - Rule] action.escu = 0 action.escu.enabled = 1 -description = This search detects the execution of suspicious bash commands from various commonly leveraged bash scripts (such as AutoSUID, LinEnum, and LinPeas) used to discover possible privilege escalation paths, password files, vulnerable directories, executables, and file permissions on a Linux host.\ +description = This search detects the execution of suspicious bash commands from various commonly leveraged bash scripts (such as AutoSUID, LinEnum, and LinPeas) used to discover possible privilege escalation paths, password files, vulnerable directories, executables, and file permissions on a Linux host. \ The search logic specifically looks for a high number of distinct commands run in a short period of time. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Installation"], "mitre_attack": ["T1059.004"], "nist": ["DE.CM"]} action.escu.data_models = ["Endpoint"] -action.escu.eli5 = This search detects the execution of suspicious bash commands from various commonly leveraged bash scripts (such as AutoSUID, LinEnum, and LinPeas) used to discover possible privilege escalation paths, password files, vulnerable directories, executables, and file permissions on a Linux host.\ +action.escu.eli5 = This search detects the execution of suspicious bash commands from various commonly leveraged bash scripts (such as AutoSUID, LinEnum, and LinPeas) used to discover possible privilege escalation paths, password files, vulnerable directories, executables, and file permissions on a Linux host. \ The search logic specifically looks for a high number of distinct commands run in a short period of time. action.escu.how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. action.escu.known_false_positives = Unless an administrator is using these commands to troubleshoot or audit a system, the execution of these commands should be monitored.
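To make the "many distinct commands in a short window" logic above concrete, here is a minimal sketch against the Endpoint.Processes data model. It is not the shipped ESCU search; the command subset, the 5-minute span, and the threshold of 10 distinct commands are illustrative assumptions to tune for your environment.
| tstats `security_content_summariesonly` values(Processes.process) as process dc(Processes.process_name) as distinct_commands count from datamodel=Endpoint.Processes where Processes.process_name IN ("whoami", "id", "uname", "hostname", "sudo", "find", "chmod", "netstat") by _time span=5m Processes.dest Processes.user
| where distinct_commands >= 10
Raising the threshold or narrowing the command list is the usual way to keep administrator troubleshooting sessions out of the results.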
@@ -43982,7 +43982,7 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "0edd511 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = This search, detects execution of suspicious bash commands from various commonly leveraged bash scripts like (AutoSUID, LinEnum, LinPeas) to perform discovery of possible paths of privilege execution, password files, vulnerable directories, executables and file permissions on a Linux host.\ +action.notable.param.rule_description = This search, detects execution of suspicious bash commands from various commonly leveraged bash scripts like (AutoSUID, LinEnum, LinPeas) to perform discovery of possible paths of privilege execution, password files, vulnerable directories, executables and file permissions on a Linux host. \ The search logic specifically looks for high number of distinct commands run in a short period of time. action.notable.param.rule_title = Suspicious Linux Discovery Commands action.notable.param.security_domain = endpoint @@ -44308,23 +44308,23 @@ search = | tstats `security_content_summariesonly` count values(Processes.proces [ESCU - Suspicious PlistBuddy Usage - Rule] action.escu = 0 action.escu.enabled = 1 -description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splunk Threat Research Team. This means that the detection has been manually tested but we do not have the associated attack data to perform automated testing or cannot share this attack dataset due to its sensitive nature. If you have any questions feel free to email us at: research@splunk.com. The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a properly list (.plist) file. In the instance of Silver Sparrow, the following commands were executed:\ -* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist\ +description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splunk Threat Research Team. This means that the detection has been manually tested but we do not have the associated attack data to perform automated testing or cannot share this attack dataset due to its sensitive nature. If you have any questions feel free to email us at: research@splunk.com. The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a properly list (.plist) file. 
In the instance of Silver Sparrow, the following commands were executed: \ +* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist \ Upon triage, capture the property list file being written to disk and review for further indicators. Contain the endpoint and triage further. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1543.001", "T1543"], "nist": ["DE.CM"]} action.escu.data_models = ["Endpoint"] -action.escu.eli5 = The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a properly list (.plist) file. In the instance of Silver Sparrow, the following commands were executed:\ -* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist\ +action.escu.eli5 = The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a properly list (.plist) file. In the instance of Silver Sparrow, the following commands were executed: \ +* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist \ Upon triage, capture the property list file being written to disk and review for further indicators. Contain the endpoint and triage further. action.escu.how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. action.escu.known_false_positives = Some legitimate applications may use PlistBuddy to create or modify property lists and possibly generate false positives. 
Review the property list being modified or created to confirm. @@ -44351,13 +44351,13 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "c319400 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a properly list (.plist) file. In the instance of Silver Sparrow, the following commands were executed:\ -* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist\ +action.notable.param.rule_description = The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a properly list (.plist) file. In the instance of Silver Sparrow, the following commands were executed: \ +* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist \ Upon triage, capture the property list file being written to disk and review for further indicators. Contain the endpoint and triage further. action.notable.param.rule_title = Suspicious PlistBuddy Usage action.notable.param.security_domain = endpoint @@ -44376,23 +44376,23 @@ search = | tstats `security_content_summariesonly` count min(_time) as firstTime [ESCU - Suspicious PlistBuddy Usage via OSquery - Rule] action.escu = 0 action.escu.enabled = 1 -description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splunk Threat Research Team. This means that the detection has been manually tested but we do not have the associated attack data to perform automated testing or cannot share this attack dataset due to its sensitive nature. If you have any questions feel free to email us at: research@splunk.com. The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a properly list (.plist) file. In the instance of Silver Sparrow, the following commands were executed:\ -* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist\ +description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splunk Threat Research Team. 
This means that the detection has been manually tested but we do not have the associated attack data to perform automated testing or cannot share this attack dataset due to its sensitive nature. If you have any questions feel free to email us at: research@splunk.com. The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a properly list (.plist) file. In the instance of Silver Sparrow, the following commands were executed: \ +* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist \ Upon triage, capture the property list file being written to disk and review for further indicators. Contain the endpoint and triage further. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1543.001", "T1543"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a properly list (.plist) file. In the instance of Silver Sparrow, the following commands were executed:\ -* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist\ +action.escu.eli5 = The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a properly list (.plist) file. In the instance of Silver Sparrow, the following commands were executed: \ +* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist \ Upon triage, capture the property list file being written to disk and review for further indicators. Contain the endpoint and triage further. action.escu.how_to_implement = OSQuery must be installed and configured to pick up process events (info at https://osquery.io) as well as using the Splunk OSQuery Add-on https://splunkbase.splunk.com/app/4402. Modify the macro and validate fields are correct. action.escu.known_false_positives = Some legitimate applications may use PlistBuddy to create or modify property lists and possibly generate false positives. Review the property list being modified or created to confirm. 
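As a rough sketch of the osquery-based variant described above (not the shipped search; the `osquery_process_events` macro name and the columns.* field names are placeholders that must be aligned with your OSQuery Add-on configuration), the idea is simply to flag PlistBuddy invocations whose command line touches LaunchAgents or LaunchDaemons paths:
`osquery_process_events` columns.path="*PlistBuddy*" (columns.cmdline="*LaunchAgents*" OR columns.cmdline="*LaunchDaemons*")
| stats count min(_time) as firstTime max(_time) as lastTime by host columns.cmdline
Reviewing the full cmdline (Label, RunAtLoad, ProgramArguments) usually makes it clear whether a legitimate installer or something like Silver Sparrow wrote the property list.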
@@ -44419,13 +44419,13 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "20ba6c3 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a properly list (.plist) file. In the instance of Silver Sparrow, the following commands were executed:\ -* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist\ -* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist\ +action.notable.param.rule_description = The following analytic identifies the use of a native MacOS utility, PlistBuddy, creating or modifying a properly list (.plist) file. In the instance of Silver Sparrow, the following commands were executed: \ +* PlistBuddy -c "Add :Label string init_verx" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :RunAtLoad bool true" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :StartInterval integer 3600" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments array" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:0 string /bin/sh" ~/Library/Launchagents/init_verx.plist \ +* PlistBuddy -c "Add :ProgramArguments:1 string -c" ~/Library/Launchagents/init_verx.plist \ Upon triage, capture the property list file being written to disk and review for further indicators. Contain the endpoint and triage further. action.notable.param.rule_title = Suspicious PlistBuddy Usage via OSquery action.notable.param.security_domain = endpoint @@ -44902,11 +44902,11 @@ search = | tstats `security_content_summariesonly` count min(_time) as firstTime [ESCU - Suspicious Scheduled Task from Public Directory - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic, "Suspicious Scheduled Task from Public Directory", detects the registration of scheduled tasks aimed to execute a binary or script from public directories, a behavior often associated with malware deployment. It utilizes the Sysmon Event ID 1 data source, searching for instances where schtasks.exe is connected with the directories users\public, \programdata\, or \windows\temp and involves the /create command.\ +description = The following analytic, "Suspicious Scheduled Task from Public Directory", detects the registration of scheduled tasks aimed to execute a binary or script from public directories, a behavior often associated with malware deployment. It utilizes the Sysmon Event ID 1 data source, searching for instances where schtasks.exe is connected with the directories users\public, \programdata\, or \windows\temp and involves the /create command. \ The registration of such scheduled tasks in public directories could suggest that an attacker is trying to maintain persistence or execute malicious scripts. If confirmed as a true positive, this could lead to data compromise, unauthorized access, and potential lateral movement within the network. 
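To illustrate the path-plus-/create pattern just described, a minimal sketch over the Endpoint.Processes data model could look like the following; it is not the exact shipped search, and the wildcard path list simply mirrors the directories named above.
| tstats `security_content_summariesonly` count min(_time) as firstTime max(_time) as lastTime from datamodel=Endpoint.Processes where Processes.process_name=schtasks.exe Processes.process="*/create*" (Processes.process="*\\users\\public\\*" OR Processes.process="*\\programdata\\*" OR Processes.process="*\\windows\\temp\\*") by Processes.dest Processes.user Processes.process
| rename "Processes.*" as "*"
Tuning usually amounts to allow-listing the small set of deployment tools that legitimately register tasks out of ProgramData.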
action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1053.005", "T1053"], "nist": ["DE.AE"]} action.escu.data_models = ["Endpoint"] -action.escu.eli5 = The following analytic, "Suspicious Scheduled Task from Public Directory", detects the registration of scheduled tasks aimed to execute a binary or script from public directories, a behavior often associated with malware deployment. It utilizes the Sysmon Event ID 1 data source, searching for instances where schtasks.exe is connected with the directories users\public, \programdata\, or \windows\temp and involves the /create command.\ +action.escu.eli5 = The following analytic, "Suspicious Scheduled Task from Public Directory", detects the registration of scheduled tasks aimed to execute a binary or script from public directories, a behavior often associated with malware deployment. It utilizes the Sysmon Event ID 1 data source, searching for instances where schtasks.exe is connected with the directories users\public, \programdata\, or \windows\temp and involves the /create command. \ The registration of such scheduled tasks in public directories could suggest that an attacker is trying to maintain persistence or execute malicious scripts. If confirmed as a true positive, this could lead to data compromise, unauthorized access, and potential lateral movement within the network. action.escu.how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. action.escu.known_false_positives = The main source of false positives could be the legitimate use of scheduled tasks from these directories. Careful tuning of this search may be necessary to suit the specifics of your environment, reducing the rate of false positives. @@ -45346,13 +45346,13 @@ search = | tstats `security_content_summariesonly` count min(_time) as firstTime [ESCU - System Processes Run From Unexpected Locations - Rule] action.escu = 0 action.escu.enabled = 1 -description = This search looks for system processes that typically execute from `C:\Windows\System32\` or `C:\Windows\SysWOW64`. This may indicate a malicious process that is trying to hide as a legitimate process.\ -This detection utilizes a lookup that is deduped `system32` and `syswow64` directories from Server 2016 and Windows 10.\ +description = This search looks for system processes that typically execute from `C:\Windows\System32\` or `C:\Windows\SysWOW64`. This may indicate a malicious process that is trying to hide as a legitimate process. \ +This detection utilizes a lookup that is deduped `system32` and `syswow64` directories from Server 2016 and Windows 10. \ During triage, review the parallel processes - what process moved the native Windows binary? identify any artifacts on disk and review. If a remote destination is contacted, what is the reputation? 
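A simplified sketch of the lookup-driven idea described above follows; it is not the shipped search, `windows_system_binaries_lookup` is a placeholder name for a lookup of deduplicated system32/syswow64 binary names, and it assumes Processes.process_path is populated by your EDR add-on.
| tstats `security_content_summariesonly` count min(_time) as firstTime max(_time) as lastTime from datamodel=Endpoint.Processes where NOT (Processes.process_path="*\\windows\\system32\\*" OR Processes.process_path="*\\windows\\syswow64\\*") by Processes.dest Processes.user Processes.process_name Processes.process_path
| rename "Processes.*" as "*"
| lookup windows_system_binaries_lookup process_name OUTPUT process_name as expected_system_binary
| where isnotnull(expected_system_binary)
Only process names present in the lookup survive the final where clause, so the results are limited to binaries that normally live under System32 or SysWOW64 but executed from somewhere else.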
action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1036", "T1036.003"], "nist": ["DE.AE"]} action.escu.data_models = ["Endpoint"] -action.escu.eli5 = This search looks for system processes that typically execute from `C:\Windows\System32\` or `C:\Windows\SysWOW64`. This may indicate a malicious process that is trying to hide as a legitimate process.\ -This detection utilizes a lookup that is deduped `system32` and `syswow64` directories from Server 2016 and Windows 10.\ +action.escu.eli5 = This search looks for system processes that typically execute from `C:\Windows\System32\` or `C:\Windows\SysWOW64`. This may indicate a malicious process that is trying to hide as a legitimate process. \ +This detection utilizes a lookup that is deduped `system32` and `syswow64` directories from Server 2016 and Windows 10. \ During triage, review the parallel processes - what process moved the native Windows binary? identify any artifacts on disk and review. If a remote destination is contacted, what is the reputation? action.escu.how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. action.escu.known_false_positives = This detection may require tuning based on third party applications utilizing native Windows binaries in non-standard paths. @@ -45792,15 +45792,15 @@ search = | tstats `security_content_summariesonly` count min(_time) as firstTime [ESCU - Unloading AMSI via Reflection - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies the behavior of AMSI being tampered with. Implemented natively in many frameworks, the command will look similar to `SEtValuE($Null,(New-OBJEct COLlECtionS.GenerIC.HAshSEt{[StrINg]))}$ReF=[ReF].AsSeMbLY.GeTTyPe("System.Management.Automation.Amsi"+"Utils")` taken from Powershell-Empire.\ +description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies the behavior of AMSI being tampered with. Implemented natively in many frameworks, the command will look similar to `SEtValuE($Null,(New-OBJEct COLlECtionS.GenerIC.HAshSEt{[StrINg]))}$ReF=[ReF].AsSeMbLY.GeTTyPe("System.Management.Automation.Amsi"+"Utils")` taken from Powershell-Empire. 
\ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation", "Installation"], "mitre_attack": ["T1562", "T1059.001", "T1059"], "nist": ["DE.CM"]} action.escu.data_models = [] -action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies the behavior of AMSI being tampered with. Implemented natively in many frameworks, the command will look similar to `SEtValuE($Null,(New-OBJEct COLlECtionS.GenerIC.HAshSEt{[StrINg]))}$ReF=[ReF].AsSeMbLY.GeTTyPe("System.Management.Automation.Amsi"+"Utils")` taken from Powershell-Empire.\ +action.escu.eli5 = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies the behavior of AMSI being tampered with. Implemented natively in many frameworks, the command will look similar to `SEtValuE($Null,(New-OBJEct COLlECtionS.GenerIC.HAshSEt{[StrINg]))}$ReF=[ReF].AsSeMbLY.GeTTyPe("System.Management.Automation.Amsi"+"Utils")` taken from Powershell-Empire. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.escu.how_to_implement = To successfully implement this analytic, you will need to enable PowerShell Script Block Logging on some or all endpoints. Additional setup here https://docs.splunk.com/Documentation/UBA/5.0.4.1/GetDataIn/AddPowerShell#Configure_module_logging_for_PowerShell. action.escu.known_false_positives = Potential for some third party applications to disable AMSI upon invocation. Filter as needed. @@ -45827,9 +45827,9 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "a21e348 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all.\ -\ -This analytic identifies the behavior of AMSI being tampered with. Implemented natively in many frameworks, the command will look similar to `SEtValuE($Null,(New-OBJEct COLlECtionS.GenerIC.HAshSEt{[StrINg]))}$ReF=[ReF].AsSeMbLY.GeTTyPe("System.Management.Automation.Amsi"+"Utils")` taken from Powershell-Empire.\ +action.notable.param.rule_description = The following analytic utilizes PowerShell Script Block Logging (EventCode=4104) to identify suspicious PowerShell execution. Script Block Logging captures the command sent to PowerShell, the full command to be executed. 
Upon enabling, logs will output to Windows event logs. Dependent upon volume, enable on critical endpoints or all. \ + \ +This analytic identifies the behavior of AMSI being tampered with. Implemented natively in many frameworks, the command will look similar to `SEtValuE($Null,(New-OBJEct COLlECtionS.GenerIC.HAshSEt{[StrINg]))}$ReF=[ReF].AsSeMbLY.GeTTyPe("System.Management.Automation.Amsi"+"Utils")` taken from Powershell-Empire. \ During triage, review parallel processes using an EDR product or 4688 events. It will be important to understand the timeline of events around this activity. Review the entire logged PowerShell script block. action.notable.param.rule_title = Unloading AMSI via Reflection action.notable.param.security_domain = endpoint @@ -45884,11 +45884,11 @@ search = `wineventlog_security` EventCode=4769 Service_Name="*$" Account_Name!= [ESCU - Unusual Number of Kerberos Service Tickets Requested - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following hunting analytic leverages Kerberos Event 4769, "A Kerberos service ticket was requested," to identify a potential Kerberoasting attack against Active Directory networks. Kerberoasting allows an adversary to request Kerberos tickets for domain accounts typically used as service accounts and attempt to crack them offline, allowing them to obtain privileged access to the domain.\ +description = The following hunting analytic leverages Kerberos Event 4769, "A Kerberos service ticket was requested," to identify a potential Kerberoasting attack against Active Directory networks. Kerberoasting allows an adversary to request Kerberos tickets for domain accounts typically used as service accounts and attempt to crack them offline, allowing them to obtain privileged access to the domain. \ The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of service ticket requests. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Exploitation"], "mitre_attack": ["T1558", "T1558.003"], "nist": ["DE.AE"]} action.escu.data_models = [] -action.escu.eli5 = The following hunting analytic leverages Kerberos Event 4769, "A Kerberos service ticket was requested," to identify a potential Kerberoasting attack against Active Directory networks. Kerberoasting allows an adversary to request Kerberos tickets for domain accounts typically used as service accounts and attempt to crack them offline, allowing them to obtain privileged access to the domain.\ +action.escu.eli5 = The following hunting analytic leverages Kerberos Event 4769, "A Kerberos service ticket was requested," to identify a potential Kerberoasting attack against Active Directory networks. Kerberoasting allows an adversary to request Kerberos tickets for domain accounts typically used as service accounts and attempt to crack them offline, allowing them to obtain privileged access to the domain. \ The detection calculates the standard deviation for each host and leverages the 3-sigma statistical rule to identify an unusual number of service ticket requests. To customize this analytic, users can try different combinations of the `bucket` span time and the calculation of the `upperBound` field. action.escu.how_to_implement = To successfully implement this search, you need to be ingesting Domain Controller and Kerberos events.
The Advanced Security Audit policy setting `Audit Kerberos Authentication Service` within `Account Logon` needs to be enabled. action.escu.known_false_positives = A single endpoint requesting a large number of Kerberos service tickets is not common behavior. Possible false positive scenarios include, but are not limited to, vulnerability scanners, administration systems, and misconfigured systems. @@ -49787,13 +49787,13 @@ search = | tstats `security_content_summariesonly` count min(_time) as firstTime [ESCU - Windows Curl Download to Suspicious Path - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic identifies the use of Windows Curl.exe downloading a file to a suspicious location.\ --O (--remote-name) or -o (--output) is used when a downloaded file is to be written to disk.\ +description = The following analytic identifies the use of Windows Curl.exe downloading a file to a suspicious location. \ +-O (--remote-name) or -o (--output) is used when a downloaded file is to be written to disk. \ During triage, review parallel processes for further behavior. In addition, identify if the download was successful. If a file was downloaded, capture and analyze. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Command and Control"], "mitre_attack": ["T1105"], "nist": ["DE.CM"]} action.escu.data_models = ["Endpoint"] -action.escu.eli5 = The following analytic identifies the use of Windows Curl.exe downloading a file to a suspicious location.\ --O (--remote-name) or -o (--output) is used when a downloaded file is to be written to disk.\ +action.escu.eli5 = The following analytic identifies the use of Windows Curl.exe downloading a file to a suspicious location. \ +-O (--remote-name) or -o (--output) is used when a downloaded file is to be written to disk. \ During triage, review parallel processes for further behavior. In addition, identify if the download was successful. If a file was downloaded, capture and analyze. action.escu.how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. action.escu.known_false_positives = It is possible that administrators or super users will use Curl for legitimate purposes. Filter as needed. @@ -49820,8 +49820,8 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "c32f091 schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic identifies the use of Windows Curl.exe downloading a file to a suspicious location.\ --O (--remote-name) or -o (--output) is used when a downloaded file is to be written to disk.\ +action.notable.param.rule_description = The following analytic identifies the use of Windows Curl.exe downloading a file to a suspicious location. \ +-O (--remote-name) or -o (--output) is used when a downloaded file is to be written to disk.
 \ During triage, review parallel processes for further behavior. In addition, identify if the download was successful. If a file was downloaded, capture and analyze. action.notable.param.rule_title = Windows Curl Download to Suspicious Path action.notable.param.security_domain = endpoint @@ -49840,23 +49840,23 @@ search = | tstats `security_content_summariesonly` count min(_time) as firstTime [ESCU - Windows Curl Upload to Remote Destination - Rule] action.escu = 0 action.escu.enabled = 1 -description = The following analytic identifies the use of Windows Curl.exe uploading a file to a remote destination.\ -`-T` or `--upload-file` is used when a file is to be uploaded to a remote destination.\ -\ -`-d` or `--data` performs an HTTP POST; POST is the HTTP method that was invented to send data to a receiving web application, and it is, for example, how most common HTML forms on the web work.\ -\ -HTTP multipart formposts are done with `-F`, but this appears not to be compatible with the Windows version of Curl. This will be updated if adversary tradecraft using it is identified.\ -\ +description = The following analytic identifies the use of Windows Curl.exe uploading a file to a remote destination. \ +`-T` or `--upload-file` is used when a file is to be uploaded to a remote destination. \ + \ +`-d` or `--data` performs an HTTP POST; POST is the HTTP method that was invented to send data to a receiving web application, and it is, for example, how most common HTML forms on the web work. \ + \ +HTTP multipart formposts are done with `-F`, but this appears not to be compatible with the Windows version of Curl. This will be updated if adversary tradecraft using it is identified. \ + \ Adversaries may use one of the three methods based on the remote destination and what they are attempting to upload (zip vs txt). During triage, review parallel processes for further behavior. In addition, identify if the upload was successful in network logs. If a file was uploaded, isolate the endpoint and review. action.escu.mappings = {"cis20": ["CIS 10"], "kill_chain_phases": ["Command and Control"], "mitre_attack": ["T1105"], "nist": ["DE.CM"]} action.escu.data_models = ["Endpoint"] -action.escu.eli5 = The following analytic identifies the use of Windows Curl.exe uploading a file to a remote destination.\ -`-T` or `--upload-file` is used when a file is to be uploaded to a remote destination.\ -\ -`-d` or `--data` performs an HTTP POST; POST is the HTTP method that was invented to send data to a receiving web application, and it is, for example, how most common HTML forms on the web work.\ -\ -HTTP multipart formposts are done with `-F`, but this appears not to be compatible with the Windows version of Curl. This will be updated if adversary tradecraft using it is identified.\ -\ +action.escu.eli5 = The following analytic identifies the use of Windows Curl.exe uploading a file to a remote destination. \ +`-T` or `--upload-file` is used when a file is to be uploaded to a remote destination. \ + \ +`-d` or `--data` performs an HTTP POST; POST is the HTTP method that was invented to send data to a receiving web application, and it is, for example, how most common HTML forms on the web work. \ + \ +HTTP multipart formposts are done with `-F`, but this appears not to be compatible with the Windows version of Curl. This will be updated if adversary tradecraft using it is identified. \ + \ Adversaries may use one of the three methods based on the remote destination and what they are attempting to upload (zip vs txt). During triage, review parallel processes for further behavior. In addition, identify if the upload was successful in network logs.
If a file was uploaded, isolate the endpoint and review. action.escu.how_to_implement = The detection is based on data that originates from Endpoint Detection and Response (EDR) agents. These agents are designed to provide security-related telemetry from the endpoints where the agent is installed. To implement this search, you must ingest logs that contain the process GUID, process name, and parent process. Additionally, you must ingest complete command-line executions. These logs must be processed using the appropriate Splunk Technology Add-ons that are specific to the EDR product. The logs must also be mapped to the `Processes` node of the `Endpoint` data model. Use the Splunk Common Information Model (CIM) to normalize the field names and speed up the data modeling process. action.escu.known_false_positives = False positives may be limited to source control applications and may need to be filtered out. @@ -49883,13 +49883,13 @@ action.correlationsearch.metadata = {"deprecated": "0", "detection_id": "42f8f1a schedule_window = auto action.notable = 1 action.notable.param.nes_fields = user,dest -action.notable.param.rule_description = The following analytic identifies the use of Windows Curl.exe uploading a file to a remote destination.\ -`-T` or `--upload-file` is used when a file is to be uploaded to a remote destination.\ -\ -`-d` or `--data` performs an HTTP POST; POST is the HTTP method that was invented to send data to a receiving web application, and it is, for example, how most common HTML forms on the web work.\ -\ -HTTP multipart formposts are done with `-F`, but this appears not to be compatible with the Windows version of Curl. This will be updated if adversary tradecraft using it is identified.\ -\ +action.notable.param.rule_description = The following analytic identifies the use of Windows Curl.exe uploading a file to a remote destination. \ +`-T` or `--upload-file` is used when a file is to be uploaded to a remote destination. \ + \ +`-d` or `--data` performs an HTTP POST; POST is the HTTP method that was invented to send data to a receiving web application, and it is, for example, how most common HTML forms on the web work. \ + \ +HTTP multipart formposts are done with `-F`, but this appears not to be compatible with the Windows version of Curl. This will be updated if adversary tradecraft using it is identified. \ + \ Adversaries may use one of the three methods based on the remote destination and what they are attempting to upload (zip vs txt). During triage, review parallel processes for further behavior. In addition, identify if the upload was successful in network logs. If a file was uploaded, isolate the endpoint and review. action.notable.param.rule_title = Windows Curl Upload to Remote Destination action.notable.param.security_domain = endpoint @@ -50255,11 +50255,11 @@ search = `ms_defender` EventCode IN (5007) | rex field=New_Value "0x(?