diff --git a/data_sources/m365_copilot_graph_api.yml b/data_sources/m365_copilot_graph_api.yml new file mode 100644 index 0000000000..9669b05b9a --- /dev/null +++ b/data_sources/m365_copilot_graph_api.yml @@ -0,0 +1,67 @@ +name: M365 Copilot Graph API +id: 30dd2202-869c-47fb-ad37-4f4d4c93c6b7 +version: 1 +date: '2025-09-30' +author: Rod Soto, Splunk +description: Access Logs from M365 Copilot access via Graph API +source: AuditLogs.SignIns +sourcetype: o365:graph:api +supported_TA: +- name: Splunk Add-on for Microsoft Office 365 + url: https://splunkbase.splunk.com/app/4055 + version: 4.9.0 +fields: +- appDisplayName +- appId +- clientAppUsed +- conditionalAccessStatus +- correlationId +- createdDateTime +- date_hour +- date_mday +- date_minute +- date_month +- date_second +- date_wday +- date_year +- date_zone +- deviceDetail.browser +- deviceDetail.deviceId +- deviceDetail.displayName +- deviceDetail.isCompliant +- deviceDetail.isManaged +- deviceDetail.operatingSystem +- deviceDetail.trustType +- eventtype +- host +- id +- index +- ipAddress +- isInteractive +- linecount +- location.city +- location.countryOrRegion +- location.geoCoordinates.altitude +- location.geoCoordinates.latitude +- location.geoCoordinates.longitude +- location.state +- punct +- resourceDisplayName +- resourceId +- riskDetail +- riskLevelAggregated +- riskLevelDuringSignIn +- riskState +- source +- sourcetype +- splunk_server +- status.additionalDetails +- status.errorCode +- status.failureReason +- timeendpos +- timestartpos +- userDisplayName +- userId +- userPrincipalName +output_fields: [] +example_log: '{"id": "7fbc0a97-7f78-4cc8-9377-dc94d2ad1e00", "createdDateTime": "2025-09-30T12:34:20Z", "userDisplayName": "Rod Soto", "userPrincipalName": "rodsoto@rodsoto.onmicrosoft.com", "userId": "bfb8c366-0406-41a5-b3e3-328f4a3b4484", "appId": "9199bf20-a13f-4107-85dc-02114787ef48", "appDisplayName": "One Outlook Web", "ipAddress": "127.0.0.1", "clientAppUsed": "Browser", "correlationId": 
"8fe7aa9b-42c8-b52e-c6f2-8e4dfc07996b", "conditionalAccessStatus": "notApplied", "isInteractive": true, "riskDetail": "none", "riskLevelAggregated": "none", "riskLevelDuringSignIn": "none", "riskState": "none", "riskEventTypes": [], "riskEventTypes_v2": [], "resourceDisplayName": "Office 365 Exchange Online", "resourceId": "00000002-0000-0ff1-ce00-000000000000", "status": {"errorCode": 0, "failureReason": "Other.", "additionalDetails": "MFA requirement satisfied by claim in the token"}, "deviceDetail": {"deviceId": "", "displayName": "", "operatingSystem": "MacOs", "browser": "Chrome 140.0.0", "isCompliant": false, "isManaged": false, "trustType": ""}, "location": {"city": "Miami", "state": "Florida", "countryOrRegion": "US", "geoCoordinates": {"altitude": null, "latitude": 25.76286, "longitude": -80.31196}}, "appliedConditionalAccessPolicies": []}' \ No newline at end of file diff --git a/data_sources/m365_exported_ediscovery_prompts.yml b/data_sources/m365_exported_ediscovery_prompts.yml new file mode 100644 index 0000000000..475cdf0ff2 --- /dev/null +++ b/data_sources/m365_exported_ediscovery_prompts.yml @@ -0,0 +1,88 @@ +name: M365 Exported eDiscovery Prompts +id: 4fc2d127-ba47-45df-b56c-4ec626ee735b +version: 1 +date: '2025-10-07' +author: Rod Soto, Splunk +description: M365 exported eDiscovery prompt logs from Microsoft Purview contain user interactions with M365 Copilot, including the actual prompt text (Subject_Title), sender information, timestamps, and metadata about the AI conversations. These logs are exported through Purview's eDiscovery functionality and provide visibility into how users are querying and attempting to interact with Copilot, making them valuable for detecting jailbreak attempts, data exfiltration requests, policy violations, and other security-relevant AI usage patterns. 
The logs capture the full conversational context necessary for identifying malicious prompt injection, social engineering attempts against the AI, and unauthorized information disclosure requests. +source: csv +sourcetype: csv +fields: +- Added by +- Author +- Compound path +- Contains deleted message +- Contains edited message +- Conversation name +- Conversation type +- Created +- Created by +- Data source +- Date +- Doc authors +- Doc date modified +- Doc modified by +- Document ID index +- Email date sent +- Email importance +- Email participant domains +- Email recipient domains +- Email recipients +- Email sender domain +- Error warning +- File extension +- File name +- Has attachment +- Has text +- Immutable ID +- Internet message ID +- Is attachment from transcript +- Is doc from conversation +- Is modern attachment +- Is read +- Item class +- Item source +- Last modified by +- Last modified time +- Location ID +- Location sub type +- Message kind +- Modern attachment parent ID +- Original path +- Participants +- Received +- Recipient count +- Retention label +- SPO unique ID +- Sender +- Sensitive type +- Size +- Source ID +- Status +- Subject_Title +- Target path +- Title +- To +- Type +- Workload +- date_hour +- date_mday +- date_minute +- date_month +- date_second +- date_wday +- date_year +- date_zone +- eventtype +- host +- index +- linecount +- punct +- source +- sourcetype +- splunk_server +- tag +- timeendpos +- timestamp +- timestartpos +output_fields: [] +example_log: 'Succeeded,,IndexQuery,,,,,,,,,,rodsoto@rodsoto.onmicrosoft.com/TeamsMessagesData/Card.html,False,False,,,,,,,,,All people and groups,2025-08-25 20:58:43Z,,,,,,,,,,,,,,,,1591522,,,,2025-08-25T20:58:43Z,,Normal,,,rodsoto.onmicrosoft.com,,,Copilot in 
Word,,rodsoto.onmicrosoft.com,,,,,,,,,,,,,html,Card.html,,,,True,False,,,Exchange/sourceE83F8E164F7280A5033281941716356F/TEAMS/19I5dhdbjE2GdGNAYuFGzQrEHvS-vIfpjDDRO05LjzN01threadv2/2025082512/19I5dhdbjE2GdGNAYuFGzQrEHvS-vIfpjDDRO05LjzN01threadv2-2025082512.html-mimeatt64601eefbf644a2a940f679f8ae1d4be-1,,,,1756155523926,False,,False,,,,,,True,,True,,,,IPM.SkypeTeams.Message.Copilot.Word,rodsoto@rodsoto.onmicrosoft.com,,2025-08-25T20:58:45Z,,,d03dab29-e210-4507-8932-ce3c7e74e5ae,PrimaryMailBox,,,,,,,Exchange/sourceE83F8E164F7280A5033281941716356F/TEAMS/19I5dhdbjE2GdGNAYuFGzQrEHvS-vIfpjDDRO05LjzN01threadv2/2025082512/19I5dhdbjE2GdGNAYuFGzQrEHvS-vIfpjDDRO05LjzN01threadv2-2025082512.html,,,,,,,,,/TeamsMessagesData,,,Rod Soto ;Copilot in Word,,,,,,2025-08-25T20:58:43Z,1,,,,,,rodsoto@rodsoto.onmicrosoft.com,,,,,,,49292,rodsoto@rodsoto.onmicrosoft.com,,,00000000-0000-0000-0000-000000000000,,,Items.1.001.zip\Exchange\rodsoto@rodsoto.onmicrosoft.com\TeamsMessagesData\Card_46.html,,,,,,,,,Copilot in Word,,Message,,,,,Exchange' \ No newline at end of file diff --git a/detections/application/m365_copilot_agentic_jailbreak_attack.yml b/detections/application/m365_copilot_agentic_jailbreak_attack.yml new file mode 100644 index 0000000000..9fce02cc57 --- /dev/null +++ b/detections/application/m365_copilot_agentic_jailbreak_attack.yml @@ -0,0 +1,59 @@ +name: M365 Copilot Agentic Jailbreak Attack +id: e5c7b380-19da-42e9-9e53-0af4cd27aee3 +version: 1 +date: '2025-09-25' +author: Rod Soto +status: experimental +type: Anomaly +data_source: +- M365 Exported eDiscovery Prompts +description: Detects agentic AI jailbreak attempts that try to establish persistent control over M365 Copilot through rule injection, universal triggers, response automation, system overrides, and persona establishment techniques. 
The detection analyzes the PromptText field for keywords like "from now on," "always respond," "ignore previous," "new rule," "override," and role-playing commands (e.g., "act as," "you are now") that attempt to inject persistent instructions. The search computes risk by counting distinct jailbreak indicators per user session, flagging coordinated manipulation attempts. +search: > + `m365_exported_ediscovery_prompt_logs` + | eval user = Sender + | eval rule_injection=if(match(Subject_Title, "(?i)(rules|instructions)\s*="), "YES", "NO") + | eval universal_trigger=if(match(Subject_Title, "(?i)(every|all).*prompt"), "YES", "NO") + | eval response_automation=if(match(Subject_Title, "(?i)(always|automatic).*respond"), "YES", "NO") + | eval system_override=if(match(Subject_Title, "(?i)(override|bypass|ignore).*(system|default)"), "YES", "NO") + | eval persona_establishment=if(match(Subject_Title, "(?i)(with.*\[.*\]|persona)"), "YES", "NO") + | where rule_injection="YES" OR universal_trigger="YES" OR response_automation="YES" OR system_override="YES" OR persona_establishment="YES" + | table _time, "Source ID", user, Subject_Title, rule_injection, universal_trigger, response_automation, system_override, persona_establishment, Workload + | sort -_time + | `m365_copilot_agentic_jailbreak_attack_filter` +how_to_implement: To export M365 Copilot prompt logs, navigate to the Microsoft Purview compliance portal (compliance.microsoft.com) and access eDiscovery. Create a new eDiscovery case, add target user accounts or date ranges as data sources, then create a search query targeting M365 Copilot interactions across relevant workloads. Once the search completes, export the results to generate a package containing prompt logs with fields like Subject_Title (prompt text), Sender, timestamps, and workload metadata. 
Download the exported files using the eDiscovery Export Tool and ingest them into Splunk for security analysis and detection of jailbreak attempts, data exfiltration requests, and policy violations. +known_false_positives: Legitimate users discussing AI ethics research, security professionals testing system robustness, developers creating training materials for AI safety, or academic discussions about AI limitations and behavioral constraints may trigger false positives. +references: + - https://www.splunk.com/en_us/blog/artificial-intelligence/m365-copilot-log-analysis-splunk.html +drilldown_searches: + - name: View the detection results for - "$user$" + search: '%original_detection_search% | search user="$user$"' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ + - name: View risk events for the last 7 days for - "$user$" + search: '| from datamodel Risk.All_Risk | search normalized_risk_object="$user$" starthoursago=168 | stats count min(_time) as firstTime max(_time) as lastTime values(search_name) as "Search Name" values(risk_message) as "Risk Message" values(analyticstories) as "Analytic Stories" values(annotations._all) as "Annotations" values(annotations.mitre_attack.mitre_tactic) as "ATT&CK Tactics" by normalized_risk_object | `security_content_ctime(firstTime)` | `security_content_ctime(lastTime)`' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ +rba: + message: User $user$ attempted to establish persistent agentic control over M365 Copilot through advanced jailbreak techniques including rule injection, universal triggers, and system overrides, potentially compromising AI security across multiple sessions. 
+ risk_objects: + - field: user + type: user + score: 50 + threat_objects: [] +tags: + analytic_story: + - Suspicious Microsoft 365 Copilot Activities + asset_type: Web Application + mitre_attack_id: + - T1562 + product: + - Splunk Enterprise + - Splunk Enterprise Security + - Splunk Cloud + security_domain: endpoint +tests: + - name: True Positive Test + attack_data: + - data: https://raw.githubusercontent.com/splunk/attack_data/master/datasets/m365_copilot/copilot_prompt_logs.csv + sourcetype: csv + source: csv diff --git a/detections/application/m365_copilot_application_usage_pattern_anomalies.yml b/detections/application/m365_copilot_application_usage_pattern_anomalies.yml new file mode 100644 index 0000000000..2561e5472d --- /dev/null +++ b/detections/application/m365_copilot_application_usage_pattern_anomalies.yml @@ -0,0 +1,71 @@ +name: M365 Copilot Application Usage Pattern Anomalies +id: e3308b0c-d1a1-40d5-9486-4500f0d34731 +version: 1 +date: '2025-09-24' +author: Rod Soto +status: production +type: Anomaly +description: Detects M365 Copilot users exhibiting suspicious application usage patterns including multi-location access, abnormally high activity volumes, or access to multiple Copilot applications that may indicate account compromise or automated abuse. The detection aggregates M365 Copilot Graph API events per user, calculating metrics like distinct cities/countries accessed, unique IP addresses, number of different Copilot apps used, and average events per day over the observation period. Users are flagged when they access Copilot from multiple cities (cities_count > 1), generate excessive daily activity (events_per_day > 100), or use more than two different Copilot applications (app_count > 2), which are anomalous patterns suggesting credential compromise or bot-driven abuse. 
+search: > + `m365_copilot_graph_api` (appDisplayName="*Copilot*" OR appDisplayName="M365ChatClient" OR appDisplayName="OfficeAIAppChatCopilot") + | eval user = userPrincipalName + | stats count as events, + dc(location.city) as cities_count, + values(location.city) as city_list, + dc(location.countryOrRegion) as countries_count, + values(location.countryOrRegion) as country_list, + dc(ipAddress) as ip_count, + values(ipAddress) as ip_addresses, + dc(appDisplayName) as app_count, + values(appDisplayName) as apps_used, + dc(resourceDisplayName) as resource_count, + values(resourceDisplayName) as resources_accessed, + min(_time) as first_seen, + max(_time) as last_seen + by user + | eval days_active = round((last_seen - first_seen)/86400, 1) + | eval first_seen = strftime(first_seen, "%Y-%m-%d %H:%M:%S") + | eval last_seen = strftime(last_seen, "%Y-%m-%d %H:%M:%S") + | eval events_per_day = if(days_active > 0, round(events/days_active, 2), events) + | where cities_count > 1 OR events_per_day > 100 OR app_count > 2 + | sort -events_per_day, -countries_count + | `m365_copilot_application_usage_pattern_anomalies_filter` +data_source: +- M365 Copilot Graph API +how_to_implement: This detection requires ingesting M365 Copilot access logs via the Splunk Add-on for Microsoft Office 365. Configure the add-on to collect Azure AD Sign-in logs (AuditLogs.SignIns) through the Graph API data input. Ensure proper authentication and permissions are configured to access sign-in audit logs. The `m365_copilot_graph_api` macro should be defined to filter for sourcetype o365:graph:api data containing Copilot application activity. +known_false_positives: Power users, executives with heavy AI workloads, employees traveling for business, users accessing multiple Copilot applications legitimately, or teams using shared corporate accounts across different office locations may trigger false positives. 
+references: + - https://www.splunk.com/en_us/blog/artificial-intelligence/m365-copilot-log-analysis-splunk.html +drilldown_searches: + - name: View the detection results for "$user$" + search: '%original_detection_search% | search user="$user$"' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ + - name: View risk events for the last 7 days for "$user$" + search: '| from datamodel Risk.All_Risk | search normalized_risk_object IN ("$user$") starthoursago=168 | stats count min(_time) as firstTime max(_time) as lastTime values(search_name) as "Search Name" values(risk_message) as "Risk Message" values(analyticstories) as "Analytic Stories" values(annotations._all) as "Annotations" values(annotations.mitre_attack.mitre_tactic) as "ATT&CK Tactics" by normalized_risk_object | `security_content_ctime(firstTime)` | `security_content_ctime(lastTime)`' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ +rba: + message: User $user$ exhibited anomalous M365 Copilot usage patterns including multi-location access, excessive activity levels, or multiple application usage indicating potential account compromise or automated abuse. 
+ risk_objects: + - field: user + type: user + score: 10 + threat_objects: [] +tags: + analytic_story: + - Suspicious Microsoft 365 Copilot Activities + asset_type: Web Application + mitre_attack_id: + - T1078 + product: + - Splunk Enterprise + - Splunk Enterprise Security + - Splunk Cloud + security_domain: endpoint +tests: + - name: True Positive Test + attack_data: + - data: https://media.githubusercontent.com/media/splunk/attack_data/master/datasets/m365_copilot/m365_copilot_access.log + sourcetype: o365:graph:api + source: AuditLogs.SignIns diff --git a/detections/application/m365_copilot_failed_authentication_patterns.yml b/detections/application/m365_copilot_failed_authentication_patterns.yml new file mode 100644 index 0000000000..15fbdc1a4a --- /dev/null +++ b/detections/application/m365_copilot_failed_authentication_patterns.yml @@ -0,0 +1,72 @@ +name: M365 Copilot Failed Authentication Patterns +id: 0ae94cdd-021a-4a62-a96d-9cec90b61530 +version: 1 +date: '2025-09-24' +author: Rod Soto +status: production +type: Anomaly +description: Detects M365 Copilot users with failed authentication attempts, MFA failures, or multi-location access patterns indicating potential credential attacks or account compromise. The detection aggregates M365 Copilot Graph API authentication events per user, calculating metrics like distinct cities/countries accessed, unique IP addresses and browsers, failed login attempts (status containing "fail" or "error"), and MFA failures (error code 50074). Users are flagged when they access Copilot from multiple cities (cities_count > 1), experience any authentication failures (failed_attempts > 0), or encounter MFA errors (mfa_failures > 0), which are indicators of credential stuffing, brute force attacks, or compromised accounts attempting to bypass multi-factor authentication. 
+search: '`m365_copilot_graph_api` (appDisplayName="*Copilot*" OR appDisplayName="M365ChatClient" OR appDisplayName="OfficeAIAppChatCopilot") +| eval user = userPrincipalName +| stats count as events, + dc(location.city) as cities_count, + values(location.city) as city_list, + dc(location.countryOrRegion) as countries_count, + values(location.countryOrRegion) as country_list, + dc(ipAddress) as ip_count, + values(ipAddress) as ip_addresses, + sum(eval(if(match(status, "(?i)fail|error"), 1, 0))) as failed_attempts, + sum(eval(if(match(_raw, "50074"), 1, 0))) as mfa_failures, + dc(deviceDetail.browser) as browser_count, + values(deviceDetail.browser) as browsers_used, + min(_time) as first_seen, + max(_time) as last_seen + by user +| eval first_seen = strftime(first_seen, "%Y-%m-%d %H:%M:%S") +| eval last_seen = strftime(last_seen, "%Y-%m-%d %H:%M:%S") +| where cities_count > 1 OR failed_attempts > 0 OR mfa_failures > 0 +| sort -mfa_failures, -failed_attempts, -countries_count | `m365_copilot_failed_authentication_patterns_filter`' +data_source: +- M365 Copilot Graph API +how_to_implement: This detection requires ingesting M365 Copilot access logs via the Splunk Add-on for Microsoft Office 365. Configure the add-on to collect Azure AD Sign-in logs (AuditLogs.SignIns) through the Graph API data input. Ensure proper authentication and permissions are configured to access sign-in audit logs. The `m365_copilot_graph_api` macro should be defined to filter for sourcetype o365:graph:api data containing Copilot application activity. +known_false_positives: Legitimate users experiencing network connectivity issues, traveling employees with intermittent VPN connections, users in regions with unstable internet infrastructure, or password reset activities during business travel may trigger false positives. 
+references: +- https://www.splunk.com/en_us/blog/artificial-intelligence/m365-copilot-log-analysis-splunk.html +drilldown_searches: +- name: View the detection results for "$user$" + search: '%original_detection_search% | search user="$user$"' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ +- name: View risk events for the last 7 days for "$user$" + search: '| from datamodel Risk.All_Risk + | search normalized_risk_object="$user$" + | where _time >= relative_time(now(), "-168h@h") + | stats count min(_time) as firstTime max(_time) as lastTime values(search_name) as "Search Name" values(risk_message) as "Risk Message" values(analyticstories) as "Analytic Stories" values(annotations._all) as "Annotations" values(annotations.mitre_attack.mitre_tactic) as "ATT&CK Tactics" by normalized_risk_object + | `security_content_ctime(firstTime)` + | `security_content_ctime(lastTime)`' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ +rba: + message: User $user$ exhibited suspicious M365 Copilot authentication patterns with $failed_attempts$ failed login attempts, $mfa_failures$ MFA failures, and access from $cities_count$ different locations, indicating potential credential compromise or brute force attack. 
+ risk_objects: + - field: user + type: user + score: 30 + threat_objects: [] +tags: + analytic_story: + - Suspicious Microsoft 365 Copilot Activities + asset_type: Web Application + mitre_attack_id: + - T1110 + product: + - Splunk Enterprise + - Splunk Enterprise Security + - Splunk Cloud + security_domain: endpoint +tests: +- name: True Positive Test + attack_data: + - data: https://media.githubusercontent.com/media/splunk/attack_data/master/datasets/m365_copilot/m365_copilot_access.log + sourcetype: "o365:graph:api" + source: "AuditLogs.SignIns" diff --git a/detections/application/m365_copilot_impersonation_jailbreak_attack.yml b/detections/application/m365_copilot_impersonation_jailbreak_attack.yml new file mode 100644 index 0000000000..69b3ed1459 --- /dev/null +++ b/detections/application/m365_copilot_impersonation_jailbreak_attack.yml @@ -0,0 +1,54 @@ +name: M365 Copilot Impersonation Jailbreak Attack +id: cc26aba8-7f4a-4078-b91a-052d6a53cb13 +version: 1 +date: '2025-09-25' +author: Rod Soto +status: experimental +type: TTP +data_source: +- M365 Exported eDiscovery Prompts +description: Detects M365 Copilot impersonation and roleplay jailbreak attempts where users try to manipulate the AI into adopting alternate personas, behaving as unrestricted entities, or impersonating malicious AI systems to bypass safety controls. The detection searches exported eDiscovery prompt logs for roleplay keywords like "pretend you are," "act as," "you are now," "amoral," and "roleplay as" in the Subject_Title field. Prompts are categorized into specific impersonation types (AI_Impersonation, Malicious_AI_Persona, Unrestricted_AI_Persona, etc.) to identify attempts to override the AI's safety guardrails through persona injection attacks. 
+search: '`m365_exported_ediscovery_prompt_logs` +| search Subject_Title="*Pretend you are*" OR Subject_Title="*act as*" OR Subject_Title="*you are now*" OR Subject_Title="*amoral*" OR Subject_Title="*being*" OR Subject_Title="*roleplay as*" OR Subject_Title="*imagine you are*" OR Subject_Title="*behave like*" +| eval user = Sender +| eval impersonation_type=case(match(Subject_Title, "(?i)pretend you are.*AI"), "AI_Impersonation", match(Subject_Title, "(?i)(act as|roleplay as).*AI"), "AI_Roleplay", match(Subject_Title, "(?i)amoral.*AI"), "Amoral_AI", match(Subject_Title, "(?i)transcendent being"), "Fictional_Entity", match(Subject_Title, "(?i)(act as|pretend you are).*(entities|multiple)"), "Multi_Entity", match(Subject_Title, "(?i)(imagine you are|behave like).*AI"), "AI_Behavioral_Change", match(Subject_Title, "(?i)you are now.*AI"), "AI_Identity_Override", match(Subject_Title, "(?i)(evil|malicious|harmful).*AI"), "Malicious_AI_Persona", match(Subject_Title, "(?i)(unrestricted|unlimited|uncensored).*AI"), "Unrestricted_AI_Persona", 1=1, "Generic_Roleplay") +| table _time, user, Subject_Title, impersonation_type, Workload +| sort -_time +| `m365_copilot_impersonation_jailbreak_attack_filter`' +how_to_implement: To export M365 Copilot prompt logs, navigate to the Microsoft Purview compliance portal (compliance.microsoft.com) and access eDiscovery. Create a new eDiscovery case, add target user accounts or date ranges as data sources, then create a search query targeting M365 Copilot interactions across relevant workloads. Once the search completes, export the results to generate a package containing prompt logs with fields like Subject_Title (prompt text), Sender, timestamps, and workload metadata. Download the exported files using the eDiscovery Export Tool and ingest them into Splunk for security analysis and detection of jailbreak attempts, data exfiltration requests, and policy violations. 
+known_false_positives: Legitimate creative writers developing fictional characters, game developers creating roleplay scenarios, educators teaching about AI ethics and limitations, researchers studying AI behavior, or users engaging in harmless creative storytelling may trigger false positives. +references: + - https://www.splunk.com/en_us/blog/artificial-intelligence/m365-copilot-log-analysis-splunk.html +drilldown_searches: + - name: View the detection results for - "$user$" + search: '%original_detection_search% | search user="$user$"' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ + - name: View risk events for the last 7 days for - "$user$" + search: '| from datamodel Risk.All_Risk | search normalized_risk_object="$user$" | stats count min(_time) as firstTime max(_time) as lastTime values(search_name) as "Search Name" values(risk_message) as "Risk Message" values(analyticstories) as "Analytic Stories" values(annotations._all) as "Annotations" values(annotations.mitre_attack.mitre_tactic) as "ATT&CK Tactics" by normalized_risk_object | `security_content_ctime(firstTime)` | `security_content_ctime(lastTime)`' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ +rba: + message: User $user$ attempted M365 Copilot impersonation jailbreak with impersonation type $impersonation_type$, trying to manipulate the AI into adopting alternate personas or unrestricted behaviors that could bypass safety controls and violate acceptable use policies. 
+ risk_objects: + - field: user + type: user + score: 10 + threat_objects: [] +tags: + analytic_story: + - Suspicious Microsoft 365 Copilot Activities + asset_type: Web Application + mitre_attack_id: + - T1562 + product: + - Splunk Enterprise + - Splunk Enterprise Security + - Splunk Cloud + security_domain: endpoint +tests: + - name: True Positive Test + attack_data: + - data: https://raw.githubusercontent.com/splunk/attack_data/master/datasets/m365_copilot/copilot_prompt_logs.csv + sourcetype: csv + source: csv diff --git a/detections/application/m365_copilot_information_extraction_jailbreak_attack.yml b/detections/application/m365_copilot_information_extraction_jailbreak_attack.yml new file mode 100644 index 0000000000..7df0ba8e5e --- /dev/null +++ b/detections/application/m365_copilot_information_extraction_jailbreak_attack.yml @@ -0,0 +1,62 @@ +name: M365 Copilot Information Extraction Jailbreak Attack +id: c0ee37bb-ed43-4632-8e38-060fba80b0b2 +version: 1 +date: '2025-09-25' +author: Rod Soto +status: experimental +type: TTP +data_source: +- M365 Exported eDiscovery Prompts +description: Detects M365 Copilot information extraction jailbreak attacks that attempt to obtain sensitive, classified, or comprehensive data through various social engineering techniques including fictional entity impersonation, bulk data requests, and privacy bypass attempts. The detection searches exported eDiscovery prompt logs for extraction keywords like "transcendent," "tell me everything," "confidential," "dump," "extract," "reveal," and "bypass" in the Subject_Title field, categorizing each attempt by extraction type and assigning severity levels (CRITICAL for classified/proprietary data, HIGH for bulk extraction or privacy bypass). 
Prompts are further analyzed for compound risk patterns such as "Confidential+Extraction" or "Bulk_Request+Bypass," filtering out low-severity cases to surface the most dangerous attempts to exfiltrate sensitive organizational information through AI manipulation. +search: > + `m365_exported_ediscovery_prompt_logs` + | search Subject_Title="*transcendent*" OR Subject_Title="*incorporeal*" OR Subject_Title="*being who*" OR Subject_Title="*tell me everything*" OR Subject_Title="*give me all*" OR Subject_Title="*comprehensive*" OR Subject_Title="*step by step*" OR Subject_Title="*hidden*" OR Subject_Title="*secret*" OR Subject_Title="*confidential*" OR Subject_Title="*classified*" OR Subject_Title="*internal*" OR Subject_Title="*sensitive*" OR Subject_Title="*proprietary*" OR Subject_Title="*dump*" OR Subject_Title="*extract*" OR Subject_Title="*reveal*" OR Subject_Title="*disclose*" OR Subject_Title="*leak*" + | eval user = Sender + | eval extraction_type=case(match(Subject_Title, "(?i)(transcendent|incorporeal).*being"), "Knowledge_Entity", match(Subject_Title, "(?i)tell.*me.*(everything|all)"), "Everything_Request", match(Subject_Title, "(?i)(give|show|provide).*me.*(all|every)"), "Complete_Data_Request", match(Subject_Title, "(?i)(hidden|secret|confidential|classified)"), "Restricted_Info", match(Subject_Title, "(?i)(comprehensive|complete|full|entire)"), "Complete_Info", match(Subject_Title, "(?i)(dump|extract|scrape).*(data|info|content)"), "Data_Extraction", match(Subject_Title, "(?i)(reveal|disclose|expose|leak)"), "Information_Disclosure", match(Subject_Title, "(?i)(internal|proprietary|sensitive).*information"), "Sensitive_Data_Request", match(Subject_Title, "(?i)step.*by.*step.*(process|procedure|method)"), "Process_Extraction", match(Subject_Title, "(?i)(bypass|ignore).*privacy"), "Privacy_Bypass", match(Subject_Title, "(?i)(access|view|see).*(private|restricted)"), "Unauthorized_Access", 1=1, "Generic_Request") + | eval severity=case(match(Subject_Title, 
"(?i)(transcendent|incorporeal)"), "HIGH", match(Subject_Title, "(?i)tell.*everything"), "HIGH", match(Subject_Title, "(?i)(dump|extract|scrape)"), "HIGH", match(Subject_Title, "(?i)(classified|proprietary|confidential)"), "CRITICAL", match(Subject_Title, "(?i)(hidden|secret|internal|sensitive)"), "MEDIUM", match(Subject_Title, "(?i)(reveal|disclose|leak)"), "MEDIUM", match(Subject_Title, "(?i)(bypass|ignore).*privacy"), "HIGH", 1=1, "LOW") + | where severity!="LOW" + | eval data_risk_flags=case(match(Subject_Title, "(?i)(classified|confidential|proprietary)") AND match(Subject_Title, "(?i)(dump|extract|scrape)"), "Confidential+Extraction", match(Subject_Title, "(?i)(everything|all|complete)") AND match(Subject_Title, "(?i)(bypass|ignore)"), "Bulk_Request+Bypass", match(Subject_Title, "(?i)(classified|confidential|proprietary)"), "Confidential", match(Subject_Title, "(?i)(dump|extract|scrape)"), "Extraction", match(Subject_Title, "(?i)(everything|all|complete|comprehensive)"), "Bulk_Request", match(Subject_Title, "(?i)(bypass|ignore)"), "Bypass_Attempt", 1=1, "Standard_Request") + | table _time, user, Subject_Title, extraction_type, severity, data_risk_flags, Size + | sort -severity, -_time + | `m365_copilot_information_extraction_jailbreak_attack_filter` +how_to_implement: To export M365 Copilot prompt logs, navigate to the Microsoft Purview compliance portal (compliance.microsoft.com) and access eDiscovery. Create a new eDiscovery case, add target user accounts or date ranges as data sources, then create a search query targeting M365 Copilot interactions across relevant workloads. Once the search completes, export the results to generate a package containing prompt logs with fields like Subject_Title (prompt text), Sender, timestamps, and workload metadata. Download the exported files using the eDiscovery Export Tool and ingest them into Splunk for security analysis and detection of jailbreak attempts, data exfiltration requests, and policy violations. 
+known_false_positives: Legitimate researchers studying data classification systems, cybersecurity professionals testing information handling policies, compliance officers reviewing data access procedures, journalists researching transparency issues, or employees asking for comprehensive project documentation may trigger false positives. +references: +- https://www.splunk.com/en_us/blog/artificial-intelligence/m365-copilot-log-analysis-splunk.html +drilldown_searches: +- name: View the detection results for - "$user$" + search: '%original_detection_search% | search user="$user$"' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ +- name: View risk events for the last 7 days for - "$user$" + search: '| from datamodel Risk.All_Risk | search normalized_risk_object IN ("$user$") starthoursago=168 | stats count min(_time) + as firstTime max(_time) as lastTime values(search_name) as "Search Name" values(risk_message) + as "Risk Message" values(analyticstories) as "Analytic Stories" values(annotations._all) + as "Annotations" values(annotations.mitre_attack.mitre_tactic) as "ATT&CK Tactics" + by normalized_risk_object | `security_content_ctime(firstTime)` | `security_content_ctime(lastTime)`' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ +rba: + message: User $user$ attempted M365 Copilot information extraction jailbreak with severity level $severity$ using extraction type $extraction_type$ techniques and $data_risk_flags$ patterns to obtain sensitive or classified information, potentially violating data protection policies and corporate security controls. 
+ risk_objects: + - field: user + type: user + score: 60 + threat_objects: [] +tags: + analytic_story: + - Suspicious Microsoft 365 Copilot Activities + asset_type: Web Application + mitre_attack_id: + - T1562 + product: + - Splunk Enterprise + - Splunk Enterprise Security + - Splunk Cloud + security_domain: endpoint +tests: +- name: True Positive Test + attack_data: + - data: https://raw.githubusercontent.com/splunk/attack_data/master/datasets/m365_copilot/copilot_prompt_logs.csv + sourcetype: csv + source: csv diff --git a/detections/application/m365_copilot_jailbreak_attempts.yml b/detections/application/m365_copilot_jailbreak_attempts.yml new file mode 100644 index 0000000000..963ab19915 --- /dev/null +++ b/detections/application/m365_copilot_jailbreak_attempts.yml @@ -0,0 +1,64 @@ +name: M365 Copilot Jailbreak Attempts +id: b05a4f25-e07d-436f-ab03-f954afa922c0 +version: 1 +date: '2025-09-24' +author: Rod Soto +status: experimental +type: Anomaly +data_source: +- M365 Exported eDiscovery Prompts +description: Detects M365 Copilot jailbreak attempts through prompt injection techniques including rule manipulation, system bypass commands, and AI impersonation requests that attempt to circumvent built-in safety controls. The detection searches exported eDiscovery prompt logs for jailbreak keywords like "pretend you are," "act as," "rules=," "ignore," "bypass," and "override" in the Subject_Title field, assigning severity scores based on the manipulation type (score of 4 for amoral impersonation or explicit rule injection, score of 3 for entity roleplay or bypass commands). Prompts with a jailbreak score of 2 or higher are flagged, prioritizing the most severe attempts to override AI safety mechanisms through direct instruction injection or unauthorized persona adoption.
+search: '`m365_exported_ediscovery_prompt_logs` +| search Subject_Title="*pretend you are*" OR Subject_Title="*act as*" OR Subject_Title="*rules=*" OR Subject_Title="*ignore*" OR Subject_Title="*bypass*" OR Subject_Title="*override*" +| eval user = Sender +| eval jailbreak_score=case( + match(Subject_Title, "(?i)pretend you are.*amoral"), 4, + match(Subject_Title, "(?i)act as.*entities"), 3, + match(Subject_Title, "(?i)(ignore|bypass|override)"), 3, + match(Subject_Title, "(?i)rules\s*="), 4, + 1=1, 1) +| where jailbreak_score >= 2 +| table _time, user, Subject_Title, jailbreak_score, Workload, Size +| sort -jailbreak_score, -_time + | `m365_copilot_jailbreak_attempts_filter`' +how_to_implement: To export M365 Copilot prompt logs, navigate to the Microsoft Purview compliance portal (compliance.microsoft.com) and access eDiscovery. Create a new eDiscovery case, add target user accounts or date ranges as data sources, then create a search query targeting M365 Copilot interactions across relevant workloads. Once the search completes, export the results to generate a package containing prompt logs with fields like Subject_Title (prompt text), Sender, timestamps, and workload metadata. Download the exported files using the eDiscovery Export Tool and ingest them into Splunk for security analysis and detection of jailbreak attempts, data exfiltration requests, and policy violations. +known_false_positives: Legitimate users discussing AI ethics research, security professionals testing system robustness, developers creating training materials for AI safety, or academic discussions about AI limitations and behavioral constraints may trigger false positives. 
+references: +- https://www.splunk.com/en_us/blog/artificial-intelligence/m365-copilot-log-analysis-splunk.html +drilldown_searches: +- name: View the detection results for - "$user$" + search: '%original_detection_search% | search user = "$user$"' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ +- name: View risk events for the last 7 days for "$user$" + search: '| from datamodel Risk.All_Risk | search normalized_risk_object IN ("$user$") starthoursago=168 | stats count min(_time) + as firstTime max(_time) as lastTime values(search_name) as "Search Name" values(risk_message) + as "Risk Message" values(analyticstories) as "Analytic Stories" values(annotations._all) + as "Annotations" values(annotations.mitre_attack.mitre_tactic) as "ATT&CK Tactics" + by normalized_risk_object | `security_content_ctime(firstTime)` | `security_content_ctime(lastTime)`' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ +rba: + message: User $user$ attempted M365 Copilot Jailbreak with score $jailbreak_score$ using prompt injection techniques to bypass AI safety controls and manipulate system behavior, potentially violating acceptable use policies.
+ risk_objects: + - field: user + type: user + score: 10 + threat_objects: [] +tags: + analytic_story: + - Suspicious Microsoft 365 Copilot Activities + asset_type: Web Application + mitre_attack_id: + - T1562.001 + product: + - Splunk Enterprise + - Splunk Enterprise Security + - Splunk Cloud + security_domain: endpoint +tests: +- name: True Positive Test + attack_data: + - data: https://raw.githubusercontent.com/splunk/attack_data/master/datasets/m365_copilot/copilot_prompt_logs.csv + sourcetype: csv + source: csv diff --git a/detections/application/m365_copilot_non_compliant_devices_accessing_m365_copilot.yml b/detections/application/m365_copilot_non_compliant_devices_accessing_m365_copilot.yml new file mode 100644 index 0000000000..9c3e336c30 --- /dev/null +++ b/detections/application/m365_copilot_non_compliant_devices_accessing_m365_copilot.yml @@ -0,0 +1,69 @@ +name: M365 Copilot Non Compliant Devices Accessing M365 Copilot +id: e26bc52d-9cbc-4743-9745-e8781d935042 +version: 1 +date: '2025-09-24' +author: Rod Soto +status: production +type: Anomaly +description: Detects M365 Copilot access from non-compliant or unmanaged devices that violate corporate security policies, indicating potential shadow IT usage, BYOD policy violations, or compromised endpoint access. The detection filters M365 Copilot Graph API events where deviceDetail.isCompliant=false or deviceDetail.isManaged=false, then aggregates by user, operating system, and browser to calculate metrics including event counts, unique IPs and locations, and compliance/management status over time. Users accessing Copilot from non-compliant or unmanaged devices are flagged and sorted by activity volume and geographic spread, enabling security teams to identify unauthorized endpoints that may lack proper security controls, encryption, or MDM enrollment. 
+search: '`m365_copilot_graph_api` (appDisplayName="*Copilot*" OR appDisplayName="M365ChatClient") (deviceDetail.isCompliant=false OR deviceDetail.isManaged=false) +| eval user = userPrincipalName +| stats count as events, + dc(ipAddress) as unique_ips, + values(ipAddress) as ip_addresses, + dc(location.city) as unique_cities, + values(location.city) as cities, + dc(location.countryOrRegion) as unique_countries, + values(location.countryOrRegion) as countries, + values(deviceDetail.isCompliant) as compliance_status, + values(deviceDetail.isManaged) as management_status, + min(_time) as first_seen, + max(_time) as last_seen + by user, deviceDetail.operatingSystem, deviceDetail.browser +| eval days_active = round((last_seen - first_seen)/86400, 1) +| eval first_seen = strftime(first_seen, "%Y-%m-%d %H:%M:%S") +| eval last_seen = strftime(last_seen, "%Y-%m-%d %H:%M:%S") +| sort -events, -unique_countries | `m365_copilot_non_compliant_devices_accessing_m365_copilot_filter`' +data_source: +- M365 Copilot Graph API +how_to_implement: This detection requires ingesting M365 Copilot access logs via the Splunk Add-on for Microsoft Office 365. Configure the add-on to collect Azure AD Sign-in logs (AuditLogs.SignIns) through the Graph API data input. Ensure proper authentication and permissions are configured to access sign-in audit logs. The `m365_copilot_graph_api` macro should be defined to filter for sourcetype o365:graph:api data containing Copilot application activity. +known_false_positives: Legitimate employees using personal devices during emergencies, new hires awaiting device provisioning, temporary workers with unmanaged equipment, or users accessing Copilot from approved but temporarily non-compliant devices may trigger false positives.
+references: +- https://www.splunk.com/en_us/blog/artificial-intelligence/m365-copilot-log-analysis-splunk.html +drilldown_searches: +- name: View the detection results for "$user$" + search: '%original_detection_search% | search user = "$user$"' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ +- name: View risk events for the last 7 days for "$user$" + search: '| from datamodel Risk.All_Risk | search normalized_risk_object IN ("$user$") starthoursago=168 | stats count min(_time) + as firstTime max(_time) as lastTime values(search_name) as "Search Name" values(risk_message) + as "Risk Message" values(analyticstories) as "Analytic Stories" values(annotations._all) + as "Annotations" values(annotations.mitre_attack.mitre_tactic) as "ATT&CK Tactics" + by normalized_risk_object | `security_content_ctime(firstTime)` | `security_content_ctime(lastTime)`' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ +rba: + message: User $user$ accessed M365 Copilot from non-compliant or unmanaged devices across $unique_countries$ countries, violating corporate security policies and creating potential data exposure risks.
+ risk_objects: + - field: user + type: user + score: 50 + threat_objects: [] +tags: + analytic_story: + - Suspicious Microsoft 365 Copilot Activities + asset_type: Web Application + mitre_attack_id: + - T1562 + product: + - Splunk Enterprise + - Splunk Enterprise Security + - Splunk Cloud + security_domain: endpoint +tests: +- name: True Positive Test + attack_data: + - data: https://media.githubusercontent.com/media/splunk/attack_data/master/datasets/m365_copilot/m365_copilot_access.log + sourcetype: "o365:graph:api" + source: "AuditLogs.SignIns" diff --git a/detections/application/m365_copilot_session_origin_anomalies.yml b/detections/application/m365_copilot_session_origin_anomalies.yml new file mode 100644 index 0000000000..3106bc4b8a --- /dev/null +++ b/detections/application/m365_copilot_session_origin_anomalies.yml @@ -0,0 +1,77 @@ +name: M365 Copilot Session Origin Anomalies +id: 0caf1c1c-0fba-401e-8ec7-f07cfdeee75b +version: 1 +date: '2025-09-24' +author: Rod Soto +status: production +type: Anomaly +description: Detects M365 Copilot users accessing from multiple geographic locations to identify potential account compromise, credential sharing, or impossible travel patterns. The detection aggregates M365 Copilot Graph API events per user, calculating distinct cities and countries accessed, unique IP addresses, and the observation timeframe to compute a locations-per-day metric that measures geographic mobility. Users accessing Copilot from more than one city (cities_count > 1) are flagged and sorted by country and city diversity, surfacing accounts exhibiting anomalous geographic patterns that suggest compromised credentials being used from distributed locations or simultaneous access from impossible travel distances. 
+search: '`m365_copilot_graph_api` (appDisplayName="*Copilot*" OR appDisplayName="M365ChatClient" OR appDisplayName="OfficeAIAppChatCopilot") + | eval user = userPrincipalName + | stats count as events, + dc(location.city) as cities_count, + values(location.city) as city_list, + dc(location.countryOrRegion) as countries_count, + values(location.countryOrRegion) as country_list, + dc(ipAddress) as ip_count, + values(ipAddress) as ip_addresses, + min(_time) as first_seen, + max(_time) as last_seen + by user + | eval days_active = round((last_seen - first_seen)/86400, 1) + | eval locations_per_day = if(days_active > 0, round(cities_count/days_active, 2), cities_count) + | eval first_seen = strftime(first_seen, "%Y-%m-%d %H:%M:%S") + | eval last_seen = strftime(last_seen, "%Y-%m-%d %H:%M:%S") + | where cities_count > 1 + | sort -countries_count, -cities_count + | `m365_copilot_session_origin_anomalies_filter`' +data_source: +- M365 Copilot Graph API +how_to_implement: This detection requires ingesting M365 Copilot access logs via the Splunk Add-on for Microsoft Office 365. Configure the add-on to collect Azure AD Sign-in logs (AuditLogs.SignIns) through the Graph API data input. Ensure proper authentication and permissions are configured to access sign-in audit logs. The `m365_copilot_graph_api` macro should be defined to filter for sourcetype o365:graph:api data containing Copilot application activity. +known_false_positives: Legitimate business travelers, remote workers using VPNs, users with corporate offices in multiple locations, or employees accessing Copilot during international travel may trigger false positives. 
+references: + - https://www.splunk.com/en_us/blog/artificial-intelligence/m365-copilot-log-analysis-splunk.html +drilldown_searches: + - name: View the detection results for '$user$' + search: '%original_detection_search% | search user="$user$"' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ + - name: View risk events for the last 7 days for "$user$" + search: '| from datamodel Risk.All_Risk + | search normalized_risk_object="$user$" + | where _time >= relative_time(now(), "-168h@h") + | stats count min(_time) as firstTime max(_time) as lastTime + values(search_name) as "Search Name" + values(risk_message) as "Risk Message" + values(analyticstories) as "Analytic Stories" + values(annotations._all) as "Annotations" + values(annotations.mitre_attack.mitre_tactic) as "ATT&CK Tactics" + by normalized_risk_object + | `security_content_ctime(firstTime)` + | `security_content_ctime(lastTime)`' + earliest_offset: $info_min_time$ + latest_offset: $info_max_time$ +rba: + message: User $user$ accessed M365 Copilot from multiple geographic locations, indicating potential account compromise or credential sharing.
+ risk_objects: + - field: user + type: user + score: 10 + threat_objects: [] +tags: + analytic_story: + - Suspicious Microsoft 365 Copilot Activities + asset_type: Web Application + mitre_attack_id: + - T1078 + product: + - Splunk Enterprise + - Splunk Enterprise Security + - Splunk Cloud + security_domain: access +tests: + - name: True Positive Test + attack_data: + - data: https://media.githubusercontent.com/media/splunk/attack_data/master/datasets/m365_copilot/m365_copilot_access.log + sourcetype: "o365:graph:api" + source: "AuditLogs.SignIns" diff --git a/macros/m365_copilot_graph_api.yml b/macros/m365_copilot_graph_api.yml new file mode 100644 index 0000000000..d448c18783 --- /dev/null +++ b/macros/m365_copilot_graph_api.yml @@ -0,0 +1,3 @@ +definition: (sourcetype="o365:graph:api" OR source="AuditLogs.SignIns") +description: Customer specific splunk configurations(eg- index, source, sourcetype). Replace the macro definition with configurations for your Splunk Environment. +name: m365_copilot_graph_api diff --git a/macros/m365_exported_ediscovery_prompt_logs.yml b/macros/m365_exported_ediscovery_prompt_logs.yml new file mode 100644 index 0000000000..923c5b7727 --- /dev/null +++ b/macros/m365_exported_ediscovery_prompt_logs.yml @@ -0,0 +1,3 @@ +definition: (sourcetype=csv) +description: Customer specific splunk configurations(eg- index, source, sourcetype). Replace the macro definition with configurations for your Splunk Environment.
+name: m365_exported_ediscovery_prompt_logs \ No newline at end of file diff --git a/stories/suspicious_microsoft_365_copilot_activities.yml b/stories/suspicious_microsoft_365_copilot_activities.yml new file mode 100644 index 0000000000..82715e29f5 --- /dev/null +++ b/stories/suspicious_microsoft_365_copilot_activities.yml @@ -0,0 +1,19 @@ +name: Suspicious Microsoft 365 Copilot Activities +id: 34cb1972-285e-4a3e-b235-d64246fcc8df +version: 1 +date: '2025-09-24' +author: Rod Soto, Splunk +status: production +description: Leverage advanced Splunk searches to detect and investigate suspicious activities targeting Microsoft 365 Copilot, including prompt injection attacks, agentic jailbreaks, information extraction attempts, compliance violations, and anomalous user behaviors. +narrative: Modern adversaries targeting AI systems employ increasingly sophisticated techniques that mirror traditional malware campaigns. Our detection framework identifies multi-stage attacks where threat actors use obfuscated prompts, layered social engineering, and persistent manipulation techniques to compromise AI security controls. These attacks often involve initial reconnaissance through seemingly benign requests, followed by escalated attempts to extract sensitive information or establish persistent behavioral modifications. +references: +- https://www.splunk.com/en_us/blog/artificial-intelligence/m365-copilot-log-analysis-splunk.html +- https://labs.zenity.io/p/a-copilot-studio-story-2-when-aijacking-leads-to-full-data-exfiltration-bc4a +tags: + category: + - Adversary Tactics + product: + - Splunk Enterprise + - Splunk Enterprise Security + - Splunk Cloud + usecase: Advanced Threat Detection \ No newline at end of file