diff --git a/.github/workflows/appinspect.yml b/.github/workflows/appinspect.yml
index 3ff579003a..d5a5bf273f 100644
--- a/.github/workflows/appinspect.yml
+++ b/.github/workflows/appinspect.yml
@@ -16,14 +16,14 @@ jobs:
with:
python-version: '3.11'
architecture: 'x64'
-
+
- name: Install Python Dependencies and ContentCTL and Atomic Red Team
run: |
echo "- Contentctl version - $(cat requirements.txt)"
pip install -r requirements.txt
git clone --depth=1 --single-branch --branch=master https://github.com/redcanaryco/atomic-red-team.git external_repos/atomic-red-team
- git clone --depth=1 --single-branch --branch=master https://github.com/mitre/cti external_repos/cti
-
+ git clone --depth=1 --single-branch --branch="ATT&CK-v18.1" https://github.com/mitre/cti external_repos/cti
+
- name: Running appinspect with enrichments
env:
APPINSPECTUSERNAME: "${{ secrets.APPINSPECTUSERNAME }}"
@@ -42,4 +42,4 @@ jobs:
name: content-latest
path: |
artifacts/DA-ESS-ContentUpdate-latest.tar.gz
- artifacts/app_inspect_report
\ No newline at end of file
+ artifacts/app_inspect_report
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 8c3e390b4b..f7b3f17fe9 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -11,19 +11,19 @@ jobs:
steps:
- name: Check out the repository code
uses: actions/checkout@v6
-
+
- uses: actions/setup-python@v6
with:
python-version: '3.11'
architecture: 'x64'
-
+
- name: Install Python Dependencies and ContentCTL and Atomic Red Team
run: |
echo "- Contentctl version - $(cat requirements.txt)"
pip install -r requirements.txt
git clone --depth=1 --single-branch --branch=master https://github.com/redcanaryco/atomic-red-team.git external_repos/atomic-red-team
- git clone --depth=1 --single-branch --branch=master https://github.com/mitre/cti external_repos/cti
-
+ git clone --depth=1 --single-branch --branch="ATT&CK-v18.1" https://github.com/mitre/cti external_repos/cti
+
- name: Running build with enrichments
run: |
contentctl build --enrichments --enforce_deprecation_mapping_requirement
@@ -36,4 +36,4 @@ jobs:
name: content-latest
path: |
artifacts/DA-ESS-ContentUpdate-latest.tar.gz
- dist/api
\ No newline at end of file
+ dist/api
diff --git a/playbooks/AD_LDAP_Account_Locking.json b/playbooks/AD_LDAP_Account_Locking.json
index 83a81d777f..580ba99770 100644
--- a/playbooks/AD_LDAP_Account_Locking.json
+++ b/playbooks/AD_LDAP_Account_Locking.json
@@ -261,7 +261,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.10",
"version": "6.0.1.123902"
},
diff --git a/playbooks/AD_LDAP_Account_Unlocking.json b/playbooks/AD_LDAP_Account_Unlocking.json
index 8f0ae743f7..725f9b1bc9 100644
--- a/playbooks/AD_LDAP_Account_Unlocking.json
+++ b/playbooks/AD_LDAP_Account_Unlocking.json
@@ -261,7 +261,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.10",
"version": "6.0.1.123902"
},
diff --git a/playbooks/AD_LDAP_Entity_Attribute_Lookup.json b/playbooks/AD_LDAP_Entity_Attribute_Lookup.json
index b7b18a7fcc..d22d920e4c 100644
--- a/playbooks/AD_LDAP_Entity_Attribute_Lookup.json
+++ b/playbooks/AD_LDAP_Entity_Attribute_Lookup.json
@@ -664,7 +664,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/AWS_IAM_Account_Locking.json b/playbooks/AWS_IAM_Account_Locking.json
index 2df2c985c2..d04478cd67 100644
--- a/playbooks/AWS_IAM_Account_Locking.json
+++ b/playbooks/AWS_IAM_Account_Locking.json
@@ -265,7 +265,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/AWS_IAM_Account_Unlocking.json b/playbooks/AWS_IAM_Account_Unlocking.json
index 7a95cc3f68..666606fbc5 100644
--- a/playbooks/AWS_IAM_Account_Unlocking.json
+++ b/playbooks/AWS_IAM_Account_Unlocking.json
@@ -266,7 +266,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.10",
"version": "6.0.1.123902"
},
diff --git a/playbooks/Active_Directory_Disable_Account_Dispatch.json b/playbooks/Active_Directory_Disable_Account_Dispatch.json
index 1a882e29d0..24f82bfb22 100644
--- a/playbooks/Active_Directory_Disable_Account_Dispatch.json
+++ b/playbooks/Active_Directory_Disable_Account_Dispatch.json
@@ -585,7 +585,7 @@
"input_spec": null,
"output_spec": null,
"playbook_type": "automation",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.10",
"version": "6.0.1.123902"
},
diff --git a/playbooks/Active_Directory_Enable_Account_Dispatch.json b/playbooks/Active_Directory_Enable_Account_Dispatch.json
index 7bb69e8919..2b65f70602 100644
--- a/playbooks/Active_Directory_Enable_Account_Dispatch.json
+++ b/playbooks/Active_Directory_Enable_Account_Dispatch.json
@@ -595,7 +595,7 @@
"input_spec": null,
"output_spec": null,
"playbook_type": "automation",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.10",
"version": "6.0.1.123902"
},
diff --git a/playbooks/Attribute_Lookup_Dispatch.json b/playbooks/Attribute_Lookup_Dispatch.json
index 0c444766be..5c368ce8f0 100644
--- a/playbooks/Attribute_Lookup_Dispatch.json
+++ b/playbooks/Attribute_Lookup_Dispatch.json
@@ -365,7 +365,7 @@
}
},
"playbookName": "dispatch_input_playbooks",
- "playbookRepo": 3,
+ "playbookRepo": 1,
"playbookRepoName": "community",
"playbookType": "data",
"synchronous": true,
@@ -778,7 +778,7 @@
"input_spec": null,
"output_spec": null,
"playbook_type": "automation",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/Automated_Enrichment.json b/playbooks/Automated_Enrichment.json
index 0a22707539..8926852350 100644
--- a/playbooks/Automated_Enrichment.json
+++ b/playbooks/Automated_Enrichment.json
@@ -220,7 +220,7 @@
"input_spec": null,
"output_spec": null,
"playbook_type": "automation",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.10",
"version": "6.1.0.131"
},
diff --git a/playbooks/Azure_AD_Account_Locking.json b/playbooks/Azure_AD_Account_Locking.json
index 29c3415252..62114f7a0f 100644
--- a/playbooks/Azure_AD_Account_Locking.json
+++ b/playbooks/Azure_AD_Account_Locking.json
@@ -256,7 +256,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.10",
"version": "6.0.1.123902"
},
diff --git a/playbooks/Azure_AD_Account_Unlocking.json b/playbooks/Azure_AD_Account_Unlocking.json
index a102fedcdd..78a11a9275 100644
--- a/playbooks/Azure_AD_Account_Unlocking.json
+++ b/playbooks/Azure_AD_Account_Unlocking.json
@@ -256,7 +256,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.10",
"version": "6.0.1.123902"
},
diff --git a/playbooks/Azure_AD_Graph_User_Attribute_Lookup.json b/playbooks/Azure_AD_Graph_User_Attribute_Lookup.json
index d3d34d968d..f3272b74dc 100644
--- a/playbooks/Azure_AD_Graph_User_Attribute_Lookup.json
+++ b/playbooks/Azure_AD_Graph_User_Attribute_Lookup.json
@@ -259,7 +259,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/Cisco_Umbrella_DNS_Denylisting.json b/playbooks/Cisco_Umbrella_DNS_Denylisting.json
index 622115d8a9..facfcfbf78 100644
--- a/playbooks/Cisco_Umbrella_DNS_Denylisting.json
+++ b/playbooks/Cisco_Umbrella_DNS_Denylisting.json
@@ -250,7 +250,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.10",
"version": "6.1.0.131"
},
diff --git a/playbooks/CrowdStrike_OAuth_API_Device_Attribute_Lookup.json b/playbooks/CrowdStrike_OAuth_API_Device_Attribute_Lookup.json
index 36d273e094..cee82e0d20 100644
--- a/playbooks/CrowdStrike_OAuth_API_Device_Attribute_Lookup.json
+++ b/playbooks/CrowdStrike_OAuth_API_Device_Attribute_Lookup.json
@@ -587,7 +587,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/CrowdStrike_OAuth_API_Dynamic_Analysis.json b/playbooks/CrowdStrike_OAuth_API_Dynamic_Analysis.json
index 7e276adf27..d4c7c1d0ca 100644
--- a/playbooks/CrowdStrike_OAuth_API_Dynamic_Analysis.json
+++ b/playbooks/CrowdStrike_OAuth_API_Dynamic_Analysis.json
@@ -803,7 +803,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/CrowdStrike_OAuth_API_Endpoint_Analysis.json b/playbooks/CrowdStrike_OAuth_API_Endpoint_Analysis.json
index efa5626ad0..cd1d0926ca 100644
--- a/playbooks/CrowdStrike_OAuth_API_Endpoint_Analysis.json
+++ b/playbooks/CrowdStrike_OAuth_API_Endpoint_Analysis.json
@@ -775,7 +775,7 @@
],
"playbook_trigger": "artifact_created",
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.15",
"version": "6.3.1.178"
},
diff --git a/playbooks/CrowdStrike_OAuth_API_Executable_Denylisting.json b/playbooks/CrowdStrike_OAuth_API_Executable_Denylisting.json
index 9e94bdc1ae..16fcd99aa0 100644
--- a/playbooks/CrowdStrike_OAuth_API_Executable_Denylisting.json
+++ b/playbooks/CrowdStrike_OAuth_API_Executable_Denylisting.json
@@ -388,7 +388,7 @@
],
"playbook_trigger": "artifact_created",
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.15",
"version": "6.3.1.178"
},
diff --git a/playbooks/CrowdStrike_OAuth_API_File_Collection.json b/playbooks/CrowdStrike_OAuth_API_File_Collection.json
index b14acec4d3..8935c0f1e6 100644
--- a/playbooks/CrowdStrike_OAuth_API_File_Collection.json
+++ b/playbooks/CrowdStrike_OAuth_API_File_Collection.json
@@ -862,7 +862,7 @@
],
"playbook_trigger": "artifact_created",
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.15",
"version": "6.3.1.178"
},
diff --git a/playbooks/CrowdStrike_OAuth_API_File_Eviction.json b/playbooks/CrowdStrike_OAuth_API_File_Eviction.json
index e30f1efa49..75234572ce 100644
--- a/playbooks/CrowdStrike_OAuth_API_File_Eviction.json
+++ b/playbooks/CrowdStrike_OAuth_API_File_Eviction.json
@@ -482,7 +482,7 @@
],
"playbook_trigger": "artifact_created",
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.15",
"version": "6.3.1.178"
},
diff --git a/playbooks/CrowdStrike_OAuth_API_File_Restore.json b/playbooks/CrowdStrike_OAuth_API_File_Restore.json
index 92f2b9543a..e8a3bebb38 100644
--- a/playbooks/CrowdStrike_OAuth_API_File_Restore.json
+++ b/playbooks/CrowdStrike_OAuth_API_File_Restore.json
@@ -680,7 +680,7 @@
],
"playbook_trigger": "artifact_created",
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.15",
"version": "6.3.1.178"
},
diff --git a/playbooks/CrowdStrike_OAuth_API_Get_Device_Info.json b/playbooks/CrowdStrike_OAuth_API_Get_Device_Info.json
index 7671c0bd6f..0e25822b53 100644
--- a/playbooks/CrowdStrike_OAuth_API_Get_Device_Info.json
+++ b/playbooks/CrowdStrike_OAuth_API_Get_Device_Info.json
@@ -283,7 +283,7 @@
],
"playbook_trigger": "artifact_created",
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.15",
"version": "6.3.1.178"
},
diff --git a/playbooks/CrowdStrike_OAuth_API_Identifier_Activity_Analysis.json b/playbooks/CrowdStrike_OAuth_API_Identifier_Activity_Analysis.json
index 3163993e1c..5a8fc608cf 100644
--- a/playbooks/CrowdStrike_OAuth_API_Identifier_Activity_Analysis.json
+++ b/playbooks/CrowdStrike_OAuth_API_Identifier_Activity_Analysis.json
@@ -598,7 +598,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/CrowdStrike_OAuth_API_Network_Isolation.json b/playbooks/CrowdStrike_OAuth_API_Network_Isolation.json
index d07f406503..f7c5bc4112 100644
--- a/playbooks/CrowdStrike_OAuth_API_Network_Isolation.json
+++ b/playbooks/CrowdStrike_OAuth_API_Network_Isolation.json
@@ -344,7 +344,7 @@
],
"playbook_trigger": "artifact_created",
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.15",
"version": "6.3.1.178"
},
diff --git a/playbooks/CrowdStrike_OAuth_API_Network_Restore.json b/playbooks/CrowdStrike_OAuth_API_Network_Restore.json
index bd5a7121d0..032b7811a6 100644
--- a/playbooks/CrowdStrike_OAuth_API_Network_Restore.json
+++ b/playbooks/CrowdStrike_OAuth_API_Network_Restore.json
@@ -343,7 +343,7 @@
],
"playbook_trigger": "artifact_created",
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.15",
"version": "6.3.1.178"
},
diff --git a/playbooks/CrowdStrike_OAuth_API_Process_Termination.json b/playbooks/CrowdStrike_OAuth_API_Process_Termination.json
index 762a7ce35d..51430a56c9 100644
--- a/playbooks/CrowdStrike_OAuth_API_Process_Termination.json
+++ b/playbooks/CrowdStrike_OAuth_API_Process_Termination.json
@@ -480,7 +480,7 @@
],
"playbook_trigger": "artifact_created",
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.15",
"version": "6.3.1.178"
},
diff --git a/playbooks/DNS_Denylisting_Dispatch.json b/playbooks/DNS_Denylisting_Dispatch.json
index 32331bdb86..1994dc7959 100644
--- a/playbooks/DNS_Denylisting_Dispatch.json
+++ b/playbooks/DNS_Denylisting_Dispatch.json
@@ -735,7 +735,7 @@
"input_spec": null,
"output_spec": null,
"playbook_type": "automation",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.10",
"version": "6.1.0.131"
},
diff --git a/playbooks/Dynamic_Analysis_Dispatch.json b/playbooks/Dynamic_Analysis_Dispatch.json
index 1d05725618..8d588e3db1 100644
--- a/playbooks/Dynamic_Analysis_Dispatch.json
+++ b/playbooks/Dynamic_Analysis_Dispatch.json
@@ -728,7 +728,7 @@
"input_spec": null,
"output_spec": null,
"playbook_type": "automation",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/G_Suite_for_GMail_Message_Identifier_Activity_Analysis.json b/playbooks/G_Suite_for_GMail_Message_Identifier_Activity_Analysis.json
index 0e13bbbccd..b48b9d5f7f 100644
--- a/playbooks/G_Suite_for_GMail_Message_Identifier_Activity_Analysis.json
+++ b/playbooks/G_Suite_for_GMail_Message_Identifier_Activity_Analysis.json
@@ -387,7 +387,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.11",
"version": "6.2.0.355"
},
diff --git a/playbooks/G_Suite_for_Gmail_Message_Eviction.json b/playbooks/G_Suite_for_Gmail_Message_Eviction.json
index ad88da8e31..5ad8d63cfc 100644
--- a/playbooks/G_Suite_for_Gmail_Message_Eviction.json
+++ b/playbooks/G_Suite_for_Gmail_Message_Eviction.json
@@ -279,7 +279,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.11",
"version": "6.2.0.355"
},
diff --git a/playbooks/G_Suite_for_Gmail_Search_and_Purge.json b/playbooks/G_Suite_for_Gmail_Search_and_Purge.json
index 9bfc876fbe..6fa517b75d 100644
--- a/playbooks/G_Suite_for_Gmail_Search_and_Purge.json
+++ b/playbooks/G_Suite_for_Gmail_Search_and_Purge.json
@@ -850,7 +850,7 @@
"input_spec": null,
"output_spec": null,
"playbook_type": "automation",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.11",
"version": "6.2.0.355"
},
diff --git a/playbooks/Identifier_Activity_Analysis_Dispatch.json b/playbooks/Identifier_Activity_Analysis_Dispatch.json
index 1b1481ba2d..314e2fa46b 100644
--- a/playbooks/Identifier_Activity_Analysis_Dispatch.json
+++ b/playbooks/Identifier_Activity_Analysis_Dispatch.json
@@ -670,7 +670,7 @@
}
],
"playbook_type": "automation",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/Identifier_Reputation_Analysis_Dispatch.json b/playbooks/Identifier_Reputation_Analysis_Dispatch.json
index 77b42d79a3..46888c6368 100644
--- a/playbooks/Identifier_Reputation_Analysis_Dispatch.json
+++ b/playbooks/Identifier_Reputation_Analysis_Dispatch.json
@@ -275,7 +275,7 @@
}
},
"playbookName": "dispatch_input_playbooks",
- "playbookRepo": 3,
+ "playbookRepo": 1,
"playbookRepoName": "community",
"playbookType": "data",
"synchronous": true,
@@ -811,7 +811,7 @@
"input_spec": null,
"output_spec": null,
"playbook_type": "automation",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/Jira_Related_Tickets_Search.json b/playbooks/Jira_Related_Tickets_Search.json
index d9ad8ac64c..d3afe4225b 100644
--- a/playbooks/Jira_Related_Tickets_Search.json
+++ b/playbooks/Jira_Related_Tickets_Search.json
@@ -526,7 +526,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.10",
"version": "6.1.0.131"
},
diff --git a/playbooks/MS_Graph_for_Office_365_Message_Eviction.json b/playbooks/MS_Graph_for_Office_365_Message_Eviction.json
index 2740680741..490bc62716 100644
--- a/playbooks/MS_Graph_for_Office_365_Message_Eviction.json
+++ b/playbooks/MS_Graph_for_Office_365_Message_Eviction.json
@@ -397,7 +397,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.11",
"version": "6.2.0.355"
},
diff --git a/playbooks/MS_Graph_for_Office_365_Message_Identifier_Activity_Analysis.json b/playbooks/MS_Graph_for_Office_365_Message_Identifier_Activity_Analysis.json
index 40385a6e70..f5a6e2b9bc 100644
--- a/playbooks/MS_Graph_for_Office_365_Message_Identifier_Activity_Analysis.json
+++ b/playbooks/MS_Graph_for_Office_365_Message_Identifier_Activity_Analysis.json
@@ -369,7 +369,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.11",
"version": "6.2.0.355"
},
diff --git a/playbooks/MS_Graph_for_Office_365_Message_Restore.json b/playbooks/MS_Graph_for_Office_365_Message_Restore.json
index a78aa45be3..f798e6b6e9 100644
--- a/playbooks/MS_Graph_for_Office_365_Message_Restore.json
+++ b/playbooks/MS_Graph_for_Office_365_Message_Restore.json
@@ -405,7 +405,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.11",
"version": "6.2.0.355"
},
diff --git a/playbooks/MS_Graph_for_Office_365_Search_and_Purge.json b/playbooks/MS_Graph_for_Office_365_Search_and_Purge.json
index 1b7d886e98..bfa903577e 100644
--- a/playbooks/MS_Graph_for_Office_365_Search_and_Purge.json
+++ b/playbooks/MS_Graph_for_Office_365_Search_and_Purge.json
@@ -1026,7 +1026,7 @@
"input_spec": null,
"output_spec": null,
"playbook_type": "automation",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.11",
"version": "6.2.0.355"
},
diff --git a/playbooks/MS_Graph_for_Office_365_Search_and_Restore.json b/playbooks/MS_Graph_for_Office_365_Search_and_Restore.json
index 6cda02bc6f..4e57b4b490 100644
--- a/playbooks/MS_Graph_for_Office_365_Search_and_Restore.json
+++ b/playbooks/MS_Graph_for_Office_365_Search_and_Restore.json
@@ -630,7 +630,7 @@
"input_spec": null,
"output_spec": null,
"playbook_type": "automation",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.11",
"version": "6.2.0.355"
},
diff --git a/playbooks/Panorama_Outbound_Traffic_Filtering.json b/playbooks/Panorama_Outbound_Traffic_Filtering.json
index 42e6d83e02..74d350395f 100644
--- a/playbooks/Panorama_Outbound_Traffic_Filtering.json
+++ b/playbooks/Panorama_Outbound_Traffic_Filtering.json
@@ -267,7 +267,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/PhishTank_URL_Reputation_Analysis.json b/playbooks/PhishTank_URL_Reputation_Analysis.json
index e888194217..2695aa91ce 100644
--- a/playbooks/PhishTank_URL_Reputation_Analysis.json
+++ b/playbooks/PhishTank_URL_Reputation_Analysis.json
@@ -345,7 +345,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.8",
"version": "5.5.0.108488"
},
diff --git a/playbooks/Related_Tickets_Search_Dispatch.json b/playbooks/Related_Tickets_Search_Dispatch.json
index c7e81f6e86..534658154e 100644
--- a/playbooks/Related_Tickets_Search_Dispatch.json
+++ b/playbooks/Related_Tickets_Search_Dispatch.json
@@ -4,7 +4,7 @@
"category": "Related Tickets Search",
"coa": {
"data": {
- "description": "Detects available indicators and routes them to dispatch related ticket search playbooks. The output of the analysis will update any artifacts, tasks, and indicator tags.",
+ "description": "Detects available indicators and routes them to dynamic related ticket search playbooks. The output of the analysis will update any artifacts, tasks, and indicator tags.",
"edges": [
{
"conditions": [
@@ -722,7 +722,7 @@
"input_spec": null,
"output_spec": null,
"playbook_type": "automation",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/ServiceNow_Related_Tickets_Search.json b/playbooks/ServiceNow_Related_Tickets_Search.json
index ccd6d885b0..e7c84b78c0 100644
--- a/playbooks/ServiceNow_Related_Tickets_Search.json
+++ b/playbooks/ServiceNow_Related_Tickets_Search.json
@@ -82,7 +82,7 @@
"targetPort": "1_in"
}
],
- "hash": "e7ead2bdc6a802950dd37c47b3f4af40d61c676c",
+ "hash": "0c56bc09507f0a442ed7f70f357f51ae6d3983fe",
"nodes": {
"0": {
"data": {
@@ -98,7 +98,7 @@
"type": "start",
"warnings": {},
"x": 19.999999999999986,
- "y": -6.394884621840902e-14
+ "y": -1.9184653865522705e-13
},
"1": {
"data": {
@@ -121,6 +121,8 @@
"advanced": {
"customName": "input filter",
"customNameId": 0,
+ "delimiter": ",",
+ "delimiter_enabled": true,
"description": "Creates a dataset without None values.",
"join": [],
"note": "Creates a dataset without None values."
@@ -148,11 +150,7 @@
"errors": {},
"id": "10",
"type": "filter",
- "warnings": {
- "config": [
- "Reconfigure invalid datapath."
- ]
- },
+ "warnings": {},
"x": 60,
"y": 622
},
@@ -183,7 +181,7 @@
"type": "code",
"userCode": " process_results__output = {}\n for search_term in filtered_input_0_search_term_values:\n process_results__output[search_term] = []\n for result, query_table in zip(run_ticket_query_result_item_0, run_ticket_query_parameter_query_table):\n if not isinstance(result, list):\n list_result = [result]\n else:\n list_result = result\n for result_item in list_result:\n result_item_values = [item.lower() for item in result_item.values() if isinstance(item, str)]\n match = False\n for string_value in result_item_values:\n if search_term.lower() in string_value:\n match = True\n break\n if match:\n process_results__output[search_term].append({**result_item, **{\"ticket_type\": query_table}})\n\n",
"warnings": {},
- "x": 0,
+ "x": 1.4210854715202004e-14,
"y": 1158
},
"16": {
@@ -221,9 +219,9 @@
"errors": {},
"id": "16",
"type": "code",
- "userCode": " import re \n \n build_output__observable_array = []\n build_output__name = []\n build_output__number = []\n build_output__message = []\n build_output__start_time = []\n build_output__end_time = []\n build_output__assignee = []\n build_output__creator_name = []\n build_output__state = []\n build_output__matched_fields = []\n build_output__source_link = []\n build_output__source = []\n \n def generate_ticket_link(sample_url, ticket_type, sys_id):\n extract_host = re.search(r\"https*:\\/\\/[^\\/]+\", sample_url).group(0)\n extract_host += f\"/nav_to.do?uri={ticket_type}.do?sys_id={sys_id}\"\n return extract_host\n \n for key in process_results__output.keys():\n \n for value in process_results__output[key]:\n assigned_to = None\n caller_id = None\n matched_fields = []\n if value.get(\"assigned_to\"):\n assigned_to = value[\"assigned_to\"][\"display_value\"]\n if value.get(\"caller_id\"):\n caller_id = value[\"caller_id\"][\"display_value\"]\n \n for k, v in value.items():\n # generate matched fields where the searched entity appears\n if isinstance(v, str) and key.lower() in v.lower():\n matched_fields.append(k)\n # search for any link sample:\n if isinstance(v, dict):\n sample_link = v.get('link')\n \n source_link = generate_ticket_link(sample_link, value['ticket_type'], value['sys_id'])\n observable_object = {\n \"value\": key,\n \"ticket\": {\n \"name\": value[\"short_description\"],\n \"id\": value[\"sys_id\"],\n \"number\": value[\"number\"],\n \"message\": json.dumps(value[\"description\"]),\n \"start_time\": value[\"sys_created_on\"],\n \"end_time\": value[\"closed_at\"],\n \"assigned_to\": assigned_to,\n \"creator_name\": caller_id,\n \"state\": value[\"state\"],\n \"notes\": [value[\"work_notes\"]],\n \"comments\": [value[\"comments\"]]\n },\n \"matched_fields\": matched_fields,\n \"source\": \"ServiceNow\",\n \"source_link\": source_link\n }\n build_output__observable_array.append(observable_object)\n 
build_output__name.append(value[\"short_description\"])\n build_output__number.append(value[\"number\"])\n build_output__message.append(json.dumps(value[\"description\"])) # eliminate new line issues\n build_output__start_time.append(value[\"sys_created_on\"])\n build_output__end_time.append(value[\"closed_at\"])\n build_output__assignee.append(assigned_to)\n build_output__creator_name.append(caller_id)\n build_output__state.append(value[\"state\"])\n build_output__matched_fields.append(matched_fields)\n build_output__source.append(\"ServiceNow\")\n build_output__source_link.append(source_link)\n #phantom.debug(observable_object)\n\n",
+ "userCode": " import re \n \n build_output__observable_array = []\n build_output__name = []\n build_output__number = []\n build_output__message = []\n build_output__start_time = []\n build_output__end_time = []\n build_output__assignee = []\n build_output__creator_name = []\n build_output__state = []\n build_output__matched_fields = []\n build_output__source_link = []\n build_output__source = []\n \n def generate_ticket_link(sample_url, ticket_type, sys_id):\n extract_host = re.search(r\"https*:\\/\\/[^\\/]+\", sample_url).group(0)\n extract_host += f\"/nav_to.do?uri={ticket_type}.do?sys_id={sys_id}\"\n return extract_host\n \n for key in process_results__output.keys():\n \n for value in process_results__output[key]:\n assigned_to = None\n caller_id = None\n matched_fields = []\n if value.get(\"assigned_to\"):\n assigned_to = value[\"assigned_to\"][\"display_value\"]\n if value.get(\"caller_id\"):\n caller_id = value[\"caller_id\"][\"display_value\"]\n \n sample_link = None\n for k, v in value.items():\n # generate matched fields where the searched entity appears\n if isinstance(v, str) and key.lower() in v.lower():\n matched_fields.append(k)\n # search for any link sample:\n if isinstance(v, dict):\n sample_link = v.get('link')\n \n source_link = generate_ticket_link(sample_link, value['ticket_type'], value['sys_id'])\n observable_object = {\n \"value\": key,\n \"ticket\": {\n \"name\": value[\"short_description\"],\n \"id\": value[\"sys_id\"],\n \"number\": value[\"number\"],\n \"message\": json.dumps(value[\"description\"]),\n \"start_time\": value[\"sys_created_on\"],\n \"end_time\": value[\"closed_at\"],\n \"assigned_to\": assigned_to,\n \"creator_name\": caller_id,\n \"state\": value[\"state\"],\n \"notes\": [value[\"work_notes\"]],\n \"comments\": [value[\"comments\"]]\n },\n \"matched_fields\": matched_fields,\n \"source\": \"ServiceNow\",\n \"source_link\": source_link\n }\n build_output__observable_array.append(observable_object)\n 
build_output__name.append(value[\"short_description\"])\n build_output__number.append(value[\"number\"])\n build_output__message.append(json.dumps(value[\"description\"])) # eliminate new line issues\n build_output__start_time.append(value[\"sys_created_on\"])\n build_output__end_time.append(value[\"closed_at\"])\n build_output__assignee.append(assigned_to)\n build_output__creator_name.append(caller_id)\n build_output__state.append(value[\"state\"])\n build_output__matched_fields.append(matched_fields)\n build_output__source.append(\"ServiceNow\")\n build_output__source_link.append(source_link)\n #phantom.debug(observable_object)\n\n",
"warnings": {},
- "x": 0,
+ "x": 1.4210854715202004e-14,
"y": 1352
},
"17": {
@@ -231,7 +229,8 @@
"advanced": {
"customName": "calculate earliest time",
"customNameId": 0,
- "join": []
+ "join": [],
+ "note": "Converts the container create time into a different datetime string compatible with ServiceNow."
},
"customFunction": {
"draftMode": false,
@@ -241,6 +240,15 @@
"functionId": 2,
"functionName": "calculate_earliest_time",
"id": "17",
+ "loop": {
+ "enabled": false,
+ "exitAfterUnit": "m",
+ "exitAfterValue": 10,
+ "exitConditionEnabled": false,
+ "exitLoopAfter": 2,
+ "pauseUnit": "m",
+ "pauseValue": 2
+ },
"selectMore": false,
"type": "utility",
"utilities": {
@@ -321,7 +329,7 @@
"id": "17",
"type": "utility",
"warnings": {},
- "x": 0,
+ "x": 1.4210854715202004e-14,
"y": 474
},
"19": {
@@ -344,6 +352,15 @@
"functionId": 3,
"functionName": "run_ticket_query",
"id": "19",
+ "loop": {
+ "enabled": false,
+ "exitAfterUnit": "m",
+ "exitAfterValue": 10,
+ "exitConditionEnabled": false,
+ "exitLoopAfter": 2,
+ "pauseUnit": "m",
+ "pauseValue": 2
+ },
"parameters": {
"max_results": "100",
"query": {
@@ -412,7 +429,7 @@
"id": "25",
"type": "format",
"warnings": {},
- "x": 0,
+ "x": 1.4210854715202004e-14,
"y": 1530
},
"6": {
@@ -438,7 +455,7 @@
"type": "code",
"userCode": " \n # Default tables list to find related tickets. Adjust as needed.\n default_table_list = [\n 'incident', \n 'change_request', \n 'change_task', \n 'problem',\n 'sc_request', \n 'sc_task', \n 'sc_req_item',\n ]\n default_table_list__output = default_table_list\n \n",
"warnings": {},
- "x": 0,
+ "x": 1.4210854715202004e-14,
"y": 148
},
"8": {
@@ -447,6 +464,7 @@
"customName": "convert table list",
"customNameId": 0,
"join": [],
+ "note": "Converts the table into a custom function result that is filterable downstream.",
"refreshNotableData": false
},
"customFunction": {
@@ -457,6 +475,15 @@
"functionId": 1,
"functionName": "convert_table_list",
"id": "8",
+ "loop": {
+ "enabled": false,
+ "exitAfterUnit": "m",
+ "exitAfterValue": 10,
+ "exitConditionEnabled": false,
+ "exitLoopAfter": 2,
+ "pauseUnit": "m",
+ "pauseValue": 2
+ },
"selectMore": false,
"type": "utility",
"utilities": {
@@ -492,7 +519,7 @@
"type": "utility",
"userCode": "\n",
"warnings": {},
- "x": 0,
+ "x": 1.4210854715202004e-14,
"y": 326
},
"9": {
@@ -519,7 +546,7 @@
"id": "9",
"type": "format",
"warnings": {},
- "x": 0,
+ "x": 1.4210854715202004e-14,
"y": 802
}
},
@@ -557,12 +584,13 @@
"name": "markdown_report"
}
],
+ "playbook_trigger": "artifact_created",
"playbook_type": "data",
- "python_version": "3",
- "schema": "5.0.9",
- "version": "6.0.0.114895"
+ "python_version": "3.13",
+ "schema": "5.0.15",
+ "version": "6.3.1.176"
},
- "create_time": "2023-03-03T21:21:48.729159+00:00",
+ "create_time": "2025-01-24T16:39:26.106708+00:00",
"draft_mode": false,
"labels": [
"*"
diff --git a/playbooks/ServiceNow_Related_Tickets_Search.py b/playbooks/ServiceNow_Related_Tickets_Search.py
index 027adeb29b..28410fb4ba 100644
--- a/playbooks/ServiceNow_Related_Tickets_Search.py
+++ b/playbooks/ServiceNow_Related_Tickets_Search.py
@@ -18,7 +18,7 @@ def on_start(container):
return
@phantom.playbook_block()
-def default_table_list(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
+def default_table_list(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, loop_state_json=None, **kwargs):
phantom.debug("default_table_list() called")
################################################################################
@@ -55,7 +55,7 @@ def default_table_list(action=None, success=None, container=None, results=None,
@phantom.playbook_block()
-def convert_table_list(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
+def convert_table_list(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, loop_state_json=None, **kwargs):
phantom.debug("convert_table_list() called")
default_table_list__output = json.loads(_ if (_ := phantom.get_run_data(key="default_table_list:output")) != "" else "null") # pylint: disable=used-before-assignment
@@ -80,7 +80,7 @@ def convert_table_list(action=None, success=None, container=None, results=None,
@phantom.playbook_block()
-def space_delimiter_input(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
+def space_delimiter_input(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, loop_state_json=None, **kwargs):
phantom.debug("space_delimiter_input() called")
################################################################################
@@ -112,7 +112,7 @@ def space_delimiter_input(action=None, success=None, container=None, results=Non
@phantom.playbook_block()
-def input_filter(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
+def input_filter(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, loop_state_json=None, **kwargs):
phantom.debug("input_filter() called")
################################################################################
@@ -125,7 +125,8 @@ def input_filter(action=None, success=None, container=None, results=None, handle
conditions=[
["playbook_input:search_term", "!=", None]
],
- name="input_filter:condition_1")
+ name="input_filter:condition_1",
+ delimiter=",")
# call connected blocks if filtered artifacts or results
if matched_artifacts_1 or matched_results_1:
@@ -135,7 +136,7 @@ def input_filter(action=None, success=None, container=None, results=None, handle
@phantom.playbook_block()
-def process_results(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
+def process_results(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, loop_state_json=None, **kwargs):
phantom.debug("process_results() called")
################################################################################
@@ -185,7 +186,7 @@ def process_results(action=None, success=None, container=None, results=None, han
@phantom.playbook_block()
-def build_output(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
+def build_output(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, loop_state_json=None, **kwargs):
phantom.debug("build_output() called")
################################################################################
@@ -241,6 +242,7 @@ def generate_ticket_link(sample_url, ticket_type, sys_id):
if value.get("caller_id"):
caller_id = value["caller_id"]["display_value"]
+ sample_link = None
for k, v in value.items():
# generate matched fields where the searched entity appears
if isinstance(v, str) and key.lower() in v.lower():
@@ -306,7 +308,7 @@ def generate_ticket_link(sample_url, ticket_type, sys_id):
@phantom.playbook_block()
-def calculate_earliest_time(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
+def calculate_earliest_time(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, loop_state_json=None, **kwargs):
phantom.debug("calculate_earliest_time() called")
create_time_value = container.get("create_time", None)
@@ -337,7 +339,7 @@ def calculate_earliest_time(action=None, success=None, container=None, results=N
@phantom.playbook_block()
-def run_ticket_query(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
+def run_ticket_query(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, loop_state_json=None, **kwargs):
phantom.debug("run_ticket_query() called")
# phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))
@@ -386,7 +388,7 @@ def run_ticket_query(action=None, success=None, container=None, results=None, ha
@phantom.playbook_block()
-def format_report(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
+def format_report(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, loop_state_json=None, **kwargs):
phantom.debug("format_report() called")
################################################################################
diff --git a/playbooks/Splunk_Attack_Analyzer_Dynamic_Analysis.json b/playbooks/Splunk_Attack_Analyzer_Dynamic_Analysis.json
index f7fca8706d..5116182d42 100644
--- a/playbooks/Splunk_Attack_Analyzer_Dynamic_Analysis.json
+++ b/playbooks/Splunk_Attack_Analyzer_Dynamic_Analysis.json
@@ -1,1094 +1,1098 @@
{
- "blockly": false,
- "blockly_xml": "",
- "category": "Dynamic Analysis",
- "coa": {
- "data": {
- "description": "Accepts a URL or vault_id and does detonation analysis on the objects. Generates a global report and a per observable sub-report and normalized score. The score can be customized based on a variety of factors.\n\n",
- "edges": [
- {
- "id": "port_0_to_port_2",
- "sourceNode": "0",
- "sourcePort": "0_out",
- "targetNode": "2",
- "targetPort": "2_in"
- },
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_2_to_port_3",
- "sourceNode": "2",
- "sourcePort": "2_out",
- "targetNode": "3",
- "targetPort": "3_in"
- },
- {
- "id": "port_8_to_port_10",
- "sourceNode": "8",
- "sourcePort": "8_out",
- "targetNode": "10",
- "targetPort": "10_in"
- },
- {
- "id": "port_10_to_port_11",
- "sourceNode": "10",
- "sourcePort": "10_out",
- "targetNode": "11",
- "targetPort": "11_in"
- },
- {
- "id": "port_11_to_port_1",
- "sourceNode": "11",
- "sourcePort": "11_out",
- "targetNode": "1",
- "targetPort": "1_in"
- },
- {
- "conditions": [
- {
- "index": 1
- }
- ],
- "id": "port_2_to_port_12",
- "sourceNode": "2",
- "sourcePort": "2_out",
- "targetNode": "12",
- "targetPort": "12_in"
- },
- {
- "id": "port_17_to_port_18",
- "sourceNode": "17",
- "sourcePort": "17_out",
- "targetNode": "18",
- "targetPort": "18_in"
- },
- {
- "id": "port_18_to_port_19",
- "sourceNode": "18",
- "sourcePort": "18_out",
- "targetNode": "19",
- "targetPort": "19_in"
- },
- {
- "id": "port_19_to_port_1",
- "sourceNode": "19",
- "sourcePort": "19_out",
- "targetNode": "1",
- "targetPort": "1_in"
- },
- {
- "id": "port_12_to_port_4",
- "sourceNode": "12",
- "sourcePort": "12_out",
- "targetNode": "4",
- "targetPort": "4_in"
- },
- {
- "id": "port_6_to_port_7",
- "sourceNode": "6",
- "sourcePort": "6_out",
- "targetNode": "7",
- "targetPort": "7_in"
- },
- {
- "id": "port_3_to_port_22",
- "sourceNode": "3",
- "sourcePort": "3_out",
- "targetNode": "22",
- "targetPort": "22_in"
- },
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_22_to_port_6",
- "sourceNode": "22",
- "sourcePort": "22_out",
- "targetNode": "6",
- "targetPort": "6_in"
- },
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_4_to_port_15",
- "sourceNode": "4",
- "sourcePort": "4_out",
- "targetNode": "15",
- "targetPort": "15_in"
- },
- {
- "id": "port_15_to_port_23",
- "sourceNode": "15",
- "sourcePort": "15_out",
- "targetNode": "23",
- "targetPort": "23_in"
- },
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_7_to_port_24",
- "sourceNode": "7",
- "sourcePort": "7_out",
- "targetNode": "24",
- "targetPort": "24_in"
- },
- {
- "id": "port_24_to_port_8",
- "sourceNode": "24",
- "sourcePort": "24_out",
- "targetNode": "8",
- "targetPort": "8_in"
- },
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_23_to_port_25",
- "sourceNode": "23",
- "sourcePort": "23_out",
- "targetNode": "25",
- "targetPort": "25_in"
- },
- {
- "id": "port_25_to_port_17",
- "sourceNode": "25",
- "sourcePort": "25_out",
- "targetNode": "17",
- "targetPort": "17_in"
- },
- {
- "id": "port_24_to_port_26",
- "sourceNode": "24",
- "sourcePort": "24_out",
- "targetNode": "26",
- "targetPort": "26_in"
- },
- {
- "id": "port_25_to_port_27",
- "sourceNode": "25",
- "sourcePort": "25_out",
- "targetNode": "27",
- "targetPort": "27_in"
- },
- {
- "id": "port_27_to_port_18",
- "sourceNode": "27",
- "sourcePort": "27_out",
- "targetNode": "18",
- "targetPort": "18_in"
- },
- {
- "id": "port_26_to_port_10",
- "sourceNode": "26",
- "sourcePort": "26_out",
- "targetNode": "10",
- "targetPort": "10_in"
- }
- ],
- "globalCustomCode": null,
- "hash": "ce4ff511d95ca23c71cdab2934963d4f079338aa",
- "nodes": {
- "0": {
- "data": {
- "advanced": {
- "join": []
- },
- "functionName": "on_start",
- "id": "0",
- "type": "start"
- },
- "errors": {},
- "id": "0",
- "type": "start",
- "warnings": {},
- "x": 190,
- "y": -8.313350008393172e-13
- },
- "1": {
- "data": {
- "advanced": {
- "join": []
- },
- "functionName": "on_finish",
- "id": "1",
- "type": "end"
- },
- "errors": {},
- "id": "1",
- "type": "end",
- "userCode": "\n # Write your custom code here...\n #phantom.debug(output)\n",
- "warnings": {},
- "x": 190,
- "y": 1994
- },
- "10": {
- "data": {
- "advanced": {
- "customName": "format url report ",
- "customNameId": 0,
- "description": "Format a summary table with the information gathered from the playbook.",
- "drop_none": true,
- "join": [],
- "note": "Format a summary table with the information gathered from the playbook."
- },
- "functionId": 1,
- "functionName": "format_url_report",
- "id": "10",
- "parameters": [
- "normalized_url_summary_output:custom_function:url",
- "normalized_url_summary_output:custom_function:scores",
- "normalized_url_summary_output:custom_function:score_id",
- "normalized_url_summary_output:custom_function:classifications",
- "normalized_url_summary_output:custom_function:job_id",
- "url_screenshot_formatting:custom_function:report"
- ],
- "template": "SOAR analyzed URL(s) using Splunk Attack Analyzer. The table below shows a summary of the information gathered.\n\n| URL | Normalized Score | Score Id | Classifications | Report Link | Source |\n| --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {1} | {2} | {3} | https://app.twinwave.io/job/{4} | Splunk Attack Analyzer (SAA) |\n%%\n\nScreenshots associated with the detonated URLs are shown below (if available):\n\n{5}\n",
- "type": "format"
- },
- "errors": {},
- "id": "10",
- "type": "format",
- "userCode": "\n # Write your custom code here...\n #phantom.debug(phantom.format(container=container, template=template, parameters=parameters, name=\"format_report_url\"))\n",
- "warnings": {},
- "x": 0,
- "y": 1640
- },
- "11": {
- "data": {
- "advanced": {
- "customName": "build url output",
- "customNameId": 0,
- "description": "This block uses custom code to generate an observable dictionary to output into the observables data path.",
- "join": [],
- "note": "This block uses custom code to generate an observable dictionary to output into the observables data path."
- },
- "functionId": 3,
- "functionName": "build_url_output",
- "id": "11",
- "inputParameters": [
- "normalized_url_summary_output:custom_function:url",
- "normalized_url_summary_output:custom_function:job_id",
- "normalized_url_summary_output:custom_function:url_score_object"
- ],
- "outputVariables": [
- "observable_array"
- ],
- "type": "code"
- },
- "errors": {},
- "id": "11",
- "type": "code",
- "userCode": "\n # Write your custom code here...\n from urllib.parse import urlparse\n build_url_output__observable_array = []\n #phantom.debug(playbook_input_url_values)\n # Build URL\n\n \n for url, external_id, url_object in zip(normalized_url_summary_output__url, normalized_url_summary_output__job_id, normalized_url_summary_output__url_score_object):\n parsed_url = urlparse(url)\n #phantom.debug(\"url: {} jobs_id:{}\".format(url, external_id))\n #phantom.debug(\"parsed_url: {}, url_object: {}\".format(parsed_url, url_object))\n observable_object = {\n \"value\": url,\n \"type\": \"url\",\n \"reputation\": {\n \"orig_score\": url_object['orig_score'],\n \"score\": url_object['score'],\n \"score_id\": url_object['score_id']\n },\n \"attributes\": {\n \"hostname\": parsed_url.hostname,\n \"scheme\": parsed_url.scheme\n },\n \"malware\": {\n \"classifications\": url_object['classifications'],\n \"classification_ids\": url_object['classification_ids']\n },\n \"source\": \"Splunk Attack Analyzer\",\n \"source_link\": f\"https://app.twinwave.io/job/{external_id}\"\n }\n if url_object.get('related_observables'):\n observable_object[\"related_observables\"] = url_object['related_observables']\n \n if parsed_url.path:\n observable_object['attributes']['path'] = parsed_url.path\n if parsed_url.query:\n observable_object['attributes']['query'] = parsed_url.query\n if parsed_url.port:\n observable_object['attributes']['port'] = parsed_url.port\n\n build_url_output__observable_array.append(observable_object)\n #phantom.debug(\"build_url_output__observable_array: {}\".format(build_url_output__observable_array))\n",
- "warnings": {},
- "x": 0,
- "y": 1820
- },
- "12": {
- "data": {
- "action": "detonate file",
- "actionType": "generic",
- "advanced": {
- "customName": "file detonation",
- "customNameId": 0,
- "delayTime": 0,
- "description": "Queries SAA for information about the provided vault_id(s)",
- "join": [],
- "note": "Queries SAA for information about the provided vault_id(s)"
- },
- "connector": "Splunk Attack Analyzer Connector for Splunk SOAR",
- "connectorConfigs": [
- "splunk_attack_analyzer"
- ],
- "connectorId": "de681fee-c552-45bf-9212-827b1c7529f8",
- "connectorVersion": "v1",
- "functionId": 1,
- "functionName": "file_detonation",
- "id": "12",
- "loop": {
- "enabled": false,
- "exitAfterUnit": "m",
- "exitAfterValue": 10,
- "exitConditionEnabled": false,
- "exitLoopAfter": 2,
- "pauseUnit": "m",
- "pauseValue": 2
- },
- "parameters": {
- "file": "filtered-data:saa_input_filter:condition_2:playbook_input:vault_id"
- },
- "requiredParameters": [
- {
- "data_type": "string",
- "field": "file"
- }
- ],
- "type": "action"
- },
- "errors": {},
- "id": "12",
- "type": "action",
- "warnings": {},
- "x": 340,
- "y": 328
- },
- "15": {
- "data": {
- "action": "get job summary",
- "actionType": "investigate",
- "advanced": {
- "customName": "get file summary output",
- "customNameId": 0,
- "delayTime": 0,
- "description": "Queries SAA Forensics data relative to the JobID of URL(s) or File(s) needs to be detonated.",
- "join": [],
- "note": "Queries SAA Forensics data relative to the JobID of URL(s) or File(s) needs to be detonated."
- },
- "connector": "Splunk Attack Analyzer Connector for Splunk SOAR",
- "connectorConfigs": [
- "splunk_attack_analyzer"
- ],
- "connectorId": "de681fee-c552-45bf-9212-827b1c7529f8",
- "connectorVersion": "v1",
- "functionId": 2,
- "functionName": "get_file_summary_output",
- "id": "15",
- "loop": {
- "conditions": [
+ "blockly": false,
+ "blockly_xml": "",
+ "category": "Dynamic Analysis",
+ "coa": {
+ "data": {
+ "description": "Accepts a URL or vault_id and does detonation analysis on the objects. Generates a global report and a per observable sub-report and normalized score. The score can be customized based on a variety of factors.\n\n",
+ "edges": [
+ {
+ "id": "port_0_to_port_2",
+ "sourceNode": "0",
+ "sourcePort": "0_out",
+ "targetNode": "2",
+ "targetPort": "2_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
+ ],
+ "id": "port_2_to_port_3",
+ "sourceNode": "2",
+ "sourcePort": "2_out",
+ "targetNode": "3",
+ "targetPort": "3_in"
+ },
+ {
+ "id": "port_8_to_port_10",
+ "sourceNode": "8",
+ "sourcePort": "8_out",
+ "targetNode": "10",
+ "targetPort": "10_in"
+ },
+ {
+ "id": "port_10_to_port_11",
+ "sourceNode": "10",
+ "sourcePort": "10_out",
+ "targetNode": "11",
+ "targetPort": "11_in"
+ },
+ {
+ "id": "port_11_to_port_1",
+ "sourceNode": "11",
+ "sourcePort": "11_out",
+ "targetNode": "1",
+ "targetPort": "1_in"
+ },
{
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": "==",
- "param": "get_file_summary_output:action_result.data.*.State",
- "value": "done"
- }
- ],
- "conditionIndex": 0,
- "display": "If",
- "logic": "and",
- "type": "if"
+ "conditions": [
+ {
+ "index": 1
+ }
+ ],
+ "id": "port_2_to_port_12",
+ "sourceNode": "2",
+ "sourcePort": "2_out",
+ "targetNode": "12",
+ "targetPort": "12_in"
+ },
+ {
+ "id": "port_17_to_port_18",
+ "sourceNode": "17",
+ "sourcePort": "17_out",
+ "targetNode": "18",
+ "targetPort": "18_in"
+ },
+ {
+ "id": "port_18_to_port_19",
+ "sourceNode": "18",
+ "sourcePort": "18_out",
+ "targetNode": "19",
+ "targetPort": "19_in"
+ },
+ {
+ "id": "port_19_to_port_1",
+ "sourceNode": "19",
+ "sourcePort": "19_out",
+ "targetNode": "1",
+ "targetPort": "1_in"
+ },
+ {
+ "id": "port_12_to_port_4",
+ "sourceNode": "12",
+ "sourcePort": "12_out",
+ "targetNode": "4",
+ "targetPort": "4_in"
+ },
+ {
+ "id": "port_6_to_port_7",
+ "sourceNode": "6",
+ "sourcePort": "6_out",
+ "targetNode": "7",
+ "targetPort": "7_in"
+ },
+ {
+ "id": "port_3_to_port_22",
+ "sourceNode": "3",
+ "sourcePort": "3_out",
+ "targetNode": "22",
+ "targetPort": "22_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
+ ],
+ "id": "port_22_to_port_6",
+ "sourceNode": "22",
+ "sourcePort": "22_out",
+ "targetNode": "6",
+ "targetPort": "6_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
+ ],
+ "id": "port_4_to_port_15",
+ "sourceNode": "4",
+ "sourcePort": "4_out",
+ "targetNode": "15",
+ "targetPort": "15_in"
+ },
+ {
+ "id": "port_15_to_port_23",
+ "sourceNode": "15",
+ "sourcePort": "15_out",
+ "targetNode": "23",
+ "targetPort": "23_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
+ ],
+ "id": "port_7_to_port_24",
+ "sourceNode": "7",
+ "sourcePort": "7_out",
+ "targetNode": "24",
+ "targetPort": "24_in"
+ },
+ {
+ "id": "port_24_to_port_8",
+ "sourceNode": "24",
+ "sourcePort": "24_out",
+ "targetNode": "8",
+ "targetPort": "8_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
+ ],
+ "id": "port_23_to_port_25",
+ "sourceNode": "23",
+ "sourcePort": "23_out",
+ "targetNode": "25",
+ "targetPort": "25_in"
+ },
+ {
+ "id": "port_25_to_port_17",
+ "sourceNode": "25",
+ "sourcePort": "25_out",
+ "targetNode": "17",
+ "targetPort": "17_in"
+ },
+ {
+ "id": "port_24_to_port_26",
+ "sourceNode": "24",
+ "sourcePort": "24_out",
+ "targetNode": "26",
+ "targetPort": "26_in"
+ },
+ {
+ "id": "port_25_to_port_27",
+ "sourceNode": "25",
+ "sourcePort": "25_out",
+ "targetNode": "27",
+ "targetPort": "27_in"
+ },
+ {
+ "id": "port_27_to_port_18",
+ "sourceNode": "27",
+ "sourcePort": "27_out",
+ "targetNode": "18",
+ "targetPort": "18_in"
+ },
+ {
+ "id": "port_26_to_port_10",
+ "sourceNode": "26",
+ "sourcePort": "26_out",
+ "targetNode": "10",
+ "targetPort": "10_in"
}
- ],
- "enabled": true,
- "exitAfterUnit": "m",
- "exitAfterValue": 30,
- "exitConditionEnabled": true,
- "exitLoopAfter": 15,
- "pauseUnit": "m",
- "pauseValue": 2
- },
- "parameters": {
- "job_id": "filtered-data:detonation_status_filter:condition_1:file_detonation:action_result.data.*.JobID",
- "timeout": ""
- },
- "requiredParameters": [
- {
- "data_type": "string",
- "field": "job_id"
- }
],
- "type": "action"
- },
- "errors": {},
- "id": "15",
- "type": "action",
- "userCode": "\n # Write your custom code here...\n #parameters = []\n #for job_ids in file_jobid_detonation_output__jobid:\n # for job in job_ids:\n # if job is not None:\n # parameters.append({\n # \"job_id\": job,\n # \"timeout\": 5,\n # })\n #phantom.debug(parameters)\n",
- "warnings": {},
- "x": 340,
- "y": 686
- },
- "17": {
- "data": {
- "advanced": {
- "customName": "normalized file summary output",
- "customNameId": 0,
- "description": "This block uses custom code for normalizing score. Adjust the logic as desired in the documented sections.",
- "join": [],
- "note": "This block uses custom code for normalizing score. Adjust the logic as desired in the documented sections."
- },
- "functionId": 5,
- "functionName": "normalized_file_summary_output",
- "id": "17",
- "inputParameters": [
- "filtered-data:detonation_status_filter:condition_1:file_detonation:action_result.parameter.file",
- "filtered-data:detonation_status_filter:condition_1:file_detonation:action_result.data.*.JobID",
- "filtered-data:file_summary_filter:condition_1:get_file_summary_output:action_result.parameter.job_id",
- "filtered-data:file_summary_filter:condition_1:get_file_summary_output:action_result.data.*.Submission.Name",
- "filtered-data:file_summary_filter:condition_1:get_file_summary_output:action_result.summary.Score",
- "filtered-data:file_summary_filter:condition_1:get_file_summary_output:action_result.data.*.Resources",
- "filtered-data:file_summary_filter:condition_1:get_file_summary_output:action_result.data.*.Verdict",
- "filtered-data:file_summary_filter:condition_1:get_file_summary_output:action_result.data.*.Tasks"
- ],
- "outputVariables": [
- "file_score_object",
- "scores",
- "categories",
- "score_id",
- "file",
- "job_id",
- "classifications",
- "file_name"
- ],
- "type": "code"
- },
- "errors": {},
- "id": "17",
- "type": "code",
- "userCode": " \n \n score_table = {\n \"0\":\"Unknown\",\n \"1\":\"Very_Safe\",\n \"2\":\"Safe\",\n \"3\":\"Probably_Safe\",\n \"4\":\"Leans_Safe\",\n \"5\":\"May_not_be_Safe\",\n \"6\":\"Exercise_Caution\",\n \"7\":\"Suspicious_or_Risky\",\n \"8\":\"Possibly_Malicious\",\n \"9\":\"Probably_Malicious\",\n \"10\":\"Malicious\"\n }\n\n classification_ids = {\n \"Unknown\": 0,\n \"Adware\": 1,\n \"Backdoor\": 2,\n \"Bot\": 3,\n \"Bootkit\": 4,\n \"DDOS\": 5,\n \"Downloader\": 6,\n \"Dropper\": 7,\n \"Exploit-Kit\": 8,\n \"Keylogger\": 9,\n \"Ransomware\": 10,\n \"Remote-Access-Trojan\": 11,\n \"Resource-Exploitation\": 13,\n \"Rogue-Security-Software\": 14,\n \"Rootkit\": 15,\n \"Screen-Capture\": 16,\n \"Spyware\": 17,\n \"Trojan\": 18,\n \"Virus\": 19,\n \"Webshell\": 20,\n \"Wiper\": 21,\n \"Worm\": 22,\n \"Other\": 99\n }\n\n normalized_file_summary_output__file_score_object = []\n normalized_file_summary_output__scores = []\n normalized_file_summary_output__categories = []\n normalized_file_summary_output__score_id = []\n normalized_file_summary_output__file = []\n normalized_file_summary_output__job_id = []\n normalized_file_summary_output__classifications = []\n normalized_file_summary_output__file_name = []\n \n \n def find_sha1_details(target_id, task_list):\n '''\n Attempt to find the detail object with a sha1\n '''\n for task in task_list:\n if (target_id == task.get('ResourceID')\n and task.get('Results',{}).get('Details', {}).get('sha1')):\n task_result_details = task['Results']['Details']\n task_result_details.pop('RootTaskID', None)\n return task_result_details\n return None\n\n \n ## pair forensic job results with url detonated\n job_file_dict = {}\n for orig_file, orig_job, filtered_job in zip(filtered_result_0_parameter_file, filtered_result_0_data___jobid, filtered_result_1_parameter_job_id):\n if orig_job == filtered_job:\n job_file_dict[filtered_job] = orig_file\n \n for job, file_name, score_num, resources, verdict, tasks in zip(\n 
filtered_result_1_parameter_job_id, \n filtered_result_1_data___submission_name, \n filtered_result_1_summary_score, \n filtered_result_1_data___resources, \n filtered_result_1_data___verdict,\n filtered_result_1_data___tasks\n ):\n \n ## translate scores\n score_id = int(score_num/10) if score_num > 0 else 0\n score = score_table[str(score_id)]\n file = job_file_dict[job]\n attributes = {}\n \n ## build.a sub dictionary of high priority related observables\n related_observables = []\n for sub_observ in resources:\n if sub_observ['Name'] != file_name:\n \n details = find_sha1_details(sub_observ['ID'], tasks)\n second_num = sub_observ['DisplayScore']\n second_num_id = int(second_num/10) if second_num > 0 else 0\n sub_observ_dict = {\n 'value': sub_observ['Name'],\n 'type': sub_observ['Type'].lower(),\n 'reputation': {\n 'score': score_table[str(second_num_id)],\n 'orig_score': second_num,\n 'score_id': second_num_id\n },\n 'source': 'Splunk Attack Analyzer'\n }\n if details:\n details['name'] = sub_observ['Name']\n details.pop('exiftool', None)\n sub_observ_dict['attributes'] = details\n # check if observ is already in related_observables\n skip_observ = False\n for idx, item in enumerate(related_observables):\n if (sub_observ.get('FileMetadata', {}).get('SHA256', 'null_one') \n == item.get('attributes', {}).get('sha256', 'null_two')\n and sub_observ['DisplayScore'] > item['reputation']['orig_score']):\n related_observables[idx] = sub_observ_dict\n skip_observ = True\n elif sub_observ['Name'] == item['value']:\n skip_observ = True\n if not skip_observ:\n related_observables.append(sub_observ_dict)\n elif sub_observ['Name'] == file_name:\n details = find_sha1_details(sub_observ['ID'], tasks)\n if details:\n details.pop('exiftool', None)\n details['name'] = file_name\n attributes = details\n else:\n file_metadata = sub_observ.get('FileMetadata', {})\n attributes = {\n 'name': file_name,\n 'sha256': file_metadata.get('SHA256'),\n 'md5': file_metadata.get('MD5'),\n 
'size': file_metadata.get('Size')\n }\n if file_metadata.get('MimeType'):\n attributes['mime_type'] = file_metadata['MimeType']\n \n normalized_file_summary_output__file_score_object.append({\n 'value': file, \n 'orig_score': score_num, \n 'score': score, \n 'score_id': score_id, \n 'classifications': [verdict if verdict else \"Unknown\"],\n 'classification_ids': [classification_ids.get(verdict, 99) if verdict else 0],\n 'related_observables': related_observables,\n 'attributes': attributes\n \n })\n normalized_file_summary_output__scores.append(score)\n normalized_file_summary_output__score_id.append(score_id)\n normalized_file_summary_output__file.append(file)\n normalized_file_summary_output__file_name.append(file_name)\n normalized_file_summary_output__job_id.append(job)\n normalized_file_summary_output__classifications.append([verdict if verdict else \"Unknown\"])\n \n \n",
- "warnings": {},
- "x": 340,
- "y": 1440
- },
- "18": {
- "data": {
- "advanced": {
- "customName": "format file report ",
- "customNameId": 0,
- "description": "Format a summary table with the information gathered from the playbook.",
- "drop_none": true,
- "join": [],
- "note": "Format a summary table with the information gathered from the playbook."
- },
- "functionId": 2,
- "functionName": "format_file_report",
- "id": "18",
- "parameters": [
- "normalized_file_summary_output:custom_function:file_name",
- "normalized_file_summary_output:custom_function:scores",
- "normalized_file_summary_output:custom_function:score_id",
- "normalized_file_summary_output:custom_function:classifications",
- "normalized_file_summary_output:custom_function:job_id",
- "file_screenshot_formatting:custom_function:report"
- ],
- "template": "SOAR analyzed File(s) using Splunk Attack Analyzer. The table below shows a summary of the information gathered.\n\n| File Name | Normalized Score | Score Id | Classifications | Report Link | Source |\n| --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {1} | {2} | {3} |https://app.twinwave.io/job/{4} | Splunk Attack Analyzer (SAA) |\n%%\n\nScreenshots associated with the detonated Files are shown below (if available):\n\n{5}\n\n",
- "type": "format"
- },
- "errors": {},
- "id": "18",
- "type": "format",
- "userCode": "\n # Write your custom code here...\n #phantom.debug(phantom.format(container=container, template=template, parameters=parameters, name=\"format_report_file\"))\n",
- "warnings": {},
- "x": 340,
- "y": 1638
- },
- "19": {
- "data": {
- "advanced": {
- "customName": "build file output",
- "customNameId": 0,
- "description": "This block uses custom code to generate an observable dictionary to output into the observables data path.",
- "join": [],
- "note": "This block uses custom code to generate an observable dictionary to output into the observables data path."
- },
- "functionId": 6,
- "functionName": "build_file_output",
- "id": "19",
- "inputParameters": [
- "normalized_file_summary_output:custom_function:file",
- "normalized_file_summary_output:custom_function:job_id",
- "normalized_file_summary_output:custom_function:file_score_object"
- ],
- "outputVariables": [
- "observable_array"
- ],
- "type": "code"
- },
- "errors": {},
- "id": "19",
- "type": "code",
- "userCode": "\n # Write your custom code here...\n build_file_output__observable_array = []\n for _vault_id, external_id, file_object in zip(normalized_file_summary_output__file, normalized_file_summary_output__job_id, normalized_file_summary_output__file_score_object):\n #phantom.debug(\"vault: {} id: {}\".format(_vault_id, external_id))\n observable_object = {\n \"value\": _vault_id,\n \"type\": \"hash\",\n \"attributes\": file_object['attributes'],\n \"reputation\": {\n \"orig_score\": file_object['orig_score'],\n \"score\": file_object['score'],\n \"score_id\": file_object['score_id']\n },\n \"malware\": {\n \"classifications\": file_object['classifications'],\n \"classification_ids\": file_object['classification_ids']\n },\n \"source\": \"Splunk Attack Analyzer\",\n \"source_link\":f\"https://app.twinwave.io/job/{external_id}\"\n }\n if file_object.get('related_observables'):\n observable_object[\"related_observables\"] = file_object['related_observables']\n \n build_file_output__observable_array.append(observable_object)\n \n",
- "warnings": {},
- "x": 340,
- "y": 1820
- },
- "2": {
- "data": {
- "advanced": {
- "customName": "saa input filter",
- "customNameId": 0,
- "delimiter": ",",
- "delimiter_enabled": true,
- "description": "Determine branches based on provided inputs.",
- "join": [],
- "note": "Determine branches based on provided inputs."
+ "globalCustomCode": null,
+ "hash": "74a51ccf79fc9363135cdd3b63ffe935edbeec5a",
+ "nodes": {
+ "0": {
+ "data": {
+ "advanced": {
+ "join": []
+ },
+ "functionName": "on_start",
+ "id": "0",
+ "type": "start"
+ },
+ "errors": {},
+ "id": "0",
+ "type": "start",
+ "warnings": {},
+ "x": 190,
+ "y": -8.952838470577262e-13
+ },
+ "1": {
+ "data": {
+ "advanced": {
+ "join": []
+ },
+ "functionName": "on_finish",
+ "id": "1",
+ "type": "end"
+ },
+ "errors": {},
+ "id": "1",
+ "type": "end",
+ "userCode": "\n # Write your custom code here...\n #phantom.debug(output)\n",
+ "warnings": {},
+ "x": 190,
+ "y": 1994
+ },
+ "10": {
+ "data": {
+ "advanced": {
+ "customName": "format url report ",
+ "customNameId": 0,
+ "description": "Format a summary table with the information gathered from the playbook.",
+ "drop_none": true,
+ "join": [],
+ "note": "Format a summary table with the information gathered from the playbook."
+ },
+ "functionId": 1,
+ "functionName": "format_url_report",
+ "id": "10",
+ "parameters": [
+ "normalized_url_summary_output:custom_function:url",
+ "normalized_url_summary_output:custom_function:scores",
+ "normalized_url_summary_output:custom_function:score_id",
+ "normalized_url_summary_output:custom_function:classifications",
+ "normalized_url_summary_output:custom_function:job_id",
+ "url_screenshot_formatting:custom_function:report",
+ "get_url_summary_output:action_result.summary.AppURL"
+ ],
+ "template": "SOAR analyzed URL(s) using Splunk Attack Analyzer. The table below shows a summary of the information gathered.\n\n| URL | Normalized Score | Score Id | Classifications | Report Link | Source |\n| --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {1} | {2} | {3} | {6} | Splunk Attack Analyzer (SAA) |\n%%\n\nScreenshots associated with the detonated URLs are shown below (if available):\n\n{5}\n",
+ "type": "format"
+ },
+ "errors": {},
+ "id": "10",
+ "type": "format",
+ "userCode": "\n # Write your custom code here...\n #phantom.debug(phantom.format(container=container, template=template, parameters=parameters, name=\"format_report_url\"))\n",
+ "warnings": {},
+ "x": 0,
+ "y": 1640
+ },
+ "11": {
+ "data": {
+ "advanced": {
+ "customName": "build url output",
+ "customNameId": 0,
+ "description": "This block uses custom code to generate an observable dictionary to output into the observables data path.",
+ "join": [],
+ "note": "This block uses custom code to generate an observable dictionary to output into the observables data path."
+ },
+ "functionId": 3,
+ "functionName": "build_url_output",
+ "id": "11",
+ "inputParameters": [
+ "normalized_url_summary_output:custom_function:url",
+ "normalized_url_summary_output:custom_function:job_id",
+ "normalized_url_summary_output:custom_function:url_score_object",
+ "get_url_summary_output:action_result.summary.AppURL"
+ ],
+ "outputVariables": [
+ "observable_array"
+ ],
+ "type": "code"
+ },
+ "errors": {},
+ "id": "11",
+ "type": "code",
+ "userCode": "\n # Write your custom code here...\n from urllib.parse import urlparse\n build_url_output__observable_array = []\n #phantom.debug(playbook_input_url_values)\n # Build URL\n\n \n for url, external_id, url_object, app_url in zip(normalized_url_summary_output__url, normalized_url_summary_output__job_id, normalized_url_summary_output__url_score_object, get_url_summary_output_summary_appurl):\n parsed_url = urlparse(url)\n #phantom.debug(\"url: {} jobs_id:{}\".format(url, external_id))\n #phantom.debug(\"parsed_url: {}, url_object: {}\".format(parsed_url, url_object))\n observable_object = {\n \"value\": url,\n \"type\": \"url\",\n \"reputation\": {\n \"orig_score\": url_object['orig_score'],\n \"score\": url_object['score'],\n \"score_id\": url_object['score_id']\n },\n \"attributes\": {\n \"hostname\": parsed_url.hostname,\n \"scheme\": parsed_url.scheme\n },\n \"malware\": {\n \"classifications\": url_object['classifications'],\n \"classification_ids\": url_object['classification_ids']\n },\n \"source\": \"Splunk Attack Analyzer\",\n \"source_link\": f\"{app_url}\"\n }\n if url_object.get('related_observables'):\n observable_object[\"related_observables\"] = url_object['related_observables']\n \n if parsed_url.path:\n observable_object['attributes']['path'] = parsed_url.path\n if parsed_url.query:\n observable_object['attributes']['query'] = parsed_url.query\n if parsed_url.port:\n observable_object['attributes']['port'] = parsed_url.port\n\n build_url_output__observable_array.append(observable_object)\n #phantom.debug(\"build_url_output__observable_array: {}\".format(build_url_output__observable_array))\n",
+ "warnings": {},
+ "x": 0,
+ "y": 1820
+ },
+ "12": {
+ "data": {
+ "action": "detonate file",
+ "actionType": "generic",
+ "advanced": {
+ "customName": "file detonation",
+ "customNameId": 0,
+ "delayTime": 0,
+ "description": "Queries SAA for information about the provided vault_id(s)",
+ "join": [],
+ "note": "Queries SAA for information about the provided vault_id(s)"
+ },
+ "connector": "Splunk Attack Analyzer Connector for Splunk SOAR",
+ "connectorConfigs": [
+ "splunk_attack_analyzer"
+ ],
+ "connectorId": "de681fee-c552-45bf-9212-827b1c7529f8",
+ "connectorVersion": "v1",
+ "functionId": 1,
+ "functionName": "file_detonation",
+ "id": "12",
+ "loop": {
+ "enabled": false,
+ "exitAfterUnit": "m",
+ "exitAfterValue": 10,
+ "exitConditionEnabled": false,
+ "exitLoopAfter": 2,
+ "pauseUnit": "m",
+ "pauseValue": 2
+ },
+ "parameters": {
+ "file": "filtered-data:saa_input_filter:condition_2:playbook_input:vault_id"
+ },
+ "requiredParameters": [
+ {
+ "data_type": "string",
+ "field": "file"
+ }
+ ],
+ "type": "action"
+ },
+ "errors": {},
+ "id": "12",
+ "type": "action",
+ "warnings": {},
+ "x": 340,
+ "y": 328
+ },
+ "15": {
+ "data": {
+ "action": "get job summary",
+ "actionType": "investigate",
+ "advanced": {
+ "customName": "get file summary output",
+ "customNameId": 0,
+ "delayTime": 0,
+ "description": "Queries SAA Forensics data relative to the JobID of URL(s) or File(s) needs to be detonated.",
+ "join": [],
+ "note": "Queries SAA Forensics data relative to the JobID of URL(s) or File(s) needs to be detonated."
+ },
+ "connector": "Splunk Attack Analyzer Connector for Splunk SOAR",
+ "connectorConfigs": [
+ "splunk_attack_analyzer"
+ ],
+ "connectorId": "de681fee-c552-45bf-9212-827b1c7529f8",
+ "connectorVersion": "v1",
+ "functionId": 2,
+ "functionName": "get_file_summary_output",
+ "id": "15",
+ "loop": {
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": "==",
+ "param": "get_file_summary_output:action_result.data.*.State",
+ "value": "done"
+ }
+ ],
+ "conditionIndex": 0,
+ "display": "If",
+ "logic": "and",
+ "type": "if"
+ }
+ ],
+ "enabled": true,
+ "exitAfterUnit": "m",
+ "exitAfterValue": 30,
+ "exitConditionEnabled": true,
+ "exitLoopAfter": 15,
+ "pauseUnit": "m",
+ "pauseValue": 2
+ },
+ "parameters": {
+ "job_id": "filtered-data:detonation_status_filter:condition_1:file_detonation:action_result.data.*.JobID",
+ "timeout": ""
+ },
+ "requiredParameters": [
+ {
+ "data_type": "string",
+ "field": "job_id"
+ }
+ ],
+ "type": "action"
+ },
+ "errors": {},
+ "id": "15",
+ "type": "action",
+ "userCode": "\n # Write your custom code here...\n #parameters = []\n #for job_ids in file_jobid_detonation_output__jobid:\n # for job in job_ids:\n # if job is not None:\n # parameters.append({\n # \"job_id\": job,\n # \"timeout\": 5,\n # })\n #phantom.debug(parameters)\n",
+ "warnings": {},
+ "x": 340,
+ "y": 686
+ },
+ "17": {
+ "data": {
+ "advanced": {
+ "customName": "normalized file summary output",
+ "customNameId": 0,
+ "description": "This block uses custom code for normalizing score. Adjust the logic as desired in the documented sections.",
+ "join": [],
+ "note": "This block uses custom code for normalizing score. Adjust the logic as desired in the documented sections."
+ },
+ "functionId": 5,
+ "functionName": "normalized_file_summary_output",
+ "id": "17",
+ "inputParameters": [
+ "filtered-data:detonation_status_filter:condition_1:file_detonation:action_result.parameter.file",
+ "filtered-data:detonation_status_filter:condition_1:file_detonation:action_result.data.*.JobID",
+ "filtered-data:file_summary_filter:condition_1:get_file_summary_output:action_result.parameter.job_id",
+ "filtered-data:file_summary_filter:condition_1:get_file_summary_output:action_result.data.*.Submission.Name",
+ "filtered-data:file_summary_filter:condition_1:get_file_summary_output:action_result.summary.Score",
+ "filtered-data:file_summary_filter:condition_1:get_file_summary_output:action_result.data.*.Resources",
+ "filtered-data:file_summary_filter:condition_1:get_file_summary_output:action_result.data.*.Verdict",
+ "filtered-data:file_summary_filter:condition_1:get_file_summary_output:action_result.data.*.Tasks"
+ ],
+ "outputVariables": [
+ "file_score_object",
+ "scores",
+ "categories",
+ "score_id",
+ "file",
+ "job_id",
+ "classifications",
+ "file_name"
+ ],
+ "type": "code"
+ },
+ "errors": {},
+ "id": "17",
+ "type": "code",
+ "userCode": " \n \n score_table = {\n \"0\":\"Unknown\",\n \"1\":\"Very_Safe\",\n \"2\":\"Safe\",\n \"3\":\"Probably_Safe\",\n \"4\":\"Leans_Safe\",\n \"5\":\"May_not_be_Safe\",\n \"6\":\"Exercise_Caution\",\n \"7\":\"Suspicious_or_Risky\",\n \"8\":\"Possibly_Malicious\",\n \"9\":\"Probably_Malicious\",\n \"10\":\"Malicious\"\n }\n\n classification_ids = {\n \"Unknown\": 0,\n \"Adware\": 1,\n \"Backdoor\": 2,\n \"Bot\": 3,\n \"Bootkit\": 4,\n \"DDOS\": 5,\n \"Downloader\": 6,\n \"Dropper\": 7,\n \"Exploit-Kit\": 8,\n \"Keylogger\": 9,\n \"Ransomware\": 10,\n \"Remote-Access-Trojan\": 11,\n \"Resource-Exploitation\": 13,\n \"Rogue-Security-Software\": 14,\n \"Rootkit\": 15,\n \"Screen-Capture\": 16,\n \"Spyware\": 17,\n \"Trojan\": 18,\n \"Virus\": 19,\n \"Webshell\": 20,\n \"Wiper\": 21,\n \"Worm\": 22,\n \"Other\": 99\n }\n\n normalized_file_summary_output__file_score_object = []\n normalized_file_summary_output__scores = []\n normalized_file_summary_output__categories = []\n normalized_file_summary_output__score_id = []\n normalized_file_summary_output__file = []\n normalized_file_summary_output__job_id = []\n normalized_file_summary_output__classifications = []\n normalized_file_summary_output__file_name = []\n \n \n def find_sha1_details(target_id, task_list):\n '''\n Attempt to find the detail object with a sha1\n '''\n for task in task_list:\n if (target_id == task.get('ResourceID')\n and task.get('Results',{}).get('Details', {}).get('sha1')):\n task_result_details = task['Results']['Details']\n task_result_details.pop('RootTaskID', None)\n return task_result_details\n return None\n\n \n ## pair forensic job results with url detonated\n job_file_dict = {}\n for orig_file, orig_job, filtered_job in zip(filtered_result_0_parameter_file, filtered_result_0_data___jobid, filtered_result_1_parameter_job_id):\n if orig_job == filtered_job:\n job_file_dict[filtered_job] = orig_file\n \n for job, file_name, score_num, resources, verdict, tasks in zip(\n 
filtered_result_1_parameter_job_id, \n filtered_result_1_data___submission_name, \n filtered_result_1_summary_score, \n filtered_result_1_data___resources, \n filtered_result_1_data___verdict,\n filtered_result_1_data___tasks\n ):\n \n ## translate scores\n score_id = int(score_num/10) if score_num > 0 else 0\n score = score_table[str(score_id)]\n file = job_file_dict[job]\n attributes = {}\n \n ## build.a sub dictionary of high priority related observables\n related_observables = []\n for sub_observ in resources:\n if sub_observ['Name'] != file_name:\n \n details = find_sha1_details(sub_observ['ID'], tasks)\n second_num = sub_observ['DisplayScore']\n second_num_id = int(second_num/10) if second_num > 0 else 0\n sub_observ_dict = {\n 'value': sub_observ['Name'],\n 'type': sub_observ['Type'].lower(),\n 'reputation': {\n 'score': score_table[str(second_num_id)],\n 'orig_score': second_num,\n 'score_id': second_num_id\n },\n 'source': 'Splunk Attack Analyzer'\n }\n if details:\n details['name'] = sub_observ['Name']\n details.pop('exiftool', None)\n sub_observ_dict['attributes'] = details\n # check if observ is already in related_observables\n skip_observ = False\n for idx, item in enumerate(related_observables):\n if (sub_observ.get('FileMetadata', {}).get('SHA256', 'null_one') \n == item.get('attributes', {}).get('sha256', 'null_two')\n and sub_observ['DisplayScore'] > item['reputation']['orig_score']):\n related_observables[idx] = sub_observ_dict\n skip_observ = True\n elif sub_observ['Name'] == item['value']:\n skip_observ = True\n if not skip_observ:\n related_observables.append(sub_observ_dict)\n elif sub_observ['Name'] == file_name:\n details = find_sha1_details(sub_observ['ID'], tasks)\n if details:\n details.pop('exiftool', None)\n details['name'] = file_name\n attributes = details\n else:\n file_metadata = sub_observ.get('FileMetadata', {})\n attributes = {\n 'name': file_name,\n 'sha256': file_metadata.get('SHA256'),\n 'md5': file_metadata.get('MD5'),\n 
'size': file_metadata.get('Size')\n }\n if file_metadata.get('MimeType'):\n attributes['mime_type'] = file_metadata['MimeType']\n \n normalized_file_summary_output__file_score_object.append({\n 'value': file, \n 'orig_score': score_num, \n 'score': score, \n 'score_id': score_id, \n 'classifications': [verdict if verdict else \"Unknown\"],\n 'classification_ids': [classification_ids.get(verdict, 99) if verdict else 0],\n 'related_observables': related_observables,\n 'attributes': attributes\n \n })\n normalized_file_summary_output__scores.append(score)\n normalized_file_summary_output__score_id.append(score_id)\n normalized_file_summary_output__file.append(file)\n normalized_file_summary_output__file_name.append(file_name)\n normalized_file_summary_output__job_id.append(job)\n normalized_file_summary_output__classifications.append([verdict if verdict else \"Unknown\"])\n \n \n",
+ "warnings": {},
+ "x": 340,
+ "y": 1440
+ },
+ "18": {
+ "data": {
+ "advanced": {
+ "customName": "format file report ",
+ "customNameId": 0,
+ "description": "Format a summary table with the information gathered from the playbook.",
+ "drop_none": true,
+ "join": [],
+ "note": "Format a summary table with the information gathered from the playbook."
+ },
+ "functionId": 2,
+ "functionName": "format_file_report",
+ "id": "18",
+ "parameters": [
+ "normalized_file_summary_output:custom_function:file_name",
+ "normalized_file_summary_output:custom_function:scores",
+ "normalized_file_summary_output:custom_function:score_id",
+ "normalized_file_summary_output:custom_function:classifications",
+ "normalized_file_summary_output:custom_function:job_id",
+ "file_screenshot_formatting:custom_function:report",
+ "get_file_summary_output:action_result.summary.AppURL"
+ ],
+ "template": "SOAR analyzed File(s) using Splunk Attack Analyzer. The table below shows a summary of the information gathered.\n\n| File Name | Normalized Score | Score Id | Classifications | Report Link | Source |\n| --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {1} | {2} | {3} | {6} | Splunk Attack Analyzer (SAA) |\n%%\n\nScreenshots associated with the detonated Files are shown below (if available):\n\n{5}\n\n",
+ "type": "format"
+ },
+ "errors": {},
+ "id": "18",
+ "type": "format",
+ "userCode": "\n # Write your custom code here...\n #phantom.debug(phantom.format(container=container, template=template, parameters=parameters, name=\"format_report_file\"))\n",
+ "warnings": {},
+ "x": 340,
+ "y": 1638
+ },
+ "19": {
+ "data": {
+ "advanced": {
+ "customName": "build file output",
+ "customNameId": 0,
+ "description": "This block uses custom code to generate an observable dictionary to output into the observables data path.",
+ "join": [],
+ "note": "This block uses custom code to generate an observable dictionary to output into the observables data path."
+ },
+ "functionId": 6,
+ "functionName": "build_file_output",
+ "id": "19",
+ "inputParameters": [
+ "normalized_file_summary_output:custom_function:file",
+ "normalized_file_summary_output:custom_function:job_id",
+ "normalized_file_summary_output:custom_function:file_score_object",
+ "get_file_summary_output:action_result.summary.AppURL"
+ ],
+ "outputVariables": [
+ "observable_array"
+ ],
+ "type": "code"
+ },
+ "errors": {},
+ "id": "19",
+ "type": "code",
+ "userCode": "\n # Write your custom code here...\n build_file_output__observable_array = []\n for _vault_id, external_id, file_object, app_url in zip(normalized_file_summary_output__file, normalized_file_summary_output__job_id, normalized_file_summary_output__file_score_object, get_file_summary_output_summary_appurl):\n #phantom.debug(\"vault: {} id: {}\".format(_vault_id, external_id))\n observable_object = {\n \"value\": _vault_id,\n \"type\": \"hash\",\n \"attributes\": file_object['attributes'],\n \"reputation\": {\n \"orig_score\": file_object['orig_score'],\n \"score\": file_object['score'],\n \"score_id\": file_object['score_id']\n },\n \"malware\": {\n \"classifications\": file_object['classifications'],\n \"classification_ids\": file_object['classification_ids']\n },\n \"source\": \"Splunk Attack Analyzer\",\n \"source_link\":f\"{app_url}\"\n }\n if file_object.get('related_observables'):\n observable_object[\"related_observables\"] = file_object['related_observables']\n \n build_file_output__observable_array.append(observable_object)\n \n",
+ "warnings": {},
+ "x": 340,
+ "y": 1820
+ },
+ "2": {
+ "data": {
+ "advanced": {
+ "customName": "saa input filter",
+ "customNameId": 0,
+ "delimiter": ",",
+ "delimiter_enabled": true,
+ "description": "Determine branches based on provided inputs.",
+ "join": [],
+ "note": "Determine branches based on provided inputs."
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": "!=",
+ "param": "playbook_input:url",
+ "value": ""
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "saa_url_input",
+ "logic": "and"
+ },
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 1,
+ "op": "!=",
+ "param": "playbook_input:vault_id",
+ "value": ""
+ }
+ ],
+ "conditionIndex": 1,
+ "customName": "saa_vault_id_input",
+ "logic": "and"
+ }
+ ],
+ "functionId": 1,
+ "functionName": "saa_input_filter",
+ "id": "2",
+ "type": "filter"
+ },
+ "errors": {},
+ "id": "2",
+ "type": "filter",
+ "warnings": {},
+ "x": 230,
+ "y": 148
+ },
+ "22": {
+ "data": {
+ "advanced": {
+ "customName": "url status filter",
+ "customNameId": 0,
+ "delimiter": ",",
+ "delimiter_enabled": true,
+ "description": "Filters url detonation results.",
+ "join": [],
+ "note": "Filters url detonation results."
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": "==",
+ "param": "url_detonation:action_result.status",
+ "value": "success"
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "saa_url_success_status",
+ "logic": "and"
+ }
+ ],
+ "functionId": 5,
+ "functionName": "url_status_filter",
+ "id": "22",
+ "type": "filter"
+ },
+ "errors": {},
+ "id": "22",
+ "type": "filter",
+ "warnings": {},
+ "x": 60,
+ "y": 506
+ },
+ "23": {
+ "data": {
+ "advanced": {
+ "customName": "file summary filter",
+ "customNameId": 0,
+ "delimiter": ",",
+ "delimiter_enabled": true,
+ "description": "Filters successful file detonation job forensic results.",
+ "join": [],
+ "note": "Filters successful file detonation job forensic results."
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": "==",
+ "param": "get_file_summary_output:action_result.status",
+ "value": "success"
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "get_job_file_summary_sucess",
+ "logic": "and"
+ }
+ ],
+ "functionId": 6,
+ "functionName": "file_summary_filter",
+ "id": "23",
+ "type": "filter"
+ },
+ "errors": {},
+ "id": "23",
+ "type": "filter",
+ "warnings": {},
+ "x": 400,
+ "y": 864
+ },
+ "24": {
+ "data": {
+ "action": "get job screenshots",
+ "actionType": "investigate",
+ "advanced": {
+ "customName": "get url job screenshots",
+ "customNameId": 0,
+ "description": "Add the job screenshots to the vault",
+ "join": [],
+ "note": "Add the job screenshots to the vault"
+ },
+ "connector": "Splunk Attack Analyzer Connector for Splunk SOAR",
+ "connectorConfigs": [
+ "splunk_attack_analyzer"
+ ],
+ "connectorId": "de681fee-c552-45bf-9212-827b1c7529f8",
+ "connectorVersion": "v1",
+ "functionId": 1,
+ "functionName": "get_url_job_screenshots",
+ "id": "24",
+ "loop": {
+ "enabled": false,
+ "exitAfterUnit": "m",
+ "exitAfterValue": 10,
+ "exitConditionEnabled": false,
+ "exitLoopAfter": 2,
+ "pauseUnit": "m",
+ "pauseValue": 2
+ },
+ "parameters": {
+ "job_id": "filtered-data:url_summary_filter:condition_1:get_url_summary_output:action_result.parameter.job_id"
+ },
+ "requiredParameters": [
+ {
+ "data_type": "string",
+ "field": "job_id"
+ }
+ ],
+ "type": "action"
+ },
+ "errors": {},
+ "id": "24",
+ "type": "action",
+ "warnings": {},
+ "x": 0,
+ "y": 1220
+ },
+ "25": {
+ "data": {
+ "action": "get job screenshots",
+ "actionType": "investigate",
+ "advanced": {
+ "customName": "get file job screenshots",
+ "customNameId": 0,
+ "description": "Add the job screenshots to the vault",
+ "join": [],
+ "note": "Add the job screenshots to the vault"
+ },
+ "connector": "Splunk Attack Analyzer Connector for Splunk SOAR",
+ "connectorConfigs": [
+ "splunk_attack_analyzer"
+ ],
+ "connectorId": "de681fee-c552-45bf-9212-827b1c7529f8",
+ "connectorVersion": "v1",
+ "functionId": 2,
+ "functionName": "get_file_job_screenshots",
+ "id": "25",
+ "loop": {
+ "enabled": false,
+ "exitAfterUnit": "m",
+ "exitAfterValue": 10,
+ "exitConditionEnabled": false,
+ "exitLoopAfter": 2,
+ "pauseUnit": "m",
+ "pauseValue": 2
+ },
+ "parameters": {
+ "job_id": "filtered-data:file_summary_filter:condition_1:get_file_summary_output:action_result.parameter.job_id"
+ },
+ "requiredParameters": [
+ {
+ "data_type": "string",
+ "field": "job_id"
+ }
+ ],
+ "type": "action"
+ },
+ "errors": {},
+ "id": "25",
+ "type": "action",
+ "warnings": {},
+ "x": 340,
+ "y": 1220
+ },
+ "26": {
+ "data": {
+ "advanced": {
+ "customName": "url screenshot formatting",
+ "customNameId": 0,
+ "description": "Custom formatting for the markdown report that shows screenshots grouped by detonated URL",
+ "join": [],
+ "note": "Custom formatting for the markdown report that shows screenshots grouped by detonated URL"
+ },
+ "functionId": 1,
+ "functionName": "url_screenshot_formatting",
+ "id": "26",
+ "inputParameters": [
+ "filtered-data:url_status_filter:condition_1:url_detonation:action_result.parameter.url",
+ "filtered-data:url_status_filter:condition_1:url_detonation:action_result.data.*.JobID",
+ "get_url_job_screenshots:action_result.parameter.job_id",
+ "get_url_job_screenshots:action_result.data.*.file_name",
+ "get_url_job_screenshots:action_result.data.*.id"
+ ],
+ "outputVariables": [
+ "report"
+ ],
+ "type": "code"
+ },
+ "errors": {},
+ "id": "26",
+ "type": "code",
+ "userCode": " url_screenshot_formatting__report = \"\"\n \n for url, job_id in zip(filtered_result_0_parameter_url, filtered_result_0_data___jobid):\n url_screenshot_formatting__report += f\"#### {url}\\n\"\n for screenshot_job, screenshot_name, screenshot_id in zip(get_url_job_screenshots_parameter_job_id, get_url_job_screenshots_result_item_1, get_url_job_screenshots_result_item_2):\n if job_id == screenshot_job:\n url_screenshot_formatting__report += f\"\\n\"\n\n",
+ "warnings": {},
+ "x": -320,
+ "y": 1440
+ },
+ "27": {
+ "data": {
+ "advanced": {
+ "customName": "file screenshot formatting",
+ "customNameId": 0,
+ "description": "Custom formatting for the markdown report that shows screenshots grouped by detonated file.",
+ "join": [],
+ "note": "Custom formatting for the markdown report that shows screenshots grouped by detonated file"
+ },
+ "functionId": 4,
+ "functionName": "file_screenshot_formatting",
+ "id": "27",
+ "inputParameters": [
+ "filtered-data:detonation_status_filter:condition_1:file_detonation:action_result.parameter.file",
+ "filtered-data:detonation_status_filter:condition_1:file_detonation:action_result.data.*.JobID",
+ "get_file_job_screenshots:action_result.parameter.job_id",
+ "get_file_job_screenshots:action_result.data.*.file_name",
+ "get_file_job_screenshots:action_result.data.*.id"
+ ],
+ "outputVariables": [
+ "report"
+ ],
+ "type": "code"
+ },
+ "errors": {},
+ "id": "27",
+ "type": "code",
+ "userCode": "\n file_screenshot_formatting__report = \"\"\n \n for file, job_id in zip(filtered_result_0_parameter_file, filtered_result_0_data___jobid):\n file_screenshot_formatting__report += f\"#### {file}\\n\"\n for screenshot_job, screenshot_name, screenshot_id in zip(get_file_job_screenshots_parameter_job_id, get_file_job_screenshots_result_item_1, get_file_job_screenshots_result_item_2):\n if job_id == screenshot_job:\n file_screenshot_formatting__report += f\"\\n\"\n\n",
+ "warnings": {},
+ "x": 680,
+ "y": 1440
+ },
+ "3": {
+ "data": {
+ "action": "detonate url",
+ "actionType": "generic",
+ "advanced": {
+ "customName": "url detonation",
+ "customNameId": 0,
+ "delayTime": 0,
+ "description": "Queries SAA for information about the provided URL(s)",
+ "join": [],
+ "note": "Queries SAA for information about the provided URL(s)"
+ },
+ "connector": "Splunk Attack Analyzer Connector for Splunk SOAR",
+ "connectorConfigs": [
+ "splunk_attack_analyzer"
+ ],
+ "connectorId": "de681fee-c552-45bf-9212-827b1c7529f8",
+ "connectorVersion": "v1",
+ "functionId": 1,
+ "functionName": "url_detonation",
+ "id": "3",
+ "loop": {
+ "enabled": false,
+ "exitAfterUnit": "m",
+ "exitAfterValue": 10,
+ "exitConditionEnabled": false,
+ "exitLoopAfter": 2,
+ "pauseUnit": "m",
+ "pauseValue": 2
+ },
+ "parameters": {
+ "url": "filtered-data:saa_input_filter:condition_1:playbook_input:url"
+ },
+ "requiredParameters": [
+ {
+ "data_type": "string",
+ "field": "url"
+ }
+ ],
+ "type": "action"
+ },
+ "errors": {},
+ "id": "3",
+ "type": "action",
+ "userCode": "\n # Write your custom code here...\n\n",
+ "warnings": {},
+ "x": 0,
+ "y": 320
+ },
+ "4": {
+ "data": {
+ "advanced": {
+ "customName": "detonation status filter",
+ "customNameId": 0,
+ "delimiter": ",",
+ "delimiter_enabled": true,
+ "description": "Filters successful file detonation results.",
+ "join": [],
+ "note": "Filters successful file detonation results."
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": "==",
+ "param": "file_detonation:action_result.status",
+ "value": "success"
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "saa_file_success_status",
+ "logic": "and"
+ }
+ ],
+ "functionId": 2,
+ "functionName": "detonation_status_filter",
+ "id": "4",
+ "type": "filter"
+ },
+ "errors": {},
+ "id": "4",
+ "type": "filter",
+ "warnings": {},
+ "x": 400,
+ "y": 506
+ },
+ "6": {
+ "data": {
+ "action": "get job summary",
+ "actionType": "investigate",
+ "advanced": {
+ "customName": "get url summary output",
+ "customNameId": 0,
+ "delayTime": 0,
+ "description": "Queries SAA Forensics data relative to the JobID of URL(s) or File(s) needs to be detonated.",
+ "join": [],
+ "note": "Queries SAA Forensics data relative to the JobID of URL(s) or File(s) needs to be detonated.",
+ "reviewer": ""
+ },
+ "connector": "Splunk Attack Analyzer Connector for Splunk SOAR",
+ "connectorConfigs": [
+ "splunk_attack_analyzer"
+ ],
+ "connectorId": "de681fee-c552-45bf-9212-827b1c7529f8",
+ "connectorVersion": "v1",
+ "functionId": 1,
+ "functionName": "get_url_summary_output",
+ "id": "6",
+ "loop": {
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": "==",
+ "param": "get_url_summary_output:action_result.data.*.State",
+ "value": "done"
+ }
+ ],
+ "conditionIndex": 0,
+ "display": "If",
+ "logic": "and",
+ "type": "if"
+ }
+ ],
+ "enabled": true,
+ "exitAfterUnit": "m",
+ "exitAfterValue": 30,
+ "exitConditionEnabled": true,
+ "exitLoopAfter": 15,
+ "pauseUnit": "m",
+ "pauseValue": 2
+ },
+ "parameters": {
+ "job_id": "filtered-data:url_status_filter:condition_1:url_detonation:action_result.data.*.JobID",
+ "timeout": ""
+ },
+ "requiredParameters": [
+ {
+ "data_type": "string",
+ "field": "job_id"
+ }
+ ],
+ "type": "action"
+ },
+ "errors": {},
+ "id": "6",
+ "type": "action",
+ "userCode": "\n # Write your custom code here...\n #parameters = []\n #for job_ids in url_jobid_detonation_output__jobid:\n # for job in job_ids:\n # if job is not None:\n # parameters.append({\n # \"job_id\": job,\n # \"timeout\": 5,\n # })\n #phantom.debug(parameters)\n",
+ "warnings": {},
+ "x": -1.4210854715202004e-14,
+ "y": 686
+ },
+ "7": {
+ "data": {
+ "advanced": {
+ "customName": "url summary filter",
+ "customNameId": 0,
+ "delimiter": ",",
+ "delimiter_enabled": true,
+ "description": "Filters successful url detonation job forensic results.",
+ "join": [],
+ "note": "Filters successful url detonation job forensic results."
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": "==",
+ "param": "get_url_summary_output:action_result.status",
+ "value": "success"
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "get_job_summary_success",
+ "logic": "and"
+ }
+ ],
+ "functionId": 3,
+ "functionName": "url_summary_filter",
+ "id": "7",
+ "type": "filter"
+ },
+ "errors": {},
+ "id": "7",
+ "type": "filter",
+ "warnings": {},
+ "x": 60,
+ "y": 864
+ },
+ "8": {
+ "data": {
+ "advanced": {
+ "customName": "normalized url summary output",
+ "customNameId": 0,
+ "description": "This block uses custom code for normalizing score. Adjust the logic as desired in the documented sections.",
+ "join": [],
+ "note": "This block uses custom code for normalizing score. Adjust the logic as desired in the documented sections."
+ },
+ "functionId": 2,
+ "functionName": "normalized_url_summary_output",
+ "id": "8",
+ "inputParameters": [
+ "filtered-data:url_status_filter:condition_1:url_detonation:action_result.parameter.url",
+ "filtered-data:url_status_filter:condition_1:url_detonation:action_result.data.*.JobID",
+ "filtered-data:url_summary_filter:condition_1:get_url_summary_output:action_result.parameter.job_id",
+ "filtered-data:url_summary_filter:condition_1:get_url_summary_output:action_result.summary.Score",
+ "filtered-data:url_summary_filter:condition_1:get_url_summary_output:action_result.data.*.Resources",
+ "filtered-data:url_summary_filter:condition_1:get_url_summary_output:action_result.data.*.Verdict"
+ ],
+ "outputVariables": [
+ "url_score_object",
+ "scores",
+ "classifications",
+ "score_id",
+ "url",
+ "job_id"
+ ],
+ "type": "code"
+ },
+ "errors": {},
+ "id": "8",
+ "type": "code",
+ "userCode": "\n score_table = {\n \"0\":\"Unknown\",\n \"1\":\"Very_Safe\",\n \"2\":\"Safe\",\n \"3\":\"Probably_Safe\",\n \"4\":\"Leans_Safe\",\n \"5\":\"May_not_be_Safe\",\n \"6\":\"Exercise_Caution\",\n \"7\":\"Suspicious_or_Risky\",\n \"8\":\"Possibly_Malicious\",\n \"9\":\"Probably_Malicious\",\n \"10\":\"Malicious\"\n }\n classification_ids = {\n \"Unknown\": 0,\n \"Adware\": 1,\n \"Backdoor\": 2,\n \"Bot\": 3,\n \"Bootkit\": 4,\n \"DDOS\": 5,\n \"Downloader\": 6,\n \"Dropper\": 7,\n \"Exploit-Kit\": 8,\n \"Keylogger\": 9,\n \"Ransomware\": 10,\n \"Remote-Access-Trojan\": 11,\n \"Resource-Exploitation\": 13,\n \"Rogue-Security-Software\": 14,\n \"Rootkit\": 15,\n \"Screen-Capture\": 16,\n \"Spyware\": 17,\n \"Trojan\": 18,\n \"Virus\": 19,\n \"Webshell\": 20,\n \"Wiper\": 21,\n \"Worm\": 22,\n \"Other\": 99\n }\n normalized_url_summary_output__url_score_object = []\n normalized_url_summary_output__scores = []\n normalized_url_summary_output__classifications = []\n normalized_url_summary_output__score_id = []\n normalized_url_summary_output__url = []\n normalized_url_summary_output__job_id = []\n \n ## pair forensic job results with url detonated\n job_url_dict = {}\n for orig_url, orig_job, filtered_job in zip(filtered_result_0_parameter_url, filtered_result_0_data___jobid, filtered_result_1_parameter_job_id):\n if orig_job == filtered_job:\n job_url_dict[filtered_job] = orig_url\n \n \n for job, score_num, resources, verdict in zip(filtered_result_1_parameter_job_id, filtered_result_1_summary_score, filtered_result_1_data___resources, filtered_result_1_data___verdict):\n \n ## translate scores\n score_id = int(score_num/10) if score_num > 0 else 0\n score = score_table[str(score_id)]\n url = job_url_dict[job]\n \n ## build a sub dictionary of high priority related observables\n related_observables = []\n for sub_observ in resources:\n if sub_observ['Name'] != url:\n second_num = sub_observ['DisplayScore']\n second_num_id = int(second_num/10) if second_num 
> 0 else 0\n related_observables.append({\n 'value': sub_observ['Name'],\n 'type': sub_observ['Type'].lower(),\n 'reputation': {\n 'score': score_table[str(second_num_id)],\n 'orig_score': second_num,\n 'score_id': second_num_id\n },\n 'source': 'Splunk Attack Analyzer'\n })\n \n # Attach final object\n normalized_url_summary_output__url_score_object.append({\n 'value': url, \n 'orig_score': score_num, \n 'score': score, \n 'score_id': score_id, \n 'classifications': [verdict if verdict else \"Unknown\"],\n 'classification_ids': [classification_ids.get(verdict, 99) if verdict else 0],\n 'related_observables': related_observables\n })\n normalized_url_summary_output__scores.append(score)\n normalized_url_summary_output__score_id.append(score_id)\n normalized_url_summary_output__url.append(url)\n normalized_url_summary_output__job_id.append(job)\n normalized_url_summary_output__classifications.append([verdict if verdict else \"Unknown\"])\n \n\n",
+ "warnings": {},
+ "x": 0,
+ "y": 1440
+ }
},
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": "!=",
- "param": "playbook_input:url",
- "value": ""
- }
- ],
- "conditionIndex": 0,
- "customName": "saa_url_input",
- "logic": "and"
- },
- {
- "comparisons": [
- {
- "conditionIndex": 1,
- "op": "!=",
- "param": "playbook_input:vault_id",
- "value": ""
- }
- ],
- "conditionIndex": 1,
- "customName": "saa_vault_id_input",
- "logic": "and"
- }
- ],
- "functionId": 1,
- "functionName": "saa_input_filter",
- "id": "2",
- "type": "filter"
- },
- "errors": {},
- "id": "2",
- "type": "filter",
- "warnings": {},
- "x": 230,
- "y": 148
+ "notes": "Inputs: url, vault_id\nInteractions: Splunk Attack Analyzer\nActions: url detonation, , file detonation\nOutputs: report, observables"
},
- "22": {
- "data": {
- "advanced": {
- "customName": "url status filter",
- "customNameId": 0,
- "delimiter": ",",
- "delimiter_enabled": true,
- "description": "Filters url detonation results.",
- "join": [],
- "note": "Filters url detonation results."
- },
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": "==",
- "param": "url_detonation:action_result.status",
- "value": "success"
- }
+ "input_spec": [
+ {
+ "contains": [
+ "url"
],
- "conditionIndex": 0,
- "customName": "saa_url_success_status",
- "logic": "and"
- }
- ],
- "functionId": 5,
- "functionName": "url_status_filter",
- "id": "22",
- "type": "filter"
- },
- "errors": {},
- "id": "22",
- "type": "filter",
- "warnings": {},
- "x": 60,
- "y": 506
- },
- "23": {
- "data": {
- "advanced": {
- "customName": "file summary filter",
- "customNameId": 0,
- "delimiter": ",",
- "delimiter_enabled": true,
- "description": "Filters successful file detonation job forensic results.",
- "join": [],
- "note": "Filters successful file detonation job forensic results."
+ "description": "A URL provided for reputation analysis - Splunk Attack Analyzer",
+ "name": "url"
},
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": "==",
- "param": "get_file_summary_output:action_result.status",
- "value": "success"
- }
+ {
+ "contains": [
+ "vault id"
],
- "conditionIndex": 0,
- "customName": "get_job_file_summary_sucess",
- "logic": "and"
- }
- ],
- "functionId": 6,
- "functionName": "file_summary_filter",
- "id": "23",
- "type": "filter"
- },
- "errors": {},
- "id": "23",
- "type": "filter",
- "warnings": {},
- "x": 400,
- "y": 864
- },
- "24": {
- "data": {
- "action": "get job screenshots",
- "actionType": "investigate",
- "advanced": {
- "customName": "get url job screenshots",
- "customNameId": 0,
- "description": "Add the job screenshots to the vault",
- "join": [],
- "note": "Add the job screenshots to the vault"
- },
- "connector": "Splunk Attack Analyzer Connector for Splunk SOAR",
- "connectorConfigs": [
- "splunk_attack_analyzer"
- ],
- "connectorId": "de681fee-c552-45bf-9212-827b1c7529f8",
- "connectorVersion": "v1",
- "functionId": 1,
- "functionName": "get_url_job_screenshots",
- "id": "24",
- "loop": {
- "enabled": false,
- "exitAfterUnit": "m",
- "exitAfterValue": 10,
- "exitConditionEnabled": false,
- "exitLoopAfter": 2,
- "pauseUnit": "m",
- "pauseValue": 2
- },
- "parameters": {
- "job_id": "filtered-data:url_summary_filter:condition_1:get_url_summary_output:action_result.parameter.job_id"
- },
- "requiredParameters": [
- {
- "data_type": "string",
- "field": "job_id"
- }
- ],
- "type": "action"
- },
- "errors": {},
- "id": "24",
- "type": "action",
- "warnings": {},
- "x": 0,
- "y": 1220
- },
- "25": {
- "data": {
- "action": "get job screenshots",
- "actionType": "investigate",
- "advanced": {
- "customName": "get file job screenshots",
- "customNameId": 0,
- "description": "Add the job screenshots to the vault",
- "join": [],
- "note": "Add the job screenshots to the vault"
- },
- "connector": "Splunk Attack Analyzer Connector for Splunk SOAR",
- "connectorConfigs": [
- "splunk_attack_analyzer"
- ],
- "connectorId": "de681fee-c552-45bf-9212-827b1c7529f8",
- "connectorVersion": "v1",
- "functionId": 2,
- "functionName": "get_file_job_screenshots",
- "id": "25",
- "loop": {
- "enabled": false,
- "exitAfterUnit": "m",
- "exitAfterValue": 10,
- "exitConditionEnabled": false,
- "exitLoopAfter": 2,
- "pauseUnit": "m",
- "pauseValue": 2
- },
- "parameters": {
- "job_id": "filtered-data:file_summary_filter:condition_1:get_file_summary_output:action_result.parameter.job_id"
- },
- "requiredParameters": [
- {
- "data_type": "string",
- "field": "job_id"
- }
- ],
- "type": "action"
- },
- "errors": {},
- "id": "25",
- "type": "action",
- "warnings": {},
- "x": 320,
- "y": 1200
- },
- "26": {
- "data": {
- "advanced": {
- "customName": "url screenshot formatting",
- "customNameId": 0,
- "description": "Custom formatting for the markdown report that shows screenshots grouped by detonated URL",
- "join": [],
- "note": "Custom formatting for the markdown report that shows screenshots grouped by detonated URL"
- },
- "functionId": 1,
- "functionName": "url_screenshot_formatting",
- "id": "26",
- "inputParameters": [
- "filtered-data:url_status_filter:condition_1:url_detonation:action_result.parameter.url",
- "filtered-data:url_status_filter:condition_1:url_detonation:action_result.data.*.JobID",
- "get_url_job_screenshots:action_result.parameter.job_id",
- "get_url_job_screenshots:action_result.data.*.file_name",
- "get_url_job_screenshots:action_result.data.*.id"
- ],
- "outputVariables": [
- "report"
- ],
- "type": "code"
- },
- "errors": {},
- "id": "26",
- "type": "code",
- "userCode": " url_screenshot_formatting__report = \"\"\n \n for url, job_id in zip(filtered_result_0_parameter_url, filtered_result_0_data___jobid):\n url_screenshot_formatting__report += f\"#### {url}\\n\"\n for screenshot_job, screenshot_name, screenshot_id in zip(get_url_job_screenshots_parameter_job_id, get_url_job_screenshots_result_item_1, get_url_job_screenshots_result_item_2):\n if job_id == screenshot_job:\n url_screenshot_formatting__report += f\"\\n\"\n\n",
- "warnings": {},
- "x": -320,
- "y": 1440
- },
- "27": {
- "data": {
- "advanced": {
- "customName": "file screenshot formatting",
- "customNameId": 0,
- "description": "Custom formatting for the markdown report that shows screenshots grouped by detonated file.",
- "join": [],
- "note": "Custom formatting for the markdown report that shows screenshots grouped by detonated file"
- },
- "functionId": 4,
- "functionName": "file_screenshot_formatting",
- "id": "27",
- "inputParameters": [
- "filtered-data:detonation_status_filter:condition_1:file_detonation:action_result.parameter.file",
- "filtered-data:detonation_status_filter:condition_1:file_detonation:action_result.data.*.JobID",
- "get_file_job_screenshots:action_result.parameter.job_id",
- "get_file_job_screenshots:action_result.data.*.file_name",
- "get_file_job_screenshots:action_result.data.*.id"
- ],
- "outputVariables": [
- "report"
- ],
- "type": "code"
- },
- "errors": {},
- "id": "27",
- "type": "code",
- "userCode": "\n file_screenshot_formatting__report = \"\"\n \n for file, job_id in zip(filtered_result_0_parameter_file, filtered_result_0_data___jobid):\n file_screenshot_formatting__report += f\"#### {file}\\n\"\n for screenshot_job, screenshot_name, screenshot_id in zip(get_file_job_screenshots_parameter_job_id, get_file_job_screenshots_result_item_1, get_file_job_screenshots_result_item_2):\n if job_id == screenshot_job:\n file_screenshot_formatting__report += f\"\\n\"\n\n",
- "warnings": {},
- "x": 680,
- "y": 1440
- },
- "3": {
- "data": {
- "action": "detonate url",
- "actionType": "generic",
- "advanced": {
- "customName": "url detonation",
- "customNameId": 0,
- "delayTime": 0,
- "description": "Queries SAA for information about the provided URL(s)",
- "join": [],
- "note": "Queries SAA for information about the provided URL(s)"
- },
- "connector": "Splunk Attack Analyzer Connector for Splunk SOAR",
- "connectorConfigs": [
- "splunk_attack_analyzer"
- ],
- "connectorId": "de681fee-c552-45bf-9212-827b1c7529f8",
- "connectorVersion": "v1",
- "functionId": 1,
- "functionName": "url_detonation",
- "id": "3",
- "loop": {
- "enabled": false,
- "exitAfterUnit": "m",
- "exitAfterValue": 10,
- "exitConditionEnabled": false,
- "exitLoopAfter": 2,
- "pauseUnit": "m",
- "pauseValue": 2
- },
- "parameters": {
- "url": "filtered-data:saa_input_filter:condition_1:playbook_input:url"
- },
- "requiredParameters": [
- {
- "data_type": "string",
- "field": "url"
- }
- ],
- "type": "action"
- },
- "errors": {},
- "id": "3",
- "type": "action",
- "userCode": "\n # Write your custom code here...\n\n",
- "warnings": {},
- "x": 0,
- "y": 328
- },
- "4": {
- "data": {
- "advanced": {
- "customName": "detonation status filter",
- "customNameId": 0,
- "delimiter": ",",
- "delimiter_enabled": true,
- "description": "Filters successful file detonation results.",
- "join": [],
- "note": "Filters successful file detonation results."
- },
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": "==",
- "param": "file_detonation:action_result.status",
- "value": "success"
- }
+ "description": "A vault_id provided for reputation analysis - Splunk Attack Analyzer",
+ "name": "vault_id"
+ }
+ ],
+ "output_spec": [
+ {
+ "contains": [],
+ "datapaths": [
+ "build_url_output:custom_function:observable_array",
+ "build_file_output:custom_function:observable_array"
],
- "conditionIndex": 0,
- "customName": "saa_file_success_status",
- "logic": "and"
- }
- ],
- "functionId": 2,
- "functionName": "detonation_status_filter",
- "id": "4",
- "type": "filter"
- },
- "errors": {},
- "id": "4",
- "type": "filter",
- "warnings": {},
- "x": 400,
- "y": 506
- },
- "6": {
- "data": {
- "action": "get job summary",
- "actionType": "investigate",
- "advanced": {
- "customName": "get url summary output",
- "customNameId": 0,
- "delayTime": 0,
- "description": "Queries SAA Forensics data relative to the JobID of URL(s) or File(s) needs to be detonated.",
- "join": [],
- "note": "Queries SAA Forensics data relative to the JobID of URL(s) or File(s) needs to be detonated.",
- "reviewer": ""
- },
- "connector": "Splunk Attack Analyzer Connector for Splunk SOAR",
- "connectorConfigs": [
- "splunk_attack_analyzer"
- ],
- "connectorId": "de681fee-c552-45bf-9212-827b1c7529f8",
- "connectorVersion": "v1",
- "functionId": 1,
- "functionName": "get_url_summary_output",
- "id": "6",
- "loop": {
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": "==",
- "param": "get_url_summary_output:action_result.data.*.State",
- "value": "done"
- }
- ],
- "conditionIndex": 0,
- "display": "If",
- "logic": "and",
- "type": "if"
- }
- ],
- "enabled": true,
- "exitAfterUnit": "m",
- "exitAfterValue": 30,
- "exitConditionEnabled": true,
- "exitLoopAfter": 15,
- "pauseUnit": "m",
- "pauseValue": 2
- },
- "parameters": {
- "job_id": "filtered-data:url_status_filter:condition_1:url_detonation:action_result.data.*.JobID",
- "timeout": ""
- },
- "requiredParameters": [
- {
- "data_type": "string",
- "field": "job_id"
- }
- ],
- "type": "action"
- },
- "errors": {},
- "id": "6",
- "type": "action",
- "userCode": "\n # Write your custom code here...\n #parameters = []\n #for job_ids in url_jobid_detonation_output__jobid:\n # for job in job_ids:\n # if job is not None:\n # parameters.append({\n # \"job_id\": job,\n # \"timeout\": 5,\n # })\n #phantom.debug(parameters)\n",
- "warnings": {},
- "x": -1.4210854715202004e-14,
- "y": 686
- },
- "7": {
- "data": {
- "advanced": {
- "customName": "url summary filter",
- "customNameId": 0,
- "delimiter": ",",
- "delimiter_enabled": true,
- "description": "Filters successful url detonation job forensic results.",
- "join": [],
- "note": "Filters successful url detonation job forensic results."
+ "deduplicate": false,
+ "description": "An array of observable dictionaries with value, type, score, score_id, and categories.",
+ "metadata": {},
+ "name": "observable"
},
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": "==",
- "param": "get_url_summary_output:action_result.status",
- "value": "success"
- }
+ {
+ "contains": [],
+ "datapaths": [
+ "format_url_report:formatted_data",
+ "format_file_report:formatted_data"
],
- "conditionIndex": 0,
- "customName": "get_job_summary_success",
- "logic": "and"
- }
- ],
- "functionId": 3,
- "functionName": "url_summary_filter",
- "id": "7",
- "type": "filter"
- },
- "errors": {},
- "id": "7",
- "type": "filter",
- "warnings": {},
- "x": 60,
- "y": 864
- },
- "8": {
- "data": {
- "advanced": {
- "customName": "normalized url summary output",
- "customNameId": 0,
- "description": "This block uses custom code for normalizing score. Adjust the logic as desired in the documented sections.",
- "join": [],
- "note": "This block uses custom code for normalizing score. Adjust the logic as desired in the documented sections."
- },
- "functionId": 2,
- "functionName": "normalized_url_summary_output",
- "id": "8",
- "inputParameters": [
- "filtered-data:url_status_filter:condition_1:url_detonation:action_result.parameter.url",
- "filtered-data:url_status_filter:condition_1:url_detonation:action_result.data.*.JobID",
- "filtered-data:url_summary_filter:condition_1:get_url_summary_output:action_result.parameter.job_id",
- "filtered-data:url_summary_filter:condition_1:get_url_summary_output:action_result.summary.Score",
- "filtered-data:url_summary_filter:condition_1:get_url_summary_output:action_result.data.*.Resources",
- "filtered-data:url_summary_filter:condition_1:get_url_summary_output:action_result.data.*.Verdict"
- ],
- "outputVariables": [
- "url_score_object",
- "scores",
- "classifications",
- "score_id",
- "url",
- "job_id"
- ],
- "type": "code"
- },
- "errors": {},
- "id": "8",
- "type": "code",
- "userCode": "\n score_table = {\n \"0\":\"Unknown\",\n \"1\":\"Very_Safe\",\n \"2\":\"Safe\",\n \"3\":\"Probably_Safe\",\n \"4\":\"Leans_Safe\",\n \"5\":\"May_not_be_Safe\",\n \"6\":\"Exercise_Caution\",\n \"7\":\"Suspicious_or_Risky\",\n \"8\":\"Possibly_Malicious\",\n \"9\":\"Probably_Malicious\",\n \"10\":\"Malicious\"\n }\n classification_ids = {\n \"Unknown\": 0,\n \"Adware\": 1,\n \"Backdoor\": 2,\n \"Bot\": 3,\n \"Bootkit\": 4,\n \"DDOS\": 5,\n \"Downloader\": 6,\n \"Dropper\": 7,\n \"Exploit-Kit\": 8,\n \"Keylogger\": 9,\n \"Ransomware\": 10,\n \"Remote-Access-Trojan\": 11,\n \"Resource-Exploitation\": 13,\n \"Rogue-Security-Software\": 14,\n \"Rootkit\": 15,\n \"Screen-Capture\": 16,\n \"Spyware\": 17,\n \"Trojan\": 18,\n \"Virus\": 19,\n \"Webshell\": 20,\n \"Wiper\": 21,\n \"Worm\": 22,\n \"Other\": 99\n }\n normalized_url_summary_output__url_score_object = []\n normalized_url_summary_output__scores = []\n normalized_url_summary_output__classifications = []\n normalized_url_summary_output__score_id = []\n normalized_url_summary_output__url = []\n normalized_url_summary_output__job_id = []\n \n ## pair forensic job results with url detonated\n job_url_dict = {}\n for orig_url, orig_job, filtered_job in zip(filtered_result_0_parameter_url, filtered_result_0_data___jobid, filtered_result_1_parameter_job_id):\n if orig_job == filtered_job:\n job_url_dict[filtered_job] = orig_url\n \n \n for job, score_num, resources, verdict in zip(filtered_result_1_parameter_job_id, filtered_result_1_summary_score, filtered_result_1_data___resources, filtered_result_1_data___verdict):\n \n ## translate scores\n score_id = int(score_num/10) if score_num > 0 else 0\n score = score_table[str(score_id)]\n url = job_url_dict[job]\n \n ## build a sub dictionary of high priority related observables\n related_observables = []\n for sub_observ in resources:\n if sub_observ['Name'] != url:\n second_num = sub_observ['DisplayScore']\n second_num_id = int(second_num/10) if second_num 
> 0 else 0\n related_observables.append({\n 'value': sub_observ['Name'],\n 'type': sub_observ['Type'].lower(),\n 'reputation': {\n 'score': score_table[str(second_num_id)],\n 'orig_score': second_num,\n 'score_id': second_num_id\n },\n 'source': 'Splunk Attack Analyzer'\n })\n \n # Attach final object\n normalized_url_summary_output__url_score_object.append({\n 'value': url, \n 'orig_score': score_num, \n 'score': score, \n 'score_id': score_id, \n 'classifications': [verdict if verdict else \"Unknown\"],\n 'classification_ids': [classification_ids.get(verdict, 99) if verdict else 0],\n 'related_observables': related_observables\n })\n normalized_url_summary_output__scores.append(score)\n normalized_url_summary_output__score_id.append(score_id)\n normalized_url_summary_output__url.append(url)\n normalized_url_summary_output__job_id.append(job)\n normalized_url_summary_output__classifications.append([verdict if verdict else \"Unknown\"])\n \n\n",
- "warnings": {},
- "x": 0,
- "y": 1440
- }
- },
- "notes": "Inputs: url, vault_id\nInteractions: Splunk Attack Analyzer\nActions: url detonation, , file detonation\nOutputs: report, observables"
- },
- "input_spec": [
- {
- "contains": [
- "url"
- ],
- "description": "A URL provided for reputation analysis - Splunk Attack Analyzer",
- "name": "url"
- },
- {
- "contains": [
- "vault id"
- ],
- "description": "A vault_id provided for reputation analysis - Splunk Attack Analyzer",
- "name": "vault_id"
- }
- ],
- "output_spec": [
- {
- "contains": [],
- "datapaths": [
- "build_url_output:custom_function:observable_array",
- "build_file_output:custom_function:observable_array"
- ],
- "deduplicate": false,
- "description": "An array of observable dictionaries with value, type, score, score_id, and categories.",
- "metadata": {},
- "name": "observable"
- },
- {
- "contains": [],
- "datapaths": [
- "format_url_report:formatted_data",
- "format_file_report:formatted_data"
+ "deduplicate": false,
+ "description": "a report contains value, score, confidence and categories",
+ "metadata": {},
+ "name": "report"
+ }
],
- "deduplicate": false,
- "description": "a report contains value, score, confidence and categories",
- "metadata": {},
- "name": "report"
- }
+ "playbook_type": "data",
+ "python_version": "3.13",
+ "schema": "5.0.11",
+ "version": "6.2.0.355"
+ },
+ "create_time": "2024-03-19T15:17:38.130754+00:00",
+ "draft_mode": false,
+ "labels": [
+ "*"
],
- "playbook_type": "data",
- "python_version": "3",
- "schema": "5.0.11",
- "version": "6.2.0.355"
- },
- "create_time": "2024-01-26T16:09:47.279213+00:00",
- "draft_mode": false,
- "labels": [
- "*"
- ],
- "tags": [
- "url",
- "ip",
- "domain",
- "sandbox",
- "D3-DA",
- "vault_id",
- "splunk_attack_analyzer"
- ]
-}
+ "tags": [
+ "url",
+ "ip",
+ "domain",
+ "sandbox",
+ "D3-DA",
+ "vault_id",
+ "splunk_attack_analyzer"
+ ]
+}
\ No newline at end of file
diff --git a/playbooks/Splunk_Attack_Analyzer_Dynamic_Analysis.png b/playbooks/Splunk_Attack_Analyzer_Dynamic_Analysis.png
index 8be98de724..f60999fb03 100644
Binary files a/playbooks/Splunk_Attack_Analyzer_Dynamic_Analysis.png and b/playbooks/Splunk_Attack_Analyzer_Dynamic_Analysis.png differ
diff --git a/playbooks/Splunk_Attack_Analyzer_Dynamic_Analysis.py b/playbooks/Splunk_Attack_Analyzer_Dynamic_Analysis.py
index dad13369fb..6b5352c86c 100644
--- a/playbooks/Splunk_Attack_Analyzer_Dynamic_Analysis.py
+++ b/playbooks/Splunk_Attack_Analyzer_Dynamic_Analysis.py
@@ -373,7 +373,7 @@ def format_url_report(action=None, success=None, container=None, results=None, h
# Format a summary table with the information gathered from the playbook.
################################################################################
- template = """SOAR analyzed URL(s) using Splunk Attack Analyzer. The table below shows a summary of the information gathered.\n\n| URL | Normalized Score | Score Id | Classifications | Report Link | Source |\n| --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {1} | {2} | {3} | https://app.twinwave.io/job/{4} | Splunk Attack Analyzer (SAA) |\n%%\n\nScreenshots associated with the detonated URLs are shown below (if available):\n\n{5}\n"""
+ template = """SOAR analyzed URL(s) using Splunk Attack Analyzer. The table below shows a summary of the information gathered.\n\n| URL | Normalized Score | Score Id | Classifications | Report Link | Source |\n| --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {1} | {2} | {3} | {6} | Splunk Attack Analyzer (SAA) |\n%%\n\nScreenshots associated with the detonated URLs are shown below (if available):\n\n{5}\n"""
# parameter list for template variable replacement
parameters = [
@@ -382,7 +382,8 @@ def format_url_report(action=None, success=None, container=None, results=None, h
"normalized_url_summary_output:custom_function:score_id",
"normalized_url_summary_output:custom_function:classifications",
"normalized_url_summary_output:custom_function:job_id",
- "url_screenshot_formatting:custom_function:report"
+ "url_screenshot_formatting:custom_function:report",
+ "get_url_summary_output:action_result.summary.AppURL"
]
################################################################################
@@ -411,10 +412,13 @@ def build_url_output(action=None, success=None, container=None, results=None, ha
# the observables data path.
################################################################################
+ get_url_summary_output_result_data = phantom.collect2(container=container, datapath=["get_url_summary_output:action_result.summary.AppURL"], action_results=results)
normalized_url_summary_output__url = json.loads(_ if (_ := phantom.get_run_data(key="normalized_url_summary_output:url")) != "" else "null") # pylint: disable=used-before-assignment
normalized_url_summary_output__job_id = json.loads(_ if (_ := phantom.get_run_data(key="normalized_url_summary_output:job_id")) != "" else "null") # pylint: disable=used-before-assignment
normalized_url_summary_output__url_score_object = json.loads(_ if (_ := phantom.get_run_data(key="normalized_url_summary_output:url_score_object")) != "" else "null") # pylint: disable=used-before-assignment
+ get_url_summary_output_summary_appurl = [item[0] for item in get_url_summary_output_result_data]
+
build_url_output__observable_array = None
################################################################################
@@ -428,7 +432,7 @@ def build_url_output(action=None, success=None, container=None, results=None, ha
# Build URL
- for url, external_id, url_object in zip(normalized_url_summary_output__url, normalized_url_summary_output__job_id, normalized_url_summary_output__url_score_object):
+ for url, external_id, url_object, app_url in zip(normalized_url_summary_output__url, normalized_url_summary_output__job_id, normalized_url_summary_output__url_score_object, get_url_summary_output_summary_appurl):
parsed_url = urlparse(url)
#phantom.debug("url: {} jobs_id:{}".format(url, external_id))
#phantom.debug("parsed_url: {}, url_object: {}".format(parsed_url, url_object))
@@ -449,7 +453,7 @@ def build_url_output(action=None, success=None, container=None, results=None, ha
"classification_ids": url_object['classification_ids']
},
"source": "Splunk Attack Analyzer",
- "source_link": f"https://app.twinwave.io/job/{external_id}"
+ "source_link": f"{app_url}"
}
if url_object.get('related_observables'):
observable_object["related_observables"] = url_object['related_observables']
@@ -811,7 +815,7 @@ def format_file_report(action=None, success=None, container=None, results=None,
# Format a summary table with the information gathered from the playbook.
################################################################################
- template = """SOAR analyzed File(s) using Splunk Attack Analyzer. The table below shows a summary of the information gathered.\n\n| File Name | Normalized Score | Score Id | Classifications | Report Link | Source |\n| --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {1} | {2} | {3} |https://app.twinwave.io/job/{4} | Splunk Attack Analyzer (SAA) |\n%%\n\nScreenshots associated with the detonated Files are shown below (if available):\n\n{5}\n\n"""
+ template = """SOAR analyzed File(s) using Splunk Attack Analyzer. The table below shows a summary of the information gathered.\n\n| File Name | Normalized Score | Score Id | Classifications | Report Link | Source |\n| --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {1} | {2} | {3} | {6} | Splunk Attack Analyzer (SAA) |\n%%\n\nScreenshots associated with the detonated Files are shown below (if available):\n\n{5}\n\n"""
# parameter list for template variable replacement
parameters = [
@@ -820,7 +824,8 @@ def format_file_report(action=None, success=None, container=None, results=None,
"normalized_file_summary_output:custom_function:score_id",
"normalized_file_summary_output:custom_function:classifications",
"normalized_file_summary_output:custom_function:job_id",
- "file_screenshot_formatting:custom_function:report"
+ "file_screenshot_formatting:custom_function:report",
+ "get_file_summary_output:action_result.summary.AppURL"
]
################################################################################
@@ -849,10 +854,13 @@ def build_file_output(action=None, success=None, container=None, results=None, h
# the observables data path.
################################################################################
+ get_file_summary_output_result_data = phantom.collect2(container=container, datapath=["get_file_summary_output:action_result.summary.AppURL"], action_results=results)
normalized_file_summary_output__file = json.loads(_ if (_ := phantom.get_run_data(key="normalized_file_summary_output:file")) != "" else "null") # pylint: disable=used-before-assignment
normalized_file_summary_output__job_id = json.loads(_ if (_ := phantom.get_run_data(key="normalized_file_summary_output:job_id")) != "" else "null") # pylint: disable=used-before-assignment
normalized_file_summary_output__file_score_object = json.loads(_ if (_ := phantom.get_run_data(key="normalized_file_summary_output:file_score_object")) != "" else "null") # pylint: disable=used-before-assignment
+ get_file_summary_output_summary_appurl = [item[0] for item in get_file_summary_output_result_data]
+
build_file_output__observable_array = None
################################################################################
@@ -861,7 +869,7 @@ def build_file_output(action=None, success=None, container=None, results=None, h
# Write your custom code here...
build_file_output__observable_array = []
- for _vault_id, external_id, file_object in zip(normalized_file_summary_output__file, normalized_file_summary_output__job_id, normalized_file_summary_output__file_score_object):
+ for _vault_id, external_id, file_object, app_url in zip(normalized_file_summary_output__file, normalized_file_summary_output__job_id, normalized_file_summary_output__file_score_object, get_file_summary_output_summary_appurl):
#phantom.debug("vault: {} id: {}".format(_vault_id, external_id))
observable_object = {
"value": _vault_id,
@@ -877,7 +885,7 @@ def build_file_output(action=None, success=None, container=None, results=None, h
"classification_ids": file_object['classification_ids']
},
"source": "Splunk Attack Analyzer",
- "source_link":f"https://app.twinwave.io/job/{external_id}"
+ "source_link":f"{app_url}"
}
if file_object.get('related_observables'):
observable_object["related_observables"] = file_object['related_observables']
diff --git a/playbooks/Splunk_Automated_Email_Investigation.json b/playbooks/Splunk_Automated_Email_Investigation.json
index dac456affe..4c758b19a9 100644
--- a/playbooks/Splunk_Automated_Email_Investigation.json
+++ b/playbooks/Splunk_Automated_Email_Investigation.json
@@ -1,1294 +1,1294 @@
{
- "blockly": false,
- "blockly_xml": "",
- "category": "Phishing",
- "coa": {
- "data": {
- "description": "Leverages Splunk technologies to determine if a .eml or .msg file in the vault is malicious, whether or not it contained suspect URLs or Files, and who may have interacted with the IoCs (email, URLs, or Files).",
- "edges": [
- {
- "id": "port_0_to_port_3",
- "sourceNode": "0",
- "sourcePort": "0_out",
- "targetNode": "3",
- "targetPort": "3_in"
- },
- {
- "id": "port_3_to_port_4",
- "sourceNode": "3",
- "sourcePort": "3_out",
- "targetNode": "4",
- "targetPort": "4_in"
- },
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_4_to_port_2",
- "sourceNode": "4",
- "sourcePort": "4_out",
- "targetNode": "2",
- "targetPort": "2_in"
- },
- {
- "id": "port_2_to_port_14",
- "sourceNode": "2",
- "sourcePort": "2_out",
- "targetNode": "14",
- "targetPort": "14_in"
- },
- {
- "conditions": [
- {
- "index": 1
- }
- ],
- "id": "port_14_to_port_16",
- "sourceNode": "14",
- "sourcePort": "14_out",
- "targetNode": "16",
- "targetPort": "16_in"
- },
- {
- "id": "port_7_to_port_16",
- "sourceNode": "7",
- "sourcePort": "7_out",
- "targetNode": "16",
- "targetPort": "16_in"
- },
- {
- "id": "port_20_to_port_19",
- "sourceNode": "20",
- "sourcePort": "20_out",
- "targetNode": "19",
- "targetPort": "19_in"
- },
- {
- "id": "port_21_to_port_1",
- "sourceNode": "21",
- "sourcePort": "21_out",
- "targetNode": "1",
- "targetPort": "1_in"
- },
- {
- "id": "port_19_to_port_1",
- "sourceNode": "19",
- "sourcePort": "19_out",
- "targetNode": "1",
- "targetPort": "1_in"
- },
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_22_to_port_7",
- "sourceNode": "22",
- "sourcePort": "22_out",
- "targetNode": "7",
- "targetPort": "7_in"
- },
- {
- "conditions": [
- {
- "index": 1
- }
- ],
- "id": "port_22_to_port_23",
- "sourceNode": "22",
- "sourcePort": "22_out",
- "targetNode": "23",
- "targetPort": "23_in"
- },
- {
- "id": "port_23_to_port_16",
- "sourceNode": "23",
- "sourcePort": "23_out",
- "targetNode": "16",
- "targetPort": "16_in"
- },
- {
- "id": "port_24_to_port_22",
- "sourceNode": "24",
- "sourcePort": "24_out",
- "targetNode": "22",
- "targetPort": "22_in"
- },
- {
- "id": "port_16_to_port_20",
- "sourceNode": "16",
- "sourcePort": "16_out",
- "targetNode": "20",
- "targetPort": "20_in"
- },
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_14_to_port_24",
- "sourceNode": "14",
- "sourcePort": "14_out",
- "targetNode": "24",
- "targetPort": "24_in"
- },
- {
- "id": "port_25_to_port_26",
- "sourceNode": "25",
- "sourcePort": "25_out",
- "targetNode": "26",
- "targetPort": "26_in"
- },
- {
- "id": "port_26_to_port_1",
- "sourceNode": "26",
- "sourcePort": "26_out",
- "targetNode": "1",
- "targetPort": "1_in"
- },
- {
- "id": "port_16_to_port_25",
- "sourceNode": "16",
- "sourcePort": "16_out",
- "targetNode": "25",
- "targetPort": "25_in"
- },
- {
- "conditions": [
- {
- "index": 2
- }
- ],
- "id": "port_14_to_port_27",
- "sourceNode": "14",
- "sourcePort": "14_out",
- "targetNode": "27",
- "targetPort": "27_in"
- },
- {
- "id": "port_27_to_port_21",
- "sourceNode": "27",
- "sourcePort": "27_out",
- "targetNode": "21",
- "targetPort": "21_in"
- }
- ],
- "hash": "f8687c0dc65bdfa081387f52282128e97f77b26b",
- "nodes": {
- "0": {
- "data": {
- "advanced": {
- "join": []
- },
- "functionName": "on_start",
- "id": "0",
- "type": "start"
- },
- "errors": {},
- "id": "0",
- "type": "start",
- "warnings": {},
- "x": 450,
- "y": -5.755396159656812e-13
- },
- "1": {
- "data": {
- "advanced": {
- "join": []
- },
- "functionName": "on_finish",
- "id": "1",
- "type": "end"
- },
- "errors": {},
- "id": "1",
- "type": "end",
- "warnings": {},
- "x": 340,
- "y": 2040
- },
- "14": {
- "data": {
- "advanced": {
- "customName": "high score indicator decision",
- "customNameId": 0,
- "join": [],
- "note": "Determines which path to go based on the reputation score of the email and the presence of additional indicators."
- },
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": ">",
- "param": "splunk_attack_analyzer:playbook_output:observable.related_observables.*.reputation.score_id",
- "value": "5"
- }
- ],
- "conditionIndex": 0,
- "customName": "malicious indicators exist",
- "display": "If",
- "logic": "and",
- "type": "if"
- },
- {
- "comparisons": [
- {
- "conditionIndex": 1,
- "op": ">",
- "param": "splunk_attack_analyzer:playbook_output:observable.reputation.score_id",
- "value": "5"
- }
- ],
- "conditionIndex": 1,
- "customName": "phish email",
- "display": "Else If",
- "logic": "and",
- "type": "elif"
- },
- {
- "comparisons": [
- {
- "conditionIndex": 2,
- "op": "==",
- "param": "",
- "value": ""
- }
- ],
- "conditionIndex": 2,
- "customName": "not high threat",
- "display": "Else",
- "logic": "and",
- "type": "else"
- }
- ],
- "customDatapaths": {
- "splunk_attack_analyzer": {
- "outputs:observable.related_observables": {
- "contains": [],
- "isCustomDatapath": true,
- "isDatapathArray": true,
- "label": "outputs:observable.related_observables",
- "value": "splunk_attack_analyzer:playbook_output:observable.related_observables"
+ "blockly": false,
+ "blockly_xml": "",
+ "category": "Phishing",
+ "coa": {
+ "data": {
+ "description": "Leverages Splunk technologies to determine if a .eml or .msg file in the vault is malicious, whether or not it contained suspect URLs or Files, and who may have interacted with the IoCs (email, URLs, or Files).",
+ "edges": [
+ {
+ "id": "port_0_to_port_3",
+ "sourceNode": "0",
+ "sourcePort": "0_out",
+ "targetNode": "3",
+ "targetPort": "3_in"
},
- "outputs:observable.related_observables.*.reputation": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "outputs:observable.related_observables.*.reputation",
- "value": "splunk_attack_analyzer:playbook_output:observable.related_observables.*.reputation"
+ {
+ "id": "port_3_to_port_4",
+ "sourceNode": "3",
+ "sourcePort": "3_out",
+ "targetNode": "4",
+ "targetPort": "4_in"
},
- "outputs:observable.related_observables.*.reputation.score_id": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "outputs:observable.related_observables.*.reputation.score_id",
- "value": "splunk_attack_analyzer:playbook_output:observable.related_observables.*.reputation.score_id"
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
+ ],
+ "id": "port_4_to_port_2",
+ "sourceNode": "4",
+ "sourcePort": "4_out",
+ "targetNode": "2",
+ "targetPort": "2_in"
},
- "outputs:observable.reputation.score_id": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "outputs:observable.reputation.score_id",
- "value": "splunk_attack_analyzer:playbook_output:observable.reputation.score_id"
- }
- }
- },
- "functionId": 1,
- "functionName": "high_score_indicator_decision",
- "id": "14",
- "type": "decision"
- },
- "errors": {},
- "id": "14",
- "type": "decision",
- "warnings": {},
- "x": 510,
- "y": 654
- },
- "16": {
- "data": {
- "advanced": {
- "customName": "who received email",
- "customNameId": 0,
- "join": [],
- "notRequiredJoins": [
- "who_interacted_with_urls",
- "who_interacted_with_files"
- ],
- "scope": "all"
- },
- "customDatapaths": {
- "artifacts": {
- "artifact:*.cef.internetMessageId": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "artifact:*.cef.internetMessageId",
- "value": "artifact:*.cef.internetMessageId"
+ {
+ "id": "port_2_to_port_14",
+ "sourceNode": "2",
+ "sourcePort": "2_out",
+ "targetNode": "14",
+ "targetPort": "14_in"
},
- "artifact:*.cef.message-id": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "artifact:*.cef.message-id",
- "value": "artifact:*.cef.message-id"
- }
- }
- },
- "functionId": 1,
- "functionName": "who_received_email",
- "id": "16",
- "inputs": {
- "message_id": {
- "datapaths": [
- "artifact:*.cef.emailHeaders.Message-ID"
- ],
- "deduplicate": false
- },
- "sender": {
- "datapaths": [],
- "deduplicate": false
- },
- "subject": {
- "datapaths": [],
- "deduplicate": false
- }
- },
- "playbookName": "Splunk_Message_Identifier_Activity_Analysis",
- "playbookRepo": 2,
- "playbookRepoName": "local",
- "playbookType": "data",
- "synchronous": true,
- "type": "playbook"
- },
- "errors": {},
- "id": "16",
- "type": "playbook",
- "warnings": {},
- "x": 520,
- "y": 1522
- },
- "19": {
- "data": {
- "advanced": {
- "customName": "add artifacts",
- "customNameId": 0,
- "join": [],
- "note": "Add artifacts to the incident. Contains custom code to loop through the preceding custom code output properly."
- },
- "customFunction": {
- "draftMode": false,
- "name": "artifact_create",
- "repoName": "community"
- },
- "functionId": 6,
- "functionName": "add_artifacts",
- "id": "19",
- "selectMore": false,
- "tab": "customFunctions",
- "type": "utility",
- "utilities": {
- "artifact_create": {
- "description": "Create a new artifact with the specified attributes. Supports all fields available in /rest/artifact. Add any unlisted inputs as dictionary keys in input_json. Unsupported keys will automatically be dropped.",
- "fields": [
- {
- "dataTypes": [
- "phantom container id"
- ],
- "description": "Container which the artifact will be added to.",
- "inputType": "item",
- "label": "container",
- "name": "container",
- "placeholder": "container:id",
- "renderType": "datapath",
- "required": false
- },
- {
- "dataTypes": [],
- "description": "The name of the new artifact, which is optional and defaults to \"artifact\".",
- "inputType": "item",
- "label": "name",
- "name": "name",
- "placeholder": "artifact",
- "renderType": "datapath",
- "required": false
- },
- {
- "dataTypes": [],
- "description": "The label of the new artifact, which is optional and defaults to \"events\"",
- "inputType": "item",
- "label": "label",
- "name": "label",
- "placeholder": "events",
- "renderType": "datapath",
- "required": false
- },
- {
- "dataTypes": [
- ""
- ],
- "description": "The severity of the new artifact, which is optional and defaults to \"Medium\". Typically this is either \"High\", \"Medium\", or \"Low\".",
- "inputType": "item",
- "label": "severity",
- "name": "severity",
- "placeholder": "Medium",
- "renderType": "datapath",
- "required": false
- },
- {
- "dataTypes": [],
- "description": "The name of the CEF field to populate in the artifact, such as \"destinationAddress\" or \"sourceDnsDomain\". Required only if cef_value is provided.",
- "inputType": "item",
- "label": "cef_field",
- "name": "cef_field",
- "placeholder": "destinationAddress",
- "renderType": "datapath",
- "required": false
- },
- {
- "dataTypes": [
- "*"
- ],
- "description": "The value of the CEF field to populate in the artifact, such as the IP address, domain name, or file hash. Required only if cef_field is provided.",
- "inputType": "item",
- "label": "cef_value",
- "name": "cef_value",
- "placeholder": "192.0.2.192",
- "renderType": "datapath",
- "required": false
- },
- {
- "dataTypes": [],
- "description": "The CEF data type of the data in cef_value. For example, this could be \"ip\", \"hash\", or \"domain\". Optional.",
- "inputType": "item",
- "label": "cef_data_type",
- "name": "cef_data_type",
- "placeholder": "ip",
- "renderType": "datapath",
- "required": false
- },
- {
- "dataTypes": [],
- "description": "A comma-separated list of tags to apply to the created artifact, which is optional.",
- "inputType": "item",
- "label": "tags",
- "name": "tags",
- "placeholder": "tag1, tag2, tag3",
- "renderType": "datapath",
- "required": false
- },
- {
- "dataTypes": [],
- "description": "Either \"true\" or \"false\", depending on whether or not the new artifact should trigger the execution of any playbooks that are set to active on the label of the container the artifact will be added to. Optional and defaults to \"false\".",
- "inputType": "item",
- "label": "run_automation",
- "name": "run_automation",
- "placeholder": "false",
- "renderType": "datapath",
- "required": false
- },
- {
- "dataTypes": [],
- "description": "Optional parameter to modify any extra attributes of the artifact. Input_json will be merged with other inputs. In the event of a conflict, input_json will take precedence.",
- "inputType": "item",
- "label": "input_json",
- "name": "input_json",
- "placeholder": "{\"source_data_identifier\": \"1234\", \"data\": \"5678\"}",
- "renderType": "datapath",
- "required": false
- }
- ],
- "label": "artifact_create",
- "name": "artifact_create"
- }
- },
- "utilityType": "custom_function",
- "values": {
- "artifact_create": {
- "cef_data_type": null,
- "cef_field": null,
- "cef_value": null,
- "container": "container:id",
- "input_json": "convert_to_artifacts:custom_function:json",
- "label": "",
- "name": "",
- "run_automation": null,
- "severity": null,
- "tags": null
- }
- }
- },
- "errors": {},
- "id": "19",
- "type": "utility",
- "userCode": "\n parameters = []\n for artifact_json in convert_to_artifacts__json:\n\n parameters.append({\n \"name\": \"Interaction with phish indicators\",\n \"tags\": None,\n \"label\": None,\n \"severity\": None,\n \"cef_field\": None,\n \"cef_value\": None,\n \"container\": id_value,\n \"input_json\": artifact_json,\n \"cef_data_type\": None,\n \"run_automation\": None,\n })\n\n\n",
- "warnings": {},
- "x": 520,
- "y": 1820
- },
- "2": {
- "data": {
- "advanced": {
- "customName": "splunk attack analyzer",
- "customNameId": 0,
- "join": []
- },
- "functionId": 1,
- "functionName": "splunk_attack_analyzer",
- "id": "2",
- "inputs": {
- "url": {
- "datapaths": [],
- "deduplicate": false
- },
- "vault_id": {
- "datapaths": [
- "filtered-data:fiter_email_items:condition_1:get_emails_from_vault:custom_function_result.data.vault_id"
- ],
- "deduplicate": false
- }
- },
- "playbookName": "Splunk_Attack_Analyzer_Dynamic_Analysis",
- "playbookRepo": 2,
- "playbookRepoName": "local",
- "playbookType": "data",
- "synchronous": true,
- "type": "playbook"
- },
- "errors": {},
- "id": "2",
- "type": "playbook",
- "warnings": {},
- "x": 430,
- "y": 506
- },
- "20": {
- "data": {
- "advanced": {
- "customName": "convert to artifacts",
- "customNameId": 0,
- "join": [],
- "notRequiredJoins": [
- "playbook_Splunk_Identifier_Activity_Analysis_1",
- "playbook_Splunk_Message_Identifier_Activity_Analysis_1"
- ],
- "note": "Convert the output of the Playbooks into artifacts. Change labels and artifact structure as needed."
- },
- "customDatapaths": {
- "who_received_email": {
- "outputs:observable.message_identifier_activity": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "outputs:observable.message_identifier_activity",
- "value": "who_received_email:playbook_output:observable.message_identifier_activity"
- }
- }
- },
- "functionId": 2,
- "functionName": "convert_to_artifacts",
- "id": "20",
- "inputParameters": [
- "who_received_email:playbook_output:observable",
- "who_interacted_with_urls:playbook_output:observable",
- "who_interacted_with_files:playbook_output:observable",
- "filtered-data:filter_malicious_indicators:condition_1:split_related_observables:custom_function_result.data.output",
- "filtered-data:filter_malicious_indicators:condition_2:split_related_observables:custom_function_result.data.output",
- "splunk_attack_analyzer:playbook_output:observable"
- ],
- "outputVariables": [
- "json"
- ],
- "type": "code"
- },
- "errors": {},
- "id": "20",
- "type": "code",
- "userCode": " \n aggregate_data__json = []\n suspect_urls = filtered_cf_result_0_data_output\n suspect_files = filtered_cf_result_1_data_output\n suspect_emails = [item for item in splunk_attack_analyzer_output_observable_values if item]\n url_activity_list = [item for item in who_interacted_with_urls_output_observable_values if item]\n file_activity_list = [item for item in who_interacted_with_files_output_observable_values if item]\n message_activity_list = [item for item in who_received_email_output_observable_values if item]\n \n # Change labels here based on desired artifact labeling.\n # By default, the label is based on the vendor technology and IoC involved.\n email_interaction_label = \"splunk_email_interaction\"\n file_interaction_label = \"splunk_file_interaction\"\n url_interaction_label = \"splunk_url_interaction\"\n suspect_url_label = \"saa_url_report\"\n suspect_file_label = \"saa_file_report\"\n suspect_email_label = \"saa_email_report\"\n\n for email in suspect_emails:\n email.pop('related_observables', None)\n artifact = {'cef': email, 'name': f'{email[\"source\"]} reputation', 'label': suspect_email_label, 'cef_types': {'value': [email['type']]} }\n aggregate_data__json.append(artifact) \n \n for url in suspect_urls:\n artifact = {'cef': url, 'name': f'{url[\"source\"]} reputation', 'label': suspect_url_label, 'cef_types': {'value': [url['type']]}}\n aggregate_data__json.append(artifact) \n \n for file in suspect_files:\n artifact = {'cef': file, 'name': f'{file[\"source\"]} reputation', 'label': suspect_file_label, 'cef_types': {'value': [file['type']]} }\n aggregate_data__json.append(artifact) \n\n for message_activity in message_activity_list:\n aggregate_data__json.append({'cef': message_activity, 'label': email_interaction_label, 'name': 'Received suspect email', 'cef_types': {'value': [message_activity['type']]}})\n \n for url_act in url_activity_list:\n artifact = {'label': url_interaction_label, 'name': 'Interaction with suspect url', 
'cef_types': {'value': [url_act['type']]}}\n identifier_activity = url_act.pop('identifier_activity', [])\n url_act.pop('total_count', None)\n artifact['cef'] = url_act\n # future proofing in case the identifier_activity changes type to dict\n if isinstance(identifier_activity, list):\n for item in identifier_activity:\n sub_artifact = artifact.copy()\n sub_artifact['cef']['identifier_activity'] = item\n aggregate_data__json.append(sub_artifact) \n else:\n artifact['cef']['identifier_activity'] = identifier_activity\n aggregate_data__json.append(artifact) \n \n for file_act in file_activity_list:\n artifact = {'label': file_interaction_label, 'name': 'Interaction with suspect file', 'cef_types': {'value': [file_act['type']]}}\n identifier_activity = file_act.pop('identifier_activity', [])\n file_act.pop('total_count', None)\n artifact['cef'] = file_act\n # future proofing in case the identifier_activity changes type to dict\n if isinstance(identifier_activity, list):\n for item in identifier_activity:\n sub_artifact = artifact.copy()\n sub_artifact['cef']['identifier_activity'] = item\n aggregate_data__json.append(sub_artifact) \n else:\n artifact['cef']['identifier_activity'] = identifier_activity\n aggregate_data__json.append(artifact) \n \n convert_to_artifacts__json = aggregate_data__json\n",
- "warnings": {},
- "x": 520,
- "y": 1670
- },
- "21": {
- "data": {
- "advanced": {
- "customName": "add analysis note",
- "customNameId": 0,
- "join": [],
- "note": "Adds a note from the dynamic analysis when the reputation score of the email is not high enough to be considered suspicious."
- },
- "functionId": 7,
- "functionName": "add_analysis_note",
- "id": "21",
- "selectMore": false,
- "tab": "apis",
- "type": "utility",
- "utilities": {
- "add_note": {
- "description": "",
- "fields": [
- {
- "description": "",
- "label": "title",
- "name": "title",
- "placeholder": "Enter a note title",
- "renderType": "datapath",
- "required": false
- },
- {
- "description": "",
- "label": "content",
- "name": "content",
- "placeholder": "Enter the note content",
- "renderType": "datapath",
- "required": false
- },
- {
- "choices": [
- "markdown",
- "html"
+ {
+ "conditions": [
+ {
+ "index": 1
+ }
],
- "default": "markdown",
- "description": "",
- "label": "note format",
- "name": "note_format",
- "placeholder": "Enter the note content",
- "renderType": "dropdown",
- "required": false
- },
- {
- "hidden": true,
- "name": "container",
- "required": false
- },
- {
- "default": "general",
- "hidden": true,
- "name": "note_type",
- "required": false
- },
- {
- "hidden": true,
- "name": "author",
- "required": false
- },
- {
- "hidden": true,
- "name": "event_id",
- "required": false
- },
- {
- "hidden": true,
- "name": "task_id",
- "required": false
- },
- {
- "hidden": true,
- "name": "trace",
- "required": false
- }
- ],
- "label": "add note",
- "name": "add_note"
- }
- },
- "utilityType": "api",
- "values": {
- "add_note": {
- "_internal": [
- "container",
- "note_type",
- "author",
- "event_id",
- "task_id",
- "trace"
- ],
- "content": "format_analyst_note:formatted_data",
- "note_format": "markdown",
- "note_type": "general",
- "title": "Final analysis"
- }
- }
- },
- "errors": {},
- "id": "21",
- "type": "utility",
- "userCode": "\n \n",
- "warnings": {},
- "x": 0,
- "y": 1818
- },
- "22": {
- "data": {
- "advanced": {
- "customName": "filter malicious indicators",
- "customNameId": 0,
- "join": [],
- "note": "Filters and creates two data sets, one for URLs greater than 5, and one for Files greater than 5."
- },
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": "==",
- "param": "split_related_observables:custom_function_result.data.output.type",
- "value": "url"
- },
- {
- "conditionIndex": 0,
- "op": ">",
- "param": "split_related_observables:custom_function_result.data.output.reputation.score_id",
- "value": "5"
- }
- ],
- "conditionIndex": 0,
- "customName": "urls",
- "logic": "and"
- },
- {
- "comparisons": [
- {
- "conditionIndex": 1,
- "op": "==",
- "param": "split_related_observables:custom_function_result.data.output.type",
- "value": "file"
- },
- {
- "conditionIndex": 1,
- "op": ">",
- "param": "split_related_observables:custom_function_result.data.output.reputation.score_id",
- "value": "5"
- }
- ],
- "conditionIndex": 1,
- "customName": "files",
- "logic": "and"
- }
- ],
- "customDatapaths": {
- "split_related_observables": {
- "data.output.reputation.score_id": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "data.output.reputation.score_id",
- "value": "split_related_observables:custom_function_result.data.output.reputation.score_id"
+ "id": "port_14_to_port_16",
+ "sourceNode": "14",
+ "sourcePort": "14_out",
+ "targetNode": "16",
+ "targetPort": "16_in"
},
- "data.output.type": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "data.output.type",
- "value": "split_related_observables:custom_function_result.data.output.type"
- }
- }
- },
- "functionId": 5,
- "functionName": "filter_malicious_indicators",
- "id": "22",
- "type": "filter"
- },
- "errors": {},
- "id": "22",
- "type": "filter",
- "warnings": {},
- "x": 490,
- "y": 1178
- },
- "23": {
- "data": {
- "advanced": {
- "customName": "who interacted with files",
- "customNameId": 0,
- "join": []
- },
- "customDatapaths": {
- "split_related_observables": {
- "data.output.attributes": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "data.output.attributes",
- "value": "split_related_observables:custom_function_result.data.output.attributes"
+ {
+ "id": "port_7_to_port_16",
+ "sourceNode": "7",
+ "sourcePort": "7_out",
+ "targetNode": "16",
+ "targetPort": "16_in"
},
- "data.output.attributes.sha256": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "data.output.attributes.sha256",
- "value": "split_related_observables:custom_function_result.data.output.attributes.sha256"
+ {
+ "id": "port_20_to_port_19",
+ "sourceNode": "20",
+ "sourcePort": "20_out",
+ "targetNode": "19",
+ "targetPort": "19_in"
},
- "data.output.value": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "data.output.value",
- "value": "split_related_observables:custom_function_result.data.output.value"
- }
- }
- },
- "functionId": 1,
- "functionName": "who_interacted_with_files",
- "id": "23",
- "inputs": {
- "domain": {
- "datapaths": [],
- "deduplicate": false
- },
- "file": {
- "datapaths": [
- "split_related_observables:custom_function_result.data.output.attributes.sha256"
- ],
- "deduplicate": false
- },
- "ip": {
- "datapaths": [],
- "deduplicate": false
- },
- "url": {
- "datapaths": [],
- "deduplicate": false
- }
- },
- "playbookName": "Splunk_Identifier_Activity_Analysis",
- "playbookRepo": 2,
- "playbookRepoName": "local",
- "playbookType": "data",
- "synchronous": true,
- "type": "playbook"
- },
- "errors": {},
- "id": "23",
- "type": "playbook",
- "warnings": {},
- "x": 520,
- "y": 1358
- },
- "24": {
- "data": {
- "advanced": {
- "customName": "split related observables",
- "customNameId": 0,
- "join": [],
- "note": "Converts the related observable array into custom function results that are filterable and callable downstream without custom code."
- },
- "customDatapaths": {
- "splunk_attack_analyzer": {
- "outputs:observable.related_observables": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "outputs:observable.related_observables",
- "value": "splunk_attack_analyzer:playbook_output:observable.related_observables"
- }
- }
- },
- "customFunction": {
- "draftMode": false,
- "name": "list_demux",
- "repoName": "community"
- },
- "functionId": 2,
- "functionName": "split_related_observables",
- "id": "24",
- "selectMore": false,
- "type": "utility",
- "utilities": {
- "list_demux": {
- "description": "Accepts a single list and converts it into multiple custom function output results. All output will be placed in the \"output\" datapath. Sub-items and sub-item variable names are dependent on the input.",
- "fields": [
- {
- "dataTypes": [
- "*"
- ],
- "description": "A list of objects. Nested lists are not unpacked.",
- "inputType": "item",
- "label": "input_list",
- "name": "input_list",
- "placeholder": "[\"list_item_1\", \"list_item_2\", \"list_item_3\"]",
- "renderType": "datapath",
- "required": false
- }
- ],
- "label": "list_demux",
- "name": "list_demux"
- }
- },
- "utilityType": "custom_function",
- "values": {
- "list_demux": {
- "input_list": "splunk_attack_analyzer:playbook_output:observable.related_observables"
- }
- }
- },
- "errors": {},
- "id": "24",
- "type": "utility",
- "warnings": {},
- "x": 430,
- "y": 1014
- },
- "25": {
- "data": {
- "advanced": {
- "customName": "merge playbook reports",
- "customNameId": 0,
- "drop_none": true,
- "join": [],
- "note": "Loop through all of the playbook output markdown reports to generate a unified note, while dropping None."
- },
- "functionId": 1,
- "functionName": "merge_playbook_reports",
- "id": "25",
- "parameters": [
- "splunk_attack_analyzer:playbook_output:report",
- "who_interacted_with_urls:playbook_output:markdown_report",
- "who_interacted_with_files:playbook_output:markdown_report",
- "who_received_email:playbook_output:markdown_report"
- ],
- "template": "%%\n{0}\n%%\n\n \n \n\n%%\n{1}\n%%\n\n \n \n\n%%\n{2}\n%%\n\n \n \n\n%%\n{3}\n%%",
- "type": "format"
- },
- "errors": {},
- "id": "25",
- "type": "format",
- "warnings": {},
- "x": 840,
- "y": 1660
- },
- "26": {
- "data": {
- "advanced": {
- "join": [],
- "note": "Add a note with all of the playbook outputs."
- },
- "functionId": 3,
- "functionName": "add_note_3",
- "id": "26",
- "selectMore": false,
- "tab": "apis",
- "type": "utility",
- "utilities": {
- "add_note": {
- "description": "",
- "fields": [
- {
- "description": "",
- "label": "title",
- "name": "title",
- "placeholder": "Enter a note title",
- "renderType": "datapath",
- "required": false
- },
- {
- "description": "",
- "label": "content",
- "name": "content",
- "placeholder": "Enter the note content",
- "renderType": "datapath",
- "required": false
- },
- {
- "choices": [
- "markdown",
- "html"
+ {
+ "id": "port_21_to_port_1",
+ "sourceNode": "21",
+ "sourcePort": "21_out",
+ "targetNode": "1",
+ "targetPort": "1_in"
+ },
+ {
+ "id": "port_19_to_port_1",
+ "sourceNode": "19",
+ "sourcePort": "19_out",
+ "targetNode": "1",
+ "targetPort": "1_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
],
- "default": "markdown",
- "description": "",
- "label": "note format",
- "name": "note_format",
- "placeholder": "Enter the note content",
- "renderType": "dropdown",
- "required": false
- },
- {
- "hidden": true,
- "name": "container",
- "required": false
- },
- {
- "default": "general",
- "hidden": true,
- "name": "note_type",
- "required": false
- },
- {
- "hidden": true,
- "name": "author",
- "required": false
- },
- {
- "hidden": true,
- "name": "event_id",
- "required": false
- },
- {
- "hidden": true,
- "name": "task_id",
- "required": false
- },
- {
- "hidden": true,
- "name": "trace",
- "required": false
- }
- ],
- "label": "add note",
- "name": "add_note"
- }
- },
- "utilityType": "api",
- "values": {
- "add_note": {
- "_internal": [
- "container",
- "note_type",
- "author",
- "event_id",
- "task_id",
- "trace"
- ],
- "content": "merge_playbook_reports:formatted_data",
- "note_format": "markdown",
- "note_type": "general",
- "title": "Analysis report"
- }
- }
- },
- "errors": {},
- "id": "26",
- "type": "utility",
- "warnings": {},
- "x": 840,
- "y": 1820
- },
- "27": {
- "data": {
- "advanced": {
- "customName": "format analyst note",
- "customNameId": 0,
- "drop_none": true,
- "join": [],
- "notRequiredJoins": [],
- "note": "Loops through the markdown reports from the previous input playbook while dropping None."
- },
- "functionId": 2,
- "functionName": "format_analyst_note",
- "id": "27",
- "parameters": [
- "splunk_attack_analyzer:playbook_output:report"
- ],
- "template": "%%\n{0}\n%%\n",
- "type": "format"
- },
- "errors": {},
- "id": "27",
- "type": "format",
- "warnings": {},
- "x": 0,
- "y": 1620
- },
- "3": {
- "data": {
- "advanced": {
- "customName": "get emails from vault",
- "customNameId": 0,
- "join": [],
- "note": "This block lists all of the files from the vault so they can be filtered downstream."
- },
- "customFunction": {
- "draftMode": false,
- "name": "vault_list",
- "repoName": "community"
- },
- "functionId": 1,
- "functionName": "get_emails_from_vault",
- "id": "3",
- "selectMore": false,
- "type": "utility",
- "utilities": {
- "vault_list": {
- "description": "List all of the vault items based on the provided criteria such as a vault id, container id, and file name. If no inputs provided, it will default to current container. Returns a list of items. May return more variables than listed in outputs.",
- "fields": [
- {
- "dataTypes": [
- "phantom container id"
+ "id": "port_22_to_port_7",
+ "sourceNode": "22",
+ "sourcePort": "22_out",
+ "targetNode": "7",
+ "targetPort": "7_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 1
+ }
],
- "description": "Optional parameter to filter vault items from this specific container. Defaults to current container if no inputs provided.",
- "inputType": "item",
- "label": "container_id",
- "name": "container_id",
- "placeholder": "container:id",
- "renderType": "datapath",
- "required": false
- },
- {
- "dataTypes": [
- "vault id"
+ "id": "port_22_to_port_23",
+ "sourceNode": "22",
+ "sourcePort": "22_out",
+ "targetNode": "23",
+ "targetPort": "23_in"
+ },
+ {
+ "id": "port_23_to_port_16",
+ "sourceNode": "23",
+ "sourcePort": "23_out",
+ "targetNode": "16",
+ "targetPort": "16_in"
+ },
+ {
+ "id": "port_24_to_port_22",
+ "sourceNode": "24",
+ "sourcePort": "24_out",
+ "targetNode": "22",
+ "targetPort": "22_in"
+ },
+ {
+ "id": "port_16_to_port_20",
+ "sourceNode": "16",
+ "sourcePort": "16_out",
+ "targetNode": "20",
+ "targetPort": "20_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
],
- "description": "Optional parameter to filter vault items matching this vault ID. Defaults to None.",
- "inputType": "item",
- "label": "vault_id",
- "name": "vault_id",
- "placeholder": "artifact:*.cef.vault_id",
- "renderType": "datapath",
- "required": false
- },
- {
- "dataTypes": [
- "*"
+ "id": "port_14_to_port_24",
+ "sourceNode": "14",
+ "sourcePort": "14_out",
+ "targetNode": "24",
+ "targetPort": "24_in"
+ },
+ {
+ "id": "port_25_to_port_26",
+ "sourceNode": "25",
+ "sourcePort": "25_out",
+ "targetNode": "26",
+ "targetPort": "26_in"
+ },
+ {
+ "id": "port_26_to_port_1",
+ "sourceNode": "26",
+ "sourcePort": "26_out",
+ "targetNode": "1",
+ "targetPort": "1_in"
+ },
+ {
+ "id": "port_16_to_port_25",
+ "sourceNode": "16",
+ "sourcePort": "16_out",
+ "targetNode": "25",
+ "targetPort": "25_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 2
+ }
],
- "description": "Optional parameter to filter vault items matching this file name. Defaults to None.",
- "inputType": "item",
- "label": "file_name",
- "name": "file_name",
- "placeholder": "artifact:*.cef.file_name",
- "renderType": "datapath",
- "required": false
- }
- ],
- "label": "vault_list",
- "name": "vault_list"
- }
- },
- "utilityType": "custom_function",
- "values": {
- "vault_list": {
- "container_id": null,
- "file_name": null,
- "vault_id": null
- }
- }
- },
- "errors": {},
- "id": "3",
- "type": "utility",
- "warnings": {},
- "x": 430,
- "y": 148
- },
- "4": {
- "data": {
- "advanced": {
- "customName": "fiter email items",
- "customNameId": 0,
- "join": [],
- "note": "Filters on .eml or .msg vault items."
- },
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": "in",
- "param": ".eml",
- "value": "get_emails_from_vault:custom_function_result.data.file_name"
- },
- {
- "conditionIndex": 0,
- "op": "in",
- "param": ".msg",
- "value": "get_emails_from_vault:custom_function_result.data.file_name"
- }
- ],
- "conditionIndex": 0,
- "customName": "email objects",
- "logic": "or"
- }
- ],
- "functionId": 1,
- "functionName": "fiter_email_items",
- "id": "4",
- "type": "filter"
- },
- "errors": {},
- "id": "4",
- "type": "filter",
- "warnings": {},
- "x": 490,
- "y": 326
- },
- "7": {
- "data": {
- "advanced": {
- "customName": "who interacted with urls",
- "customNameId": 0,
- "join": []
- },
- "customDatapaths": {
- "filter_2": {
- "condition_1:related_observables:result.data.output.value": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "condition_1:related_observables:result.data.output.value",
- "value": "filtered-data:filter_2:condition_1:related_observables:custom_function_result.data.output.value"
- }
- },
- "filter_malicious_indicators": {
- "condition_1:split_related_observables:result.data.output.value": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "condition_1:split_related_observables:result.data.output.value",
- "value": "filtered-data:filter_malicious_indicators:condition_1:split_related_observables:custom_function_result.data.output.value"
+ "id": "port_14_to_port_27",
+ "sourceNode": "14",
+ "sourcePort": "14_out",
+ "targetNode": "27",
+ "targetPort": "27_in"
+ },
+ {
+ "id": "port_27_to_port_21",
+ "sourceNode": "27",
+ "sourcePort": "27_out",
+ "targetNode": "21",
+ "targetPort": "21_in"
}
- },
- "related_observables": {
- "data.output.value": {
- "contains": [],
- "isCustomDatapath": true,
- "label": "data.output.value",
- "value": "related_observables:custom_function_result.data.output.value"
+ ],
+ "hash": "f8687c0dc65bdfa081387f52282128e97f77b26b",
+ "nodes": {
+ "0": {
+ "data": {
+ "advanced": {
+ "join": []
+ },
+ "functionName": "on_start",
+ "id": "0",
+ "type": "start"
+ },
+ "errors": {},
+ "id": "0",
+ "type": "start",
+ "warnings": {},
+ "x": 450,
+ "y": -5.755396159656812e-13
+ },
+ "1": {
+ "data": {
+ "advanced": {
+ "join": []
+ },
+ "functionName": "on_finish",
+ "id": "1",
+ "type": "end"
+ },
+ "errors": {},
+ "id": "1",
+ "type": "end",
+ "warnings": {},
+ "x": 340,
+ "y": 2040
+ },
+ "14": {
+ "data": {
+ "advanced": {
+ "customName": "high score indicator decision",
+ "customNameId": 0,
+ "join": [],
+ "note": "Determines which path to go based on the reputation score of the email and the presence of additional indicators."
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": ">",
+ "param": "splunk_attack_analyzer:playbook_output:observable.related_observables.*.reputation.score_id",
+ "value": "5"
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "malicious indicators exist",
+ "display": "If",
+ "logic": "and",
+ "type": "if"
+ },
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 1,
+ "op": ">",
+ "param": "splunk_attack_analyzer:playbook_output:observable.reputation.score_id",
+ "value": "5"
+ }
+ ],
+ "conditionIndex": 1,
+ "customName": "phish email",
+ "display": "Else If",
+ "logic": "and",
+ "type": "elif"
+ },
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 2,
+ "op": "==",
+ "param": "",
+ "value": ""
+ }
+ ],
+ "conditionIndex": 2,
+ "customName": "not high threat",
+ "display": "Else",
+ "logic": "and",
+ "type": "else"
+ }
+ ],
+ "customDatapaths": {
+ "splunk_attack_analyzer": {
+ "outputs:observable.related_observables": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "isDatapathArray": true,
+ "label": "outputs:observable.related_observables",
+ "value": "splunk_attack_analyzer:playbook_output:observable.related_observables"
+ },
+ "outputs:observable.related_observables.*.reputation": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "outputs:observable.related_observables.*.reputation",
+ "value": "splunk_attack_analyzer:playbook_output:observable.related_observables.*.reputation"
+ },
+ "outputs:observable.related_observables.*.reputation.score_id": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "outputs:observable.related_observables.*.reputation.score_id",
+ "value": "splunk_attack_analyzer:playbook_output:observable.related_observables.*.reputation.score_id"
+ },
+ "outputs:observable.reputation.score_id": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "outputs:observable.reputation.score_id",
+ "value": "splunk_attack_analyzer:playbook_output:observable.reputation.score_id"
+ }
+ }
+ },
+ "functionId": 1,
+ "functionName": "high_score_indicator_decision",
+ "id": "14",
+ "type": "decision"
+ },
+ "errors": {},
+ "id": "14",
+ "type": "decision",
+ "warnings": {},
+ "x": 510,
+ "y": 654
+ },
+ "16": {
+ "data": {
+ "advanced": {
+ "customName": "who received email",
+ "customNameId": 0,
+ "join": [],
+ "notRequiredJoins": [
+ "who_interacted_with_urls",
+ "who_interacted_with_files"
+ ],
+ "scope": "all"
+ },
+ "customDatapaths": {
+ "artifacts": {
+ "artifact:*.cef.internetMessageId": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "artifact:*.cef.internetMessageId",
+ "value": "artifact:*.cef.internetMessageId"
+ },
+ "artifact:*.cef.message-id": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "artifact:*.cef.message-id",
+ "value": "artifact:*.cef.message-id"
+ }
+ }
+ },
+ "functionId": 1,
+ "functionName": "who_received_email",
+ "id": "16",
+ "inputs": {
+ "message_id": {
+ "datapaths": [
+ "artifact:*.cef.emailHeaders.Message-ID"
+ ],
+ "deduplicate": false
+ },
+ "sender": {
+ "datapaths": [],
+ "deduplicate": false
+ },
+ "subject": {
+ "datapaths": [],
+ "deduplicate": false
+ }
+ },
+ "playbookName": "Splunk_Message_Identifier_Activity_Analysis",
+ "playbookRepo": 2,
+ "playbookRepoName": "local",
+ "playbookType": "data",
+ "synchronous": true,
+ "type": "playbook"
+ },
+ "errors": {},
+ "id": "16",
+ "type": "playbook",
+ "warnings": {},
+ "x": 520,
+ "y": 1522
+ },
+ "19": {
+ "data": {
+ "advanced": {
+ "customName": "add artifacts",
+ "customNameId": 0,
+ "join": [],
+ "note": "Add artifacts to the incident. Contains custom code to loop through the preceding custom code output properly."
+ },
+ "customFunction": {
+ "draftMode": false,
+ "name": "artifact_create",
+ "repoName": "community"
+ },
+ "functionId": 6,
+ "functionName": "add_artifacts",
+ "id": "19",
+ "selectMore": false,
+ "tab": "customFunctions",
+ "type": "utility",
+ "utilities": {
+ "artifact_create": {
+ "description": "Create a new artifact with the specified attributes. Supports all fields available in /rest/artifact. Add any unlisted inputs as dictionary keys in input_json. Unsupported keys will automatically be dropped.",
+ "fields": [
+ {
+ "dataTypes": [
+ "phantom container id"
+ ],
+ "description": "Container which the artifact will be added to.",
+ "inputType": "item",
+ "label": "container",
+ "name": "container",
+ "placeholder": "container:id",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "dataTypes": [],
+ "description": "The name of the new artifact, which is optional and defaults to \"artifact\".",
+ "inputType": "item",
+ "label": "name",
+ "name": "name",
+ "placeholder": "artifact",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "dataTypes": [],
+ "description": "The label of the new artifact, which is optional and defaults to \"events\"",
+ "inputType": "item",
+ "label": "label",
+ "name": "label",
+ "placeholder": "events",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "dataTypes": [
+ ""
+ ],
+ "description": "The severity of the new artifact, which is optional and defaults to \"Medium\". Typically this is either \"High\", \"Medium\", or \"Low\".",
+ "inputType": "item",
+ "label": "severity",
+ "name": "severity",
+ "placeholder": "Medium",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "dataTypes": [],
+ "description": "The name of the CEF field to populate in the artifact, such as \"destinationAddress\" or \"sourceDnsDomain\". Required only if cef_value is provided.",
+ "inputType": "item",
+ "label": "cef_field",
+ "name": "cef_field",
+ "placeholder": "destinationAddress",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "dataTypes": [
+ "*"
+ ],
+ "description": "The value of the CEF field to populate in the artifact, such as the IP address, domain name, or file hash. Required only if cef_field is provided.",
+ "inputType": "item",
+ "label": "cef_value",
+ "name": "cef_value",
+ "placeholder": "192.0.2.192",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "dataTypes": [],
+ "description": "The CEF data type of the data in cef_value. For example, this could be \"ip\", \"hash\", or \"domain\". Optional.",
+ "inputType": "item",
+ "label": "cef_data_type",
+ "name": "cef_data_type",
+ "placeholder": "ip",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "dataTypes": [],
+ "description": "A comma-separated list of tags to apply to the created artifact, which is optional.",
+ "inputType": "item",
+ "label": "tags",
+ "name": "tags",
+ "placeholder": "tag1, tag2, tag3",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "dataTypes": [],
+ "description": "Either \"true\" or \"false\", depending on whether or not the new artifact should trigger the execution of any playbooks that are set to active on the label of the container the artifact will be added to. Optional and defaults to \"false\".",
+ "inputType": "item",
+ "label": "run_automation",
+ "name": "run_automation",
+ "placeholder": "false",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "dataTypes": [],
+ "description": "Optional parameter to modify any extra attributes of the artifact. Input_json will be merged with other inputs. In the event of a conflict, input_json will take precedence.",
+ "inputType": "item",
+ "label": "input_json",
+ "name": "input_json",
+ "placeholder": "{\"source_data_identifier\": \"1234\", \"data\": \"5678\"}",
+ "renderType": "datapath",
+ "required": false
+ }
+ ],
+ "label": "artifact_create",
+ "name": "artifact_create"
+ }
+ },
+ "utilityType": "custom_function",
+ "values": {
+ "artifact_create": {
+ "cef_data_type": null,
+ "cef_field": null,
+ "cef_value": null,
+ "container": "container:id",
+ "input_json": "convert_to_artifacts:custom_function:json",
+ "label": "",
+ "name": "",
+ "run_automation": null,
+ "severity": null,
+ "tags": null
+ }
+ }
+ },
+ "errors": {},
+ "id": "19",
+ "type": "utility",
+ "userCode": "\n parameters = []\n for artifact_json in convert_to_artifacts__json:\n\n parameters.append({\n \"name\": \"Interaction with phish indicators\",\n \"tags\": None,\n \"label\": None,\n \"severity\": None,\n \"cef_field\": None,\n \"cef_value\": None,\n \"container\": id_value,\n \"input_json\": artifact_json,\n \"cef_data_type\": None,\n \"run_automation\": None,\n })\n\n\n",
+ "warnings": {},
+ "x": 520,
+ "y": 1820
+ },
+ "2": {
+ "data": {
+ "advanced": {
+ "customName": "splunk attack analyzer",
+ "customNameId": 0,
+ "join": []
+ },
+ "functionId": 1,
+ "functionName": "splunk_attack_analyzer",
+ "id": "2",
+ "inputs": {
+ "url": {
+ "datapaths": [],
+ "deduplicate": false
+ },
+ "vault_id": {
+ "datapaths": [
+ "filtered-data:fiter_email_items:condition_1:get_emails_from_vault:custom_function_result.data.vault_id"
+ ],
+ "deduplicate": false
+ }
+ },
+ "playbookName": "Splunk_Attack_Analyzer_Dynamic_Analysis",
+ "playbookRepo": 2,
+ "playbookRepoName": "local",
+ "playbookType": "data",
+ "synchronous": true,
+ "type": "playbook"
+ },
+ "errors": {},
+ "id": "2",
+ "type": "playbook",
+ "warnings": {},
+ "x": 430,
+ "y": 506
+ },
+ "20": {
+ "data": {
+ "advanced": {
+ "customName": "convert to artifacts",
+ "customNameId": 0,
+ "join": [],
+ "notRequiredJoins": [
+ "playbook_Splunk_Identifier_Activity_Analysis_1",
+ "playbook_Splunk_Message_Identifier_Activity_Analysis_1"
+ ],
+ "note": "Convert the output of the Playbooks into artifacts. Change labels and artifact structure as needed."
+ },
+ "customDatapaths": {
+ "who_received_email": {
+ "outputs:observable.message_identifier_activity": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "outputs:observable.message_identifier_activity",
+ "value": "who_received_email:playbook_output:observable.message_identifier_activity"
+ }
+ }
+ },
+ "functionId": 2,
+ "functionName": "convert_to_artifacts",
+ "id": "20",
+ "inputParameters": [
+ "who_received_email:playbook_output:observable",
+ "who_interacted_with_urls:playbook_output:observable",
+ "who_interacted_with_files:playbook_output:observable",
+ "filtered-data:filter_malicious_indicators:condition_1:split_related_observables:custom_function_result.data.output",
+ "filtered-data:filter_malicious_indicators:condition_2:split_related_observables:custom_function_result.data.output",
+ "splunk_attack_analyzer:playbook_output:observable"
+ ],
+ "outputVariables": [
+ "json"
+ ],
+ "type": "code"
+ },
+ "errors": {},
+ "id": "20",
+ "type": "code",
+ "userCode": " \n aggregate_data__json = []\n suspect_urls = filtered_cf_result_0_data_output\n suspect_files = filtered_cf_result_1_data_output\n suspect_emails = [item for item in splunk_attack_analyzer_output_observable_values if item]\n url_activity_list = [item for item in who_interacted_with_urls_output_observable_values if item]\n file_activity_list = [item for item in who_interacted_with_files_output_observable_values if item]\n message_activity_list = [item for item in who_received_email_output_observable_values if item]\n \n # Change labels here based on desired artifact labeling.\n # By default, the label is based on the vendor technology and IoC involved.\n email_interaction_label = \"splunk_email_interaction\"\n file_interaction_label = \"splunk_file_interaction\"\n url_interaction_label = \"splunk_url_interaction\"\n suspect_url_label = \"saa_url_report\"\n suspect_file_label = \"saa_file_report\"\n suspect_email_label = \"saa_email_report\"\n\n for email in suspect_emails:\n email.pop('related_observables', None)\n artifact = {'cef': email, 'name': f'{email[\"source\"]} reputation', 'label': suspect_email_label, 'cef_types': {'value': [email['type']]} }\n aggregate_data__json.append(artifact) \n \n for url in suspect_urls:\n artifact = {'cef': url, 'name': f'{url[\"source\"]} reputation', 'label': suspect_url_label, 'cef_types': {'value': [url['type']]}}\n aggregate_data__json.append(artifact) \n \n for file in suspect_files:\n artifact = {'cef': file, 'name': f'{file[\"source\"]} reputation', 'label': suspect_file_label, 'cef_types': {'value': [file['type']]} }\n aggregate_data__json.append(artifact) \n\n for message_activity in message_activity_list:\n aggregate_data__json.append({'cef': message_activity, 'label': email_interaction_label, 'name': 'Received suspect email', 'cef_types': {'value': [message_activity['type']]}})\n \n for url_act in url_activity_list:\n artifact = {'label': url_interaction_label, 'name': 'Interaction with suspect url', 
'cef_types': {'value': [url_act['type']]}}\n identifier_activity = url_act.pop('identifier_activity', [])\n url_act.pop('total_count', None)\n artifact['cef'] = url_act\n # future proofing in case the identifier_activity changes type to dict\n if isinstance(identifier_activity, list):\n for item in identifier_activity:\n sub_artifact = artifact.copy()\n sub_artifact['cef']['identifier_activity'] = item\n aggregate_data__json.append(sub_artifact) \n else:\n artifact['cef']['identifier_activity'] = identifier_activity\n aggregate_data__json.append(artifact) \n \n for file_act in file_activity_list:\n artifact = {'label': file_interaction_label, 'name': 'Interaction with suspect file', 'cef_types': {'value': [file_act['type']]}}\n identifier_activity = file_act.pop('identifier_activity', [])\n file_act.pop('total_count', None)\n artifact['cef'] = file_act\n # future proofing in case the identifier_activity changes type to dict\n if isinstance(identifier_activity, list):\n for item in identifier_activity:\n sub_artifact = artifact.copy()\n sub_artifact['cef']['identifier_activity'] = item\n aggregate_data__json.append(sub_artifact) \n else:\n artifact['cef']['identifier_activity'] = identifier_activity\n aggregate_data__json.append(artifact) \n \n convert_to_artifacts__json = aggregate_data__json\n",
+ "warnings": {},
+ "x": 520,
+ "y": 1670
+ },
+ "21": {
+ "data": {
+ "advanced": {
+ "customName": "add analysis note",
+ "customNameId": 0,
+ "join": [],
+ "note": "Adds a note from the dynamic analysis when the reputation score of the email is not high enough to be considered suspicious."
+ },
+ "functionId": 7,
+ "functionName": "add_analysis_note",
+ "id": "21",
+ "selectMore": false,
+ "tab": "apis",
+ "type": "utility",
+ "utilities": {
+ "add_note": {
+ "description": "",
+ "fields": [
+ {
+ "description": "",
+ "label": "title",
+ "name": "title",
+ "placeholder": "Enter a note title",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "description": "",
+ "label": "content",
+ "name": "content",
+ "placeholder": "Enter the note content",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "choices": [
+ "markdown",
+ "html"
+ ],
+ "default": "markdown",
+ "description": "",
+ "label": "note format",
+ "name": "note_format",
+ "placeholder": "Enter the note content",
+ "renderType": "dropdown",
+ "required": false
+ },
+ {
+ "hidden": true,
+ "name": "container",
+ "required": false
+ },
+ {
+ "default": "general",
+ "hidden": true,
+ "name": "note_type",
+ "required": false
+ },
+ {
+ "hidden": true,
+ "name": "author",
+ "required": false
+ },
+ {
+ "hidden": true,
+ "name": "event_id",
+ "required": false
+ },
+ {
+ "hidden": true,
+ "name": "task_id",
+ "required": false
+ },
+ {
+ "hidden": true,
+ "name": "trace",
+ "required": false
+ }
+ ],
+ "label": "add note",
+ "name": "add_note"
+ }
+ },
+ "utilityType": "api",
+ "values": {
+ "add_note": {
+ "_internal": [
+ "container",
+ "note_type",
+ "author",
+ "event_id",
+ "task_id",
+ "trace"
+ ],
+ "content": "format_analyst_note:formatted_data",
+ "note_format": "markdown",
+ "note_type": "general",
+ "title": "Final analysis"
+ }
+ }
+ },
+ "errors": {},
+ "id": "21",
+ "type": "utility",
+ "userCode": "\n \n",
+ "warnings": {},
+ "x": 0,
+ "y": 1818
+ },
+ "22": {
+ "data": {
+ "advanced": {
+ "customName": "filter malicious indicators",
+ "customNameId": 0,
+ "join": [],
+ "note": "Filters and creates two data sets, one for URLs greater than 5, and one for Files greater than 5."
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": "==",
+ "param": "split_related_observables:custom_function_result.data.output.type",
+ "value": "url"
+ },
+ {
+ "conditionIndex": 0,
+ "op": ">",
+ "param": "split_related_observables:custom_function_result.data.output.reputation.score_id",
+ "value": "5"
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "urls",
+ "logic": "and"
+ },
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 1,
+ "op": "==",
+ "param": "split_related_observables:custom_function_result.data.output.type",
+ "value": "file"
+ },
+ {
+ "conditionIndex": 1,
+ "op": ">",
+ "param": "split_related_observables:custom_function_result.data.output.reputation.score_id",
+ "value": "5"
+ }
+ ],
+ "conditionIndex": 1,
+ "customName": "files",
+ "logic": "and"
+ }
+ ],
+ "customDatapaths": {
+ "split_related_observables": {
+ "data.output.reputation.score_id": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "data.output.reputation.score_id",
+ "value": "split_related_observables:custom_function_result.data.output.reputation.score_id"
+ },
+ "data.output.type": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "data.output.type",
+ "value": "split_related_observables:custom_function_result.data.output.type"
+ }
+ }
+ },
+ "functionId": 5,
+ "functionName": "filter_malicious_indicators",
+ "id": "22",
+ "type": "filter"
+ },
+ "errors": {},
+ "id": "22",
+ "type": "filter",
+ "warnings": {},
+ "x": 490,
+ "y": 1178
+ },
+ "23": {
+ "data": {
+ "advanced": {
+ "customName": "who interacted with files",
+ "customNameId": 0,
+ "join": []
+ },
+ "customDatapaths": {
+ "split_related_observables": {
+ "data.output.attributes": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "data.output.attributes",
+ "value": "split_related_observables:custom_function_result.data.output.attributes"
+ },
+ "data.output.attributes.sha256": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "data.output.attributes.sha256",
+ "value": "split_related_observables:custom_function_result.data.output.attributes.sha256"
+ },
+ "data.output.value": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "data.output.value",
+ "value": "split_related_observables:custom_function_result.data.output.value"
+ }
+ }
+ },
+ "functionId": 1,
+ "functionName": "who_interacted_with_files",
+ "id": "23",
+ "inputs": {
+ "domain": {
+ "datapaths": [],
+ "deduplicate": false
+ },
+ "file": {
+ "datapaths": [
+ "split_related_observables:custom_function_result.data.output.attributes.sha256"
+ ],
+ "deduplicate": false
+ },
+ "ip": {
+ "datapaths": [],
+ "deduplicate": false
+ },
+ "url": {
+ "datapaths": [],
+ "deduplicate": false
+ }
+ },
+ "playbookName": "Splunk_Identifier_Activity_Analysis",
+ "playbookRepo": 2,
+ "playbookRepoName": "local",
+ "playbookType": "data",
+ "synchronous": true,
+ "type": "playbook"
+ },
+ "errors": {},
+ "id": "23",
+ "type": "playbook",
+ "warnings": {},
+ "x": 520,
+ "y": 1358
+ },
+ "24": {
+ "data": {
+ "advanced": {
+ "customName": "split related observables",
+ "customNameId": 0,
+ "join": [],
+ "note": "Converts the related observable array into custom function results that are filterable and callable downstream without custom code."
+ },
+ "customDatapaths": {
+ "splunk_attack_analyzer": {
+ "outputs:observable.related_observables": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "outputs:observable.related_observables",
+ "value": "splunk_attack_analyzer:playbook_output:observable.related_observables"
+ }
+ }
+ },
+ "customFunction": {
+ "draftMode": false,
+ "name": "list_demux",
+ "repoName": "community"
+ },
+ "functionId": 2,
+ "functionName": "split_related_observables",
+ "id": "24",
+ "selectMore": false,
+ "type": "utility",
+ "utilities": {
+ "list_demux": {
+ "description": "Accepts a single list and converts it into multiple custom function output results. All output will be placed in the \"output\" datapath. Sub-items and sub-item variable names are dependent on the input.",
+ "fields": [
+ {
+ "dataTypes": [
+ "*"
+ ],
+ "description": "A list of objects. Nested lists are not unpacked.",
+ "inputType": "item",
+ "label": "input_list",
+ "name": "input_list",
+ "placeholder": "[\"list_item_1\", \"list_item_2\", \"list_item_3\"]",
+ "renderType": "datapath",
+ "required": false
+ }
+ ],
+ "label": "list_demux",
+ "name": "list_demux"
+ }
+ },
+ "utilityType": "custom_function",
+ "values": {
+ "list_demux": {
+ "input_list": "splunk_attack_analyzer:playbook_output:observable.related_observables"
+ }
+ }
+ },
+ "errors": {},
+ "id": "24",
+ "type": "utility",
+ "warnings": {},
+ "x": 430,
+ "y": 1014
+ },
+ "25": {
+ "data": {
+ "advanced": {
+ "customName": "merge playbook reports",
+ "customNameId": 0,
+ "drop_none": true,
+ "join": [],
+ "note": "Loop through all of the playbook output markdown reports to generate a unified note, while dropping None."
+ },
+ "functionId": 1,
+ "functionName": "merge_playbook_reports",
+ "id": "25",
+ "parameters": [
+ "splunk_attack_analyzer:playbook_output:report",
+ "who_interacted_with_urls:playbook_output:markdown_report",
+ "who_interacted_with_files:playbook_output:markdown_report",
+ "who_received_email:playbook_output:markdown_report"
+ ],
+ "template": "%%\n{0}\n%%\n\n \n \n\n%%\n{1}\n%%\n\n \n \n\n%%\n{2}\n%%\n\n \n \n\n%%\n{3}\n%%",
+ "type": "format"
+ },
+ "errors": {},
+ "id": "25",
+ "type": "format",
+ "warnings": {},
+ "x": 840,
+ "y": 1660
+ },
+ "26": {
+ "data": {
+ "advanced": {
+ "join": [],
+ "note": "Add a note with all of the playbook outputs."
+ },
+ "functionId": 3,
+ "functionName": "add_note_3",
+ "id": "26",
+ "selectMore": false,
+ "tab": "apis",
+ "type": "utility",
+ "utilities": {
+ "add_note": {
+ "description": "",
+ "fields": [
+ {
+ "description": "",
+ "label": "title",
+ "name": "title",
+ "placeholder": "Enter a note title",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "description": "",
+ "label": "content",
+ "name": "content",
+ "placeholder": "Enter the note content",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "choices": [
+ "markdown",
+ "html"
+ ],
+ "default": "markdown",
+ "description": "",
+ "label": "note format",
+ "name": "note_format",
+ "placeholder": "Enter the note content",
+ "renderType": "dropdown",
+ "required": false
+ },
+ {
+ "hidden": true,
+ "name": "container",
+ "required": false
+ },
+ {
+ "default": "general",
+ "hidden": true,
+ "name": "note_type",
+ "required": false
+ },
+ {
+ "hidden": true,
+ "name": "author",
+ "required": false
+ },
+ {
+ "hidden": true,
+ "name": "event_id",
+ "required": false
+ },
+ {
+ "hidden": true,
+ "name": "task_id",
+ "required": false
+ },
+ {
+ "hidden": true,
+ "name": "trace",
+ "required": false
+ }
+ ],
+ "label": "add note",
+ "name": "add_note"
+ }
+ },
+ "utilityType": "api",
+ "values": {
+ "add_note": {
+ "_internal": [
+ "container",
+ "note_type",
+ "author",
+ "event_id",
+ "task_id",
+ "trace"
+ ],
+ "content": "merge_playbook_reports:formatted_data",
+ "note_format": "markdown",
+ "note_type": "general",
+ "title": "Analysis report"
+ }
+ }
+ },
+ "errors": {},
+ "id": "26",
+ "type": "utility",
+ "warnings": {},
+ "x": 840,
+ "y": 1820
+ },
+ "27": {
+ "data": {
+ "advanced": {
+ "customName": "format analyst note",
+ "customNameId": 0,
+ "drop_none": true,
+ "join": [],
+ "notRequiredJoins": [],
+ "note": "Loops through the markdown reports from the previous input playbook while dropping None."
+ },
+ "functionId": 2,
+ "functionName": "format_analyst_note",
+ "id": "27",
+ "parameters": [
+ "splunk_attack_analyzer:playbook_output:report"
+ ],
+ "template": "%%\n{0}\n%%\n",
+ "type": "format"
+ },
+ "errors": {},
+ "id": "27",
+ "type": "format",
+ "warnings": {},
+ "x": 0,
+ "y": 1620
+ },
+ "3": {
+ "data": {
+ "advanced": {
+ "customName": "get emails from vault",
+ "customNameId": 0,
+ "join": [],
+ "note": "This block lists all of the files from the vault so they can be filtered downstream."
+ },
+ "customFunction": {
+ "draftMode": false,
+ "name": "vault_list",
+ "repoName": "community"
+ },
+ "functionId": 1,
+ "functionName": "get_emails_from_vault",
+ "id": "3",
+ "selectMore": false,
+ "type": "utility",
+ "utilities": {
+ "vault_list": {
+ "description": "List all of the vault items based on the provided criteria such as a vault id, container id, and file name. If no inputs provided, it will default to current container. Returns a list of items. May return more variables than listed in outputs.",
+ "fields": [
+ {
+ "dataTypes": [
+ "phantom container id"
+ ],
+ "description": "Optional parameter to filter vault items from this specific container. Defaults to current container if no inputs provided.",
+ "inputType": "item",
+ "label": "container_id",
+ "name": "container_id",
+ "placeholder": "container:id",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "dataTypes": [
+ "vault id"
+ ],
+ "description": "Optional parameter to filter vault items matching this vault ID. Defaults to None.",
+ "inputType": "item",
+ "label": "vault_id",
+ "name": "vault_id",
+ "placeholder": "artifact:*.cef.vault_id",
+ "renderType": "datapath",
+ "required": false
+ },
+ {
+ "dataTypes": [
+ "*"
+ ],
+ "description": "Optional parameter to filter vault items matching this file name. Defaults to None.",
+ "inputType": "item",
+ "label": "file_name",
+ "name": "file_name",
+ "placeholder": "artifact:*.cef.file_name",
+ "renderType": "datapath",
+ "required": false
+ }
+ ],
+ "label": "vault_list",
+ "name": "vault_list"
+ }
+ },
+ "utilityType": "custom_function",
+ "values": {
+ "vault_list": {
+ "container_id": null,
+ "file_name": null,
+ "vault_id": null
+ }
+ }
+ },
+ "errors": {},
+ "id": "3",
+ "type": "utility",
+ "warnings": {},
+ "x": 430,
+ "y": 148
+ },
+ "4": {
+ "data": {
+ "advanced": {
+ "customName": "fiter email items",
+ "customNameId": 0,
+ "join": [],
+ "note": "Filters on .eml or .msg vault items."
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": "in",
+ "param": ".eml",
+ "value": "get_emails_from_vault:custom_function_result.data.file_name"
+ },
+ {
+ "conditionIndex": 0,
+ "op": "in",
+ "param": ".msg",
+ "value": "get_emails_from_vault:custom_function_result.data.file_name"
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "email objects",
+ "logic": "or"
+ }
+ ],
+ "functionId": 1,
+ "functionName": "fiter_email_items",
+ "id": "4",
+ "type": "filter"
+ },
+ "errors": {},
+ "id": "4",
+ "type": "filter",
+ "warnings": {},
+ "x": 490,
+ "y": 326
+ },
+ "7": {
+ "data": {
+ "advanced": {
+ "customName": "who interacted with urls",
+ "customNameId": 0,
+ "join": []
+ },
+ "customDatapaths": {
+ "filter_2": {
+ "condition_1:related_observables:result.data.output.value": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "condition_1:related_observables:result.data.output.value",
+ "value": "filtered-data:filter_2:condition_1:related_observables:custom_function_result.data.output.value"
+ }
+ },
+ "filter_malicious_indicators": {
+ "condition_1:split_related_observables:result.data.output.value": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "condition_1:split_related_observables:result.data.output.value",
+ "value": "filtered-data:filter_malicious_indicators:condition_1:split_related_observables:custom_function_result.data.output.value"
+ }
+ },
+ "related_observables": {
+ "data.output.value": {
+ "contains": [],
+ "isCustomDatapath": true,
+ "label": "data.output.value",
+ "value": "related_observables:custom_function_result.data.output.value"
+ }
+ }
+ },
+ "functionId": 1,
+ "functionName": "who_interacted_with_urls",
+ "id": "7",
+ "inputs": {
+ "domain": {
+ "datapaths": [],
+ "deduplicate": false
+ },
+ "file": {
+ "datapaths": [],
+ "deduplicate": false
+ },
+ "ip": {
+ "datapaths": [],
+ "deduplicate": false
+ },
+ "url": {
+ "datapaths": [
+ "filtered-data:filter_malicious_indicators:condition_1:split_related_observables:custom_function_result.data.output.value"
+ ],
+ "deduplicate": false
+ }
+ },
+ "playbookName": "Splunk_Identifier_Activity_Analysis",
+ "playbookRepo": 2,
+ "playbookRepoName": "local",
+ "playbookType": "data",
+ "synchronous": true,
+ "type": "playbook"
+ },
+ "errors": {},
+ "id": "7",
+ "type": "playbook",
+ "warnings": {},
+ "x": 180,
+ "y": 1358
}
- }
- },
- "functionId": 1,
- "functionName": "who_interacted_with_urls",
- "id": "7",
- "inputs": {
- "domain": {
- "datapaths": [],
- "deduplicate": false
- },
- "file": {
- "datapaths": [],
- "deduplicate": false
- },
- "ip": {
- "datapaths": [],
- "deduplicate": false
- },
- "url": {
- "datapaths": [
- "filtered-data:filter_malicious_indicators:condition_1:split_related_observables:custom_function_result.data.output.value"
- ],
- "deduplicate": false
- }
},
- "playbookName": "Splunk_Identifier_Activity_Analysis",
- "playbookRepo": 2,
- "playbookRepoName": "local",
- "playbookType": "data",
- "synchronous": true,
- "type": "playbook"
- },
- "errors": {},
- "id": "7",
- "type": "playbook",
- "warnings": {},
- "x": 180,
- "y": 1358
- }
- },
- "notes": "Ensure the four input playbooks are loaded onto the system. The input playbooks are designed to be swappable within the same category (e.g., Message Activity Analysis) with minimal to no changes downstream."
+ "notes": "Ensure the four input playbooks are loaded onto the system. The input playbooks are designed to be swappable within the same category (e.g., Message Activity Analysis) with minimal to no changes downstream."
+ },
+ "input_spec": null,
+ "output_spec": null,
+ "playbook_type": "automation",
+ "python_version": "3.13",
+ "schema": "5.0.10",
+ "version": "6.1.1.211"
},
- "input_spec": null,
- "output_spec": null,
- "playbook_type": "automation",
- "python_version": "3",
- "schema": "5.0.10",
- "version": "6.1.1.211"
- },
- "create_time": "2023-12-14T16:26:50.722446+00:00",
- "draft_mode": false,
- "labels": [
- "email"
- ],
- "tags": [
- "D3-DA",
- "D3-SRA"
- ]
-}
+ "create_time": "2023-12-14T16:26:50.722446+00:00",
+ "draft_mode": false,
+ "labels": [
+ "email"
+ ],
+ "tags": [
+ "D3-DA",
+ "D3-SRA"
+ ]
+}
\ No newline at end of file
diff --git a/playbooks/Splunk_Automated_Email_Investigation.png b/playbooks/Splunk_Automated_Email_Investigation.png
index 3e7024708c..1af59d2774 100644
Binary files a/playbooks/Splunk_Automated_Email_Investigation.png and b/playbooks/Splunk_Automated_Email_Investigation.png differ
diff --git a/playbooks/Splunk_Identifier_Activity_Analysis.json b/playbooks/Splunk_Identifier_Activity_Analysis.json
index 1f5a4e5a3b..417ec61545 100644
--- a/playbooks/Splunk_Identifier_Activity_Analysis.json
+++ b/playbooks/Splunk_Identifier_Activity_Analysis.json
@@ -1,1179 +1,1179 @@
{
- "blockly": false,
- "blockly_xml": "",
- "category": "Identifier Activity Analysis",
- "coa": {
- "data": {
- "description": "Accepts a file_hash, domain name, URL, or IP Address, and asks Splunk for a list of devices and users that have interacted with each. It then produces a normalized output and summary table. Defaults to -30d searches.",
- "edges": [
- {
- "id": "port_0_to_port_2",
- "sourceNode": "0",
- "sourcePort": "0_out",
- "targetNode": "2",
- "targetPort": "2_in"
- },
- {
- "conditions": [
- {
- "index": 1
- }
- ],
- "id": "port_2_to_port_4",
- "sourceNode": "2",
- "sourcePort": "2_out",
- "targetNode": "4",
- "targetPort": "4_in"
- },
- {
- "conditions": [
- {
- "index": 2
- }
- ],
- "id": "port_2_to_port_5",
- "sourceNode": "2",
- "sourcePort": "2_out",
- "targetNode": "5",
- "targetPort": "5_in"
- },
- {
- "conditions": [
- {
- "index": 3
- }
- ],
- "id": "port_2_to_port_6",
- "sourceNode": "2",
- "sourcePort": "2_out",
- "targetNode": "6",
- "targetPort": "6_in"
- },
- {
- "id": "port_3_to_port_8",
- "sourceNode": "3",
- "sourcePort": "3_out",
- "targetNode": "8",
- "targetPort": "8_in"
- },
- {
- "id": "port_4_to_port_9",
- "sourceNode": "4",
- "sourcePort": "4_out",
- "targetNode": "9",
- "targetPort": "9_in"
- },
- {
- "id": "port_5_to_port_10",
- "sourceNode": "5",
- "sourcePort": "5_out",
- "targetNode": "10",
- "targetPort": "10_in"
- },
- {
- "id": "port_6_to_port_11",
- "sourceNode": "6",
- "sourcePort": "6_out",
- "targetNode": "11",
- "targetPort": "11_in"
- },
- {
- "id": "port_8_to_port_12",
- "sourceNode": "8",
- "sourcePort": "8_out",
- "targetNode": "12",
- "targetPort": "12_in"
- },
- {
- "id": "port_9_to_port_13",
- "sourceNode": "9",
- "sourcePort": "9_out",
- "targetNode": "13",
- "targetPort": "13_in"
- },
- {
- "id": "port_10_to_port_14",
- "sourceNode": "10",
- "sourcePort": "10_out",
- "targetNode": "14",
- "targetPort": "14_in"
- },
- {
- "id": "port_11_to_port_15",
- "sourceNode": "11",
- "sourcePort": "11_out",
- "targetNode": "15",
- "targetPort": "15_in"
- },
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_12_to_port_16",
- "sourceNode": "12",
- "sourcePort": "12_out",
- "targetNode": "16",
- "targetPort": "16_in"
- },
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_13_to_port_17",
- "sourceNode": "13",
- "sourcePort": "13_out",
- "targetNode": "17",
- "targetPort": "17_in"
- },
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_14_to_port_18",
- "sourceNode": "14",
- "sourcePort": "14_out",
- "targetNode": "18",
- "targetPort": "18_in"
- },
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_15_to_port_19",
- "sourceNode": "15",
- "sourcePort": "15_out",
- "targetNode": "19",
- "targetPort": "19_in"
- },
- {
- "id": "port_16_to_port_20",
- "sourceNode": "16",
- "sourcePort": "16_out",
- "targetNode": "20",
- "targetPort": "20_in"
- },
- {
- "id": "port_17_to_port_21",
- "sourceNode": "17",
- "sourcePort": "17_out",
- "targetNode": "21",
- "targetPort": "21_in"
- },
- {
- "id": "port_18_to_port_22",
- "sourceNode": "18",
- "sourcePort": "18_out",
- "targetNode": "22",
- "targetPort": "22_in"
- },
- {
- "id": "port_19_to_port_23",
- "sourceNode": "19",
- "sourcePort": "19_out",
- "targetNode": "23",
- "targetPort": "23_in"
- },
- {
- "id": "port_20_to_port_1",
- "sourceNode": "20",
- "sourcePort": "20_out",
- "targetNode": "1",
- "targetPort": "1_in"
- },
- {
- "id": "port_21_to_port_1",
- "sourceNode": "21",
- "sourcePort": "21_out",
- "targetNode": "1",
- "targetPort": "1_in"
- },
- {
- "id": "port_22_to_port_1",
- "sourceNode": "22",
- "sourcePort": "22_out",
- "targetNode": "1",
- "targetPort": "1_in"
- },
- {
- "id": "port_23_to_port_1",
- "sourceNode": "23",
- "sourcePort": "23_out",
- "targetNode": "1",
- "targetPort": "1_in"
- },
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_2_to_port_24",
- "sourceNode": "2",
- "sourcePort": "2_out",
- "targetNode": "24",
- "targetPort": "24_in"
- },
- {
- "id": "port_24_to_port_3",
- "sourceNode": "24",
- "sourcePort": "24_out",
- "targetNode": "3",
- "targetPort": "3_in"
- }
- ],
- "hash": "0604d756765c915ea237e4795b7e394632189f1d",
- "nodes": {
- "0": {
- "data": {
- "advanced": {
- "join": []
- },
- "functionName": "on_start",
- "id": "0",
- "type": "start"
- },
- "errors": {},
- "id": "0",
- "type": "start",
- "warnings": {},
- "x": 530,
- "y": -6.394884621840902e-14
- },
- "1": {
- "data": {
- "advanced": {
- "join": []
- },
- "functionName": "on_finish",
- "id": "1",
- "type": "end"
- },
- "errors": {},
- "id": "1",
- "type": "end",
- "warnings": {},
- "x": 530,
- "y": 1398
- },
- "10": {
- "data": {
- "action": "run query",
- "actionType": "investigate",
- "advanced": {
- "customName": "run domain query",
- "customNameId": 0,
- "description": "May need to change search command dependent on your format block",
- "join": [],
- "note": "Splunk query execution"
- },
- "connector": "Splunk",
- "connectorConfigs": [
- "splunk"
- ],
- "connectorId": "91883aa8-9c81-470b-97a1-5d8f7995f560",
- "connectorVersion": "v1",
- "functionId": 3,
- "functionName": "run_domain_query",
- "id": "10",
- "parameters": {
- "command": "tstats",
- "query": "build_domain_query:formatted_data",
- "search_mode": "smart",
- "start_time": "-30d"
- },
- "requiredParameters": [
- {
- "data_type": "string",
- "field": "query"
- },
- {
- "data_type": "string",
- "default": "search",
- "field": "command"
- },
- {
- "data_type": "string",
- "default": "smart",
- "field": "search_mode"
- }
- ],
- "type": "action"
- },
- "errors": {},
- "id": "10",
- "type": "action",
- "warnings": {},
- "x": 680,
- "y": 684
- },
- "11": {
- "data": {
- "action": "run query",
- "actionType": "investigate",
- "advanced": {
- "customName": "run IP query",
- "customNameId": 0,
- "description": "May need to change search command dependent on your format block",
- "join": [],
- "note": "Splunk query execution"
- },
- "connector": "Splunk",
- "connectorConfigs": [
- "splunk"
- ],
- "connectorId": "91883aa8-9c81-470b-97a1-5d8f7995f560",
- "connectorVersion": "v1",
- "functionId": 4,
- "functionName": "run_ip_query",
- "id": "11",
- "parameters": {
- "command": "tstats",
- "display": "src, src_asset_id, src_dns, src_ip",
- "query": "build_ip_query:formatted_data",
- "search_mode": "smart",
- "start_time": "-30d"
- },
- "requiredParameters": [
- {
- "data_type": "string",
- "field": "query"
- },
- {
- "data_type": "string",
- "default": "search",
- "field": "command"
- },
- {
- "data_type": "string",
- "default": "smart",
- "field": "search_mode"
- }
+ "blockly": false,
+ "blockly_xml": "",
+ "category": "Identifier Activity Analysis",
+ "coa": {
+ "data": {
+ "description": "Accepts a file_hash, domain name, URL, or IP Address, and asks Splunk for a list of devices and users that have interacted with each. It then produces a normalized output and summary table. Defaults to -30d searches.",
+ "edges": [
+ {
+ "id": "port_0_to_port_2",
+ "sourceNode": "0",
+ "sourcePort": "0_out",
+ "targetNode": "2",
+ "targetPort": "2_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 1
+ }
+ ],
+ "id": "port_2_to_port_4",
+ "sourceNode": "2",
+ "sourcePort": "2_out",
+ "targetNode": "4",
+ "targetPort": "4_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 2
+ }
+ ],
+ "id": "port_2_to_port_5",
+ "sourceNode": "2",
+ "sourcePort": "2_out",
+ "targetNode": "5",
+ "targetPort": "5_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 3
+ }
+ ],
+ "id": "port_2_to_port_6",
+ "sourceNode": "2",
+ "sourcePort": "2_out",
+ "targetNode": "6",
+ "targetPort": "6_in"
+ },
+ {
+ "id": "port_3_to_port_8",
+ "sourceNode": "3",
+ "sourcePort": "3_out",
+ "targetNode": "8",
+ "targetPort": "8_in"
+ },
+ {
+ "id": "port_4_to_port_9",
+ "sourceNode": "4",
+ "sourcePort": "4_out",
+ "targetNode": "9",
+ "targetPort": "9_in"
+ },
+ {
+ "id": "port_5_to_port_10",
+ "sourceNode": "5",
+ "sourcePort": "5_out",
+ "targetNode": "10",
+ "targetPort": "10_in"
+ },
+ {
+ "id": "port_6_to_port_11",
+ "sourceNode": "6",
+ "sourcePort": "6_out",
+ "targetNode": "11",
+ "targetPort": "11_in"
+ },
+ {
+ "id": "port_8_to_port_12",
+ "sourceNode": "8",
+ "sourcePort": "8_out",
+ "targetNode": "12",
+ "targetPort": "12_in"
+ },
+ {
+ "id": "port_9_to_port_13",
+ "sourceNode": "9",
+ "sourcePort": "9_out",
+ "targetNode": "13",
+ "targetPort": "13_in"
+ },
+ {
+ "id": "port_10_to_port_14",
+ "sourceNode": "10",
+ "sourcePort": "10_out",
+ "targetNode": "14",
+ "targetPort": "14_in"
+ },
+ {
+ "id": "port_11_to_port_15",
+ "sourceNode": "11",
+ "sourcePort": "11_out",
+ "targetNode": "15",
+ "targetPort": "15_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
+ ],
+ "id": "port_12_to_port_16",
+ "sourceNode": "12",
+ "sourcePort": "12_out",
+ "targetNode": "16",
+ "targetPort": "16_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
+ ],
+ "id": "port_13_to_port_17",
+ "sourceNode": "13",
+ "sourcePort": "13_out",
+ "targetNode": "17",
+ "targetPort": "17_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
+ ],
+ "id": "port_14_to_port_18",
+ "sourceNode": "14",
+ "sourcePort": "14_out",
+ "targetNode": "18",
+ "targetPort": "18_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
+ ],
+ "id": "port_15_to_port_19",
+ "sourceNode": "15",
+ "sourcePort": "15_out",
+ "targetNode": "19",
+ "targetPort": "19_in"
+ },
+ {
+ "id": "port_16_to_port_20",
+ "sourceNode": "16",
+ "sourcePort": "16_out",
+ "targetNode": "20",
+ "targetPort": "20_in"
+ },
+ {
+ "id": "port_17_to_port_21",
+ "sourceNode": "17",
+ "sourcePort": "17_out",
+ "targetNode": "21",
+ "targetPort": "21_in"
+ },
+ {
+ "id": "port_18_to_port_22",
+ "sourceNode": "18",
+ "sourcePort": "18_out",
+ "targetNode": "22",
+ "targetPort": "22_in"
+ },
+ {
+ "id": "port_19_to_port_23",
+ "sourceNode": "19",
+ "sourcePort": "19_out",
+ "targetNode": "23",
+ "targetPort": "23_in"
+ },
+ {
+ "id": "port_20_to_port_1",
+ "sourceNode": "20",
+ "sourcePort": "20_out",
+ "targetNode": "1",
+ "targetPort": "1_in"
+ },
+ {
+ "id": "port_21_to_port_1",
+ "sourceNode": "21",
+ "sourcePort": "21_out",
+ "targetNode": "1",
+ "targetPort": "1_in"
+ },
+ {
+ "id": "port_22_to_port_1",
+ "sourceNode": "22",
+ "sourcePort": "22_out",
+ "targetNode": "1",
+ "targetPort": "1_in"
+ },
+ {
+ "id": "port_23_to_port_1",
+ "sourceNode": "23",
+ "sourcePort": "23_out",
+ "targetNode": "1",
+ "targetPort": "1_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
+ ],
+ "id": "port_2_to_port_24",
+ "sourceNode": "2",
+ "sourcePort": "2_out",
+ "targetNode": "24",
+ "targetPort": "24_in"
+ },
+ {
+ "id": "port_24_to_port_3",
+ "sourceNode": "24",
+ "sourcePort": "24_out",
+ "targetNode": "3",
+ "targetPort": "3_in"
+ }
],
- "type": "action"
- },
- "errors": {},
- "id": "11",
- "type": "action",
- "warnings": {},
- "x": 1020,
- "y": 684
- },
- "12": {
- "data": {
- "advanced": {
- "customName": "filter URL query",
- "customNameId": 0,
- "delimiter": ",",
- "delimiter_enabled": true,
- "join": [],
- "note": "only proceed when there are results"
+ "hash": "0604d756765c915ea237e4795b7e394632189f1d",
+ "nodes": {
+ "0": {
+ "data": {
+ "advanced": {
+ "join": []
+ },
+ "functionName": "on_start",
+ "id": "0",
+ "type": "start"
+ },
+ "errors": {},
+ "id": "0",
+ "type": "start",
+ "warnings": {},
+ "x": 530,
+ "y": -6.394884621840902e-14
+ },
+ "1": {
+ "data": {
+ "advanced": {
+ "join": []
+ },
+ "functionName": "on_finish",
+ "id": "1",
+ "type": "end"
+ },
+ "errors": {},
+ "id": "1",
+ "type": "end",
+ "warnings": {},
+ "x": 530,
+ "y": 1398
+ },
+ "10": {
+ "data": {
+ "action": "run query",
+ "actionType": "investigate",
+ "advanced": {
+ "customName": "run domain query",
+ "customNameId": 0,
+ "description": "May need to change search command dependent on your format block",
+ "join": [],
+ "note": "Splunk query execution"
+ },
+ "connector": "Splunk",
+ "connectorConfigs": [
+ "splunk"
+ ],
+ "connectorId": "91883aa8-9c81-470b-97a1-5d8f7995f560",
+ "connectorVersion": "v1",
+ "functionId": 3,
+ "functionName": "run_domain_query",
+ "id": "10",
+ "parameters": {
+ "command": "tstats",
+ "query": "build_domain_query:formatted_data",
+ "search_mode": "smart",
+ "start_time": "-30d"
+ },
+ "requiredParameters": [
+ {
+ "data_type": "string",
+ "field": "query"
+ },
+ {
+ "data_type": "string",
+ "default": "search",
+ "field": "command"
+ },
+ {
+ "data_type": "string",
+ "default": "smart",
+ "field": "search_mode"
+ }
+ ],
+ "type": "action"
+ },
+ "errors": {},
+ "id": "10",
+ "type": "action",
+ "warnings": {},
+ "x": 680,
+ "y": 684
+ },
+ "11": {
+ "data": {
+ "action": "run query",
+ "actionType": "investigate",
+ "advanced": {
+ "customName": "run IP query",
+ "customNameId": 0,
+ "description": "May need to change search command dependent on your format block",
+ "join": [],
+ "note": "Splunk query execution"
+ },
+ "connector": "Splunk",
+ "connectorConfigs": [
+ "splunk"
+ ],
+ "connectorId": "91883aa8-9c81-470b-97a1-5d8f7995f560",
+ "connectorVersion": "v1",
+ "functionId": 4,
+ "functionName": "run_ip_query",
+ "id": "11",
+ "parameters": {
+ "command": "tstats",
+ "display": "src, src_asset_id, src_dns, src_ip",
+ "query": "build_ip_query:formatted_data",
+ "search_mode": "smart",
+ "start_time": "-30d"
+ },
+ "requiredParameters": [
+ {
+ "data_type": "string",
+ "field": "query"
+ },
+ {
+ "data_type": "string",
+ "default": "search",
+ "field": "command"
+ },
+ {
+ "data_type": "string",
+ "default": "smart",
+ "field": "search_mode"
+ }
+ ],
+ "type": "action"
+ },
+ "errors": {},
+ "id": "11",
+ "type": "action",
+ "warnings": {},
+ "x": 1020,
+ "y": 684
+ },
+ "12": {
+ "data": {
+ "advanced": {
+ "customName": "filter URL query",
+ "customNameId": 0,
+ "delimiter": ",",
+ "delimiter_enabled": true,
+ "join": [],
+ "note": "only proceed when there are results"
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": ">",
+ "param": "run_url_query:action_result.summary.total_events",
+ "value": "0"
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "Results",
+ "logic": "and"
+ }
+ ],
+ "functionId": 2,
+ "functionName": "filter_url_query",
+ "id": "12",
+ "type": "filter"
+ },
+ "errors": {},
+ "id": "12",
+ "type": "filter",
+ "warnings": {},
+ "x": 60,
+ "y": 862
+ },
+ "13": {
+ "data": {
+ "advanced": {
+ "customName": "filter file query",
+ "customNameId": 0,
+ "delimiter": ",",
+ "delimiter_enabled": true,
+ "join": [],
+ "note": "only proceed when there are results"
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": ">",
+ "param": "run_file_query:action_result.summary.total_events",
+ "value": "0"
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "Results",
+ "logic": "and"
+ }
+ ],
+ "functionId": 3,
+ "functionName": "filter_file_query",
+ "id": "13",
+ "type": "filter"
+ },
+ "errors": {},
+ "id": "13",
+ "type": "filter",
+ "warnings": {},
+ "x": 400,
+ "y": 862
+ },
+ "14": {
+ "data": {
+ "advanced": {
+ "customName": "filter domain query",
+ "customNameId": 0,
+ "delimiter": ",",
+ "delimiter_enabled": true,
+ "join": [],
+ "note": "only proceed when there are results"
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": ">",
+ "param": "run_domain_query:action_result.summary.total_events",
+ "value": "0"
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "Results",
+ "logic": "and"
+ }
+ ],
+ "functionId": 4,
+ "functionName": "filter_domain_query",
+ "id": "14",
+ "type": "filter"
+ },
+ "errors": {},
+ "id": "14",
+ "type": "filter",
+ "warnings": {},
+ "x": 740,
+ "y": 862
+ },
+ "15": {
+ "data": {
+ "advanced": {
+ "customName": "filter IP query",
+ "customNameId": 0,
+ "delimiter": ",",
+ "delimiter_enabled": true,
+ "join": [],
+ "note": "only proceed when there are results"
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": ">",
+ "param": "run_ip_query:action_result.summary.total_events",
+ "value": "0"
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "Results",
+ "logic": "and"
+ }
+ ],
+ "functionId": 5,
+ "functionName": "filter_ip_query",
+ "id": "15",
+ "type": "filter"
+ },
+ "errors": {},
+ "id": "15",
+ "type": "filter",
+ "warnings": {},
+ "x": 1080,
+ "y": 862
+ },
+ "16": {
+ "data": {
+ "advanced": {
+ "customName": "format URL report",
+ "customNameId": 0,
+ "description": "Markdown report used in calling playbook",
+ "drop_none": true,
+ "join": [],
+ "note": "Markdown report output"
+ },
+ "functionId": 5,
+ "functionName": "format_url_report",
+ "id": "16",
+ "parameters": [
+ "filtered-data:input_filter:condition_1:playbook_input:url",
+ "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.src_dns",
+ "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.src_ip",
+ "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.src_asset_id",
+ "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.user"
+ ],
+ "template": "SOAR searched for occurrences of `{0}` within your environment using Splunk. The table below shows a summary of the information gathered.\n\n| URL | Computer | IP Address | Asset ID | User | Source |\n| --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {1} | {2} | {3} | {4} | Splunk |\n%%",
+ "type": "format"
+ },
+ "errors": {},
+ "id": "16",
+ "type": "format",
+ "warnings": {},
+ "x": 0,
+ "y": 1042
+ },
+ "17": {
+ "data": {
+ "advanced": {
+ "customName": "format file report",
+ "customNameId": 0,
+ "description": "Markdown report used in calling playbook",
+ "drop_none": true,
+ "join": [],
+ "note": "Markdown report output"
+ },
+ "functionId": 6,
+ "functionName": "format_file_report",
+ "id": "17",
+ "parameters": [
+ "filtered-data:input_filter:condition_2:playbook_input:file",
+ "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.dest_dns",
+ "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.dest_ip",
+ "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.dest_asset_id",
+ "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.user",
+ "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.process_name"
+ ],
+ "template": "SOAR searched for occurrences of `{0}` within your environment using Splunk. The table below shows a summary of the information gathered.\n\n| File | Process Name | Computer | IP Address | Asset ID | User | Source |\n| --- | --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {5} | {1} | {2} | {3} | {4} | Splunk |\n%%",
+ "type": "format"
+ },
+ "errors": {},
+ "id": "17",
+ "type": "format",
+ "warnings": {},
+ "x": 340,
+ "y": 1042
+ },
+ "18": {
+ "data": {
+ "advanced": {
+ "customName": "format domain report",
+ "customNameId": 0,
+ "description": "Markdown report used in calling playbook",
+ "drop_none": true,
+ "join": [],
+ "note": "Markdown report output"
+ },
+ "functionId": 7,
+ "functionName": "format_domain_report",
+ "id": "18",
+ "parameters": [
+ "filtered-data:input_filter:condition_3:playbook_input:domain",
+ "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_dns",
+ "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_ip",
+ "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_asset_id",
+ "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_user"
+ ],
+ "template": "SOAR searched for occurrences of `{0}` within your environment using Splunk. The table below shows a summary of the information gathered.\n\n| Domain | Computer | IP Address | Asset ID | User | Source |\n| --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {1} | {2} | {3} | {4} | Splunk |\n%%",
+ "type": "format"
+ },
+ "errors": {},
+ "id": "18",
+ "type": "format",
+ "warnings": {},
+ "x": 680,
+ "y": 1042
+ },
+ "19": {
+ "data": {
+ "advanced": {
+ "customName": "format ip report",
+ "customNameId": 0,
+ "description": "Markdown report used in calling playbook",
+ "drop_none": true,
+ "join": [],
+ "note": "Markdown report output"
+ },
+ "functionId": 8,
+ "functionName": "format_ip_report",
+ "id": "19",
+ "parameters": [
+ "filtered-data:input_filter:condition_4:playbook_input:ip",
+ "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.src_dns",
+ "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.src_ip",
+ "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.src_asset_id",
+ "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.user"
+ ],
+ "template": "SOAR searched for occurrences of `{0}` within your environment using Splunk. The table below shows a summary of the information gathered.\n\n| IP | Computer | IP Address | Asset ID | User | Source |\n| --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {1} | {2} | {3} | {4} | Splunk |\n%%",
+ "type": "format"
+ },
+ "errors": {},
+ "id": "19",
+ "type": "format",
+ "warnings": {},
+ "x": 1020,
+ "y": 1042
+ },
+ "2": {
+ "data": {
+ "advanced": {
+ "customName": "input filter",
+ "customNameId": 0,
+ "delimiter": ",",
+ "delimiter_enabled": true,
+ "join": [],
+ "note": "Routing paths by indicator type"
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": "!=",
+ "param": "playbook_input:url",
+ "value": "None"
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "url",
+ "logic": "and"
+ },
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 1,
+ "op": "!=",
+ "param": "playbook_input:file",
+ "value": "None"
+ }
+ ],
+ "conditionIndex": 1,
+ "customName": "file",
+ "logic": "and"
+ },
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 2,
+ "op": "!=",
+ "param": "playbook_input:domain",
+ "value": "None"
+ }
+ ],
+ "conditionIndex": 2,
+ "customName": "domain",
+ "logic": "and"
+ },
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 3,
+ "op": "!=",
+ "param": "playbook_input:ip",
+ "value": "None"
+ }
+ ],
+ "conditionIndex": 3,
+ "customName": "ip",
+ "logic": "and"
+ }
+ ],
+ "functionId": 1,
+ "functionName": "input_filter",
+ "id": "2",
+ "type": "filter"
+ },
+ "errors": {},
+ "id": "2",
+ "type": "filter",
+ "warnings": {},
+ "x": 580,
+ "y": 300
+ },
+ "20": {
+ "data": {
+ "advanced": {
+ "customName": "build URL output",
+ "customNameId": 0,
+ "description": "Observable object expected by calling playbook",
+ "join": [],
+ "note": "Observable output"
+ },
+ "functionId": 1,
+ "functionName": "build_url_output",
+ "id": "20",
+ "inputParameters": [
+ "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.url",
+ "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.src_dns",
+ "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.src_ip",
+ "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.src_asset_id",
+ "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.user"
+ ],
+ "outputVariables": [
+ "observable_array"
+ ],
+ "type": "code"
+ },
+ "errors": {},
+ "id": "20",
+ "type": "code",
+ "userCode": "\n # Init variables + convenience naming\n build_url_output__observable_array = []\n observable_dict = {}\n \n # Build device list\n for indicator, dns, ip, asset_id, user in zip(filtered_result_0_data___url, filtered_result_0_data___src_dns, filtered_result_0_data___src_ip, filtered_result_0_data___src_asset_id, filtered_result_0_data___user): \n \n device = {\n \"name\": dns,\n \"id\": asset_id, \n \"ip_address\": ip,\n \"operating_system\": \"Unknown\",\n \"user\": user\n }\n\n # Drop devices from list if we don't know anything about them\n if device.get(\"name\") == \"Unknown\" and device.get(\"id\") == \"Unknown\" and device.get(\"ip_address\") == \"Unknown\" and device.get(\"user\") == \"Unknown\":\n continue\n\n # Add to or update observable_dict\n if observable_dict.get('indicator'):\n observable_dict['identifier_activity'].append(device)\n else:\n observable_dict[indicator] ={\n \"value\": indicator,\n \"type\": \"url\",\n \"source\": \"Splunk\",\n \"identifier_activity\": [device]\n }\n\n for key in observable_dict.keys():\n observable_dict[key]['total_count'] = len(observable_dict[key]['identifier_activity'])\n build_url_output__observable_array.append(observable_dict[key])\n\n",
+ "warnings": {},
+ "x": 0,
+ "y": 1220
+ },
+ "21": {
+ "data": {
+ "advanced": {
+ "customName": "build file output",
+ "customNameId": 0,
+ "description": "Observable object expected by calling playbook",
+ "join": [],
+ "note": "Observable output"
+ },
+ "functionId": 2,
+ "functionName": "build_file_output",
+ "id": "21",
+ "inputParameters": [
+ "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.process_hash",
+ "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.dest_dns",
+ "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.dest_ip",
+ "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.dest_asset_id",
+ "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.user"
+ ],
+ "outputVariables": [
+ "observable_array"
+ ],
+ "type": "code"
+ },
+ "errors": {},
+ "id": "21",
+ "type": "code",
+ "userCode": "\n \n # Init variables + convenience naming\n build_file_output__observable_array = []\n observable_dict = {}\n \n # Build device list\n for indicator, dns, ip, asset_id, user in zip(filtered_result_0_data___process_hash, filtered_result_0_data___dest_dns, filtered_result_0_data___dest_ip, filtered_result_0_data___dest_asset_id, filtered_result_0_data___user): \n \n device = {\n \"name\": dns,\n \"id\": asset_id, \n \"ip_address\": ip,\n \"operating_system\": \"Unknown\",\n \"user\": user\n }\n\n # Drop devices from list if we don't know anything about them\n if device.get(\"name\") == \"Unknown\" and device.get(\"id\") == \"Unknown\" and device.get(\"ip_address\") == \"Unknown\" and device.get(\"user\") == \"Unknown\":\n continue\n\n # Add to or update observable_dict\n if observable_dict.get('indicator'):\n observable_dict['identifier_activity'].append(device)\n else:\n observable_dict[indicator] ={\n \"value\": indicator,\n \"type\": \"hash\",\n \"source\": \"Splunk\",\n \"identifier_activity\": [device]\n }\n\n for key in observable_dict.keys():\n observable_dict[key]['total_count'] = len(observable_dict[key]['identifier_activity'])\n build_file_output__observable_array.append(observable_dict[key])\n\n",
+ "warnings": {},
+ "x": 340,
+ "y": 1220
+ },
+ "22": {
+ "data": {
+ "advanced": {
+ "customName": "build domain output",
+ "customNameId": 0,
+ "description": "Observable object expected by calling playbook",
+ "join": [],
+ "note": "Observable output"
+ },
+ "functionId": 3,
+ "functionName": "build_domain_output",
+ "id": "22",
+ "inputParameters": [
+ "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.query",
+ "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_dns",
+ "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_ip",
+ "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_asset_id",
+ "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_user"
+ ],
+ "outputVariables": [
+ "observable_array"
+ ],
+ "type": "code"
+ },
+ "errors": {},
+ "id": "22",
+ "type": "code",
+ "userCode": "\n # Init variables + convenience naming\n build_domain_output__observable_array = []\n observable_dict = {}\n \n # Build device list\n for indicator, dns, ip, asset_id, user in zip(filtered_result_0_data___query, filtered_result_0_data___src_dns, filtered_result_0_data___src_ip, filtered_result_0_data___src_asset_id, filtered_result_0_data___src_user): \n \n device = {\n \"name\": dns,\n \"id\": asset_id, \n \"ip_address\": ip,\n \"operating_system\": \"Unknown\",\n \"user\": user\n }\n\n # Drop devices from list if we don't know anything about them\n if device.get(\"name\") == \"Unknown\" and device.get(\"id\") == \"Unknown\" and device.get(\"ip_address\") == \"Unknown\" and device.get(\"user\") == \"Unknown\":\n continue\n\n # Add to or update observable_dict\n if observable_dict.get('indicator'):\n observable_dict['identifier_activity'].append(device)\n else:\n observable_dict[indicator] ={\n \"value\": indicator,\n \"type\": \"url\",\n \"source\": \"Splunk\",\n \"identifier_activity\": [device]\n }\n\n for key in observable_dict.keys():\n observable_dict[key]['total_count'] = len(observable_dict[key]['identifier_activity'])\n build_domain_output__observable_array.append(observable_dict[key])\n \n",
+ "warnings": {},
+ "x": 680,
+ "y": 1220
+ },
+ "23": {
+ "data": {
+ "advanced": {
+ "customName": "build ip output",
+ "customNameId": 0,
+ "description": "Observable object expected by calling playbook",
+ "join": [],
+ "note": "Observable output"
+ },
+ "functionId": 4,
+ "functionName": "build_ip_output",
+ "id": "23",
+ "inputParameters": [
+ "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.dest_ip",
+ "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.src_dns",
+ "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.src_ip",
+ "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.src_asset_id",
+ "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.user"
+ ],
+ "outputVariables": [
+ "observable_array"
+ ],
+ "type": "code"
+ },
+ "errors": {},
+ "id": "23",
+ "type": "code",
+ "userCode": " \n # Init variables + convenience naming\n build_ip_output__observable_array = []\n observable_dict = {}\n\n # Build device list\n for indicator, dns, ip, asset_id, user in zip(filtered_result_0_data___dest_ip, filtered_result_0_data___src_dns, filtered_result_0_data___src_ip, filtered_result_0_data___src_asset_id, filtered_result_0_data___user): \n \n device = {\n \"name\": dns,\n \"id\": asset_id, \n \"ip_address\": ip,\n \"operating_system\": \"Unknown\",\n \"user\": user\n }\n\n # Drop devices from list if we don't know anything about them\n if device.get(\"name\") == \"Unknown\" and device.get(\"id\") == \"Unknown\" and device.get(\"ip_address\") == \"Unknown\" and device.get(\"user\") == \"Unknown\":\n continue\n\n # Add to or update observable_dict\n if observable_dict.get('indicator'):\n observable_dict['identifier_activity'].append(device)\n else:\n observable_dict[indicator] ={\n \"value\": indicator,\n \"type\": \"url\",\n \"source\": \"Splunk\",\n \"identifier_activity\": [device]\n }\n\n for key in observable_dict.keys():\n observable_dict[key]['total_count'] = len(observable_dict[key]['identifier_activity'])\n build_ip_output__observable_array.append(observable_dict[key])\n \n",
+ "warnings": {},
+ "x": 1020,
+ "y": 1220
+ },
+ "24": {
+ "data": {
+ "advanced": {
+ "customName": "parse url",
+ "customNameId": 0,
+ "description": "",
+ "join": [],
+ "note": "Parse URL into its components."
+ },
+ "customFunction": {
+ "draftMode": false,
+ "name": "url_parse",
+ "repoName": "community"
+ },
+ "functionId": 1,
+ "functionName": "parse_url",
+ "id": "24",
+ "selectMore": false,
+ "type": "utility",
+ "utilities": {
+ "url_parse": {
+ "description": "Separate a URL into its components using urlparse() from the urllib module of Python 3.",
+ "fields": [
+ {
+ "dataTypes": [
+ "url"
+ ],
+ "description": "The URL to parse",
+ "inputType": "item",
+ "label": "input_url",
+ "name": "input_url",
+ "placeholder": "artifact:*.cef.requestUrl",
+ "renderType": "datapath",
+ "required": false
+ }
+ ],
+ "label": "url_parse",
+ "name": "url_parse"
+ }
+ },
+ "utilityType": "custom_function",
+ "values": {
+ "url_parse": {
+ "input_url": "playbook_input:url"
+ }
+ }
+ },
+ "errors": {},
+ "id": "24",
+ "type": "utility",
+ "warnings": {},
+ "x": 0,
+ "y": 328
+ },
+ "3": {
+ "data": {
+ "advanced": {
+ "customName": "build URL query",
+ "customNameId": 0,
+ "description": "Query may need editing to reflect your splunk environment",
+ "join": [],
+ "note": "Splunk query template"
+ },
+ "functionId": 1,
+ "functionName": "build_url_query",
+ "id": "3",
+ "parameters": [
+ "parse_url:custom_function_result.data.netloc",
+ "parse_url:custom_function_result.data.path"
+ ],
+ "template": "count fillnull_value=\"Unknown\" from datamodel=Web.Web where Web.url IN (\n%%\n\"*{0}{1}*\" \n%%\n) by Web.src Web.user Web.url | `drop_dm_object_name(\"Web\")` | `get_asset(src)` | fields url, src, src_asset_id, src_dns, src_ip, user | fillnull value=\"Unknown\"",
+ "type": "format"
+ },
+ "errors": {},
+ "id": "3",
+ "type": "format",
+ "warnings": {},
+ "x": 0,
+ "y": 506
+ },
+ "4": {
+ "data": {
+ "advanced": {
+ "customName": "build file query",
+ "customNameId": 0,
+ "description": "Query may need editing to reflect your splunk environment",
+ "join": [],
+ "note": "Splunk query template"
+ },
+ "functionId": 2,
+ "functionName": "build_file_query",
+ "id": "4",
+ "parameters": [
+ "filtered-data:input_filter:condition_2:playbook_input:file"
+ ],
+ "template": "count fillnull_value=\"Unknown\" values(Processes.process_name) as process_name from datamodel=Endpoint.Processes where (Processes.process_hash IN (\n%%\n\"{0}\" \n%%\n) OR Processes.process_hash IN (\n%%\n\"*{0}*\" \n%%\n)) by Processes.dest Processes.user Processes.process_hash | `drop_dm_object_name(\"Processes\")` | `get_asset(dest)` | fields process_hash, process_name, dest, dest_asset_id, dest_dns, dest_ip, user | fillnull value=\"Unknown\"",
+ "type": "format"
+ },
+ "errors": {},
+ "id": "4",
+ "type": "format",
+ "warnings": {},
+ "x": 340,
+ "y": 500
+ },
+ "5": {
+ "data": {
+ "advanced": {
+ "customName": "build domain query",
+ "customNameId": 0,
+ "description": "Query may need editing to reflect your splunk environment",
+ "join": [],
+ "note": "Splunk query template"
+ },
+ "functionId": 3,
+ "functionName": "build_domain_query",
+ "id": "5",
+ "parameters": [
+ "filtered-data:input_filter:condition_3:playbook_input:domain"
+ ],
+ "template": "count from datamodel=Network_Resolution where DNS.query IN (\n%%\n\"{0}\" \n%%\n) by DNS.src DNS.query | `drop_dm_object_name(\"DNS\")` | `get_asset(src)` | fields query, src, src_asset_id, src_dns, src_ip, src_owner |rename src_owner as src_user | fillnull value=\"Unknown\"",
+ "type": "format"
+ },
+ "errors": {},
+ "id": "5",
+ "type": "format",
+ "warnings": {},
+ "x": 680,
+ "y": 506
+ },
+ "6": {
+ "data": {
+ "advanced": {
+ "customName": "build IP query",
+ "customNameId": 0,
+ "description": "Query may need editing to reflect your splunk environment",
+ "join": [],
+ "note": "Splunk query template"
+ },
+ "functionId": 4,
+ "functionName": "build_ip_query",
+ "id": "6",
+ "parameters": [
+ "filtered-data:input_filter:condition_4:playbook_input:ip"
+ ],
+ "template": "count fillnull_value=\"Unknown\" from datamodel=Network_Traffic where All_Traffic.dest_ip IN (\n%%\n\"{0}\" \n%%\n) by All_Traffic.src All_Traffic.user All_Traffic.dest_ip | `drop_dm_object_name(\"All_Traffic\")` | `get_asset(src)` | fields src, src_asset_id, src_dns, src_ip, dest_ip, user | fillnull value=\"Unknown\"",
+ "type": "format"
+ },
+ "errors": {},
+ "id": "6",
+ "type": "format",
+ "warnings": {},
+ "x": 1020,
+ "y": 506
+ },
+ "8": {
+ "data": {
+ "action": "run query",
+ "actionType": "investigate",
+ "advanced": {
+ "customName": "run URL query",
+ "customNameId": 0,
+ "description": "May need to change search command dependent on your format block",
+ "join": [],
+ "note": "Splunk query execution"
+ },
+ "connector": "Splunk",
+ "connectorConfigs": [
+ "splunk"
+ ],
+ "connectorId": "91883aa8-9c81-470b-97a1-5d8f7995f560",
+ "connectorVersion": "v1",
+ "functionId": 1,
+ "functionName": "run_url_query",
+ "id": "8",
+ "parameters": {
+ "command": "tstats",
+ "query": "build_url_query:formatted_data",
+ "search_mode": "smart",
+ "start_time": "-30d"
+ },
+ "requiredParameters": [
+ {
+ "data_type": "string",
+ "field": "query"
+ },
+ {
+ "data_type": "string",
+ "default": "search",
+ "field": "command"
+ },
+ {
+ "data_type": "string",
+ "default": "smart",
+ "field": "search_mode"
+ }
+ ],
+ "type": "action"
+ },
+ "errors": {},
+ "id": "8",
+ "type": "action",
+ "warnings": {},
+ "x": 0,
+ "y": 684
+ },
+ "9": {
+ "data": {
+ "action": "run query",
+ "actionType": "investigate",
+ "advanced": {
+ "customName": "run file query",
+ "customNameId": 0,
+ "description": "May need to change search command dependent on your format block",
+ "join": [],
+ "note": "Splunk query execution"
+ },
+ "connector": "Splunk",
+ "connectorConfigs": [
+ "splunk"
+ ],
+ "connectorId": "91883aa8-9c81-470b-97a1-5d8f7995f560",
+ "connectorVersion": "v1",
+ "functionId": 2,
+ "functionName": "run_file_query",
+ "id": "9",
+ "parameters": {
+ "command": "tstats",
+ "query": "build_file_query:formatted_data",
+ "search_mode": "smart",
+ "start_time": "-30d"
+ },
+ "requiredParameters": [
+ {
+ "data_type": "string",
+ "field": "query"
+ },
+ {
+ "data_type": "string",
+ "default": "search",
+ "field": "command"
+ },
+ {
+ "data_type": "string",
+ "default": "smart",
+ "field": "search_mode"
+ }
+ ],
+ "type": "action"
+ },
+ "errors": {},
+ "id": "9",
+ "type": "action",
+ "warnings": {},
+ "x": 340,
+ "y": 684
+ }
},
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": ">",
- "param": "run_url_query:action_result.summary.total_events",
- "value": "0"
- }
- ],
- "conditionIndex": 0,
- "customName": "Results",
- "logic": "and"
- }
- ],
- "functionId": 2,
- "functionName": "filter_url_query",
- "id": "12",
- "type": "filter"
- },
- "errors": {},
- "id": "12",
- "type": "filter",
- "warnings": {},
- "x": 60,
- "y": 862
+ "notes": "Inputs: file_hash, domain, url, ip_address\nInteractions: Splunk\nActions: run_query\nOutputs: observable, markdown report"
},
- "13": {
- "data": {
- "advanced": {
- "customName": "filter file query",
- "customNameId": 0,
- "delimiter": ",",
- "delimiter_enabled": true,
- "join": [],
- "note": "only proceed when there are results"
- },
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": ">",
- "param": "run_file_query:action_result.summary.total_events",
- "value": "0"
- }
+ "input_spec": [
+ {
+ "contains": [
+ "url"
],
- "conditionIndex": 0,
- "customName": "Results",
- "logic": "and"
- }
- ],
- "functionId": 3,
- "functionName": "filter_file_query",
- "id": "13",
- "type": "filter"
- },
- "errors": {},
- "id": "13",
- "type": "filter",
- "warnings": {},
- "x": 400,
- "y": 862
- },
- "14": {
- "data": {
- "advanced": {
- "customName": "filter domain query",
- "customNameId": 0,
- "delimiter": ",",
- "delimiter_enabled": true,
- "join": [],
- "note": "only proceed when there are results"
+ "description": "A URL to look for within your events in Splunk",
+ "name": "url"
},
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": ">",
- "param": "run_domain_query:action_result.summary.total_events",
- "value": "0"
- }
+ {
+ "contains": [
+ "hash"
],
- "conditionIndex": 0,
- "customName": "Results",
- "logic": "and"
- }
- ],
- "functionId": 4,
- "functionName": "filter_domain_query",
- "id": "14",
- "type": "filter"
- },
- "errors": {},
- "id": "14",
- "type": "filter",
- "warnings": {},
- "x": 740,
- "y": 862
- },
- "15": {
- "data": {
- "advanced": {
- "customName": "filter IP query",
- "customNameId": 0,
- "delimiter": ",",
- "delimiter_enabled": true,
- "join": [],
- "note": "only proceed when there are results"
+ "description": "A file hash to look for within your events in Splunk",
+ "name": "file"
},
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": ">",
- "param": "run_ip_query:action_result.summary.total_events",
- "value": "0"
- }
+ {
+ "contains": [
+ "domain"
],
- "conditionIndex": 0,
- "customName": "Results",
- "logic": "and"
- }
- ],
- "functionId": 5,
- "functionName": "filter_ip_query",
- "id": "15",
- "type": "filter"
- },
- "errors": {},
- "id": "15",
- "type": "filter",
- "warnings": {},
- "x": 1080,
- "y": 862
- },
- "16": {
- "data": {
- "advanced": {
- "customName": "format URL report",
- "customNameId": 0,
- "description": "Markdown report used in calling playbook",
- "drop_none": true,
- "join": [],
- "note": "Markdown report output"
- },
- "functionId": 5,
- "functionName": "format_url_report",
- "id": "16",
- "parameters": [
- "filtered-data:input_filter:condition_1:playbook_input:url",
- "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.src_dns",
- "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.src_ip",
- "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.src_asset_id",
- "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.user"
- ],
- "template": "SOAR searched for occurrences of `{0}` within your environment using Splunk. The table below shows a summary of the information gathered.\n\n| URL | Computer | IP Address | Asset ID | User | Source |\n| --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {1} | {2} | {3} | {4} | Splunk |\n%%",
- "type": "format"
- },
- "errors": {},
- "id": "16",
- "type": "format",
- "warnings": {},
- "x": 0,
- "y": 1042
- },
- "17": {
- "data": {
- "advanced": {
- "customName": "format file report",
- "customNameId": 0,
- "description": "Markdown report used in calling playbook",
- "drop_none": true,
- "join": [],
- "note": "Markdown report output"
- },
- "functionId": 6,
- "functionName": "format_file_report",
- "id": "17",
- "parameters": [
- "filtered-data:input_filter:condition_2:playbook_input:file",
- "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.dest_dns",
- "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.dest_ip",
- "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.dest_asset_id",
- "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.user",
- "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.process_name"
- ],
- "template": "SOAR searched for occurrences of `{0}` within your environment using Splunk. The table below shows a summary of the information gathered.\n\n| File | Process Name | Computer | IP Address | Asset ID | User | Source |\n| --- | --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {5} | {1} | {2} | {3} | {4} | Splunk |\n%%",
- "type": "format"
- },
- "errors": {},
- "id": "17",
- "type": "format",
- "warnings": {},
- "x": 340,
- "y": 1042
- },
- "18": {
- "data": {
- "advanced": {
- "customName": "format domain report",
- "customNameId": 0,
- "description": "Markdown report used in calling playbook",
- "drop_none": true,
- "join": [],
- "note": "Markdown report output"
- },
- "functionId": 7,
- "functionName": "format_domain_report",
- "id": "18",
- "parameters": [
- "filtered-data:input_filter:condition_3:playbook_input:domain",
- "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_dns",
- "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_ip",
- "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_asset_id",
- "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_user"
- ],
- "template": "SOAR searched for occurrences of `{0}` within your environment using Splunk. The table below shows a summary of the information gathered.\n\n| Domain | Computer | IP Address | Asset ID | User | Source |\n| --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {1} | {2} | {3} | {4} | Splunk |\n%%",
- "type": "format"
- },
- "errors": {},
- "id": "18",
- "type": "format",
- "warnings": {},
- "x": 680,
- "y": 1042
- },
- "19": {
- "data": {
- "advanced": {
- "customName": "format ip report",
- "customNameId": 0,
- "description": "Markdown report used in calling playbook",
- "drop_none": true,
- "join": [],
- "note": "Markdown report output"
- },
- "functionId": 8,
- "functionName": "format_ip_report",
- "id": "19",
- "parameters": [
- "filtered-data:input_filter:condition_4:playbook_input:ip",
- "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.src_dns",
- "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.src_ip",
- "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.src_asset_id",
- "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.user"
- ],
- "template": "SOAR searched for occurrences of `{0}` within your environment using Splunk. The table below shows a summary of the information gathered.\n\n| IP | Computer | IP Address | Asset ID | User | Source |\n| --- | --- | --- | --- | --- | --- |\n%%\n| `{0}` | {1} | {2} | {3} | {4} | Splunk |\n%%",
- "type": "format"
- },
- "errors": {},
- "id": "19",
- "type": "format",
- "warnings": {},
- "x": 1020,
- "y": 1042
- },
- "2": {
- "data": {
- "advanced": {
- "customName": "input filter",
- "customNameId": 0,
- "delimiter": ",",
- "delimiter_enabled": true,
- "join": [],
- "note": "Routing paths by indicator type"
+ "description": "A domain name to look for within your events in Splunk",
+ "name": "domain"
},
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": "!=",
- "param": "playbook_input:url",
- "value": "None"
- }
- ],
- "conditionIndex": 0,
- "customName": "url",
- "logic": "and"
- },
- {
- "comparisons": [
- {
- "conditionIndex": 1,
- "op": "!=",
- "param": "playbook_input:file",
- "value": "None"
- }
- ],
- "conditionIndex": 1,
- "customName": "file",
- "logic": "and"
- },
- {
- "comparisons": [
- {
- "conditionIndex": 2,
- "op": "!=",
- "param": "playbook_input:domain",
- "value": "None"
- }
+ {
+ "contains": [
+ "ip"
],
- "conditionIndex": 2,
- "customName": "domain",
- "logic": "and"
- },
- {
- "comparisons": [
- {
- "conditionIndex": 3,
- "op": "!=",
- "param": "playbook_input:ip",
- "value": "None"
- }
+ "description": "An IP Address to look for within your events in Splunk",
+ "name": "ip"
+ }
+ ],
+ "output_spec": [
+ {
+ "contains": [],
+ "datapaths": [
+ "build_url_output:custom_function:observable_array",
+ "build_domain_output:custom_function:observable_array",
+ "build_ip_output:custom_function:observable_array",
+ "build_file_output:custom_function:observable_array"
],
- "conditionIndex": 3,
- "customName": "ip",
- "logic": "and"
- }
- ],
- "functionId": 1,
- "functionName": "input_filter",
- "id": "2",
- "type": "filter"
- },
- "errors": {},
- "id": "2",
- "type": "filter",
- "warnings": {},
- "x": 580,
- "y": 300
- },
- "20": {
- "data": {
- "advanced": {
- "customName": "build URL output",
- "customNameId": 0,
- "description": "Observable object expected by calling playbook",
- "join": [],
- "note": "Observable output"
- },
- "functionId": 1,
- "functionName": "build_url_output",
- "id": "20",
- "inputParameters": [
- "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.url",
- "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.src_dns",
- "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.src_ip",
- "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.src_asset_id",
- "filtered-data:filter_url_query:condition_1:run_url_query:action_result.data.*.user"
- ],
- "outputVariables": [
- "observable_array"
- ],
- "type": "code"
- },
- "errors": {},
- "id": "20",
- "type": "code",
- "userCode": "\n # Init variables + convenience naming\n build_url_output__observable_array = []\n observable_dict = {}\n \n # Build device list\n for indicator, dns, ip, asset_id, user in zip(filtered_result_0_data___url, filtered_result_0_data___src_dns, filtered_result_0_data___src_ip, filtered_result_0_data___src_asset_id, filtered_result_0_data___user): \n \n device = {\n \"name\": dns,\n \"id\": asset_id, \n \"ip_address\": ip,\n \"operating_system\": \"Unknown\",\n \"user\": user\n }\n\n # Drop devices from list if we don't know anything about them\n if device.get(\"name\") == \"Unknown\" and device.get(\"id\") == \"Unknown\" and device.get(\"ip_address\") == \"Unknown\" and device.get(\"user\") == \"Unknown\":\n continue\n\n # Add to or update observable_dict\n if observable_dict.get('indicator'):\n observable_dict['identifier_activity'].append(device)\n else:\n observable_dict[indicator] ={\n \"value\": indicator,\n \"type\": \"url\",\n \"source\": \"Splunk\",\n \"identifier_activity\": [device]\n }\n\n for key in observable_dict.keys():\n observable_dict[key]['total_count'] = len(observable_dict[key]['identifier_activity'])\n build_url_output__observable_array.append(observable_dict[key])\n\n",
- "warnings": {},
- "x": 0,
- "y": 1220
- },
- "21": {
- "data": {
- "advanced": {
- "customName": "build file output",
- "customNameId": 0,
- "description": "Observable object expected by calling playbook",
- "join": [],
- "note": "Observable output"
- },
- "functionId": 2,
- "functionName": "build_file_output",
- "id": "21",
- "inputParameters": [
- "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.process_hash",
- "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.dest_dns",
- "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.dest_ip",
- "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.dest_asset_id",
- "filtered-data:filter_file_query:condition_1:run_file_query:action_result.data.*.user"
- ],
- "outputVariables": [
- "observable_array"
- ],
- "type": "code"
- },
- "errors": {},
- "id": "21",
- "type": "code",
- "userCode": "\n \n # Init variables + convenience naming\n build_file_output__observable_array = []\n observable_dict = {}\n \n # Build device list\n for indicator, dns, ip, asset_id, user in zip(filtered_result_0_data___process_hash, filtered_result_0_data___dest_dns, filtered_result_0_data___dest_ip, filtered_result_0_data___dest_asset_id, filtered_result_0_data___user): \n \n device = {\n \"name\": dns,\n \"id\": asset_id, \n \"ip_address\": ip,\n \"operating_system\": \"Unknown\",\n \"user\": user\n }\n\n # Drop devices from list if we don't know anything about them\n if device.get(\"name\") == \"Unknown\" and device.get(\"id\") == \"Unknown\" and device.get(\"ip_address\") == \"Unknown\" and device.get(\"user\") == \"Unknown\":\n continue\n\n # Add to or update observable_dict\n if observable_dict.get('indicator'):\n observable_dict['identifier_activity'].append(device)\n else:\n observable_dict[indicator] ={\n \"value\": indicator,\n \"type\": \"hash\",\n \"source\": \"Splunk\",\n \"identifier_activity\": [device]\n }\n\n for key in observable_dict.keys():\n observable_dict[key]['total_count'] = len(observable_dict[key]['identifier_activity'])\n build_file_output__observable_array.append(observable_dict[key])\n\n",
- "warnings": {},
- "x": 340,
- "y": 1220
- },
- "22": {
- "data": {
- "advanced": {
- "customName": "build domain output",
- "customNameId": 0,
- "description": "Observable object expected by calling playbook",
- "join": [],
- "note": "Observable output"
- },
- "functionId": 3,
- "functionName": "build_domain_output",
- "id": "22",
- "inputParameters": [
- "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.query",
- "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_dns",
- "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_ip",
- "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_asset_id",
- "filtered-data:filter_domain_query:condition_1:run_domain_query:action_result.data.*.src_user"
- ],
- "outputVariables": [
- "observable_array"
- ],
- "type": "code"
- },
- "errors": {},
- "id": "22",
- "type": "code",
- "userCode": "\n # Init variables + convenience naming\n build_domain_output__observable_array = []\n observable_dict = {}\n \n # Build device list\n for indicator, dns, ip, asset_id, user in zip(filtered_result_0_data___query, filtered_result_0_data___src_dns, filtered_result_0_data___src_ip, filtered_result_0_data___src_asset_id, filtered_result_0_data___src_user): \n \n device = {\n \"name\": dns,\n \"id\": asset_id, \n \"ip_address\": ip,\n \"operating_system\": \"Unknown\",\n \"user\": user\n }\n\n # Drop devices from list if we don't know anything about them\n if device.get(\"name\") == \"Unknown\" and device.get(\"id\") == \"Unknown\" and device.get(\"ip_address\") == \"Unknown\" and device.get(\"user\") == \"Unknown\":\n continue\n\n # Add to or update observable_dict\n if observable_dict.get('indicator'):\n observable_dict['identifier_activity'].append(device)\n else:\n observable_dict[indicator] ={\n \"value\": indicator,\n \"type\": \"url\",\n \"source\": \"Splunk\",\n \"identifier_activity\": [device]\n }\n\n for key in observable_dict.keys():\n observable_dict[key]['total_count'] = len(observable_dict[key]['identifier_activity'])\n build_domain_output__observable_array.append(observable_dict[key])\n \n",
- "warnings": {},
- "x": 680,
- "y": 1220
- },
- "23": {
- "data": {
- "advanced": {
- "customName": "build ip output",
- "customNameId": 0,
- "description": "Observable object expected by calling playbook",
- "join": [],
- "note": "Observable output"
- },
- "functionId": 4,
- "functionName": "build_ip_output",
- "id": "23",
- "inputParameters": [
- "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.dest_ip",
- "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.src_dns",
- "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.src_ip",
- "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.src_asset_id",
- "filtered-data:filter_ip_query:condition_1:run_ip_query:action_result.data.*.user"
- ],
- "outputVariables": [
- "observable_array"
- ],
- "type": "code"
- },
- "errors": {},
- "id": "23",
- "type": "code",
- "userCode": " \n # Init variables + convenience naming\n build_ip_output__observable_array = []\n observable_dict = {}\n\n # Build device list\n for indicator, dns, ip, asset_id, user in zip(filtered_result_0_data___dest_ip, filtered_result_0_data___src_dns, filtered_result_0_data___src_ip, filtered_result_0_data___src_asset_id, filtered_result_0_data___user): \n \n device = {\n \"name\": dns,\n \"id\": asset_id, \n \"ip_address\": ip,\n \"operating_system\": \"Unknown\",\n \"user\": user\n }\n\n # Drop devices from list if we don't know anything about them\n if device.get(\"name\") == \"Unknown\" and device.get(\"id\") == \"Unknown\" and device.get(\"ip_address\") == \"Unknown\" and device.get(\"user\") == \"Unknown\":\n continue\n\n # Add to or update observable_dict\n if observable_dict.get('indicator'):\n observable_dict['identifier_activity'].append(device)\n else:\n observable_dict[indicator] ={\n \"value\": indicator,\n \"type\": \"url\",\n \"source\": \"Splunk\",\n \"identifier_activity\": [device]\n }\n\n for key in observable_dict.keys():\n observable_dict[key]['total_count'] = len(observable_dict[key]['identifier_activity'])\n build_ip_output__observable_array.append(observable_dict[key])\n \n",
- "warnings": {},
- "x": 1020,
- "y": 1220
- },
- "24": {
- "data": {
- "advanced": {
- "customName": "parse url",
- "customNameId": 0,
- "description": "",
- "join": [],
- "note": "Parse URL into its components."
- },
- "customFunction": {
- "draftMode": false,
- "name": "url_parse",
- "repoName": "community"
+ "deduplicate": false,
+ "description": "An array of Observable objects ",
+ "metadata": {},
+ "name": "observable"
},
- "functionId": 1,
- "functionName": "parse_url",
- "id": "24",
- "selectMore": false,
- "type": "utility",
- "utilities": {
- "url_parse": {
- "description": "Separate a URL into its components using urlparse() from the urllib module of Python 3.",
- "fields": [
- {
- "dataTypes": [
- "url"
- ],
- "description": "The URL to parse",
- "inputType": "item",
- "label": "input_url",
- "name": "input_url",
- "placeholder": "artifact:*.cef.requestUrl",
- "renderType": "datapath",
- "required": false
- }
+ {
+ "contains": [],
+ "datapaths": [
+ "format_url_report:formatted_data",
+ "format_file_report:formatted_data",
+ "format_domain_report:formatted_data",
+ "format_ip_report:formatted_data"
],
- "label": "url_parse",
- "name": "url_parse"
- }
- },
- "utilityType": "custom_function",
- "values": {
- "url_parse": {
- "input_url": "playbook_input:url"
- }
+ "deduplicate": false,
+ "description": "An array of markdown reports",
+ "metadata": {},
+ "name": "markdown_report"
}
- },
- "errors": {},
- "id": "24",
- "type": "utility",
- "warnings": {},
- "x": 0,
- "y": 328
- },
- "3": {
- "data": {
- "advanced": {
- "customName": "build URL query",
- "customNameId": 0,
- "description": "Query may need editing to reflect your splunk environment",
- "join": [],
- "note": "Splunk query template"
- },
- "functionId": 1,
- "functionName": "build_url_query",
- "id": "3",
- "parameters": [
- "parse_url:custom_function_result.data.netloc",
- "parse_url:custom_function_result.data.path"
- ],
- "template": "count fillnull_value=\"Unknown\" from datamodel=Web.Web where Web.url IN (\n%%\n\"*{0}{1}*\" \n%%\n) by Web.src Web.user Web.url | `drop_dm_object_name(\"Web\")` | `get_asset(src)` | fields url, src, src_asset_id, src_dns, src_ip, user | fillnull value=\"Unknown\"",
- "type": "format"
- },
- "errors": {},
- "id": "3",
- "type": "format",
- "warnings": {},
- "x": 0,
- "y": 506
- },
- "4": {
- "data": {
- "advanced": {
- "customName": "build file query",
- "customNameId": 0,
- "description": "Query may need editing to reflect your splunk environment",
- "join": [],
- "note": "Splunk query template"
- },
- "functionId": 2,
- "functionName": "build_file_query",
- "id": "4",
- "parameters": [
- "filtered-data:input_filter:condition_2:playbook_input:file"
- ],
- "template": "count fillnull_value=\"Unknown\" values(Processes.process_name) as process_name from datamodel=Endpoint.Processes where (Processes.process_hash IN (\n%%\n\"{0}\" \n%%\n) OR Processes.process_hash IN (\n%%\n\"*{0}*\" \n%%\n)) by Processes.dest Processes.user Processes.process_hash | `drop_dm_object_name(\"Processes\")` | `get_asset(dest)` | fields process_hash, process_name, dest, dest_asset_id, dest_dns, dest_ip, user | fillnull value=\"Unknown\"",
- "type": "format"
- },
- "errors": {},
- "id": "4",
- "type": "format",
- "warnings": {},
- "x": 340,
- "y": 500
- },
- "5": {
- "data": {
- "advanced": {
- "customName": "build domain query",
- "customNameId": 0,
- "description": "Query may need editing to reflect your splunk environment",
- "join": [],
- "note": "Splunk query template"
- },
- "functionId": 3,
- "functionName": "build_domain_query",
- "id": "5",
- "parameters": [
- "filtered-data:input_filter:condition_3:playbook_input:domain"
- ],
- "template": "count from datamodel=Network_Resolution where DNS.query IN (\n%%\n\"{0}\" \n%%\n) by DNS.src DNS.query | `drop_dm_object_name(\"DNS\")` | `get_asset(src)` | fields query, src, src_asset_id, src_dns, src_ip, src_owner |rename src_owner as src_user | fillnull value=\"Unknown\"",
- "type": "format"
- },
- "errors": {},
- "id": "5",
- "type": "format",
- "warnings": {},
- "x": 680,
- "y": 506
- },
- "6": {
- "data": {
- "advanced": {
- "customName": "build IP query",
- "customNameId": 0,
- "description": "Query may need editing to reflect your splunk environment",
- "join": [],
- "note": "Splunk query template"
- },
- "functionId": 4,
- "functionName": "build_ip_query",
- "id": "6",
- "parameters": [
- "filtered-data:input_filter:condition_4:playbook_input:ip"
- ],
- "template": "count fillnull_value=\"Unknown\" from datamodel=Network_Traffic where All_Traffic.dest_ip IN (\n%%\n\"{0}\" \n%%\n) by All_Traffic.src All_Traffic.user All_Traffic.dest_ip | `drop_dm_object_name(\"All_Traffic\")` | `get_asset(src)` | fields src, src_asset_id, src_dns, src_ip, dest_ip, user | fillnull value=\"Unknown\"",
- "type": "format"
- },
- "errors": {},
- "id": "6",
- "type": "format",
- "warnings": {},
- "x": 1020,
- "y": 506
- },
- "8": {
- "data": {
- "action": "run query",
- "actionType": "investigate",
- "advanced": {
- "customName": "run URL query",
- "customNameId": 0,
- "description": "May need to change search command dependent on your format block",
- "join": [],
- "note": "Splunk query execution"
- },
- "connector": "Splunk",
- "connectorConfigs": [
- "splunk"
- ],
- "connectorId": "91883aa8-9c81-470b-97a1-5d8f7995f560",
- "connectorVersion": "v1",
- "functionId": 1,
- "functionName": "run_url_query",
- "id": "8",
- "parameters": {
- "command": "tstats",
- "query": "build_url_query:formatted_data",
- "search_mode": "smart",
- "start_time": "-30d"
- },
- "requiredParameters": [
- {
- "data_type": "string",
- "field": "query"
- },
- {
- "data_type": "string",
- "default": "search",
- "field": "command"
- },
- {
- "data_type": "string",
- "default": "smart",
- "field": "search_mode"
- }
- ],
- "type": "action"
- },
- "errors": {},
- "id": "8",
- "type": "action",
- "warnings": {},
- "x": 0,
- "y": 684
- },
- "9": {
- "data": {
- "action": "run query",
- "actionType": "investigate",
- "advanced": {
- "customName": "run file query",
- "customNameId": 0,
- "description": "May need to change search command dependent on your format block",
- "join": [],
- "note": "Splunk query execution"
- },
- "connector": "Splunk",
- "connectorConfigs": [
- "splunk"
- ],
- "connectorId": "91883aa8-9c81-470b-97a1-5d8f7995f560",
- "connectorVersion": "v1",
- "functionId": 2,
- "functionName": "run_file_query",
- "id": "9",
- "parameters": {
- "command": "tstats",
- "query": "build_file_query:formatted_data",
- "search_mode": "smart",
- "start_time": "-30d"
- },
- "requiredParameters": [
- {
- "data_type": "string",
- "field": "query"
- },
- {
- "data_type": "string",
- "default": "search",
- "field": "command"
- },
- {
- "data_type": "string",
- "default": "smart",
- "field": "search_mode"
- }
- ],
- "type": "action"
- },
- "errors": {},
- "id": "9",
- "type": "action",
- "warnings": {},
- "x": 340,
- "y": 684
- }
- },
- "notes": "Inputs: file_hash, domain, url, ip_address\nInteractions: Splunk\nActions: run_query\nOutputs: observable, markdown report"
- },
- "input_spec": [
- {
- "contains": [
- "url"
- ],
- "description": "A URL to look for within your events in Splunk",
- "name": "url"
- },
- {
- "contains": [
- "hash"
- ],
- "description": "A file hash to look for within your events in Splunk",
- "name": "file"
- },
- {
- "contains": [
- "domain"
- ],
- "description": "A domain name to look for within your events in Splunk",
- "name": "domain"
- },
- {
- "contains": [
- "ip"
],
- "description": "An IP Address to look for within your events in Splunk",
- "name": "ip"
- }
- ],
- "output_spec": [
- {
- "contains": [],
- "datapaths": [
- "build_url_output:custom_function:observable_array",
- "build_domain_output:custom_function:observable_array",
- "build_ip_output:custom_function:observable_array",
- "build_file_output:custom_function:observable_array"
- ],
- "deduplicate": false,
- "description": "An array of Observable objects ",
- "metadata": {},
- "name": "observable"
- },
- {
- "contains": [],
- "datapaths": [
- "format_url_report:formatted_data",
- "format_file_report:formatted_data",
- "format_domain_report:formatted_data",
- "format_ip_report:formatted_data"
- ],
- "deduplicate": false,
- "description": "An array of markdown reports",
- "metadata": {},
- "name": "markdown_report"
- }
+ "playbook_type": "data",
+ "python_version": "3.13",
+ "schema": "5.0.10",
+ "version": "6.1.1.211"
+ },
+ "create_time": "2023-12-21T15:17:45.576110+00:00",
+ "draft_mode": false,
+ "labels": [
+ "*"
],
- "playbook_type": "data",
- "python_version": "3",
- "schema": "5.0.10",
- "version": "6.1.1.211"
- },
- "create_time": "2023-12-21T15:17:45.576110+00:00",
- "draft_mode": false,
- "labels": [
- "*"
- ],
- "tags": [
- "domain",
- "url",
- "ip",
- "file_hash",
- "splunk",
- "identifier_activity",
- "D3-IAA"
- ]
-}
+ "tags": [
+ "domain",
+ "url",
+ "ip",
+ "file_hash",
+ "splunk",
+ "identifier_activity",
+ "D3-IAA"
+ ]
+}
\ No newline at end of file
diff --git a/playbooks/Splunk_Identifier_Activity_Analysis.png b/playbooks/Splunk_Identifier_Activity_Analysis.png
index 9bce6b26e0..493573875c 100644
Binary files a/playbooks/Splunk_Identifier_Activity_Analysis.png and b/playbooks/Splunk_Identifier_Activity_Analysis.png differ
diff --git a/playbooks/Splunk_Message_Identifier_Activity_Analysis.json b/playbooks/Splunk_Message_Identifier_Activity_Analysis.json
index 7523f80f54..740804605f 100644
--- a/playbooks/Splunk_Message_Identifier_Activity_Analysis.json
+++ b/playbooks/Splunk_Message_Identifier_Activity_Analysis.json
@@ -1,389 +1,389 @@
{
- "blockly": false,
- "blockly_xml": "",
- "category": "Message Identifier Activity Analysis",
- "coa": {
- "data": {
- "description": "Accepts an internet message id, and asks Splunk \n to look for records that have a matching internet message id. It then produces a normalized output and summary table.\n\nRef: D3-IAA https://d3fend.mitre.org/technique/d3f:IdentifierActivityAnalysis/",
- "edges": [
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_3_to_port_5",
- "sourceNode": "3",
- "sourcePort": "3_out",
- "targetNode": "5",
- "targetPort": "5_in"
- },
- {
- "id": "port_5_to_port_6",
- "sourceNode": "5",
- "sourcePort": "5_out",
- "targetNode": "6",
- "targetPort": "6_in"
- },
- {
- "id": "port_7_to_port_8",
- "sourceNode": "7",
- "sourcePort": "7_out",
- "targetNode": "8",
- "targetPort": "8_in"
- },
- {
- "id": "port_8_to_port_1",
- "sourceNode": "8",
- "sourcePort": "8_out",
- "targetNode": "1",
- "targetPort": "1_in"
- },
- {
- "id": "port_6_to_port_9",
- "sourceNode": "6",
- "sourcePort": "6_out",
- "targetNode": "9",
- "targetPort": "9_in"
- },
- {
- "conditions": [
- {
- "index": 0
- }
- ],
- "id": "port_9_to_port_7",
- "sourceNode": "9",
- "sourcePort": "9_out",
- "targetNode": "7",
- "targetPort": "7_in"
- },
- {
- "id": "port_0_to_port_3",
- "sourceNode": "0",
- "sourcePort": "0_out",
- "targetNode": "3",
- "targetPort": "3_in"
- }
- ],
- "hash": "03b4a7115a05c8be38f151e65c6feeeec2d4a77a",
- "nodes": {
- "0": {
- "data": {
- "advanced": {
- "join": []
- },
- "functionName": "on_start",
- "id": "0",
- "type": "start"
- },
- "errors": {},
- "id": "0",
- "type": "start",
- "warnings": {},
- "x": 20,
- "y": 0
- },
- "1": {
- "data": {
- "advanced": {
- "join": []
- },
- "functionName": "on_finish",
- "id": "1",
- "type": "end"
- },
- "errors": {},
- "id": "1",
- "type": "end",
- "warnings": {},
- "x": 20,
- "y": 1220
- },
- "3": {
- "data": {
- "advanced": {
- "customName": "input filter",
- "customNameId": 0,
- "delimiter": ",",
- "delimiter_enabled": true,
- "join": [],
- "note": "Ensure there is an input"
- },
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": "!=",
- "param": "playbook_input:message_id",
- "value": "None"
- },
- {
- "op": "!=",
- "param": "playbook_input:sender",
- "value": "None"
- },
- {
- "op": "!=",
- "param": "playbook_input:subject",
- "value": "None"
- }
- ],
- "conditionIndex": 0,
- "customName": "Input Valid",
- "logic": "or"
- }
+ "blockly": false,
+ "blockly_xml": "",
+ "category": "Message Identifier Activity Analysis",
+ "coa": {
+ "data": {
+ "description": "Accepts an internet message id, and asks Splunk \n to look for records that have a matching internet message id. It then produces a normalized output and summary table.\n\nRef: D3-IAA https://d3fend.mitre.org/technique/d3f:IdentifierActivityAnalysis/",
+ "edges": [
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
+ ],
+ "id": "port_3_to_port_5",
+ "sourceNode": "3",
+ "sourcePort": "3_out",
+ "targetNode": "5",
+ "targetPort": "5_in"
+ },
+ {
+ "id": "port_5_to_port_6",
+ "sourceNode": "5",
+ "sourcePort": "5_out",
+ "targetNode": "6",
+ "targetPort": "6_in"
+ },
+ {
+ "id": "port_7_to_port_8",
+ "sourceNode": "7",
+ "sourcePort": "7_out",
+ "targetNode": "8",
+ "targetPort": "8_in"
+ },
+ {
+ "id": "port_8_to_port_1",
+ "sourceNode": "8",
+ "sourcePort": "8_out",
+ "targetNode": "1",
+ "targetPort": "1_in"
+ },
+ {
+ "id": "port_6_to_port_9",
+ "sourceNode": "6",
+ "sourcePort": "6_out",
+ "targetNode": "9",
+ "targetPort": "9_in"
+ },
+ {
+ "conditions": [
+ {
+ "index": 0
+ }
+ ],
+ "id": "port_9_to_port_7",
+ "sourceNode": "9",
+ "sourcePort": "9_out",
+ "targetNode": "7",
+ "targetPort": "7_in"
+ },
+ {
+ "id": "port_0_to_port_3",
+ "sourceNode": "0",
+ "sourcePort": "0_out",
+ "targetNode": "3",
+ "targetPort": "3_in"
+ }
],
- "functionId": 1,
- "functionName": "input_filter",
- "id": "3",
- "type": "filter"
- },
- "errors": {},
- "id": "3",
- "type": "filter",
- "warnings": {},
- "x": 60,
- "y": 148
- },
- "5": {
- "data": {
- "advanced": {
- "customName": "format message query",
- "customNameId": 0,
- "drop_none": false,
- "join": [],
- "note": "construct Splunk query with substitution of input",
- "separator": ""
+ "hash": "03b4a7115a05c8be38f151e65c6feeeec2d4a77a",
+ "nodes": {
+ "0": {
+ "data": {
+ "advanced": {
+ "join": []
+ },
+ "functionName": "on_start",
+ "id": "0",
+ "type": "start"
+ },
+ "errors": {},
+ "id": "0",
+ "type": "start",
+ "warnings": {},
+ "x": 20,
+ "y": 0
+ },
+ "1": {
+ "data": {
+ "advanced": {
+ "join": []
+ },
+ "functionName": "on_finish",
+ "id": "1",
+ "type": "end"
+ },
+ "errors": {},
+ "id": "1",
+ "type": "end",
+ "warnings": {},
+ "x": 20,
+ "y": 1220
+ },
+ "3": {
+ "data": {
+ "advanced": {
+ "customName": "input filter",
+ "customNameId": 0,
+ "delimiter": ",",
+ "delimiter_enabled": true,
+ "join": [],
+ "note": "Ensure there is an input"
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": "!=",
+ "param": "playbook_input:message_id",
+ "value": "None"
+ },
+ {
+ "op": "!=",
+ "param": "playbook_input:sender",
+ "value": "None"
+ },
+ {
+ "op": "!=",
+ "param": "playbook_input:subject",
+ "value": "None"
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "Input Valid",
+ "logic": "or"
+ }
+ ],
+ "functionId": 1,
+ "functionName": "input_filter",
+ "id": "3",
+ "type": "filter"
+ },
+ "errors": {},
+ "id": "3",
+ "type": "filter",
+ "warnings": {},
+ "x": 60,
+ "y": 148
+ },
+ "5": {
+ "data": {
+ "advanced": {
+ "customName": "format message query",
+ "customNameId": 0,
+ "drop_none": false,
+ "join": [],
+ "note": "construct Splunk query with substitution of input",
+ "separator": ""
+ },
+ "functionId": 1,
+ "functionName": "format_message_query",
+ "id": "5",
+ "parameters": [
+ "filtered-data:input_filter:condition_1:playbook_input:message_id",
+ "filtered-data:input_filter:condition_1:playbook_input:subject",
+ "filtered-data:input_filter:condition_1:playbook_input:sender"
+ ],
+ "template": "summariesonly=false fillnull_value=\"Unknown\" count from datamodel=Email.All_Email where (All_Email.message_id IN (\n%%\n\"{0}\" \n%%\n) OR All_Email.subject IN (\n%%\n\"{1}\" \n%%\n) OR All_Email.src_user IN (\n%%\n\"{2}\" \n%%\n)) by All_Email.orig_recipient, All_Email.recipient, All_Email.src_user, All_Email.subject All_Email.message_id | `drop_dm_object_name(\"All_Email\")` | rename orig_recipient as Addressee, recipient as Recipient, src_user as Sender, subject as Subject message_id as Message_Id | fields Addressee, Recipient, Sender, Subject, Message_Id | fillnull value=\"Unknown\"",
+ "type": "format"
+ },
+ "errors": {},
+ "id": "5",
+ "type": "format",
+ "warnings": {},
+ "x": 0,
+ "y": 320
+ },
+ "6": {
+ "data": {
+ "action": "run query",
+ "actionType": "investigate",
+ "advanced": {
+ "customName": "run message query",
+ "customNameId": 0,
+ "join": [],
+ "note": "Run Splunk query created in prior block"
+ },
+ "connector": "Splunk",
+ "connectorConfigs": [
+ "splunk"
+ ],
+ "connectorId": "91883aa8-9c81-470b-97a1-5d8f7995f560",
+ "connectorVersion": "v1",
+ "functionId": 1,
+ "functionName": "run_message_query",
+ "id": "6",
+ "parameters": {
+ "command": "tstats",
+ "display": "Addressee,Recipient,Sender,Subject",
+ "query": "format_message_query:formatted_data",
+ "search_mode": "smart"
+ },
+ "requiredParameters": [
+ {
+ "data_type": "string",
+ "field": "query"
+ },
+ {
+ "data_type": "string",
+ "default": "search",
+ "field": "command"
+ },
+ {
+ "data_type": "string",
+ "default": "smart",
+ "field": "search_mode"
+ }
+ ],
+ "type": "action"
+ },
+ "errors": {},
+ "id": "6",
+ "type": "action",
+ "warnings": {},
+ "x": 0,
+ "y": 506
+ },
+ "7": {
+ "data": {
+ "advanced": {
+ "customName": "format message report",
+ "customNameId": 0,
+ "join": [],
+ "note": "Format the markdown table summary of this playbook execution"
+ },
+ "functionId": 2,
+ "functionName": "format_message_report",
+ "id": "7",
+ "parameters": [
+ "filtered-data:input_filter:condition_1:playbook_input:message_id",
+ "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Recipient",
+ "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Addressee",
+ "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Subject",
+ "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Sender",
+ "filtered-data:input_filter:condition_1:playbook_input:subject",
+ "filtered-data:input_filter:condition_1:playbook_input:sender"
+ ],
+ "template": "SOAR searched for occurrences of one of the following:\n- Message ID(s): `{0}` \n- Subject(s): `{5}`\n- Sender(s): `{6}`\nwithin your environment using Splunk's Email data model. The table below shows a summary of the information gathered.\n\n| Recipient | Addressed To | Subject | Sender |\n| --- | --- | --- | --- |\n%%\n| {1} | {2} | {3} | {4} |\n%%\n\n",
+ "type": "format"
+ },
+ "errors": {},
+ "id": "7",
+ "type": "format",
+ "warnings": {},
+ "x": 0,
+ "y": 860
+ },
+ "8": {
+ "data": {
+ "advanced": {
+ "customName": "build message output",
+ "customNameId": 0,
+ "description": "Logic regarding observable construction goes here",
+ "join": [],
+ "note": "Format the observable output object produced by this playbook"
+ },
+ "functionId": 1,
+ "functionName": "build_message_output",
+ "id": "8",
+ "inputParameters": [
+ "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Recipient",
+ "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Addressee",
+ "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Subject",
+ "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Sender",
+ "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Message_Id"
+ ],
+ "outputVariables": [
+ "observable_array"
+ ],
+ "type": "code"
+ },
+ "errors": {},
+ "id": "8",
+ "type": "code",
+ "userCode": "\n # Variable renames for convenience\n recipients = filtered_result_0_data___recipient\n addressees = filtered_result_0_data___addressee\n subjects = filtered_result_0_data___subject\n senders = filtered_result_0_data___sender\n message_id = filtered_result_0_data___message_id\n \n build_message_output__observable_array = []\n recordList = []\n \n # unwind records\n for recipient, addressee, subject, sender, message_id in zip(recipients, addressees, subjects, senders, filtered_result_0_data___message_id):\n record = {\n \"recipient\": recipient,\n \"addressee\": addressee,\n \"subject\": subject,\n \"sender\": sender\n }\n\n observable = {\n \"value\": message_id,\n \"type\": \"internet message ID\",\n \"count\": len(recordList),\n \"source\": \"Splunk\",\n \"message_identifier_activity\": record\n }\n\n build_message_output__observable_array.append(observable)\n \n",
+ "warnings": {},
+ "x": 0,
+ "y": 1042
+ },
+ "9": {
+ "data": {
+ "advanced": {
+ "customName": "Results filter",
+ "customNameId": 0,
+ "delimiter": ",",
+ "delimiter_enabled": true,
+ "join": [],
+ "note": "Ensure there are results"
+ },
+ "conditions": [
+ {
+ "comparisons": [
+ {
+ "conditionIndex": 0,
+ "op": ">",
+ "param": "run_message_query:action_result.summary.total_events",
+ "value": "0"
+ }
+ ],
+ "conditionIndex": 0,
+ "customName": "Results exist",
+ "logic": "and"
+ }
+ ],
+ "functionId": 2,
+ "functionName": "results_filter",
+ "id": "9",
+ "type": "filter"
+ },
+ "errors": {},
+ "id": "9",
+ "type": "filter",
+ "warnings": {},
+ "x": 60,
+ "y": 684
+ }
},
- "functionId": 1,
- "functionName": "format_message_query",
- "id": "5",
- "parameters": [
- "filtered-data:input_filter:condition_1:playbook_input:message_id",
- "filtered-data:input_filter:condition_1:playbook_input:subject",
- "filtered-data:input_filter:condition_1:playbook_input:sender"
- ],
- "template": "summariesonly=false fillnull_value=\"Unknown\" count from datamodel=Email.All_Email where (All_Email.message_id IN (\n%%\n\"{0}\" \n%%\n) OR All_Email.subject IN (\n%%\n\"{1}\" \n%%\n) OR All_Email.src_user IN (\n%%\n\"{2}\" \n%%\n)) by All_Email.orig_recipient, All_Email.recipient, All_Email.src_user, All_Email.subject All_Email.message_id | `drop_dm_object_name(\"All_Email\")` | rename orig_recipient as Addressee, recipient as Recipient, src_user as Sender, subject as Subject message_id as Message_Id | fields Addressee, Recipient, Sender, Subject, Message_Id | fillnull value=\"Unknown\"",
- "type": "format"
- },
- "errors": {},
- "id": "5",
- "type": "format",
- "warnings": {},
- "x": 0,
- "y": 320
- },
- "6": {
- "data": {
- "action": "run query",
- "actionType": "investigate",
- "advanced": {
- "customName": "run message query",
- "customNameId": 0,
- "join": [],
- "note": "Run Splunk query created in prior block"
- },
- "connector": "Splunk",
- "connectorConfigs": [
- "splunk"
- ],
- "connectorId": "91883aa8-9c81-470b-97a1-5d8f7995f560",
- "connectorVersion": "v1",
- "functionId": 1,
- "functionName": "run_message_query",
- "id": "6",
- "parameters": {
- "command": "tstats",
- "display": "Addressee,Recipient,Sender,Subject",
- "query": "format_message_query:formatted_data",
- "search_mode": "smart"
- },
- "requiredParameters": [
- {
- "data_type": "string",
- "field": "query"
- },
- {
- "data_type": "string",
- "default": "search",
- "field": "command"
- },
- {
- "data_type": "string",
- "default": "smart",
- "field": "search_mode"
- }
- ],
- "type": "action"
- },
- "errors": {},
- "id": "6",
- "type": "action",
- "warnings": {},
- "x": 0,
- "y": 506
+ "notes": "Inputs: internet message id\nInteractions: Splunk\nActions: run query\nOutputs: observable, markdown report"
},
- "7": {
- "data": {
- "advanced": {
- "customName": "format message report",
- "customNameId": 0,
- "join": [],
- "note": "Format the markdown table summary of this playbook execution"
+ "input_spec": [
+ {
+ "contains": [
+ "internet message id"
+ ],
+ "description": "An Internet Message ID to look for within message logs in Splunk",
+ "name": "message_id"
},
- "functionId": 2,
- "functionName": "format_message_report",
- "id": "7",
- "parameters": [
- "filtered-data:input_filter:condition_1:playbook_input:message_id",
- "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Recipient",
- "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Addressee",
- "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Subject",
- "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Sender",
- "filtered-data:input_filter:condition_1:playbook_input:subject",
- "filtered-data:input_filter:condition_1:playbook_input:sender"
- ],
- "template": "SOAR searched for occurrences of one of the following:\n- Message ID(s): `{0}` \n- Subject(s): `{5}`\n- Sender(s): `{6}`\nwithin your environment using Splunk's Email data model. The table below shows a summary of the information gathered.\n\n| Recipient | Addressed To | Subject | Sender |\n| --- | --- | --- | --- |\n%%\n| {1} | {2} | {3} | {4} |\n%%\n\n",
- "type": "format"
- },
- "errors": {},
- "id": "7",
- "type": "format",
- "warnings": {},
- "x": 0,
- "y": 860
- },
- "8": {
- "data": {
- "advanced": {
- "customName": "build message output",
- "customNameId": 0,
- "description": "Logic regarding observable construction goes here",
- "join": [],
- "note": "Format the observable output object produced by this playbook"
+ {
+ "contains": [
+ "email"
+ ],
+ "description": "A sender address to look for within message logs in Splunk",
+ "name": "sender"
},
- "functionId": 1,
- "functionName": "build_message_output",
- "id": "8",
- "inputParameters": [
- "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Recipient",
- "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Addressee",
- "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Subject",
- "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Sender",
- "filtered-data:results_filter:condition_1:run_message_query:action_result.data.*.Message_Id"
- ],
- "outputVariables": [
- "observable_array"
- ],
- "type": "code"
- },
- "errors": {},
- "id": "8",
- "type": "code",
- "userCode": "\n # Variable renames for convenience\n recipients = filtered_result_0_data___recipient\n addressees = filtered_result_0_data___addressee\n subjects = filtered_result_0_data___subject\n senders = filtered_result_0_data___sender\n message_id = filtered_result_0_data___message_id\n \n build_message_output__observable_array = []\n recordList = []\n \n # unwind records\n for recipient, addressee, subject, sender, message_id in zip(recipients, addressees, subjects, senders, filtered_result_0_data___message_id):\n record = {\n \"recipient\": recipient,\n \"addressee\": addressee,\n \"subject\": subject,\n \"sender\": sender\n }\n\n observable = {\n \"value\": message_id,\n \"type\": \"internet message ID\",\n \"count\": len(recordList),\n \"source\": \"Splunk\",\n \"message_identifier_activity\": record\n }\n\n build_message_output__observable_array.append(observable)\n \n",
- "warnings": {},
- "x": 0,
- "y": 1042
- },
- "9": {
- "data": {
- "advanced": {
- "customName": "Results filter",
- "customNameId": 0,
- "delimiter": ",",
- "delimiter_enabled": true,
- "join": [],
- "note": "Ensure there are results"
+ {
+ "contains": [],
+ "description": "A subject line to look for within message logs in Splunk",
+ "name": "subject"
+ }
+ ],
+ "output_spec": [
+ {
+ "contains": [],
+ "datapaths": [
+ "build_message_output:custom_function:observable_array"
+ ],
+ "deduplicate": false,
+ "description": "An array of Observable objects ",
+ "metadata": {},
+ "name": "observable"
},
- "conditions": [
- {
- "comparisons": [
- {
- "conditionIndex": 0,
- "op": ">",
- "param": "run_message_query:action_result.summary.total_events",
- "value": "0"
- }
+ {
+ "contains": [],
+ "datapaths": [
+ "format_message_report:formatted_data"
],
- "conditionIndex": 0,
- "customName": "Results exist",
- "logic": "and"
- }
- ],
- "functionId": 2,
- "functionName": "results_filter",
- "id": "9",
- "type": "filter"
- },
- "errors": {},
- "id": "9",
- "type": "filter",
- "warnings": {},
- "x": 60,
- "y": 684
- }
- },
- "notes": "Inputs: internet message id\nInteractions: Splunk\nActions: run query\nOutputs: observable, markdown report"
- },
- "input_spec": [
- {
- "contains": [
- "internet message id"
- ],
- "description": "An Internet Message ID to look for within message logs in Splunk",
- "name": "message_id"
- },
- {
- "contains": [
- "email"
- ],
- "description": "A sender address to look for within message logs in Splunk",
- "name": "sender"
- },
- {
- "contains": [],
- "description": "A subject line to look for within message logs in Splunk",
- "name": "subject"
- }
- ],
- "output_spec": [
- {
- "contains": [],
- "datapaths": [
- "build_message_output:custom_function:observable_array"
- ],
- "deduplicate": false,
- "description": "An array of Observable objects ",
- "metadata": {},
- "name": "observable"
- },
- {
- "contains": [],
- "datapaths": [
- "format_message_report:formatted_data"
+ "deduplicate": false,
+ "description": "An array of markdown reports",
+ "metadata": {},
+ "name": "markdown_report"
+ }
],
- "deduplicate": false,
- "description": "An array of markdown reports",
- "metadata": {},
- "name": "markdown_report"
- }
+ "playbook_type": "data",
+ "python_version": "3.13",
+ "schema": "5.0.10",
+ "version": "6.1.0.131"
+ },
+ "create_time": "2023-10-12T23:49:46.906299+00:00",
+ "draft_mode": false,
+ "labels": [
+ "*"
],
- "playbook_type": "data",
- "python_version": "3",
- "schema": "5.0.10",
- "version": "6.1.0.131"
- },
- "create_time": "2023-10-12T23:49:46.906299+00:00",
- "draft_mode": false,
- "labels": [
- "*"
- ],
- "tags": [
- "message_identifier_activity",
- "internet_message_id",
- "splunk",
- "D3-IAA"
- ]
-}
+ "tags": [
+ "message_identifier_activity",
+ "internet_message_id",
+ "splunk",
+ "D3-IAA"
+ ]
+}
\ No newline at end of file
diff --git a/playbooks/Splunk_Message_Identifier_Activity_Analysis.png b/playbooks/Splunk_Message_Identifier_Activity_Analysis.png
index de4d93bd8c..bbebbcb8b8 100644
Binary files a/playbooks/Splunk_Message_Identifier_Activity_Analysis.png and b/playbooks/Splunk_Message_Identifier_Activity_Analysis.png differ
diff --git a/playbooks/Splunk_Notable_Related_Tickets_Search.json b/playbooks/Splunk_Notable_Related_Tickets_Search.json
index 84522a1c0d..7cedba1aba 100644
--- a/playbooks/Splunk_Notable_Related_Tickets_Search.json
+++ b/playbooks/Splunk_Notable_Related_Tickets_Search.json
@@ -410,7 +410,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/URL_Outbound_Traffic_Filtering_Dispatch.json b/playbooks/URL_Outbound_Traffic_Filtering_Dispatch.json
index d0a69109c5..bf1aed9c4c 100644
--- a/playbooks/URL_Outbound_Traffic_Filtering_Dispatch.json
+++ b/playbooks/URL_Outbound_Traffic_Filtering_Dispatch.json
@@ -685,7 +685,7 @@
"input_spec": null,
"output_spec": null,
"playbook_type": "automation",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/UrlScan_IO_Dynamic_Analysis.json b/playbooks/UrlScan_IO_Dynamic_Analysis.json
index 6cb60fa847..6728fdf87e 100644
--- a/playbooks/UrlScan_IO_Dynamic_Analysis.json
+++ b/playbooks/UrlScan_IO_Dynamic_Analysis.json
@@ -545,7 +545,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.8",
"version": "5.5.0.108488"
},
diff --git a/playbooks/VirusTotal_v3_Dynamic_Analysis.json b/playbooks/VirusTotal_v3_Dynamic_Analysis.json
index 1697b284b1..16d560f4df 100644
--- a/playbooks/VirusTotal_v3_Dynamic_Analysis.json
+++ b/playbooks/VirusTotal_v3_Dynamic_Analysis.json
@@ -581,7 +581,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.10",
"version": "6.0.1.123902"
},
diff --git a/playbooks/VirusTotal_v3_Identifier_Reputation_Analysis.json b/playbooks/VirusTotal_v3_Identifier_Reputation_Analysis.json
index 62dc4b317b..aa8fcb2e23 100644
--- a/playbooks/VirusTotal_v3_Identifier_Reputation_Analysis.json
+++ b/playbooks/VirusTotal_v3_Identifier_Reputation_Analysis.json
@@ -1058,7 +1058,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.10",
"version": "6.0.1.123902"
},
diff --git a/playbooks/Windows_Defender_ATP_Identifier_Activity_Analysis.json b/playbooks/Windows_Defender_ATP_Identifier_Activity_Analysis.json
index d65ce167f7..b4a120b28f 100644
--- a/playbooks/Windows_Defender_ATP_Identifier_Activity_Analysis.json
+++ b/playbooks/Windows_Defender_ATP_Identifier_Activity_Analysis.json
@@ -495,7 +495,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.9",
"version": "6.0.0.114895"
},
diff --git a/playbooks/Zscaler_Outbound_Traffic_Filtering.json b/playbooks/Zscaler_Outbound_Traffic_Filtering.json
index 04a5c3c26f..ed42097778 100644
--- a/playbooks/Zscaler_Outbound_Traffic_Filtering.json
+++ b/playbooks/Zscaler_Outbound_Traffic_Filtering.json
@@ -258,7 +258,7 @@
}
],
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.10",
"version": "6.0.1.123902"
},
diff --git a/playbooks/activedirectory_reset_password.json b/playbooks/activedirectory_reset_password.json
index 2a5f022a36..83faaed258 100644
--- a/playbooks/activedirectory_reset_password.json
+++ b/playbooks/activedirectory_reset_password.json
@@ -2311,7 +2311,7 @@
},
"notes": "This playbook uses the following Apps:\n - LDAP (set password) - reset the password of a user\n\nDeployment Notes:\n - This playbook works on artifacts with artifact:*.cef.compromisedUserName which can be created as shown in the playbook \"recorded_future_handle_leaked_credentials\"\n - The prompt is hard-coded to use \"admin\" as the user, so change it to the correct user or role"
},
- "python_version": "3",
+ "python_version": "3.13",
"schema": 4,
"version": "4.10.0.40677"
},
diff --git a/playbooks/aws_disable_user_accounts.json b/playbooks/aws_disable_user_accounts.json
index fecc5e822e..d1cc960110 100644
--- a/playbooks/aws_disable_user_accounts.json
+++ b/playbooks/aws_disable_user_accounts.json
@@ -324,7 +324,7 @@
],
"output_spec": null,
"playbook_type": "data",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.3",
"version": "5.0.1.66250"
},
diff --git a/playbooks/aws_find_inactive_users.json b/playbooks/aws_find_inactive_users.json
index 9d92624dc5..f6b2ba31d7 100644
--- a/playbooks/aws_find_inactive_users.json
+++ b/playbooks/aws_find_inactive_users.json
@@ -492,7 +492,7 @@
"input_spec": null,
"output_spec": null,
"playbook_type": "automation",
- "python_version": "3",
+ "python_version": "3.13",
"schema": "5.0.3",
"version": "5.0.1.66250"
},
diff --git a/playbooks/crowdstrike_malware_triage.json b/playbooks/crowdstrike_malware_triage.json
index 588879e265..1347ae6228 100644
--- a/playbooks/crowdstrike_malware_triage.json
+++ b/playbooks/crowdstrike_malware_triage.json
@@ -9673,7 +9673,7 @@
},
"notes": "This playbook uses the following Apps:\n - CrowdStrike OAuth (get indicator, hunt file, and more) [asset name = crowdstrike_oauth] - Investigate and respond on the endpoint with CrowdStrike Falcon\n\nDeployment Notes:\n - Change the target user of the prompt from admin to the appropriate user or role"
},
- "python_version": "3",
+ "python_version": "3.13",
"schema": 4,
"version": "4.10.0.40961"
},
diff --git a/playbooks/custom_functions/artifact_create.json b/playbooks/custom_functions/artifact_create.json
index 0216acff05..b6b78e691f 100644
--- a/playbooks/custom_functions/artifact_create.json
+++ b/playbooks/custom_functions/artifact_create.json
@@ -1,6 +1,6 @@
{
- "create_time": "2022-03-02T15:10:11.903102+00:00",
- "custom_function_id": "1a37fa07ccabdb1d166e84cf508cfd7a15d8d662",
+ "create_time": "2026-03-12T01:52:29.767919+00:00",
+ "custom_function_id": "b705f07c883eae033c60aebe4779bd4eba61121d",
"description": "Create a new artifact with the specified attributes. Supports all fields available in /rest/artifact. Add any unlisted inputs as dictionary keys in input_json. Unsupported keys will automatically be dropped.",
"draft_mode": false,
"inputs": [
@@ -90,6 +90,7 @@
"description": "The ID of the created artifact."
}
],
- "platform_version": "5.2.1.78411",
- "python_version": "3"
+ "outputs_type": "item",
+ "platform_version": "8.4.0.158",
+ "python_version": "3.13"
}
\ No newline at end of file
diff --git a/playbooks/custom_functions/artifact_create.py b/playbooks/custom_functions/artifact_create.py
index 14331e2f6c..cd3b7870a8 100644
--- a/playbooks/custom_functions/artifact_create.py
+++ b/playbooks/custom_functions/artifact_create.py
@@ -37,6 +37,9 @@ def artifact_create(container=None, name=None, label=None, severity=None, cef_fi
new_artifact['container_id'] = container
elif isinstance(container, dict):
new_artifact['container_id'] = container['id']
+ elif isinstance(container, str) and container == 'container:id':
+ # fetch container info directly
+ new_artifact['container_id'] = phantom.get_current_container_id_()
else:
raise TypeError("container is neither an int nor a dictionary")
@@ -57,14 +60,18 @@ def artifact_create(container=None, name=None, label=None, severity=None, cef_fi
# run_automation must be "true" or "false" and defaults to "false"
if run_automation:
- if not isinstance(run_automation, str):
- raise TypeError("run automation must be a string")
- if run_automation.lower() == 'true':
- new_artifact['run_automation'] = True
- elif run_automation.lower() == 'false':
- new_artifact['run_automation'] = False
+ if isinstance(run_automation, str):
+ if run_automation.lower() == 'true':
+ new_artifact['run_automation'] = True
+ elif run_automation.lower() == 'false':
+ new_artifact['run_automation'] = False
+ else:
+ raise ValueError("run_automation must be either 'true' or 'false'")
+ elif not isinstance(run_automation, bool):
+ raise TypeError("run automation must be a string or bool")
else:
- raise ValueError("run_automation must be either 'true' or 'false'")
+ new_artifact['run_automation'] = run_automation
+
else:
new_artifact['run_automation'] = False
@@ -102,7 +109,6 @@ def artifact_create(container=None, name=None, label=None, severity=None, cef_fi
phantom.debug(f"Unsupported key: '{json_key}'")
# now actually create the artifact
- phantom.debug(f"Creating artifact with the following details: '{new_artifact}'")
response_json = phantom.requests.post(rest_artifact, json=new_artifact, verify=False).json()
if response_json.get('message', '') == 'artifact already exists':
phantom.debug(f"Artifact already exists: '{response_json['existing_artifact_id']}'")
diff --git a/playbooks/custom_functions/artifact_update.json b/playbooks/custom_functions/artifact_update.json
index e8f7a68f78..cbd814b35e 100644
--- a/playbooks/custom_functions/artifact_update.json
+++ b/playbooks/custom_functions/artifact_update.json
@@ -1,14 +1,14 @@
{
- "create_time": "2021-07-24T01:17:48.431013+00:00",
- "custom_function_id": "1d358be9992079dad6d3313d465e65318930cf70",
- "description": "Update an artifact with the specified attributes. All parameters are optional, except that cef_field and cef_value must both be provided if one is provided.",
+ "create_time": "2022-05-26T21:13:45.063456+00:00",
+ "custom_function_id": "202e71446b1780879a3a48f659f0f6c3f6adcd28",
+ "description": "Update an artifact with the specified attributes. All parameters are optional, except that an artifact_id must be provided and if one of cef_field or cef_value is provided then they must both be provided. Supports all fields available in /rest/artifact. Add any unlisted inputs as dictionary keys in input_json. Unsupported keys will automatically be dropped.",
"draft_mode": false,
"inputs": [
{
"contains_type": [
"phantom artifact id"
],
- "description": "ID of the artifact to update, which is required.",
+ "description": "ID of the artifact to update, which is required unless artifact_id is a key within input_json",
"input_type": "item",
"name": "artifact_id",
"placeholder": "1234"
@@ -21,16 +21,16 @@
"placeholder": "artifact"
},
{
- "contains_type": [],
+ "contains_type": [
+ ""
+ ],
"description": "Change the label of the artifact.",
"input_type": "item",
"name": "label",
"placeholder": "events"
},
{
- "contains_type": [
- ""
- ],
+ "contains_type": [],
"description": "Change the severity of the artifact. Typically this is either \"High\", \"Medium\", or \"Low\".",
"input_type": "item",
"name": "severity",
@@ -66,6 +66,13 @@
"name": "tags",
"placeholder": "tag1, tag2, tag3"
},
+ {
+ "contains_type": [],
+ "description": "Optional input. Either True or False with default as False. If set to True, existing tags on the indicator record will be replaced by the provided input. If set to False, the new tags will be appended to the existing indicator tags.",
+ "input_type": "item",
+ "name": "overwrite_tags",
+ "placeholder": "True or False"
+ },
{
"contains_type": [],
"description": "Optional parameter to modify any extra attributes of the artifact. Input_json will be merged with other inputs. In the event of a conflict, input_json will take precedence.",
@@ -75,6 +82,6 @@
}
],
"outputs": [],
- "platform_version": "4.10.4.56260",
- "python_version": "3"
+ "platform_version": "5.3.1.84890",
+ "python_version": "3.13"
}
\ No newline at end of file
diff --git a/playbooks/custom_functions/artifact_update.py b/playbooks/custom_functions/artifact_update.py
index e60dbb03bb..0638b2acae 100644
--- a/playbooks/custom_functions/artifact_update.py
+++ b/playbooks/custom_functions/artifact_update.py
@@ -1,9 +1,9 @@
-def artifact_update(artifact_id=None, name=None, label=None, severity=None, cef_field=None, cef_value=None, cef_data_type=None, tags=None, input_json=None, **kwargs):
+def artifact_update(artifact_id=None, name=None, label=None, severity=None, cef_field=None, cef_value=None, cef_data_type=None, tags=None, overwrite_tags=None, input_json=None, **kwargs):
"""
- Update an artifact with the specified attributes. All parameters are optional, except that cef_field and cef_value must both be provided if one is provided.
+ Update an artifact with the specified attributes. All parameters are optional, except that an artifact_id must be provided and if one of cef_field or cef_value is provided then they must both be provided. Supports all fields available in /rest/artifact. Add any unlisted inputs as dictionary keys in input_json. Unsupported keys will automatically be dropped.
Args:
- artifact_id (CEF type: phantom artifact id): ID of the artifact to update, which is required.
+ artifact_id (CEF type: phantom artifact id): ID of the artifact to update, which is required unless artifact_id is a key within input_json
name: Change the name of the artifact.
label: Change the label of the artifact.
severity: Change the severity of the artifact. Typically this is either "High", "Medium", or "Low".
@@ -11,6 +11,7 @@ def artifact_update(artifact_id=None, name=None, label=None, severity=None, cef_
cef_value (CEF type: *): The value of the CEF field to populate in the artifact, such as the IP address, domain name, or file hash. Required only if cef_field is provided.
cef_data_type: The CEF data type of the data in cef_value. For example, this could be "ip", "hash", or "domain". Optional, but only operational if cef_field is provided.
tags: A comma-separated list of tags to apply to the artifact, which is optional.
+        overwrite_tags: Optional input. Either True or False with default as False. If set to True, existing tags on the artifact will be replaced by the provided input. If set to False, the new tags will be appended to the existing artifact tags.
input_json: Optional parameter to modify any extra attributes of the artifact. Input_json will be merged with other inputs. In the event of a conflict, input_json will take precedence.
Returns a JSON-serializable object that implements the configured data paths:
@@ -20,17 +21,49 @@ def artifact_update(artifact_id=None, name=None, label=None, severity=None, cef_
import json
import phantom.rules as phantom
- updated_artifact = {}
+ outputs = {}
+ json_dict = None
+ valid_keys = [
+ 'artifact_id', 'artifact_type', 'cef', 'cef_data', 'cef_types', 'field_mapping',
+ 'data', 'end_time', 'label', 'name', 'owner_id',
+ 'raw_data', 'run_automation', 'severity','start_time', 'tags', 'type'
+ ]
+
+ # check the input_json first, because it might have an artifact_id in it
+ if input_json:
+ # ensure valid input_json
+ if isinstance(input_json, dict):
+ json_dict = input_json
+ elif isinstance(input_json, str):
+ json_dict = json.loads(input_json)
+ else:
+ raise ValueError("input_json must be either 'dict' or valid json 'string'")
+ if 'artifact_id' in json_dict:
+ artifact_id = int(json_dict['artifact_id'])
if not isinstance(artifact_id, int):
raise TypeError("artifact_id is required")
-
+
+ rest_artifact = phantom.build_phantom_rest_url('artifact', artifact_id)
+ updated_artifact = phantom.requests.get(rest_artifact, verify=False).json()
+ if updated_artifact.get('failed'):
+ raise RuntimeError(f"GET /rest/artifact/{artifact_id} failed, {updated_artifact.get('message')}")
+
if name:
updated_artifact['name'] = name
if label:
updated_artifact['label'] = label
if severity:
- updated_artifact['severity'] = severity
+ updated_artifact['severity'] = severity
+ if overwrite_tags:
+ if not isinstance(overwrite_tags, bool):
+ raise TypeError("Must be a boolean, True or False")
+ if tags:
+ if not overwrite_tags:
+ new_tags = updated_artifact['tags'] + tags.replace(" ", "").split(",")
+ updated_artifact['tags'] = list(set(new_tags))
+ else:
+ updated_artifact['tags'] = list(set(tags.replace(" ", "").split(",")))
# validate that if cef_field or cef_value is provided, the other is also provided
if (cef_field and not cef_value) or (cef_value and not cef_field):
@@ -38,28 +71,45 @@ def artifact_update(artifact_id=None, name=None, label=None, severity=None, cef_
# cef_data should be formatted {cef_field: cef_value}
if cef_field:
- updated_artifact['cef'] = {cef_field: cef_value}
+ updated_artifact['cef'].update({cef_field: cef_value})
if cef_data_type and isinstance(cef_data_type, str):
- updated_artifact['cef_types'] = {cef_field: [cef_data_type]}
-
- # separate tags by comma
- if tags:
- tags = tags.replace(" ", "").split(",")
- updated_artifact['tags'] = tags
-
- if input_json:
- json_dict = json.loads(input_json)
+ updated_artifact['cef_types'].update({cef_field: [cef_data_type]})
+
+ if json_dict:
# Merge dictionaries, using the value from json_dict if there are any conflicting keys
for json_key in json_dict:
- updated_artifact[json_key] = json_dict[json_key]
+ if json_key == 'artifact_id':
+ continue
+ if json_key in valid_keys:
+ # translate keys supported in phantom.add_artifact() to their corresponding values in /rest/artifact
+ if json_key == 'raw_data':
+ updated_artifact['data'].update(json_dict[json_key])
+ elif json_key == 'cef_data':
+ updated_artifact['cef'].update(json_dict[json_key])
+ elif json_key == 'artifact_type':
+ updated_artifact['type'] = json_dict[json_key]
+ elif json_key == 'field_mapping':
+ updated_artifact['cef_types'].update(json_dict[json_key])
+ else:
+ if isinstance(updated_artifact[json_key], dict):
+ updated_artifact[json_key].update(json_dict[json_key])
+ elif isinstance(updated_artifact[json_key], list):
+ updated_artifact[json_key] = updated_artifact[json_key] + json_dict[json_key]
+ updated_artifact[json_key] = list(set(updated_artifact[json_key]))
+ else:
+ updated_artifact[json_key] = json_dict[json_key]
+ else:
+ phantom.debug(f"Unsupported key: '{json_key}'")
# now actually update the artifact
- phantom.debug('updating artifact {} with the following attributes:\n{}'.format(artifact_id, updated_artifact))
- url = phantom.build_phantom_rest_url('artifact', artifact_id)
- response = phantom.requests.post(url, json=updated_artifact, verify=False).json()
-
- phantom.debug('POST /rest/artifact returned the following response:\n{}'.format(response))
- if 'success' not in response or response['success'] != True:
+ phantom.debug('Updating artifact {} with the following attributes:\n{}'.format(artifact_id, updated_artifact))
+ response_json = phantom.requests.post(rest_artifact, json=updated_artifact, verify=False).json()
+ if not response_json.get('success'):
raise RuntimeError("POST /rest/artifact failed")
+ else:
+ phantom.debug(response_json)
+ outputs['artifact_id'] = response_json['id']
- return
+ # Return a JSON-serializable object
+ assert json.dumps(outputs) # Will raise an exception if the :outputs: object is not JSON-serializable
+ return outputs
diff --git a/playbooks/custom_functions/asset_get_attributes.json b/playbooks/custom_functions/asset_get_attributes.json
index e438ef4871..b042d20c5d 100644
--- a/playbooks/custom_functions/asset_get_attributes.json
+++ b/playbooks/custom_functions/asset_get_attributes.json
@@ -73,5 +73,5 @@
}
],
"platform_version": "4.10.6.61906",
- "python_version": "3"
+ "python_version": "3.13"
}
\ No newline at end of file
diff --git a/playbooks/custom_functions/base64_decode.json b/playbooks/custom_functions/base64_decode.json
index f3d97da0cd..ffcc02b0c5 100644
--- a/playbooks/custom_functions/base64_decode.json
+++ b/playbooks/custom_functions/base64_decode.json
@@ -47,5 +47,5 @@
}
],
"platform_version": "5.0.1.66250",
- "python_version": "3"
+ "python_version": "3.13"
}
\ No newline at end of file
diff --git a/playbooks/custom_functions/collect_by_cef_type.json b/playbooks/custom_functions/collect_by_cef_type.json
index 8be2ec73ff..e6af711ca7 100644
--- a/playbooks/custom_functions/collect_by_cef_type.json
+++ b/playbooks/custom_functions/collect_by_cef_type.json
@@ -1,56 +1,68 @@
{
- "create_time": "2021-08-24T16:17:12.241297+00:00",
- "custom_function_id": "98612a9a22a18dff43b6644ed00c2c523d348d79",
+ "create_time": "2023-03-03T00:00:37.704553+00:00",
+ "custom_function_id": "b3e34e57be90e0cf7f542a9faa16a7318ae9da75",
"description": "Collect all artifact values that match the desired CEF data types, such as \"ip\", \"url\", \"sha1\", or \"all\". Optionally also filter for artifacts that have the specified tags.",
"draft_mode": false,
"inputs": [
{
- "contains_type": [
- "phantom container id"
- ],
- "description": "Container ID or container object.",
+ "contains_type": [],
+ "description": "(Optional) Collect data from this container. Defaults to current container.",
"input_type": "item",
"name": "container",
"placeholder": "container:id"
},
{
"contains_type": [],
- "description": "The CEF data type to collect values for. This could be a single string or a comma separated list such as \"hash,filehash,file_hash\". The special value \"all\" can also be used to collect all field values from all artifacts.",
- "input_type": "item",
+ "description": "(Required) The CEF data type to collect values for. This could be a single string or a comma separated list such as \"hash,filehash,file_hash\". The special value \"all\" can also be used to collect all field values from all artifacts.",
+ "input_type": "list",
"name": "data_types",
"placeholder": "data_type1, data_type2, data_type3"
},
{
"contains_type": [],
- "description": "If tags are provided, only return fields from artifacts that have all of the provided tags. This could be an individual tag or a comma separated list.",
+ "description": "(Optional) Define custom scope and defaults to 'new'. Advanced Settings Scope is not passed to a custom function. Options are 'all' or 'new'.",
"input_type": "item",
- "name": "tags",
- "placeholder": "tag1,tag2,tag3"
+ "name": "scope",
+ "placeholder": "new"
},
{
"contains_type": [],
- "description": "Defaults to 'new'. Define custom scope. Advanced Settings Scope is not passed to a custom function. Options are 'all' or 'new'.",
- "input_type": "item",
- "name": "scope",
- "placeholder": "new"
+ "description": "(Optional) Only return fields from artifacts that have all of the provided tags. This could be an individual tag or a comma separated list.",
+ "input_type": "list",
+ "name": "tags",
+ "placeholder": "tag1,tag2,tag3"
}
],
"outputs": [
- {
- "contains_type": [
- "*"
- ],
- "data_path": "*.artifact_value",
- "description": "The value of the field with the matching CEF data type."
- },
{
"contains_type": [
"phantom artifact id"
],
- "data_path": "*.artifact_id",
+ "data_path": "artifact_id",
"description": "ID of the artifact that contains the value."
+ },
+ {
+ "contains_type": [],
+ "data_path": "artifact_tags",
+ "description": "The tags associated with the artifact"
+ },
+ {
+ "contains_type": [],
+ "data_path": "cef_keys",
+ "description": "A list of keys from that artifact where this value appears. This will usually be a list of 1."
+ },
+ {
+ "contains_type": [],
+ "data_path": "cef_types",
+ "description": "A list of cef types associated with this value"
+ },
+ {
+ "contains_type": [],
+ "data_path": "cef_value",
+ "description": "The value of the field with the matching CEF data type."
}
],
- "platform_version": "4.10.6.61906",
- "python_version": "3"
+ "outputs_type": "list",
+ "platform_version": "6.0.0.114228",
+ "python_version": "3.13"
}
\ No newline at end of file
diff --git a/playbooks/custom_functions/collect_by_cef_type.py b/playbooks/custom_functions/collect_by_cef_type.py
index a38f0d41cc..6d53e9d214 100644
--- a/playbooks/custom_functions/collect_by_cef_type.py
+++ b/playbooks/custom_functions/collect_by_cef_type.py
@@ -1,90 +1,138 @@
-def collect_by_cef_type(container=None, data_types=None, tags=None, scope=None, **kwargs):
+def collect_by_cef_type(container=None, data_types=None, scope=None, tags=None, **kwargs):
"""
Collect all artifact values that match the desired CEF data types, such as "ip", "url", "sha1", or "all". Optionally also filter for artifacts that have the specified tags.
Args:
- container (CEF type: phantom container id): Container ID or container object.
- data_types: The CEF data type to collect values for. This could be a single string or a comma separated list such as "hash,filehash,file_hash". The special value "all" can also be used to collect all field values from all artifacts.
- tags: If tags are provided, only return fields from artifacts that have all of the provided tags. This could be an individual tag or a comma separated list.
- scope: Defaults to 'new'. Define custom scope. Advanced Settings Scope is not passed to a custom function. Options are 'all' or 'new'.
+ container: (Optional) Collect data from this container. Defaults to current container.
+ data_types: (Required) The CEF data type to collect values for. This could be a single string or a comma separated list such as "hash,filehash,file_hash". The special value "all" can also be used to collect all field values from all artifacts.
+ scope: (Optional) Define custom scope and defaults to 'new'. Advanced Settings Scope is not passed to a custom function. Options are 'all' or 'new'.
+ tags: (Optional) Only return fields from artifacts that have all of the provided tags. This could be an individual tag or a comma separated list.
Returns a JSON-serializable object that implements the configured data paths:
- *.artifact_value (CEF type: *): The value of the field with the matching CEF data type.
- *.artifact_id (CEF type: phantom artifact id): ID of the artifact that contains the value.
+ artifact_id (CEF type: phantom artifact id): ID of the artifact that contains the value.
+ artifact_tags: The tags associated with the artifact
+ cef_keys: A list of keys from that artifact where this value appears. This will usually be a list of 1.
+ cef_types: A list of cef types associated with this value
+ cef_value: The value of the field with the matching CEF data type.
"""
############################ Custom Code Goes Below This Line #################################
import json
import phantom.rules as phantom
import traceback
-
+
+ outputs = []
# validate container and get ID
if isinstance(container, dict) and container['id']:
container_dict = container
container_id = container['id']
elif isinstance(container, int):
- rest_container = phantom.requests.get(uri=phantom.build_phantom_rest_url('container', container), verify=False).json()
- if 'id' not in rest_container:
- raise ValueError('Failed to find container with id {container}')
- container_dict = rest_container
+ container_dict = phantom.get_container(container)
container_id = container
+ elif not container:
+ container_id = phantom.get_current_container_id_()
+ container_dict = phantom.get_container(container_id)
else:
raise TypeError("The input 'container' is neither a container dictionary nor an int, so it cannot be used")
# validate the data_types input
- if not data_types or not isinstance(data_types, str):
- raise ValueError("The input 'data_types' must exist and must be a string")
- # if data_types has a comma, split it and treat it as a list
- elif "," in data_types:
+ if isinstance(data_types, list) and len(data_types) == 1:
+ data_types = [item.strip() for item in data_types[0].split(",")]
+ elif isinstance(data_types, str):
data_types = [item.strip() for item in data_types.split(",")]
- # else it must be a single data type
- else:
- data_types = [data_types]
-
+ elif not data_types:
+ raise ValueError("The input 'data_types' is required and was blank")
+ if 'all' in data_types and scope == 'new':
+ raise ValueError("'all' datatypes not compatible with 'new' scope")
+
# validate scope input
if isinstance(scope, str) and scope.lower() in ['new', 'all']:
scope = scope.lower()
elif not scope:
- scope = None
+ if 'all' in data_types:
+ scope = 'all'
+ else:
+ scope = 'new'
else:
raise ValueError("The input 'scope' is not one of 'new' or 'all'")
- # split tags if it contains commas or use as-is
+    # tag input validation
if not tags:
tags = []
# if tags has a comma, split it and treat it as a list
- elif tags and "," in tags:
+ elif isinstance(tags, list) and len(tags) == 1:
+ tags = [item.strip() for item in tags[0].split(",")]
+ elif isinstance(tags, str):
tags = [item.strip() for item in tags.split(",")]
- # if there is no comma, treat it as a single tag
+
+ if 'all' not in data_types and scope == 'new':
+ # collect all values matching the cef type (this is to support scope)
+ collected_field_values = phantom.collect_from_contains(
+ container=container_dict,
+ action_results=None,
+ contains=data_types,
+ scope=scope,
+ tags=tags
+ )
else:
- tags = [tags]
-
- # collect all values matching the cef type (which was previously called "contains")
- collected_field_values = phantom.collect_from_contains(container=container_dict, action_results=None, contains=data_types, scope=scope)
- phantom.debug(f'found the following field values: {collected_field_values}')
-
- # collect all the artifacts in the container to get the artifact IDs
- artifacts = phantom.requests.get(uri=phantom.build_phantom_rest_url('container', container_id, 'artifacts'), params={'page_size': 0}, verify=False).json()['data']
-
+ collected_field_values = []
+
+ # terminate early because there were no new artifacts
+ if 'all' not in data_types and scope == 'new' and not collected_field_values:
+ phantom.debug("No new artifacts found")
+ return outputs
+
+ # fetch all artifacts in the container
+ container_artifact_url = phantom.build_phantom_rest_url('artifact')
+ container_artifact_url += f'?_filter_container={container_id}&page_size=0&include_all_cef_types'
+ artifacts = phantom.requests.get(container_artifact_url, verify=False).json()['data']
# build the output list from artifacts with the collected field values
- outputs = []
for artifact in artifacts:
# if any tags are provided, make sure each provided tag is in the artifact's tags
if tags:
if not set(tags).issubset(set(artifact['tags'])):
continue
- # "all" is a special value to collect every value from every artifact
- if data_types == ['all']:
- for cef_key in artifact['cef']:
- new_output = {'artifact_value': artifact['cef'][cef_key], 'artifact_id': artifact['id']}
- if new_output not in outputs:
- outputs.append(new_output)
- continue
- for cef_key in artifact['cef']:
- if artifact['cef'][cef_key] in collected_field_values:
- new_output = {'artifact_value': artifact['cef'][cef_key], 'artifact_id': artifact['id']}
- if new_output not in outputs:
- outputs.append(new_output)
+
+ cef_dict = {}
+ for cef_key, cef_value in artifact['cef'].items():
+ match = False
+
+ # "all" is a special value to collect every value from every artifact
+ if 'all' in data_types:
+ # if user put 'new' in scope
+ if collected_field_values and str(cef_value) in collected_field_values:
+ match = True
+ # if user put 'all' in scope
+ elif not collected_field_values:
+ match = True
+
+ # if user put 'new' in scope
+ elif scope == 'new' and str(cef_value) in collected_field_values:
+ match = True
+
+ # if user put 'all' in scope
+ elif scope == 'all':
+ for data_type in data_types:
+ if data_type and data_type in artifact['cef_types'].get(cef_key, []):
+ match = True
+ if match:
+ if cef_dict and str(cef_value) in cef_dict.keys():
+ cef_dict[str(cef_value)]['cef_keys'].append(cef_key)
+ if artifact['cef_types'].get(cef_key):
+ cef_dict[str(cef_value)]['cef_types'].update(artifact['cef_types'][cef_key])
+ else:
+ cef_dict[str(cef_value)] = {
+ 'cef_keys': list(set([cef_key])),
+ 'cef_value': str(cef_value),
+ 'artifact_id': artifact['id'],
+ 'artifact_tags': list(set(artifact['tags'])),
+ 'cef_types': set(artifact['cef_types'].get(cef_key, []))
+ }
+
+ for item in cef_dict.values():
+ item['cef_types'] = list(item['cef_types'])
+ outputs.append(item)
# Return a JSON-serializable object
+ assert isinstance(outputs, list) # Will raise an exception if the :outputs: object is not a list
assert json.dumps(outputs) # Will raise an exception if the :outputs: object is not JSON-serializable
return outputs
diff --git a/playbooks/custom_functions/container_merge.json b/playbooks/custom_functions/container_merge.json
index c6ac131e42..9d3d4841e0 100644
--- a/playbooks/custom_functions/container_merge.json
+++ b/playbooks/custom_functions/container_merge.json
@@ -1,6 +1,6 @@
{
- "create_time": "2022-02-25T14:52:47.172543+00:00",
- "custom_function_id": "1f8ae8e7978b750272fbbaba5efe4e6127a9a6a7",
+ "create_time": "2022-03-31T20:37:10.886463+00:00",
+ "custom_function_id": "d47390da04b668a34bb3dee22eba8b41f7531d06",
"description": "An alternative to the add-to-case API call. This function will copy all artifacts, automation, notes and comments over from every container within the container_list into the target_container. The target_container will be upgraded to a case.\n\nThe notes will be copied over with references to the child containers from where they came. A note will be left in the child containers with a link to the target container. The child containers will be marked as evidence within the target container. \n\nAny notes left as a consequence of the merge process will be skipped in subsequent merges.",
"draft_mode": false,
"inputs": [
@@ -37,5 +37,5 @@
],
"outputs": [],
"platform_version": "5.2.1.78411",
- "python_version": "3"
+ "python_version": "3.13"
}
\ No newline at end of file
diff --git a/playbooks/custom_functions/container_merge.py b/playbooks/custom_functions/container_merge.py
index 49cbcce3bf..99270911c2 100644
--- a/playbooks/custom_functions/container_merge.py
+++ b/playbooks/custom_functions/container_merge.py
@@ -19,6 +19,9 @@ def container_merge(target_container=None, container_list=None, workbook=None, c
import json
import phantom.rules as phantom
+ def check_numeric_list(input_list):
+ return (all(isinstance(x, int) for x in input_list) or all(x.isnumeric() for x in input_list))
+
outputs = {}
# Check if valid target_container input was provided
@@ -31,17 +34,28 @@ def container_merge(target_container=None, container_list=None, workbook=None, c
container_url = phantom.build_phantom_rest_url('container', container['id'])
+
# Check if container_list input is a list of IDs
- if isinstance(container_list, list) and (all(isinstance(x, int) for x in container_list) or all(x.isnumeric() for x in container_list)):
- pass
+ if isinstance(container_list, str):
+ container_list = container_list.replace('[','').replace(']','').replace(' ','').split(',')
+ if not check_numeric_list(container_list):
+ raise TypeError(f"container_list '{container_list}' is not a list of integers")
+ elif isinstance(container_list, list):
+ if not check_numeric_list(container_list):
+ raise TypeError(f"container_list '{container_list}' is not a list of integers")
+ elif isinstance(container_list, int):
+ container_list = [container_list]
else:
raise TypeError(f"container_list '{container_list}' is not a list of integers")
-
+
+ # ensure all ids are integers
+ container_list = [int(item) for item in container_list]
+
## Prep parent container as case with workbook ##
workbook_name = phantom.requests.get(container_url, verify=False).json().get('workflow_name')
# If workbook already exists, proceed to promote to case
if workbook_name:
- phantom.debug("workbook already exists. adding [Parent] to container name and promoting to case")
+ phantom.debug("Workbook already exists - adding [Parent] to container name and promoting to case")
update_data = {'container_type': 'case'}
if not '[Parent]' in container['name']:
update_data['name'] = "[Parent] {}".format(container['name'])
@@ -50,7 +64,7 @@ def container_merge(target_container=None, container_list=None, workbook=None, c
phantom.update(container, update_data)
# If no workbook exists, add one
else:
- phantom.debug("no workbook in container. adding one by name or using the default")
+ phantom.debug("No workbook in container - adding one by name or using the default")
# If workbook ID was provided, add it
if isinstance(workbook, int):
workbook_id = workbook
@@ -81,11 +95,11 @@ def container_merge(target_container=None, container_list=None, workbook=None, c
raise RuntimeError(f"Error occurred during workbook add for workbook '{workbook_name}'")
## Check if current phase is set. If not, set the current phase to the first available phase to avoid artifact merge error ##
- if not container.get('current_phase_id'):
- phantom.debug("no current phase, so setting first available phase to current")
+ if not container.get('current_phase_id') and not container.get('current_phase'):
+ phantom.debug("No current phase - setting first available phase to current")
workbook_phase_url = phantom.build_phantom_rest_url('workbook_phase') + "?_filter_container={}".format(container['id'])
request_json = phantom.requests.get(workbook_phase_url, verify=False).json()
- update_data = {'current_phase_id': request_json['data'][0]['id']}
+ update_data = {'current_phase': request_json['data'][0]['id']}
phantom.update(container, update_data)
child_container_list = []
@@ -149,6 +163,8 @@ def container_merge(target_container=None, container_list=None, workbook=None, c
## Close child container
if isinstance(close_containers, str) and close_containers.lower() == 'true':
phantom.set_status(container=child_container_id, status="closed")
+ elif isinstance(close_containers, bool) and close_containers:
+ phantom.set_status(container=child_container_id, status="closed")
### End child container processing ###
diff --git a/playbooks/custom_functions/container_update.json b/playbooks/custom_functions/container_update.json
index 5ac43b8864..0dee473c11 100644
--- a/playbooks/custom_functions/container_update.json
+++ b/playbooks/custom_functions/container_update.json
@@ -81,5 +81,5 @@
],
"outputs": [],
"platform_version": "4.10.4.56260",
- "python_version": "3"
+ "python_version": "3.13"
}
\ No newline at end of file
diff --git a/playbooks/custom_functions/custom_list_enumerate.json b/playbooks/custom_functions/custom_list_enumerate.json
index 0c12ad0d42..7b94c9d821 100644
--- a/playbooks/custom_functions/custom_list_enumerate.json
+++ b/playbooks/custom_functions/custom_list_enumerate.json
@@ -67,5 +67,5 @@
}
],
"platform_version": "4.10.2.47587",
- "python_version": "3"
+ "python_version": "3.13"
}
\ No newline at end of file
diff --git a/playbooks/custom_functions/custom_list_value_in_strings.json b/playbooks/custom_functions/custom_list_value_in_strings.json
index 34d25c78e6..ef54c480e4 100644
--- a/playbooks/custom_functions/custom_list_value_in_strings.json
+++ b/playbooks/custom_functions/custom_list_value_in_strings.json
@@ -52,5 +52,5 @@
}
],
"platform_version": "4.10.7.63984",
- "python_version": "3"
+ "python_version": "3.13"
}
\ No newline at end of file
diff --git a/playbooks/custom_functions/datetime_modify.json b/playbooks/custom_functions/datetime_modify.json
index b5f4ffec40..3ec3eeca7b 100644
--- a/playbooks/custom_functions/datetime_modify.json
+++ b/playbooks/custom_functions/datetime_modify.json
@@ -62,5 +62,5 @@
}
],
"platform_version": "4.10.6.61906",
- "python_version": "3"
+ "python_version": "3.13"
}
\ No newline at end of file
diff --git a/playbooks/custom_functions/debug.json b/playbooks/custom_functions/debug.json
index e514aefa3e..0b38b2bb62 100644
--- a/playbooks/custom_functions/debug.json
+++ b/playbooks/custom_functions/debug.json
@@ -117,5 +117,5 @@
}
],
"platform_version": "4.10.3.51237",
- "python_version": "3"
+ "python_version": "3.13"
}
\ No newline at end of file
diff --git a/playbooks/custom_functions/find_related_containers.json b/playbooks/custom_functions/find_related_containers.json
index 30aeb111f7..294429cb99 100644
--- a/playbooks/custom_functions/find_related_containers.json
+++ b/playbooks/custom_functions/find_related_containers.json
@@ -1,23 +1,30 @@
{
- "create_time": "2022-02-10T18:24:50.244936+00:00",
- "custom_function_id": "5781e3d5a4773b2c48afd429768fd81b5e733e54",
- "description": "Takes a provided list of indicator values to search for and finds all related containers. It will produce a list of the related container details.",
+ "create_time": "2023-06-08T22:31:47.040451+00:00",
+ "custom_function_id": "3f25b298ab0e3ca7d88f890d1091fd29b6c4211b",
+ "description": "Takes a list of indicator values or field names that may appear in other containers on the system. If any related containers are found, it will produce a list of the related container details.",
"draft_mode": false,
"inputs": [
{
"contains_type": [
"*"
],
- "description": "An indicator value to search on, such as a file hash or IP address. To search on all indicator values in the container, use \"*\".",
+ "description": "A comma separated list of fields to search on. Do not use data paths, only field names. Not compatible with value_list. Field/value combinations are OR'd together. Only containers that share the exact value(s) contained in the exact field(s) will contribute to minimum_match_count. ",
"input_type": "list",
- "name": "value_list",
- "placeholder": "*"
+ "name": "field_list",
+ "placeholder": "destinationAddress, requestURL"
},
{
"contains_type": [
"*"
],
- "description": "The minimum number of similar indicator records that a container must have to be considered \"related.\" If no match count provided, this will default to 1.",
+ "description": "A list of indicator values to search on, such as a file hash or IP address. Values are OR'd together. To search on all indicator values in the container, use \"*\". Not compatible with field_list.",
+ "input_type": "list",
+ "name": "value_list",
+ "placeholder": "artifact:*.cef.destinationAddress"
+ },
+ {
+ "contains_type": [],
+ "description": "The minimum number of values from the value_list parameter or the fields from the field_list that must match with related containers. Supports an integer or the string 'all'. Adding 'all' will set the minimum_match_count to the length of the number of unique values in the value_list or the number of unique fields in the field_list. If no match count provided, this will default to 1.",
"input_type": "item",
"name": "minimum_match_count",
"placeholder": "1-100"
@@ -39,29 +46,33 @@
"placeholder": "-30d"
},
{
- "contains_type": [],
- "description": "Optional comma-separated list of statuses to filter on. Only containers that have statuses matching an item in this list will be included.",
- "input_type": "item",
+ "contains_type": [
+ ""
+ ],
+ "description": "Optional comma-separated list of status IDs or status names to filter on. Only containers that have statuses matching an item in this list will be included. If status names are provided, the automation user must have administrator privileges. Use `/rest/container_status` to obtain status ids instead of adding administrator privileges.",
+ "input_type": "list",
"name": "filter_status",
"placeholder": "open"
},
{
- "contains_type": [],
+ "contains_type": [
+ "phantom container label"
+ ],
"description": "Optional comma-separated list of labels to filter on. Only containers that have labels matching an item in this list will be included.",
- "input_type": "item",
+ "input_type": "list",
"name": "filter_label",
"placeholder": "events"
},
{
"contains_type": [],
"description": "Optional comma-separated list of severities to filter on. Only containers that have severities matching an item in this list will be included.",
- "input_type": "item",
+ "input_type": "list",
"name": "filter_severity",
"placeholder": "medium"
},
{
"contains_type": [],
- "description": "Optional parameter to filter containers that are in a case or not. Defaults to True (drop containers that are already in a case).",
+ "description": "Optional parameter to filter containers that are in a case or not. True for only containers in cases, False for only containers not in cases. Default is all containers.",
"input_type": "item",
"name": "filter_in_case",
"placeholder": "True or False"
@@ -78,7 +89,7 @@
{
"contains_type": [],
"data_path": "*.container_indicator_match_count",
- "description": "The number of indicators matched to the related container"
+ "description": "The number of indicators matched to the related container if value_list provided"
},
{
"contains_type": [],
@@ -103,7 +114,7 @@
{
"contains_type": [],
"data_path": "*.indicator_ids",
- "description": "Indicator ID that matched"
+ "description": "Indicator ID that matched if value list provided"
},
{
"contains_type": [
@@ -111,8 +122,14 @@
],
"data_path": "*.container_url",
"description": "Link to container"
+ },
+ {
+ "contains_type": [],
+ "data_path": "*.container_field_list",
+ "description": "List of fields that matched if field_list provided."
}
],
- "platform_version": "5.2.1.78411",
- "python_version": "3"
+ "outputs_type": "item",
+ "platform_version": "5.5.0.108488",
+ "python_version": "3.13"
}
\ No newline at end of file
diff --git a/playbooks/custom_functions/find_related_containers.py b/playbooks/custom_functions/find_related_containers.py
index 4fd6bea923..aa1fba6e10 100644
--- a/playbooks/custom_functions/find_related_containers.py
+++ b/playbooks/custom_functions/find_related_containers.py
@@ -1,26 +1,28 @@
-def find_related_containers(value_list=None, minimum_match_count=None, container=None, earliest_time=None, filter_status=None, filter_label=None, filter_severity=None, filter_in_case=None, **kwargs):
+def find_related_containers(field_list=None, value_list=None, minimum_match_count=None, container=None, earliest_time=None, filter_status=None, filter_label=None, filter_severity=None, filter_in_case=None, **kwargs):
"""
- Takes a provided list of indicator values to search for and finds all related containers. It will produce a list of the related container details.
+ Takes a list of indicator values or field names that may appear in other containers on the system. If any related containers are found, it will produce a list of the related container details.
Args:
- value_list (CEF type: *): An indicator value to search on, such as a file hash or IP address. To search on all indicator values in the container, use "*".
- minimum_match_count (CEF type: *): The minimum number of similar indicator records that a container must have to be considered "related." If no match count provided, this will default to 1.
+ field_list (CEF type: *): A comma separated list of fields to search on. Do not use data paths, only field names. Not compatible with value_list. Field/value combinations are OR'd together. Only containers that share the exact value(s) contained in the exact field(s) will contribute to minimum_match_count.
+ value_list (CEF type: *): A list of indicator values to search on, such as a file hash or IP address. Values are OR'd together. To search on all indicator values in the container, use "*". Not compatible with field_list.
+ minimum_match_count: The minimum number of values from the value_list parameter or the fields from the field_list that must match with related containers. Supports an integer or the string 'all'. Adding 'all' will set the minimum_match_count to the length of the number of unique values in the value_list or the number of unique fields in the field_list. If no match count provided, this will default to 1.
container (CEF type: phantom container id): The container to run indicator analysis against. Supports container object or container_id. This container will also be excluded from the results for related_containers.
earliest_time: Optional modifier to only consider related containers within a time window. Default is -30d. Supports year (y), month (m), day (d), hour (h), or minute (m) Custom function will always set the earliest container window based on the input container "create_time".
- filter_status: Optional comma-separated list of statuses to filter on. Only containers that have statuses matching an item in this list will be included.
- filter_label: Optional comma-separated list of labels to filter on. Only containers that have labels matching an item in this list will be included.
+ filter_status: Optional comma-separated list of status IDs or status names to filter on. Only containers that have statuses matching an item in this list will be included. If status names are provided, the automation user must have administrator privileges. Use `/rest/container_status` to obtain status ids instead of adding administrator privileges.
+ filter_label (CEF type: phantom container label): Optional comma-separated list of labels to filter on. Only containers that have labels matching an item in this list will be included.
filter_severity: Optional comma-separated list of severities to filter on. Only containers that have severities matching an item in this list will be included.
- filter_in_case: Optional parameter to filter containers that are in a case or not. Defaults to True (drop containers that are already in a case).
+ filter_in_case: Optional parameter to filter containers that are in a case or not. True for only containers in cases, False for only containers not in cases. Default is all containers.
Returns a JSON-serializable object that implements the configured data paths:
*.container_id (CEF type: *): The unique id of the related container
- *.container_indicator_match_count: The number of indicators matched to the related container
+ *.container_indicator_match_count: The number of indicators matched to the related container if value_list provided
*.container_status: The status of the related container e.g. new, open, closed
*.container_type: The type of the related container, e.g. default or case
*.container_name: The name of the related container
*.in_case: True or False if the related container is already included in a case
- *.indicator_ids: Indicator ID that matched
+ *.indicator_ids: Indicator ID that matched if value list provided
*.container_url (CEF type: url): Link to container
+ *.container_field_list: List of fields that matched if field_list provided.
"""
############################ Custom Code Goes Below This Line #################################
import json
@@ -28,32 +30,168 @@ def find_related_containers(value_list=None, minimum_match_count=None, container
import re
from datetime import datetime, timedelta
from urllib import parse
-
+ from hashlib import sha256
+ from collections import Counter
+ from typing import Tuple
+
outputs = []
- related_containers = []
- indicator_id_dictionary = {}
- container_dictionary = {}
offset_time = None
- base_url = phantom.get_base_url()
- indicator_by_value_url = phantom.build_phantom_rest_url('indicator_by_value')
- indicator_common_container_url = phantom.build_phantom_rest_url('indicator_common_container')
- container_url = phantom.build_phantom_rest_url('container')
-
- # Get indicator ids based on value_list
- def format_offset_time(seconds):
+ def grouper(seq, size) -> iter:
+        # for iterating over a list {size} at a time
+ return (seq[pos:pos + size] for pos in range(0, len(seq), size))
+
+ def get_status_ids(status_list) -> list:
+ status_url = phantom.build_phantom_rest_url('container_status')
+ status_url += f'?_filter_name__in={status_list}'
+ status_response = phantom.requests.get(status_url, verify=False).json()
+ if status_response.get('failed') == True and "does not have permission" in status_response.get('message', ""):
+ raise RuntimeError(
+ "User does not have permission to view container_status. "
+ "Consider using a list of status ids instead of status names - "
+ "`/rest/container_status` can be used to find Status IDs."
+ )
+ return [item['id'] for item in status_response.get('data', [])]
+
+
+ def format_offset_time(seconds) -> str:
+        # Convert a relative offset in seconds to a formatted UTC-style timestamp string
datetime_obj = datetime.now() - timedelta(seconds=seconds)
formatted_time = datetime_obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
return formatted_time
- def fetch_indicator_ids(value_list):
- indicator_id_list = []
- for value in value_list:
- params = {'indicator_value': f'{value}', 'timerange': 'all'}
- indicator_id = phantom.requests.get(indicator_by_value_url, params=params, verify=False).json().get('id')
- if indicator_id:
- indicator_id_list.append(indicator_id)
- return indicator_id_list
+ def build_outputs(container_dict, **kwargs) -> list:
+ # Take container dict of id and indicators and built a list of outputs
+ output_list = []
+ base_url = phantom.get_base_url()
+ container_url = phantom.build_phantom_rest_url('container') + '?page_size=0'
+
+ for k, v in kwargs.items():
+ if v:
+ if k == 'earliest_time':
+ container_url += f'&_filter_create_time__gt="{format_offset_time(v)}"'
+ if k == 'filter_status':
+ container_url += f'&_filter_status__in={v}'
+ if k == 'filter_label':
+ container_url += f'&_filter_label__in={v}'
+ if k == 'filter_severity':
+ container_url += f'&_filter_severity__in={v}'
+ if k == 'filter_in_case':
+ container_url += f'&_filter_in_case="{str(v).lower()}"'
+
+ container_id_list = list(container_dict.keys())
+ for group in grouper(container_id_list, 100):
+ query_url = container_url + f'&_filter_id__in={group}'
+ container_response = phantom.requests.get(query_url, verify=False).json()
+ for container_data in container_response.get('data', []):
+ temp_dict = {
+ 'container_id': container_data['id'],
+ 'container_status': container_data['status'],
+ 'container_type': container_data['container_type'],
+ 'container_name': container_data['name'],
+ 'container_url': base_url.rstrip('/') + f"/mission/{container_data['id']}",
+ 'in_case': container_data['in_case']
+ }
+ if container_dict[str(container_data['id'])].get('indicator_ids'):
+ indicator_ids = list(container_dict[str(container_data['id'])]['indicator_ids'])
+ match_count = len(indicator_ids)
+ temp_dict['container_indicator_match_count'] = match_count
+ temp_dict['indicator_ids'] = indicator_ids
+ if container_dict[str(container_data['id'])].get('field_list'):
+ field_list = container_dict[str(container_data['id'])]['field_list']
+ temp_dict['container_field_list'] = field_list
+ output_list.append(temp_dict)
+ return output_list
+
+ def get_field_dictionary(field_list, current_container) -> list:
+ pairing_list = []
+ artifact_url = phantom.build_phantom_rest_url('container', current_container, 'artifacts')
+ artifact_list = phantom.requests.get(artifact_url, verify=False).json()['data']
+ if not artifact_list:
+ raise RuntimeError(f"No artifacts found for {current_container}")
+ for artifact in artifact_list:
+ for field in field_list:
+ if artifact['cef'].get(field):
+ pairing_list.append({field: artifact['cef'][field]})
+ # deduplicate and return
+ return list({str(i):i for i in pairing_list}.values())
+
+ def find_common_containers_by_field(pairing_list, current_container, seconds) -> dict:
+ container_dictionary = {}
+ artifact_url = phantom.build_phantom_rest_url('artifact')
+ offset_time = format_offset_time(seconds)
+ for pairing in pairing_list:
+ for k,v in pairing.items():
+ params = {
+ '_filter_create_time__gt': f'"{offset_time}"',
+ f'_filter_cef__{k}': f'"{v}"',
+ 'page_size': 0,
+ '_exclude_container': current_container
+ }
+ related_artifacts = phantom.requests.get(artifact_url, params=params, verify=False).json()
+ if related_artifacts.get('data'):
+ container_id_set = set([item['container'] for item in related_artifacts['data']])
+ for container_id in container_id_set:
+ if not container_dictionary.get(str(container_id)):
+ container_dictionary[str(container_id)] = {'field_list': [{k: v}]}
+ else:
+ container_dictionary[str(container_id)]['field_list'].append({k: v})
+ for key in list(container_dictionary.keys()):
+ field_list = container_dictionary[key]['field_list']
+ container_dictionary[key]['field_list'] = list({str(i):i for i in field_list}.values())
+ return container_dictionary
+
+ def fetch_indicators(value_list) -> Tuple[dict, set]:
+        # Creates a dictionary with the value_hash as the key and a sub dictionary of indicator ids
+ indicator_dictionary = {}
+ indicator_url = phantom.build_phantom_rest_url('indicator')
+ hashed_list = [sha256(item.encode('utf-8')).hexdigest() for item in value_list]
+ indicator_id_set = set()
+ for group in grouper(hashed_list, 100):
+ query_url = indicator_url + f'?_filter_value_hash__in={group}&timerange=all&page_size=0'
+ indicator_response = phantom.requests.get(query_url, verify=False).json()
+ for data in indicator_response.get('data', []):
+ indicator_dictionary[data['value_hash']] = {'indicator_id': data['id']}
+ indicator_id_set.add(data['id'])
+ return indicator_dictionary, indicator_id_set
+
+ def add_common_containers(indicator_dictionary) -> dict:
+ # Adds container_ids to the indicator dictionary
+ indicator_common_container_url = phantom.build_phantom_rest_url('indicator_common_container') + '?page_size=0'
+ for indicator_hash, dict_object in indicator_dictionary.items():
+ indicator_dictionary[indicator_hash].update({'container_ids': []})
+ query_url = indicator_common_container_url + f"&indicator_ids={dict_object['indicator_id']}"
+ container_response = phantom.requests.get(query_url, verify=False).json()
+ for container_object in container_response:
+ indicator_dictionary[indicator_hash]['container_ids'].append(container_object['container_id'])
+ return indicator_dictionary
+
+ def match_indicator_per_container(indicator_dictionary, exclude_container) -> dict:
+ # Create a new dictionary filled with container_ids as keys and the set of related indicators as values
+ container_dictionary = {}
+ for dict_object in indicator_dictionary.values():
+ for container_id in dict_object['container_ids']:
+ if container_id != exclude_container:
+ if not container_dictionary.get(str(container_id)):
+ container_dictionary[str(container_id)] = {'indicator_ids': {dict_object['indicator_id']}}
+ else:
+ container_dictionary[str(container_id)]['indicator_ids'].add(dict_object['indicator_id'])
+ return container_dictionary
+
+ def test_minimum_match(minimum_match_count, input_list) -> None:
+ # Fail early if minimum_match_count exceeds the number of provided values
+ if isinstance(minimum_match_count, int) and minimum_match_count > len(input_list):
+ raise RuntimeError(
+ f"The provided minimum_match_count '{minimum_match_count}' "
+ f"exceeds the number of unique fields or values given: '{len(input_list)}'. "
+ f"Try providing additional values in the value_list, additional fields in the field_list, "
+ f"decreasing the minimum_match_count, or entering 'all' in minimum_match_count."
+ )
+ return
+
+
+ ## Start Input Checking ##
+ ## -------------------- ##
# Ensure valid time modifier
if earliest_time:
@@ -64,8 +202,11 @@ def fetch_indicator_ids(value_list):
integer, char = (re.findall(pattern, earliest_time)[0])
time_in_seconds = int(integer) * char_lookup[char.lower()]
else:
- raise RuntimeError(f'earliest_time string "{earliest_time}" is incorrectly formatted. Format is -