diff --git a/.devcontainer/createCommand.sh b/.devcontainer/createCommand.sh
index d4dd1bd3d0d2..3b4f29cc5ff7 100755
--- a/.devcontainer/createCommand.sh
+++ b/.devcontainer/createCommand.sh
@@ -3,27 +3,22 @@
set -e
echo "Fixing permissions"
-
-sudo chown demisto /workspaces /workspaces/content
-sudo chown -R demisto /workspaces/content/.vscode /workspaces/content/.git /workspaces/content/.venv /workspaces/content/node_modules /workspaces/content/package-lock.json
+# get current folder name
+repo=${PWD##*/}
+sudo chown demisto /workspaces /workspaces/$repo
+sudo chown -R demisto /workspaces/$repo/.vscode /workspaces/$repo/.git /workspaces/$repo/.venv /workspaces/$repo/node_modules /workspaces/$repo/package-lock.json
sudo chown -R demisto $HOME
-echo "Setting up VSCode paths"
-
-cp .devcontainer/settings.json .vscode/settings.json
-touch CommonServerUserPython.py
-touch DemistoClassApiModule.py
-path=$(printf '%s:' Packs/ApiModules/Scripts/*)
-rm -f .env
-echo "PYTHONPATH=""$path"":$PYTHONPATH" >> .env
-echo "MYPYPATH=""$path"":$MYPYPATH" >> .env
-
echo "Setting up git safe directory"
-git config --global --add safe.directory /workspaces/content
+git config --global --add safe.directory /workspaces/$repo
echo "Setting up content dependencies"
.hooks/bootstrap
+echo "Setting up VSCode"
+poetry run demisto-sdk setup-env
+
+
echo "Run demisto-sdk pre-commit to cache dependencies"
-poetry run demisto-sdk pre-commit >/dev/null 2>&1 || true
\ No newline at end of file
+poetry run demisto-sdk pre-commit --mode=commit >/dev/null 2>&1 || true
\ No newline at end of file
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index cff0a6bbd696..92359eec2862 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -24,11 +24,6 @@
"DEMISTO_SDK_CONTENT_PATH": "${containerWorkspaceFolder}",
"PYTHONPATH": "${containerWorkspaceFolder}:${containerWorkspaceFolder}/Packs/Base/Scripts/CommonServerPython/:${containerWorkspaceFolder}/Packs/Base/Scripts/CommonServerUserPython/:${containerWorkspaceFolder}/Tests/demistomock/:${containerEnv:PYTHONPATH}",
"MYPYPATH": "${containerWorkspaceFolder}:${containerWorkspaceFolder}/Packs/Base/Scripts/CommonServerPython/:${containerWorkspaceFolder}/Packs/Base/Scripts/CommonServerUserPython/:${containerWorkspaceFolder}/Tests/demistomock/:${containerEnv:PYTHONPATH}",
- "DEMISTO_BASE_URL": "${localEnv:DEMISTO_BASE_URL}",
- "DEMISTO_USERNAME": "${localEnv:DEMISTO_USERNAME}",
- "DEMISTO_PASSWORD": "${localEnv:DEMISTO_PASSWORD}",
- "DEMISTO_VERIFY_SSL": "${localEnv:DEMISTO_VERIFY_SSL}",
- "DEMISTO_API_KEY": "${localEnv:DEMISTO_API_KEY}"
},
"customizations": {
"vscode": {
@@ -97,7 +92,7 @@
// "ghcr.io/devcontainers/features/common-utils:1"
// ],
"onCreateCommand": "sudo dos2unix -n .devcontainer/createCommand.sh .devcontainer/createCommand_unix.sh && bash .devcontainer/createCommand_unix.sh",
-"postStartCommand": "poetry install && poetry run demisto-sdk pre-commit >/dev/null 2>&1 || true",
+"postStartCommand": "git pull || true && poetry install && poetry run demisto-sdk pre-commit --mode=commit >/dev/null 2>&1 || true",
"hostRequirements": {
"cpus": 4,
"memory": "8gb",
diff --git a/.devcontainer/settings.json b/.devcontainer/settings.json
deleted file mode 100644
index 308ef566d63c..000000000000
--- a/.devcontainer/settings.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python",
- "python.linting.enabled": true,
- "python.linting.mypyEnabled": true,
- "python.linting.flake8Enabled": false,
- "python.linting.mypyArgs": [
- "--follow-imports=silent",
- "--ignore-missing-imports",
- "--show-column-numbers",
- "--no-pretty",
- "--allow-redefinition",
- "--namespace-packages"
- ],
- "python.testing.pytestEnabled": true,
- "python.testing.pytestArgs": [
- "-v"
- ],
- "python.testing.cwd": "${workspaceFolder}/Packs/HelloWorld/Integrations/HelloWorld"
-}
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 3cc59626e513..0af1f1b8f5a1 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -5,6 +5,8 @@
/Tests/Marketplace/approved_categories.json @yaakovpraisler @bakatzir @GuyLibermanPA @demisto/content-leaders
/Tests/Marketplace/core_packs_list.json @yaakovpraisler @bakatzir @GuyLibermanPA @demisto/content-leaders
/Tests/Marketplace/core_packs_mpv2_list.json @yaakovpraisler @bakatzir @GuyLibermanPA @demisto/content-leaders
+/Tests/Marketplace/versions-metadata.json @yaakovpraisler
+/Tests/Marketplace/corepacks_override.json @yaakovpraisler
# Docker native image
/Tests/docker_native_image_config.json @GuyAfik @JudahSchwartz @samuelFain
@@ -35,7 +37,7 @@
# PANW Products
/Packs/Palo_Alto_Networks_Enterprise_DLP/ @DeanArbel
-/Packs/PAN-OS/Integrations/ @GuyAfik @jlevypaloalto
+/Packs/PAN-OS/Integrations/ @jlevypaloalto
/Packs/PrismaCloudCompute/Integrations/ @GuyAfik
/Packs/PrismaSaasSecurity/Integrations/ @GuyAfik
diff --git a/.github/workflows/create-internal-pr-from-external.yml b/.github/workflows/create-internal-pr-from-external.yml
index bb3726063e62..207f78ad3d5d 100644
--- a/.github/workflows/create-internal-pr-from-external.yml
+++ b/.github/workflows/create-internal-pr-from-external.yml
@@ -10,6 +10,9 @@ jobs:
runs-on: ubuntu-latest
if: github.repository == 'demisto/content' && github.event.action == 'closed' && github.event.pull_request.merged == true && github.event.pull_request.head.repo.fork == true
steps:
+ - name: set pythonpath
+ run: |
+ echo "PYTHONPATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
- name: Checkout
uses: actions/checkout@v4
- name: Setup Python
diff --git a/.github/workflows/handle-new-external-pr.yml b/.github/workflows/handle-new-external-pr.yml
index b7b5bee46f95..50ad5129c5c4 100644
--- a/.github/workflows/handle-new-external-pr.yml
+++ b/.github/workflows/handle-new-external-pr.yml
@@ -31,7 +31,9 @@ jobs:
- name: Install Python Dependencies
run: |
poetry install --with ci
-
+ - name: set pythonpath
+ run: |
+ echo "PYTHONPATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
- name: Update External PR
env:
CONTENTBOT_GH_ADMIN_TOKEN: ${{ secrets.CONTENTBOT_GH_ADMIN_TOKEN }}
diff --git a/.github/workflows/trigger-contribution-build-temp.yml b/.github/workflows/trigger-contribution-build-temp.yml
new file mode 100644
index 000000000000..bfea82d3e7ce
--- /dev/null
+++ b/.github/workflows/trigger-contribution-build-temp.yml
@@ -0,0 +1,35 @@
+name: Trigger Contribution Build Temp
+on:
+ pull_request_target:
+ types: [labeled]
+ branches:
+ - 'contrib/AradCarmi**'
+
+jobs:
+ trigger_contrib_build:
+ runs-on: ubuntu-latest
+ if: github.event.action == 'labeled' && contains(github.event.pull_request.labels.*.name, 'ready-for-instance-test') == true && github.event.pull_request.head.repo.fork == true
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Setup Python
+ uses: actions/setup-python@v3
+ with:
+ python-version: '3.9'
+ - name: Setup Poetry
+ uses: Gr1N/setup-poetry@v8
+ - name: Install Python Dependencies
+ run: |
+ poetry install --with ci
+ - name: Trigger Contribution Build
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number }}
+ BASE_BRANCH: ${{ github.event.pull_request.base.ref }}
+ CONTRIB_BRANCH: ${{ github.event.pull_request.head.label }}
+ CONTRIB_REPO: ${{ github.event.repository.name }}
+ USERNAME: ${{ secrets.SECRET_CHECK_USER_NG }}
+ PASSWORD: ${{ secrets.SECRET_CHECK_PASS_NG }}
+ GOLD_SERVER_URL: ${{ secrets.GOLD_SERVER_URL_NG }}
+ run: |
+ echo "Trigger contribution build for PR: $PR_NUMBER with base branch: $BASE_BRANCH contrib branch: $CONTRIB_BRANCH"
+ poetry run python ./Utils/github_workflow_scripts/trigger_contribution_build_temp.py --pr_number $PR_NUMBER --base_branch $BASE_BRANCH --contrib_branch $CONTRIB_BRANCH --contrib_repo $CONTRIB_REPO --username $USERNAME --password $PASSWORD --gold_server_url $GOLD_SERVER_URL
diff --git a/.github/workflows/update-demisto-sdk-version.yml b/.github/workflows/update-demisto-sdk-version.yml
index 614851c495ed..4a5fa795d1d8 100644
--- a/.github/workflows/update-demisto-sdk-version.yml
+++ b/.github/workflows/update-demisto-sdk-version.yml
@@ -49,9 +49,9 @@ jobs:
run: |
poetry add --group dev demisto-sdk@${{inputs.release_version}}
poetry lock --no-update
- git add .
source .venv/bin/activate
demisto-sdk pre-commit --no-validate --no-secrets --no-unit-test
+ git add .
- name: Create pull request
run: |
diff --git a/.gitlab/ci/.gitlab-ci.global.yml b/.gitlab/ci/.gitlab-ci.global.yml
index 56e49a73b73c..d2183e46f7d9 100644
--- a/.gitlab/ci/.gitlab-ci.global.yml
+++ b/.gitlab/ci/.gitlab-ci.global.yml
@@ -206,7 +206,7 @@
.build_parameters: &build_parameters
- section_start "Build Parameters" --collapsed
- echo "Environment Variables:"
- - set | grep -E "^ARTIFACTS_FOLDER.*=|^JIRA_.*=|^NIGHTLY=|^INSTANCE_TESTS=|^SERVER_BRANCH_NAME=|^ARTIFACT_BUILD_NUM=|^DEMISTO_SDK_NIGHTLY=|^TIME_TO_LIVE=|^CONTRIB_BRANCH=|^FORCE_PACK_UPLOAD=|^PACKS_TO_UPLOAD=|^BUCKET_UPLOAD=|^STORAGE_BASE_PATH=|^OVERRIDE_ALL_PACKS=|^GCS_MARKET_BUCKET=|^GCS_MARKET_V2_BUCKET=|^GCS_MARKET_XPANSE_BUCKET=|^SLACK_.*=|^NVM_DIR=|^NODE_VERSION=|^PATH=|^ARTIFACTS_FOLDER=|^ARTIFACTS_FOLDER_INSTANCE=|^ARTIFACTS_FOLDER_SERVER_TYPE=|^ENV_RESULTS_PATH=|^LAST_UPLOAD_COMMIT=|^DEMISTO_SDK_LOG_FILE_SIZE=|^DEMISTO_SDK_LOG_FILE_COUNT=|^DEMISTO_SDK_LOG_FILE_PATH=|^DEMISTO_SDK_LOG_NO_COLORS=|^DEMISTO_SDK_LOG_NOTIFY_PATH=|^POETRY_VIRTUALENVS_OPTIONS_ALWAYS_COPY=" | sort
+ - set | grep -E "^ARTIFACTS_FOLDER.*=|^JIRA_.*=|^NIGHTLY=|^INSTANCE_TESTS=|^SERVER_BRANCH_NAME=|^ARTIFACT_BUILD_NUM=|^DEMISTO_SDK_NIGHTLY=|^TIME_TO_LIVE=|^CONTRIB_BRANCH=|^FORCE_PACK_UPLOAD=|^PACKS_TO_UPLOAD=|^BUCKET_UPLOAD=|^STORAGE_BASE_PATH=|^OVERRIDE_ALL_PACKS=|^GCS_MARKET_BUCKET=|^GCS_MARKET_V2_BUCKET=|^GCS_MARKET_XPANSE_BUCKET=|^SLACK_.*=|^NVM_DIR=|^NODE_VERSION=|^PATH=|^ARTIFACTS_FOLDER=|^ARTIFACTS_FOLDER_INSTANCE=|^ARTIFACTS_FOLDER_SERVER_TYPE=|^ENV_RESULTS_PATH=|^LAST_UPLOAD_COMMIT=|^DEMISTO_SDK_LOG_FILE_SIZE=|^DEMISTO_SDK_LOG_FILE_COUNT=|^DEMISTO_SDK_LOG_FILE_PATH=|^DEMISTO_SDK_LOG_NO_COLORS=|^DEMISTO_SDK_LOG_NOTIFY_PATH=|^POETRY_VIRTUALENVS_OPTIONS_ALWAYS_COPY=|^OVERRIDE_SDK_REF=|^SDK_REF=" | sort
- echo "Versions Installed:"
- python --version
- python3 --version
diff --git a/.gitlab/ci/.gitlab-ci.on-push.yml b/.gitlab/ci/.gitlab-ci.on-push.yml
index aa654bbbf8f8..04bc1a6b3e88 100644
--- a/.gitlab/ci/.gitlab-ci.on-push.yml
+++ b/.gitlab/ci/.gitlab-ci.on-push.yml
@@ -734,7 +734,9 @@ test-upload-flow:
- section_start "Create Testing Branch"
- export BRANCH="${CI_COMMIT_BRANCH}-upload_test_branch-${CI_PIPELINE_ID}"
- - python3 ./Utils/test_upload_flow/create_test_branch.py -tb $BRANCH -a "${ARTIFACTS_FOLDER}" -g $GITLAB_PUSH_TOKEN
+ - echo "${BRANCH}" > "${ARTIFACTS_FOLDER}/test_upload_flow_branch.txt"
+ - python3 ./Utils/test_upload_flow/create_test_branch.py -tb "${BRANCH}" -a "${ARTIFACTS_FOLDER}" -g "${GITLAB_PUSH_TOKEN}"
+
- echo "Created test branch:${BRANCH}"
- section_end "Create Testing Branch"
@@ -760,6 +762,15 @@ test-upload-flow:
- python3 ./Utils/test_upload_flow/verify_bucket.py -a "${ARTIFACTS_FOLDER}" -s $GCS_MARKET_KEY -sb $current_storage_base_path -b $ALL_BUCKETS
- section_end "Verify Created Testing Bucket"
- job-done
+ after_script:
+ - !reference [.default-after-script]
+ - section_start "Delete Testing Branch"
+ - |
+ if [ -f "${ARTIFACTS_FOLDER}/test_upload_flow_branch.txt" ]; then
+ BRANCH=$(cat "${ARTIFACTS_FOLDER}/test_upload_flow_branch.txt")
+ python3 ./Utils/test_upload_flow/delete_test_branch.py -tb "${BRANCH}" -g "${GITLAB_PUSH_TOKEN}"
+ fi
+ - section_end "Delete Testing Branch"
.server_test_playbooks_results:
stage: results
diff --git a/.gitlab/ci/.gitlab-ci.sdk-nightly.yml b/.gitlab/ci/.gitlab-ci.sdk-nightly.yml
index bc9cd7b72f0a..1f4e7789ab5f 100644
--- a/.gitlab/ci/.gitlab-ci.sdk-nightly.yml
+++ b/.gitlab/ci/.gitlab-ci.sdk-nightly.yml
@@ -526,9 +526,12 @@ demisto-sdk-nightly:trigger-slack-notify:
PIPELINE_TO_QUERY: $CI_PIPELINE_ID
WORKFLOW: "Demisto SDK Nightly"
JOB_NAME: "demisto-sdk-nightly:fan-in"
+ DEMISTO_SDK_NIGHTLY: "true"
+ OVERRIDE_SDK_REF: $OVERRIDE_SDK_REF
+ SDK_REF: $SDK_REF
SLACK_CHANNEL: $SLACK_CHANNEL
SLACK_JOB: "true"
- SLACK_ALLOW_FAILURE: 'false'
+ SLACK_ALLOW_FAILURE: "false"
CI_PROJECT_ID: $CI_PROJECT_ID
CI_SERVER_URL: $CI_SERVER_URL
JIRA_SERVER_URL: $JIRA_SERVER_URL
diff --git a/.gitlab/ci/.gitlab-ci.slack-notify.yml b/.gitlab/ci/.gitlab-ci.slack-notify.yml
index 183a5b59314d..214f17390814 100644
--- a/.gitlab/ci/.gitlab-ci.slack-notify.yml
+++ b/.gitlab/ci/.gitlab-ci.slack-notify.yml
@@ -1,4 +1,3 @@
-
default:
image: docker-io.art.code.pan.run/devdemisto/gitlab-content-ci:1.0.0.64455
artifacts:
@@ -11,19 +10,18 @@ default:
stages:
- notify
-
include:
- local: .gitlab/ci/.gitlab-ci.variables.yml
- local: .gitlab/ci/.gitlab-ci.global.yml
-
slack-notify:
tags:
- gke
stage: notify
extends: .default-job-settings
script:
- - python3 ./Tests/scripts/gitlab_slack_notifier.py -p "${PIPELINE_TO_QUERY}" -s "${SLACK_TOKEN}" -c "${GITLAB_STATUS_TOKEN}" -ch "${SLACK_CHANNEL}" --triggering-workflow "${WORKFLOW}" --allow-failure "${SLACK_ALLOW_FAILURE}"
+ - !reference [.download-demisto-conf]
+ - python3 ./Tests/scripts/gitlab_slack_notifier.py -p "${PIPELINE_TO_QUERY}" -s "${SLACK_TOKEN}" -c "${GITLAB_STATUS_TOKEN}" -ch "${SLACK_CHANNEL}" --triggering-workflow "${WORKFLOW}" --allow-failure "${SLACK_ALLOW_FAILURE}" --name-mapping_path "${CI_PROJECT_DIR}/name_mapping.json"
retry:
max: 2
needs:
diff --git a/.pre-commit-config_template.yaml b/.pre-commit-config_template.yaml
index 84c7c09dba4a..bfbd26238644 100644
--- a/.pre-commit-config_template.yaml
+++ b/.pre-commit-config_template.yaml
@@ -289,7 +289,7 @@ repos:
- decorator==5.1.1 ; python_version >= "3.8" and python_version < "3.11"
- defusedxml==0.7.1 ; python_version >= "3.8" and python_version < "3.11"
- demisto-py==3.2.13 ; python_version >= "3.8" and python_version < "3.11"
- - demisto-sdk==1.25.1 ; python_version >= "3.8" and python_version < "3.11"
+ - demisto-sdk==1.25.3 ; python_version >= "3.8" and python_version < "3.11"
- dictdiffer==0.9.0 ; python_version >= "3.8" and python_version < "3.11"
- dictor==0.1.12 ; python_version >= "3.8" and python_version < "3.11"
- distlib==0.3.7 ; python_version >= "3.8" and python_version < "3.11"
@@ -331,6 +331,7 @@ repos:
- jsonschema==4.19.2 ; python_version >= "3.8" and python_version < "3.11"
- junitparser==3.1.0 ; python_version >= "3.8" and python_version < "3.11"
- lazy-object-proxy==1.9.0 ; python_version >= "3.8" and python_version < "3.11"
+ - lxml==5.1.0 ; python_version >= "3.8" and python_version < "3.11"
- mccabe==0.6.1 ; python_version >= "3.8" and python_version < "3.11"
- mergedeep==1.3.4 ; python_version >= "3.8" and python_version < "3.11"
- more-itertools==9.1.0 ; python_version >= "3.8" and python_version < "3.11"
diff --git a/Packs/AWS-EC2/ReleaseNotes/1_4_1.md b/Packs/AWS-EC2/ReleaseNotes/1_4_1.md
new file mode 100644
index 000000000000..97148ac2fd0f
--- /dev/null
+++ b/Packs/AWS-EC2/ReleaseNotes/1_4_1.md
@@ -0,0 +1,3 @@
+## AWS - EC2
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/AWS-EC2/pack_metadata.json b/Packs/AWS-EC2/pack_metadata.json
index b8393cd9466c..b72d8572f427 100644
--- a/Packs/AWS-EC2/pack_metadata.json
+++ b/Packs/AWS-EC2/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AWS - EC2",
"description": "Amazon Web Services Elastic Compute Cloud (EC2)",
"support": "xsoar",
- "currentVersion": "1.4.0",
+ "currentVersion": "1.4.1",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AWS-IAM/ReleaseNotes/1_1_54.md b/Packs/AWS-IAM/ReleaseNotes/1_1_54.md
new file mode 100644
index 000000000000..2579c4c26952
--- /dev/null
+++ b/Packs/AWS-IAM/ReleaseNotes/1_1_54.md
@@ -0,0 +1,3 @@
+## AWS - IAM
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/AWS-IAM/pack_metadata.json b/Packs/AWS-IAM/pack_metadata.json
index 9d1029b7e6a4..7dc60039b2de 100644
--- a/Packs/AWS-IAM/pack_metadata.json
+++ b/Packs/AWS-IAM/pack_metadata.json
@@ -3,7 +3,7 @@
"description": "Amazon Web Services Identity and Access Management (IAM)",
"support": "xsoar",
"author": "Cortex XSOAR",
- "currentVersion": "1.1.53",
+ "currentVersion": "1.1.54",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
"created": "2020-04-14T00:00:00Z",
diff --git a/Packs/AWS-Route53/ReleaseNotes/1_1_26.md b/Packs/AWS-Route53/ReleaseNotes/1_1_26.md
new file mode 100644
index 000000000000..847d3668bdca
--- /dev/null
+++ b/Packs/AWS-Route53/ReleaseNotes/1_1_26.md
@@ -0,0 +1,3 @@
+## AWS - Route53
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/AWS-Route53/pack_metadata.json b/Packs/AWS-Route53/pack_metadata.json
index 230adf9d9e51..a8dd9ba90713 100644
--- a/Packs/AWS-Route53/pack_metadata.json
+++ b/Packs/AWS-Route53/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AWS - Route53",
"description": "Amazon Web Services Managed Cloud DNS Service.",
"support": "xsoar",
- "currentVersion": "1.1.25",
+ "currentVersion": "1.1.26",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AWS-S3/ReleaseNotes/1_2_20.md b/Packs/AWS-S3/ReleaseNotes/1_2_20.md
new file mode 100644
index 000000000000..b530392bde3a
--- /dev/null
+++ b/Packs/AWS-S3/ReleaseNotes/1_2_20.md
@@ -0,0 +1,3 @@
+## AWS - S3
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/AWS-S3/pack_metadata.json b/Packs/AWS-S3/pack_metadata.json
index a6602fa980ee..d10772690a52 100644
--- a/Packs/AWS-S3/pack_metadata.json
+++ b/Packs/AWS-S3/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AWS - S3",
"description": "Amazon Web Services Simple Storage Service (S3)",
"support": "xsoar",
- "currentVersion": "1.2.19",
+ "currentVersion": "1.2.20",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AWS_WAF/README.md b/Packs/AWS_WAF/README.md
index 5dfd12dd31b6..35cfaf676204 100644
--- a/Packs/AWS_WAF/README.md
+++ b/Packs/AWS_WAF/README.md
@@ -1,8 +1,7 @@
-<~XSIAM>
# AWS WAF
This pack includes Cortex XSIAM content.
-
+<~XSIAM>
## Configuration on Server Side
- For information on configuring **ACL web logging**, refer to the following [documentation](https://docs.aws.amazon.com/waf/latest/developerguide/logging-management.html).
- For information on sending ACL web logs to **S3 bucket**, refer to the following [documentation](https://docs.aws.amazon.com/waf/latest/developerguide/logging-s3.html).
@@ -29,15 +28,12 @@ To create or configure the Amazon S3 collector, use the information described [h
| Vendor | Set as 'aws'. | aws |
| Product | Set as 'waf'. | waf |
| Compression | Select 'gzip'. | gzip |
-
-
~XSIAM>
AWS WAF is a web application firewall service that lets you monitor web requests that are forwarded to an Amazon API Gateway API, an Amazon CloudFront distribution, or an Application Load Balancer.
You can protect those resources based on conditions that you specify, such as the IP addresses that the requests originate from.
## What does this pack do
-### AWS WAF
This integration enables you to:
- Create, retrieve, update, or delete IP sets.
- Create, retrieve, update, or delete Regex patterns sets.
diff --git a/Packs/AccessInvestigation/ReleaseNotes/1_2_6.md b/Packs/AccessInvestigation/ReleaseNotes/1_2_6.md
new file mode 100644
index 000000000000..4a0c5ba98e57
--- /dev/null
+++ b/Packs/AccessInvestigation/ReleaseNotes/1_2_6.md
@@ -0,0 +1,3 @@
+## Access Investigation
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/AccessInvestigation/pack_metadata.json b/Packs/AccessInvestigation/pack_metadata.json
index b448dd15e861..d81e356c3cff 100644
--- a/Packs/AccessInvestigation/pack_metadata.json
+++ b/Packs/AccessInvestigation/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Access Investigation",
"description": "This Content Pack automates response to unauthorised access incidents and contains customer access incident views and layouts to aid investigation.",
"support": "xsoar",
- "currentVersion": "1.2.5",
+ "currentVersion": "1.2.6",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AgariPhishingDefense/Integrations/AgariPhishingDefense/AgariPhishingDefense.yml b/Packs/AgariPhishingDefense/Integrations/AgariPhishingDefense/AgariPhishingDefense.yml
index 561f95709ea7..e8624a9574a5 100644
--- a/Packs/AgariPhishingDefense/Integrations/AgariPhishingDefense/AgariPhishingDefense.yml
+++ b/Packs/AgariPhishingDefense/Integrations/AgariPhishingDefense/AgariPhishingDefense.yml
@@ -323,7 +323,7 @@ script:
required: true
description: Remediate suspected message.
name: apd-remediate-message
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
isfetch: true
runonce: false
script: '-'
diff --git a/Packs/AgariPhishingDefense/ReleaseNotes/1_1_16.md b/Packs/AgariPhishingDefense/ReleaseNotes/1_1_16.md
new file mode 100644
index 000000000000..007aaf4043e1
--- /dev/null
+++ b/Packs/AgariPhishingDefense/ReleaseNotes/1_1_16.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Agari Phishing Defense
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/AgariPhishingDefense/pack_metadata.json b/Packs/AgariPhishingDefense/pack_metadata.json
index 73608dd5fee4..2b0d2a119312 100644
--- a/Packs/AgariPhishingDefense/pack_metadata.json
+++ b/Packs/AgariPhishingDefense/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Agari Phishing Defense",
"description": "Use the Agari Phishing Defense integration to retrieve Policy Events as Incidents, retrieve messages and remediate suspected messages.",
"support": "partner",
- "currentVersion": "1.1.15",
+ "currentVersion": "1.1.16",
"author": "Agari",
"url": "https://www.agari.com/support/",
"email": "support@agari.com",
diff --git a/Packs/ApiModules/ReleaseNotes/2_2_21.md b/Packs/ApiModules/ReleaseNotes/2_2_21.md
new file mode 100644
index 000000000000..0de76861a8b8
--- /dev/null
+++ b/Packs/ApiModules/ReleaseNotes/2_2_21.md
@@ -0,0 +1,4 @@
+
+#### Scripts
+##### CoreIRApiModule
+- Improved implementation of *xdr-get-incidents* by unifying similar code blocks.
\ No newline at end of file
diff --git a/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule.py b/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule.py
index ab00ae5edc98..b77e1c25db64 100644
--- a/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule.py
+++ b/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule.py
@@ -148,6 +148,136 @@ def __init__(self, base_url: str, headers: dict, timeout: int = 120, proxy: bool
super().__init__(base_url=base_url, headers=headers, proxy=proxy, verify=verify)
self.timeout = timeout
+ def get_incidents(self, incident_id_list=None, lte_modification_time=None, gte_modification_time=None,
+ lte_creation_time=None, gte_creation_time=None, status=None, starred=None,
+ starred_incidents_fetch_window=None, sort_by_modification_time=None, sort_by_creation_time=None,
+ page_number=0, limit=100, gte_creation_time_milliseconds=0):
+ """
+ Filters and returns incidents
+
+ :param incident_id_list: List of incident ids - must be list
+ :param lte_modification_time: string of time format "2019-12-31T23:59:00"
+ :param gte_modification_time: string of time format "2019-12-31T23:59:00"
+ :param lte_creation_time: string of time format "2019-12-31T23:59:00"
+ :param gte_creation_time: string of time format "2019-12-31T23:59:00"
+ :param starred_incidents_fetch_window: string of time format "2019-12-31T23:59:00"
+ :param starred: True if the incident is starred, else False
+ :param status: string of status
+ :param sort_by_modification_time: optional - enum (asc,desc)
+ :param sort_by_creation_time: optional - enum (asc,desc)
+ :param page_number: page number
+ :param limit: maximum number of incidents to return per page
+ :param gte_creation_time_milliseconds: greater than time in milliseconds
+ :return:
+ """
+ search_from = page_number * limit
+ search_to = search_from + limit
+
+ request_data = {
+ 'search_from': search_from,
+ 'search_to': search_to,
+ }
+
+ if sort_by_creation_time and sort_by_modification_time:
+ raise ValueError('Should be provide either sort_by_creation_time or '
+ 'sort_by_modification_time. Can\'t provide both')
+ if sort_by_creation_time:
+ request_data['sort'] = {
+ 'field': 'creation_time',
+ 'keyword': sort_by_creation_time
+ }
+ elif sort_by_modification_time:
+ request_data['sort'] = {
+ 'field': 'modification_time',
+ 'keyword': sort_by_modification_time
+ }
+
+ filters = []
+ if incident_id_list is not None and len(incident_id_list) > 0:
+ filters.append({
+ 'field': 'incident_id_list',
+ 'operator': 'in',
+ 'value': incident_id_list
+ })
+
+ if status:
+ filters.append({
+ 'field': 'status',
+ 'operator': 'eq',
+ 'value': status
+ })
+
+ if starred and starred_incidents_fetch_window:
+ filters.append({
+ 'field': 'starred',
+ 'operator': 'eq',
+ 'value': True
+ })
+ filters.append({
+ 'field': 'creation_time',
+ 'operator': 'gte',
+ 'value': starred_incidents_fetch_window
+ })
+ if demisto.command() == 'fetch-incidents':
+ if len(filters) > 0:
+ request_data['filters'] = filters
+ incidents = self.handle_fetch_starred_incidents(limit, page_number, request_data)
+ return incidents
+
+ else:
+ if lte_creation_time:
+ filters.append({
+ 'field': 'creation_time',
+ 'operator': 'lte',
+ 'value': date_to_timestamp(lte_creation_time, TIME_FORMAT)
+ })
+
+ if gte_creation_time:
+ filters.append({
+ 'field': 'creation_time',
+ 'operator': 'gte',
+ 'value': date_to_timestamp(gte_creation_time, TIME_FORMAT)
+ })
+
+ if lte_modification_time:
+ filters.append({
+ 'field': 'modification_time',
+ 'operator': 'lte',
+ 'value': date_to_timestamp(lte_modification_time, TIME_FORMAT)
+ })
+
+ if gte_modification_time:
+ filters.append({
+ 'field': 'modification_time',
+ 'operator': 'gte',
+ 'value': date_to_timestamp(gte_modification_time, TIME_FORMAT)
+ })
+
+ if gte_creation_time_milliseconds > 0:
+ filters.append({
+ 'field': 'creation_time',
+ 'operator': 'gte',
+ 'value': gte_creation_time_milliseconds
+ })
+
+ if len(filters) > 0:
+ request_data['filters'] = filters
+
+ res = self._http_request(
+ method='POST',
+ url_suffix='/incidents/get_incidents/',
+ json_data={'request_data': request_data},
+ headers=self._headers,
+ timeout=self.timeout
+ )
+ incidents = res.get('reply', {}).get('incidents', [])
+
+ return incidents
+
+ def handle_fetch_starred_incidents(self, limit: int, page_number: int, request_data: Dict[Any, Any]) -> List[Any]:
+ """Called from get_incidents if the command is fetch-incidents. Implement in child classes."""
+ return []
+
def get_endpoints(self,
endpoint_id_list=None,
dist_name=None,
@@ -3969,3 +4099,101 @@ def _warn_if_module_is_disabled(e: DemistoException) -> None:
outputs_key_field='id',
outputs=outputs,
)
+
+
+def get_incidents_command(client, args):
+ """
+ Retrieve a list of incidents from XDR, filtered by some filters.
+ """
+
+ # sometimes incident id can be passed as integer from the playbook
+ incident_id_list = args.get('incident_id_list')
+ if isinstance(incident_id_list, int):
+ incident_id_list = str(incident_id_list)
+
+ incident_id_list = argToList(incident_id_list)
+ # make sure all the ids passed are strings and not integers
+ for index, id_ in enumerate(incident_id_list):
+ if isinstance(id_, int | float):
+ incident_id_list[index] = str(id_)
+
+ lte_modification_time = args.get('lte_modification_time')
+ gte_modification_time = args.get('gte_modification_time')
+ since_modification_time = args.get('since_modification_time')
+
+ if since_modification_time and gte_modification_time:
+ raise ValueError('Can\'t set both since_modification_time and lte_modification_time')
+ if since_modification_time:
+ gte_modification_time, _ = parse_date_range(since_modification_time, TIME_FORMAT)
+
+ lte_creation_time = args.get('lte_creation_time')
+ gte_creation_time = args.get('gte_creation_time')
+ since_creation_time = args.get('since_creation_time')
+
+ if since_creation_time and gte_creation_time:
+ raise ValueError('Can\'t set both since_creation_time and lte_creation_time')
+ if since_creation_time:
+ gte_creation_time, _ = parse_date_range(since_creation_time, TIME_FORMAT)
+
+ statuses = argToList(args.get('status', ''))
+
+ starred = args.get('starred')
+ starred_incidents_fetch_window = args.get('starred_incidents_fetch_window', '3 days')
+ starred_incidents_fetch_window, _ = parse_date_range(starred_incidents_fetch_window, to_timestamp=True)
+
+ sort_by_modification_time = args.get('sort_by_modification_time')
+ sort_by_creation_time = args.get('sort_by_creation_time')
+
+ page = int(args.get('page', 0))
+ limit = int(args.get('limit', 100))
+
+ # If no filters were given, return a meaningful error message
+ if not incident_id_list and (not lte_modification_time and not gte_modification_time and not since_modification_time
+ and not lte_creation_time and not gte_creation_time and not since_creation_time
+ and not statuses and not starred):
+ raise ValueError("Specify a query for the incidents.\nFor example:"
+ " since_creation_time=\"1 year\" sort_by_creation_time=\"desc\" limit=10")
+
+ if statuses:
+ raw_incidents = []
+
+ for status in statuses:
+ raw_incidents += client.get_incidents(
+ incident_id_list=incident_id_list,
+ lte_modification_time=lte_modification_time,
+ gte_modification_time=gte_modification_time,
+ lte_creation_time=lte_creation_time,
+ gte_creation_time=gte_creation_time,
+ sort_by_creation_time=sort_by_creation_time,
+ sort_by_modification_time=sort_by_modification_time,
+ page_number=page,
+ limit=limit,
+ status=status,
+ starred=starred,
+ starred_incidents_fetch_window=starred_incidents_fetch_window,
+ )
+
+ if len(raw_incidents) > limit:
+ raw_incidents = raw_incidents[:limit]
+ else:
+ raw_incidents = client.get_incidents(
+ incident_id_list=incident_id_list,
+ lte_modification_time=lte_modification_time,
+ gte_modification_time=gte_modification_time,
+ lte_creation_time=lte_creation_time,
+ gte_creation_time=gte_creation_time,
+ sort_by_creation_time=sort_by_creation_time,
+ sort_by_modification_time=sort_by_modification_time,
+ page_number=page,
+ limit=limit,
+ starred=starred,
+ starred_incidents_fetch_window=starred_incidents_fetch_window,
+ )
+
+ return (
+ tableToMarkdown('Incidents', raw_incidents),
+ {
+ f'{args.get("integration_context_brand", "CoreApiModule")}.Incident(val.incident_id==obj.incident_id)': raw_incidents
+ },
+ raw_incidents
+ )
diff --git a/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule_test.py b/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule_test.py
index d76b684a63de..0998019aa670 100644
--- a/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule_test.py
+++ b/Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule_test.py
@@ -12,7 +12,7 @@
from CoreIRApiModule import CoreClient
from CoreIRApiModule import add_tag_to_endpoints_command, remove_tag_from_endpoints_command, quarantine_files_command, \
isolate_endpoint_command, list_user_groups_command, parse_user_groups, list_users_command, list_roles_command, \
- change_user_role_command, list_risky_users_or_host_command, enrich_error_message_id_group_role
+ change_user_role_command, list_risky_users_or_host_command, enrich_error_message_id_group_role, get_incidents_command
test_client = CoreClient(
base_url='https://test_api.com/public_api/v1', headers={}
@@ -3697,3 +3697,95 @@ def __init__(self, status_code) -> None:
DemistoException(message=error_message, res=MockException(500)), "test", "test"
)
assert error_response == expected_error_message[0]
+
+
+def get_incident_by_status(incident_id_list=None, lte_modification_time=None, gte_modification_time=None,
+ lte_creation_time=None, gte_creation_time=None, starred=None,
+ starred_incidents_fetch_window=None, status=None, sort_by_modification_time=None,
+ sort_by_creation_time=None, page_number=0, limit=100, gte_creation_time_milliseconds=0):
+ """
+    Simulates the client.get_incidents method for test_fetch_incidents_filtered_by_status
+    and test_get_incident_list_by_status.
+    Receives the status as a string and returns, from the JSON file, only the incidents
+    whose status matches the given status.
+ """
+ incidents_list = load_test_data('./test_data/get_incidents_list.json')['reply']['incidents']
+ return [incident for incident in incidents_list if incident['status'] == status]
+
+
+class TestGetIncidents:
+
+ def test_get_incident_list(self, requests_mock):
+ """
+ Given: Incidents returned from client.
+ When: Running get_incidents_command.
+ Then: Ensure the outputs contain the incidents from the client.
+ """
+
+ get_incidents_list_response = load_test_data('./test_data/get_incidents_list.json')
+ requests_mock.post(f'{Core_URL}/public_api/v1/incidents/get_incidents/', json=get_incidents_list_response)
+
+ client = CoreClient(
+ base_url=f'{Core_URL}/public_api/v1', headers={}
+ )
+
+ args = {
+ 'incident_id_list': '1 day'
+ }
+ _, outputs, _ = get_incidents_command(client, args)
+
+ expected_output = {
+ 'CoreApiModule.Incident(val.incident_id==obj.incident_id)':
+ get_incidents_list_response.get('reply').get('incidents')
+ }
+ assert expected_output == outputs
+
+ def test_get_incident_list_by_status(self, mocker):
+ """
+ Given: A status query, and incidents filtered by the query.
+ When: Running get_incidents_command.
+ Then: Ensure outputs contain the incidents from the client.
+ """
+
+ get_incidents_list_response = load_test_data('./test_data/get_incidents_list.json')
+
+ client = CoreClient(
+ base_url=f'{Core_URL}/public_api/v1', headers={}
+ )
+
+ args = {
+ 'incident_id_list': '1 day',
+ 'status': 'under_investigation,new'
+ }
+ mocker.patch.object(client, 'get_incidents', side_effect=get_incident_by_status)
+
+ _, outputs, _ = get_incidents_command(client, args)
+
+ expected_output = {
+ 'CoreApiModule.Incident(val.incident_id==obj.incident_id)':
+ get_incidents_list_response.get('reply').get('incidents')
+ }
+ assert expected_output == outputs
+
+ def test_get_starred_incident_list(self, requests_mock):
+ """
+ Given: A query with starred parameters.
+ When: Running get_incidents_command.
+ Then: Ensure the starred output is returned.
+ """
+
+ get_incidents_list_response = load_test_data('./test_data/get_starred_incidents_list.json')
+ requests_mock.post(f'{Core_URL}/public_api/v1/incidents/get_incidents/', json=get_incidents_list_response)
+
+ client = CoreClient(
+ base_url=f'{Core_URL}/public_api/v1', headers={}
+ )
+
+ args = {
+ 'incident_id_list': '1 day',
+ 'starred': True,
+ 'starred_incidents_fetch_window': '3 days'
+ }
+ _, outputs, _ = get_incidents_command(client, args)
+
+ assert outputs['CoreApiModule.Incident(val.incident_id==obj.incident_id)'][0]['starred'] is True
diff --git a/Packs/ApiModules/Scripts/CoreIRApiModule/test_data/get_incidents_list.json b/Packs/ApiModules/Scripts/CoreIRApiModule/test_data/get_incidents_list.json
new file mode 100644
index 000000000000..b2c9d9921db3
--- /dev/null
+++ b/Packs/ApiModules/Scripts/CoreIRApiModule/test_data/get_incidents_list.json
@@ -0,0 +1,54 @@
+{
+ "reply": {
+ "total_count": 1,
+ "result_count": 1,
+ "incidents": [
+ {
+ "incident_id": "1",
+ "creation_time": 1575806909185,
+ "modification_time": 1575813875168,
+ "detection_time": null,
+ "status": "under_investigation",
+ "severity": "medium",
+ "description": "'Local Analysis Malware' generated by XDR Agent detected on host AAAAA involving user Administrator",
+ "assigned_user_mail": null,
+ "assigned_user_pretty_name": null,
+ "alert_count": 1,
+ "low_severity_alert_count": 0,
+ "med_severity_alert_count": 1,
+ "high_severity_alert_count": 0,
+ "user_count": 1,
+ "host_count": 1,
+ "notes": null,
+ "resolve_comment": null,
+ "manual_severity": null,
+ "manual_description": null,
+ "xdr_url": "https://demisto.hello.com/incident-view/1",
+ "starred": false
+ },
+ {
+ "incident_id": "2",
+ "creation_time": 1575806909185,
+ "modification_time": 1575813875168,
+ "detection_time": null,
+ "status": "new",
+ "severity": "high",
+ "description": "'Local Analysis Malware' generated by XDR Agent detected on host BBBBB involving user Administrator",
+ "assigned_user_mail": null,
+ "assigned_user_pretty_name": null,
+ "alert_count": 1,
+ "low_severity_alert_count": 0,
+ "med_severity_alert_count": 1,
+ "high_severity_alert_count": 0,
+ "user_count": 1,
+ "host_count": 1,
+ "notes": null,
+ "resolve_comment": null,
+ "manual_severity": null,
+ "manual_description": null,
+ "xdr_url": "https://demisto.hello.com/incident-view/2",
+ "starred": false
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/Packs/ApiModules/Scripts/CoreIRApiModule/test_data/get_starred_incidents_list.json b/Packs/ApiModules/Scripts/CoreIRApiModule/test_data/get_starred_incidents_list.json
new file mode 100644
index 000000000000..ec265b182c62
--- /dev/null
+++ b/Packs/ApiModules/Scripts/CoreIRApiModule/test_data/get_starred_incidents_list.json
@@ -0,0 +1,54 @@
+{
+ "reply": {
+ "total_count": 2,
+ "result_count": 2,
+ "incidents": [
+ {
+ "incident_id": "3",
+ "creation_time": 1575806909185,
+ "modification_time": 1575813875168,
+ "detection_time": null,
+ "status": "under_investigation",
+ "severity": "medium",
+ "description": "'Local Analysis Malware' generated by XDR Agent detected on host AAAAA involving user Administrator",
+ "assigned_user_mail": null,
+ "assigned_user_pretty_name": null,
+ "alert_count": 1,
+ "low_severity_alert_count": 0,
+ "med_severity_alert_count": 1,
+ "high_severity_alert_count": 0,
+ "user_count": 1,
+ "host_count": 1,
+ "notes": null,
+ "resolve_comment": null,
+ "manual_severity": null,
+ "manual_description": null,
+ "xdr_url": "https://demisto.hello.com/incident-view/1",
+ "starred": true
+ },
+ {
+ "incident_id": "4",
+ "creation_time": 1575806909185,
+ "modification_time": 1575813875168,
+ "detection_time": null,
+ "status": "new",
+ "severity": "high",
+ "description": "'Local Analysis Malware' generated by XDR Agent detected on host BBBBB involving user Administrator",
+ "assigned_user_mail": null,
+ "assigned_user_pretty_name": null,
+ "alert_count": 1,
+ "low_severity_alert_count": 0,
+ "med_severity_alert_count": 1,
+ "high_severity_alert_count": 0,
+ "user_count": 1,
+ "host_count": 1,
+ "notes": null,
+ "resolve_comment": null,
+ "manual_severity": null,
+ "manual_description": null,
+ "xdr_url": "https://demisto.hello.com/incident-view/2",
+ "starred": true
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/Packs/ApiModules/Scripts/TAXII2ApiModule/TAXII2ApiModule.py b/Packs/ApiModules/Scripts/TAXII2ApiModule/TAXII2ApiModule.py
index 5d750a4defd9..e8169418fab4 100644
--- a/Packs/ApiModules/Scripts/TAXII2ApiModule/TAXII2ApiModule.py
+++ b/Packs/ApiModules/Scripts/TAXII2ApiModule/TAXII2ApiModule.py
@@ -1,3 +1,5 @@
+import logging
+
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
# pylint: disable=E9010, E9011
@@ -17,6 +19,20 @@
# disable insecure warnings
urllib3.disable_warnings()
+
+class SuppressWarningFilter(logging.Filter): # pragma: no cover
+ def filter(self, record):
+ # Suppress all logger records, but send the important ones to demisto logger
+ if record.levelno == logging.WARNING:
+ demisto.debug(record.getMessage())
+ elif record.levelno in [logging.ERROR, logging.CRITICAL]:
+ demisto.error(record.getMessage())
+ return False
+
+
+v21_logger = logging.getLogger("taxii2client.v21")
+v21_logger.addFilter(SuppressWarningFilter())
+
# CONSTANTS
TAXII_VER_2_0 = "2.0"
TAXII_VER_2_1 = "2.1"
@@ -182,23 +198,23 @@ def reached_limit(limit: int, element_count: int):
class Taxii2FeedClient:
def __init__(
- self,
- url: str,
- collection_to_fetch,
- proxies,
- verify: bool,
- objects_to_fetch: list[str],
- skip_complex_mode: bool = False,
- username: Optional[str] = None,
- password: Optional[str] = None,
- field_map: Optional[dict] = None,
- tags: Optional[list] = None,
- tlp_color: Optional[str] = None,
- limit_per_request: int = DFLT_LIMIT_PER_REQUEST,
- certificate: str = None,
- key: str = None,
- default_api_root: str = None,
- update_custom_fields: bool = False
+ self,
+ url: str,
+ collection_to_fetch,
+ proxies,
+ verify: bool,
+ objects_to_fetch: list[str],
+ skip_complex_mode: bool = False,
+ username: Optional[str] = None,
+ password: Optional[str] = None,
+ field_map: Optional[dict] = None,
+ tags: Optional[list] = None,
+ tlp_color: Optional[str] = None,
+ limit_per_request: int = DFLT_LIMIT_PER_REQUEST,
+ certificate: str = None,
+ key: str = None,
+ default_api_root: str = None,
+ update_custom_fields: bool = False
):
"""
TAXII 2 Client used to poll and parse indicators in XSOAR formar
@@ -282,7 +298,7 @@ def init_server(self, version=TAXII_VER_2_1):
)
if self.auth_header:
# add auth_header to the session object
- self._conn.session.headers = merge_setting(self._conn.session.headers, # type: ignore[attr-defined]
+ self._conn.session.headers = merge_setting(self._conn.session.headers, # type: ignore[attr-defined]
{self.auth_header: self.auth_key},
dict_class=CaseInsensitiveDict)
@@ -879,7 +895,7 @@ def parse_intrusion_set(self, intrusion_set_obj: dict[str, Any]) -> list[dict[st
return [intrusion_set]
def parse_general_sco_indicator(
- self, sco_object: dict[str, Any], value_mapping: str = 'value'
+ self, sco_object: dict[str, Any], value_mapping: str = 'value'
) -> list[dict[str, Any]]:
"""
Parses a single SCO indicator.
@@ -1161,6 +1177,7 @@ def build_iterator(self, limit: int = -1, **kwargs) -> list[dict[str, str]]:
return []
try:
+ demisto.debug(f"Fetching {page_size} objects from TAXII server")
envelopes = self.poll_collection(page_size, **kwargs) # got data from server
indicators = self.load_stix_objects_from_envelope(envelopes, limit)
except InvalidJSONError as e:
@@ -1210,46 +1227,53 @@ def load_stix_objects_from_envelope(self, envelopes: types.GeneratorType, limit:
def parse_generator_type_envelope(self, envelopes: types.GeneratorType, parse_objects_func, limit: int = -1):
indicators = []
relationships_lst = []
- for envelope in envelopes:
- stix_objects = envelope.get("objects")
- if not stix_objects:
- # no fetched objects
- break
-
- # we should build the id_to_object dict before iteration as some object reference each other
- self.id_to_object.update(
- {
- obj.get('id'): obj for obj in stix_objects
- if obj.get('type') not in ['extension-definition', 'relationship']
- }
- )
-
- # now we have a list of objects, go over each obj, save id with obj, parse the obj
- for obj in stix_objects:
- obj_type = obj.get('type')
-
- # we currently don't support extension object
- if obj_type == 'extension-definition':
- continue
- elif obj_type == 'relationship':
- relationships_lst.append(obj)
- continue
-
- if not parse_objects_func.get(obj_type):
- demisto.debug(f'There is no parsing function for object type {obj_type}, '
- f'available parsing functions are for types: {",".join(parse_objects_func.keys())}.')
- continue
- if result := parse_objects_func[obj_type](obj):
- indicators.extend(result)
- self.update_last_modified_indicator_date(obj.get("modified"))
-
- if reached_limit(limit, len(indicators)):
- return indicators, relationships_lst
+ try:
+ for envelope in envelopes:
+ stix_objects = envelope.get("objects")
+ if not stix_objects:
+ # no fetched objects
+ break
+ # we should build the id_to_object dict before iteration as some object reference each other
+ self.id_to_object.update(
+ {
+ obj.get('id'): obj for obj in stix_objects
+ if obj.get('type') not in ['extension-definition', 'relationship']
+ }
+ )
+ # now we have a list of objects, go over each obj, save id with obj, parse the obj
+ for obj in stix_objects:
+ obj_type = obj.get('type')
+
+ # we currently don't support extension object
+ if obj_type == 'extension-definition':
+ demisto.debug(f'There is no parsing function for object type "extension-definition", for object {obj}.')
+ continue
+ elif obj_type == 'relationship':
+ relationships_lst.append(obj)
+ continue
+
+ if not parse_objects_func.get(obj_type):
+ demisto.debug(f'There is no parsing function for object type {obj_type}, for object {obj}.')
+
+ continue
+ if result := parse_objects_func[obj_type](obj):
+ indicators.extend(result)
+ self.update_last_modified_indicator_date(obj.get("modified"))
+
+ if reached_limit(limit, len(indicators)):
+ demisto.debug("Reached limit of indicators to fetch")
+ return indicators, relationships_lst
+ except Exception as e:
+ if len(indicators) == 0:
+ demisto.debug("No Indicator were parsed")
+ raise e
+ demisto.debug(f"Failed while parsing envelopes, succeeded to retrieve {len(indicators)} indicators.")
+ demisto.debug("Finished parsing all objects")
return indicators, relationships_lst
def poll_collection(
- self, page_size: int, **kwargs
+ self, page_size: int, **kwargs
) -> types.GeneratorType:
"""
Polls a taxii collection
@@ -1280,7 +1304,7 @@ def get_page_size(self, max_limit: int, cur_limit: int) -> int:
@staticmethod
def extract_indicators_from_stix_objects(
- stix_objs: list[dict[str, str]], required_objects: list[str]
+ stix_objs: list[dict[str, str]], required_objects: list[str]
) -> list[dict[str, str]]:
"""
Extracts indicators from taxii objects
@@ -1294,11 +1318,11 @@ def extract_indicators_from_stix_objects(
return extracted_objs
def get_indicators_from_indicator_groups(
- self,
- indicator_groups: list[tuple[str, str]],
- indicator_obj: dict[str, str],
- indicator_types: dict[str, str],
- field_map: dict[str, str],
+ self,
+ indicator_groups: list[tuple[str, str]],
+ indicator_obj: dict[str, str],
+ indicator_types: dict[str, str],
+ field_map: dict[str, str],
) -> list[dict[str, str]]:
"""
Get indicators from indicator regex groups
@@ -1383,7 +1407,7 @@ def create_indicator(self, indicator_obj, type_, value, field_map):
@staticmethod
def extract_indicator_groups_from_pattern(
- pattern: str, regexes: list
+ pattern: str, regexes: list
) -> list[tuple[str, str]]:
"""
Extracts indicator [`type`, `indicator`] groups from pattern
diff --git a/Packs/Arkime/Integrations/Arkime/Arkime.yml b/Packs/Arkime/Integrations/Arkime/Arkime.yml
index c7f5bca03df9..3c1f13a84252 100644
--- a/Packs/Arkime/Integrations/Arkime/Arkime.yml
+++ b/Packs/Arkime/Integrations/Arkime/Arkime.yml
@@ -29,7 +29,7 @@ script:
script: ''
type: python
subtype: python3
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
commands:
- name: arkime-connection-list
description: Gets a list of nodes and links and returns them to the client.
diff --git a/Packs/Arkime/ReleaseNotes/1_0_21.md b/Packs/Arkime/ReleaseNotes/1_0_21.md
new file mode 100644
index 000000000000..e1c5bb4de570
--- /dev/null
+++ b/Packs/Arkime/ReleaseNotes/1_0_21.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Arkime
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/Arkime/pack_metadata.json b/Packs/Arkime/pack_metadata.json
index 3ad23aefcab1..f7468d891a8d 100644
--- a/Packs/Arkime/pack_metadata.json
+++ b/Packs/Arkime/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Arkime",
"description": "Arkime (formerly Moloch) is a large scale, open source, indexed packet capture and search tool.",
"support": "xsoar",
- "currentVersion": "1.0.20",
+ "currentVersion": "1.0.21",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/Armis/Integrations/Armis/Armis.yml b/Packs/Armis/Integrations/Armis/Armis.yml
index 8b309f667923..bde64a901b77 100644
--- a/Packs/Armis/Integrations/Armis/Armis.yml
+++ b/Packs/Armis/Integrations/Armis/Armis.yml
@@ -385,7 +385,7 @@ script:
- contextPath: Armis.Device.visibility
description: The visibility of the device.
type: String
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
isfetch: true
runonce: false
script: '-'
diff --git a/Packs/Armis/Integrations/ArmisEventCollector/ArmisEventCollector.yml b/Packs/Armis/Integrations/ArmisEventCollector/ArmisEventCollector.yml
index 6a66cda657a7..35d8acd72c44 100644
--- a/Packs/Armis/Integrations/ArmisEventCollector/ArmisEventCollector.yml
+++ b/Packs/Armis/Integrations/ArmisEventCollector/ArmisEventCollector.yml
@@ -99,7 +99,7 @@ script:
script: '-'
type: python
subtype: python3
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
marketplaces:
- marketplacev2
fromversion: 6.10.0
diff --git a/Packs/Armis/ReleaseNotes/1_1_12.md b/Packs/Armis/ReleaseNotes/1_1_12.md
new file mode 100644
index 000000000000..6683c5f934b7
--- /dev/null
+++ b/Packs/Armis/ReleaseNotes/1_1_12.md
@@ -0,0 +1,5 @@
+#### Integrations
+##### Armis Event Collector
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
+##### Armis
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/Armis/pack_metadata.json b/Packs/Armis/pack_metadata.json
index a4cbe3cf25bc..b56acdf69833 100755
--- a/Packs/Armis/pack_metadata.json
+++ b/Packs/Armis/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Armis",
"description": "Agentless and passive security platform that sees, identifies, and classifies every device, tracks behavior, identifies threats, and takes action automatically to protect critical information and systems",
"support": "partner",
- "currentVersion": "1.1.11",
+ "currentVersion": "1.1.12",
"author": "Armis Corporation",
"url": "https://support.armis.com/",
"email": "support@armis.com",
diff --git a/Packs/Asset/ReleaseNotes/1_0_7.md b/Packs/Asset/ReleaseNotes/1_0_7.md
new file mode 100644
index 000000000000..203fd737cf46
--- /dev/null
+++ b/Packs/Asset/ReleaseNotes/1_0_7.md
@@ -0,0 +1,3 @@
+## Asset
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/Asset/pack_metadata.json b/Packs/Asset/pack_metadata.json
index 0fab926c4cc7..31b4b3d40efe 100644
--- a/Packs/Asset/pack_metadata.json
+++ b/Packs/Asset/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Asset",
"description": "Base pack for any packs using asset fields.",
"support": "xsoar",
- "currentVersion": "1.0.6",
+ "currentVersion": "1.0.7",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AutoFocus/ReleaseNotes/2_1_19.md b/Packs/AutoFocus/ReleaseNotes/2_1_19.md
new file mode 100644
index 000000000000..fee86f06e103
--- /dev/null
+++ b/Packs/AutoFocus/ReleaseNotes/2_1_19.md
@@ -0,0 +1,3 @@
+## AutoFocus by Palo Alto Networks
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/AutoFocus/pack_metadata.json b/Packs/AutoFocus/pack_metadata.json
index 74ecb51420db..57a0460de00b 100644
--- a/Packs/AutoFocus/pack_metadata.json
+++ b/Packs/AutoFocus/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "AutoFocus by Palo Alto Networks",
"description": "Use the Palo Alto Networks AutoFocus integration to distinguish the most\n important threats from everyday commodity attacks.",
"support": "xsoar",
- "currentVersion": "2.1.18",
+ "currentVersion": "2.1.19",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/Azure-Enrichment-Remediation/ReleaseNotes/1_1_12.md b/Packs/Azure-Enrichment-Remediation/ReleaseNotes/1_1_12.md
new file mode 100644
index 000000000000..124d4dca3a6c
--- /dev/null
+++ b/Packs/Azure-Enrichment-Remediation/ReleaseNotes/1_1_12.md
@@ -0,0 +1,3 @@
+## Azure Enrichment and Remediation
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/Azure-Enrichment-Remediation/pack_metadata.json b/Packs/Azure-Enrichment-Remediation/pack_metadata.json
index f50f61ce6191..50747cb25863 100644
--- a/Packs/Azure-Enrichment-Remediation/pack_metadata.json
+++ b/Packs/Azure-Enrichment-Remediation/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Azure Enrichment and Remediation",
"description": "Playbooks using multiple Azure content packs for enrichment and remediation purposes",
"support": "xsoar",
- "currentVersion": "1.1.11",
+ "currentVersion": "1.1.12",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AzureCompute/ReleaseNotes/1_2_20.md b/Packs/AzureCompute/ReleaseNotes/1_2_20.md
new file mode 100644
index 000000000000..2ed829c82a52
--- /dev/null
+++ b/Packs/AzureCompute/ReleaseNotes/1_2_20.md
@@ -0,0 +1,3 @@
+## Azure Compute
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/AzureCompute/pack_metadata.json b/Packs/AzureCompute/pack_metadata.json
index e39b5e6765e8..8ae62c4c94f9 100644
--- a/Packs/AzureCompute/pack_metadata.json
+++ b/Packs/AzureCompute/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Azure Compute",
"description": "Create and Manage Azure Virtual Machines",
"support": "xsoar",
- "currentVersion": "1.2.19",
+ "currentVersion": "1.2.20",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AzureNetworkSecurityGroups/ReleaseNotes/1_2_23.md b/Packs/AzureNetworkSecurityGroups/ReleaseNotes/1_2_23.md
new file mode 100644
index 000000000000..1c49af22c1cc
--- /dev/null
+++ b/Packs/AzureNetworkSecurityGroups/ReleaseNotes/1_2_23.md
@@ -0,0 +1,3 @@
+## Azure Network Security Groups
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/AzureNetworkSecurityGroups/pack_metadata.json b/Packs/AzureNetworkSecurityGroups/pack_metadata.json
index e57b60f6c74f..00807e845399 100644
--- a/Packs/AzureNetworkSecurityGroups/pack_metadata.json
+++ b/Packs/AzureNetworkSecurityGroups/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Azure Network Security Groups",
"description": "Azure Network Security Groups are used to filter network traffic to and from Azure resources in an Azure virtual network",
"support": "xsoar",
- "currentVersion": "1.2.22",
+ "currentVersion": "1.2.23",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AzureStorageContainer/Integrations/AzureStorageContainer/AzureStorageContainer.yml b/Packs/AzureStorageContainer/Integrations/AzureStorageContainer/AzureStorageContainer.yml
index be60dce471d5..156c9ec35359 100644
--- a/Packs/AzureStorageContainer/Integrations/AzureStorageContainer/AzureStorageContainer.yml
+++ b/Packs/AzureStorageContainer/Integrations/AzureStorageContainer/AzureStorageContainer.yml
@@ -315,7 +315,7 @@ script:
type: unknown
description: create SAS token for container.
name: azure-storage-container-sas-create
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
runonce: false
script: ''
subtype: python3
diff --git a/Packs/AzureStorageContainer/ReleaseNotes/1_0_19.md b/Packs/AzureStorageContainer/ReleaseNotes/1_0_19.md
new file mode 100644
index 000000000000..4faa3cbac556
--- /dev/null
+++ b/Packs/AzureStorageContainer/ReleaseNotes/1_0_19.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Azure Storage Container
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/AzureStorageContainer/pack_metadata.json b/Packs/AzureStorageContainer/pack_metadata.json
index c95266f33b17..13ddecf08408 100644
--- a/Packs/AzureStorageContainer/pack_metadata.json
+++ b/Packs/AzureStorageContainer/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Azure Storage Container",
"description": "Create and Manage Azure Storage Container services.",
"support": "xsoar",
- "currentVersion": "1.0.18",
+ "currentVersion": "1.0.19",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/AzureStorageQueue/Integrations/AzureStorageQueue/AzureStorageQueue.yml b/Packs/AzureStorageQueue/Integrations/AzureStorageQueue/AzureStorageQueue.yml
index 9e55cfbe6a87..8380df8672ab 100644
--- a/Packs/AzureStorageQueue/Integrations/AzureStorageQueue/AzureStorageQueue.yml
+++ b/Packs/AzureStorageQueue/Integrations/AzureStorageQueue/AzureStorageQueue.yml
@@ -242,7 +242,7 @@ script:
description: Delete all messages from the specified Queue.
execution: true
name: azure-storage-queue-message-clear
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
isfetch: true
runonce: false
script: '-'
diff --git a/Packs/AzureStorageQueue/ReleaseNotes/1_0_19.md b/Packs/AzureStorageQueue/ReleaseNotes/1_0_19.md
new file mode 100644
index 000000000000..d3e0d7ed785a
--- /dev/null
+++ b/Packs/AzureStorageQueue/ReleaseNotes/1_0_19.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Azure Storage Queue
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/AzureStorageQueue/pack_metadata.json b/Packs/AzureStorageQueue/pack_metadata.json
index a2cc1901fd6a..3b6d04aa70f0 100644
--- a/Packs/AzureStorageQueue/pack_metadata.json
+++ b/Packs/AzureStorageQueue/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Azure Storage Queue",
"description": "Create and Manage Azure Storage Queues and Messages.",
"support": "xsoar",
- "currentVersion": "1.0.18",
+ "currentVersion": "1.0.19",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/Barracuda_Cloudgen_Firewall/ParsingRules/Barracuda_Cloudgen_FirewallParsingRules/Barracuda_Cloudgen_FirewallParsingRules.xif b/Packs/Barracuda_Cloudgen_Firewall/ParsingRules/Barracuda_Cloudgen_FirewallParsingRules/Barracuda_Cloudgen_FirewallParsingRules.xif
index 81d316634388..50f04d17f2d4 100644
--- a/Packs/Barracuda_Cloudgen_Firewall/ParsingRules/Barracuda_Cloudgen_FirewallParsingRules/Barracuda_Cloudgen_FirewallParsingRules.xif
+++ b/Packs/Barracuda_Cloudgen_Firewall/ParsingRules/Barracuda_Cloudgen_FirewallParsingRules/Barracuda_Cloudgen_FirewallParsingRules.xif
@@ -13,7 +13,7 @@ filter _raw_log ~= "^.*(\d{10})\s.*BYF" OR _raw_log ~= "\S{3}\s+\S+\s\d{2}\:\d{2
| alter
tmp_final_time = coalesce(tmp_time1_convert, tmp_time2)
| alter
- tmp_diff_time = timestamp_diff(tmp_final_time , _insert_time , "DAY")
+ tmp_diff_time = timestamp_diff(tmp_final_time , _insert_time , "MILLISECOND")
| alter
tmp_time_after_check = if (tmp_diff_time > 0, parse_timestamp("%F %H:%M:%S%z", replace(to_string(tmp_time2), tmp_time2_gen_year, to_string(subtract (to_number(tmp_time2_gen_year), 1)))), tmp_final_time)
| alter
diff --git a/Packs/Barracuda_Cloudgen_Firewall/ReleaseNotes/1_0_2.md b/Packs/Barracuda_Cloudgen_Firewall/ReleaseNotes/1_0_2.md
new file mode 100644
index 000000000000..dd351d685e29
--- /dev/null
+++ b/Packs/Barracuda_Cloudgen_Firewall/ReleaseNotes/1_0_2.md
@@ -0,0 +1,6 @@
+
+#### Parsing Rules
+
+##### Barracuda Cloudgen Firewall Parsing Rules
+
+Updated the Parsing Rule logic.
diff --git a/Packs/Barracuda_Cloudgen_Firewall/pack_metadata.json b/Packs/Barracuda_Cloudgen_Firewall/pack_metadata.json
index e54aaf14ce3a..ee5726a4b3e9 100644
--- a/Packs/Barracuda_Cloudgen_Firewall/pack_metadata.json
+++ b/Packs/Barracuda_Cloudgen_Firewall/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Barracuda Cloudgen Firewall",
"description": "Barracuda Cloudgen Friewall modeling rule and parsing rule for XSIAM",
"support": "xsoar",
- "currentVersion": "1.0.1",
+ "currentVersion": "1.0.2",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/Base/ReleaseNotes/1_33_19.md b/Packs/Base/ReleaseNotes/1_33_19.md
new file mode 100644
index 000000000000..9fb93e60e7f6
--- /dev/null
+++ b/Packs/Base/ReleaseNotes/1_33_19.md
@@ -0,0 +1,3 @@
+## Base
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/Base/ReleaseNotes/1_33_20.md b/Packs/Base/ReleaseNotes/1_33_20.md
new file mode 100644
index 000000000000..d0d198bf962c
--- /dev/null
+++ b/Packs/Base/ReleaseNotes/1_33_20.md
@@ -0,0 +1,7 @@
+
+#### Scripts
+
+##### DBotFindSimilarIncidentsByIndicators
+
+- Improved implementation for better performance.
+- Updated the Docker image to: *demisto/ml:1.0.0.86222*.
diff --git a/Packs/Base/Scripts/CommonServerPython/CommonServerPython_test.py b/Packs/Base/Scripts/CommonServerPython/CommonServerPython_test.py
index 933c0ad68487..56f16815d74d 100644
--- a/Packs/Base/Scripts/CommonServerPython/CommonServerPython_test.py
+++ b/Packs/Base/Scripts/CommonServerPython/CommonServerPython_test.py
@@ -3236,6 +3236,7 @@ def test_http_client_debug_int_logger_sensitive_query_params(mocker):
class TestParseDateRange:
@staticmethod
+ @freeze_time("2024-01-15 17:00:00 UTC")
def test_utc_time_sanity():
utc_now = datetime.utcnow()
utc_start_time, utc_end_time = parse_date_range('2 days', utc=True)
@@ -3244,6 +3245,7 @@ def test_utc_time_sanity():
assert abs(utc_start_time - utc_end_time).days == 2
@staticmethod
+ @freeze_time("2024-01-15 17:00:00 UTC")
def test_local_time_sanity():
local_now = datetime.now()
local_start_time, local_end_time = parse_date_range('73 minutes', utc=False)
@@ -3252,6 +3254,7 @@ def test_local_time_sanity():
assert abs(local_start_time - local_end_time).seconds / 60 == 73
@staticmethod
+ @freeze_time("2024-01-15 17:00:00 UTC")
def test_with_trailing_spaces():
utc_now = datetime.utcnow()
utc_start_time, utc_end_time = parse_date_range('2 days ', utc=True)
@@ -3269,6 +3272,7 @@ def test_case_insensitive():
assert abs(utc_start_time - utc_end_time).days == 2
@staticmethod
+ @freeze_time("2024-01-15 17:00:00 UTC")
def test_error__invalid_input_format(mocker):
mocker.patch.object(sys, 'exit', side_effect=Exception('mock exit'))
demisto_results = mocker.spy(demisto, 'results')
@@ -3281,6 +3285,7 @@ def test_error__invalid_input_format(mocker):
assert 'date_range must be "number date_range_unit"' in results['Contents']
@staticmethod
+ @freeze_time("2024-01-15 17:00:00 UTC")
def test_error__invalid_time_value_not_a_number(mocker):
mocker.patch.object(sys, 'exit', side_effect=Exception('mock exit'))
demisto_results = mocker.spy(demisto, 'results')
@@ -3293,6 +3298,7 @@ def test_error__invalid_time_value_not_a_number(mocker):
assert 'The time value is invalid' in results['Contents']
@staticmethod
+ @freeze_time("2024-01-15 17:00:00 UTC")
def test_error__invalid_time_value_not_an_integer(mocker):
mocker.patch.object(sys, 'exit', side_effect=Exception('mock exit'))
demisto_results = mocker.spy(demisto, 'results')
@@ -3305,6 +3311,7 @@ def test_error__invalid_time_value_not_an_integer(mocker):
assert 'The time value is invalid' in results['Contents']
@staticmethod
+ @freeze_time("2024-01-15 17:00:00 UTC")
def test_error__invalid_time_unit(mocker):
mocker.patch.object(sys, 'exit', side_effect=Exception('mock exit'))
demisto_results = mocker.spy(demisto, 'results')
diff --git a/Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators.py b/Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators.py
index 1ba14079a065..0759db3d57ed 100644
--- a/Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators.py
+++ b/Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators.py
@@ -9,37 +9,49 @@
import re
import math
+ROUND_SCORING = 2
+PLAYGROUND_PATTERN = "[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}"
+
+# Mutual indicators fields/columns
+INDICATOR_ID_FIELD = "id"
+VALUE_FIELD = "value"
+SCORE_FIELD = "score"
+INVESTIGATION_IDS_FIELD = "investigationIDs"
+INDICATOR_TYPE_FIELD = "indicator_type"
+INDICATOR_LINK_COLUMN = "indicatorID"
+TYPE_COLUMN = "type"
+REPUTATION_COLUMN = "Reputation"
+INVOLVED_INCIDENTS_COUNT_COLUMN = "involvedIncidentsCount"
+
+INDICATOR_FIELDS_TO_POPULATE_FROM_QUERY = [
+ INDICATOR_ID_FIELD, INDICATOR_TYPE_FIELD, INVESTIGATION_IDS_FIELD, SCORE_FIELD, VALUE_FIELD
+]
+MUTUAL_INDICATORS_HEADERS = [
+ INDICATOR_LINK_COLUMN, VALUE_FIELD, TYPE_COLUMN, REPUTATION_COLUMN, INVOLVED_INCIDENTS_COUNT_COLUMN
+]
+
+# Similar incidents fields/columns
+INCIDENT_ID_FIELD = "id"
+CREATED_FIELD = "created"
+NAME_FIELD = "name"
+STATUS_FIELD = "status"
+INCIDENT_LINK_COLUMN = "incident ID"
+INDICATORS_COLUMN = "indicators"
+SIMILARITY_SCORE_COLUMN = "similarity indicators"
+IDENTICAL_INDICATORS_COLUMN = "Identical indicators"
+
+FIRST_COLUMNS_INCIDENTS_DISPLAY = [INCIDENT_LINK_COLUMN, CREATED_FIELD, NAME_FIELD]
+FIELDS_TO_EXCLUDE_FROM_DISPLAY = [INCIDENT_ID_FIELD]
+
STATUS_DICT = {
0: "Pending",
1: "Active",
2: "Closed",
3: "Archive",
}
-
-ROUND_SCORING = 2
-PLAYGROUND_PATTERN = '[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}'
-FIRST_COLUMNS_INCIDENTS_DISPLAY = ['incident ID', 'created', 'name']
-FIELDS_TO_REMOVE_TO_DISPLAY = ['id']
-INCIDENT_FIELDS_TO_USE = ['indicators']
-FIELD_INDICATOR_TYPE = 'indicator_type'
-
-
-def normalize(x: list[str]) -> str:
- """
- Normalize function for indicators
- :param x: list of indicators
- :return:
- """
- return ' '.join(x)
-
-
-def identity_score(x):
- """
- Identity function
- :param x: object
- :return:
- """
- return x
+INDICATOR_LINK_FORMAT = "[{0}](#/indicator/{0})"
+INCIDENT_LINK_FORMAT = "[{0}](#/Details/{0})"
+DATE_FORMAT = "%Y-%m-%d"
def flatten_list(my_list: list[list]) -> list:
@@ -56,574 +68,464 @@ class FrequencyIndicators(BaseEstimator, TransformerMixin):
FrequencyIndicators class for indicator frequencies computation
"""
- def __init__(self, incident_field, normalize_function, current_incident):
- self.incident_field = incident_field
- self.normalize_function = normalize_function
- self.frequency = {}
- if self.normalize_function:
- current_incident = current_incident[self.incident_field].apply(self.normalize_function)
- else:
- current_incident = current_incident[self.incident_field]
- self.vocabulary = current_incident.iloc[0].split(' ')
-
- def fit(self, x):
- if self.normalize_function:
- x = x[self.incident_field].apply(self.normalize_function)
- else:
- x = x[self.incident_field]
+ def __init__(self, incident_field: str, actual_incident: pd.DataFrame) -> None:
+ self.column_name = incident_field
+ self.frequency: dict = {}
+ self.vocabulary = actual_incident[self.column_name].iloc[0].split(" ")
+
+ def fit(self, x: pd.DataFrame) -> 'FrequencyIndicators':
+ x = x[self.column_name]
size = len(x) + 1
- frequencies = Counter(flatten_list([t.split(' ') for t in x.values]))
+ frequencies = Counter(flatten_list([t.split(" ") for t in x.values]))
frequencies.update(Counter(self.vocabulary))
self.frequency = {k: math.log(1 + size / v) for k, v in frequencies.items()}
return self
- def transform(self, x):
- if self.normalize_function:
- x = x[self.incident_field].apply(self.normalize_function)
- else:
- x = x[self.incident_field]
- return x.apply(self.compute_term_score)
+ def transform(self, x: pd.DataFrame) -> pd.DataFrame:
+ return x[self.column_name].apply(self.compute_term_score)
def compute_term_score(self, indicators_values_string: str) -> float:
- x = indicators_values_string.split(' ')
+ x = indicators_values_string.split(" ")
return sum([1 * self.frequency[word] for word in self.vocabulary if word in x]) / sum(
[self.frequency[word] for word in self.vocabulary])
-TRANSFORMATION = {
- 'frequency_indicators': {'transformer': FrequencyIndicators,
- 'normalize': None,
- 'scoring_function': identity_score
- }
-}
-
-
-class Transformer():
- def __init__(self, p_transformer_type, incident_field, p_incidents_df, p_current_incident, p_params):
+class FrequencyIndicatorsTransformer:
+ def __init__(self, incidents_df: pd.DataFrame, actual_incident: pd.DataFrame):
"""
- :param p_transformer_type: One of the key value of TRANSFORMATION dict
- :param incident_field: incident field used in this transformation
- :param p_incidents_df: DataFrame of incident (should contains one columns which same name than incident_field)
- :param p_current_incident: DataFrame of the current incident
- :param p_params: Dictionary of all the transformation - TRANSFORMATION
+ :param incidents_df: DataFrame of related incidents
+ :param actual_incident: DataFrame of the actual incident
"""
- self.transformer_type = p_transformer_type
- self.incident_field = incident_field
- self.current_incident = p_current_incident
- self.incidents_df = p_incidents_df
- self.params = p_params
+ self.incidents_df = incidents_df
+ self.actual_incident = actual_incident
+ self.transformed_column = INDICATORS_COLUMN
+ self.scoring_function = lambda x: x
def fit_transform(self):
- transformation = self.params[self.transformer_type]
- transformer = transformation['transformer'](self.incident_field, transformation['normalize'],
- self.current_incident)
- X_vect = transformer.fit_transform(self.incidents_df)
- incident_vect = transformer.transform(self.current_incident)
- return X_vect, incident_vect
+ transformer = FrequencyIndicators(
+ self.transformed_column,
+ self.actual_incident,
+ )
+ x_vector = transformer.fit_transform(self.incidents_df)
+ incident_vect = transformer.transform(self.actual_incident)
+ return x_vector, incident_vect
def get_score(self):
- scoring_function = self.params[self.transformer_type]['scoring_function']
- X_vect, incident_vect = self.fit_transform()
- distance = scoring_function(X_vect)
- self.incidents_df['similarity %s' % self.incident_field] = np.round(distance, ROUND_SCORING)
+ x_vector, _ = self.fit_transform()
+ distance = self.scoring_function(x_vector)
+ self.incidents_df[SIMILARITY_SCORE_COLUMN] = np.round(distance, ROUND_SCORING)
return self.incidents_df
class Model:
- def __init__(self, p_transformation):
+ def __init__(
+ self,
+ incident_to_match: pd.DataFrame,
+ incidents_df: pd.DataFrame,
+ similarity_threshold: float,
+ max_incidents: int,
+ ) -> None:
"""
- :param p_transformation: Dict with the transformers parameters - TRANSFORMATION
+ :param incident_to_match: Dataframe with one incident
+ :param incidents_df: Dataframe with all the incidents
+ :param similarity_threshold: The similarity threshold
+ :param max_incidents: Maximum number of incidents to return
"""
- self.transformation = p_transformation
+ self.incident_to_match = incident_to_match
+ self.incidents_df = incidents_df
+ self.threshold = similarity_threshold
+ self.max_incidents = max_incidents
- def init_prediction(self, p_incident_to_match, p_incidents_df, p_fields_for_frequencyIndicators=[]):
- """
- :param p_incident_to_match: Dataframe with one incident
- :param p_incidents_df: Dataframe with all the incidents
- :param p_fields_indicators_transformation: list of incident fields that for the transformer 'indicators'
- :return:
- """
- self.incident_to_match = p_incident_to_match
- self.incidents_df = p_incidents_df
- self.fields_for_frequencyIndicators = p_fields_for_frequencyIndicators
-
- def predict(self):
- self.remove_empty_field()
+ def predict(self) -> pd.DataFrame:
self.get_score()
+ self.filter_results()
self.prepare_for_display()
return self.incidents_df
- def remove_empty_field(self):
- remove_list = []
- for field in self.fields_for_frequencyIndicators:
- if field not in self.incident_to_match.columns or not self.incident_to_match[field].values[
- 0] or not isinstance(self.incident_to_match[field].values[0], str) or \
- self.incident_to_match[field].values[0] == 'None' or \
- self.incident_to_match[field].values[0] == 'N/A':
- remove_list.append(field)
- self.fields_for_frequencyIndicators = [x for x in self.fields_for_frequencyIndicators if
- x not in remove_list]
-
- def get_score(self):
- for field in self.fields_for_frequencyIndicators:
- t = Transformer('frequency_indicators', field, self.incidents_df, self.incident_to_match,
- self.transformation)
- t.get_score()
+ def get_score(self) -> None:
+ t = FrequencyIndicatorsTransformer(
+ self.incidents_df,
+ self.incident_to_match,
+ )
+ t.get_score()
+
+ def filter_results(self) -> None:
+ self.incidents_df = self.incidents_df[self.incidents_df[SIMILARITY_SCORE_COLUMN] > self.threshold]
+ self.incidents_df = self.incidents_df.sort_values([SIMILARITY_SCORE_COLUMN], ascending=False)
+ self.incidents_df = self.incidents_df.head(self.max_incidents)
+
+ def prepare_for_display(self) -> None:
+ vocabulary = self.incident_to_match[INDICATORS_COLUMN].iloc[0].split(" ")
+ self.incidents_df[IDENTICAL_INDICATORS_COLUMN] = self.incidents_df[INDICATORS_COLUMN].apply(
+ lambda x: ",".join([id for id in x.split(" ") if id in vocabulary])
+ )
+
+
+def get_indicators_of_actual_incident(
+ incident_id: str,
+ indicator_types: list[str],
+ min_number_of_indicators: int,
+ max_incidents_per_indicator: int,
+) -> dict[str, dict]:
+ """ Given an incident ID, returns a map between IDs of its related indicators and their data
+
+ :param incident_id: ID of actual incident
+ :param indicator_types: list of accepted indicator types
+ :param min_number_of_indicators: Min number of indicators in the actual incident
+ :param max_incidents_per_indicator: Max incidents in indicators for white list
+ :return: a map from indicator ids of the actual incident to their data
+ """
+ args = {"query": f"investigationIDs:{incident_id}"}
+ demisto.debug(f"Executing findIndicators with {args=}")
+ indicators = execute_command("findIndicators", args, fail_on_error=True)
+ if not indicators:
+ return {}
+ indicators = [i for i in indicators if len(i.get("investigationIDs") or []) <= max_incidents_per_indicator]
+ if indicator_types:
+ indicators = [x for x in indicators if x[INDICATOR_TYPE_FIELD].lower() in indicator_types]
+ if len(indicators) < min_number_of_indicators:
+ return {}
+
+ indicators_data = {ind[INDICATOR_ID_FIELD]: ind for ind in indicators}
+ demisto.debug(
+ f"Found {len(indicators_data)} indicators for incident {incident_id}: {list(indicators_data.keys())}"
+ )
+ return indicators_data
+
+
+def get_related_incidents(
+ indicators: dict[str, dict],
+ query: str,
+ from_date: str | None,
+) -> list[str]:
+ """ Given indicators data including their related incidents,
+ filters their related incidents by query and date and returns a list of the incident IDs.
- def prepare_for_display(self):
- vocabulary = self.incident_to_match['indicators'].iloc[0].split(' ')
- self.incidents_df['Identical indicators'] = self.incidents_df['indicators'].apply(
- lambda x: ','.join([id for id in x.split(' ') if id in vocabulary]))
+ :param indicators: Mapping of indicator IDs to indicator data
+ :param query: A query to filter the related incidents by
+ :param from_date: A created date to filter the related incidents by
+ :return: The list of the related incident IDs
+ """
+ incident_ids = flatten_list([i.get("investigationIDs") or [] for i in indicators.values()])
+ incident_ids = list({x for x in incident_ids if not re.match(PLAYGROUND_PATTERN, x)})
+ if not (query or from_date) or not incident_ids:
+ demisto.debug(f"Found {len(incident_ids)} related incidents: {incident_ids}")
+ return incident_ids
+
+ args = {
+ "query": f"{query + ' AND ' if query else ''}incident.id:({' '.join(incident_ids)})",
+ "populateFields": INCIDENT_ID_FIELD,
+ "fromDate": from_date,
+ }
+ demisto.debug(f"Executing GetIncidentsByQuery with {args=}")
+ res = execute_command("GetIncidentsByQuery", args, fail_on_error=True)
+ incident_ids = [incident[INCIDENT_ID_FIELD] for incident in json.loads(res)]
+ demisto.debug(f"Found {len(incident_ids)} related incidents: {incident_ids}")
+ return incident_ids
-def get_all_indicators_for_incident(incident_id: str) -> list[dict]:
- """
- Get indicators for one incident
- :param incident_id: incident id
- :return:
- """
- query = 'investigationIDs:%s' % incident_id
- res = demisto.executeCommand("findIndicators", {'query': query})
- if is_error(res):
- get_error(res)
- if not res[0]['Contents']:
+def get_indicators_of_related_incidents(
+ incident_ids: list[str],
+ max_incidents_per_indicator: int,
+) -> list[dict]:
+ if not incident_ids:
+ demisto.debug("No mutual indicators were found.")
return []
- indicators = res[0]['Contents']
+
+ args = {
+ "query": f"investigationIDs:({' '.join(incident_ids)})",
+ "populateFields": ",".join(INDICATOR_FIELDS_TO_POPULATE_FROM_QUERY),
+ }
+ demisto.debug(f"Executing GetIndicatorsByQuery with {args=}")
+ indicators = execute_command("GetIndicatorsByQuery", args, fail_on_error=True)
+ indicators = [i for i in indicators if len(i.get("investigationIDs") or []) <= max_incidents_per_indicator]
+ indicators_ids = [ind[INDICATOR_ID_FIELD] for ind in indicators]
+ demisto.debug(f"Found {len(indicators_ids)} related indicators: {indicators_ids}")
return indicators
-def get_number_of_invs_for_indicators(indicator: dict) -> int:
- """
- :param indicator: list of dict representing indicators
- :return: lenght of investigation ids for this indicators
- """
- invs = indicator.get('investigationIDs') or []
- return len(invs)
+def get_mutual_indicators(
+ related_indicators: list[dict],
+ indicators_of_actual_incident: dict[str, dict],
+) -> list[dict]:
+ mutual_indicators = [
+ ind for ind in related_indicators if ind[INDICATOR_ID_FIELD] in indicators_of_actual_incident
+ ]
+ mutual_indicators_ids = [ind[INDICATOR_ID_FIELD] for ind in mutual_indicators]
+ demisto.debug(f"Found {len(mutual_indicators_ids)} mutual indicators: {mutual_indicators_ids}")
+ return mutual_indicators
+
+
+def get_mutual_indicators_df(
+ indicators: list[dict],
+ incident_ids: list[str],
+) -> pd.DataFrame:
+ indicators_df = pd.DataFrame(indicators)
+ if not indicators_df.empty:
+ indicators_df[INVOLVED_INCIDENTS_COUNT_COLUMN] = indicators_df[INVESTIGATION_IDS_FIELD].apply(
+ lambda inv_ids: sum(id_ in incident_ids for id_ in inv_ids),
+ )
+ indicators_df[INDICATOR_LINK_COLUMN] = indicators_df[INDICATOR_ID_FIELD].apply(
+ lambda x: INDICATOR_LINK_FORMAT.format(x)
+ )
+ indicators_df = indicators_df.sort_values(
+ [SCORE_FIELD, INVOLVED_INCIDENTS_COUNT_COLUMN],
+ ascending=False,
+ )
+ indicators_df[REPUTATION_COLUMN] = indicators_df[SCORE_FIELD].apply(scoreToReputation) # pylint: disable=E1137
+ indicators_df = indicators_df.rename({INDICATOR_TYPE_FIELD: TYPE_COLUMN}, axis=1)
+ return indicators_df
-def get_indicators_from_incident_ids(ids: list[str]) -> list[dict]:
- """
- Get indicators for list of incidents ids
- :param ids: List of incident ids
- :return: List of indicators for each id
- """
- ids_string = []
- for id_ in ids:
- ids_string.append('incident.id: "%s"' % id_)
- query = " OR ".join(ids_string)
- res = demisto.executeCommand('findIndicators', {
- 'query': query
- })
- if is_error(res):
- get_error(res)
- if not res[0]['Contents']:
- return []
- indicators = res[0]['Contents']
- return indicators
+def mutual_indicators_results(mutual_indicators: list[dict], incident_ids: list[str]):
+ indicators_df = get_mutual_indicators_df(mutual_indicators, incident_ids)
+ outputs = [] if indicators_df.empty else indicators_df[[INDICATOR_ID_FIELD, VALUE_FIELD]].to_dict(orient="records")
+ readable_output = tableToMarkdown(
+ "Mutual Indicators",
+ indicators_df.to_dict(orient="records"),
+ headers=MUTUAL_INDICATORS_HEADERS,
+ headerTransform=pascalToSpace,
+ )
+ return CommandResults(
+ outputs=outputs,
+ outputs_prefix="MutualIndicators.indicators",
+ readable_output=readable_output,
+ )
+
+
+def create_actual_incident_df(indicators_of_actual_incident: dict[str, dict]) -> pd.DataFrame:
+ return pd.DataFrame(
+ data=[" ".join(indicators_of_actual_incident.keys())],
+ columns=[INDICATORS_COLUMN],
+ )
-def match_indicators_incident(indicators: list[dict], incident_ids: list[str]) -> dict[str, list]:
+def create_related_incidents_df(
+ indicators: list[dict],
+ incident_ids: list[str],
+ actual_incident_id: str,
+) -> dict[str, list]:
"""
:param indicators: list of dict representing indicators
:param incident_ids: list of incident ids
:return: dict of {incident id : list of indicators ids related to this incident)
"""
- d = {k: [] for k in incident_ids} # type: dict[str, list]
- for indicator in indicators:
- inv_ids = indicator.get('investigationIDs', None)
- if inv_ids:
- for inv_id in inv_ids:
- if inv_id in d:
- d[inv_id] = d[inv_id] + [indicator['id']]
- return d
-
-
-def enriched_incidents(df, fields_incident_to_display, from_date: str):
- """
- Enriched incidents with data
- :param df: Incidents dataFrame
- :param fields_incident_to_display: Fields selected for enrichement
- :param from_date: from_date
- :return: Incidents dataFrame enriched
- """
- if 'id' in df.columns:
- ids = df.id.tolist()
- else:
- ids = df.index
- ids_string = []
- for id_ in ids:
- ids_string.append('id: "%s"' % id_)
- query = " OR ".join(ids_string)
- res = demisto.executeCommand('GetIncidentsByQuery', {
- 'query': query,
- 'populateFields': ' , '.join(fields_incident_to_display),
- 'fromDate': from_date,
- })
- if is_error(res):
- return_error(res)
- if not json.loads(res[0]['Contents']):
- return []
- else:
- incidents = json.loads(res[0]['Contents'])
- incidents_dict = {incident['id']: incident for incident in incidents}
- for field in fields_incident_to_display:
- if field == 'created':
- df[field] = [incidents_dict.get(id_, {}).get(field, '')[:10] if
- len(incidents_dict.get(id_, {}).get(field, '')) > 10 else '' for id_ in ids]
- elif field == 'status':
- df[field] = [STATUS_DICT.get(incidents_dict.get(id_, {}).get(field, '')) if
- incidents_dict.get(id_, {}).get(field, '') in STATUS_DICT else ' ' for id_ in ids]
- else:
- df[field] = [incidents_dict.get(id_, {}).get(field, '') for id_ in ids]
- return df
-
-
-def return_outputs_custom(readable_output, outputs=None):
- return_entry = {
- "Type": entryTypes["note"],
- "HumanReadable": readable_output,
- "ContentsFormat": formats['json'],
- "Contents": outputs,
- "EntryContext": outputs,
+ incidents_to_indicators = {
+ inc_id: [
+ indicator[INDICATOR_ID_FIELD] for indicator in indicators
+ if inc_id in (indicator.get(INVESTIGATION_IDS_FIELD) or [])
+ ]
+ for inc_id in incident_ids
}
- return CommandResults(outputs=return_entry)
-
-
-def return_no_mututal_indicators_found_entry():
- hr = '### Mutual Indicators' + '\n'
- hr += 'No mutual indicators were found.'
- return_outputs_custom(hr, add_context_key(create_context_for_indicators()))
-
-
-def create_context_for_indicators(indicators_df=None):
- if indicators_df is None:
- indicators_context = []
- else:
- indicators_df.rename({'Value': 'value'}, axis=1, inplace=True) # noqa: PD002
- indicators_df = indicators_df[['id', 'value']]
- indicators_context = indicators_df.to_dict(orient='records')
- return {'indicators': indicators_context}
-
-
-def add_context_key(entry_context):
- new_context = {}
- for k, v in entry_context.items():
- new_context['{}.{}'.format('MutualIndicators', k)] = v
- return new_context
-
-
-def return_indicator_entry(incident_ids, indicators_types, indicators_list):
- indicators_query = 'investigationIDs:({})'.format(' '.join(f'"{id_}"' for id_ in incident_ids))
- fields = ['id', 'indicator_type', 'investigationIDs', 'relatedIncCount', 'score', 'value']
- indicators_args = {'query': indicators_query, 'limit': '150', 'populateFields': ','.join(fields)}
- res = demisto.executeCommand('GetIndicatorsByQuery', args=indicators_args)
- if is_error(res):
- return_error(res)
- indicators = res[0]['Contents']
- indicators_df = pd.DataFrame(data=indicators)
- if len(indicators_df) == 0:
- return_no_mututal_indicators_found_entry()
- return indicators_df
- indicators_df = indicators_df[indicators_df['relatedIncCount'] < 150]
- indicators_df['Involved Incidents Count'] = \
- indicators_df['investigationIDs'].apply(lambda x: sum(id_ in incident_ids for id_ in x))
- indicators_df = indicators_df[indicators_df['Involved Incidents Count'] > 1]
- if indicators_types:
- indicators_df = indicators_df[indicators_df.indicator_type.isin(indicators_types)]
- indicators_df = indicators_df[indicators_df.id.isin([x.get('id') for x in indicators_list])]
- if len(indicators_df) == 0:
- return_no_mututal_indicators_found_entry()
- return indicators_df
- indicators_df['Id'] = indicators_df['id'].apply(lambda x: f"[{x}](#/indicator/{x})")
- indicators_df = indicators_df.sort_values(['score', 'Involved Incidents Count'], ascending=False)
- indicators_df['Reputation'] = indicators_df['score'].apply(scoreToReputation)
- indicators_df.rename({'value': 'Value', 'indicator_type': 'Type'}, axis=1, inplace=True) # noqa: PD002
- indicators_headers = ['Id', 'Value', 'Type', 'Reputation', 'Involved Incidents Count']
- hr = tableToMarkdown('Mutual Indicators', indicators_df.to_dict(orient='records'),
- headers=indicators_headers)
- return_outputs_custom(hr, add_context_key(create_context_for_indicators(indicators_df)))
- return indicators_df
+ return pd.DataFrame.from_dict(
+ data={
+ k: " ".join(v) for k, v in incidents_to_indicators.items()
+ if k != actual_incident_id
+ },
+ orient="index",
+ columns=[INDICATORS_COLUMN],
+ )
-def get_indicators_map(indicators: list[dict]) -> dict[str, dict]:
- """
- :param indicators: list of dict representing indicators
- :return: Dictionary {id of indicators: indicators}
+def enrich_incidents(
+ incidents: pd.DataFrame,
+ fields_to_display: list,
+) -> pd.DataFrame:
"""
- return {ind['id']: ind for ind in indicators}
-
-
-def join(my_list: list) -> str:
- return ' '.join(my_list)
-
+ Enriches a DataFrame of incidents with the given fields to display.
-def organize_data(similar_incidents: pd.DataFrame, indicators_map: dict[str, dict], threshold: float,
- max_incidents_to_display: int) \
- -> pd.DataFrame:
- """
- Clean and organize dataframe before displaying
- :param similar_incidents: DataFrame of incident
- :param indicators_map: Dict of indicators
- :param threshold: threshold for similarity score
- :param max_incidents_to_display: Max number of incidents we want to display
- :return: Clean DataFrame of incident
- """
- similar_incidents = similar_incidents.reset_index().rename(columns={'index': 'id'})
- similar_incidents['incident ID'] = similar_incidents['id'].apply(lambda _id: f"[{_id}](#/Details/{_id})")
- similar_incidents['Identical indicators'] = similar_incidents['Identical indicators'].apply(
- lambda _ids: '\n'.join(
- [indicators_map.get(x).get('value') if indicators_map.get(x) else ' ' for x in # type: ignore
- _ids.split(',')])) # type: ignore
- similar_incidents = similar_incidents[['incident ID', 'id', 'Identical indicators', 'similarity indicators']]
- similar_incidents = similar_incidents[similar_incidents['similarity indicators'] > threshold]
- similar_incidents.sort_values(['similarity indicators'], inplace=True, ascending=False) # noqa: PD002
- return similar_incidents.head(max_incidents_to_display)
-
-
-def return_no_similar_incident_found_entry():
- hr = '### No Similar indicators' + '\n'
- hr += 'No Similar indicators were found.'
- return CommandResults(readable_output=hr,
- outputs={'DBotFindSimilarIncidentsByIndicators': create_context_for_incidents()},
- raw_response={})
-
-
-def create_context_for_incidents(similar_incidents=pd.DataFrame()):
- """
- Return context from dataframe of incident
- :param similar_incidents: DataFrame of incidents with indicators
- :return: context
- """
- if len(similar_incidents) == 0:
- context = {
- 'similarIncidentList': {},
- 'isSimilarIncidentFound': False
- }
- else:
- context = {
- 'similarIncident': (similar_incidents.to_dict(orient='records')),
- 'isSimilarIncidentFound': True
- }
- return context
-
-
-def display_actual_incident(incident_df: pd.DataFrame, incident_id: str, fields_incident_to_display: list[str],
- from_date: str) -> CommandResults:
- """
- Display current incident
- :param incident_df: DataFrame of incident
- :param incident_id: incident ID
- :param fields_incident_to_display: fields to display
- :param from_date: fields to from_date
- :return: None
- """
- incident_df['id'] = [incident_id]
- incident_df = enriched_incidents(incident_df, fields_incident_to_display, from_date)
- incident_df['Incident ID'] = incident_df['id'].apply(lambda _id: f"[{_id}](#/Details/{_id})")
- col_incident = incident_df.columns.tolist()
- col_incident = FIRST_COLUMNS_INCIDENTS_DISPLAY + [x for x in col_incident if
- x not in FIRST_COLUMNS_INCIDENTS_DISPLAY + ['id', 'indicators']]
- col_incident = [x.title() for x in col_incident]
- incident_df = incident_df.rename(str.title, axis='columns')
- incident_json = incident_df.to_dict(orient='records')
- return CommandResults(readable_output=tableToMarkdown("Actual Incident",
- incident_json,
- col_incident))
-
-
-def load_indicators_for_current_incident(incident_id: str, indicators_types: list[str], min_nb_of_indicators: int,
- max_indicators_for_white_list: int):
- """
- Take
- :param incident_id: ID of current incident
- :param indicators_types: list of indicators type accepted
- :param limit_nb_of_indicators: Min number of indicators in the current incident
- :param max_indicators: Max incidents in indicators for white list
- :return: return [*indicators] and dictionnary {key: indicators} and if early_stop
- """
- indicators = get_all_indicators_for_incident(incident_id)
- if not indicators:
- return_no_mututal_indicators_found_entry()
- return_no_similar_incident_found_entry()
- return [], {}, True
- indicators_map = get_indicators_map(indicators)
- indicators = list(
- filter(lambda x: get_number_of_invs_for_indicators(x) < max_indicators_for_white_list, indicators))
- if indicators_types:
- indicators = [x for x in indicators if x.get(FIELD_INDICATOR_TYPE) in indicators_types]
- if len(indicators) < min_nb_of_indicators:
- return_no_mututal_indicators_found_entry()
- return_no_similar_incident_found_entry()
- return [], {}, True
- return indicators, indicators_map, False
-
-
-def get_incidents_ids_related_to_indicators(indicators, query):
- """
- Return incident ids from a list of indicators
- :param indicators: List of indicators
- :return: [*incidents_ids]
+ :param incidents: Incidents DataFrame
+ :param fields_to_display: Fields selected for enrichment
+ :return: Incidents DataFrame enriched with the requested fields
"""
- incident_ids = [indicator.get('investigationIDs', None) for indicator in indicators if
- indicator.get('investigationIDs', None)]
- incident_ids = flatten_list(incident_ids)
- p = re.compile(PLAYGROUND_PATTERN)
- incident_ids = [x for x in incident_ids if not p.match(x)]
- incident_ids = get_incidents_filtered_from_query(incident_ids, query)
- if not incident_ids:
- return_no_mututal_indicators_found_entry()
- return_no_similar_incident_found_entry()
- return [], True
- return incident_ids, False
-
-
-def get_ids_condition_clause(incident_ids):
- if incident_ids:
- return "incident.id:(" + " ".join(incident_ids) + ")"
- return ""
-
-
-def get_incidents_filtered_from_query(incident_ids, query):
- ids_condition_clause = get_ids_condition_clause(incident_ids)
- query += " AND %s" % ids_condition_clause
- res = demisto.executeCommand('GetIncidentsByQuery', {
- 'query': query,
- 'populateFields': 'id'
- })
- if is_error(res):
- get_error(res)
- if not json.loads(res[0]['Contents']):
- return []
- else:
- filtered_incidents_dict = json.loads(res[0]['Contents'])
- filtered_incidents = [incident['id'] for incident in filtered_incidents_dict]
- return filtered_incidents
+ if incidents.empty:
+ return incidents
+ incident_ids = incidents.id.tolist() if INCIDENT_ID_FIELD in incidents.columns else incidents.index
-def get_related_incidents_with_indicators(incident_ids: list[str], indicators_types: list[str],
- incident_id: str) -> pd.DataFrame:
- """
- Create dataframe of incident with indicators from incidents ids list
- :param incident_ids: List if incident id
- :param indicators_types: List of indicators type
- :param incident_id: current incident (in order to remove it)
- :return: dataframe of incident with indicators
- """
- indicators_related = get_indicators_from_incident_ids(incident_ids)
- if not indicators_related:
- return_no_similar_incident_found_entry()
- return pd.DataFrame(), True
- if indicators_types:
- indicators_related = [x for x in indicators_related if x.get(FIELD_INDICATOR_TYPE) in indicators_types]
- if not indicators_related:
- return_no_similar_incident_found_entry()
- return pd.DataFrame(), True
- incidents_with_indicators = match_indicators_incident(indicators_related, incident_ids)
- incidents_with_indicators_join = {k: join(v) for k, v in incidents_with_indicators.items()}
- incidents_with_indicators_join.pop(incident_id, None)
- if not bool(incidents_with_indicators_join):
- return_no_similar_incident_found_entry()
- return pd.DataFrame(), True
- incidents_df = pd.DataFrame.from_dict(incidents_with_indicators_join, orient='index')
- incidents_df.columns = ['indicators']
- return incidents_df, False
-
-
-def organize_current_incident(current_incident_df, indicators_map):
- current_incident_df['Indicators'] = current_incident_df['indicators'].apply(
- lambda _ids: '\n'.join(
- [indicators_map.get(x).get('value') if indicators_map.get(x) else ' ' for x in # type: ignore
- _ids.split(' ')])) # type: ignore
- return current_incident_df
-
-
-def return_outputs_tagged(similar_incidents: pd.DataFrame, context: dict, tag: Optional[str] = None):
- colums_to_display = FIRST_COLUMNS_INCIDENTS_DISPLAY + [x for x in similar_incidents.columns.tolist() if
- x not in FIRST_COLUMNS_INCIDENTS_DISPLAY + FIELDS_TO_REMOVE_TO_DISPLAY]
- similar_incidents_renamed = similar_incidents.rename(str.title, axis='columns')
- similar_incidents_json = similar_incidents_renamed.to_dict(orient='records')
- colums_to_display = [x.title() for x in colums_to_display]
- readable_output = tableToMarkdown("Similar incidents", similar_incidents_json, colums_to_display)
- return_entry = {
- "Type": entryTypes["note"],
- "HumanReadable": readable_output,
- "ContentsFormat": formats['json'],
- "Contents": similar_incidents.to_dict(orient='records'),
- "EntryContext": {'DBotFindSimilarIncidentsByIndicators': context},
+ args = {
+ "query": f"incident.id:({' '.join(incident_ids)})",
+ "populateFields": ",".join(fields_to_display),
}
- if tag is not None:
- return_entry["Tags"] = [tag]
- return return_entry
-
-
-def main():
- max_indicators_for_white_list = int(demisto.args()['maxIncidentsInIndicatorsForWhiteList'])
- min_nb_of_indicators = int(demisto.args()['minNumberOfIndicators'])
- threshold = float(demisto.args()['threshold'])
- indicators_types = demisto.args().get('indicatorsTypes')
- if indicators_types:
- indicators_types = indicators_types.split(',')
- indicators_types = [x.strip() for x in indicators_types if x]
- show_actual_incident = demisto.args().get('showActualIncident')
- max_incidents_to_display = int(demisto.args()['maxIncidentsToDisplay'])
- fields_incident_to_display = demisto.args()['fieldsIncidentToDisplay'].split(',')
- fields_incident_to_display = [x.strip() for x in fields_incident_to_display if x]
- fields_incident_to_display = list(set(['created', 'name'] + fields_incident_to_display))
- from_date = demisto.args().get('fromDate')
- query = demisto.args().get('query', "")
-
- # load the Dcurrent incident
- incident_id = demisto.args().get('incidentId')
- if not incident_id:
- incident = demisto.incidents()[0]
- incident_id = incident['id']
-
- # load the related indicators to the incidents
- indicators, indicators_map, early_exit = load_indicators_for_current_incident(incident_id, indicators_types,
- min_nb_of_indicators,
- max_indicators_for_white_list)
- if early_exit:
- return
-
- # Get the Investigation IDs related to the indicators if the incidents
- incident_ids, early_exit = get_incidents_ids_related_to_indicators(indicators, query)
- if early_exit:
- return
-
- # Return Mutual indicators
- _ = return_indicator_entry(incident_ids, indicators_types, indicators)
-
- # Get related incidents with indicators
- incidents_df, early_exit = get_related_incidents_with_indicators(incident_ids, indicators_types, incident_id)
- if early_exit:
- return
-
- # Current incident
- indicators_for_incident = [' '.join({x.get('id') for x in indicators})] # type: ignore
- current_incident_df = pd.DataFrame(indicators_for_incident, columns=['indicators'])
-
- # Prediction
- model = Model(p_transformation=TRANSFORMATION)
- model.init_prediction(current_incident_df, incidents_df, INCIDENT_FIELDS_TO_USE)
- similar_incidents = model.predict()
-
- # Display and enriched incidents data
- current_incident_df = organize_current_incident(current_incident_df, indicators_map)
- similar_incidents = organize_data(similar_incidents, indicators_map, threshold, max_incidents_to_display)
- similar_incidents = enriched_incidents(similar_incidents, fields_incident_to_display, from_date)
-
- incident_found_bool = (len(similar_incidents) > 0)
-
- if show_actual_incident == 'True':
- command_results = display_actual_incident(current_incident_df, incident_id, fields_incident_to_display, from_date)
- return_results(command_results)
-
- if incident_found_bool:
- context = create_context_for_incidents(similar_incidents)
- return_results(return_outputs_tagged(similar_incidents, context, 'similarIncidents'))
- else:
- return_no_similar_incident_found_entry()
-
-
-if __name__ in ['__main__', '__builtin__', 'builtins']:
+ demisto.debug(f"Executing GetIncidentsByQuery with {args=}")
+ res = execute_command("GetIncidentsByQuery", args, fail_on_error=True)
+ incidents_map: dict[str, dict] = {incident[INCIDENT_ID_FIELD]: incident for incident in json.loads(res)}
+ if CREATED_FIELD in fields_to_display:
+ incidents[CREATED_FIELD] = [
+ dateparser.parse(incidents_map[inc_id][CREATED_FIELD]).strftime(DATE_FORMAT) # type: ignore
+ for inc_id in incident_ids
+ ]
+ if STATUS_FIELD in fields_to_display:
+ incidents[STATUS_FIELD] = [
+ STATUS_DICT.get(incidents_map[inc_id][STATUS_FIELD]) or " "
+ for inc_id in incident_ids
+ ]
+
+ for field in fields_to_display:
+ if field not in [CREATED_FIELD, STATUS_FIELD]:
+ incidents[field] = [incidents_map[inc_id][field] or "" for inc_id in incident_ids]
+ return incidents
+
+
+def replace_indicator_ids_with_values(
+ inc_ids: str,
+ indicators_data: dict[str, dict],
+) -> str:
+ return "\n".join([
+ indicators_data.get(x, {}).get(VALUE_FIELD) or " "
+ for x in inc_ids.split(" ")
+ ])
+
+
+def format_similar_incidents(
+ similar_incidents: pd.DataFrame,
+ indicators_data: dict[str, dict],
+ fields_to_display: list[str],
+) -> pd.DataFrame:
+ """ Formats the similar incidents DataFrame.
+
+ :param indicators_data: a mapping between IDs and the mutual indicators data
+ :param fields_to_display: Fields selected for enrichment
+ :return: a formatted, enriched DataFrame of the similar incidents.
+ """
+ if similar_incidents.empty:
+ demisto.debug("No similar incidents found.")
+ return similar_incidents
+
+ # format and enrich DataFrame
+ similar_incidents = similar_incidents.reset_index().rename(columns={"index": INCIDENT_ID_FIELD})
+ similar_incidents[INCIDENT_LINK_COLUMN] = similar_incidents[INCIDENT_ID_FIELD].apply(
+ lambda _id: INCIDENT_LINK_FORMAT.format(_id)
+ )
+ similar_incidents[IDENTICAL_INDICATORS_COLUMN] = similar_incidents[IDENTICAL_INDICATORS_COLUMN].apply(
+ lambda inc_ids: replace_indicator_ids_with_values(inc_ids, indicators_data)
+ )
+ similar_incidents = similar_incidents[
+ [INCIDENT_LINK_COLUMN, INCIDENT_ID_FIELD, IDENTICAL_INDICATORS_COLUMN, SIMILARITY_SCORE_COLUMN]
+ ]
+ return enrich_incidents(similar_incidents, fields_to_display)
+
+
+def similar_incidents_results(
+ similar_incidents: pd.DataFrame,
+ indicators_of_actual_incident: dict,
+ fields_to_display: list,
+):
+ similar_incidents = format_similar_incidents(similar_incidents, indicators_of_actual_incident, fields_to_display)
+ outputs = similar_incidents.to_dict(orient="records")
+ additional_headers = [
+ x for x in similar_incidents.columns.tolist()
+ if x not in FIRST_COLUMNS_INCIDENTS_DISPLAY + FIELDS_TO_EXCLUDE_FROM_DISPLAY
+ ]
+ return CommandResults(
+ outputs={"similarIncident": outputs, "isSimilarIncidentFound": len(outputs) > 0},
+ outputs_prefix="DBotFindSimilarIncidentsByIndicators",
+ raw_response=outputs,
+ readable_output=tableToMarkdown(
+ "Similar Incidents",
+ outputs,
+ headers=FIRST_COLUMNS_INCIDENTS_DISPLAY + additional_headers,
+ headerTransform=str.title,
+ ),
+ tags=["similarIncidents"], # type: ignore
+ )
+
+
+def actual_incident_results(
+ incident_df: pd.DataFrame,
+ incident_id: str,
+ indicators_data: dict,
+ fields_to_display: list[str],
+) -> CommandResults:
+ """
+ Formats the given DataFrame, and returns a CommandResults object of the actual incident data
+ :param incident_df: a DataFrame of actual incident
+ :param incident_id: the incident ID
+ :param indicators_data: indicators data of the actual incident
+ :param fields_to_display: A list of fields to display
+ :return: a CommandResults obj
+ """
+ incident_df[INCIDENT_ID_FIELD] = [incident_id]
+ incident_df[INCIDENT_LINK_COLUMN] = incident_df[INCIDENT_ID_FIELD].apply(
+ lambda _id: INCIDENT_LINK_FORMAT.format(_id)
+ )
+ incident_df[INDICATORS_COLUMN] = incident_df[INDICATORS_COLUMN].apply(
+ lambda inc_ids: replace_indicator_ids_with_values(inc_ids, indicators_data)
+ )
+ incident_df = enrich_incidents(incident_df, fields_to_display)
+ additional_headers = [
+ x for x in incident_df.columns.tolist()
+ if x not in FIRST_COLUMNS_INCIDENTS_DISPLAY + FIELDS_TO_EXCLUDE_FROM_DISPLAY
+ ]
+ return CommandResults(
+ readable_output=tableToMarkdown(
+ "Actual Incident",
+ incident_df.to_dict(orient="records"),
+ headers=FIRST_COLUMNS_INCIDENTS_DISPLAY + additional_headers,
+ headerTransform=pascalToSpace,
+ ),
+ )
+
+
+def find_similar_incidents_by_indicators(incident_id: str, args: dict) -> list[CommandResults]:
+ # get_indicators_of_actual_incident() args
+ indicators_types = argToList(args.get("indicatorsTypes"), transform=str.lower)
+ min_number_of_indicators = int(args["minNumberOfIndicators"])
+ max_incidents_per_indicator = int(args["maxIncidentsInIndicatorsForWhiteList"])
+
+ # get_related_incidents() args
+ query = args.get("query") or ""
+ from_date = args.get("fromDate")
+
+ # get_similar_incidents() args
+ similarity_threshold = float(args["threshold"])
+ max_incidents_to_display = int(args["maxIncidentsToDisplay"])
+
+ # outputs formatting args
+ show_actual_incident = argToBoolean(args.get("showActualIncident"))
+ fields_to_display = list(set(argToList(args["fieldsIncidentToDisplay"])) | {CREATED_FIELD, NAME_FIELD})
+
+ command_results_list: list[CommandResults] = []
+
+ indicators_of_actual_incident = get_indicators_of_actual_incident(
+ incident_id,
+ indicators_types,
+ min_number_of_indicators,
+ max_incidents_per_indicator,
+ )
+
+ incident_ids = get_related_incidents(indicators_of_actual_incident, query, from_date)
+ related_indicators = get_indicators_of_related_incidents(incident_ids, max_incidents_per_indicator)
+ mutual_indicators = get_mutual_indicators(related_indicators, indicators_of_actual_incident)
+ actual_incident_df = create_actual_incident_df(indicators_of_actual_incident)
+ related_incidents_df = create_related_incidents_df(related_indicators, incident_ids, incident_id)
+
+ similar_incidents = Model(
+ actual_incident_df,
+ related_incidents_df,
+ similarity_threshold,
+ max_incidents_to_display,
+ ).predict()
+
+ if show_actual_incident:
+ command_results_list.append(
+ actual_incident_results(actual_incident_df, incident_id, indicators_of_actual_incident, fields_to_display)
+ )
+ command_results_list.extend([
+ mutual_indicators_results(mutual_indicators, incident_ids),
+ similar_incidents_results(similar_incidents, indicators_of_actual_incident, fields_to_display),
+ ])
+ return command_results_list
+
+
+def main(): # pragma: no cover
+ try:
+ args = demisto.args()
+ incident_id = args.get("incidentId") or demisto.incidents()[0]["id"]
+ return_results(find_similar_incidents_by_indicators(incident_id, args))
+ except Exception as e:
+ return_error(f"Failed to execute DBotFindSimilarIncidentsByIndicators. Error: {e}")
+
+
+if __name__ in ["__main__", "__builtin__", "builtins"]:
main()
diff --git a/Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators.yml b/Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators.yml
index 0a85d5c2c637..2080bdb06546 100644
--- a/Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators.yml
+++ b/Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators.yml
@@ -42,8 +42,8 @@ script: '-'
subtype: python3
timeout: '0'
type: python
-dockerimage: demisto/ml:1.0.0.83711
+dockerimage: demisto/ml:1.0.0.86222
runas: DBotWeakRole
tests:
-- No tests (auto formatted)
+- DBotFindSimilarIncidentsByIndicators - Test
fromversion: 5.0.0
diff --git a/Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators_test.py b/Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators_test.py
index ca3f7d16de00..ac059a60cd51 100644
--- a/Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators_test.py
+++ b/Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators_test.py
@@ -1,104 +1,343 @@
-import pytest
+import json
+import numpy as np
import pandas as pd
-# from CommonServerPython import *
-# import pytest
-from DBotFindSimilarIncidentsByIndicators import identity_score, match_indicators_incident, get_indicators_map, \
- FrequencyIndicators, \
- get_number_of_invs_for_indicators
-
-TRANSFORMATION = {
- 'indicators': {'transformer': FrequencyIndicators,
- 'normalize': None,
- 'scoring_function': identity_score
- }
-}
-
-indicator = [{'id': 'a', 'investigationIDs': ['1', '2', '10'], 'value': 'value_a', 'indicator_type': 'URL'},
- {'id': 'b', 'investigationIDs': ['2', '10'], 'value': 'value_b', 'indicator_type': 'File'}]
-
-indicators_list = [{'id': 'a', 'investigationIDs': ['1', '2', '10'], 'value': 'value_a', 'indicator_type': 'File'},
- {'id': 'b', 'investigationIDs': ['2', '10'], 'value': 'value_b', 'indicator_type': 'Domain'},
- {'id': 'c', 'investigationIDs': ['3', '45'], 'value': 'value_c', 'indicator_type': 'Email'},
- {'id': 'd', 'investigationIDs': ['1', '45'], 'value': 'value_d', 'indicator_type': 'File'},
- {'id': 'c', 'investigationIDs': ['2', '45'], 'value': 'value_c', 'indicator_type': 'File'}
- ]
-
-
-def executeCommand(command, args):
- indicator = [{'id': 'a', 'investigationIDs': ['1', '2', '10'], 'value': 'value_a', 'indicator_type': 'URL'},
- {'id': 'b', 'investigationIDs': ['2', '10'], 'value': 'value_b', 'indicator_type': 'File'}]
-
- indicators_list = [{'id': 'a', 'investigationIDs': ['1', '2', '10'], 'value': 'value_a', 'indicator_type': 'File'},
- {'id': 'b', 'investigationIDs': ['2', '10'], 'value': 'value_b', 'indicator_type': 'Domain'},
- {'id': 'c', 'investigationIDs': ['3', '45'], 'value': 'value_c', 'indicator_type': 'Email'},
- {'id': 'd', 'investigationIDs': ['1', '45'], 'value': 'value_d', 'indicator_type': 'File'},
- {'id': 'c', 'investigationIDs': ['2', '45'], 'value': 'value_c', 'indicator_type': 'File'}
- ]
-
- if command == 'findIndicators' and 'OR' in args['query']:
- return [{'Contents': indicators_list, 'Type': 'note'}]
- else:
- return [{'Contents': indicator, 'Type': 'note'}]
-
-
-TRANSFORMATION = {
- 'indicators': {'transformer': FrequencyIndicators,
- 'normalize': None,
- 'scoring': {'scoring_function': identity_score, 'min': 0.5}
- }
-}
-
-
-def test_get_number_of_invs_for_indicators(mocker):
- assert get_number_of_invs_for_indicators(indicator[0]) == 3
-
-
-def test_get_indicators_map(mocker):
- res = {
- 'a': indicator[0],
- 'b': indicator[1]
+import pytest
+from dateparser import parse
+from DBotFindSimilarIncidentsByIndicators import *
+
+PLAYGROUND_ID = "00000000-0000-0000-0000-0000000000000"
+
+IND_A = {"id": "a", "investigationIDs": ["1", "2", "3"], "value": "value_a", "indicator_type": "File", "score": 0}
+IND_B = {"id": "b", "investigationIDs": ["2", "3"], "value": "value_b", "indicator_type": "Domain", "score": 1}
+IND_C = {"id": "c", "investigationIDs": ["5", "6"], "value": "value_c", "indicator_type": "Email", "score": 2}
+IND_D = {"id": "d", "investigationIDs": ["1", "6"], "value": "value_d", "indicator_type": "File", "score": 2}
+IND_E = {"id": "e", "investigationIDs": ["2", "3", "4"], "value": "value_e", "indicator_type": "File", "score": 1}
+IND_F = {"id": "f", "investigationIDs": ["2", "3", "4", "5"], "value": "value_f", "indicator_type": "File", "score": 1}
+INDICATORS_LIST = [IND_A, IND_B, IND_C, IND_D, IND_E, IND_F]
+
+INC_1 = {"id": "1", "created": "2022-01-01", "status": 0, "name": "inc_a"} # A D
+INC_2 = {"id": "2", "created": "2022-01-01", "status": 1, "name": "inc_b"} # A B E F
+INC_3 = {"id": "3", "created": "2024-01-01", "status": 2, "name": "inc_c"} # A B E F
+INC_4 = {"id": "4", "created": "2024-01-01", "status": 3, "name": "inc_d"} # E F
+INC_5 = {"id": "5", "created": "2024-01-01", "status": 2, "name": "inc_e"} # C F
+INC_6 = {"id": "6", "created": "2024-01-01", "status": 1, "name": "inc_f"} # C D
+INCIDENTS_LIST = [INC_1, INC_2, INC_3, INC_4, INC_5, INC_6]
+
+
+def get_related_indicators(incident_id: str):
+ return [i for i in INDICATORS_LIST if incident_id in i["investigationIDs"]]
+
+
+def mock_execute_command(command: str, args: dict):
+ query: str = args.get("query") or ""
+ from_date: str = args.get("fromDate") or ""
+ match command:
+ case "findIndicators":
+ if match := re.search("investigationIDs:(.*)", query):
+ incident_id = match.group(1)
+ res = [
+ i for i in get_related_indicators(incident_id)
+ if not from_date or parse(i["created"]) >= parse(from_date)
+ ]
+ case "GetIndicatorsByQuery":
+ match = re.search(r"investigationIDs:\(([^\)]*)\)", query)
+ incident_ids = set(match.group(1).split(" ") if match and match.group(1) else [])
+ res = [i for i in INDICATORS_LIST if set(i["investigationIDs"]) & incident_ids]
+ res = [{k: v for k, v in i.items() if k in args["populateFields"] or k == "id"} for i in res]
+ case "GetIncidentsByQuery":
+ match = re.search(r"incident\.id:\((.*)\)", query)
+ incident_ids = set(match.group(1).split(" ") if match and match.group(1) else [])
+ res = json.dumps([
+ {k: v for k, v in i.items() if k in args["populateFields"] or k == "id"} for i in INCIDENTS_LIST
+ if i["id"] in incident_ids and (not from_date or parse(i["created"]) >= parse(from_date))
+ ])
+ case _:
+ raise Exception(f"Unmocked command: {command}")
+ return [{"Contents": res, "Type": "json"}]
+
+
+@pytest.fixture(autouse=True)
+def setup(mocker):
+ mocker.patch.object(demisto, "executeCommand", side_effect=mock_execute_command)
+
+
+@pytest.mark.parametrize("indicator_types, expected_indicators", [
+ (["file"], [IND_A, IND_E]),
+ (["file", "domain"], [IND_A, IND_B, IND_E]),
+ ([], [IND_A, IND_B, IND_E]),
+])
+def test_get_indicators_of_actual_incident(indicator_types: list, expected_indicators: list) -> None:
+ """
+ Given:
+ - An incident INC_2 with file (IND_A, IND_E) and domain (IND_B) indicators associated with it
+ - Different `indicator_types` values
+ When:
+ - Running get_indicators_of_actual_incident() on INC_2 for each `indicator_types` value
+ Then:
+ - Ensure the expected indicators are returned
+ """
+ expected_ids = {inc["id"] for inc in expected_indicators}
+ res: dict = get_indicators_of_actual_incident(
+ incident_id=INC_2["id"],
+ indicator_types=indicator_types,
+ min_number_of_indicators=2,
+ max_incidents_per_indicator=3,
+ )
+ assert set(res.keys()) == expected_ids
+
+
+def test_get_indicators_of_actual_incident__below_minimal_num_of_indicators() -> None:
+ """
+ Given:
+ - An incident INC_2 with only one domain indicator (IND_B) associated with it
+ When:
+ - Running get_indicators_of_actual_incident() on INC_2 for domain indicators only
+ - min_number_of_indicators=2, meaning at least two indicators must be collected, otherwise nothing is returned
+ Then:
+ - Ensure nothing is returned
+ """
+ res: dict = get_indicators_of_actual_incident(
+ incident_id=INC_2["id"],
+ indicator_types=["domain"],
+ min_number_of_indicators=2,
+ max_incidents_per_indicator=3,
+ )
+ assert not res
+
+
+@pytest.mark.parametrize("indicators, query, from_date, expected_incidents", [
+ ([], "", None, []),
+ ([IND_A, IND_B, IND_C], "query", None, [INC_1, INC_2, INC_3, INC_5, INC_6]),
+ ([IND_A, IND_B, IND_C], "", None, [INC_1, INC_2, INC_3, INC_5, INC_6]),
+])
+def test_get_related_incidents(indicators: list, query: str, from_date: str, expected_incidents: list) -> None:
+ """
+ Given:
+ - Different sets of indicators
+ When:
+ - Running get_related_incidents()
+ Then:
+ - Ensure the expected incidents ids are returned
+ """
+ indicators = {ind["id"]: ind for ind in indicators}
+ expected_ids = {inc["id"] for inc in expected_incidents}
+ assert set(get_related_incidents(indicators, query, from_date)) == expected_ids
+
+
+def test_get_related_incidents_filtered() -> None:
+ """
+ Given:
+ - A set of indicators: IND_A, IND_B, IND_C
+ When:
+ - Running get_related_incidents() with from_date="2023-01-01"
+ Then:
+ - Ensure only INC_3, INC_5, INC_6 are returned since INC_1, INC_2 have created dates of 2022-01-01
+ """
+ indicators = {inc["id"]: inc for inc in [IND_A, IND_B, IND_C]}
+ expected_ids = {inc["id"] for inc in [INC_3, INC_5, INC_6]}
+ assert set(get_related_incidents(indicators, "", from_date="2023-01-01")) == expected_ids
+
+
+def test_get_related_incidents_playground() -> None:
+ """
+ Given:
+ - A playground incident ID
+ When:
+ - Running get_related_incidents()
+ Then:
+ - Ensure nothing is returned
+ """
+ indicators = {"ind": {"investigationIDs": [PLAYGROUND_ID]}}
+ assert get_related_incidents(indicators, "query", None) == []
+
+
+@pytest.mark.parametrize("incidents, indicators, expected_indicators", [
+ ([INC_1, INC_6], [IND_A, IND_B, IND_C, IND_D, IND_E], [IND_A, IND_C, IND_D]),
+ ([INC_1, INC_6], [], []),
+ ([], [IND_A, IND_B, IND_C, IND_D, IND_E], []),
+])
+def test_get_mutual_indicators(incidents: list[dict], indicators: list[dict], expected_indicators: list[dict]) -> None:
+ """
+ Given:
+ - Different sets of incidents
+ - A list of indicators of the actual incident
+ When:
+ - Running get_indicators_of_related_incidents() with max_incidents_per_indicator=10
+ - Running get_mutual_indicators() on the result and the indicators of the actual incident
+ Then:
+ - Ensure the expected mutual indicators (which must be a subset of the given indicators)
+ of the given incidents are returned
+ """
+ incident_ids = [inc["id"] for inc in incidents]
+ indicators_of_actual_incidents = {ind["id"]: ind for ind in indicators}
+ related_incidents = get_indicators_of_related_incidents(incident_ids, max_incidents_per_indicator=10)
+ assert get_mutual_indicators(related_incidents, indicators_of_actual_incidents) == expected_indicators
+
+
+def test_find_similar_incidents_by_indicators_end_to_end() -> None:
+ """
+ Given:
+ - An incident INC_2 with file (IND_A, IND_E, IND_F) and domain (IND_B) indicators associated with it
+ - INC_1, INC_3, INC_4 are incidents associated with indicators IND_A, IND_B, IND_E
+ - IND_F has 4 indicators associated with it
+ When:
+ - Running find_similar_incidents_by_indicators() on INC_2
+ - showActualIncident is true
+ - maxIncidentsInIndicatorsForWhiteList is 3
+ - threshold is 0.2
+ Then:
+ - Ensure the actual incident (INC_2) is included in the results
+ - Ensure IND_A, IND_B, IND_E are collected as mutual indicators
+ - Ensure IND_F is not included as a mutual indicator
+ - Ensure INC_1, INC_3, INC_4 are collected as similar incidents, and ensure their expected similarity scores
+ """
+ command_results_list = find_similar_incidents_by_indicators(
+ INC_2["id"],
+ args={
+ "showActualIncident": "true",
+ "minNumberOfIndicators": "2",
+ "maxIncidentsInIndicatorsForWhiteList": "3",
+ "threshold": "0.2",
+ "maxIncidentsToDisplay": "3",
+ "fieldsIncidentToDisplay": "created,name",
+ },
+ )
+
+ actual_incident_results = command_results_list[0].readable_output
+ assert "Actual Incident" in actual_incident_results
+
+ mutual_indicators = command_results_list[1].outputs
+ assert {i["id"] for i in mutual_indicators} == {i["id"] for i in [IND_A, IND_B, IND_E]}
+
+ similar_incidents = command_results_list[2].outputs["similarIncident"]
+ expected_similar_incidents_to_similarity = {
+ INC_1["id"]: 0.3, # ([A] D) / [A B E]
+ INC_3["id"]: 1.0, # [A B E] / [A B E]
+ INC_4["id"]: 0.3, # ([E]) / [A B E]
}
- assert get_indicators_map(indicator) == res
+ assert len(similar_incidents) == len(expected_similar_incidents_to_similarity)
+ for inc in similar_incidents:
+ assert inc["id"] in expected_similar_incidents_to_similarity
+ assert inc["similarity indicators"] == expected_similar_incidents_to_similarity[inc["id"]]
+
+
+def run_args_validations(
+ command_results_list: list[CommandResults],
+ min_number_of_indicators: int,
+ max_incs_in_indicators: int,
+ from_date: str,
+ threshold: float,
+ max_incidents: int,
+ fields_to_display: list[str],
+) -> None:
+ # a helper method for the end to end test below
+ mutual_indicators = command_results_list[0].outputs
+ assert len(mutual_indicators) >= min_number_of_indicators or mutual_indicators == []
+ for mutual_indicator in mutual_indicators:
+ i = [ind for ind in INDICATORS_LIST if ind["id"] == mutual_indicator["id"]][0]
+ rel_inc_count = len(i["investigationIDs"])
+ assert rel_inc_count <= max_incs_in_indicators, f"{i=}"
+
+ similar_incidents = command_results_list[1].outputs["similarIncident"] or []
+ assert len(similar_incidents) <= max_incidents
+ for similar_incident in similar_incidents:
+ assert similar_incident["similarity indicators"] >= threshold, f"{similar_incident=}"
+ assert all(field in similar_incident for field in fields_to_display)
+ i = [inc for inc in INCIDENTS_LIST if inc["id"] == similar_incident["id"]][0]
+ assert not from_date or dateparser.parse(from_date) <= dateparser.parse(i["created"]), f"{i=}"
+
+def test_find_similar_incidents_by_indicators_end_to_end__different_args() -> None:
+ """
+ Given:
+ - Different arguments for the script
+ - showActualIncident is always "false"
+ When:
+ - Running find_similar_incidents_by_indicators()
+ Then:
+ - Ensure the outputs always match all requirements according to the given arguments
+ """
+ fields_to_display = ["created", "name"]
+ for inc in INCIDENTS_LIST:
+ for min_number_of_indicators in range(0, 7, 3):
+ for max_incs_in_indicators in range(0, 7, 3):
+ for threshold in np.linspace(0, 1, 4):
+ for max_incidents in range(0, 7, 3):
+ for from_date in ["", "2023-01-01"]:
+ results = find_similar_incidents_by_indicators(
+ inc["id"],
+ args={
+ "minNumberOfIndicators": str(min_number_of_indicators),
+ "maxIncidentsInIndicatorsForWhiteList": str(max_incs_in_indicators),
+ "threshold": str(threshold),
+ "fromDate": from_date,
+ "maxIncidentsToDisplay": str(max_incidents),
+ "showActualIncident": "false",
+ "fieldsIncidentToDisplay": ",".join(fields_to_display),
+ },
+ )
+ run_args_validations(
+ results,
+ min_number_of_indicators,
+ max_incs_in_indicators,
+ from_date,
+ threshold,
+ max_incidents,
+ fields_to_display,
+ )
-def test_match_indicators_incident(mocker):
- res = {'1': ['a', 'd'], '2': ['a', 'b', 'c']}
- assert match_indicators_incident(indicators_list, ['1', '2']) == res
+def test_find_similar_incidents_by_indicators_end_to_end__no_results() -> None:
+ """
+ Given:
+ - Inputs that would not return any mutual indicators or similar incidents
+ When:
+ - Running find_similar_incidents_by_indicators()
+ Then:
+ - Ensure the command succeeds with empty lists for mutual_indicators and similar_incidents
+ """
+ command_results_list = find_similar_incidents_by_indicators(
+ INC_1["id"],
+ args={
+ "minNumberOfIndicators": "7",
+ "maxIncidentsInIndicatorsForWhiteList": "0",
+ "threshold": "1",
+ "maxIncidentsToDisplay": "0",
+ "showActualIncident": "false",
+ "fieldsIncidentToDisplay": "",
+ },
+ )
+ mutual_indicators = command_results_list[0].outputs
+ assert not mutual_indicators
+ similar_incidents = command_results_list[1].outputs["similarIncident"]
+ assert not similar_incidents
-def test_score(mocker):
- normalize_function = TRANSFORMATION['indicators']['normalize']
- incident = pd.DataFrame({'indicators': ['1 2 3 4 5 6']})
+
+def test_score():
+ """ Runs some sanity tests for the FrequencyIndicators transformer
+ """
+ incident = pd.DataFrame({"indicators": ["1 2 3 4 5 6"]})
# Check if incident is rare then the score is higher
- incidents_1 = pd.DataFrame({'indicators': ['1 2', '1 3', '1 3']})
- tfidf = FrequencyIndicators('indicators', normalize_function, incident)
+ incidents_1 = pd.DataFrame({"indicators": ["1 2", "1 3", "1 3"]})
+ tfidf = FrequencyIndicators("indicators", incident)
tfidf.fit(incidents_1)
res = tfidf.transform(incidents_1)
scores = res.values.tolist()
assert (all(scores[i] >= scores[i + 1] for i in range(len(scores) - 1)))
assert (all(scores[i] >= 0 for i in range(len(scores) - 1)))
# Check if same rarity then same scores
- incidents_1 = pd.DataFrame({'indicators': ['1 2', '3 4']})
- tfidf = FrequencyIndicators('indicators', normalize_function, incident)
+ incidents_1 = pd.DataFrame({"indicators": ["1 2", "3 4"]})
+ tfidf = FrequencyIndicators("indicators", incident)
tfidf.fit(incidents_1)
res = tfidf.transform(incidents_1)
scores = res.values.tolist()
assert (all(scores[i] == scores[i + 1] for i in range(len(scores) - 1)))
assert (all(scores[i] >= 0 for i in range(len(scores) - 1)))
# Check if more indicators in commun them better score
- incidents_1 = pd.DataFrame({'indicators': ['1 2 3', '4 5', '6']})
- tfidf = FrequencyIndicators('indicators', normalize_function, incident)
+ incidents_1 = pd.DataFrame({"indicators": ["1 2 3", "4 5", "6"]})
+ tfidf = FrequencyIndicators("indicators", incident)
tfidf.fit(incidents_1)
res = tfidf.transform(incidents_1)
scores = res.values.tolist()
assert (all(scores[i] >= scores[i + 1] for i in range(len(scores) - 1)))
assert (all(scores[i] >= 0 for i in range(len(scores) - 1)))
-
-
-@pytest.mark.parametrize("incident_ids, expected_result", [
- (['1', '2', '3'], "incident.id:(1 2 3)"),
-])
-def test_get_ids_condition_clause(incident_ids, expected_result):
- from DBotFindSimilarIncidentsByIndicators import get_ids_condition_clause
- result = get_ids_condition_clause(incident_ids)
- assert expected_result == result
diff --git a/Packs/Base/TestPlaybooks/DBotFindSimilarIncidentsByIndicators_-_Test.yml b/Packs/Base/TestPlaybooks/DBotFindSimilarIncidentsByIndicators_-_Test.yml
new file mode 100644
index 000000000000..25f4b73c32da
--- /dev/null
+++ b/Packs/Base/TestPlaybooks/DBotFindSimilarIncidentsByIndicators_-_Test.yml
@@ -0,0 +1,435 @@
+id: DBotFindSimilarIncidentsByIndicators - Test
+version: -1
+inputs: []
+name: DBotFindSimilarIncidentsByIndicators - Test
+outputs: []
+starttaskid: "0"
+tasks:
+ "0":
+ id: "0"
+ taskid: 7f2c9926-6f44-4b32-8612-37dd52a259de
+ type: start
+ task:
+ id: 7f2c9926-6f44-4b32-8612-37dd52a259de
+ version: -1
+ name: ""
+ iscommand: false
+ brand: ""
+ description: ''
+ nexttasks:
+ '#none#':
+ - "1"
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 50,
+ "y": 50
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "1":
+ id: "1"
+ taskid: fb2b2502-5db5-4f83-8e58-d40cf884d68e
+ type: regular
+ task:
+ id: fb2b2502-5db5-4f83-8e58-d40cf884d68e
+ version: -1
+ name: Create two dummy incidents
+ description: Creates incidents from json file, and stores it in the instance context.
+ script: CreateIncidents|||create-test-incident-from-raw-json
+ type: regular
+ iscommand: true
+ brand: CreateIncidents
+ nexttasks:
+ '#none#':
+ - "3"
+ scriptarguments:
+ incident_raw_json:
+ simple: '[{"name": "DBotFindSimilarIncidentsByIndicatorsTest"}, {"name": "DBotFindSimilarIncidentsByIndicatorsTest"}]'
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 50,
+ "y": 195
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "3":
+ id: "3"
+ taskid: 72aeccba-1e66-402e-89fd-4a4b98d7f743
+ type: regular
+ task:
+ id: 72aeccba-1e66-402e-89fd-4a4b98d7f743
+ version: -1
+ name: Sleep 1.5 minutes
+ description: Sleep for X seconds.
+ scriptName: Sleep
+ type: regular
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "10"
+ scriptarguments:
+ seconds:
+ simple: "90"
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 50,
+ "y": 370
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "4":
+ id: "4"
+ taskid: 5c98a9ef-3a4b-4b4e-84dc-846f093b918d
+ type: regular
+ task:
+ id: 5c98a9ef-3a4b-4b4e-84dc-846f093b918d
+ version: -1
+ name: DBotFindSimilarIncidentsByIndicators
+ description: Finds similar incidents based on indicators' similarity. Indicators' contribution to the final score is based on their scarcity.
+ scriptName: DBotFindSimilarIncidentsByIndicators
+ type: regular
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "9"
+ scriptarguments:
+ fromDate:
+ simple: 5 minutes ago
+ indicatorsTypes:
+ simple: IP
+ maxIncidentsToDisplay:
+ simple: "10"
+ query:
+ simple: name:DBotFindSimilarIncidentsByIndicatorsTest
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 50,
+ "y": 1070
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "5":
+ id: "5"
+ taskid: 0695c71d-70ed-4a22-8bc2-480e9ee9811d
+ type: condition
+ task:
+ id: 0695c71d-70ed-4a22-8bc2-480e9ee9811d
+ version: -1
+ name: Are there similar incidents?
+ type: condition
+ iscommand: false
+ brand: ""
+ nexttasks:
+ "yes":
+ - "6"
+ separatecontext: false
+ conditions:
+ - label: "yes"
+ condition:
+ - - operator: isExists
+ left:
+ value:
+ simple: DBotFindSimilarIncidentsByIndicators.similarIncident.id
+ iscontext: true
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 50,
+ "y": 1420
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "6":
+ id: "6"
+ taskid: 0dd851b7-87cb-4a0b-8841-6a6ff94b06f7
+ type: regular
+ task:
+ id: 0dd851b7-87cb-4a0b-8841-6a6ff94b06f7
+ version: -1
+ name: Close the dummy incidents
+ description: commands.local.cmd.close.inv
+ script: Builtin|||closeInvestigation
+ type: regular
+ iscommand: true
+ brand: Builtin
+ nexttasks:
+ '#none#':
+ - "7"
+ scriptarguments:
+ id:
+ simple: ${foundIncidents.id}
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 50,
+ "y": 1595
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "7":
+ id: "7"
+ taskid: ba183997-fd83-4427-8380-94b1786a197e
+ type: title
+ task:
+ id: ba183997-fd83-4427-8380-94b1786a197e
+ version: -1
+ name: Done
+ type: title
+ iscommand: false
+ brand: ""
+ description: ''
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 50,
+ "y": 1770
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "8":
+ id: "8"
+ taskid: 4aa321b2-7c7b-4161-82ec-e202500cf940
+ type: regular
+ task:
+ id: 4aa321b2-7c7b-4161-82ec-e202500cf940
+ version: -1
+ name: Relate indicator 8.8.8.8 to all incidents
+ description: commands.local.cmd.new.indicator
+ script: Builtin|||createNewIndicator
+ type: regular
+ iscommand: true
+ brand: Builtin
+ nexttasks:
+ '#none#':
+ - "4"
+ scriptarguments:
+ relateToIncident:
+ simple: "true"
+ relatedIncidents:
+ complex:
+ root: foundIncidents
+ accessor: id
+ transformers:
+ - operator: join
+ args:
+ separator:
+ value:
+ simple: ','
+ type:
+ simple: IP
+ value:
+ simple: 8.8.8.8
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 50,
+ "y": 895
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "9":
+ id: "9"
+ taskid: e60306c9-5c2d-4c18-80d4-dde4f5988401
+ type: condition
+ task:
+ id: e60306c9-5c2d-4c18-80d4-dde4f5988401
+ version: -1
+ name: Is 8.8.8.8 a mutual indicator?
+ type: condition
+ iscommand: false
+ brand: ""
+ nexttasks:
+ "yes":
+ - "5"
+ separatecontext: false
+ conditions:
+ - label: "yes"
+ condition:
+ - - operator: containsGeneral
+ left:
+ value:
+ simple: MutualIndicators.indicators.value
+ iscontext: true
+ right:
+ value:
+ simple: 8.8.8.8
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 50,
+ "y": 1245
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "10":
+ id: "10"
+ taskid: b6bd4033-f088-4ead-87cf-f85791b206e1
+ type: regular
+ task:
+ id: b6bd4033-f088-4ead-87cf-f85791b206e1
+ version: -1
+ name: Get the dummy incidents' IDs
+ description: |-
+ Searches Demisto incidents. A summarized version of this script is available with the summarizedversion argument.
+
+ This automation runs using the default Limited User role, unless you explicitly change the permissions.
+ For more information, see the section about permissions here:
+ https://docs-cortex.paloaltonetworks.com/r/Cortex-XSOAR/6.10/Cortex-XSOAR-Administrator-Guide/Automations
+ scriptName: SearchIncidentsV2
+ type: regular
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "11"
+ scriptarguments:
+ fromdate:
+ simple: 5 minutes ago
+ name:
+ simple: DBotFindSimilarIncidentsByIndicatorsTest
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 50,
+ "y": 545
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "11":
+ id: "11"
+ taskid: e628f1ec-cf9b-4f31-85b6-52d3d2cf541e
+ type: condition
+ task:
+ id: e628f1ec-cf9b-4f31-85b6-52d3d2cf541e
+ version: -1
+ name: Were they found?
+ type: condition
+ iscommand: false
+ brand: ""
+ nexttasks:
+ "yes":
+ - "8"
+ separatecontext: false
+ conditions:
+ - label: "yes"
+ condition:
+ - - operator: isExists
+ left:
+ value:
+ simple: foundIncidents.id
+ iscontext: true
+ right:
+ value: {}
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 50,
+ "y": 720
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+view: |-
+ {
+ "linkLabelsPosition": {},
+ "paper": {
+ "dimensions": {
+ "height": 1785,
+ "width": 380,
+ "x": 50,
+ "y": 50
+ }
+ }
+ }
+description: ''
+fromversion: 5.0.0
diff --git a/Packs/Base/pack_metadata.json b/Packs/Base/pack_metadata.json
index c105ac76cd4b..e3fa1328c9ac 100644
--- a/Packs/Base/pack_metadata.json
+++ b/Packs/Base/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Base",
"description": "The base pack for Cortex XSOAR.",
"support": "xsoar",
- "currentVersion": "1.33.18",
+ "currentVersion": "1.33.20",
"author": "Cortex XSOAR",
"serverMinVersion": "6.0.0",
"url": "https://www.paloaltonetworks.com/cortex",
diff --git a/Packs/BluecatAddressManager/ParsingRules/BluecatAddressManager_1_3/BluecatAddressManager_1_3.xif b/Packs/BluecatAddressManager/ParsingRules/BluecatAddressManager_1_3/BluecatAddressManager_1_3.xif
index 562329bf15dc..345b3feb56fa 100644
--- a/Packs/BluecatAddressManager/ParsingRules/BluecatAddressManager_1_3/BluecatAddressManager_1_3.xif
+++ b/Packs/BluecatAddressManager/ParsingRules/BluecatAddressManager_1_3/BluecatAddressManager_1_3.xif
@@ -9,7 +9,7 @@ filter _raw_log ~= "<\d+>(\w+\s*\d+\s\d+:\d+:\d+)\s"
| alter
tmp_time1_1 = parse_timestamp("%Y %b %e %H:%M:%S", tmp_time1_1)
| alter
- tmp_timeDiff = timestamp_diff(tmp_time1_1, current_time(), "DAY")
+ tmp_timeDiff = timestamp_diff(tmp_time1_1, current_time(), "MILLISECOND")
// Check if the date is a future date
| alter
tmp_Year2 = if(tmp_timeDiff > 0, to_string(subtract(to_integer(tmp_Year),1)),null)
diff --git a/Packs/BluecatAddressManager/ReleaseNotes/1_1_12.md b/Packs/BluecatAddressManager/ReleaseNotes/1_1_12.md
new file mode 100644
index 000000000000..e69e00ce4870
--- /dev/null
+++ b/Packs/BluecatAddressManager/ReleaseNotes/1_1_12.md
@@ -0,0 +1,6 @@
+
+#### Parsing Rules
+
+##### Bluecat Address Manager
+
+Updated the Parsing Rule logic.
diff --git a/Packs/BluecatAddressManager/pack_metadata.json b/Packs/BluecatAddressManager/pack_metadata.json
index ad01490a1657..fc2e8db67b8d 100644
--- a/Packs/BluecatAddressManager/pack_metadata.json
+++ b/Packs/BluecatAddressManager/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Bluecat Address Manager",
"description": "Use the BlueCat Address Manager integration to enrich IP addresses and manage response policies.",
"support": "xsoar",
- "currentVersion": "1.1.11",
+ "currentVersion": "1.1.12",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/BmcHelixRemedyForce/Integrations/BmcHelixRemedyForce/BmcHelixRemedyForce.yml b/Packs/BmcHelixRemedyForce/Integrations/BmcHelixRemedyForce/BmcHelixRemedyForce.yml
index 5344c8773caa..377af4d1b66c 100644
--- a/Packs/BmcHelixRemedyForce/Integrations/BmcHelixRemedyForce/BmcHelixRemedyForce.yml
+++ b/Packs/BmcHelixRemedyForce/Integrations/BmcHelixRemedyForce/BmcHelixRemedyForce.yml
@@ -797,7 +797,7 @@ script:
- contextPath: BmcRemedyforce.ServiceRequest.Type
description: The type of the service request.
type: String
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
isfetch: true
runonce: false
script: '-'
diff --git a/Packs/BmcHelixRemedyForce/ReleaseNotes/1_0_39.md b/Packs/BmcHelixRemedyForce/ReleaseNotes/1_0_39.md
new file mode 100644
index 000000000000..20c93f2bd817
--- /dev/null
+++ b/Packs/BmcHelixRemedyForce/ReleaseNotes/1_0_39.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### BMC Helix Remedyforce
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/BmcHelixRemedyForce/pack_metadata.json b/Packs/BmcHelixRemedyForce/pack_metadata.json
index 4a655de66faa..2d2f5480f0e2 100644
--- a/Packs/BmcHelixRemedyForce/pack_metadata.json
+++ b/Packs/BmcHelixRemedyForce/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Bmc Helix Remedyforce",
"description": "Integration of BMC Helix Remedyforce with Cortex XSOAR. BMC Helix Remedyforce integration allows customers to create/update service requests and incidents. It also allows to update status, resolve service requests and incidents with customer notes. This integration exposes standard ticketing capabilities that can be utilized as part of automation & orchestration.",
"support": "xsoar",
- "currentVersion": "1.0.38",
+ "currentVersion": "1.0.39",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/BrocadeSwitch/ParsingRules/BrocadeSwitch/BrocadeSwitch.xif b/Packs/BrocadeSwitch/ParsingRules/BrocadeSwitch/BrocadeSwitch.xif
index 240a2f62ea26..69aa03ecfb2d 100644
--- a/Packs/BrocadeSwitch/ParsingRules/BrocadeSwitch/BrocadeSwitch.xif
+++ b/Packs/BrocadeSwitch/ParsingRules/BrocadeSwitch/BrocadeSwitch.xif
@@ -9,7 +9,7 @@ filter _raw_log ~= "\>\s*\w+\s+\d+\s+\d+\:\d+\:\d+"
| alter // Converts the timestamp string representation to datetime format
tmp_current_year_timestamp_datetime = parse_timestamp("%Y %b %d %H:%M:%S", tmp_current_year_timestamp_string)
| alter // Check if the calculated date is in the future (due to year transitioning during log ingestion )
- tmp_time_difference = timestamp_diff(tmp_current_year_timestamp_datetime, current_time(), "DAY")
+ tmp_time_difference = timestamp_diff(tmp_current_year_timestamp_datetime, current_time(), "MILLISECOND")
| alter // Calculate previous year
tmp_previous_year = if(tmp_time_difference > 0, to_string(subtract(to_integer(tmp_current_year), 1)), null)
| alter // Adjust timestamp to previous year if required
diff --git a/Packs/BrocadeSwitch/ReleaseNotes/1_0_10.md b/Packs/BrocadeSwitch/ReleaseNotes/1_0_10.md
new file mode 100644
index 000000000000..cd5a5ac528a6
--- /dev/null
+++ b/Packs/BrocadeSwitch/ReleaseNotes/1_0_10.md
@@ -0,0 +1,6 @@
+
+#### Parsing Rules
+
+##### BrocadeSwitch Parsing Rule
+
+Updated the Parsing Rule logic.
diff --git a/Packs/BrocadeSwitch/pack_metadata.json b/Packs/BrocadeSwitch/pack_metadata.json
index 0313099b1f26..3883a0b941a5 100644
--- a/Packs/BrocadeSwitch/pack_metadata.json
+++ b/Packs/BrocadeSwitch/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Brocade Switch",
"description": "Modeling Rules for the Brocade Switch logs collector",
"support": "xsoar",
- "currentVersion": "1.0.9",
+ "currentVersion": "1.0.10",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CTIX/Integrations/CTIX/CTIX.yml b/Packs/CTIX/Integrations/CTIX/CTIX.yml
index 6da3fbd0a3b5..46d076e565d7 100644
--- a/Packs/CTIX/Integrations/CTIX/CTIX.yml
+++ b/Packs/CTIX/Integrations/CTIX/CTIX.yml
@@ -784,7 +784,7 @@ script:
- contextPath: CTIX.Intel.status
description: Status code returned from the api.
type: String
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
subtype: python3
tests:
- No tests
diff --git a/Packs/CTIX/Integrations/CTIXv3/CTIXv3.yml b/Packs/CTIX/Integrations/CTIXv3/CTIXv3.yml
index e8913172fc53..f01d3bdc1f9d 100644
--- a/Packs/CTIX/Integrations/CTIXv3/CTIXv3.yml
+++ b/Packs/CTIX/Integrations/CTIXv3/CTIXv3.yml
@@ -1859,7 +1859,7 @@ script:
- contextPath: DBotScore.Score
description: The actual score.
type: Number
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
subtype: python3
tests:
- No tests (auto formatted)
diff --git a/Packs/CTIX/ReleaseNotes/2_2_15.md b/Packs/CTIX/ReleaseNotes/2_2_15.md
new file mode 100644
index 000000000000..ceeb316e2933
--- /dev/null
+++ b/Packs/CTIX/ReleaseNotes/2_2_15.md
@@ -0,0 +1,5 @@
+#### Integrations
+##### Cyware Threat Intelligence eXchange
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
+##### CTIX v3
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/CTIX/pack_metadata.json b/Packs/CTIX/pack_metadata.json
index 6f36e03b4c59..22dfc1024259 100644
--- a/Packs/CTIX/pack_metadata.json
+++ b/Packs/CTIX/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "CTIX",
"description": "Cyware Threat Intelligence eXchange",
"support": "partner",
- "currentVersion": "2.2.14",
+ "currentVersion": "2.2.15",
"author": "Cyware Labs",
"url": "https://cyware.com/",
"email": "connector-dev@cyware.com",
diff --git a/Packs/CVE_2023_36884_-_Microsoft_Office_and_Windows_RCE/Playbooks/playbook-CVE-2023-36884_-_Microsoft_Office_and_Windows_HTML_RCE.yml b/Packs/CVE_2023_36884_-_Microsoft_Office_and_Windows_RCE/Playbooks/playbook-CVE-2023-36884_-_Microsoft_Office_and_Windows_HTML_RCE.yml
index 5ed1cda1f6fd..db2e92099bcd 100644
--- a/Packs/CVE_2023_36884_-_Microsoft_Office_and_Windows_RCE/Playbooks/playbook-CVE-2023-36884_-_Microsoft_Office_and_Windows_HTML_RCE.yml
+++ b/Packs/CVE_2023_36884_-_Microsoft_Office_and_Windows_RCE/Playbooks/playbook-CVE-2023-36884_-_Microsoft_Office_and_Windows_HTML_RCE.yml
@@ -2315,17 +2315,11 @@ inputs:
required: false
description: The playbook description to be used in the Rapid Breach Response - Set Incident Info sub-playbook.
playbookInputQuery:
-- key: autoBlockIndicators
- value:
- simple: "True"
- required: false
- description: Wether to block the indicators automatically.
- playbookInputQuery:
-- key: QRadarTimeRange
+- key: XQLTimeRange
value:
- simple: Last 14 Days
+ simple: 14 days ago
required: false
- description: The time range for the QRadar queries.
+ description: The time range for the XQL queries.
playbookInputQuery:
- key: SplunkEarliestTime
value:
@@ -2339,23 +2333,51 @@ inputs:
required: false
description: The time range for the Elastic queries.
playbookInputQuery:
+- key: ElasticIndex
+ value: {}
+ required: false
+ description: The elastic index to search in.
+ playbookInputQuery:
- key: LogAnalyticsTimespan
value:
simple: 14d
required: false
description: The time range for the Azure Log Analytics queries.
playbookInputQuery:
-- key: XQLTimeRange
+- key: QRadarTimeRange
value:
- simple: 14 days ago
+ simple: Last 14 Days
required: false
- description: The time range for the XQL queries.
+ description: The time range for the QRadar queries.
playbookInputQuery:
-- key: ElasticIndex
- value: {}
+- key: autoBlockIndicators
+ value:
+ simple: "True"
required: false
- description: The elastic index to search in.
+ description: Whether to block the indicators automatically.
playbookInputQuery:
+inputSections:
+- inputs:
+ - PlaybookDescription
+ name: Incident Management
+ description: Incident management settings and data, including escalation processes, and user engagements.
+- inputs:
+ - XQLTimeRange
+ - SplunkEarliestTime
+ - ElasticEarliestTime
+ - ElasticIndex
+ - LogAnalyticsTimespan
+ - QRadarTimeRange
+ name: Investigation
+ description: Investigation settings and data, including queries and data collection.
+- inputs:
+ - autoBlockIndicators
+ name: Remediation
+ description: Remediation settings and data, including containment, eradication, and recovery.
+outputSections:
+- outputs: []
+ name: General (Outputs group)
+ description: Generic group for outputs
outputs: []
tests:
- No tests (auto formatted)
diff --git a/Packs/CVE_2023_36884_-_Microsoft_Office_and_Windows_RCE/ReleaseNotes/1_0_2.md b/Packs/CVE_2023_36884_-_Microsoft_Office_and_Windows_RCE/ReleaseNotes/1_0_2.md
new file mode 100644
index 000000000000..db65210e1cf0
--- /dev/null
+++ b/Packs/CVE_2023_36884_-_Microsoft_Office_and_Windows_RCE/ReleaseNotes/1_0_2.md
@@ -0,0 +1,6 @@
+
+#### Playbooks
+
+##### CVE-2023-36884 - Microsoft Office and Windows HTML RCE
+
+Added playbook input sections to organize the inputs into related categories, which simplifies the playbook input visibility (Available from XSOAR 8.5/XSIAM 2.0).
diff --git a/Packs/CVE_2023_36884_-_Microsoft_Office_and_Windows_RCE/pack_metadata.json b/Packs/CVE_2023_36884_-_Microsoft_Office_and_Windows_RCE/pack_metadata.json
index e1db3649b52b..842c5e305efe 100644
--- a/Packs/CVE_2023_36884_-_Microsoft_Office_and_Windows_RCE/pack_metadata.json
+++ b/Packs/CVE_2023_36884_-_Microsoft_Office_and_Windows_RCE/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "CVE-2023-36884 - Microsoft Office and Windows HTML RCE",
"description": "This pack handles CVE-2023-36884 - Microsoft Office and Windows HTML RCE vulnerability",
"support": "xsoar",
- "currentVersion": "1.0.1",
+ "currentVersion": "1.0.2",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CarbonBlackDefense/Integrations/CarbonBlackEndpointStandardEventCollector/CarbonBlackEndpointStandardEventCollector.yml b/Packs/CarbonBlackDefense/Integrations/CarbonBlackEndpointStandardEventCollector/CarbonBlackEndpointStandardEventCollector.yml
index 4ac3f02dd719..fd34d9e2c2ae 100644
--- a/Packs/CarbonBlackDefense/Integrations/CarbonBlackEndpointStandardEventCollector/CarbonBlackEndpointStandardEventCollector.yml
+++ b/Packs/CarbonBlackDefense/Integrations/CarbonBlackEndpointStandardEventCollector/CarbonBlackEndpointStandardEventCollector.yml
@@ -77,7 +77,7 @@ script:
defaultValue: 2500
description: 'Fetch alerts and audit logs from Carbon Black Endpoint Standard.'
name: carbonblack-endpoint-standard-get-events
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
runonce: false
script: '-'
subtype: python3
diff --git a/Packs/CarbonBlackDefense/ReleaseNotes/3_1_1.md b/Packs/CarbonBlackDefense/ReleaseNotes/3_1_1.md
new file mode 100644
index 000000000000..e8f328455681
--- /dev/null
+++ b/Packs/CarbonBlackDefense/ReleaseNotes/3_1_1.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Carbon Black Endpoint Standard Event Collector
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/CarbonBlackDefense/pack_metadata.json b/Packs/CarbonBlackDefense/pack_metadata.json
index ce3dbfb3b8c3..7804f1afdc0c 100644
--- a/Packs/CarbonBlackDefense/pack_metadata.json
+++ b/Packs/CarbonBlackDefense/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Carbon Black Endpoint Standard",
"description": "Next-generation antivirus + EDR in one cloud-delivered platform that stops commodity malware, advanced malware, non-malware attacks and ransomware.",
"support": "xsoar",
- "currentVersion": "3.1.0",
+ "currentVersion": "3.1.1",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CarbonBlackEnterpriseEDR/CONTRIBUTORS.json b/Packs/CarbonBlackEnterpriseEDR/CONTRIBUTORS.json
new file mode 100644
index 000000000000..ce9d08a30890
--- /dev/null
+++ b/Packs/CarbonBlackEnterpriseEDR/CONTRIBUTORS.json
@@ -0,0 +1,3 @@
+[
+ "Fabio Dias"
+]
diff --git a/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/CarbonBlackEnterpriseEDR.py b/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/CarbonBlackEnterpriseEDR.py
index 4576ac6ab35d..7a63ebdf150a 100644
--- a/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/CarbonBlackEnterpriseEDR.py
+++ b/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/CarbonBlackEnterpriseEDR.py
@@ -1,12 +1,11 @@
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
-from typing import Union, Dict, Optional, Any, Tuple, List
+from typing import Any
import dateparser
-
import requests
import urllib3
-from CommonServerUserPython import * # noqa: E402 lgtm [py/polluting-import]
+
# Disable insecure warnings
urllib3.disable_warnings()
@@ -34,13 +33,13 @@ def test_module_request(self):
return self._http_request('POST', url_suffix=url_suffix, json_data=body)
- def search_alerts_request(self, group_results: bool = None, minimum_severity: int = None, create_time: Dict = None,
- device_os_version: List = None, policy_id: List = None, alert_tag: List = None,
- alert_id: List = None, device_username: List = None, device_id: List = None,
- device_os: List = None, process_sha256: List = None, policy_name: List = None,
- reputation: List = None, alert_type: List = None, alert_category: List = None,
- workflow: List = None, device_name: List = None, process_name: List = None,
- sort_field: str = None, sort_order: str = None, limit: str = None) -> Dict:
+ def search_alerts_request(self, group_results: bool = None, minimum_severity: int = None, create_time: dict = None,
+ device_os_version: list = None, policy_id: list = None, alert_tag: list = None,
+ alert_id: list = None, device_username: list = None, device_id: list = None,
+ device_os: list = None, process_sha256: list = None, policy_name: list = None,
+ reputation: list = None, alert_type: list = None, alert_category: list = None,
+ workflow: list = None, device_name: list = None, process_name: list = None,
+ sort_field: str = None, sort_order: str = None, limit: str = None) -> dict:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/alerts/_search'
body = {
'criteria': assign_params(
@@ -76,7 +75,7 @@ def search_alerts_request(self, group_results: bool = None, minimum_severity: in
return self._http_request('POST', suffix_url, json_data=body)
def alert_workflow_update_request(self, alert_id: str = None, state: str = None, comment: str = None,
- remediation_state: str = None) -> Dict:
+ remediation_state: str = None) -> dict:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/alerts/{alert_id}/workflow'
body = assign_params(
state=state,
@@ -86,10 +85,10 @@ def alert_workflow_update_request(self, alert_id: str = None, state: str = None,
return self._http_request('POST', suffix_url, json_data=body)
- def devices_list_request(self, device_id: List = None, status: List = None, device_os: List = None,
- last_contact_time: Dict[str, Optional[Any]] = None, ad_group_id: List = None,
- policy_id: List = None, target_priority: List = None, limit: int = None,
- sort_field: str = None, sort_order: str = None) -> Dict:
+ def devices_list_request(self, device_id: list = None, status: list = None, device_os: list = None,
+ last_contact_time: dict[str, Any | None] = None, ad_group_id: list = None,
+ policy_id: list = None, target_priority: list = None, limit: int = None,
+ sort_field: str = None, sort_order: str = None) -> dict:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/devices/_search'
body = {
@@ -114,7 +113,7 @@ def devices_list_request(self, device_id: List = None, status: List = None, devi
return self._http_request('POST', suffix_url, json_data=body)
- def device_quarantine_request(self, device_id: List = None) -> None:
+ def device_quarantine_request(self, device_id: list = None) -> None:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/device_actions'
body = {
@@ -127,7 +126,7 @@ def device_quarantine_request(self, device_id: List = None) -> None:
self._http_request('POST', suffix_url, json_data=body, resp_type='content')
- def device_unquarantine_request(self, device_id: List = None) -> None:
+ def device_unquarantine_request(self, device_id: list = None) -> None:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/device_actions'
body = {
@@ -140,7 +139,7 @@ def device_unquarantine_request(self, device_id: List = None) -> None:
self._http_request('POST', suffix_url, json_data=body, resp_type='content')
- def device_bypass_request(self, device_id: List = None) -> None:
+ def device_bypass_request(self, device_id: list = None) -> None:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/device_actions'
body = {
@@ -153,7 +152,7 @@ def device_bypass_request(self, device_id: List = None) -> None:
self._http_request('POST', suffix_url, json_data=body, resp_type='content')
- def device_unbypass_request(self, device_id: List = None) -> None:
+ def device_unbypass_request(self, device_id: list = None) -> None:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/device_actions'
body = {
@@ -166,7 +165,7 @@ def device_unbypass_request(self, device_id: List = None) -> None:
self._http_request('POST', suffix_url, json_data=body, resp_type='content')
- def device_background_scan_request(self, device_id: List = None) -> None:
+ def device_background_scan_request(self, device_id: list = None) -> None:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/device_actions'
body = {
@@ -179,7 +178,7 @@ def device_background_scan_request(self, device_id: List = None) -> None:
self._http_request('POST', suffix_url, json_data=body, resp_type='content')
- def device_background_scan_request_stop(self, device_id: List = None) -> None:
+ def device_background_scan_request_stop(self, device_id: list = None) -> None:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/device_actions'
body = {
@@ -192,7 +191,7 @@ def device_background_scan_request_stop(self, device_id: List = None) -> None:
self._http_request('POST', suffix_url, json_data=body, resp_type='content')
- def device_policy_update(self, device_id: List = None, policy_id: str = None) -> None:
+ def device_policy_update(self, device_id: list = None, policy_id: str = None) -> None:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/device_actions'
body = {
@@ -205,11 +204,11 @@ def device_policy_update(self, device_id: List = None, policy_id: str = None) ->
self._http_request('POST', suffix_url, json_data=body, resp_type='content')
- def list_watchlists_request(self) -> Dict:
+ def list_watchlists_request(self) -> dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists'
return self._http_request('GET', suffix_url)
- def get_watchlist_by_id_request(self, watchlist_id: str = None) -> Dict:
+ def get_watchlist_by_id_request(self, watchlist_id: str = None) -> dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists/{watchlist_id}'
return self._http_request('GET', suffix_url)
@@ -217,11 +216,11 @@ def delete_watchlist_request(self, watchlist_id: str = None) -> None:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists/{watchlist_id}'
self._http_request('DELETE', suffix_url, resp_type='content')
- def watchlist_alert_status_request(self, watchlist_id: str = None) -> Dict:
+ def watchlist_alert_status_request(self, watchlist_id: str = None) -> dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists/{watchlist_id}/alert'
return self._http_request('GET', suffix_url)
- def enable_watchlist_alert_request(self, watchlist_id: str = None) -> Dict:
+ def enable_watchlist_alert_request(self, watchlist_id: str = None) -> dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists/{watchlist_id}/alert'
return self._http_request('PUT', suffix_url)
@@ -230,7 +229,7 @@ def disable_watchlist_alert_request(self, watchlist_id: str = None) -> None:
self._http_request('DELETE', suffix_url, resp_type='content')
def create_watchlist_request(self, watchlist_name: str = None, description: str = None, tags_enabled: bool = None,
- alerts_enabled: bool = None, report_ids: List = None, classifier: Dict = None) -> Dict:
+ alerts_enabled: bool = None, report_ids: list = None, classifier: dict = None) -> dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists'
body = assign_params(
name=watchlist_name,
@@ -244,8 +243,8 @@ def create_watchlist_request(self, watchlist_name: str = None, description: str
return self._http_request('POST', suffix_url, json_data=body)
def update_watchlist_request(self, watchlist_id: str = None, watchlist_name: str = None, description: str = None,
- tags_enabled: bool = None, alerts_enabled: bool = None, report_ids: List = None,
- classifier: Dict = None) -> Dict:
+ tags_enabled: bool = None, alerts_enabled: bool = None, report_ids: list = None,
+ classifier: dict = None) -> dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists/{watchlist_id}'
body = assign_params(
name=watchlist_name,
@@ -257,12 +256,12 @@ def update_watchlist_request(self, watchlist_id: str = None, watchlist_name: str
)
return self._http_request('PUT', suffix_url, json_data=body)
- def get_ignore_ioc_status_request(self, report_id: str = None, ioc_id: str = None) -> Dict:
+ def get_ignore_ioc_status_request(self, report_id: str = None, ioc_id: str = None) -> dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id})/iocs/{ioc_id}/ignore'
return self._http_request('GET', suffix_url)
- def ignore_ioc_request(self, report_id: str = None, ioc_id: str = None) -> Dict:
+ def ignore_ioc_request(self, report_id: str = None, ioc_id: str = None) -> dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id}/iocs/{ioc_id}/ignore'
return self._http_request('PUT', suffix_url)
@@ -272,13 +271,13 @@ def reactivate_ioc_request(self, report_id: str = None, ioc_id: str = None) -> N
self._http_request('DELETE', suffix_url, resp_type='content')
- def get_report_request(self, report_id: str = None) -> Dict:
+ def get_report_request(self, report_id: str = None) -> dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id}'
return self._http_request('GET', suffix_url)
- def create_report_request(self, title: str = None, description: str = None, tags: List = None, severity: int = None,
- iocs: Dict = None, timestamp: int = None) -> Dict:
+ def create_report_request(self, title: str = None, description: str = None, tags: list = None, severity: int = None,
+ iocs: dict = None, timestamp: int = None) -> dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports'
body = assign_params(
@@ -291,7 +290,7 @@ def create_report_request(self, title: str = None, description: str = None, tags
)
return self._http_request('POST', suffix_url, json_data=body)
- def ignore_report_request(self, report_id: str = None) -> Dict:
+ def ignore_report_request(self, report_id: str = None) -> dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id}/ignore'
return self._http_request('PUT', suffix_url)
@@ -301,7 +300,7 @@ def reactivate_report_request(self, report_id: str = None) -> None:
self._http_request('DELETE', suffix_url, resp_type='content')
- def get_report_ignore_status_request(self, report_id: str = None) -> Dict:
+ def get_report_ignore_status_request(self, report_id: str = None) -> dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id}/ignore'
return self._http_request('GET', suffix_url)
@@ -311,8 +310,8 @@ def remove_report_request(self, report_id: str = None) -> None:
self._http_request('DELETE', suffix_url, resp_type='content')
def update_report_request(self, report_id: str = None, title: str = None, description: str = None,
- severity: int = None, iocs: Dict = None, tags: List = None,
- timestamp: int = None) -> Dict:
+ severity: int = None, iocs: dict = None, tags: list = None,
+ timestamp: int = None) -> dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id}'
body = assign_params(
title=title,
@@ -324,15 +323,15 @@ def update_report_request(self, report_id: str = None, title: str = None, descri
)
return self._http_request('PUT', suffix_url, json_data=body)
- def get_file_device_summary_request(self, sha256: str = None) -> Dict:
+ def get_file_device_summary_request(self, sha256: str = None) -> dict:
suffix_url = f'ubs/v1/orgs/{self.cb_org_key}/sha256/{sha256}/summary/device'
return self._http_request('GET', suffix_url)
- def get_file_metadata_request(self, sha256: str = None) -> Dict:
+ def get_file_metadata_request(self, sha256: str = None) -> dict:
suffix_url = f'ubs/v1/orgs/{self.cb_org_key}/sha256/{sha256}/metadata'
return self._http_request('GET', suffix_url)
- def get_file_request(self, sha256: List = None, expiration_seconds: int = None) -> Dict:
+ def get_file_request(self, sha256: list = None, expiration_seconds: int = None) -> dict:
suffix_url = f'/ubs/v1/orgs/{self.cb_org_key}/file/_download'
body = assign_params(
sha256=sha256,
@@ -341,7 +340,7 @@ def get_file_request(self, sha256: List = None, expiration_seconds: int = None)
return self._http_request('POST', suffix_url, json_data=body)
- def get_file_path_request(self, sha256: str = None) -> Dict:
+ def get_file_path_request(self, sha256: str = None) -> dict:
suffix_url = f'/ubs/v1/orgs/{self.cb_org_key}/sha256/{sha256}/summary/file_path'
return self._http_request('GET', suffix_url)
@@ -412,6 +411,40 @@ def create_search_event_by_process_request(self, process_guid: str, event_type:
response = self._http_request('POST', suffix_url, json_data=body)
return response
+ def update_threat_tags(self, threat_id: str = None, tags: list = None) -> dict:
+
+ suffix_url = f'api/alerts/v7/orgs/{self.cb_org_key}/threats/{threat_id}/tags'
+
+ body = {
+ "tags": tags
+ }
+
+ return self._http_request('POST', suffix_url, json_data=body)
+
+ def create_threat_notes(self, threat_id: str = None, notes: str = None) -> dict:
+
+ suffix_url = f'api/alerts/v7/orgs/{self.cb_org_key}/threats/{threat_id}/notes'
+ body = {
+ "note": notes
+ }
+ return self._http_request('POST', suffix_url, json_data=body)
+
+ def update_alert_notes(self, alert_id: str = None, notes: str = None) -> dict:
+
+ suffix_url = f'api/alerts/v7/orgs/{self.cb_org_key}/alerts/{alert_id}/notes'
+
+ body = {
+ "note": notes
+ }
+
+ return self._http_request('POST', suffix_url, json_data=body)
+
+ def get_threat_tags(self, threat_id: str = None) -> dict:
+
+ suffix_url = f'api/alerts/v7/orgs/{self.cb_org_key}/threats/{threat_id}/tags'
+
+ return self._http_request('GET', suffix_url)
+
def test_module(client):
"""
@@ -427,7 +460,7 @@ def test_module(client):
return 'ok'
-def alert_list_command(client: Client, args: Dict) -> Union[CommandResults, str]:
+def alert_list_command(client: Client, args: dict) -> CommandResults | str:
group_results = args.get('group_results')
minimum_severity = args.get('minimum_severity')
create_time = assign_params(
@@ -489,7 +522,7 @@ def alert_list_command(client: Client, args: Dict) -> Union[CommandResults, str]
return results
-def alert_workflow_update_command(client: Client, args: Dict) -> CommandResults:
+def alert_workflow_update_command(client: Client, args: dict) -> CommandResults:
alert_id = args.get('alert_id')
state = args.get('state')
comment = args.get('comment')
@@ -517,7 +550,7 @@ def alert_workflow_update_command(client: Client, args: Dict) -> CommandResults:
return results
-def list_devices_command(client: Client, args: Dict) -> Union[CommandResults, str]:
+def list_devices_command(client: Client, args: dict) -> CommandResults | str:
device_id = argToList(args.get('device_id'))
status = argToList(args.get('status'))
device_os = argToList(args.get('device_os'))
@@ -579,49 +612,49 @@ def list_devices_command(client: Client, args: Dict) -> Union[CommandResults, st
return results
-def device_quarantine_command(client: Client, args: Dict) -> str:
+def device_quarantine_command(client: Client, args: dict) -> str:
device_id = argToList(args.get('device_id'))
client.device_quarantine_request(device_id)
return f'The device {device_id} has been quarantined successfully.'
-def device_unquarantine_command(client: Client, args: Dict) -> str:
+def device_unquarantine_command(client: Client, args: dict) -> str:
device_id = argToList(args.get('device_id'))
client.device_unquarantine_request(device_id)
return f'The device {device_id} has been unquarantined successfully.'
-def device_bypass_command(client: Client, args: Dict) -> str:
+def device_bypass_command(client: Client, args: dict) -> str:
device_id = argToList(args.get('device_id'))
client.device_bypass_request(device_id)
return f'The device {device_id} bypass has been enabled successfully.'
-def device_unbypass_command(client: Client, args: Dict) -> str:
+def device_unbypass_command(client: Client, args: dict) -> str:
device_id = argToList(args.get('device_id'))
client.device_unbypass_request(device_id)
return f'The device {device_id} bypass has been disabled successfully.'
-def device_background_scan_command(client: Client, args: Dict) -> str:
+def device_background_scan_command(client: Client, args: dict) -> str:
device_id = argToList(args.get('device_id'))
client.device_background_scan_request(device_id)
return f'The device {device_id} background scan has been enabled successfully.'
-def device_background_scan_stop_command(client: Client, args: Dict) -> str:
+def device_background_scan_stop_command(client: Client, args: dict) -> str:
device_id = argToList(args.get('device_id'))
client.device_background_scan_request_stop(device_id)
return f'The device {device_id} background scan has been disabled successfully.'
-def device_policy_update_command(client: Client, args: Dict) -> str:
+def device_policy_update_command(client: Client, args: dict) -> str:
device_id = argToList(args.get('device_id'))
policy_id = args.get('policy_id')
@@ -630,7 +663,7 @@ def device_policy_update_command(client: Client, args: Dict) -> str:
return f'The policy {policy_id} has been assigned to device {device_id} successfully.'
-def list_watchlists_command(client: Client) -> Union[CommandResults, str]:
+def list_watchlists_command(client: Client) -> CommandResults | str:
contents = []
headers = ['ID', 'Name', 'Description', 'create_timestamp', 'Alerts_enabled', 'Tags_enabled', 'Report_ids',
'Last_update_timestamp', 'Classifier']
@@ -663,7 +696,7 @@ def list_watchlists_command(client: Client) -> Union[CommandResults, str]:
return results
-def get_watchlist_by_id_command(client: Client, args: Dict) -> CommandResults:
+def get_watchlist_by_id_command(client: Client, args: dict) -> CommandResults:
watchlist_id = args.get('watchlist_id')
result = client.get_watchlist_by_id_request(watchlist_id)
headers = ['ID', 'Name', 'Description', 'create_timestamp', 'Alerts_enabled', 'Tags_enabled', 'Report_ids',
@@ -692,7 +725,7 @@ def get_watchlist_by_id_command(client: Client, args: Dict) -> CommandResults:
return results
-def watchlist_alert_status_command(client: Client, args: Dict) -> str:
+def watchlist_alert_status_command(client: Client, args: dict) -> str:
watchlist_id = args.get('watchlist_id')
result = client.watchlist_alert_status_request(watchlist_id)
@@ -702,21 +735,21 @@ def watchlist_alert_status_command(client: Client, args: Dict) -> str:
return f'Watchlist {watchlist_id} alert status is On'
-def enable_watchlist_alert_command(client: Client, args: Dict) -> str:
+def enable_watchlist_alert_command(client: Client, args: dict) -> str:
watchlist_id = args.get('watchlist_id')
client.enable_watchlist_alert_request(watchlist_id)
return f'Watchlist {watchlist_id} alert was enabled successfully.'
-def disable_watchlist_alert_command(client: Client, args: Dict) -> str:
+def disable_watchlist_alert_command(client: Client, args: dict) -> str:
watchlist_id = args.get('watchlist_id')
client.disable_watchlist_alert_request(watchlist_id)
return f'Watchlist {watchlist_id} alert was disabled successfully.'
-def create_watchlist_command(client: Client, args: Dict) -> CommandResults:
+def create_watchlist_command(client: Client, args: dict) -> CommandResults:
watchlist_name = args.get('watchlist_name')
description = args.get('description')
tags_enabled = args.get('tags_enabled')
@@ -758,14 +791,14 @@ def create_watchlist_command(client: Client, args: Dict) -> CommandResults:
return results
-def delete_watchlist_command(client: Client, args: Dict) -> str:
+def delete_watchlist_command(client: Client, args: dict) -> str:
watchlist_id = args.get('watchlist_id')
client.delete_watchlist_request(watchlist_id)
return f'The watchlist {watchlist_id} was deleted successfully.'
-def update_watchlist_command(client: Client, args: Dict) -> CommandResults:
+def update_watchlist_command(client: Client, args: dict) -> CommandResults:
watchlist_id = args.get('watchlist_id')
watchlist_name = args.get('watchlist_name')
description = args.get('description')
@@ -808,7 +841,7 @@ def update_watchlist_command(client: Client, args: Dict) -> CommandResults:
return results
-def get_report_command(client: Client, args: Dict) -> CommandResults:
+def get_report_command(client: Client, args: dict) -> CommandResults:
report_id = args.get('report_id')
result = client.get_report_request(report_id)
headers = ['ID', 'Title', 'Timestamp', 'Description', 'Severity', 'Link', 'IOCs_v2', 'Tags', 'Visibility']
@@ -858,7 +891,7 @@ def get_report_command(client: Client, args: Dict) -> CommandResults:
return results
-def get_ignore_ioc_status_command(client: Client, args: Dict) -> str:
+def get_ignore_ioc_status_command(client: Client, args: dict) -> str:
report_id = args.get('report_id')
ioc_id = args.get('ioc_id')
@@ -870,7 +903,7 @@ def get_ignore_ioc_status_command(client: Client, args: Dict) -> str:
return f'IOC {ioc_id} status is true'
-def ignore_ioc_command(client: Client, args: Dict) -> str:
+def ignore_ioc_command(client: Client, args: dict) -> str:
report_id = args.get('report_id')
ioc_id = args.get('ioc_id')
@@ -879,7 +912,7 @@ def ignore_ioc_command(client: Client, args: Dict) -> str:
return f'The IOC {ioc_id} for report {report_id} will not match future events for any watchlist.'
-def reactivate_ioc_command(client: Client, args: Dict) -> str:
+def reactivate_ioc_command(client: Client, args: dict) -> str:
report_id = args.get('report_id')
ioc_id = args.get('ioc_id')
@@ -888,7 +921,7 @@ def reactivate_ioc_command(client: Client, args: Dict) -> str:
return f'IOC {ioc_id} for report {report_id} will match future events for all watchlists.'
-def create_report_command(client: Client, args: Dict) -> CommandResults:
+def create_report_command(client: Client, args: dict) -> CommandResults:
title = args.get('title')
description = args.get('description')
tags = argToList(args.get('tags'))
@@ -955,7 +988,7 @@ def create_report_command(client: Client, args: Dict) -> CommandResults:
return results
-def ignore_report_command(client: Client, args: Dict) -> str:
+def ignore_report_command(client: Client, args: dict) -> str:
report_id = args.get('report_id')
client.ignore_report_request(report_id)
@@ -964,7 +997,7 @@ def ignore_report_command(client: Client, args: Dict) -> str:
f'for any watchlist.'
-def reactivate_report_command(client: Client, args: Dict) -> str:
+def reactivate_report_command(client: Client, args: dict) -> str:
report_id = args.get('report_id')
client.reactivate_report_request(report_id)
@@ -972,7 +1005,7 @@ def reactivate_report_command(client: Client, args: Dict) -> str:
return f'Report with report_id "{report_id}" and all contained IOCs will match future events for all watchlists.'
-def get_report_ignore_status_command(client: Client, args: Dict) -> str:
+def get_report_ignore_status_command(client: Client, args: dict) -> str:
report_id = args.get('report_id')
result = client.get_report_ignore_status_request(report_id)
@@ -983,14 +1016,14 @@ def get_report_ignore_status_command(client: Client, args: Dict) -> str:
return f'ignore status for report with report_id "{report_id}" is enabled.'
-def remove_report_command(client: Client, args: Dict) -> str:
+def remove_report_command(client: Client, args: dict) -> str:
report_id = args.get('report_id')
client.remove_report_request(report_id)
return f'The report "{report_id}" was deleted successfully.'
-def update_report_command(client: Client, args: Dict) -> CommandResults:
+def update_report_command(client: Client, args: dict) -> CommandResults:
report_id = args.get('report_id')
title = args.get('title')
description = args.get('description')
@@ -1060,7 +1093,7 @@ def update_report_command(client: Client, args: Dict) -> CommandResults:
return results
-def get_file_device_summary(client: Client, args: Dict) -> CommandResults:
+def get_file_device_summary(client: Client, args: dict) -> CommandResults:
sha256 = args.get('sha256')
result = client.get_file_device_summary_request(sha256)
readable_output = tableToMarkdown('The file device summary', result)
@@ -1074,7 +1107,7 @@ def get_file_device_summary(client: Client, args: Dict) -> CommandResults:
return results
-def get_file_metadata_command(client: Client, args: Dict) -> CommandResults:
+def get_file_metadata_command(client: Client, args: dict) -> CommandResults:
sha256 = args.get('sha256')
result = client.get_file_metadata_request(sha256)
headers = ['SHA256', 'file_size', 'original_filename', 'internal_name', 'os_type', 'comments']
@@ -1118,7 +1151,7 @@ def get_file_metadata_command(client: Client, args: Dict) -> CommandResults:
return results
-def get_file_command(client: Client, args: Dict) -> CommandResults:
+def get_file_command(client: Client, args: dict) -> CommandResults:
sha256 = argToList(args.get('sha256'))
expiration_seconds = args.get('expiration_seconds')
download_to_xsoar = args.get('download_to_xsoar')
@@ -1148,7 +1181,7 @@ def get_file_command(client: Client, args: Dict) -> CommandResults:
return results
-def get_file_path_command(client: Client, args: Dict) -> CommandResults:
+def get_file_path_command(client: Client, args: dict) -> CommandResults:
sha256 = args.get('sha256')
result = client.get_file_path_request(sha256)
@@ -1163,7 +1196,7 @@ def get_file_path_command(client: Client, args: Dict) -> CommandResults:
return results
-def fetch_incidents(client: Client, fetch_time: str, fetch_limit: str, last_run: Dict) -> Tuple[List, Dict]:
+def fetch_incidents(client: Client, fetch_time: str, fetch_limit: str, last_run: dict) -> tuple[list, dict]:
last_fetched_alert_create_time = last_run.get('last_fetched_alert_create_time')
last_fetched_alert_id = last_run.get('last_fetched_alert_id', '')
if not last_fetched_alert_create_time:
@@ -1207,7 +1240,7 @@ def fetch_incidents(client: Client, fetch_time: str, fetch_limit: str, last_run:
return incidents, res
-def process_search_command(client: Client, args: Dict) -> CommandResults:
+def process_search_command(client: Client, args: dict) -> CommandResults:
"""
Gets arguments for a process search task, and returns the task's id and status.
"""
@@ -1234,7 +1267,7 @@ def process_search_command(client: Client, args: Dict) -> CommandResults:
outputs=output, outputs_key_field='job_id', readable_output=readable_output)
-def event_by_process_search_command(client: Client, args: Dict) -> CommandResults:
+def event_by_process_search_command(client: Client, args: dict) -> CommandResults:
"""
Gets arguments for an event-by-process search task, and returns the task's results.
"""
@@ -1271,7 +1304,7 @@ def event_by_process_search_command(client: Client, args: Dict) -> CommandResult
raw_response=result, readable_output=readable)
-def process_search_get_command(client: Client, args: Dict) -> List[CommandResults]:
+def process_search_get_command(client: Client, args: dict) -> list[CommandResults]:
"""
Gets a process search task's id, and returns the task's results.
"""
@@ -1291,6 +1324,89 @@ def process_search_get_command(client: Client, args: Dict) -> List[CommandResult
return job_result_list
+def add_threat_tags_command(client: Client, args: dict) -> CommandResults:
+ tags = argToList(args.get("tags"))
+ threat_id = args.get("threat_id")
+ result = client.update_threat_tags(threat_id, tags)
+
+ readable_output = tableToMarkdown(f'Successfully updated threat: "{threat_id}"', result, removeNull=True)
+ outputs = {
+ 'ThreatID': threat_id,
+ 'Tags': result.get('tags')
+ }
+
+ results = CommandResults(
+ outputs_prefix='CarbonBlackEEDR.Threat',
+ outputs_key_field='tags',
+ outputs=outputs,
+ readable_output=readable_output,
+ raw_response=result
+ )
+ return results
+
+
+def add_threat_notes_command(client: Client, args: dict) -> CommandResults:
+ notes = args.get("notes")
+ threat_id = args.get("threat_id")
+ result = client.create_threat_notes(threat_id, notes)
+
+ readable_output = tableToMarkdown(f'Successfully added notes to threat: "{threat_id}"', result, removeNull=True)
+ outputs = {
+ 'ThreatID': threat_id,
+ 'Notes': notes
+ }
+
+ results = CommandResults(
+ outputs_prefix='CarbonBlackEEDR.Threat',
+ outputs_key_field='ThreatID',
+ outputs=outputs,
+ readable_output=readable_output,
+ raw_response=result
+ )
+ return results
+
+
+def add_alert_notes_command(client: Client, args: dict) -> CommandResults:
+ notes = args.get("notes")
+ alert_id = args.get("alert_id")
+ result = client.update_alert_notes(alert_id, notes)
+
+ readable_output = tableToMarkdown(f'Successfully added notes to alert: "{alert_id}"', result, removeNull=True)
+ outputs = {
+ 'AlertID': alert_id,
+ 'Notes': notes
+ }
+
+ results = CommandResults(
+ outputs_prefix='CarbonBlackEEDR.Threat',
+ outputs_key_field='AlertID',
+ outputs=outputs,
+ readable_output=readable_output,
+ raw_response=result
+ )
+ return results
+
+
+def get_threat_tags_command(client: Client, args: dict) -> CommandResults:
+ threat_id = args.get("threat_id")
+ result = client.get_threat_tags(threat_id)
+
+ readable_output = tableToMarkdown(f'Successfully sent for threat: "{threat_id}"', result, removeNull=True)
+ outputs = {
+ 'ThreatID': threat_id,
+ 'Tags': result.get('list')
+ }
+
+ results = CommandResults(
+ outputs_prefix='CarbonBlackEEDR.Threat',
+ outputs_key_field='ThreatID',
+ outputs=outputs,
+ readable_output=readable_output,
+ raw_response=result
+ )
+ return results
+
+
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
@@ -1435,6 +1551,18 @@ def main():
elif demisto.command() == 'cb-eedr-events-by-process-get':
return_results(event_by_process_search_command(client, demisto.args()))
+ elif demisto.command() == 'cb-eedr-add-threat-tags':
+ return_results(add_threat_tags_command(client, demisto.args()))
+
+ elif demisto.command() == 'cb-eedr-add-threat-notes':
+ return_results(add_threat_notes_command(client, demisto.args()))
+
+ elif demisto.command() == 'cb-eedr-add-alert-notes':
+ return_results(add_alert_notes_command(client, demisto.args()))
+
+ elif demisto.command() == 'cb-eedr-get-threat-tags':
+ return_results(get_threat_tags_command(client, demisto.args()))
+
# Log exceptions
except Exception as e:
err_msg = str(e)
diff --git a/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/CarbonBlackEnterpriseEDR.yml b/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/CarbonBlackEnterpriseEDR.yml
index 11223f24dae8..8c33477343dd 100644
--- a/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/CarbonBlackEnterpriseEDR.yml
+++ b/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/CarbonBlackEnterpriseEDR.yml
@@ -1,7 +1,4 @@
category: Endpoint
-sectionOrder:
-- Connect
-- Collect
commonfields:
id: Carbon Black Enterprise EDR
version: -1
@@ -68,8 +65,13 @@ configuration:
type: 0
section: Collect
required: false
-description: VMware Carbon Black Enterprise EDR (formerly known as Carbon Black ThreatHunter) is an advanced threat hunting and incident response solution delivering continuous visibility for top security operations centers (SOCs) and incident response (IR) teams. (formerly known as ThreatHunter)
-display: VMware Carbon Black Enterprise EDR
+- defaultvalue: "1"
+ display: Incidents Fetch Interval
+ name: incidentFetchInterval
+ required: false
+ type: 19
+description: VMware Carbon Black Enterprise EDR (formerly known as Carbon Black ThreatHunter) is an advanced threat hunting and incident response solution delivering continuous visibility for top security operations centers (SOCs) and incident response (IR) teams. (formerly known as ThreatHunter).
+display: Carbon Black Enterprise EDR
name: Carbon Black Enterprise EDR
script:
commands:
@@ -177,16 +179,16 @@ script:
- LIVE
- description: Device operation system. Supports comma-separated values.
name: device_os
- - description: 'Device start last contact time. For example: 2019-01-01T11:00:00.157Z'
+ - description: 'Device start last contact time. For example: 2019-01-01T11:00:00.157Z.'
name: start_time
- - description: 'Device end last contact time. For example: 2019-01-01T11:00:00.157Z'
+ - description: 'Device end last contact time. For example: 2019-01-01T11:00:00.157Z.'
name: end_time
- - description: Active directory group ID. Supports comma-separated values
+ - description: Active directory group ID. Supports comma-separated values.
name: ad_group_id
- description: The policy ID. Supports comma-separated values.
name: policy_id
- auto: PREDEFINED
- description: Device target priority. Supports comma-separated values
+ description: Device target priority. Supports comma-separated values.
name: target_priority
predefined:
- LOW
@@ -197,7 +199,7 @@ script:
description: Maximum number of rows to return.
name: limit
- auto: PREDEFINED
- description: Sort Fields
+ description: Sort Fields.
name: sort_field
predefined:
- target_priority
@@ -294,7 +296,7 @@ script:
description: Sensor version.
type: String
- contextPath: CarbonBlackEEDR.Device.virtual_machine
- description: Is the device virtual machine
+ description: Is the device virtual machine.
type: Boolean
- contextPath: CarbonBlackEEDR.Device.last_name
description: Last name.
@@ -568,7 +570,7 @@ script:
description: Alert workflow - remediation.
type: String
- contextPath: CarbonBlackEEDR.Alert.workflow.state
- description: Alert workflow - state
+ description: Alert workflow - state.
type: String
- description: Retrieves all watchlists.
name: cb-eedr-watchlist-list
@@ -597,6 +599,7 @@ script:
- contextPath: CarbonBlackEEDR.Watchlist.description
description: Watchlist description.
type: String
+ arguments: []
- arguments:
- description: The watchlist ID. Get the ID from the watchlist-list command.
name: watchlist_id
@@ -662,7 +665,7 @@ script:
- 'true'
- 'false'
- auto: PREDEFINED
- description: Enable watchlist alerts
+ description: Enable watchlist alerts.
name: alerts_enabled
predefined:
- 'true'
@@ -804,7 +807,7 @@ script:
- description: Report ID. Get the ID from the watchlist-list command.
name: report_id
required: true
- - description: IOC ID. Get the ID from get_report command
+ - description: IOC ID. Get the ID from get_report command.
name: ioc_id
required: true
description: Gets the current ignore status for IOC ioc_id in report report_id.
@@ -822,7 +825,7 @@ script:
- description: Report ID. Get the ID from the watchlist-list command.
name: report_id
required: true
- - description: IOC ID. Get the ID from get_report command
+ - description: IOC ID. Get the ID from get_report command.
name: ioc_id
required: true
description: IOC ioc_id for report report_id will match future events for all watchlists.
@@ -867,7 +870,7 @@ script:
name: ipv4
- description: 'The IOC query for the report, for example: (netconn_ipv4:2.2.2.2). Supports comma-separated values.'
name: ioc_query
- - description: 'The report timestamp. For example: 2020-01-19T09:16:16'
+ - description: 'The report timestamp. For example: 2020-01-19T09:16:16.'
name: timestamp
required: true
- description: IOCs of type IPv6. Supports comma-separated values.
@@ -883,7 +886,7 @@ script:
description: The report ID.
type: String
- contextPath: CarbonBlackEEDR.Report.IOCs
- description: The report IOCs
+ description: The report IOCs.
type: String
- contextPath: CarbonBlackEEDR.Report.Link
description: Report link.
@@ -941,7 +944,7 @@ script:
description: The report ID.
type: String
- contextPath: CarbonBlackEEDR.Report.IOCs
- description: The report IOC's
+ description: The report IOC's.
type: String
- contextPath: CarbonBlackEEDR.Report.Link
description: Report link.
@@ -1021,7 +1024,7 @@ script:
description: The Language ID value from the Windows VERSIONINFO resource.
type: String
- contextPath: CarbonBlackEEDR.File.company_name
- description: Company name from FileVersionInformation
+ description: Company name from FileVersionInformation.
type: String
- contextPath: CarbonBlackEEDR.File.internal_name
description: Internal name from FileVersionInformation.
@@ -1039,7 +1042,7 @@ script:
description: Comments from FileVersionInformation.
type: String
- contextPath: CarbonBlackEEDR.File.os_type
- description: 'The OS that this file is designed for. This may contain one or more of the following values: WINDOWS, ANDROID, MAC, IOS, LINUX, and OTHER'
+ description: 'The OS that this file is designed for. This may contain one or more of the following values: WINDOWS, ANDROID, MAC, IOS, LINUX, and OTHER.'
type: String
- contextPath: CarbonBlackEEDR.File.original_filename
description: Original filename from FileVersionInformation.
@@ -1074,7 +1077,7 @@ script:
name: cb-eedr-files-download-link-get
outputs:
- contextPath: CarbonBlackEEDR.File.found.sha256
- description: SHA256 hash of file that is available to be downloaded
+ description: SHA256 hash of file that is available to be downloaded.
type: String
- contextPath: CarbonBlackEEDR.File.found.url
description: An AWS S3 pre-signed URL for this file. Perform a GET on this URL to download the file.
@@ -1089,7 +1092,7 @@ script:
- description: The requested SHA256 hash to obtain information for. Supports comma-separated values.
name: sha256
required: true
- description: Return a summary of the observed file paths
+ description: Return a summary of the observed file paths.
name: cb-eedr-file-paths
outputs:
- contextPath: CarbonBlackEEDR.File.file_path_count
@@ -1260,7 +1263,7 @@ script:
description: The cumulative count of file modifications since process tracking started.
type: Number
- contextPath: CarbonBlackEEDR.SearchProcess.results.ingress_time
- description: Unknown
+ description: Unknown.
type: Date
- contextPath: CarbonBlackEEDR.SearchProcess.results.legacy
description: True if the process document came from the legacy data stream (deprecated, use enriched).
@@ -1301,10 +1304,64 @@ script:
- contextPath: CarbonBlackEEDR.SearchProcess.results.scriptload_count
description: The cumulative count of loaded scripts since process tracking started.
type: Number
- dockerimage: demisto/python3:3.10.13.72123
+ - arguments:
+ - description: Threat ID.
+ name: threat_id
+ required: true
+ - description: Comma-separated list of tags.
+ isArray: true
+ name: tags
+ required: true
+ description: Update threat ID tags.
+ name: cb-eedr-add-threat-tags
+ outputs:
+ - contextPath: CarbonBlackEEDR.Threat.ThreatID
+ description: Threat ID.
+ - contextPath: CarbonBlackEEDR.Threat.Tags
+ description: Threat ID tags.
+ - arguments:
+ - description: Threat ID.
+ name: threat_id
+ required: true
+ - description: Notes to be added to the provided threat ID.
+ name: notes
+ required: true
+ description: Update threat ID notes.
+ name: cb-eedr-add-threat-notes
+ outputs:
+ - contextPath: CarbonBlackEEDR.Threat.ThreatID
+ description: Threat ID.
+ - contextPath: CarbonBlackEEDR.Threat.Notes
+ description: Threat ID notes.
+ - arguments:
+ - description: Alert ID to add the notes to.
+ name: alert_id
+ required: true
+ - description: Notes to be added to the provided alert ID.
+ name: notes
+ required: true
+ description: Update alert ID notes.
+ name: cb-eedr-add-alert-notes
+ outputs:
+ - contextPath: CarbonBlackEEDR.Alert.AlertID
+ description: The alert ID.
+ - contextPath: CarbonBlackEEDR.Alert.Notes
+ description: Alert notes.
+ - arguments:
+ - description: The threat ID for which we wish to get the tags.
+ name: threat_id
+ required: true
+ description: Output a list of tags for the provided threat ID.
+ name: cb-eedr-get-threat-tags
+ outputs:
+ - contextPath: CarbonBlackEEDR.Threat.ThreatID
+ description: Threat ID.
+ - contextPath: CarbonBlackEEDR.Threat.Tags
+ description: Threat tags.
+ dockerimage: demisto/python3:3.10.13.84405
isfetch: true
runonce: false
- script: '-'
+ script: ''
subtype: python3
type: python
tests:
diff --git a/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/CarbonBlackEnterpriseEDR_test.py b/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/CarbonBlackEnterpriseEDR_test.py
index 720da4810916..77d29f1128cb 100644
--- a/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/CarbonBlackEnterpriseEDR_test.py
+++ b/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/CarbonBlackEnterpriseEDR_test.py
@@ -1,5 +1,11 @@
import pytest
import CarbonBlackEnterpriseEDR as cbe
+from CarbonBlackEnterpriseEDR import (
+ get_threat_tags_command,
+ add_threat_tags_command,
+ add_threat_notes_command,
+ add_alert_notes_command,
+)
import demistomock as demisto
from freezegun import freeze_time
@@ -171,3 +177,154 @@ def test_event_by_process_failing(mocker, requests_mock, demisto_args, expected_
with pytest.raises(Exception) as e:
client.create_search_event_by_process_request(**demisto_args)
assert str(e.value) == expected_error_msg
+
+
+MOCK_UPDATE_THREAT_TAGS_RESPONSE = {
+ 'tags': ['tag1', 'tag2']
+}
+
+
+def test_add_threat_tags_command(mocker):
+ """
+ Given:
+ - args with threat_id and tags.
+
+ When:
+ - Calling add_threat_tags_command.
+
+ Then:
+ - validate that the returned results were parsed as expected.
+
+ """
+ client = cbe.Client(
+ base_url='https://server_url.com',
+ use_ssl=False,
+ use_proxy=False,
+ token=None,
+ cb_org_key="123")
+
+ mocker.patch.object(client, '_http_request', return_value=MOCK_UPDATE_THREAT_TAGS_RESPONSE)
+
+ args = {'threat_id': '123456', 'tags': ['tag1', 'tag2']}
+ result = add_threat_tags_command(client, args)
+
+ assert result.outputs == {'ThreatID': '123456', 'Tags': ['tag1', 'tag2']}
+ assert result.outputs_prefix == 'CarbonBlackEEDR.Threat'
+ assert result.outputs_key_field == 'tags'
+
+ assert "Successfully updated threat: \"123456\"" in result.readable_output
+ assert result.raw_response == MOCK_UPDATE_THREAT_TAGS_RESPONSE
+
+
+MOCK_CREATE_THREAT_NOTES_RESPONSE = {
+ 'notes': 'These are threat notes'
+}
+
+
+def test_add_threat_notes_command(mocker):
+ """
+ Given:
+ - args with threat_id and notes.
+
+ When:
+ - Calling add_threat_notes_command.
+
+ Then:
+ - validate that the returned results were parsed as expected.
+
+ """
+ client = cbe.Client(
+ base_url='https://server_url.com',
+ use_ssl=False,
+ use_proxy=False,
+ token=None,
+ cb_org_key="123")
+
+ mocker.patch.object(client, '_http_request', return_value=MOCK_CREATE_THREAT_NOTES_RESPONSE)
+
+ args = {'threat_id': '123456', 'notes': 'These are threat notes'}
+ result = add_threat_notes_command(client, args)
+
+ assert result.outputs == {'ThreatID': '123456', 'Notes': 'These are threat notes'}
+ assert result.outputs_prefix == 'CarbonBlackEEDR.Threat'
+ assert result.outputs_key_field == 'ThreatID'
+
+ assert "Successfully added notes to threat: \"123456\"" in result.readable_output
+ assert result.raw_response == MOCK_CREATE_THREAT_NOTES_RESPONSE
+
+
+MOCK_GET_THREAT_TAGS_RESPONSE = {
+ 'list': [
+ {'tag': 'malware'},
+ {'tag': 'suspicious'}
+ ]
+}
+
+
+def test_get_threat_tags_command(mocker):
+ """
+ Given:
+    - args with threat_id.
+
+ When:
+ - Calling get_threat_tags_command.
+
+ Then:
+    - validate that the returned results were parsed as expected.
+
+ """
+ client = cbe.Client(
+ base_url='https://server_url.com',
+ use_ssl=False,
+ use_proxy=False,
+ token=None,
+ cb_org_key="123")
+
+ mocker.patch.object(client, '_http_request', return_value=MOCK_GET_THREAT_TAGS_RESPONSE)
+
+ args = {'threat_id': '123456'}
+ result = get_threat_tags_command(client, args)
+
+ assert result.outputs == {'ThreatID': '123456', 'Tags': [{'tag': 'malware'}, {'tag': 'suspicious'}]}
+ assert result.outputs_prefix == 'CarbonBlackEEDR.Threat'
+ assert result.outputs_key_field == 'ThreatID'
+
+ assert "Successfully sent for threat: \"123456\"" in result.readable_output
+ assert result.raw_response == MOCK_GET_THREAT_TAGS_RESPONSE
+
+
+MOCK_UPDATE_ALERT_NOTES_RESPONSE = {
+ 'notes': 'These are alert notes'
+}
+
+
+def test_add_alert_notes_command(mocker):
+ """
+ Given:
+ - args with alert_id and notes.
+
+ When:
+ - Calling add_alert_notes_command.
+
+ Then:
+ - validate that the returned results were parsed as expected.
+
+ """
+ client = cbe.Client(
+ base_url='https://server_url.com',
+ use_ssl=False,
+ use_proxy=False,
+ token=None,
+ cb_org_key="123")
+
+ mocker.patch.object(client, '_http_request', return_value=MOCK_UPDATE_ALERT_NOTES_RESPONSE)
+
+ args = {'alert_id': '789012', 'notes': 'These are alert notes'}
+ result = add_alert_notes_command(client, args)
+
+ assert result.outputs == {'AlertID': '789012', 'Notes': 'These are alert notes'}
+ assert result.outputs_prefix == 'CarbonBlackEEDR.Threat'
+ assert result.outputs_key_field == 'AlertID'
+
+ assert "Successfully added notes to alert: \"789012\"" in result.readable_output
+ assert result.raw_response == MOCK_UPDATE_ALERT_NOTES_RESPONSE
diff --git a/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/README.md b/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/README.md
index 8f3ce111fa90..358fb3aae2a6 100644
--- a/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/README.md
+++ b/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/README.md
@@ -2712,3 +2712,93 @@ RBAC Permissions Required - org.search.events: READ
>| 80abd555c1869baaff2d8a8d535ce07e,
fa353f142361e5c6ca57a66dcb341bba20392f5c29d2c113c7d62a216b0e0504 | c:\program files\vmware\vmware tools\vmtoolsd.exe | desktop-aa2m6ld | 2020-08-26T16:06:50.813Z | 2016 | DESKTOP-AA2M6LD\John Doe |
>| 80abd555c1869baaff2d8a8d535ce07e,
fa353f142361e5c6ca57a66dcb341bba20392f5c29d2c113c7d62a216b0e0504 | c:\program files\vmware\vmware tools\vmtoolsd.exe | desktop-aa2m6ld | 2020-08-17T14:37:19.963Z | 8052 | DESKTOP-AA2M6LD\John Doe |
+### cb-eedr-add-threat-tags
+
+***
+Update threat ID tags.
+
+#### Base Command
+
+`cb-eedr-add-threat-tags`
+
+#### Input
+
+| **Argument Name** | **Description** | **Required** |
+| --- | --- | --- |
+| threat_id | Threat ID. | Required |
+| tags | Comma-separated list of tags. | Required |
+
+#### Context Output
+
+| **Path** | **Type** | **Description** |
+| --- | --- | --- |
+| CarbonBlackEEDR.Threat.ThreatID | unknown | Threat ID. |
+| CarbonBlackEEDR.Threat.Tags | unknown | Threat ID tags. |
+
+### cb-eedr-add-threat-notes
+
+***
+Update threat ID notes.
+
+#### Base Command
+
+`cb-eedr-add-threat-notes`
+
+#### Input
+
+| **Argument Name** | **Description** | **Required** |
+| --- | --- | --- |
+| threat_id | Threat ID. | Required |
+| notes | Notes to be added to the provided threat ID. | Required |
+
+#### Context Output
+
+| **Path** | **Type** | **Description** |
+| --- | --- | --- |
+| CarbonBlackEEDR.Threat.ThreatID | unknown | Threat ID. |
+| CarbonBlackEEDR.Threat.Notes | unknown | Threat ID notes. |
+
+### cb-eedr-add-alert-notes
+
+***
+Update alert ID notes.
+
+#### Base Command
+
+`cb-eedr-add-alert-notes`
+
+#### Input
+
+| **Argument Name** | **Description** | **Required** |
+| --- | --- | --- |
+| alert_id | Alert ID to add the notes to. | Required |
+| notes | Notes to be added to the provided alert ID. | Required |
+
+#### Context Output
+
+| **Path** | **Type** | **Description** |
+| --- | --- | --- |
+| CarbonBlackEEDR.Alert.AlertID | unknown | The alert ID. |
+| CarbonBlackEEDR.Alert.Notes | unknown | Alert notes. |
+
+### cb-eedr-get-threat-tags
+
+***
+Output a list of tags for the provided threat ID.
+
+#### Base Command
+
+`cb-eedr-get-threat-tags`
+
+#### Input
+
+| **Argument Name** | **Description** | **Required** |
+| --- | --- | --- |
+| threat_id | The threat ID for which we wish to get the tags. | Required |
+
+#### Context Output
+
+| **Path** | **Type** | **Description** |
+| --- | --- | --- |
+| CarbonBlackEEDR.Threat.ThreatID | unknown | Threat ID. |
+| CarbonBlackEEDR.Threat.Tags | unknown | Threat tags. |
\ No newline at end of file
diff --git a/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/command_examples.txt b/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/command_examples.txt
index 88f5c5f0c8ab..aca3761cc5aa 100644
--- a/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/command_examples.txt
+++ b/Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/command_examples.txt
@@ -1,3 +1,7 @@
!cb-eedr-process-search process_name="vmtoolsd.exe" limit=10
!cb-eedr-process-search-results job_id="99aad740-3903-4148-a5e7-7b5648794862"
-!cb-eedr-events-by-process-get process_guid="7DESJ9GN-0034d5f2-00001f78-00000000-1d68709f411ee43" event_type="modload" start_time="1 month"
\ No newline at end of file
+!cb-eedr-events-by-process-get process_guid="7DESJ9GN-0034d5f2-00001f78-00000000-1d68709f411ee43" event_type="modload" start_time="1 month"
+!cb-eedr-add-threat-notes threat_id=fb6a305cd33e6b99b3010d3005f65943 notes="add xsoar threat notes"
+!cb-eedr-add-alert-notes alert_id=1bc5dff7-e2fb-f336-7997-277b142c9ec1 notes="xsoar alert notes"
+!cb-eedr-add-threat-tags threat_id=fb6a305cd33e6b99b3010d3005f65943 tags=CSIRC-77777
+!cb-eedr-get-threat-tags threat_id=fb6a305cd33e6b99b3010d3005f65943
diff --git a/Packs/CarbonBlackEnterpriseEDR/ReleaseNotes/1_1_33.md b/Packs/CarbonBlackEnterpriseEDR/ReleaseNotes/1_1_33.md
new file mode 100644
index 000000000000..3b6e73fbf24a
--- /dev/null
+++ b/Packs/CarbonBlackEnterpriseEDR/ReleaseNotes/1_1_33.md
@@ -0,0 +1,8 @@
+
+#### Integrations
+
+##### Carbon Black Enterprise EDR
+
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
+- Added the new commands: cb-eedr-add-alert-notes, cb-eedr-add-threat-notes, cb-eedr-add-threat-tags and cb-eedr-get-threat-tags.
+
diff --git a/Packs/CarbonBlackEnterpriseEDR/pack_metadata.json b/Packs/CarbonBlackEnterpriseEDR/pack_metadata.json
index 12470ff071ae..ecd53d42a864 100644
--- a/Packs/CarbonBlackEnterpriseEDR/pack_metadata.json
+++ b/Packs/CarbonBlackEnterpriseEDR/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Carbon Black Cloud Enterprise EDR",
"description": "Advanced threat hunting and incident response solution.",
"support": "xsoar",
- "currentVersion": "1.1.32",
+ "currentVersion": "1.1.33",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
@@ -17,4 +17,4 @@
"xsoar",
"marketplacev2"
]
-}
\ No newline at end of file
+}
diff --git a/Packs/Carbon_Black_Enterprise_Response/ReleaseNotes/2_1_44.md b/Packs/Carbon_Black_Enterprise_Response/ReleaseNotes/2_1_44.md
new file mode 100644
index 000000000000..8e56b5c8efd8
--- /dev/null
+++ b/Packs/Carbon_Black_Enterprise_Response/ReleaseNotes/2_1_44.md
@@ -0,0 +1,6 @@
+
+#### Scripts
+##### CBWatchlists
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
+##### CBAlerts
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
\ No newline at end of file
diff --git a/Packs/Carbon_Black_Enterprise_Response/Scripts/CBAlerts/CBAlerts.yml b/Packs/Carbon_Black_Enterprise_Response/Scripts/CBAlerts/CBAlerts.yml
index f2980648dcc1..40386cc249f1 100644
--- a/Packs/Carbon_Black_Enterprise_Response/Scripts/CBAlerts/CBAlerts.yml
+++ b/Packs/Carbon_Black_Enterprise_Response/Scripts/CBAlerts/CBAlerts.yml
@@ -15,7 +15,7 @@ dependson:
- cb-alert
timeout: 0s
fromversion: 5.0.0
-dockerimage: demisto/python3:3.10.13.83255
+dockerimage: demisto/python3:3.10.13.84405
tests:
- No tests (auto formatted)
diff --git a/Packs/Carbon_Black_Enterprise_Response/Scripts/CBWatchlists/CBWatchlists.yml b/Packs/Carbon_Black_Enterprise_Response/Scripts/CBWatchlists/CBWatchlists.yml
index 65898c4c5c34..4622aa64d069 100644
--- a/Packs/Carbon_Black_Enterprise_Response/Scripts/CBWatchlists/CBWatchlists.yml
+++ b/Packs/Carbon_Black_Enterprise_Response/Scripts/CBWatchlists/CBWatchlists.yml
@@ -19,6 +19,6 @@ dependson:
- cb-edr-watchlists-list
timeout: 0s
fromversion: 5.0.0
-dockerimage: demisto/python3:3.10.13.83255
+dockerimage: demisto/python3:3.10.13.84405
tests:
- No tests (auto formatted)
diff --git a/Packs/Carbon_Black_Enterprise_Response/pack_metadata.json b/Packs/Carbon_Black_Enterprise_Response/pack_metadata.json
index 96223d36668a..b19a774dec75 100644
--- a/Packs/Carbon_Black_Enterprise_Response/pack_metadata.json
+++ b/Packs/Carbon_Black_Enterprise_Response/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Carbon Black Enterprise Response",
"description": "Query and respond with Carbon Black endpoint detection and response.",
"support": "xsoar",
- "currentVersion": "2.1.43",
+ "currentVersion": "2.1.44",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/Censys/Integrations/CensysV2/CensysV2.yml b/Packs/Censys/Integrations/CensysV2/CensysV2.yml
index 6448f76b6950..e2818ef6ee76 100644
--- a/Packs/Censys/Integrations/CensysV2/CensysV2.yml
+++ b/Packs/Censys/Integrations/CensysV2/CensysV2.yml
@@ -381,7 +381,7 @@ script:
- contextPath: Censys.Search.parsed.issuer_dn
description: Distinguished name of the entity that has signed and issued the certificate.
type: String
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
runonce: false
script: '-'
subtype: python3
diff --git a/Packs/Censys/ReleaseNotes/2_0_28.md b/Packs/Censys/ReleaseNotes/2_0_28.md
new file mode 100644
index 000000000000..41c02b782e90
--- /dev/null
+++ b/Packs/Censys/ReleaseNotes/2_0_28.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Censys v2
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/Censys/pack_metadata.json b/Packs/Censys/pack_metadata.json
index a05cb4a2664a..5396df970432 100644
--- a/Packs/Censys/pack_metadata.json
+++ b/Packs/Censys/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Censys",
"description": "Censys is a search engine that allows computer scientists to ask questions about the devices and networks that compose the Internet. Driven by Internet-wide scanning, Censys lets researchers find specific hosts and create aggregate reports on how devices, websites, and certificates are configured and deployed.",
"support": "xsoar",
- "currentVersion": "2.0.27",
+ "currentVersion": "2.0.28",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/Centreon/Integrations/Centreon/Centreon.yml b/Packs/Centreon/Integrations/Centreon/Centreon.yml
index 8d939d856823..865c5f51534b 100644
--- a/Packs/Centreon/Integrations/Centreon/Centreon.yml
+++ b/Packs/Centreon/Integrations/Centreon/Centreon.yml
@@ -145,7 +145,7 @@ script:
description: Service state.
description: All the monitoring information regarding services.
runonce: false
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
tests:
- Centreon-Test-Playbook
fromversion: 5.0.0
diff --git a/Packs/Centreon/ReleaseNotes/1_0_22.md b/Packs/Centreon/ReleaseNotes/1_0_22.md
new file mode 100644
index 000000000000..9f168d3e20a7
--- /dev/null
+++ b/Packs/Centreon/ReleaseNotes/1_0_22.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Centreon
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/Centreon/pack_metadata.json b/Packs/Centreon/pack_metadata.json
index 2993cba52561..8abaf7687782 100644
--- a/Packs/Centreon/pack_metadata.json
+++ b/Packs/Centreon/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Centreon",
"description": "IT & Network Monitoring",
"support": "xsoar",
- "currentVersion": "1.0.21",
+ "currentVersion": "1.0.22",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CheckPointDome9/Integrations/CheckPointDome9/CheckPointDome9.yml b/Packs/CheckPointDome9/Integrations/CheckPointDome9/CheckPointDome9.yml
index bc96d955c6cf..8648fc9c2975 100644
--- a/Packs/CheckPointDome9/Integrations/CheckPointDome9/CheckPointDome9.yml
+++ b/Packs/CheckPointDome9/Integrations/CheckPointDome9/CheckPointDome9.yml
@@ -1363,7 +1363,7 @@ script:
- contextPath: CheckPointDome9.FindingsBundle.region
description: The CloudTrail account ID.
type: String
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
isfetch: true
runonce: false
script: '-'
diff --git a/Packs/CheckPointDome9/ReleaseNotes/1_0_16.md b/Packs/CheckPointDome9/ReleaseNotes/1_0_16.md
new file mode 100644
index 000000000000..64b3b63adc3d
--- /dev/null
+++ b/Packs/CheckPointDome9/ReleaseNotes/1_0_16.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Check Point Dome9 (CloudGuard)
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/CheckPointDome9/pack_metadata.json b/Packs/CheckPointDome9/pack_metadata.json
index 7796e8996d81..7beb2d7a4c87 100644
--- a/Packs/CheckPointDome9/pack_metadata.json
+++ b/Packs/CheckPointDome9/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Check Point Dome9 (CloudGuard)",
"description": "Dome9 integration allows to easily manage the security and compliance of the public cloud.",
"support": "xsoar",
- "currentVersion": "1.0.15",
+ "currentVersion": "1.0.16",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CheckPointHEC/Integrations/CheckPointHEC/CheckPointHEC.yml b/Packs/CheckPointHEC/Integrations/CheckPointHEC/CheckPointHEC.yml
index 4b7436139b23..f995de7a3210 100644
--- a/Packs/CheckPointHEC/Integrations/CheckPointHEC/CheckPointHEC.yml
+++ b/Packs/CheckPointHEC/Integrations/CheckPointHEC/CheckPointHEC.yml
@@ -343,7 +343,7 @@ script:
script: '-'
type: python
subtype: python3
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
fromversion: 6.9.0
tests:
- No tests (auto formatted)
diff --git a/Packs/CheckPointHEC/ReleaseNotes/1_0_6.md b/Packs/CheckPointHEC/ReleaseNotes/1_0_6.md
new file mode 100644
index 000000000000..558e6af5b5d8
--- /dev/null
+++ b/Packs/CheckPointHEC/ReleaseNotes/1_0_6.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Check Point Harmony Email and Collaboration (HEC)
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/CheckPointHEC/pack_metadata.json b/Packs/CheckPointHEC/pack_metadata.json
index 02bcc9d2bee3..996ae81ad2cb 100644
--- a/Packs/CheckPointHEC/pack_metadata.json
+++ b/Packs/CheckPointHEC/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Check Point Harmony Email and Collaboration (HEC)",
"description": "The Best Way to Protect Enterprise Email & Collaboration from phishing, malware, account takeover, data loss, etc.",
"support": "partner",
- "currentVersion": "1.0.5",
+ "currentVersion": "1.0.6",
"author": "Check Point Harmony Email & Collaboration (HEC)",
"url": "https://supportcenter.checkpoint.com/",
"email": "EmailSecurity_Support@checkpoint.com",
diff --git a/Packs/CheckpointFirewall/README.md b/Packs/CheckpointFirewall/README.md
index 2f3566f6c03d..affce9bc23b6 100644
--- a/Packs/CheckpointFirewall/README.md
+++ b/Packs/CheckpointFirewall/README.md
@@ -1,13 +1,15 @@
# Check Point Firewall
This pack includes Cortex XSIAM content.
+<~XSIAM>
## Configuration on Server Side
You need to configure Check Point to forward Syslog messages in CEF format.
-Go to Checkpoint Log Export, and follow the instructions under "Basic Deployment" to set up the connection using the following guidelines:
+Go to Checkpoint Log Export, and follow the instructions under **Basic Deployment** to set up the connection using the following guidelines:
1. If you use version R77.30 or R80.10, follow the instructions to install a Log Exporter.
2. Set the Syslog port to 514 or your agent port.
-3. Replace the "name" and "\" in the CLI with the broker VM name and IP address.
+3. Replace the **name** and **IP address** placeholders in the CLI with the broker VM name and IP address.
Set the format to CEF.
+
## Collect Events from Vendor
In order to use the collector, use the [Broker VM](#broker-vm) option.
@@ -23,3 +25,4 @@ You can configure the specific vendor and product for this instance.
3. When configuring the Syslog Collector, set the following values:
- vendor as vendor - Checkpoint
- product as product - Firewall
+~XSIAM>
\ No newline at end of file
diff --git a/Packs/CimTrak-SystemIntegrityAssurance/Integrations/CimTrak/CimTrak.yml b/Packs/CimTrak-SystemIntegrityAssurance/Integrations/CimTrak/CimTrak.yml
index fc4176ad693b..4f38ee632c2b 100644
--- a/Packs/CimTrak-SystemIntegrityAssurance/Integrations/CimTrak/CimTrak.yml
+++ b/Packs/CimTrak-SystemIntegrityAssurance/Integrations/CimTrak/CimTrak.yml
@@ -2302,7 +2302,7 @@ script:
- contextPath: CimTrak.Object.agentObjectId
description: Agent Object Id.
type: number
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
isfetch: true
longRunning: true
script: ''
diff --git a/Packs/CimTrak-SystemIntegrityAssurance/ReleaseNotes/1_0_15.md b/Packs/CimTrak-SystemIntegrityAssurance/ReleaseNotes/1_0_15.md
new file mode 100644
index 000000000000..bfa55dca4822
--- /dev/null
+++ b/Packs/CimTrak-SystemIntegrityAssurance/ReleaseNotes/1_0_15.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### CimTrak - System Integrity Assurance
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/CimTrak-SystemIntegrityAssurance/pack_metadata.json b/Packs/CimTrak-SystemIntegrityAssurance/pack_metadata.json
index 958f9813ce34..3ae4e628f951 100644
--- a/Packs/CimTrak-SystemIntegrityAssurance/pack_metadata.json
+++ b/Packs/CimTrak-SystemIntegrityAssurance/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "CimTrak - System Integrity Assurance",
"description": "The CimTrak integration helps you detect unexpected system/device/config modifications and automatically respond/react to threats",
"support": "partner",
- "currentVersion": "1.0.14",
+ "currentVersion": "1.0.15",
"author": "Cimcor",
"url": "www.cimcor.com",
"email": "support@cimcor.com",
diff --git a/Packs/CiscoCatalyst/ParsingRules/CiscoCatalyst/CiscoCatalyst.xif b/Packs/CiscoCatalyst/ParsingRules/CiscoCatalyst/CiscoCatalyst.xif
index e9faa5819bc3..1f88b91107bd 100644
--- a/Packs/CiscoCatalyst/ParsingRules/CiscoCatalyst/CiscoCatalyst.xif
+++ b/Packs/CiscoCatalyst/ParsingRules/CiscoCatalyst/CiscoCatalyst.xif
@@ -11,7 +11,7 @@ filter _raw_log ~= "[A-Za-z]+\s+\d+\s+\d{4}\s+\d+\:\d+\:\d+\sUTC" or _raw_log ~=
tmp_time_format2 = if(tmp_time_without_year != null and tmp_time_without_year != "", concat(tmp_year, " ", tmp_time_without_year), null)
| alter
tmp_time1_1 = parse_timestamp("%Y %b %e %H:%M:%E*S", tmp_time_format2 )
-| alter tmp_timeDiff = timestamp_diff(tmp_time1_1, current_time(), "DAY")
+| alter tmp_timeDiff = timestamp_diff(tmp_time1_1, current_time(), "MILLISECOND")
// Check if the date is a future date
| alter tmp_year2 = if(tmp_timeDiff > 0, to_string(subtract(to_integer(tmp_year),1)),null)
// Create timestamp minus 1 year if the timestamp is a future one
diff --git a/Packs/CiscoCatalyst/ReleaseNotes/1_0_5.md b/Packs/CiscoCatalyst/ReleaseNotes/1_0_5.md
new file mode 100644
index 000000000000..b13fbf758b38
--- /dev/null
+++ b/Packs/CiscoCatalyst/ReleaseNotes/1_0_5.md
@@ -0,0 +1,3 @@
+#### Parsing Rules
+##### CiscoCatalyst Parsing Rule
+Improved the implementation of the parsing rule.
\ No newline at end of file
diff --git a/Packs/CiscoCatalyst/pack_metadata.json b/Packs/CiscoCatalyst/pack_metadata.json
index 740a0847a1ca..d3325a1414f7 100644
--- a/Packs/CiscoCatalyst/pack_metadata.json
+++ b/Packs/CiscoCatalyst/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Cisco Catalyst",
"description": "Cisco Catalyst switch",
"support": "xsoar",
- "currentVersion": "1.0.4",
+ "currentVersion": "1.0.5",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CiscoISR/ParsingRules/CiscoISR/CiscoISR.xif b/Packs/CiscoISR/ParsingRules/CiscoISR/CiscoISR.xif
index 3c9d3e1bedcb..fdcb0637ec03 100644
--- a/Packs/CiscoISR/ParsingRules/CiscoISR/CiscoISR.xif
+++ b/Packs/CiscoISR/ParsingRules/CiscoISR/CiscoISR.xif
@@ -10,7 +10,7 @@ filter _raw_log ~= "\:[\s|\s\*]+\w+\s\d+\s\d{2}\:\d{2}\:\d{2}\.\d{3}\sUTC" or _r
tmp_time_format2 = if(tmp_time_without_year != null and tmp_time_without_year != "", concat(tmp_year, " ", tmp_time_without_year), null)
| alter
tmp_time1_1 = parse_timestamp("%Y %b %e %H:%M:%E*S", tmp_time_format2)
-| alter tmp_timeDiff = timestamp_diff(tmp_time1_1, current_time(), "DAY")
+| alter tmp_timeDiff = timestamp_diff(tmp_time1_1, current_time(), "MILLISECOND")
// Check if the date is a future date
| alter tmp_year2 = if(tmp_timeDiff > 0, to_string(subtract(to_integer(tmp_year),1)), null)
// Create timestamp minus 1 year if the timestamp is a future one
diff --git a/Packs/CiscoISR/ReleaseNotes/1_0_3.md b/Packs/CiscoISR/ReleaseNotes/1_0_3.md
new file mode 100644
index 000000000000..654b4fe43cee
--- /dev/null
+++ b/Packs/CiscoISR/ReleaseNotes/1_0_3.md
@@ -0,0 +1,3 @@
+#### Parsing Rules
+##### Cisco ISR
+Improved the implementation of the parsing rule.
\ No newline at end of file
diff --git a/Packs/CiscoISR/pack_metadata.json b/Packs/CiscoISR/pack_metadata.json
index 75103ee26920..4bb0ef91805f 100644
--- a/Packs/CiscoISR/pack_metadata.json
+++ b/Packs/CiscoISR/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Cisco ISR",
"description": "Cisco Integrated Services Routers (ISRs) are high-performance routers designed to provide advanced security, multicloud access, and wireless capability in one device.",
"support": "xsoar",
- "currentVersion": "1.0.2",
+ "currentVersion": "1.0.3",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CiscoSpark/Integrations/CiscoWebexEventCollector/CiscoWebexEventCollector.yml b/Packs/CiscoSpark/Integrations/CiscoWebexEventCollector/CiscoWebexEventCollector.yml
index 59bd9b7a52b2..4ff6aa98fd04 100644
--- a/Packs/CiscoSpark/Integrations/CiscoWebexEventCollector/CiscoWebexEventCollector.yml
+++ b/Packs/CiscoSpark/Integrations/CiscoWebexEventCollector/CiscoWebexEventCollector.yml
@@ -147,7 +147,7 @@ script:
name: since_datetime
description: Gets events from Cisco Webex.
name: cisco-webex-get-compliance-officer-events
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
isfetchevents: true
runonce: false
script: ''
diff --git a/Packs/CiscoSpark/ReleaseNotes/1_0_6.md b/Packs/CiscoSpark/ReleaseNotes/1_0_6.md
new file mode 100644
index 000000000000..2bb61079f717
--- /dev/null
+++ b/Packs/CiscoSpark/ReleaseNotes/1_0_6.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Cisco Webex Event Collector
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/CiscoSpark/pack_metadata.json b/Packs/CiscoSpark/pack_metadata.json
index 46e14202332d..45ea48644d61 100644
--- a/Packs/CiscoSpark/pack_metadata.json
+++ b/Packs/CiscoSpark/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Cisco Webex Teams",
"description": "Send messages, create rooms and more, via the Cisco Webex Teams (Cisco Spark) API.",
"support": "xsoar",
- "currentVersion": "1.0.5",
+ "currentVersion": "1.0.6",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CiscoStealthwatch/ModelingRules/CiscoStealthwatch/CiscoStealthwatch.xif b/Packs/CiscoStealthwatch/ModelingRules/CiscoStealthwatch/CiscoStealthwatch.xif
index d0d976c19064..2d349bcda043 100644
--- a/Packs/CiscoStealthwatch/ModelingRules/CiscoStealthwatch/CiscoStealthwatch.xif
+++ b/Packs/CiscoStealthwatch/ModelingRules/CiscoStealthwatch/CiscoStealthwatch.xif
@@ -1,5 +1,7 @@
[MODEL: dataset ="lancope_stealthwatch_raw"]
alter
+ proto_string = to_string(proto)
+| alter
xdm.source.ipv4 = if(src ~= "\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", src, null),
xdm.source.ipv6 = if(src ~= "[a-fA-F0-9\:]{1,5}[a-fA-F0-9\:]{1,5}[a-fA-F0-9\:]{1,5}[a-fA-F0-9\:]{1,5}[a-fA-F0-9\:]{1,5}[a-fA-F0-9\:]{1,5}[a-fA-F0-9\:]{1,5}[a-fA-F0-9\:]{1,5}", src, null),
xdm.target.ipv4 = if(dst ~= "\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", dst, null),
@@ -13,4 +15,4 @@ alter
xdm.target.user.username = targetUser,
xdm.target.host.hostname = targetHostname,
xdm.target.domain = domain,
- xdm.network.ip_protocol = if(proto="0",XDM_CONST.IP_PROTOCOL_HOPOPT, proto="1",XDM_CONST.IP_PROTOCOL_ICMP, proto="2",XDM_CONST.IP_PROTOCOL_IGMP, proto="3",XDM_CONST.IP_PROTOCOL_GGP, proto="4",XDM_CONST.IP_PROTOCOL_IP, proto="5",XDM_CONST.IP_PROTOCOL_ST, proto="6",XDM_CONST.IP_PROTOCOL_TCP, proto="7",XDM_CONST.IP_PROTOCOL_CBT, proto="8",XDM_CONST.IP_PROTOCOL_EGP, proto="9",XDM_CONST.IP_PROTOCOL_IGP, proto="10",XDM_CONST.IP_PROTOCOL_BBN_RCC_MON, proto="11",XDM_CONST.IP_PROTOCOL_NVP_II, proto="12",XDM_CONST.IP_PROTOCOL_PUP, proto="13",XDM_CONST.IP_PROTOCOL_ARGUS, proto="14",XDM_CONST.IP_PROTOCOL_EMCON, proto="15",XDM_CONST.IP_PROTOCOL_XNET, proto="16",XDM_CONST.IP_PROTOCOL_CHAOS, proto="17",XDM_CONST.IP_PROTOCOL_UDP, proto="18",XDM_CONST.IP_PROTOCOL_MUX, proto="19",XDM_CONST.IP_PROTOCOL_DCN_MEAS, proto="20",XDM_CONST.IP_PROTOCOL_HMP, proto="21",XDM_CONST.IP_PROTOCOL_PRM, proto="22",XDM_CONST.IP_PROTOCOL_XNS_IDP, proto="23",XDM_CONST.IP_PROTOCOL_TRUNK_1, proto="24",XDM_CONST.IP_PROTOCOL_TRUNK_2, proto="25",XDM_CONST.IP_PROTOCOL_LEAF_1, proto="26",XDM_CONST.IP_PROTOCOL_LEAF_2, proto="27",XDM_CONST.IP_PROTOCOL_RDP, proto="28",XDM_CONST.IP_PROTOCOL_IRTP, proto="29",XDM_CONST.IP_PROTOCOL_ISO_TP4, proto="30",XDM_CONST.IP_PROTOCOL_NETBLT, proto="31",XDM_CONST.IP_PROTOCOL_MFE_NSP, proto="32",XDM_CONST.IP_PROTOCOL_MERIT_INP, proto="33",XDM_CONST.IP_PROTOCOL_DCCP, proto="34",XDM_CONST.IP_PROTOCOL_3PC, proto="35",XDM_CONST.IP_PROTOCOL_IDPR, proto="36",XDM_CONST.IP_PROTOCOL_XTP, proto="37",XDM_CONST.IP_PROTOCOL_DDP, proto="38",XDM_CONST.IP_PROTOCOL_IDPR_CMTP, proto="39",XDM_CONST.IP_PROTOCOL_TP, proto="40",XDM_CONST.IP_PROTOCOL_IL, proto="41",XDM_CONST.IP_PROTOCOL_IPV6, proto="42",XDM_CONST.IP_PROTOCOL_SDRP, proto="43",XDM_CONST.IP_PROTOCOL_IPV6_ROUTE, proto="44",XDM_CONST.IP_PROTOCOL_IPV6_FRAG, proto="45",XDM_CONST.IP_PROTOCOL_IDRP, proto="46",XDM_CONST.IP_PROTOCOL_RSVP, proto="47",XDM_CONST.IP_PROTOCOL_GRE, proto="48",XDM_CONST.IP_PROTOCOL_DSR, 
proto="49",XDM_CONST.IP_PROTOCOL_BNA, proto="50",XDM_CONST.IP_PROTOCOL_ESP, proto="51",XDM_CONST.IP_PROTOCOL_AH, proto="52",XDM_CONST.IP_PROTOCOL_I_NLSP, proto="53",XDM_CONST.IP_PROTOCOL_SWIPE, proto="54",XDM_CONST.IP_PROTOCOL_NARP, proto="55",XDM_CONST.IP_PROTOCOL_MOBILE, proto="56",XDM_CONST.IP_PROTOCOL_TLSP, proto="57",XDM_CONST.IP_PROTOCOL_SKIP, proto="58",XDM_CONST.IP_PROTOCOL_IPV6_ICMP, proto="59",XDM_CONST.IP_PROTOCOL_IPV6_NONXT, proto="60",XDM_CONST.IP_PROTOCOL_IPV6_OPTS, proto="62",XDM_CONST.IP_PROTOCOL_CFTP, proto="64",XDM_CONST.IP_PROTOCOL_SAT_EXPAK, proto="65",XDM_CONST.IP_PROTOCOL_KRYPTOLAN, proto="66",XDM_CONST.IP_PROTOCOL_RVD, proto="67",XDM_CONST.IP_PROTOCOL_IPPC, proto="69",XDM_CONST.IP_PROTOCOL_SAT_MON, proto="70",XDM_CONST.IP_PROTOCOL_VISA, proto="71",XDM_CONST.IP_PROTOCOL_IPCV, proto="72",XDM_CONST.IP_PROTOCOL_CPNX, proto="73",XDM_CONST.IP_PROTOCOL_CPHB, proto="74",XDM_CONST.IP_PROTOCOL_WSN, proto="75",XDM_CONST.IP_PROTOCOL_PVP, proto="76",XDM_CONST.IP_PROTOCOL_BR_SAT_MON, proto="77",XDM_CONST.IP_PROTOCOL_SUN_ND, proto="78",XDM_CONST.IP_PROTOCOL_WB_MON, proto="79",XDM_CONST.IP_PROTOCOL_WB_EXPAK, proto="80",XDM_CONST.IP_PROTOCOL_ISO_IP, proto="81",XDM_CONST.IP_PROTOCOL_VMTP, proto="82",XDM_CONST.IP_PROTOCOL_SECURE_VMTP, proto="83",XDM_CONST.IP_PROTOCOL_VINES, proto="84",XDM_CONST.IP_PROTOCOL_TTP, proto="85",XDM_CONST.IP_PROTOCOL_NSFNET_IGP, proto="86",XDM_CONST.IP_PROTOCOL_DGP, proto="87",XDM_CONST.IP_PROTOCOL_TCF, proto="88",XDM_CONST.IP_PROTOCOL_EIGRP, proto="89",XDM_CONST.IP_PROTOCOL_OSPFIGP, proto="90",XDM_CONST.IP_PROTOCOL_SPRITE_RPC, proto="91",XDM_CONST.IP_PROTOCOL_LARP, proto="92",XDM_CONST.IP_PROTOCOL_MTP, proto="93",XDM_CONST.IP_PROTOCOL_AX25, proto="94",XDM_CONST.IP_PROTOCOL_IPIP, proto="95",XDM_CONST.IP_PROTOCOL_MICP, proto="96",XDM_CONST.IP_PROTOCOL_SCC_SP, proto="97",XDM_CONST.IP_PROTOCOL_ETHERIP, proto="98",XDM_CONST.IP_PROTOCOL_ENCAP, proto="100",XDM_CONST.IP_PROTOCOL_GMTP, proto="101",XDM_CONST.IP_PROTOCOL_IFMP, 
proto="102",XDM_CONST.IP_PROTOCOL_PNNI, proto="103",XDM_CONST.IP_PROTOCOL_PIM, proto="104",XDM_CONST.IP_PROTOCOL_ARIS, proto="105",XDM_CONST.IP_PROTOCOL_SCPS, proto="106",XDM_CONST.IP_PROTOCOL_QNX, proto="107",XDM_CONST.IP_PROTOCOL_AN, proto="108",XDM_CONST.IP_PROTOCOL_IPCOMP, proto="110",XDM_CONST.IP_PROTOCOL_COMPAQ_PEER, proto="111",XDM_CONST.IP_PROTOCOL_IPX_IN_IP, proto="112",XDM_CONST.IP_PROTOCOL_VRRP, proto="113",XDM_CONST.IP_PROTOCOL_PGM, proto="115",XDM_CONST.IP_PROTOCOL_L2TP, proto="116",XDM_CONST.IP_PROTOCOL_DDX, proto="117",XDM_CONST.IP_PROTOCOL_IATP, proto="118",XDM_CONST.IP_PROTOCOL_STP, proto="119",XDM_CONST.IP_PROTOCOL_SRP, proto="120",XDM_CONST.IP_PROTOCOL_UTI, proto="121",XDM_CONST.IP_PROTOCOL_SMP, proto="122",XDM_CONST.IP_PROTOCOL_SM, proto="123",XDM_CONST.IP_PROTOCOL_PTP, proto="124",XDM_CONST.IP_PROTOCOL_ISIS, proto="125",XDM_CONST.IP_PROTOCOL_FIRE, proto="126",XDM_CONST.IP_PROTOCOL_CRTP, proto="127",XDM_CONST.IP_PROTOCOL_CRUDP, proto="128",XDM_CONST.IP_PROTOCOL_SSCOPMCE, proto="129",XDM_CONST.IP_PROTOCOL_IPLT, proto="130",XDM_CONST.IP_PROTOCOL_SPS, proto="131",XDM_CONST.IP_PROTOCOL_PIPE, proto="132",XDM_CONST.IP_PROTOCOL_SCTP, proto="133",XDM_CONST.IP_PROTOCOL_FC, proto="134",XDM_CONST.IP_PROTOCOL_RSVP_E2E_IGNORE, proto="135",XDM_CONST.IP_PROTOCOL_MOBILITY, proto="136",XDM_CONST.IP_PROTOCOL_UDPLITE, proto="137",XDM_CONST.IP_PROTOCOL_MPLS_IN_IP,to_string(proto));
\ No newline at end of file
+ xdm.network.ip_protocol = if(proto_string="0",XDM_CONST.IP_PROTOCOL_HOPOPT, proto_string="1",XDM_CONST.IP_PROTOCOL_ICMP, proto_string="2",XDM_CONST.IP_PROTOCOL_IGMP, proto_string="3",XDM_CONST.IP_PROTOCOL_GGP, proto_string="4",XDM_CONST.IP_PROTOCOL_IP, proto_string="5",XDM_CONST.IP_PROTOCOL_ST, proto_string="6",XDM_CONST.IP_PROTOCOL_TCP, proto_string="7",XDM_CONST.IP_PROTOCOL_CBT, proto_string="8",XDM_CONST.IP_PROTOCOL_EGP, proto_string="9",XDM_CONST.IP_PROTOCOL_IGP, proto_string="10",XDM_CONST.IP_PROTOCOL_BBN_RCC_MON, proto_string="11",XDM_CONST.IP_PROTOCOL_NVP_II, proto_string="12",XDM_CONST.IP_PROTOCOL_PUP, proto_string="13",XDM_CONST.IP_PROTOCOL_ARGUS, proto_string="14",XDM_CONST.IP_PROTOCOL_EMCON, proto_string="15",XDM_CONST.IP_PROTOCOL_XNET, proto_string="16",XDM_CONST.IP_PROTOCOL_CHAOS, proto_string="17",XDM_CONST.IP_PROTOCOL_UDP, proto_string="18",XDM_CONST.IP_PROTOCOL_MUX, proto_string="19",XDM_CONST.IP_PROTOCOL_DCN_MEAS, proto_string="20",XDM_CONST.IP_PROTOCOL_HMP, proto_string="21",XDM_CONST.IP_PROTOCOL_PRM, proto_string="22",XDM_CONST.IP_PROTOCOL_XNS_IDP, proto_string="23",XDM_CONST.IP_PROTOCOL_TRUNK_1, proto_string="24",XDM_CONST.IP_PROTOCOL_TRUNK_2, proto_string="25",XDM_CONST.IP_PROTOCOL_LEAF_1, proto_string="26",XDM_CONST.IP_PROTOCOL_LEAF_2, proto_string="27",XDM_CONST.IP_PROTOCOL_RDP, proto_string="28",XDM_CONST.IP_PROTOCOL_IRTP, proto_string="29",XDM_CONST.IP_PROTOCOL_ISO_TP4, proto_string="30",XDM_CONST.IP_PROTOCOL_NETBLT, proto_string="31",XDM_CONST.IP_PROTOCOL_MFE_NSP, proto_string="32",XDM_CONST.IP_PROTOCOL_MERIT_INP, proto_string="33",XDM_CONST.IP_PROTOCOL_DCCP, proto_string="34",XDM_CONST.IP_PROTOCOL_3PC, proto_string="35",XDM_CONST.IP_PROTOCOL_IDPR, proto_string="36",XDM_CONST.IP_PROTOCOL_XTP, proto_string="37",XDM_CONST.IP_PROTOCOL_DDP, proto_string="38",XDM_CONST.IP_PROTOCOL_IDPR_CMTP, proto_string="39",XDM_CONST.IP_PROTOCOL_TP, proto_string="40",XDM_CONST.IP_PROTOCOL_IL, proto_string="41",XDM_CONST.IP_PROTOCOL_IPV6, 
proto_string="42",XDM_CONST.IP_PROTOCOL_SDRP, proto_string="43",XDM_CONST.IP_PROTOCOL_IPV6_ROUTE, proto_string="44",XDM_CONST.IP_PROTOCOL_IPV6_FRAG, proto_string="45",XDM_CONST.IP_PROTOCOL_IDRP, proto_string="46",XDM_CONST.IP_PROTOCOL_RSVP, proto_string="47",XDM_CONST.IP_PROTOCOL_GRE, proto_string="48",XDM_CONST.IP_PROTOCOL_DSR, proto_string="49",XDM_CONST.IP_PROTOCOL_BNA, proto_string="50",XDM_CONST.IP_PROTOCOL_ESP, proto_string="51",XDM_CONST.IP_PROTOCOL_AH, proto_string="52",XDM_CONST.IP_PROTOCOL_I_NLSP, proto_string="53",XDM_CONST.IP_PROTOCOL_SWIPE, proto_string="54",XDM_CONST.IP_PROTOCOL_NARP, proto_string="55",XDM_CONST.IP_PROTOCOL_MOBILE, proto_string="56",XDM_CONST.IP_PROTOCOL_TLSP, proto_string="57",XDM_CONST.IP_PROTOCOL_SKIP, proto_string="58",XDM_CONST.IP_PROTOCOL_IPV6_ICMP, proto_string="59",XDM_CONST.IP_PROTOCOL_IPV6_NONXT, proto_string="60",XDM_CONST.IP_PROTOCOL_IPV6_OPTS, proto_string="62",XDM_CONST.IP_PROTOCOL_CFTP, proto_string="64",XDM_CONST.IP_PROTOCOL_SAT_EXPAK, proto_string="65",XDM_CONST.IP_PROTOCOL_KRYPTOLAN, proto_string="66",XDM_CONST.IP_PROTOCOL_RVD, proto_string="67",XDM_CONST.IP_PROTOCOL_IPPC, proto_string="69",XDM_CONST.IP_PROTOCOL_SAT_MON, proto_string="70",XDM_CONST.IP_PROTOCOL_VISA, proto_string="71",XDM_CONST.IP_PROTOCOL_IPCV, proto_string="72",XDM_CONST.IP_PROTOCOL_CPNX, proto_string="73",XDM_CONST.IP_PROTOCOL_CPHB, proto_string="74",XDM_CONST.IP_PROTOCOL_WSN, proto_string="75",XDM_CONST.IP_PROTOCOL_PVP, proto_string="76",XDM_CONST.IP_PROTOCOL_BR_SAT_MON, proto_string="77",XDM_CONST.IP_PROTOCOL_SUN_ND, proto_string="78",XDM_CONST.IP_PROTOCOL_WB_MON, proto_string="79",XDM_CONST.IP_PROTOCOL_WB_EXPAK, proto_string="80",XDM_CONST.IP_PROTOCOL_ISO_IP, proto_string="81",XDM_CONST.IP_PROTOCOL_VMTP, proto_string="82",XDM_CONST.IP_PROTOCOL_SECURE_VMTP, proto_string="83",XDM_CONST.IP_PROTOCOL_VINES, proto_string="84",XDM_CONST.IP_PROTOCOL_TTP, proto_string="85",XDM_CONST.IP_PROTOCOL_NSFNET_IGP, proto_string="86",XDM_CONST.IP_PROTOCOL_DGP, 
proto_string="87",XDM_CONST.IP_PROTOCOL_TCF, proto_string="88",XDM_CONST.IP_PROTOCOL_EIGRP, proto_string="89",XDM_CONST.IP_PROTOCOL_OSPFIGP, proto_string="90",XDM_CONST.IP_PROTOCOL_SPRITE_RPC, proto_string="91",XDM_CONST.IP_PROTOCOL_LARP, proto_string="92",XDM_CONST.IP_PROTOCOL_MTP, proto_string="93",XDM_CONST.IP_PROTOCOL_AX25, proto_string="94",XDM_CONST.IP_PROTOCOL_IPIP, proto_string="95",XDM_CONST.IP_PROTOCOL_MICP, proto_string="96",XDM_CONST.IP_PROTOCOL_SCC_SP, proto_string="97",XDM_CONST.IP_PROTOCOL_ETHERIP, proto_string="98",XDM_CONST.IP_PROTOCOL_ENCAP, proto_string="100",XDM_CONST.IP_PROTOCOL_GMTP, proto_string="101",XDM_CONST.IP_PROTOCOL_IFMP, proto_string="102",XDM_CONST.IP_PROTOCOL_PNNI, proto_string="103",XDM_CONST.IP_PROTOCOL_PIM, proto_string="104",XDM_CONST.IP_PROTOCOL_ARIS, proto_string="105",XDM_CONST.IP_PROTOCOL_SCPS, proto_string="106",XDM_CONST.IP_PROTOCOL_QNX, proto_string="107",XDM_CONST.IP_PROTOCOL_AN, proto_string="108",XDM_CONST.IP_PROTOCOL_IPCOMP, proto_string="110",XDM_CONST.IP_PROTOCOL_COMPAQ_PEER, proto_string="111",XDM_CONST.IP_PROTOCOL_IPX_IN_IP, proto_string="112",XDM_CONST.IP_PROTOCOL_VRRP, proto_string="113",XDM_CONST.IP_PROTOCOL_PGM, proto_string="115",XDM_CONST.IP_PROTOCOL_L2TP, proto_string="116",XDM_CONST.IP_PROTOCOL_DDX, proto_string="117",XDM_CONST.IP_PROTOCOL_IATP, proto_string="118",XDM_CONST.IP_PROTOCOL_STP, proto_string="119",XDM_CONST.IP_PROTOCOL_SRP, proto_string="120",XDM_CONST.IP_PROTOCOL_UTI, proto_string="121",XDM_CONST.IP_PROTOCOL_SMP, proto_string="122",XDM_CONST.IP_PROTOCOL_SM, proto_string="123",XDM_CONST.IP_PROTOCOL_PTP, proto_string="124",XDM_CONST.IP_PROTOCOL_ISIS, proto_string="125",XDM_CONST.IP_PROTOCOL_FIRE, proto_string="126",XDM_CONST.IP_PROTOCOL_CRTP, proto_string="127",XDM_CONST.IP_PROTOCOL_CRUDP, proto_string="128",XDM_CONST.IP_PROTOCOL_SSCOPMCE, proto_string="129",XDM_CONST.IP_PROTOCOL_IPLT, proto_string="130",XDM_CONST.IP_PROTOCOL_SPS, proto_string="131",XDM_CONST.IP_PROTOCOL_PIPE, 
proto_string="132",XDM_CONST.IP_PROTOCOL_SCTP, proto_string="133",XDM_CONST.IP_PROTOCOL_FC, proto_string="134",XDM_CONST.IP_PROTOCOL_RSVP_E2E_IGNORE, proto_string="135",XDM_CONST.IP_PROTOCOL_MOBILITY, proto_string="136",XDM_CONST.IP_PROTOCOL_UDPLITE, proto_string="137",XDM_CONST.IP_PROTOCOL_MPLS_IN_IP, proto_string);
\ No newline at end of file
diff --git a/Packs/CiscoStealthwatch/ModelingRules/CiscoStealthwatch/CiscoStealthwatch_schema.json b/Packs/CiscoStealthwatch/ModelingRules/CiscoStealthwatch/CiscoStealthwatch_schema.json
index d190045f17ca..a3289333987d 100644
--- a/Packs/CiscoStealthwatch/ModelingRules/CiscoStealthwatch/CiscoStealthwatch_schema.json
+++ b/Packs/CiscoStealthwatch/ModelingRules/CiscoStealthwatch/CiscoStealthwatch_schema.json
@@ -49,7 +49,7 @@
"is_array": false
},
"proto": {
- "type": "string",
+ "type": "int",
"is_array": false
}
}
diff --git a/Packs/CiscoStealthwatch/README.md b/Packs/CiscoStealthwatch/README.md
index 87417d13743f..14b428ec4ba5 100644
--- a/Packs/CiscoStealthwatch/README.md
+++ b/Packs/CiscoStealthwatch/README.md
@@ -5,13 +5,12 @@ Today, companies have large networks with lots of devices stretching further tha
Finding the latest threat is like looking for a needle in a haystack. Stealwatch (or in its new name "Secure Network Analytics") analyzes your company's billions of network sessions, and provides analytics for your query flows and security events to enable you to determine when something looks suspicious and respond to any network threats.
## What does this pack do
-
+
- Lists security events and returns the results to the context.
- Runs queries on Cisco Stealthwatch flows and returns its results to the context.
- Maps logs to "One Data Model".
-
-
+<~XSIAM>
## Configuration on Server Side
You need to configure Cisco Stealthwatch to forward Syslog messages in custom LEEF format.
@@ -29,7 +28,9 @@ You need to configure Cisco Stealthwatch to forward Syslog messages in custom LE
4. In the Message box, paste the following:
-`LEEF:2.0|Lancope|Stealthwatch|6.8|{alarm_type_id}|0x7C|eventName={alarm_type_name}|src={source_ip}|dst={target_ip}|dstPort={port}|proto={protocol}|msg={alarm_type_description}|fullmessage={details}|start={start_active_time}|end={end_active_time}|cat={alarm_category_name}|alarmID={alarm_id}|sourceHG={source_host_group_names}|targetHG={target_host_group_names}|sourceHostSnapshot={source_url}|targetHostSnapshot={target_url}|flowCollectorName={device_name}|flowCollectorIP={device_ip}|domain={domain_name}|exporterName={exporter_hostname}|exporterIPAddress ={exporter_ip}|exporterInfo={exporter_label}|targetUser={target_username}|targetHostname={target_hostname}|sourceUser={source_username}|alarmStatus={alarm_status}|alarmSev={alarm_severity_name}`
+```text
+LEEF:2.0|Lancope|Stealthwatch|6.8|{alarm_type_id}|0x7C|eventName={alarm_type_name}|src={source_ip}|dst={target_ip}|dstPort={port}|proto={protocol}|msg={alarm_type_description}|fullmessage={details}|start={start_active_time}|end={end_active_time}|cat={alarm_category_name}|alarmID={alarm_id}|sourceHG={source_host_group_names}|targetHG={target_host_group_names}|sourceHostSnapshot={source_url}|targetHostSnapshot={target_url}|flowCollectorName={device_name}|flowCollectorIP={device_ip}|domain={domain_name}|exporterName={exporter_hostname}|exporterIPAddress ={exporter_ip}|exporterInfo={exporter_label}|targetUser={target_username}|targetHostname={target_hostname}|sourceUser={source_username}|alarmStatus={alarm_status}|alarmSev={alarm_severity_name}
+```
### Configure syslog message action
1. Log in to the Stealthwatch Management Console (SMC).
@@ -64,4 +65,5 @@ In order to use the collector, use the [Broker VM](#broker-vm) option.
### Broker VM
To create or configure the Broker VM, use the information described [here](https://docs-cortex.paloaltonetworks.com/r/Cortex-XDR/Cortex-XDR-Pro-Administrator-Guide/Configure-the-Broker-VM).
-**Note**: The name of the dataset will be assigned automatically as "lancope_stealthwatch_raw".
+**Note**: The name of the dataset will be assigned automatically as **lancope_stealthwatch_raw**.
+</~XSIAM>
\ No newline at end of file
diff --git a/Packs/CiscoStealthwatch/ReleaseNotes/1_0_28.md b/Packs/CiscoStealthwatch/ReleaseNotes/1_0_28.md
new file mode 100644
index 000000000000..5b97e8e680f0
--- /dev/null
+++ b/Packs/CiscoStealthwatch/ReleaseNotes/1_0_28.md
@@ -0,0 +1,6 @@
+
+#### Modeling Rules
+
+##### Cisco Stealthwatch
+
+Updated the Parsing Rule logic.
diff --git a/Packs/CiscoStealthwatch/pack_metadata.json b/Packs/CiscoStealthwatch/pack_metadata.json
index 9b72e832f104..9cd41a2f1386 100644
--- a/Packs/CiscoStealthwatch/pack_metadata.json
+++ b/Packs/CiscoStealthwatch/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Cisco Secure Network Analytics (Stealthwatch)",
"description": "Cisco Secure Network Analytics (Stealthwatch) provides scalable visibility and security analytics.",
"support": "xsoar",
- "currentVersion": "1.0.27",
+ "currentVersion": "1.0.28",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CofenseIntelligenceV2/Integrations/CofenseIntelligenceV2/CofenseIntelligenceV2.yml b/Packs/CofenseIntelligenceV2/Integrations/CofenseIntelligenceV2/CofenseIntelligenceV2.yml
index a7177cf2b296..275f2ee7ba95 100644
--- a/Packs/CofenseIntelligenceV2/Integrations/CofenseIntelligenceV2/CofenseIntelligenceV2.yml
+++ b/Packs/CofenseIntelligenceV2/Integrations/CofenseIntelligenceV2/CofenseIntelligenceV2.yml
@@ -1491,7 +1491,7 @@ script:
- contextPath: File.Extension
description: The file extension.
type: String
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
runonce: false
script: '-'
subtype: python3
diff --git a/Packs/CofenseIntelligenceV2/ReleaseNotes/1_1_15.md b/Packs/CofenseIntelligenceV2/ReleaseNotes/1_1_15.md
new file mode 100644
index 000000000000..545f46821c19
--- /dev/null
+++ b/Packs/CofenseIntelligenceV2/ReleaseNotes/1_1_15.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Cofense Intelligence v2
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/CofenseIntelligenceV2/pack_metadata.json b/Packs/CofenseIntelligenceV2/pack_metadata.json
index 47b70d3dd569..0f95c1651f61 100644
--- a/Packs/CofenseIntelligenceV2/pack_metadata.json
+++ b/Packs/CofenseIntelligenceV2/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Cofense Intelligence v2",
"description": "Cofense Intelligence allows users to search for threat intelligence reports based on domains, IPs, email address, file hashes, URLs and extracted strings.",
"support": "partner",
- "currentVersion": "1.1.14",
+ "currentVersion": "1.1.15",
"author": "Cofense",
"url": "https://cofense.com/contact-support/",
"email": "support@cofense.com",
diff --git a/Packs/CofenseVision/Integrations/CofenseVision/CofenseVision.yml b/Packs/CofenseVision/Integrations/CofenseVision/CofenseVision.yml
index b5d4b9449b23..bbfa8872db46 100644
--- a/Packs/CofenseVision/Integrations/CofenseVision/CofenseVision.yml
+++ b/Packs/CofenseVision/Integrations/CofenseVision/CofenseVision.yml
@@ -2382,7 +2382,7 @@ script:
- contextPath: Cofense.Config.value
description: List of headers that are available to create a message search.
type: Unknown
- dockerimage: demisto/python3:3.10.13.83255
+ dockerimage: demisto/python3:3.10.13.84405
runonce: false
script: '-'
subtype: python3
diff --git a/Packs/CofenseVision/ReleaseNotes/1_0_11.md b/Packs/CofenseVision/ReleaseNotes/1_0_11.md
new file mode 100644
index 000000000000..27a331cc874d
--- /dev/null
+++ b/Packs/CofenseVision/ReleaseNotes/1_0_11.md
@@ -0,0 +1,3 @@
+#### Integrations
+##### Cofense Vision
+- Updated the Docker image to: *demisto/python3:3.10.13.84405*.
diff --git a/Packs/CofenseVision/pack_metadata.json b/Packs/CofenseVision/pack_metadata.json
index 9aff9a971e3c..64c717ace277 100644
--- a/Packs/CofenseVision/pack_metadata.json
+++ b/Packs/CofenseVision/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Cofense Vision",
"description": "Cofense Vision empowers security teams to hunt for email messages and quarantine threats in mailboxes. Analysts can setup jobs to remove emerging phishing campaigns based on trusted and credible IOCs through an automated workflow.",
"support": "partner",
- "currentVersion": "1.0.10",
+ "currentVersion": "1.0.11",
"author": "Cofense",
"url": "https://cofense.com/contact-support/",
"email": "support@cofense.com",
diff --git a/Packs/CommonDashboards/ReleaseNotes/1_3_9.md b/Packs/CommonDashboards/ReleaseNotes/1_3_9.md
new file mode 100644
index 000000000000..fa45119267b3
--- /dev/null
+++ b/Packs/CommonDashboards/ReleaseNotes/1_3_9.md
@@ -0,0 +1,3 @@
+## Common Dashboards
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/CommonDashboards/pack_metadata.json b/Packs/CommonDashboards/pack_metadata.json
index e26b76125452..c4d0e267b016 100644
--- a/Packs/CommonDashboards/pack_metadata.json
+++ b/Packs/CommonDashboards/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Common Dashboards",
"description": "Frequently used dashboards pack.",
"support": "xsoar",
- "currentVersion": "1.3.8",
+ "currentVersion": "1.3.9",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CommonPlaybooks/.pack-ignore b/Packs/CommonPlaybooks/.pack-ignore
index 303c65fe9ba1..db2a4b3acd5e 100644
--- a/Packs/CommonPlaybooks/.pack-ignore
+++ b/Packs/CommonPlaybooks/.pack-ignore
@@ -115,3 +115,6 @@ ignore=RM108
[file:playbook-Get_File_Sample_By_Hash_-_Generic_v2_README.md]
ignore=RM108
+
+[file:playbook-Containment_Plan_-_Block_Indicators.yml]
+ignore=PB118
diff --git a/Packs/CommonPlaybooks/Playbooks/playbook-Block_Indicators_-_Generic_v3.yml b/Packs/CommonPlaybooks/Playbooks/playbook-Block_Indicators_-_Generic_v3.yml
index 2ca848f13327..9266ea4cc81e 100644
--- a/Packs/CommonPlaybooks/Playbooks/playbook-Block_Indicators_-_Generic_v3.yml
+++ b/Packs/CommonPlaybooks/Playbooks/playbook-Block_Indicators_-_Generic_v3.yml
@@ -350,6 +350,15 @@ tasks:
indicatorsValues:
complex:
root: IndicatorsToBlock
+ filters:
+ - - operator: isNotEqualString
+ left:
+ value:
+ simple: IndicatorsToBlock
+ iscontext: true
+ right:
+ value:
+ simple: No indicators to block
separatecontext: false
view: |-
{
@@ -826,8 +835,8 @@ tasks:
defaultValue:
value:
simple: No indicators to block
- separatecontext: false
reputationcalc: 2
+ separatecontext: false
continueonerrortype: ""
view: |-
{
@@ -915,6 +924,7 @@ tasks:
defaultValue:
value:
simple: No indicators to block
+ reputationcalc: 2
separatecontext: false
continueonerrortype: ""
view: |-
@@ -927,7 +937,6 @@ tasks:
note: false
timertriggers: []
ignoreworker: false
- reputationcalc: 2
skipunavailable: false
quietmode: 0
isoversize: false
diff --git a/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Generic_v2.yml b/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Generic_v2.yml
index c1af82829415..100f5449f6af 100644
--- a/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Generic_v2.yml
+++ b/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Generic_v2.yml
@@ -28,12 +28,12 @@ tasks:
description: ''
nexttasks:
'#none#':
- - "19"
- "18"
- "24"
- "16"
- "27"
- "28"
+ - "29"
separatecontext: false
view: |-
{
@@ -635,47 +635,6 @@ tasks:
continueonerrortype: ""
isoversize: false
isautoswitchedtoquietmode: false
- "19":
- id: "19"
- taskid: 7347fd96-e6df-4a99-8bf1-7ba856806ec0
- type: playbook
- task:
- id: 7347fd96-e6df-4a99-8bf1-7ba856806ec0
- version: -1
- name: Calculate Severity By Highest DBotScore
- description: Calculates the incident severity level according to the highest indicator DBotScore.
- playbookName: Calculate Severity By Highest DBotScore
- type: playbook
- iscommand: false
- brand: ""
- nexttasks:
- '#none#':
- - "15"
- scriptarguments:
- DBotScore:
- complex:
- root: inputs.DBotScore
- separatecontext: true
- loop:
- iscommand: false
- exitCondition: ""
- wait: 1
- max: 0
- view: |-
- {
- "position": {
- "x": 1370,
- "y": 200
- }
- }
- note: false
- timertriggers: []
- ignoreworker: false
- skipunavailable: false
- quietmode: 0
- continueonerrortype: ""
- isoversize: false
- isautoswitchedtoquietmode: false
"24":
id: "24"
taskid: 86c4ba1f-fddb-4f6b-8712-c5b0fc6bb55a
@@ -876,6 +835,60 @@ tasks:
quietmode: 0
isoversize: false
isautoswitchedtoquietmode: false
+ "29":
+ id: "29"
+ taskid: 92cbfafa-4642-43ae-8756-56081a975704
+ type: playbook
+ task:
+ id: 92cbfafa-4642-43ae-8756-56081a975704
+ version: -1
+ name: Calculate Severity By Highest DBotScore
+      description: Calculates the incident severity level according to the highest indicator DBotScore.
+ playbookName: Calculate Severity By Highest DBotScore
+ type: playbook
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "15"
+ scriptarguments:
+ DBotScoreIndicators:
+ complex:
+ root: inputs.DBotScoreIndicators
+ transformers:
+ - operator: uniq
+ DBotScoreMaxScore:
+ complex:
+ root: inputs.DBotScoreMaxScore
+ transformers:
+ - operator: sort
+ args:
+ descending:
+ value:
+ simple: "true"
+ - operator: uniq
+ - operator: FirstArrayElement
+ separatecontext: true
+ continueonerrortype: ""
+ loop:
+ iscommand: false
+ exitCondition: ""
+ wait: 1
+ max: 100
+ view: |-
+ {
+ "position": {
+ "x": 1360,
+ "y": 200
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
view: |-
{
"linkLabelsPosition": {
@@ -896,12 +909,15 @@ view: |-
}
}
inputs:
-- key: DBotScore
+- key: DBotScoreIndicators
value:
complex:
root: DBotScore
+ accessor: Indicator
+ transformers:
+ - operator: uniq
required: false
- description: 'Array of all indicators associated with the incident. '
+ description: 'Array of all indicator values associated with the incident. '
playbookInputQuery:
- key: CriticalUsers
value:
@@ -975,6 +991,22 @@ inputs:
required: false
description: An object of risky hosts and their corresponding scores, as outputted by the "xdr-list-risky-hosts" command.
playbookInputQuery:
+- key: DBotScoreMaxScore
+ value:
+ complex:
+ root: DBotScore
+ accessor: Score
+ transformers:
+ - operator: sort
+ args:
+ descending:
+ value:
+ simple: "true"
+ - operator: uniq
+ - operator: FirstArrayElement
+ required: false
+ description: The highest score (number) that was given to a DBotScore indicator.
+ playbookInputQuery:
outputs:
- contextPath: CriticalAssets
description: All critical assets involved in the incident.
diff --git a/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Generic_v2_README.md b/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Generic_v2_README.md
index 8589948a6dc3..25728338802f 100644
--- a/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Generic_v2_README.md
+++ b/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Generic_v2_README.md
@@ -4,7 +4,9 @@ Calculate and assign the incident severity based on the highest returned severit
- Critical assets
- Email authenticity
- Current incident severity
-- Microsoft Headers.
+- Microsoft Headers
+- Risky users (XDR)
+- Risky hosts (XDR).
## Dependencies
@@ -12,9 +14,10 @@ This playbook uses the following sub-playbooks, integrations, and scripts.
### Sub-playbooks
-* Calculate Severity By Email Authenticity
-* Calculate Severity - Critical Assets v2
* Calculate Severity By Highest DBotScore
+* Calculate Severity - Cortex XDR Risky Assets
+* Calculate Severity - Critical Assets v2
+* Calculate Severity By Email Authenticity
### Integrations
@@ -34,7 +37,7 @@ This playbook does not use any integrations.
| **Name** | **Description** | **Default Value** | **Required** |
| --- | --- | --- | --- |
-| DBotScore | Array of all indicators associated with the incident. | DBotScore | Optional |
+| DBotScoreIndicators | Array of all indicator values associated with the incident. | DBotScore.Indicator | Optional |
| CriticalUsers | CSV of usernames of critical users. | admin,administrator | Optional |
| CriticalEndpoints | CSV of hostnames of critical endpoints. | admin | Optional |
| CriticalGroups | CSV of DN names of critical AD groups. | admins,administrators | Optional |
@@ -42,6 +45,9 @@ This playbook does not use any integrations.
| Endpoint | Endpoints to check against the CriticalEndpoints list. | Endpoint | Optional |
| EmailAuthenticityCheck | Indicates the email authenticity resulting from the EmailAuthenticityCheck script. Possible values are: Pass, Fail, Suspicious, and Undetermined. | Email.AuthenticityCheck | Optional |
| MicrosoftHeadersSeverityCheck | The value is set by the "Process Microsoft's Anti-Spam Headers" Playbook, which calculates the severity after processing the PCL, BCL and PCL values inside Microsoft's headers. | ${Email.MicrosoftHeadersSeverityCheck} | Optional |
+| XDRRiskyUsers | An object of risky users and their corresponding scores, as outputted by the "xdr-list-risky-users" command. | PaloAltoNetworksXDR.RiskyUser | Optional |
+| XDRRiskyHosts | An object of risky hosts and their corresponding scores, as outputted by the "xdr-list-risky-hosts" command. | PaloAltoNetworksXDR.RiskyHost | Optional |
+| DBotScoreMaxScore | The highest score \(number\) that was given to a DBotScore indicator. | DBotScore.Score | Optional |
## Playbook Outputs
diff --git a/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Standard.yml b/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Standard.yml
index 929506e75307..832dca053560 100644
--- a/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Standard.yml
+++ b/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Standard.yml
@@ -1,7 +1,7 @@
id: Calculate Severity - Standard
version: -1
name: Calculate Severity - Standard
-description: Calculates and sets the incident severity based on the combination of the current incident severity, and the severity returned from the Evaluate Severity - Set By Highest DBotScore playbook.
+description: Calculates and sets the incident severity based on the combination of the current incident severity, and the severity returned from the Calculate Severity By Highest DBotScore playbook.
fromversion: 5.0.0
starttaskid: "0"
tasks:
@@ -13,13 +13,13 @@ tasks:
id: 39518188-8725-4c70-8f80-3a9a4554deb2
version: -1
name: ""
- description: ""
iscommand: false
brand: ""
+ description: ''
nexttasks:
'#none#':
- "16"
- - "18"
+ - "20"
separatecontext: false
view: |-
{
@@ -31,6 +31,11 @@ tasks:
note: false
timertriggers: []
ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"4":
id: "4"
taskid: d750bb71-4ef5-4de1-88ec-02d5035a0177
@@ -48,158 +53,8 @@ tasks:
'#none#':
- "10"
scriptarguments:
- addLabels: {}
- affecteddata: {}
- affecteddatatype: {}
- affectedhosts: {}
- affectedindividualscontactinformation: {}
- affectedips: {}
- app: {}
- approximatenumberofaffecteddatasubjects: {}
- arcsightcaseid: {}
- assetid: {}
- attachmentcount: {}
- attachmentextension: {}
- attachmenthash: {}
- attachmentid: {}
- attachmentitem: {}
- attachmentname: {}
- attachmentsize: {}
- attachmenttype: {}
- backupowner: {}
- blah: {}
- booltest: {}
- bugtraq: {}
- campaigntargetcount: {}
- campaigntargets: {}
- city: {}
- closeNotes: {}
- closeReason: {}
- companyaddress: {}
- companycity: {}
- companycountry: {}
- companyhasinsuranceforthebreach: {}
- companyname: {}
- companypostalcode: {}
- contactaddress: {}
- contactname: {}
- country: {}
- countrywherebusinesshasitsmainestablishment: {}
- countrywherethebreachtookplace: {}
- criticalassets: {}
- customFields: {}
- cve: {}
- cvss: {}
- dataencryptionstatus: {}
- datetimeofthebreach: {}
- daysbetweenreportcreation: {}
- deleteEmptyField: {}
- dest: {}
- destinationip: {}
- destntdomain: {}
- details: {}
- detectedusers: {}
- dpoemailaddress: {}
- duration: {}
- emailaddress: {}
- emailauthenticitycheck: {}
- emailbcc: {}
- emailbody: {}
- emailbodyformat: {}
- emailbodyhtml: {}
- emailbodyhtmlraw: {}
- emailcc: {}
- emailclassification: {}
- emailclientname: {}
- emailfrom: {}
- emailfromdisplayname: {}
- emailhtml: {}
- emailinreplyto: {}
- emailkeywords: {}
- emailmessageid: {}
- emailreceived: {}
- emailreplyto: {}
- emailreturnpath: {}
- emailsenderdomain: {}
- emailsenderip: {}
- emailsize: {}
- emailsource: {}
- emailsubject: {}
- emailsubjectlanguage: {}
- emailto: {}
- emailtocount: {}
- emailurlclicked: {}
- eventid: {}
- falses: {}
- fetchid: {}
- fetchtype: {}
- filehash: {}
- filename: {}
- filepath: {}
- hostid: {}
- hostname: {}
- htmlimage: {}
- htmlrenderedimage: {}
- id: {}
- important: {}
- importantfield: {}
- isthedatasubjecttodpia: {}
- killchain: {}
- labels: {}
- likelyimpact: {}
- lob: {}
- maliciouscauseifthecauseisamaliciousattack: {}
- malwarefamily: {}
- mdtest: {}
- measurestomitigate: {}
- myfield: {}
- name: {}
- occurred: {}
- owner: {}
- phase: {}
- phishingsubtype: {}
- possiblecauseofthebreach: {}
- postalcode: {}
- queues: {}
- relateddomain: {}
- replacePlaybook: {}
- reporteduser: {}
- reportinguser: {}
- roles: {}
- screenshot: {}
- screenshot2: {}
- sectorofaffectedparty: {}
- selector: {}
severity:
simple: low
- signature: {}
- single: {}
- single2: {}
- sizenumberofemployees: {}
- sizeturnover: {}
- sla: {}
- slaField: {}
- source: {}
- src: {}
- srcntdomain: {}
- srcuser: {}
- systems: {}
- team: {}
- telephoneno: {}
- test: {}
- test2: {}
- testfield: {}
- timeassignedtolevel2: {}
- timefield1: {}
- timelevel1: {}
- type: {}
- user: {}
- username: {}
- vendorid: {}
- vendorproduct: {}
- vulnerabilitycategory: {}
- whereisdatahosted: {}
- xdr: {}
reputationcalc: 1
separatecontext: false
view: |-
@@ -212,6 +67,11 @@ tasks:
note: false
timertriggers: []
ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"5":
id: "5"
taskid: 9c7487d4-53f2-4645-8f7b-2653d6c76676
@@ -229,153 +89,8 @@ tasks:
'#none#':
- "10"
scriptarguments:
- addLabels: {}
- affecteddata: {}
- affecteddatatype: {}
- affectedhosts: {}
- affectedindividualscontactinformation: {}
- affectedips: {}
- app: {}
- approximatenumberofaffecteddatasubjects: {}
- arcsightcaseid: {}
- assetid: {}
- attachmentcount: {}
- attachmentextension: {}
- attachmenthash: {}
- attachmentid: {}
- attachmentitem: {}
- attachmentname: {}
- attachmentsize: {}
- attachmenttype: {}
- backupowner: {}
- blah: {}
- booltest: {}
- bugtraq: {}
- city: {}
- closeNotes: {}
- closeReason: {}
- companyaddress: {}
- companycity: {}
- companycountry: {}
- companyhasinsuranceforthebreach: {}
- companyname: {}
- companypostalcode: {}
- contactaddress: {}
- contactname: {}
- country: {}
- countrywherebusinesshasitsmainestablishment: {}
- countrywherethebreachtookplace: {}
- customFields: {}
- cve: {}
- cvss: {}
- dataencryptionstatus: {}
- datetimeofthebreach: {}
- daysbetweenreportcreation: {}
- deleteEmptyField: {}
- dest: {}
- destinationip: {}
- destntdomain: {}
- details: {}
- detectedusers: {}
- dpoemailaddress: {}
- duration: {}
- emailaddress: {}
- emailbcc: {}
- emailbody: {}
- emailbodyformat: {}
- emailbodyhtml: {}
- emailbodyhtmlraw: {}
- emailcc: {}
- emailclassification: {}
- emailclientname: {}
- emailfrom: {}
- emailfromdisplayname: {}
- emailinreplyto: {}
- emailkeywords: {}
- emailmessageid: {}
- emailreceived: {}
- emailreplyto: {}
- emailreturnpath: {}
- emailsenderdomain: {}
- emailsenderip: {}
- emailsize: {}
- emailsource: {}
- emailsubject: {}
- emailsubjectlanguage: {}
- emailto: {}
- emailtocount: {}
- emailurlclicked: {}
- eventid: {}
- falses: {}
- fetchid: {}
- fetchtype: {}
- filehash: {}
- filename: {}
- filepath: {}
- hostid: {}
- hostname: {}
- htmlimage: {}
- htmlrenderedimage: {}
- id: {}
- important: {}
- importantfield: {}
- isthedatasubjecttodpia: {}
- killchain: {}
- labels: {}
- likelyimpact: {}
- lob: {}
- maliciouscauseifthecauseisamaliciousattack: {}
- malwarefamily: {}
- mdtest: {}
- measurestomitigate: {}
- myfield: {}
- name: {}
- occurred: {}
- owner: {}
- phase: {}
- phishingsubtype: {}
- possiblecauseofthebreach: {}
- postalcode: {}
- queues: {}
- relateddomain: {}
- replacePlaybook: {}
- reporteduser: {}
- reportinguser: {}
- roles: {}
- screenshot: {}
- screenshot2: {}
- sectorofaffectedparty: {}
- selector: {}
severity:
simple: medium
- signature: {}
- single: {}
- single2: {}
- sizenumberofemployees: {}
- sizeturnover: {}
- sla: {}
- slaField: {}
- source: {}
- src: {}
- srcntdomain: {}
- srcuser: {}
- systems: {}
- team: {}
- telephoneno: {}
- test: {}
- test2: {}
- testfield: {}
- timeassignedtolevel2: {}
- timefield1: {}
- timelevel1: {}
- type: {}
- user: {}
- username: {}
- vendorid: {}
- vendorproduct: {}
- vulnerabilitycategory: {}
- whereisdatahosted: {}
- xdr: {}
reputationcalc: 1
separatecontext: false
view: |-
@@ -388,6 +103,11 @@ tasks:
note: false
timertriggers: []
ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"6":
id: "6"
taskid: 77f20a17-499a-4ed3-85ad-a24e697b4792
@@ -405,158 +125,8 @@ tasks:
'#none#':
- "10"
scriptarguments:
- addLabels: {}
- affecteddata: {}
- affecteddatatype: {}
- affectedhosts: {}
- affectedindividualscontactinformation: {}
- affectedips: {}
- app: {}
- approximatenumberofaffecteddatasubjects: {}
- arcsightcaseid: {}
- assetid: {}
- attachmentcount: {}
- attachmentextension: {}
- attachmenthash: {}
- attachmentid: {}
- attachmentitem: {}
- attachmentname: {}
- attachmentsize: {}
- attachmenttype: {}
- backupowner: {}
- blah: {}
- booltest: {}
- bugtraq: {}
- campaigntargetcount: {}
- campaigntargets: {}
- city: {}
- closeNotes: {}
- closeReason: {}
- companyaddress: {}
- companycity: {}
- companycountry: {}
- companyhasinsuranceforthebreach: {}
- companyname: {}
- companypostalcode: {}
- contactaddress: {}
- contactname: {}
- country: {}
- countrywherebusinesshasitsmainestablishment: {}
- countrywherethebreachtookplace: {}
- criticalassets: {}
- customFields: {}
- cve: {}
- cvss: {}
- dataencryptionstatus: {}
- datetimeofthebreach: {}
- daysbetweenreportcreation: {}
- deleteEmptyField: {}
- dest: {}
- destinationip: {}
- destntdomain: {}
- details: {}
- detectedusers: {}
- dpoemailaddress: {}
- duration: {}
- emailaddress: {}
- emailauthenticitycheck: {}
- emailbcc: {}
- emailbody: {}
- emailbodyformat: {}
- emailbodyhtml: {}
- emailbodyhtmlraw: {}
- emailcc: {}
- emailclassification: {}
- emailclientname: {}
- emailfrom: {}
- emailfromdisplayname: {}
- emailhtml: {}
- emailinreplyto: {}
- emailkeywords: {}
- emailmessageid: {}
- emailreceived: {}
- emailreplyto: {}
- emailreturnpath: {}
- emailsenderdomain: {}
- emailsenderip: {}
- emailsize: {}
- emailsource: {}
- emailsubject: {}
- emailsubjectlanguage: {}
- emailto: {}
- emailtocount: {}
- emailurlclicked: {}
- eventid: {}
- falses: {}
- fetchid: {}
- fetchtype: {}
- filehash: {}
- filename: {}
- filepath: {}
- hostid: {}
- hostname: {}
- htmlimage: {}
- htmlrenderedimage: {}
- id: {}
- important: {}
- importantfield: {}
- isthedatasubjecttodpia: {}
- killchain: {}
- labels: {}
- likelyimpact: {}
- lob: {}
- maliciouscauseifthecauseisamaliciousattack: {}
- malwarefamily: {}
- mdtest: {}
- measurestomitigate: {}
- myfield: {}
- name: {}
- occurred: {}
- owner: {}
- phase: {}
- phishingsubtype: {}
- possiblecauseofthebreach: {}
- postalcode: {}
- queues: {}
- relateddomain: {}
- replacePlaybook: {}
- reporteduser: {}
- reportinguser: {}
- roles: {}
- screenshot: {}
- screenshot2: {}
- sectorofaffectedparty: {}
- selector: {}
severity:
simple: high
- signature: {}
- single: {}
- single2: {}
- sizenumberofemployees: {}
- sizeturnover: {}
- sla: {}
- slaField: {}
- source: {}
- src: {}
- srcntdomain: {}
- srcuser: {}
- systems: {}
- team: {}
- telephoneno: {}
- test: {}
- test2: {}
- testfield: {}
- timeassignedtolevel2: {}
- timefield1: {}
- timelevel1: {}
- type: {}
- user: {}
- username: {}
- vendorid: {}
- vendorproduct: {}
- vulnerabilitycategory: {}
- whereisdatahosted: {}
- xdr: {}
reputationcalc: 1
separatecontext: false
view: |-
@@ -569,6 +139,11 @@ tasks:
note: false
timertriggers: []
ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"10":
id: "10"
taskid: 84feda52-060b-4e27-89ef-05553ca57636
@@ -577,10 +152,10 @@ tasks:
id: 84feda52-060b-4e27-89ef-05553ca57636
version: -1
name: Done
- description: ""
type: title
iscommand: false
brand: ""
+ description: ''
separatecontext: false
view: |-
{
@@ -592,6 +167,11 @@ tasks:
note: false
timertriggers: []
ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"11":
id: "11"
taskid: ffcb1963-a04f-4f3b-8ff8-37e6829ac96c
@@ -609,155 +189,8 @@ tasks:
'#none#':
- "10"
scriptarguments:
- addLabels: {}
- affecteddata: {}
- affecteddatatype: {}
- affectedhosts: {}
- affectedindividualscontactinformation: {}
- affectedips: {}
- app: {}
- approximatenumberofaffecteddatasubjects: {}
- arcsightcaseid: {}
- assetid: {}
- attachmentcount: {}
- attachmentextension: {}
- attachmenthash: {}
- attachmentid: {}
- attachmentitem: {}
- attachmentname: {}
- attachmentsize: {}
- attachmenttype: {}
- backupowner: {}
- blah: {}
- booltest: {}
- bugtraq: {}
- campaigntargetcount: {}
- campaigntargets: {}
- city: {}
- closeNotes: {}
- closeReason: {}
- companyaddress: {}
- companycity: {}
- companycountry: {}
- companyhasinsuranceforthebreach: {}
- companyname: {}
- companypostalcode: {}
- contactaddress: {}
- contactname: {}
- country: {}
- countrywherebusinesshasitsmainestablishment: {}
- countrywherethebreachtookplace: {}
- criticalassets: {}
- customFields: {}
- cve: {}
- cvss: {}
- dataencryptionstatus: {}
- datetimeofthebreach: {}
- daysbetweenreportcreation: {}
- deleteEmptyField: {}
- dest: {}
- destinationip: {}
- destntdomain: {}
- details: {}
- detectedusers: {}
- dpoemailaddress: {}
- duration: {}
- emailaddress: {}
- emailauthenticitycheck: {}
- emailbcc: {}
- emailbody: {}
- emailbodyformat: {}
- emailbodyhtml: {}
- emailbodyhtmlraw: {}
- emailcc: {}
- emailclassification: {}
- emailclientname: {}
- emailfrom: {}
- emailfromdisplayname: {}
- emailhtml: {}
- emailinreplyto: {}
- emailkeywords: {}
- emailmessageid: {}
- emailreceived: {}
- emailreplyto: {}
- emailreturnpath: {}
- emailsenderdomain: {}
- emailsenderip: {}
- emailsize: {}
- emailsource: {}
- emailsubject: {}
- emailsubjectlanguage: {}
- emailto: {}
- emailtocount: {}
- emailurlclicked: {}
- eventid: {}
- falses: {}
- fetchid: {}
- fetchtype: {}
- filehash: {}
- filename: {}
- filepath: {}
- hostid: {}
- hostname: {}
- htmlimage: {}
- htmlrenderedimage: {}
- id: {}
- important: {}
- importantfield: {}
- isthedatasubjecttodpia: {}
- labels: {}
- likelyimpact: {}
- maliciouscauseifthecauseisamaliciousattack: {}
- malwarefamily: {}
- mdtest: {}
- measurestomitigate: {}
- myfield: {}
- name: {}
- occurred: {}
- owner: {}
- phase: {}
- phishingsubtype: {}
- possiblecauseofthebreach: {}
- postalcode: {}
- relateddomain: {}
- replacePlaybook: {}
- reporteduser: {}
- reportinguser: {}
- roles: {}
- screenshot: {}
- screenshot2: {}
- sectorofaffectedparty: {}
- selector: {}
severity:
simple: critical
- signature: {}
- single: {}
- single2: {}
- sizenumberofemployees: {}
- sizeturnover: {}
- sla: {}
- slaField: {}
- source: {}
- src: {}
- srcntdomain: {}
- srcuser: {}
- systems: {}
- team: {}
- telephoneno: {}
- test: {}
- test2: {}
- testfield: {}
- timeassignedtolevel2: {}
- timefield1: {}
- timelevel1: {}
- type: {}
- user: {}
- username: {}
- vendorid: {}
- vendorproduct: {}
- vulnerabilitycategory: {}
- whereisdatahosted: {}
- xdr: {}
reputationcalc: 1
separatecontext: false
view: |-
@@ -770,6 +203,11 @@ tasks:
note: false
timertriggers: []
ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"15":
id: "15"
taskid: 14b9554d-23ca-4e85-86f8-51f290b2a18d
@@ -914,6 +352,11 @@ tasks:
note: false
timertriggers: []
ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"16":
id: "16"
taskid: dd9d54b5-409c-4bd2-82d3-62785b06c406
@@ -951,43 +394,11 @@ tasks:
note: false
timertriggers: []
ignoreworker: false
- "18":
- id: "18"
- taskid: 64da7797-a618-454e-8243-37cfde94d1c5
- type: playbook
- task:
- id: 64da7797-a618-454e-8243-37cfde94d1c5
- version: -1
- name: Evaluate Severity - Set By Highest DBotScore
- description: Calculates the incident severity level according to the indicator with the highest DBotScore.
- playbookName: Calculate Severity By Highest DBotScore
- type: playbook
- iscommand: false
- brand: ""
- nexttasks:
- '#none#':
- - "15"
- scriptarguments:
- DBotScore:
- complex:
- root: inputs.DBotScore
- transformers:
- - operator: uniq
- separatecontext: true
- loop:
- iscommand: false
- exitCondition: ""
- wait: 1
- view: |-
- {
- "position": {
- "x": 620,
- "y": 195
- }
- }
- note: false
- timertriggers: []
- ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"19":
id: "19"
taskid: c78dbddd-05c3-4810-8b62-9a0f5958dc73
@@ -1005,158 +416,8 @@ tasks:
'#none#':
- "10"
scriptarguments:
- addLabels: {}
- affecteddata: {}
- affecteddatatype: {}
- affectedhosts: {}
- affectedindividualscontactinformation: {}
- affectedips: {}
- app: {}
- approximatenumberofaffecteddatasubjects: {}
- arcsightcaseid: {}
- assetid: {}
- attachmentcount: {}
- attachmentextension: {}
- attachmenthash: {}
- attachmentid: {}
- attachmentitem: {}
- attachmentname: {}
- attachmentsize: {}
- attachmenttype: {}
- backupowner: {}
- blah: {}
- booltest: {}
- bugtraq: {}
- campaigntargetcount: {}
- campaigntargets: {}
- city: {}
- closeNotes: {}
- closeReason: {}
- companyaddress: {}
- companycity: {}
- companycountry: {}
- companyhasinsuranceforthebreach: {}
- companyname: {}
- companypostalcode: {}
- contactaddress: {}
- contactname: {}
- country: {}
- countrywherebusinesshasitsmainestablishment: {}
- countrywherethebreachtookplace: {}
- criticalassets: {}
- customFields: {}
- cve: {}
- cvss: {}
- dataencryptionstatus: {}
- datetimeofthebreach: {}
- daysbetweenreportcreation: {}
- deleteEmptyField: {}
- dest: {}
- destinationip: {}
- destntdomain: {}
- details: {}
- detectedusers: {}
- dpoemailaddress: {}
- duration: {}
- emailaddress: {}
- emailauthenticitycheck: {}
- emailbcc: {}
- emailbody: {}
- emailbodyformat: {}
- emailbodyhtml: {}
- emailbodyhtmlraw: {}
- emailcc: {}
- emailclassification: {}
- emailclientname: {}
- emailfrom: {}
- emailfromdisplayname: {}
- emailhtml: {}
- emailinreplyto: {}
- emailkeywords: {}
- emailmessageid: {}
- emailreceived: {}
- emailreplyto: {}
- emailreturnpath: {}
- emailsenderdomain: {}
- emailsenderip: {}
- emailsize: {}
- emailsource: {}
- emailsubject: {}
- emailsubjectlanguage: {}
- emailto: {}
- emailtocount: {}
- emailurlclicked: {}
- eventid: {}
- falses: {}
- fetchid: {}
- fetchtype: {}
- filehash: {}
- filename: {}
- filepath: {}
- hostid: {}
- hostname: {}
- htmlimage: {}
- htmlrenderedimage: {}
- id: {}
- important: {}
- importantfield: {}
- isthedatasubjecttodpia: {}
- killchain: {}
- labels: {}
- likelyimpact: {}
- lob: {}
- maliciouscauseifthecauseisamaliciousattack: {}
- malwarefamily: {}
- mdtest: {}
- measurestomitigate: {}
- myfield: {}
- name: {}
- occurred: {}
- owner: {}
- phase: {}
- phishingsubtype: {}
- possiblecauseofthebreach: {}
- postalcode: {}
- queues: {}
- relateddomain: {}
- replacePlaybook: {}
- reporteduser: {}
- reportinguser: {}
- roles: {}
- screenshot: {}
- screenshot2: {}
- sectorofaffectedparty: {}
- selector: {}
severity:
simple: unknown
- signature: {}
- single: {}
- single2: {}
- sizenumberofemployees: {}
- sizeturnover: {}
- sla: {}
- slaField: {}
- source: {}
- src: {}
- srcntdomain: {}
- srcuser: {}
- systems: {}
- team: {}
- telephoneno: {}
- test: {}
- test2: {}
- testfield: {}
- timeassignedtolevel2: {}
- timefield1: {}
- timelevel1: {}
- type: {}
- user: {}
- username: {}
- vendorid: {}
- vendorproduct: {}
- vulnerabilitycategory: {}
- whereisdatahosted: {}
- xdr: {}
reputationcalc: 1
separatecontext: false
view: |-
@@ -1169,6 +430,65 @@ tasks:
note: false
timertriggers: []
ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "20":
+ id: "20"
+ taskid: 0488f795-a0de-432a-848e-ccc213a40e15
+ type: playbook
+ task:
+ id: 0488f795-a0de-432a-848e-ccc213a40e15
+ version: -1
+ name: Calculate Severity By Highest DBotScore
+ description: Calculates the incident severity level according to the highest DBotScore.
+ playbookName: Calculate Severity By Highest DBotScore
+ type: playbook
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "15"
+ scriptarguments:
+ DBotScoreIndicators:
+ complex:
+ root: inputs.DBotScoreIndicators
+ transformers:
+ - operator: uniq
+ DBotScoreMaxScore:
+ complex:
+ root: inputs.DBotScoreMaxScore
+ transformers:
+ - operator: sort
+ args:
+ descending:
+ value:
+ simple: "true"
+ - operator: uniq
+ - operator: FirstArrayElement
+ separatecontext: true
+ continueonerrortype: ""
+ loop:
+ iscommand: false
+ exitCondition: ""
+ wait: 1
+ max: 100
+ view: |-
+ {
+ "position": {
+ "x": 610,
+ "y": 195
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
view: |-
{
"linkLabelsPosition": {
@@ -1186,12 +506,34 @@ view: |-
}
}
inputs:
-- key: DBotScore
+- key: DBotScoreIndicators
+ value:
+ complex:
+ root: DBotScore
+ accessor: Indicator
+ transformers:
+ - operator: uniq
+ required: false
+ description: A list of DBotScore indicator values.
+ playbookInputQuery:
+- key: DBotScoreMaxScore
value:
complex:
root: DBotScore
+ accessor: Score
+ transformers:
+ - operator: sort
+ args:
+ descending:
+ value:
+ simple: "true"
+ - operator: uniq
+ - operator: FirstArrayElement
required: false
- description: A list of DBotScores of indicators.
+ description: The highest score (number) that was given to a DBotScore indicator.
+ playbookInputQuery:
outputs: []
tests:
- Calculate Severity - Standard - Test
+contentitemexportablefields:
+ contentitemfields: {}
diff --git a/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Standard_README.md b/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Standard_README.md
index 2583a7aa8321..8dc0c7a9a2fb 100644
--- a/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Standard_README.md
+++ b/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_-_Standard_README.md
@@ -1,31 +1,41 @@
-Calculates and sets the incident severity based on the combination of the current incident severity, and the severity returned from the `Evaluate Severity - Set By Highest DBotScore` playbook.
+Calculates and sets the incident severity based on the combination of the current incident severity, and the severity returned from the **Calculate Severity By Highest DBotScore** playbook.
## Dependencies
+
This playbook uses the following sub-playbooks, integrations, and scripts.
-## Sub-playbooks
-* Evaluate Severity - Set By Highest DBotScore
+### Sub-playbooks
+
+Calculate Severity By Highest DBotScore
+
+### Integrations
+
+This playbook does not use any integrations.
+
+### Scripts
-## Integrations
-* Builtin
+Set
-## Scripts
-* Set
+### Commands
-## Commands
* setIncident
## Playbook Inputs
+
---
-| **Name** | **Description** | **Default Value** | **Source** | **Required** |
-| --- | --- | --- | --- | --- |
-| DBotScore | The list of DBotScores of indicators. | None | DBotScore | Optional |
+| **Name** | **Description** | **Default Value** | **Required** |
+| --- | --- | --- | --- |
+| DBotScoreIndicators | A list of DBotScore indicator values. | DBotScore.Indicator | Optional |
+| DBotScoreMaxScore | The highest score \(number\) that was given to a DBotScore indicator. | DBotScore.Score | Optional |
## Playbook Outputs
+
---
There are no outputs for this playbook.
## Playbook Image
+
---
-
+
+
diff --git a/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_By_Highest_DBotScore_6_0.yml b/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_By_Highest_DBotScore_6_0.yml
index 545d454e3c04..6318e928f1d9 100644
--- a/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_By_Highest_DBotScore_6_0.yml
+++ b/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_By_Highest_DBotScore_6_0.yml
@@ -54,47 +54,7 @@ tasks:
{
"position": {
"x": 240,
- "y": 975
- }
- }
- note: false
- timertriggers: []
- ignoreworker: false
- skipunavailable: false
- quietmode: 0
- isoversize: false
- isautoswitchedtoquietmode: false
- "22":
- id: "22"
- taskid: c33bfc3b-3bfd-4b09-8fef-d7c299ddbcab
- type: regular
- task:
- id: c33bfc3b-3bfd-4b09-8fef-d7c299ddbcab
- version: -1
- name: Set DBotScore from inputs
- description: |
- Sets the DBotScore into context.
- scriptName: Set
- type: regular
- iscommand: false
- brand: ""
- nexttasks:
- '#none#':
- - "31"
- scriptarguments:
- key:
- simple: DBotScore
- value:
- complex:
- root: inputs.DBotScore
- reputationcalc: 1
- separatecontext: false
- continueonerrortype: ""
- view: |-
- {
- "position": {
- "x": 592.5,
- "y": 290
+ "y": 855
}
}
note: false
@@ -106,10 +66,10 @@ tasks:
isautoswitchedtoquietmode: false
"23":
id: "23"
- taskid: 0cc4278a-6b22-4ea9-834f-3ed21c346499
+ taskid: 2166ae1b-bafb-478b-81f9-c68b98651a63
type: condition
task:
- id: 0cc4278a-6b22-4ea9-834f-3ed21c346499
+ id: 2166ae1b-bafb-478b-81f9-c68b98651a63
version: -1
name: Is there a DBotScore in inputs?
description: Checks if there is a "DBotScore" in the playbook input.
@@ -120,19 +80,17 @@ tasks:
'#default#':
- "24"
"yes":
- - "22"
+ - "31"
separatecontext: false
conditions:
- label: "yes"
condition:
- - - operator: isExists
+ - - operator: isNotEmpty
left:
value:
complex:
- root: inputs.DBotScore
+ root: inputs.DBotScoreIndicators
iscontext: true
- right:
- value: {}
continueonerrortype: ""
view: |-
{
@@ -181,10 +139,10 @@ tasks:
isautoswitchedtoquietmode: false
"25":
id: "25"
- taskid: 8ad27555-7d81-4dd4-86e5-598b2c445def
+ taskid: e66f1b7a-bf4c-4b8c-8995-ba249d9f7600
type: condition
task:
- id: 8ad27555-7d81-4dd4-86e5-598b2c445def
+ id: e66f1b7a-bf4c-4b8c-8995-ba249d9f7600
version: -1
name: Evaluate severity based on DBotScore of indicators
description: |-
@@ -209,7 +167,7 @@ tasks:
conditions:
- label: High
condition:
- - - operator: isExists
+ - - operator: isNotEmpty
left:
value:
complex:
@@ -225,18 +183,16 @@ tasks:
simple: "3"
accessor: Score
iscontext: true
- right:
- value: {}
- - operator: isExists
+ - operator: isNotEmpty
left:
value:
complex:
- root: DBotScore.Score
+ root: inputs.DBotScoreMaxScore
filters:
- - operator: isEqualString
left:
value:
- simple: DBotScore.Score
+ simple: inputs.DBotScoreMaxScore
iscontext: true
right:
value:
@@ -244,7 +200,7 @@ tasks:
iscontext: true
- label: Medium
condition:
- - - operator: isExists
+ - - operator: isNotEmpty
left:
value:
complex:
@@ -259,16 +215,16 @@ tasks:
value:
simple: "2"
iscontext: true
- - operator: isExists
+ - operator: isNotEmpty
left:
value:
complex:
- root: DBotScore.Score
+ root: inputs.DBotScoreMaxScore
filters:
- - operator: isEqualString
left:
value:
- simple: DBotScore.Score
+ simple: inputs.DBotScoreMaxScore
iscontext: true
right:
value:
@@ -276,7 +232,7 @@ tasks:
iscontext: true
- label: Low
condition:
- - - operator: isExists
+ - - operator: isNotEmpty
left:
value:
complex:
@@ -291,16 +247,16 @@ tasks:
value:
simple: "1"
iscontext: true
- - operator: isExists
+ - operator: isNotEmpty
left:
value:
complex:
- root: DBotScore.Score
+ root: inputs.DBotScoreMaxScore
filters:
- - operator: isEqualString
left:
value:
- simple: DBotScore.Score
+ simple: inputs.DBotScoreMaxScore
iscontext: true
right:
value:
@@ -311,7 +267,7 @@ tasks:
{
"position": {
"x": 592.5,
- "y": 610
+ "y": 490
}
}
note: false
@@ -349,7 +305,7 @@ tasks:
{
"position": {
"x": 990,
- "y": 805
+ "y": 685
}
}
note: false
@@ -387,7 +343,7 @@ tasks:
{
"position": {
"x": 592.5,
- "y": 805
+ "y": 685
}
}
note: false
@@ -425,7 +381,7 @@ tasks:
{
"position": {
"x": 190,
- "y": 805
+ "y": 685
}
}
note: false
@@ -463,7 +419,7 @@ tasks:
{
"position": {
"x": -210,
- "y": 805
+ "y": 685
}
}
note: false
@@ -475,10 +431,10 @@ tasks:
isautoswitchedtoquietmode: false
"31":
id: "31"
- taskid: 5acf56ba-5b7b-4472-86e2-637756c34822
+ taskid: 508c21c3-4362-4dd9-84a7-9efd305c0fef
type: regular
task:
- id: 5acf56ba-5b7b-4472-86e2-637756c34822
+ id: 508c21c3-4362-4dd9-84a7-9efd305c0fef
version: -1
name: Get DBotScore from XSOAR
description: Get the overall score for the indicator as calculated by DBot.
@@ -492,8 +448,7 @@ tasks:
scriptarguments:
value:
complex:
- root: DBotScore
- accessor: Indicator
+ root: inputs.DBotScoreIndicators
transformers:
- operator: uniq
separatecontext: false
@@ -502,7 +457,7 @@ tasks:
{
"position": {
"x": 592.5,
- "y": 450
+ "y": 310
}
}
note: false
@@ -515,7 +470,6 @@ tasks:
view: |-
{
"linkLabelsPosition": {
- "23_22_yes": 0.31,
"23_24_#default#": 0.24,
"25_27_High": 0.82,
"25_28_Medium": 0.76,
@@ -524,7 +478,7 @@ view: |-
},
"paper": {
"dimensions": {
- "height": 1050,
+ "height": 930,
"width": 1580,
"x": -210,
"y": -10
@@ -532,12 +486,31 @@ view: |-
}
}
inputs:
-- key: DBotScore
+- key: DBotScoreIndicators
value:
complex:
root: DBotScore
+ accessor: Indicator
+ transformers:
+ - operator: uniq
+ required: false
+ description: 'Array of all DBotScore indicator values associated with the incident. '
+ playbookInputQuery:
+- key: DBotScoreMaxScore
+ value:
+ complex:
+ root: DBotScore
+ accessor: Score
+ transformers:
+ - operator: sort
+ args:
+ descending:
+ value:
+ simple: "true"
+ - operator: uniq
+ - operator: FirstArrayElement
required: false
- description: 'Array of all indicators associated with the incident. '
+ description: The highest score that was given to a DBotScore indicator.
playbookInputQuery:
outputs:
- contextPath: Severities.DBotScoreSeverity
diff --git a/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_By_Highest_DBotScore_6_0_README.md b/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_By_Highest_DBotScore_6_0_README.md
index cbcdbe31dc8a..a07898d3664f 100644
--- a/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_By_Highest_DBotScore_6_0_README.md
+++ b/Packs/CommonPlaybooks/Playbooks/playbook-Calculate_Severity_By_Highest_DBotScore_6_0_README.md
@@ -27,7 +27,8 @@ This playbook does not use any commands.
| **Name** | **Description** | **Default Value** | **Required** |
| --- | --- | --- | --- |
-| DBotScore | Array of all indicators associated with the incident. | DBotScore | Optional |
+| DBotScoreIndicators | Array of all DBotScore indicator values associated with the incident. | DBotScore.Indicator | Optional |
+| DBotScoreMaxScore | The highest score (number) that was given to a DBotScore indicator. | DBotScore.Score | Optional |
## Playbook Outputs
diff --git a/Packs/CommonPlaybooks/Playbooks/playbook-Containment_Plan_-_Block_Indicators.yml b/Packs/CommonPlaybooks/Playbooks/playbook-Containment_Plan_-_Block_Indicators.yml
index 2a063ca24020..e6c6d917ef27 100644
--- a/Packs/CommonPlaybooks/Playbooks/playbook-Containment_Plan_-_Block_Indicators.yml
+++ b/Packs/CommonPlaybooks/Playbooks/playbook-Containment_Plan_-_Block_Indicators.yml
@@ -122,8 +122,8 @@ tasks:
view: |-
{
"position": {
- "x": 800,
- "y": 1330
+ "x": 680,
+ "y": 1520
}
}
note: false
@@ -246,7 +246,7 @@ tasks:
{
"position": {
"x": 230,
- "y": 1160
+ "y": 1350
}
}
note: false
@@ -482,7 +482,7 @@ tasks:
brand: ""
nexttasks:
'#none#':
- - "11"
+ - "30"
scriptarguments:
AutoBlockIndicators:
complex:
@@ -661,19 +661,62 @@ tasks:
quietmode: 0
isoversize: false
isautoswitchedtoquietmode: false
+ "30":
+ id: "30"
+ taskid: 0a63acdb-6497-4f96-8318-5231aefde427
+ type: condition
+ task:
+ id: 0a63acdb-6497-4f96-8318-5231aefde427
+ version: -1
+ name: Are there any indicators that are blocked?
+ description: Check if there are any indicators that are blocked.
+ type: condition
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#default#':
+ - "2"
+ "yes":
+ - "11"
+ separatecontext: false
+ conditions:
+ - label: "yes"
+ condition:
+ - - operator: isNotEmpty
+ left:
+ value:
+ simple: IndicatorsToBlock
+ iscontext: true
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 230,
+ "y": 1160
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
view: |-
{
"linkLabelsPosition": {
"24_1_yes": 0.44,
"24_28_#default#": 0.24,
"26_28_#default#": 0.29,
+ "30_11_yes": 0.52,
+ "30_2_#default#": 0.38,
"3_25_yes": 0.43,
"3_2_#default#": 0.12
},
"paper": {
"dimensions": {
- "height": 1585,
- "width": 1190,
+ "height": 1775,
+ "width": 1070,
"x": -10,
"y": -190
}
diff --git a/Packs/CommonPlaybooks/Playbooks/playbook-Detonate_URL_-_Generic_v1.5.yml b/Packs/CommonPlaybooks/Playbooks/playbook-Detonate_URL_-_Generic_v1.5.yml
index 64cf166823e3..823b79ce276d 100644
--- a/Packs/CommonPlaybooks/Playbooks/playbook-Detonate_URL_-_Generic_v1.5.yml
+++ b/Packs/CommonPlaybooks/Playbooks/playbook-Detonate_URL_-_Generic_v1.5.yml
@@ -21,9 +21,10 @@ description: |-
- CrowdStrike Falcon Intelligence Sandbox
- OPSWAT Filescan
- ANYRUN
- - VirusTotal
+ - VirusTotal
- Anomali ThreatStream
- - Hatching Triage.
+ - Hatching Triage
+ - ThreatGrid
starttaskid: "0"
tasks:
"0":
@@ -46,7 +47,7 @@ tasks:
{
"position": {
"x": 450,
- "y": 50
+ "y": 170
}
}
note: false
@@ -72,7 +73,22 @@ tasks:
'#default#':
- "20"
"yes":
- - "2"
+ - "22"
+ - "12"
+ - "13"
+ - "14"
+ - "15"
+ - "16"
+ - "17"
+ - "21"
+ - "19"
+ - "9"
+ - "8"
+ - "7"
+ - "6"
+ - "4"
+ - "23"
+ - "24"
separatecontext: false
conditions:
- label: "yes"
@@ -88,100 +104,7 @@ tasks:
{
"position": {
"x": 450,
- "y": 180
- }
- }
- note: false
- timertriggers: []
- ignoreworker: false
- skipunavailable: true
- quietmode: 0
- isoversize: false
- isautoswitchedtoquietmode: false
- "2":
- id: "2"
- taskid: 3868d472-e5f4-4714-8c58-5c8f95d6b626
- type: title
- task:
- id: 3868d472-e5f4-4714-8c58-5c8f95d6b626
- version: -1
- name: Detonate URL
- type: title
- iscommand: false
- brand: ""
- description: ''
- nexttasks:
- '#none#':
- - "3"
- - "4"
- - "5"
- - "6"
- - "7"
- - "8"
- - "9"
- - "11"
- - "12"
- - "13"
- - "14"
- - "15"
- - "16"
- - "17"
- - "19"
- - "21"
- separatecontext: false
- continueonerrortype: ""
- view: |-
- {
- "position": {
- "x": 450,
- "y": 370
- }
- }
- note: false
- timertriggers: []
- ignoreworker: false
- skipunavailable: false
- quietmode: 0
- isoversize: false
- isautoswitchedtoquietmode: false
- "3":
- id: "3"
- taskid: 7631e811-c7ae-4c2b-895b-793922894b45
- type: playbook
- task:
- id: 7631e811-c7ae-4c2b-895b-793922894b45
- version: -1
- name: Detonate URL - ThreatGrid v2
- description: Detonate one or more URLs using the ThreatGrid integration.
- playbookName: Detonate URL - ThreatGrid v2
- type: playbook
- iscommand: false
- brand: ""
- nexttasks:
- '#none#':
- - "20"
- scriptarguments:
- URL:
- complex:
- root: inputs.URL
- transformers:
- - operator: uniq
- interval:
- simple: "10"
- timeout:
- simple: "60"
- separatecontext: true
- continueonerrortype: ""
- loop:
- iscommand: false
- exitCondition: ""
- wait: 1
- max: 100
- view: |-
- {
- "position": {
- "x": -2500,
- "y": 510
+ "y": 320
}
}
note: false
@@ -227,47 +150,7 @@ tasks:
view: |-
{
"position": {
- "x": -2090,
- "y": 510
- }
- }
- note: false
- timertriggers: []
- ignoreworker: false
- skipunavailable: true
- quietmode: 0
- isoversize: false
- isautoswitchedtoquietmode: false
- "5":
- id: "5"
- taskid: f2828581-9807-4f24-883e-130c0ca7fec6
- type: regular
- task:
- id: f2828581-9807-4f24-883e-130c0ca7fec6
- version: -1
- name: Detonate URL - JoeSecurity
- description: Submit a URL for sandbox analysis.
- script: '|||joe-submit-url'
- type: regular
- iscommand: true
- brand: ""
- nexttasks:
- '#none#':
- - "20"
- scriptarguments:
- timeout:
- simple: "60"
- url:
- complex:
- root: inputs.URL
- transformers:
- - operator: uniq
- separatecontext: false
- continueonerrortype: ""
- view: |-
- {
- "position": {
- "x": -1670,
+ "x": -1650,
"y": 510
}
}
@@ -468,48 +351,6 @@ tasks:
quietmode: 0
isoversize: false
isautoswitchedtoquietmode: false
- "11":
- id: "11"
- taskid: f2828581-9807-4f24-883e-130c0ca7fec6
- type: regular
- task:
- id: f2828581-9807-4f24-883e-130c0ca7fec6
- version: -1
- name: Detonate URL - WildFire
- description: Uploads a URL of a webpage to WildFire for analysis.
- script: '|||wildfire-upload-url'
- type: regular
- iscommand: true
- brand: ""
- nexttasks:
- '#none#':
- - "20"
- scriptarguments:
- polling:
- simple: "true"
- upload:
- complex:
- root: inputs.URL
- transformers:
- - operator: uniq
- verbose:
- simple: "true"
- separatecontext: false
- continueonerrortype: ""
- view: |-
- {
- "position": {
- "x": 450,
- "y": 510
- }
- }
- note: false
- timertriggers: []
- ignoreworker: false
- skipunavailable: true
- quietmode: 0
- isoversize: false
- isautoswitchedtoquietmode: false
"12":
id: "12"
taskid: 3868d472-e5f4-4714-8c58-5c8f95d6b626
@@ -913,17 +754,150 @@ tasks:
quietmode: 0
isoversize: false
isautoswitchedtoquietmode: false
+ "22":
+ id: "22"
+ taskid: d2b682c2-7df5-4ebd-855a-8949333649fb
+ type: playbook
+ task:
+ id: d2b682c2-7df5-4ebd-855a-8949333649fb
+ version: -1
+ name: Detonate URL - WildFire v2.2
+ description: |-
+ Detonate a webpage or remote file using the WildFire v2 integration. This playbook returns relevant reports to the War Room and file reputations to the context data.
+ The detonation supports the following file types:
+ APK, JAR, DOC, DOCX, RTF, OOXLS, XLSX, PPT, PPTX, XML, PE32, PDF, DMG, PKG, RAR, 7Z, JS.
+ playbookName: Detonate URL - WildFire v2.2
+ type: playbook
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "20"
+ scriptarguments:
+ Interval:
+ simple: "1"
+ Timeout:
+ simple: "8"
+ URL:
+ complex:
+ root: inputs.URL
+ transformers:
+ - operator: uniq
+ separatecontext: true
+ continueonerrortype: ""
+ loop:
+ iscommand: false
+ exitCondition: ""
+ wait: 1
+ max: 100
+ view: |-
+ {
+ "position": {
+ "x": 450,
+ "y": 510
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: true
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "23":
+ id: "23"
+ taskid: 156ee7d3-6235-4011-85fe-32e8d2154de1
+ type: regular
+ task:
+ id: 156ee7d3-6235-4011-85fe-32e8d2154de1
+ version: -1
+ name: Detonate URL - JoeSecurity
+ description: Submit a URL for sandbox analysis.
+ script: '|||joe-submit-url'
+ type: regular
+ iscommand: true
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "20"
+ scriptarguments:
+ url:
+ complex:
+ root: inputs.URL
+ transformers:
+ - operator: uniq
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": -2060,
+ "y": 510
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: true
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "24":
+ id: "24"
+ taskid: 79a510af-3640-415e-85bd-b9cf42ace798
+ type: playbook
+ task:
+ id: 79a510af-3640-415e-85bd-b9cf42ace798
+ version: -1
+ name: Detonate URL - ThreatGrid v2
+ playbookName: Detonate URL - ThreatGrid v2
+ type: playbook
+ iscommand: false
+ brand: ""
+ description: ''
+ nexttasks:
+ '#none#':
+ - "20"
+ separatecontext: true
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": -2470,
+ "y": 510
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
view: |-
{
"linkLabelsPosition": {
- "1_2_yes": 0.47
+ "1_12_yes": 0.51,
+ "1_13_yes": 0.71,
+ "1_14_yes": 0.82,
+ "1_15_yes": 0.86,
+ "1_16_yes": 0.88,
+ "1_17_yes": 0.9,
+ "1_19_yes": 0.9,
+ "1_21_yes": 0.9,
+ "1_23_yes": 0.9,
+ "1_24_yes": 0.9,
+ "1_4_yes": 0.89,
+ "1_6_yes": 0.84,
+ "1_7_yes": 0.83,
+ "1_9_yes": 0.57
},
"paper": {
"dimensions": {
- "height": 705,
- "width": 6690,
- "x": -2500,
- "y": 50
+ "height": 585,
+ "width": 6660,
+ "x": -2470,
+ "y": 170
}
}
}
@@ -931,10 +905,7 @@ inputs:
- key: URL
value:
complex:
- root: URL
- accessor: Data
- transformers:
- - operator: uniq
+ root: URL.Data
required: false
description: The URL object of the URL to be detonated.
playbookInputQuery:
diff --git a/Packs/CommonPlaybooks/Playbooks/playbook-Detonate_URL_-_Generic_v1.5_README.md b/Packs/CommonPlaybooks/Playbooks/playbook-Detonate_URL_-_Generic_v1.5_README.md
index 47065a065040..72b38cb0f625 100644
--- a/Packs/CommonPlaybooks/Playbooks/playbook-Detonate_URL_-_Generic_v1.5_README.md
+++ b/Packs/CommonPlaybooks/Playbooks/playbook-Detonate_URL_-_Generic_v1.5_README.md
@@ -15,9 +15,10 @@ Supported integrations:
- CrowdStrike Falcon Intelligence Sandbox
- OPSWAT Filescan
- ANYRUN
-- VirusTotal
+- VirusTotal
- Anomali ThreatStream
-- Hatching Triage.
+- Hatching Triage
+- ThreatGrid
## Dependencies
@@ -25,19 +26,20 @@ This playbook uses the following sub-playbooks, integrations, and scripts.
### Sub-playbooks
-* Detonate URL - Cuckoo
-* Detonate URL - ThreatGrid v2
-* Detonate URL - Lastline v2
+* Detonate URL - SecneurX Analysis
* Detonate URL - ThreatStream
-* Detonate URL - FireEye AX
-* Detonate URL - McAfee ATD
* Detonate URL - VMRay
-* Detonate URL - SecneurX Analysis
-* Detonate URL - Hatching Triage
-* Detonate URL - CrowdStrike Falcon Intelligence Sandbox v2
* Detonate URL - ANYRUN
-* Detonate URL - VirusTotal (API v3)
+* Detonate URL - Cuckoo
+* Detonate URL - McAfee ATD
* Detonate URL - Group-IB TDS Polygon
+* Detonate URL - ThreatGrid v2
+* Detonate URL - WildFire v2.2
+* Detonate URL - VirusTotal (API v3)
+* Detonate URL - Hatching Triage
+* Detonate URL - CrowdStrike Falcon Intelligence Sandbox v2
+* Detonate URL - FireEye AX
+* Detonate URL - Lastline v2
### Integrations
@@ -49,7 +51,6 @@ This playbook does not use any scripts.
### Commands
-* wildfire-upload-url
* opswat-filescan-scan-url
* joe-submit-url
diff --git a/Packs/CommonPlaybooks/Playbooks/playbook-Enrichment_for_Verdict.yml b/Packs/CommonPlaybooks/Playbooks/playbook-Enrichment_for_Verdict.yml
index 77c2bbd2abe3..bc843e925f62 100644
--- a/Packs/CommonPlaybooks/Playbooks/playbook-Enrichment_for_Verdict.yml
+++ b/Packs/CommonPlaybooks/Playbooks/playbook-Enrichment_for_Verdict.yml
@@ -3,7 +3,7 @@ version: -1
marketplaces:
- marketplacev2
name: Enrichment for Verdict
-description: This playbook checks prior alert closing reasons and performs enrichment and prevalence checks on different IOC types. It then returns the information needed to establish the alert's verdict.
+description: This playbook checks prior alert closing reasons and performs enrichment and prevalence checks on different IOC types. It then returns the information needed to establish the alert's verdict.
starttaskid: "0"
tasks:
"0":
diff --git a/Packs/CommonPlaybooks/Playbooks/playbook-Enrichment_for_Verdict_README.md b/Packs/CommonPlaybooks/Playbooks/playbook-Enrichment_for_Verdict_README.md
index 9092e0a8ebcc..a1030089a7ef 100644
--- a/Packs/CommonPlaybooks/Playbooks/playbook-Enrichment_for_Verdict_README.md
+++ b/Packs/CommonPlaybooks/Playbooks/playbook-Enrichment_for_Verdict_README.md
@@ -1,4 +1,4 @@
-This playbook checks prior alert closing reasons and performs enrichment and prevalence checks on different IOC types. It then returns the information needed to establish the alert's verdict.
+This playbook checks prior alert closing reasons and performs enrichment and prevalence checks on different IOC types. It then returns the information needed to establish the alert's verdict.
## Dependencies
diff --git a/Packs/CommonPlaybooks/Playbooks/playbook-Get_File_Sample_From_Path_-_Generic_V3.yml b/Packs/CommonPlaybooks/Playbooks/playbook-Get_File_Sample_From_Path_-_Generic_V3.yml
index 0dd21172b93e..7257c9449ce0 100644
--- a/Packs/CommonPlaybooks/Playbooks/playbook-Get_File_Sample_From_Path_-_Generic_V3.yml
+++ b/Packs/CommonPlaybooks/Playbooks/playbook-Get_File_Sample_From_Path_-_Generic_V3.yml
@@ -3,10 +3,13 @@ version: -1
contentitemexportablefields:
contentitemfields: {}
name: Get File Sample From Path - Generic V3
-description: |
+description: |-
This playbook returns a file sample from a specified path and host that you input in the following playbooks:
- 1) PS Remote Get File Sample From Path.
- 2) Get File Sample From Path - VMware Carbon Black EDR (Live Response API).
+ - PS Remote Get File Sample From Path
+ - Get File Sample From Path - VMware Carbon Black EDR (Live Response API)
+ - CrowdStrike Falcon - Retrieve File
+ - MDE - Retrieve File
+ - Cortex XDR - Retrieve File V2
starttaskid: "0"
tasks:
"0":
@@ -24,6 +27,9 @@ tasks:
'#none#':
- "11"
- "14"
+ - "15"
+ - "16"
+ - "17"
separatecontext: false
view: |-
{
@@ -87,7 +93,7 @@ tasks:
view: |-
{
"position": {
- "x": 50,
+ "x": 60,
"y": 195
}
}
@@ -131,7 +137,7 @@ tasks:
view: |-
{
"position": {
- "x": 50,
+ "x": 60,
"y": 340
}
}
@@ -220,14 +226,261 @@ tasks:
continueonerrortype: ""
isoversize: false
isautoswitchedtoquietmode: false
+ "15":
+ id: "15"
+ taskid: c12382b6-414b-4613-8ba2-3a686f22ba19
+ type: title
+ task:
+ id: c12382b6-414b-4613-8ba2-3a686f22ba19
+ version: -1
+ name: Use Microsoft Defender for Endpoint
+ type: title
+ iscommand: false
+ brand: ""
+ description: ''
+ nexttasks:
+ '#none#':
+ - "20"
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 900,
+ "y": 195
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: true
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "16":
+ id: "16"
+ taskid: dd5f9bca-f2d6-4d83-8188-372acf9aafc5
+ type: title
+ task:
+ id: dd5f9bca-f2d6-4d83-8188-372acf9aafc5
+ version: -1
+ name: Use CrowdStrike Falcon
+ type: title
+ iscommand: false
+ brand: ""
+ description: ''
+ nexttasks:
+ '#none#':
+ - "19"
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": -360,
+ "y": 195
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: true
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "17":
+ id: "17"
+ taskid: 9abf10fa-2f9e-4714-83ce-94f1e9388e92
+ type: title
+ task:
+ id: 9abf10fa-2f9e-4714-83ce-94f1e9388e92
+ version: -1
+ name: Use Cortex XDR
+ type: title
+ iscommand: false
+ brand: ""
+ description: ''
+ nexttasks:
+ '#none#':
+ - "18"
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 1310,
+ "y": 195
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: true
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "18":
+ id: "18"
+ taskid: 291977e7-1951-462c-8875-7a8f13ad4636
+ type: playbook
+ task:
+ id: 291977e7-1951-462c-8875-7a8f13ad4636
+ version: -1
+ name: Cortex XDR - Retrieve File v2
+ description: |-
+        This playbook retrieves files from selected endpoints. You can retrieve up to 20 files from 10 endpoints.
+ Inputs for this playbook are:
+ - A comma-separated list of endpoint IDs.
+ - A comma-separated list of file paths for your operating system, either Windows, Linux, or Mac. At least one file path is required.
+ playbookName: Cortex XDR - Retrieve File v2
+ type: playbook
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "8"
+ scriptarguments:
+ endpoint_ids:
+ complex:
+ root: inputs.Agent_ID
+ transformers:
+ - operator: uniq
+ file_path:
+ complex:
+ root: inputs.Path
+ transformers:
+ - operator: uniq
+ separatecontext: true
+ continueonerrortype: ""
+ loop:
+ iscommand: false
+ exitCondition: ""
+ wait: 1
+ max: 100
+ view: |-
+ {
+ "position": {
+ "x": 1310,
+ "y": 340
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: true
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "19":
+ id: "19"
+ taskid: 093a8aa7-07c9-45e3-85de-b08c39983dce
+ type: playbook
+ task:
+ id: 093a8aa7-07c9-45e3-85de-b08c39983dce
+ version: -1
+ name: CrowdStrike Falcon - Retrieve File
+ description: |-
+ This playbook is part of the 'Malware Investigation And Response' pack. For more information, refer to https://xsoar.pan.dev/docs/reference/packs/malware-investigation-and-response.
+ This playbook retrieves and unzips files from CrowdStrike Falcon and returns a list of the files that were and were not retrieved.
+ playbookName: CrowdStrike Falcon - Retrieve File
+ type: playbook
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "8"
+ scriptarguments:
+ HostId:
+ complex:
+ root: inputs.Agent_ID
+ transformers:
+ - operator: uniq
+ PathsToGet:
+ complex:
+ root: inputs.Path
+ transformers:
+ - operator: uniq
+ ZipPassword:
+ simple: infected
+ separatecontext: true
+ continueonerrortype: ""
+ loop:
+ iscommand: false
+ exitCondition: ""
+ wait: 1
+ max: 100
+ view: |-
+ {
+ "position": {
+ "x": -360,
+ "y": 340
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: true
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "20":
+ id: "20"
+ taskid: 8232fa6d-94a4-4d02-8ea4-dd0d3144fcaa
+ type: playbook
+ task:
+ id: 8232fa6d-94a4-4d02-8ea4-dd0d3144fcaa
+ version: -1
+ name: MDE - Retrieve File
+ description: |-
+ This playbook is part of the 'Malware Investigation And Response' pack. For more information, refer to https://xsoar.pan.dev/docs/reference/packs/malware-investigation-and-response.
+        This playbook uses the Live Response feature to retrieve a file from an endpoint. Note that the endpoint ID will be set from the incident field "DeviceID".
+ playbookName: MDE - Retrieve File
+ type: playbook
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "8"
+ scriptarguments:
+ MachineID:
+ complex:
+ root: inputs.Agent_ID
+ transformers:
+ - operator: uniq
+ paths:
+ complex:
+ root: inputs.Path
+ transformers:
+ - operator: uniq
+ separatecontext: true
+ continueonerrortype: ""
+ loop:
+ iscommand: false
+ exitCondition: ""
+ wait: 1
+ max: 100
+ view: |-
+ {
+ "position": {
+ "x": 900,
+ "y": 340
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: true
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
view: |-
{
"linkLabelsPosition": {},
"paper": {
"dimensions": {
"height": 530,
- "width": 810,
- "x": 50,
+ "width": 2050,
+ "x": -360,
"y": 50
}
}
@@ -288,6 +541,12 @@ outputs:
- contextPath: AcquiredFile
description: The acquired file details.
type: Unknown
+- contextPath: ExtractedFiles
+ description: A list of file names that were extracted from the ZIP file.
+ type: string
+- contextPath: NonRetrievedFiles
+ description: A list of files that were not retrieved.
+ type: string
tests:
- No tests.
fromversion: 6.0.0
diff --git a/Packs/CommonPlaybooks/Playbooks/playbook-Get_File_Sample_From_Path_-_Generic_V3_README.md b/Packs/CommonPlaybooks/Playbooks/playbook-Get_File_Sample_From_Path_-_Generic_V3_README.md
index 72a3af1a812f..93d54547b55b 100644
--- a/Packs/CommonPlaybooks/Playbooks/playbook-Get_File_Sample_From_Path_-_Generic_V3_README.md
+++ b/Packs/CommonPlaybooks/Playbooks/playbook-Get_File_Sample_From_Path_-_Generic_V3_README.md
@@ -1,7 +1,9 @@
This playbook returns a file sample from a specified path and host that you input in the following playbooks:
-1) PS Remote Get File Sample From Path.
-2) Get File Sample From Path - VMware Carbon Black EDR (Live Response API).
-
+- PS Remote Get File Sample From Path
+- Get File Sample From Path - VMware Carbon Black EDR (Live Response API)
+- CrowdStrike Falcon - Retrieve File
+- MDE - Retrieve File
+- Cortex XDR - Retrieve File V2
## Dependencies
@@ -9,7 +11,10 @@ This playbook uses the following sub-playbooks, integrations, and scripts.
### Sub-playbooks
+* MDE - Retrieve File
+* CrowdStrike Falcon - Retrieve File
* Get File Sample From Path - VMware Carbon Black EDR - Live Response API
+* Cortex XDR - Retrieve File v2
* PS Remote Get File Sample From Path
### Integrations
@@ -51,7 +56,9 @@ This playbook does not use any commands.
| File.Extension | The file extension. | string |
| File.Name | The file name. | string |
| File.SSDeep | File SSDeep. | string |
-| AcquiredFile | The acquired file details. | string |
+| AcquiredFile | The acquired file details. | Unknown |
+| ExtractedFiles | A list of file names that were extracted from the ZIP file. | string |
+| NonRetrievedFiles | A list of files that were not retrieved. | string |
## Playbook Image
diff --git a/Packs/CommonPlaybooks/ReleaseNotes/2_6_0.json b/Packs/CommonPlaybooks/ReleaseNotes/2_6_0.json
new file mode 100644
index 000000000000..0f7578152379
--- /dev/null
+++ b/Packs/CommonPlaybooks/ReleaseNotes/2_6_0.json
@@ -0,0 +1,4 @@
+{
+ "breakingChanges": true,
+ "breakingChangesNotes": "The playbook inputs of the following playbooks have been reset: Calculate Severity - Generic v2, Calculate Severity By Highest DBotScore, Calculate Severity - Standard. If you are keeping the DBotScore context under another key, or if you've customized the inputs of the Calculate Severity playbooks or subplaybooks, you will need to reconfigure them. Out of the box, the playbooks have been adjusted to accommodate for the new inputs."
+}
diff --git a/Packs/CommonPlaybooks/ReleaseNotes/2_6_0.md b/Packs/CommonPlaybooks/ReleaseNotes/2_6_0.md
new file mode 100644
index 000000000000..4cf7e62ef593
--- /dev/null
+++ b/Packs/CommonPlaybooks/ReleaseNotes/2_6_0.md
@@ -0,0 +1,16 @@
+
+#### Playbooks
+
+##### Calculate Severity By Highest DBotScore
+
+- Changed the inputs of this playbook. If you customized the playbook input to anything other than the DBotScore object, you will need to re-adjust the new inputs. The playbook now accepts a list of unique DBotScore indicators and the maximum score given to an indicator, instead of the full DBotScore object.
+- Improved the performance of the playbook and reduced the size of the context.
+
+##### Calculate Severity - Generic v2
+
+- Changed the inputs of this playbook. If you customized the playbook input to anything other than the DBotScore object, you will need to re-adjust the new inputs. The playbook now accepts a list of unique DBotScore indicators and the maximum score given to an indicator, instead of the full DBotScore object.
+- Improved the performance of the playbook and reduced the size of the context.
+##### Calculate Severity - Standard
+
+- Changed the inputs of this playbook. If you customized the playbook input to anything other than the DBotScore object, you will need to re-adjust the new inputs. The playbook now accepts a list of unique DBotScore indicators and the maximum score given to an indicator, instead of the full DBotScore object.
+- Improved the performance of the playbook and reduced the size of the context.
diff --git a/Packs/CommonPlaybooks/ReleaseNotes/2_6_1.md b/Packs/CommonPlaybooks/ReleaseNotes/2_6_1.md
new file mode 100644
index 000000000000..b14934b9f6eb
--- /dev/null
+++ b/Packs/CommonPlaybooks/ReleaseNotes/2_6_1.md
@@ -0,0 +1,6 @@
+
+#### Playbooks
+
+##### Enrichment for Verdict
+
+Updated the command `SearchIncidentsV2` to `SearchAlertsV2`.
diff --git a/Packs/CommonPlaybooks/ReleaseNotes/2_6_2.md b/Packs/CommonPlaybooks/ReleaseNotes/2_6_2.md
new file mode 100644
index 000000000000..a3361cae3250
--- /dev/null
+++ b/Packs/CommonPlaybooks/ReleaseNotes/2_6_2.md
@@ -0,0 +1,11 @@
+
+#### Playbooks
+
+##### Get File Sample From Path - Generic V3
+
+- Added additional sub-playbooks:
+ * **Cortex XDR - Retrieve File v2**
+ * **MDE - Retrieve File**
+ * **CrowdStrike Falcon - Retrieve File**
+- Added outputs for extracted files and non-retrieved files.
+
diff --git a/Packs/CommonPlaybooks/ReleaseNotes/2_6_3.md b/Packs/CommonPlaybooks/ReleaseNotes/2_6_3.md
new file mode 100644
index 000000000000..bb9b0d3f0430
--- /dev/null
+++ b/Packs/CommonPlaybooks/ReleaseNotes/2_6_3.md
@@ -0,0 +1,11 @@
+
+#### Playbooks
+
+##### Block Indicators - Generic v3
+
+Updated the tasks 'Set indicators to block - Auto' and 'Set indicators to block - Manual' to remove empty values from the key `IndicatorsToBlock`.
+
+##### Containment Plan - Block Indicators
+
+Added a conditional task to check if there are any indicators that are blocked before setting those indicators in the incident context.
+
diff --git a/Packs/CommonPlaybooks/ReleaseNotes/2_6_4.md b/Packs/CommonPlaybooks/ReleaseNotes/2_6_4.md
new file mode 100644
index 000000000000..23d18467adc6
--- /dev/null
+++ b/Packs/CommonPlaybooks/ReleaseNotes/2_6_4.md
@@ -0,0 +1,3 @@
+## Common Playbooks
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/CommonPlaybooks/ReleaseNotes/2_6_5.md b/Packs/CommonPlaybooks/ReleaseNotes/2_6_5.md
new file mode 100644
index 000000000000..5249f8525674
--- /dev/null
+++ b/Packs/CommonPlaybooks/ReleaseNotes/2_6_5.md
@@ -0,0 +1,6 @@
+
+#### Playbooks
+
+##### Block Indicators - Generic v3
+
+Fixed the "Set indicators to block" task bug that causing errors.
diff --git a/Packs/CommonPlaybooks/ReleaseNotes/2_6_6.md b/Packs/CommonPlaybooks/ReleaseNotes/2_6_6.md
new file mode 100644
index 000000000000..15f1311f56f1
--- /dev/null
+++ b/Packs/CommonPlaybooks/ReleaseNotes/2_6_6.md
@@ -0,0 +1,10 @@
+
+#### Playbooks
+
+##### Detonate URL - Generic v1.5
+
+- Removed the duplicated playbook of the 'Detonate Url - WildFire'.
+- Added detonation with JoeSecurity vendor.
+- Removed the unnecessary section header of the "Detonate URL - VirusTotal (API v3)".
+- Removed the unnecessary condition of Check URL.
+
diff --git a/Packs/CommonPlaybooks/TestPlaybooks/playbook-Calculate_Severity_-_Generic_v2_-_Test.yml b/Packs/CommonPlaybooks/TestPlaybooks/playbook-Calculate_Severity_-_Generic_v2_-_Test.yml
index 1ec76fdfcafa..da1533512bb8 100644
--- a/Packs/CommonPlaybooks/TestPlaybooks/playbook-Calculate_Severity_-_Generic_v2_-_Test.yml
+++ b/Packs/CommonPlaybooks/TestPlaybooks/playbook-Calculate_Severity_-_Generic_v2_-_Test.yml
@@ -6,10 +6,10 @@ starttaskid: "0"
tasks:
"0":
id: "0"
- taskid: b10419a2-7351-4a97-885a-cfd514987681
+ taskid: e94b08bb-2e7c-4636-807f-b6e2648cf4e5
type: start
task:
- id: b10419a2-7351-4a97-885a-cfd514987681
+ id: e94b08bb-2e7c-4636-807f-b6e2648cf4e5
version: -1
name: ""
iscommand: false
@@ -36,10 +36,10 @@ tasks:
isautoswitchedtoquietmode: false
"1":
id: "1"
- taskid: c873349f-9030-424a-8052-7f0002f57d58
+ taskid: d72b2e6f-ea25-4633-825f-4710f51baa8e
type: regular
task:
- id: c873349f-9030-424a-8052-7f0002f57d58
+ id: d72b2e6f-ea25-4633-825f-4710f51baa8e
version: -1
name: Delete context
description: Delete field from context
@@ -75,10 +75,10 @@ tasks:
isautoswitchedtoquietmode: false
"2":
id: "2"
- taskid: 5a651054-8af5-48a5-8d3b-8d03944ee8db
+ taskid: 4e7646bd-9794-4812-82e4-277498c5baa8
type: regular
task:
- id: 5a651054-8af5-48a5-8d3b-8d03944ee8db
+ id: 4e7646bd-9794-4812-82e4-277498c5baa8
version: -1
name: Add username to account in context
description: Sets a value into the context with the given context key
@@ -115,10 +115,10 @@ tasks:
isautoswitchedtoquietmode: false
"3":
id: "3"
- taskid: 1efe02a5-ed07-4b83-82fb-e8f28d099923
+ taskid: fba89476-0aab-491c-8fc5-df71b758302d
type: title
task:
- id: 1efe02a5-ed07-4b83-82fb-e8f28d099923
+ id: fba89476-0aab-491c-8fc5-df71b758302d
version: -1
name: Critical Assets Data Initialization
type: title
@@ -151,10 +151,10 @@ tasks:
isautoswitchedtoquietmode: false
"4":
id: "4"
- taskid: b4f48886-7cf3-48d8-8653-050fae5fe405
+ taskid: 76ca583d-91d3-44b3-8cf5-a289cbe8c6c9
type: regular
task:
- id: b4f48886-7cf3-48d8-8653-050fae5fe405
+ id: 76ca583d-91d3-44b3-8cf5-a289cbe8c6c9
version: -1
name: Get AD user
script: Active Directory Query v2|||ad-get-user
@@ -186,10 +186,10 @@ tasks:
isautoswitchedtoquietmode: false
"5":
id: "5"
- taskid: f336b3bd-ce06-4496-8a53-ea22921ade46
+ taskid: 7ab692f1-aa1a-4ad2-8551-e5d9fc74151c
type: regular
task:
- id: f336b3bd-ce06-4496-8a53-ea22921ade46
+ id: 7ab692f1-aa1a-4ad2-8551-e5d9fc74151c
version: -1
name: Get AD computers
script: Active Directory Query v2|||ad-get-computer
@@ -218,10 +218,10 @@ tasks:
isautoswitchedtoquietmode: false
"6":
id: "6"
- taskid: 7372310e-bc90-49f6-8009-119081fbed0e
+ taskid: da30408f-c816-409d-8073-8cdb3ab66c77
type: title
task:
- id: 7372310e-bc90-49f6-8009-119081fbed0e
+ id: da30408f-c816-409d-8073-8cdb3ab66c77
version: -1
name: DBotScore Data Initialization
type: title
@@ -250,10 +250,10 @@ tasks:
isautoswitchedtoquietmode: false
"7":
id: "7"
- taskid: 7c5bb9f3-7bca-4baf-871a-3b5d1be463f5
+ taskid: 028b1b89-d77e-4115-827f-5f394d9708df
type: title
task:
- id: 7c5bb9f3-7bca-4baf-871a-3b5d1be463f5
+ id: 028b1b89-d77e-4115-827f-5f394d9708df
version: -1
name: Incident Severity Initialization
type: title
@@ -281,10 +281,10 @@ tasks:
isautoswitchedtoquietmode: false
"8":
id: "8"
- taskid: d4f81054-a64c-4d0f-8972-457d730122ae
+ taskid: 15693dd5-e2dd-4221-8973-d3be1d639168
type: regular
task:
- id: d4f81054-a64c-4d0f-8972-457d730122ae
+ id: 15693dd5-e2dd-4221-8973-d3be1d639168
version: -1
name: Create malicious URL indicator
description: commands.local.cmd.new.indicator
@@ -294,7 +294,7 @@ tasks:
brand: Builtin
nexttasks:
'#none#':
- - "16"
+ - "25"
scriptarguments:
comment:
simple: Malicious URL for test
@@ -304,7 +304,9 @@ tasks:
simple: URL
value:
simple: http://annachapman3.icu/eu/1.exe
- reputationcalc: 2
+ verdict:
+ simple: Malicious
+ reputationcalc: 1
separatecontext: false
view: |-
{
@@ -323,10 +325,10 @@ tasks:
isautoswitchedtoquietmode: false
"10":
id: "10"
- taskid: 085995d8-21b7-42cd-8232-a8da42b7ae7f
+ taskid: f6281592-2116-4fa6-8c75-e18f08913d41
type: regular
task:
- id: 085995d8-21b7-42cd-8232-a8da42b7ae7f
+ id: f6281592-2116-4fa6-8c75-e18f08913d41
version: -1
name: Create benign URL indicator
description: commands.local.cmd.new.indicator
@@ -361,10 +363,10 @@ tasks:
isautoswitchedtoquietmode: false
"11":
id: "11"
- taskid: 0c14b118-4c3c-4506-8484-4007241c61eb
+ taskid: 2c00a6fa-996b-417c-8c74-69b00c74ad2c
type: regular
task:
- id: 0c14b118-4c3c-4506-8484-4007241c61eb
+ id: 2c00a6fa-996b-417c-8c74-69b00c74ad2c
version: -1
name: Set incident severity to medium
script: Builtin|||setIncident
@@ -396,10 +398,10 @@ tasks:
isautoswitchedtoquietmode: false
"13":
id: "13"
- taskid: 32e93e26-729b-45b5-844a-a4485f0ed883
+ taskid: 84575712-859e-4a0d-8f69-62da4c4e459d
type: title
task:
- id: 32e93e26-729b-45b5-844a-a4485f0ed883
+ id: 84575712-859e-4a0d-8f69-62da4c4e459d
version: -1
name: Email Authentication Initialization
type: title
@@ -427,10 +429,10 @@ tasks:
isautoswitchedtoquietmode: false
"14":
id: "14"
- taskid: a1d29ea8-4a72-4385-80a2-ebdd8ed090e4
+ taskid: 068be0d1-62fa-45c7-8f87-eae4d5541706
type: regular
task:
- id: a1d29ea8-4a72-4385-80a2-ebdd8ed090e4
+ id: 068be0d1-62fa-45c7-8f87-eae4d5541706
version: -1
name: Set email authentication verdict to "Fail"
description: Sets a value into the context with the given context key
@@ -465,10 +467,10 @@ tasks:
isautoswitchedtoquietmode: false
"16":
id: "16"
- taskid: 6a6c50c1-cf59-46a2-8596-e6f02fc31d7d
+ taskid: c0410da5-1684-4412-848e-c78cfdf71482
type: playbook
task:
- id: 6a6c50c1-cf59-46a2-8596-e6f02fc31d7d
+ id: c0410da5-1684-4412-848e-c78cfdf71482
version: -1
name: Calculate Severity - Generic v2
playbookName: Calculate Severity - Generic v2
@@ -492,12 +494,15 @@ tasks:
quietmode: 0
isoversize: false
isautoswitchedtoquietmode: false
+ nexttasks:
+ '#none#':
+ - "24"
"17":
id: "17"
- taskid: 4c137427-fef0-453c-833c-00a7e7adabcb
+ taskid: 6034cc8f-2240-4840-8a44-d3774770b478
type: regular
task:
- id: 4c137427-fef0-453c-833c-00a7e7adabcb
+ id: 6034cc8f-2240-4840-8a44-d3774770b478
version: -1
name: Add hostname to context
description: Sets a value into the context with the given context key
@@ -534,10 +539,10 @@ tasks:
isautoswitchedtoquietmode: false
"18":
id: "18"
- taskid: d8cad198-3915-4027-8f88-07c802373043
+ taskid: ed4e38cd-e4ab-4abc-82a3-bb1ed6c03130
type: regular
task:
- id: d8cad198-3915-4027-8f88-07c802373043
+ id: ed4e38cd-e4ab-4abc-82a3-bb1ed6c03130
version: -1
name: Add hostname to context
description: Sets a value into the context with the given context key
@@ -574,10 +579,10 @@ tasks:
isautoswitchedtoquietmode: false
"19":
id: "19"
- taskid: 4d629957-3fe6-4faa-8c5f-7be39cea6602
+ taskid: 7f8878db-8ad7-4414-805b-0a4cc7fe75aa
type: regular
task:
- id: 4d629957-3fe6-4faa-8c5f-7be39cea6602
+ id: 7f8878db-8ad7-4414-805b-0a4cc7fe75aa
version: -1
name: Add hostname to context
description: Sets a value into the context with the given context key
@@ -614,10 +619,10 @@ tasks:
isautoswitchedtoquietmode: false
"20":
id: "20"
- taskid: 717c2129-4cde-4f4b-8301-0d3dbae8f6cd
+ taskid: 8b8984a4-c25c-4fe4-8287-47d8b22dba74
type: title
task:
- id: 717c2129-4cde-4f4b-8301-0d3dbae8f6cd
+ id: 8b8984a4-c25c-4fe4-8287-47d8b22dba74
version: -1
name: Test Empty Inputs
type: title
@@ -645,10 +650,10 @@ tasks:
isautoswitchedtoquietmode: false
"21":
id: "21"
- taskid: 0b496742-3ece-4238-86f1-3dba4e5b11fb
+ taskid: 0a05e82f-05e2-4800-8267-9520bba8cea4
type: title
task:
- id: 0b496742-3ece-4238-86f1-3dba4e5b11fb
+ id: 0a05e82f-05e2-4800-8267-9520bba8cea4
version: -1
name: Test Empty Inputs
type: title
@@ -676,10 +681,10 @@ tasks:
isautoswitchedtoquietmode: false
"22":
id: "22"
- taskid: b11be79f-eb56-4faa-8104-3736f6a277b0
+ taskid: cd135097-bbd3-4bfb-8654-6f342483b9f8
type: regular
task:
- id: b11be79f-eb56-4faa-8104-3736f6a277b0
+ id: cd135097-bbd3-4bfb-8654-6f342483b9f8
version: -1
name: Delete context
description: Delete field from context
@@ -712,10 +717,10 @@ tasks:
isautoswitchedtoquietmode: false
"23":
id: "23"
- taskid: 57fc2d8e-66d3-4c44-89ff-03ff0500812d
+ taskid: 868dff1f-1237-4669-877f-ffa9c68ce23e
type: playbook
task:
- id: 57fc2d8e-66d3-4c44-89ff-03ff0500812d
+ id: 868dff1f-1237-4669-877f-ffa9c68ce23e
version: -1
name: Calculate Severity - Generic v2
description: |-
@@ -783,12 +788,202 @@ tasks:
quietmode: 0
isoversize: false
isautoswitchedtoquietmode: false
+ "24":
+ id: "24"
+ taskid: e5c230ea-d56a-4bf6-8646-e43532f595d8
+ type: condition
+ task:
+ id: e5c230ea-d56a-4bf6-8646-e43532f595d8
+ version: -1
+ name: Is the severity critical?
+ type: condition
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#default#':
+ - "27"
+ "yes":
+ - "26"
+ separatecontext: false
+ conditions:
+ - label: "yes"
+ condition:
+ - - operator: isEqualString
+ left:
+ value:
+ complex:
+ root: incident
+ accessor: severity
+ iscontext: true
+ right:
+ value:
+ simple: "4"
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 450,
+ "y": 1930
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "25":
+ id: "25"
+ taskid: c58531de-1998-4e8b-809c-4795de0c32dc
+ type: regular
+ task:
+ id: c58531de-1998-4e8b-809c-4795de0c32dc
+ version: -1
+ name: Create DBotScore for URL
+ description: Set a value in context under the key you entered.
+ scriptName: Set
+ type: regular
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "29"
+ scriptarguments:
+ append:
+ simple: "true"
+ key:
+ simple: DBotScore
+ value:
+ simple: "{\n\t\"Indicator\": \"http://annachapman3.icu/eu/1.exe\",\n\t\"Reliability\": \"A - Completely reliable\",\n\t\"Score\": 3,\n\t\"Type\": \"URL\",\n\t\"Vendor\": \"Manual\"\n}"
+ reputationcalc: 1
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": -160,
+ "y": 1290
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "26":
+ id: "26"
+ taskid: cf71a47e-f9f3-4dcb-8c0d-8433bcf9aace
+ type: title
+ task:
+ id: cf71a47e-f9f3-4dcb-8c0d-8433bcf9aace
+ version: -1
+ name: Done
+ type: title
+ iscommand: false
+ brand: ""
+ description: ''
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 450,
+ "y": 2370
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "27":
+ id: "27"
+ taskid: 1c307d2b-284b-4aa1-8b6a-c43a9f1fdcf9
+ type: regular
+ task:
+ id: 1c307d2b-284b-4aa1-8b6a-c43a9f1fdcf9
+ version: -1
+ name: Fail the test
+ description: Prints an error entry with a given message.
+ scriptName: PrintErrorEntry
+ type: regular
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "26"
+ scriptarguments:
+ message:
+ simple: |-
+ The severity of this incident should be critical, because we created a critical account in context and the rest of the indicators have lower verdicts.
+ Please check the Calculate Severity playbook and subplaybooks, especially the Critical Assets subplaybook, and check why the severity is not as expected.
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 770,
+ "y": 2130
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "29":
+ id: "29"
+ taskid: 198c7a6c-f753-443e-8625-2987bfbd70c0
+ type: regular
+ task:
+ id: 198c7a6c-f753-443e-8625-2987bfbd70c0
+ version: -1
+ name: Update indicator to malicious in case it already exists as benign
+ description: commands.local.cmd.set.indicator
+ script: Builtin|||setIndicator
+ type: regular
+ iscommand: true
+ brand: Builtin
+ nexttasks:
+ '#none#':
+ - "16"
+ scriptarguments:
+ value:
+ simple: http://annachapman3.icu/eu/1.exe
+ verdict:
+ simple: Malicious
+ reputationcalc: 1
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": -160,
+ "y": 1460
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
view: |-
{
- "linkLabelsPosition": {},
+ "linkLabelsPosition": {
+ "24_26_yes": 0.35
+ },
"paper": {
"dimensions": {
- "height": 1775,
+ "height": 2385,
"width": 3440,
"x": -1150,
"y": 50
@@ -797,4 +992,4 @@ view: |-
}
inputs: []
outputs: []
-description: This Playbook tests The Calculate Severity Generic V2 playbook with multiple input
+description: This playbook tests the Calculate Severity - Generic v2 playbook with multiple inputs.
diff --git a/Packs/CommonPlaybooks/TestPlaybooks/playbook-Calculate_Severity_-_Standard_-_Test.yml b/Packs/CommonPlaybooks/TestPlaybooks/playbook-Calculate_Severity_-_Standard_-_Test.yml
index 0ff043336050..fb14ae28da0b 100644
--- a/Packs/CommonPlaybooks/TestPlaybooks/playbook-Calculate_Severity_-_Standard_-_Test.yml
+++ b/Packs/CommonPlaybooks/TestPlaybooks/playbook-Calculate_Severity_-_Standard_-_Test.yml
@@ -7,15 +7,15 @@ starttaskid: "0"
tasks:
"0":
id: "0"
- taskid: a49ae08f-6b81-412a-84a9-d8e17ae2bd26
+ taskid: 913682a8-fb0c-42bb-84b2-1adc471453b9
type: start
task:
- id: a49ae08f-6b81-412a-84a9-d8e17ae2bd26
+ id: 913682a8-fb0c-42bb-84b2-1adc471453b9
version: -1
name: ""
- description: ""
iscommand: false
brand: ""
+ description: ''
nexttasks:
'#none#':
- "1"
@@ -30,12 +30,17 @@ tasks:
note: false
timertriggers: []
ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"1":
id: "1"
- taskid: d9d0ebac-7038-405f-883b-722d3c1fbb65
+ taskid: 6b8c8523-7a7d-4fd2-84ef-01378ad9a677
type: regular
task:
- id: d9d0ebac-7038-405f-883b-722d3c1fbb65
+ id: 6b8c8523-7a7d-4fd2-84ef-01378ad9a677
version: -1
name: Delete context
description: Clears the context for a fresh start of the test.
@@ -46,14 +51,10 @@ tasks:
nexttasks:
'#none#':
- "4"
- - "2"
+ - "9"
scriptarguments:
all:
simple: "yes"
- index: {}
- key: {}
- keysToKeep: {}
- subplaybook: {}
reputationcalc: 1
separatecontext: false
view: |-
@@ -66,85 +67,17 @@ tasks:
note: false
timertriggers: []
ignoreworker: false
- "2":
- id: "2"
- taskid: c6d9c6a1-5bca-4e16-8a06-d63675b317f5
- type: regular
- task:
- id: c6d9c6a1-5bca-4e16-8a06-d63675b317f5
- version: -1
- name: Add malicious indicator to Minemeld
- script: '|||minemeld-add-to-miner'
- type: regular
- iscommand: true
- brand: ""
- nexttasks:
- '#none#':
- - "3"
- scriptarguments:
- comment:
- simple: Malicious indicator for test
- indicator:
- simple: http://annachapman3.icu/eu/1.exe
- miner:
- simple: Malicious
- separatecontext: false
- view: |-
- {
- "position": {
- "x": 40,
- "y": 350
- }
- }
- note: false
- timertriggers: []
- ignoreworker: false
- "3":
- id: "3"
- taskid: 04197795-431b-4d8b-8b82-b4bcfbdb9b15
- type: regular
- task:
- id: 04197795-431b-4d8b-8b82-b4bcfbdb9b15
- version: -1
- name: Get malicious URL reputation using Minemeld
- script: Palo Alto Minemeld|||url
- type: regular
- iscommand: true
- brand: Palo Alto Minemeld
- nexttasks:
- '#none#':
- - "5"
- scriptarguments:
- include_inactive: {}
- limit: {}
- long: {}
- public: {}
- retries: {}
- sampleSize: {}
- submitWait: {}
- threshold: {}
- timeout: {}
- url:
- simple: http://annachapman3.icu/eu/1.exe
- wait: {}
- reputationcalc: 1
- separatecontext: false
- view: |-
- {
- "position": {
- "x": 40,
- "y": 510
- }
- }
- note: false
- timertriggers: []
- ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"4":
id: "4"
- taskid: 42d6e933-3ec7-4d7f-8327-c8167dcef94b
+ taskid: e5f27ab8-ea65-4803-84fe-94fef6705221
type: regular
task:
- id: 42d6e933-3ec7-4d7f-8327-c8167dcef94b
+ id: e5f27ab8-ea65-4803-84fe-94fef6705221
version: -1
name: Set incident severity to medium
description: Sets the incident severity to medium.
@@ -156,146 +89,8 @@ tasks:
'#none#':
- "5"
scriptarguments:
- addLabels: {}
- affecteddata: {}
- affecteddatatype: {}
- affectedhosts: {}
- affectedindividualscontactinformation: {}
- affectedips: {}
- app: {}
- approximatenumberofaffecteddatasubjects: {}
- assetid: {}
- attachmentcount: {}
- attachmentextension: {}
- attachmenthash: {}
- attachmentid: {}
- attachmentitem: {}
- attachmentname: {}
- attachmentsize: {}
- attachmenttype: {}
- backupowner: {}
- bugtraq: {}
- city: {}
- closeNotes: {}
- closeReason: {}
- companyaddress: {}
- companycity: {}
- companycountry: {}
- companyhasinsuranceforthebreach: {}
- companyname: {}
- companypostalcode: {}
- contactaddress: {}
- contactname: {}
- country: {}
- countrywherebusinesshasitsmainestablishment: {}
- countrywherethebreachtookplace: {}
- customFields: {}
- cve: {}
- cvss: {}
- dataencryptionstatus: {}
- datetimeofthebreach: {}
- daysbetweenreportcreation: {}
- deleteEmptyField: {}
- dest: {}
- destinationip: {}
- destntdomain: {}
- details: {}
- detectedusers: {}
- dpoemailaddress: {}
- duration: {}
- emailaddress: {}
- emailbcc: {}
- emailbody: {}
- emailbodyformat: {}
- emailbodyhtml: {}
- emailbodyhtmlraw: {}
- emailcc: {}
- emailclassification: {}
- emailclientname: {}
- emailfrom: {}
- emailfromdisplayname: {}
- emailinreplyto: {}
- emailkeywords: {}
- emailmessageid: {}
- emailreceived: {}
- emailreplyto: {}
- emailreturnpath: {}
- emailsenderdomain: {}
- emailsenderip: {}
- emailsize: {}
- emailsource: {}
- emailsubject: {}
- emailsubjectlanguage: {}
- emailto: {}
- emailtocount: {}
- emailurlclicked: {}
- eventid: {}
- falses: {}
- fetchid: {}
- fetchtype: {}
- filehash: {}
- filename: {}
- filepath: {}
- hostid: {}
- hostname: {}
- htmlimage: {}
- htmlrenderedimage: {}
- id: {}
- important: {}
- importantfield: {}
- isthedatasubjecttodpia: {}
- labels: {}
- likelyimpact: {}
- maliciouscauseifthecauseisamaliciousattack: {}
- malwarefamily: {}
- mdtest: {}
- measurestomitigate: {}
- myfield: {}
- name: {}
- occurred: {}
- owner: {}
- phase: {}
- phishingsubtype: {}
- possiblecauseofthebreach: {}
- postalcode: {}
- relateddomain: {}
- replacePlaybook: {}
- reporteduser: {}
- reportinguser: {}
- roles: {}
- screenshot: {}
- screenshot2: {}
- sectorofaffectedparty: {}
- selector: {}
severity:
simple: "2"
- signature: {}
- single: {}
- single2: {}
- sizenumberofemployees: {}
- sizeturnover: {}
- sla: {}
- slaField: {}
- source: {}
- src: {}
- srcntdomain: {}
- srcuser: {}
- systems: {}
- telephoneno: {}
- test: {}
- test2: {}
- testfield: {}
- timeassignedtolevel2: {}
- timefield1: {}
- timelevel1: {}
- type: {}
- user: {}
- username: {}
- vendorid: {}
- vendorproduct: {}
- vulnerabilitycategory: {}
- whereisdatahosted: {}
- xdr: {}
reputationcalc: 1
separatecontext: false
view: |-
@@ -308,19 +103,24 @@ tasks:
note: false
timertriggers: []
ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"5":
id: "5"
- taskid: f0f9b58d-fdc8-48f8-89c4-879674a8913d
+ taskid: 46196465-a23d-4ee4-8b5b-0946819bc5d1
type: playbook
task:
- id: f0f9b58d-fdc8-48f8-89c4-879674a8913d
+ id: 46196465-a23d-4ee4-8b5b-0946819bc5d1
version: -1
name: Calculate Severity - Core
- description: ""
playbookName: Calculate Severity - Standard
type: playbook
iscommand: false
brand: ""
+ description: ''
nexttasks:
'#none#':
- "6"
@@ -328,19 +128,24 @@ tasks:
view: |-
{
"position": {
- "x": 450,
- "y": 700
+ "x": 440,
+ "y": 850
}
}
note: false
timertriggers: []
ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"6":
id: "6"
- taskid: f02c69e4-baa2-4126-863f-50b925294c6c
+ taskid: ed93d7eb-f892-4a32-8b55-40aabcac2d53
type: condition
task:
- id: f02c69e4-baa2-4126-863f-50b925294c6c
+ id: ed93d7eb-f892-4a32-8b55-40aabcac2d53
version: -1
name: Is the severity of the incident "high"?
type: condition
@@ -368,42 +173,52 @@ tasks:
view: |-
{
"position": {
- "x": 450,
- "y": 880
+ "x": 440,
+ "y": 1030
}
}
note: false
timertriggers: []
ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"7":
id: "7"
- taskid: 3c289d2e-afed-4b06-8643-bd1f17420932
+ taskid: b5a3b88a-b1da-4633-8183-3fe490003343
type: title
task:
- id: 3c289d2e-afed-4b06-8643-bd1f17420932
+ id: b5a3b88a-b1da-4633-8183-3fe490003343
version: -1
name: Done
- description: ""
type: title
iscommand: false
brand: ""
+ description: ''
separatecontext: false
view: |-
{
"position": {
- "x": 450,
- "y": 1270
+ "x": 440,
+ "y": 1420
}
}
note: false
timertriggers: []
ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"8":
id: "8"
- taskid: 5bff82cc-4e12-40b6-846f-8b93c1cc1da5
+ taskid: 6e41c079-13da-44f8-8fc3-d15922c26392
type: regular
task:
- id: 5bff82cc-4e12-40b6-846f-8b93c1cc1da5
+ id: 6e41c079-13da-44f8-8fc3-d15922c26392
version: -1
name: Make test fail
description: Prints an error entry with a given message
@@ -422,13 +237,140 @@ tasks:
view: |-
{
"position": {
- "x": 740,
- "y": 1090
+ "x": 730,
+ "y": 1240
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ continueonerrortype: ""
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "9":
+ id: "9"
+ taskid: 3b50e092-bac7-492b-87ce-fe810396d859
+ type: regular
+ task:
+ id: 3b50e092-bac7-492b-87ce-fe810396d859
+ version: -1
+ name: Create malicious URL indicator
+ description: commands.local.cmd.new.indicator
+ script: Builtin|||createNewIndicator
+ type: regular
+ iscommand: true
+ brand: Builtin
+ nexttasks:
+ '#none#':
+ - "10"
+ scriptarguments:
+ comment:
+ simple: Malicious URL for test
+ reputation:
+ simple: Bad
+ type:
+ simple: URL
+ value:
+ simple: http://annachapman3.icu/eu/1.exe
+ verdict:
+ simple: Malicious
+ reputationcalc: 1
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 80,
+ "y": 350
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "10":
+ id: "10"
+ taskid: 247afeb2-4cff-4fd0-8c65-6d77defa1c21
+ type: regular
+ task:
+ id: 247afeb2-4cff-4fd0-8c65-6d77defa1c21
+ version: -1
+ name: Create DBotScore for URL
+ description: Set a value in context under the key you entered.
+ scriptName: Set
+ type: regular
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "11"
+ scriptarguments:
+ append:
+ simple: "true"
+ key:
+ simple: DBotScore
+ value:
+ simple: "{\n\t\"Indicator\": \"http://annachapman3.icu/eu/1.exe\",\n\t\"Reliability\": \"A - Completely reliable\",\n\t\"Score\": 3,\n\t\"Type\": \"URL\",\n\t\"Vendor\": \"Manual\"\n}"
+ reputationcalc: 1
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 80,
+ "y": 510
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "11":
+ id: "11"
+ taskid: 9e7a55fc-167a-4ac3-807f-f2d42d4d64f3
+ type: regular
+ task:
+ id: 9e7a55fc-167a-4ac3-807f-f2d42d4d64f3
+ version: -1
+ name: Update indicator to malicious in case it already exists as benign
+ description: commands.local.cmd.set.indicator
+ script: Builtin|||setIndicator
+ type: regular
+ iscommand: true
+ brand: Builtin
+ nexttasks:
+ '#none#':
+ - "5"
+ scriptarguments:
+ value:
+ simple: http://annachapman3.icu/eu/1.exe
+ verdict:
+ simple: Malicious
+ reputationcalc: 1
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 80,
+ "y": 680
}
}
note: false
timertriggers: []
ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
view: |-
{
"linkLabelsPosition": {
@@ -437,9 +379,9 @@ view: |-
},
"paper": {
"dimensions": {
- "height": 1285,
- "width": 1200,
- "x": 40,
+ "height": 1435,
+ "width": 1160,
+ "x": 80,
"y": 50
}
}
diff --git a/Packs/CommonPlaybooks/TestPlaybooks/playbook-Retrieve_File_from_Endpoint_-_Generic_V3_Test.yml b/Packs/CommonPlaybooks/TestPlaybooks/playbook-Retrieve_File_from_Endpoint_-_Generic_V3_Test.yml
index d11febf09a0b..31fb8ed9e1a2 100644
--- a/Packs/CommonPlaybooks/TestPlaybooks/playbook-Retrieve_File_from_Endpoint_-_Generic_V3_Test.yml
+++ b/Packs/CommonPlaybooks/TestPlaybooks/playbook-Retrieve_File_from_Endpoint_-_Generic_V3_Test.yml
@@ -237,11 +237,11 @@ tasks:
transformers:
- operator: uniq
MD5:
- simple: 523613A7B9DFA398CBD5EBD2DD0F4F38
+ simple: B4EA38EB798EA1C1E067DFD176B882BB
Path:
simple: C:\\Windows\\system32\\drivers\\etc\\HOSTS
SHA256:
- simple: 3e59379f585ebf0becb6b4e06d0fbbf806de28a4bb256e837b4555f1b4245571
+ simple: 055D7A25DECF6769BF4FB2F3BC9FD3159C8B42972818177E44975929D97292DE
separatecontext: true
continueonerrortype: ""
loop:
diff --git a/Packs/CommonPlaybooks/doc_files/Calculate_Severity_-_Standard.png b/Packs/CommonPlaybooks/doc_files/Calculate_Severity_-_Standard.png
new file mode 100644
index 000000000000..9b42c7e35b03
Binary files /dev/null and b/Packs/CommonPlaybooks/doc_files/Calculate_Severity_-_Standard.png differ
diff --git a/Packs/CommonPlaybooks/doc_files/Calculate_Severity_By_Highest_DBotScore.png b/Packs/CommonPlaybooks/doc_files/Calculate_Severity_By_Highest_DBotScore.png
index e4c33f1790a9..2819957abcc6 100644
Binary files a/Packs/CommonPlaybooks/doc_files/Calculate_Severity_By_Highest_DBotScore.png and b/Packs/CommonPlaybooks/doc_files/Calculate_Severity_By_Highest_DBotScore.png differ
diff --git a/Packs/CommonPlaybooks/doc_files/Detonate_URL_-_Generic_v1.5.png b/Packs/CommonPlaybooks/doc_files/Detonate_URL_-_Generic_v1.5.png
index 83fe158582f4..095cc8fa0320 100644
Binary files a/Packs/CommonPlaybooks/doc_files/Detonate_URL_-_Generic_v1.5.png and b/Packs/CommonPlaybooks/doc_files/Detonate_URL_-_Generic_v1.5.png differ
diff --git a/Packs/CommonPlaybooks/doc_files/Get_File_Sample_From_Path_-_Generic_V3.png b/Packs/CommonPlaybooks/doc_files/Get_File_Sample_From_Path_-_Generic_V3.png
index 705cb08c1125..e3d755f2f771 100644
Binary files a/Packs/CommonPlaybooks/doc_files/Get_File_Sample_From_Path_-_Generic_V3.png and b/Packs/CommonPlaybooks/doc_files/Get_File_Sample_From_Path_-_Generic_V3.png differ
diff --git a/Packs/CommonPlaybooks/pack_metadata.json b/Packs/CommonPlaybooks/pack_metadata.json
index 4799e6bc4845..189721fe6b83 100644
--- a/Packs/CommonPlaybooks/pack_metadata.json
+++ b/Packs/CommonPlaybooks/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Common Playbooks",
"description": "Frequently used playbooks pack.",
"support": "xsoar",
- "currentVersion": "2.5.9",
+ "currentVersion": "2.6.6",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CommonReports/ReleaseNotes/1_0_5.md b/Packs/CommonReports/ReleaseNotes/1_0_5.md
new file mode 100644
index 000000000000..726d6689b015
--- /dev/null
+++ b/Packs/CommonReports/ReleaseNotes/1_0_5.md
@@ -0,0 +1,6 @@
+
+#### Reports
+
+##### Daily incidents
+
+- Documentation and metadata improvements.
diff --git a/Packs/CommonReports/ReleaseNotes/1_0_6.md b/Packs/CommonReports/ReleaseNotes/1_0_6.md
new file mode 100644
index 000000000000..e21e99f3e845
--- /dev/null
+++ b/Packs/CommonReports/ReleaseNotes/1_0_6.md
@@ -0,0 +1,3 @@
+## Common Reports
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/CommonReports/Reports/report-csv-dailyIncidentReport.json b/Packs/CommonReports/Reports/report-csv-dailyIncidentReport.json
index 8e5986e3e09f..0d6435a8d7f3 100644
--- a/Packs/CommonReports/Reports/report-csv-dailyIncidentReport.json
+++ b/Packs/CommonReports/Reports/report-csv-dailyIncidentReport.json
@@ -1,7 +1,7 @@
{
"id": "csvDailyIncidentReport",
"name": "Daily incidents",
- "description": "Daily summary of all current open incidents (CSV format).",
+ "description": "Daily summary of incidents statistics, followed by a list of all current open incidents.",
"tags": [
],
diff --git a/Packs/CommonReports/pack_metadata.json b/Packs/CommonReports/pack_metadata.json
index 12b7ad4ccaf2..064ff8274872 100644
--- a/Packs/CommonReports/pack_metadata.json
+++ b/Packs/CommonReports/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Common Reports",
"description": "Frequently used reports pack.",
"support": "xsoar",
- "currentVersion": "1.0.4",
+ "currentVersion": "1.0.6",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CommonScripts/.secrets-ignore b/Packs/CommonScripts/.secrets-ignore
index 825d0e44ccba..597c6c4203eb 100644
--- a/Packs/CommonScripts/.secrets-ignore
+++ b/Packs/CommonScripts/.secrets-ignore
@@ -116,4 +116,6 @@ testing@gmail.com
a853a1b0-1ffe-4e37-d9a9-a27c6bc0bd5b@gmail.com
cipher=ECDHE-RSA-AES128-GCM-SHA256
07.23.05.38
-multipart/signed
\ No newline at end of file
+multipart/signed
+123.123.123.123
+107.66.225.91
\ No newline at end of file
diff --git a/Packs/CommonScripts/ReleaseNotes/1_13_20.md b/Packs/CommonScripts/ReleaseNotes/1_13_20.md
new file mode 100644
index 000000000000..beda8743ac2a
--- /dev/null
+++ b/Packs/CommonScripts/ReleaseNotes/1_13_20.md
@@ -0,0 +1,7 @@
+
+#### Scripts
+
+##### DBotUpdateLogoURLPhishing
+
+- Fixed an issue where the script failed to run.
+- Updated the Docker image to: *demisto/mlurlphishing:1.0.0.85127*.
diff --git a/Packs/CommonScripts/ReleaseNotes/1_13_21.md b/Packs/CommonScripts/ReleaseNotes/1_13_21.md
new file mode 100644
index 000000000000..18d4a83bc68e
--- /dev/null
+++ b/Packs/CommonScripts/ReleaseNotes/1_13_21.md
@@ -0,0 +1,6 @@
+
+#### Scripts
+
+##### Set
+
+- Fixed an issue where the **Set** command couldn't set large numbers.
diff --git a/Packs/CommonScripts/ReleaseNotes/1_13_22.md b/Packs/CommonScripts/ReleaseNotes/1_13_22.md
new file mode 100644
index 000000000000..c28625dd7a36
--- /dev/null
+++ b/Packs/CommonScripts/ReleaseNotes/1_13_22.md
@@ -0,0 +1,6 @@
+
+#### Scripts
+
+##### Sleep
+
+- Fixed an issue where long run times failed when using integer values passed from the context.
diff --git a/Packs/CommonScripts/ReleaseNotes/1_13_23.md b/Packs/CommonScripts/ReleaseNotes/1_13_23.md
new file mode 100644
index 000000000000..6311a9d003a1
--- /dev/null
+++ b/Packs/CommonScripts/ReleaseNotes/1_13_23.md
@@ -0,0 +1,3 @@
+## Common Scripts
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/CommonScripts/ReleaseNotes/1_13_24.md b/Packs/CommonScripts/ReleaseNotes/1_13_24.md
new file mode 100644
index 000000000000..5450cfdec90b
--- /dev/null
+++ b/Packs/CommonScripts/ReleaseNotes/1_13_24.md
@@ -0,0 +1,7 @@
+
+#### Scripts
+
+##### ReadPDFFileV2
+
+- Added support for extracting hashes.
+- Updated the Docker image to: *demisto/readpdf:1.0.0.85832*.
diff --git a/Packs/CommonScripts/ReleaseNotes/1_13_25.md b/Packs/CommonScripts/ReleaseNotes/1_13_25.md
new file mode 100644
index 000000000000..aecf29eb48a5
--- /dev/null
+++ b/Packs/CommonScripts/ReleaseNotes/1_13_25.md
@@ -0,0 +1,7 @@
+
+#### Scripts
+
+##### SetGridField
+- Updated the Docker image to: *demisto/pandas:1.0.0.86039*.
+
+- Fixed an issue where the script fails on XSOAR 8.0 and above.
diff --git a/Packs/CommonScripts/ReleaseNotes/1_13_26.md b/Packs/CommonScripts/ReleaseNotes/1_13_26.md
new file mode 100644
index 000000000000..262ecd765b8d
--- /dev/null
+++ b/Packs/CommonScripts/ReleaseNotes/1_13_26.md
@@ -0,0 +1,8 @@
+
+#### Scripts
+
+##### CopyNotesToIncident
+- Updated the Docker image to: *demisto/python3:3.10.13.86272*.
+
+- Added `auto_extract` argument to the script. When set to `true`, the script will extract indicators from the note. The default value is `false`.
+
diff --git a/Packs/CommonScripts/Scripts/CopyNotesToIncident/CopyNotesToIncident.py b/Packs/CommonScripts/Scripts/CopyNotesToIncident/CopyNotesToIncident.py
index e4b4e3c97776..787ad65fd3dd 100644
--- a/Packs/CommonScripts/Scripts/CopyNotesToIncident/CopyNotesToIncident.py
+++ b/Packs/CommonScripts/Scripts/CopyNotesToIncident/CopyNotesToIncident.py
@@ -2,7 +2,7 @@
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa # pylint: disable=unused-wildcard-import
-from typing import Dict, Any, List
+from typing import Any
import traceback
''' STANDALONE FUNCTION '''
@@ -13,17 +13,18 @@ def remove_id_and_version_from_entry(entry):
entry.pop('Version', None)
-def copy_notes_to_target_incident(args: Dict[str, Any]) -> CommandResults:
+def copy_notes_to_target_incident(args: dict[str, Any]) -> CommandResults:
target_incident = args.get('target_incident', None)
if not target_incident:
raise ValueError('Target Incident ID not specified')
tags = argToList(args.get('tags'))
+ auto_extract = args.get('auto_extract', False)
+ auto_extract_value = "none" if not auto_extract else "inline"
+ entries = demisto.executeCommand('getEntries', {'filter': {'tags': tags, "auto_extract": auto_extract_value}})
- entries = demisto.executeCommand('getEntries', {'filter': {'tags': tags}})
-
- note_entries: List = []
+ note_entries: list = []
md: str = ''
if isinstance(entries, list) and len(entries) > 0:
@@ -33,7 +34,9 @@ def copy_notes_to_target_incident(args: Dict[str, Any]) -> CommandResults:
note_entries.append(entry)
if len(note_entries) > 0:
- demisto.executeCommand("addEntries", {"id": target_incident, "entries": note_entries})
+ demisto.executeCommand("addEntries", {"id": target_incident,
+ "entries": note_entries,
+ "auto_extract": auto_extract_value})
md = f'## {len(note_entries)} notes copied'
else:
md = '## No notes found'
diff --git a/Packs/CommonScripts/Scripts/CopyNotesToIncident/CopyNotesToIncident.yml b/Packs/CommonScripts/Scripts/CopyNotesToIncident/CopyNotesToIncident.yml
index d36fa89eb5e6..5aea10c48642 100644
--- a/Packs/CommonScripts/Scripts/CopyNotesToIncident/CopyNotesToIncident.yml
+++ b/Packs/CommonScripts/Scripts/CopyNotesToIncident/CopyNotesToIncident.yml
@@ -2,6 +2,13 @@ args:
- description: Incident ID to copy notes to.
name: target_incident
required: true
+- description: Whether auto extract the indicators from the notes.
+ name: auto_extract
+ auto: PREDEFINED
+ predefined:
+ - 'true'
+ - 'false'
+ defaultValue: 'true'
- description: Replicate only notes with these tags (array or comma separated).
isArray: true
name: tags
@@ -9,7 +16,7 @@ comment: Copy all entries marked as notes from current incident to another incid
commonfields:
id: CopyNotesToIncident
version: -1
-dockerimage: demisto/python3:3.10.12.63474
+dockerimage: demisto/python3:3.10.13.86272
enabled: true
name: CopyNotesToIncident
runas: DBotWeakRole
diff --git a/Packs/CommonScripts/Scripts/CopyNotesToIncident/CopyNotesToIncident_test.py b/Packs/CommonScripts/Scripts/CopyNotesToIncident/CopyNotesToIncident_test.py
index cd6cd12c867e..07d8e660710b 100644
--- a/Packs/CommonScripts/Scripts/CopyNotesToIncident/CopyNotesToIncident_test.py
+++ b/Packs/CommonScripts/Scripts/CopyNotesToIncident/CopyNotesToIncident_test.py
@@ -1,6 +1,6 @@
from CopyNotesToIncident import copy_notes_to_target_incident
import demistomock as demisto # noqa # pylint: disable=unused-wildcard-import
-from typing import List, Dict, Any
+from typing import Any
import json
MOCK_TARGET_INCIDENT_ID = '99'
@@ -27,7 +27,7 @@ def test_copy_no_note_entries(mocker):
mock_target_entries = mock_source_entries
- def executeCommand(name: str, args: Dict[str, Any]) -> List[Dict[str, Any]]:
+ def executeCommand(name: str, args: dict[str, Any]) -> list[dict[str, Any]]:
if name == 'getEntries':
return mock_target_entries
elif name == 'addEntries':
@@ -65,7 +65,7 @@ def test_copy_all_note_entries(mocker):
mock_target_entries = [e for e in mock_source_entries if isinstance(e, dict) and 'Note' in e and e['Note'] is True]
- def executeCommand(name: str, args: Dict[str, Any]) -> List[Dict[str, Any]]:
+ def executeCommand(name: str, args: dict[str, Any]) -> list[dict[str, Any]]:
if name == 'getEntries':
return mock_target_entries
elif name == 'addEntries':
@@ -114,7 +114,7 @@ def test_copy_tagged_note_entries(mocker):
)
]
- def executeCommand(name: str, args: Dict[str, Any]) -> List[Dict[str, Any]]:
+ def executeCommand(name: str, args: dict[str, Any]) -> list[dict[str, Any]]:
if name == 'getEntries':
return mock_target_entries
elif name == 'addEntries':
diff --git a/Packs/CommonScripts/Scripts/DBotUpdateLogoURLPhishing/DBotUpdateLogoURLPhishing.py b/Packs/CommonScripts/Scripts/DBotUpdateLogoURLPhishing/DBotUpdateLogoURLPhishing.py
index c9a543a91e62..57cbf14865b1 100644
--- a/Packs/CommonScripts/Scripts/DBotUpdateLogoURLPhishing/DBotUpdateLogoURLPhishing.py
+++ b/Packs/CommonScripts/Scripts/DBotUpdateLogoURLPhishing/DBotUpdateLogoURLPhishing.py
@@ -177,10 +177,10 @@ def display_all_logos(model):
logo_list = []
for name, logo in model.logos_dict.items():
custom_associated_logo = model.custom_logo_associated_domain.get(name, '')
- if name in model.custom_logo_associated_domain.keys():
- description = description + ", %s (%s, %s)" % (name, 'Custom Logo', ','.join(custom_associated_logo))
+ if name in model.custom_logo_associated_domain:
+ description = description + ", {} ({}, {})".format(name, 'Custom Logo', ','.join(custom_associated_logo))
else:
- description = description + ", %s (%s)" % (name, 'Default Logo')
+ description = description + ", {} ({})".format(name, 'Default Logo')
logo_list.append(logo)
description = description[1:]
merged_logos = get_concat_logo_single_image([image_from_base64_to_bytes(logo) for logo in logo_list])
@@ -214,7 +214,7 @@ def main():
else:
model = load_model_from_docker()
display_all_logos(model)
- return
+ return None
if (action == KEY_ADD_LOGO) and (not logo_image_id or not logo_name):
return_error(MSG_EMPTY_NAME_OR_URL)
diff --git a/Packs/CommonScripts/Scripts/DBotUpdateLogoURLPhishing/DBotUpdateLogoURLPhishing.yml b/Packs/CommonScripts/Scripts/DBotUpdateLogoURLPhishing/DBotUpdateLogoURLPhishing.yml
index 77c31e08cee9..3c0a312c454d 100644
--- a/Packs/CommonScripts/Scripts/DBotUpdateLogoURLPhishing/DBotUpdateLogoURLPhishing.yml
+++ b/Packs/CommonScripts/Scripts/DBotUpdateLogoURLPhishing/DBotUpdateLogoURLPhishing.yml
@@ -27,7 +27,7 @@ tags:
- ml
timeout: '0'
type: python
-dockerimage: demisto/mlurlphishing:1.0.0.61412
+dockerimage: demisto/mlurlphishing:1.0.0.85127
runas: DBotRole
tests:
- DBotUpdateLogoURLPhishing_test
diff --git a/Packs/CommonScripts/Scripts/ReadPDFFileV2/README.md b/Packs/CommonScripts/Scripts/ReadPDFFileV2/README.md
index cdbee295104b..52c0b700959a 100644
--- a/Packs/CommonScripts/Scripts/ReadPDFFileV2/README.md
+++ b/Packs/CommonScripts/Scripts/ReadPDFFileV2/README.md
@@ -1,4 +1,4 @@
-Loads a PDF file's content and metadata into context.
+Load a PDF file's content and metadata into context. Supports extraction of hashes, urls, and emails when available.
## Script Data
@@ -55,3 +55,5 @@ Loads a PDF file's content and metadata into context.
| File.UserProperties | Indicates the presence of the structure elements that contain user properties attributes. | String |
| File.Extension | The file's extension. | String |
| Account.Email | The email address of the account. | String |
+| Hashes.type | The hash type extracted from the PDF file. | String |
+| Hashes.value | The hash value extracted from the PDF file. | String |
diff --git a/Packs/CommonScripts/Scripts/ReadPDFFileV2/ReadPDFFileV2.py b/Packs/CommonScripts/Scripts/ReadPDFFileV2/ReadPDFFileV2.py
index 82bd49d5f68f..8a0cb51221a4 100644
--- a/Packs/CommonScripts/Scripts/ReadPDFFileV2/ReadPDFFileV2.py
+++ b/Packs/CommonScripts/Scripts/ReadPDFFileV2/ReadPDFFileV2.py
@@ -9,7 +9,6 @@
import re
import shutil
import json
-from typing import List, Set, Tuple
from pikepdf import Pdf, PasswordError
import contextlib
import io
@@ -27,7 +26,6 @@ class PdfPermissionsException(Exception):
Every exception class that is in charge of catching errors that occur when trying to
extract data from the PDF must inherit this class
"""
- pass
class PdfCopyingProtectedException(PdfPermissionsException):
@@ -36,7 +34,6 @@ class PdfCopyingProtectedException(PdfPermissionsException):
a `copy-protected` file (Copy-protected files are files that prevent us from copy its content)
This is relevant since we run a command that copies the content of the pdf file into a text file.
"""
- pass
class PdfInvalidCredentialsException(PdfPermissionsException):
@@ -44,7 +41,6 @@ class PdfInvalidCredentialsException(PdfPermissionsException):
This class is in charge of catching errors that occur when we try to decrypt an encrypted
pdf file with the wrong password.
"""
- pass
# Error class for shell errors
@@ -120,7 +116,7 @@ def run_shell_command(command: str, *args) -> bytes:
"""Runs shell command and returns the result if not encountered an error"""
cmd = [command] + list(args)
demisto.debug(f'Running the shell command {cmd=}')
- completed_process = subprocess.run(
+ completed_process = subprocess.run( # noqa: UP022
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
exit_codes = completed_process.returncode
@@ -159,9 +155,9 @@ def get_files_names_in_path(path: str, name_of_file: str, full_path: bool = Fals
return res
-def get_images_paths_in_path(path: str) -> List[str]:
+def get_images_paths_in_path(path: str) -> list[str]:
"""Gets images paths from path"""
- res: List[str] = []
+ res: list[str] = []
for img_type in IMG_FORMATS:
img_format = f"*.{img_type}"
res.extend(get_files_names_in_path(path, img_format, True))
@@ -250,14 +246,15 @@ def get_pdf_htmls_content(pdf_path: str, output_folder: str) -> str:
def build_readpdf_entry_object(entry_id: str, metadata: dict, text: str, urls: list, emails: list, images: list[str],
- max_images: int) -> list[dict[str, Any]]:
+ max_images: int,
+ hash_contexts: list[dict[str, Any]] | None = None) -> list[dict[str, Any]]:
"""Builds an entry object for the main script flow"""
pdf_file = {"EntryID": entry_id}
# Add Text to file entity
pdf_file["Text"] = text
# Add Metadata to file entity
- for k in metadata.keys():
+ for k in metadata:
pdf_file[k] = metadata[k]
md = "### Metadata\n"
@@ -306,6 +303,8 @@ def build_readpdf_entry_object(entry_id: str, metadata: dict, text: str, urls: l
indicators_map = json.loads(indicators_map)
if emails:
indicators_map["Email"] = emails
+ if hash_contexts:
+ indicators_map['Hashes'] = hash_contexts
except json.JSONDecodeError:
pass
ec = build_readpdf_entry_context(indicators_map)
@@ -334,6 +333,8 @@ def build_readpdf_entry_context(indicators_map: Any) -> dict:
for email in indicators_map["Email"]:
ec_email.append({"Email": email})
ec["Account"] = ec_email
+ if 'Hashes' in indicators_map:
+ ec['Hashes'] = indicators_map['Hashes']
return ec
@@ -351,7 +352,7 @@ def get_urls_from_binary_file(file_path: str) -> set:
return binary_file_urls
-def get_urls_and_emails_from_pdf_html_content(cpy_file_path: str, output_folder: str) -> Tuple[set, set]:
+def get_urls_and_emails_from_pdf_html_content(cpy_file_path: str, output_folder: str) -> tuple[set, set]:
"""
Extract the URLs and emails from the pdf html content.
@@ -386,6 +387,8 @@ def extract_url_from_annot_object(annot_object: Any):
if isinstance(url, PyPDF2.generic.IndirectObject):
url = url.get_object()
return url
+ return None
+ return None
def extract_url(extracted_object: Any):
@@ -461,7 +464,7 @@ def extract_urls_and_emails_from_annot_objects(annot_objects: list | Any):
return urls, emails
-def get_urls_and_emails_from_pdf_annots(file_path: str) -> Tuple[set, set]:
+def get_urls_and_emails_from_pdf_annots(file_path: str) -> tuple[set, set]:
"""
Extracts the URLs and Emails from the pdf's Annots (Annotations and Commenting) using PyPDF2 package.
Args:
@@ -469,8 +472,8 @@ def get_urls_and_emails_from_pdf_annots(file_path: str) -> Tuple[set, set]:
Returns:
Tuple[set, set]: A set includes the URLs that were found, A set includes the Emails that were found.
"""
- all_urls: Set[str] = set()
- all_emails: Set[str] = set()
+ all_urls: set[str] = set()
+ all_emails: set[str] = set()
output_capture = io.StringIO()
with open(file_path, 'rb') as pdf_file:
# The following context manager was added so we could redirect error messages to the server logs since
@@ -510,7 +513,7 @@ def get_urls_and_emails_from_pdf_annots(file_path: str) -> Tuple[set, set]:
return all_urls, all_emails
-def extract_urls_and_emails_from_pdf_file(file_path: str, output_folder: str) -> Tuple[list, list]:
+def extract_urls_and_emails_from_pdf_file(file_path: str, output_folder: str) -> tuple[list, list]:
"""
Extract URLs and Emails from the PDF file.
Args:
@@ -545,6 +548,57 @@ def extract_urls_and_emails_from_pdf_file(file_path: str, output_folder: str) ->
return urls_ec, emails_ec
+def extract_hash_contexts_from_pdf_file(file_text: str) -> list[dict[str, Any]]:
+ """Extracts the hashes from the file's text, and converts them to hash contexts.
+
+ Args:
+ file_text (str): The text extracted from the PDF.
+
+ Returns:
+ list[dict[str, Any]]: A list of hash contexts.
+ """
+ hash_contexts: list[dict[str, Any]] = []
+ hashes_in_file = get_hashes_from_file(file_text)
+ for hash_type, hashes in hashes_in_file.items():
+ if hashes:
+ hash_contexts.extend(convert_hash_to_context(hash_type, hashes))
+ return hash_contexts
+
+
+def convert_hash_to_context(hash_type: str, hashes: set[Any]) -> list[dict[str, Any]]:
+ """Converts the given hashes to hash contexts
+
+ Args:
+ hash_type (str): The hash type of the given hashes.
+ hashes (set[Any]): The set of hashes.
+
+ Returns:
+ list[dict[str, Any]]: A list of hash contexts that have the same hash type.
+ """
+ hash_context: list[dict[str, Any]] = [{'type': hash_type, 'value': hash} for hash in hashes]
+ return hash_context
+
+
+def get_hashes_from_file(file_text: str) -> dict[str, set[Any]]:
+ """Extracts all the hashes found in the file's text.
+
+ Args:
+ file_text (str): The file's text.
+
+ Returns:
+ dict[str, set[Any]]: A dictionary that holds the hash types as keys, and each key
+ holds the set of hashes corresponding to that hash type.
+ """
+ demisto.debug('Extracting hashes from file')
+ hashes: dict[str, set[Any]] = {}
+ hashes['SHA1'] = set(re.findall(sha1Regex, file_text))
+ hashes['SHA256'] = set(re.findall(sha256Regex, file_text))
+ hashes['SHA512'] = set(re.findall(sha512Regex, file_text))
+ hashes['MD5'] = set(re.findall(md5Regex, file_text))
+
+ return hashes
+
+
def handling_pdf_credentials(cpy_file_path: str, dec_file_path: str, encrypted: str = '',
user_password: str = '') -> str:
"""
@@ -577,6 +631,9 @@ def extract_data_from_pdf(path: str, user_password: str, entry_id: str, max_imag
pdf_text_output_path = f"{working_dir}/PDFText.txt"
text = get_pdf_text(cpy_file_path, pdf_text_output_path)
+ # Get hash contexts
+ hash_contexts = extract_hash_contexts_from_pdf_file(text)
+
# Get URLS + emails:
urls_ec, emails_ec = extract_urls_and_emails_from_pdf_file(cpy_file_path, working_dir)
@@ -588,7 +645,8 @@ def extract_data_from_pdf(path: str, user_password: str, entry_id: str, max_imag
urls_ec,
emails_ec,
images,
- max_images=max_images)
+ max_images=max_images,
+ hash_contexts=hash_contexts)
return_results(readpdf_entry_object)
else:
diff --git a/Packs/CommonScripts/Scripts/ReadPDFFileV2/ReadPDFFileV2.yml b/Packs/CommonScripts/Scripts/ReadPDFFileV2/ReadPDFFileV2.yml
index c9d3d9e66291..480ca5f0a553 100644
--- a/Packs/CommonScripts/Scripts/ReadPDFFileV2/ReadPDFFileV2.yml
+++ b/Packs/CommonScripts/Scripts/ReadPDFFileV2/ReadPDFFileV2.yml
@@ -9,13 +9,19 @@ args:
description: Maximum number of images to extract from the PDF file.
name: maxImages
comment: |-
- Load a PDF file's content and metadata into context.
+ Load a PDF file's content and metadata into context. Supports extraction of hashes, urls, and emails when available.
commonfields:
id: ReadPDFFileV2
version: -1
enabled: true
name: ReadPDFFileV2
outputs:
+- contextPath: Hashes.type
+ description: The hash type extracted from the PDF file.
+ type: String
+- contextPath: Hashes.value
+ description: The hash value extracted from the PDF file.
+ type: String
- contextPath: URL.Data
description: A list of URLs that were extracted from the PDF file.
type: String
@@ -116,7 +122,7 @@ tags:
- ingestion
timeout: "0"
type: python
-dockerimage: demisto/readpdf:1.0.0.83506
+dockerimage: demisto/readpdf:1.0.0.85832
runas: DBotRole
tests:
- Extract Indicators From File - Generic v2 - Test
diff --git a/Packs/CommonScripts/Scripts/ReadPDFFileV2/ReadPDFFileV2_test.py b/Packs/CommonScripts/Scripts/ReadPDFFileV2/ReadPDFFileV2_test.py
index d46fdf43a4ca..9a5701f0fb22 100644
--- a/Packs/CommonScripts/Scripts/ReadPDFFileV2/ReadPDFFileV2_test.py
+++ b/Packs/CommonScripts/Scripts/ReadPDFFileV2/ReadPDFFileV2_test.py
@@ -13,6 +13,46 @@ def open_html_file(file):
return f.read()
+def test_extract_hash_contexts():
+ """
+ Given
+ - A PDF with hashes in it.
+ When
+ - Trying extract the hashes from the file.
+ Then
+ - Validate that the hashes were extracted successfully.
+ """
+ from ReadPDFFileV2 import extract_hash_contexts_from_pdf_file, get_pdf_text
+ expected_hash_contexts = [{'type': 'SHA1', 'value': 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'},
+ {'type': 'SHA256', 'value': '2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824'},
+ {'type': 'SHA256', 'value': '8732331accf45f86a00ca823cb24d9806ec1380846a337ac86b4fe6f9d06f1f5'},
+ {'type': 'MD5', 'value': '5d41402abc4b2a76b9719d911017c592'}]
+ # We first extract the file's text, and then extract the hashes
+ pdf_text_output_path = f"{CWD}/PDFText.txt"
+ file_text = get_pdf_text(f'{CWD}/pdf-with-hashes.pdf', pdf_text_output_path)
+ hash_contexts = extract_hash_contexts_from_pdf_file(file_text)
+ assert len(hash_contexts) == len(expected_hash_contexts)
+ for hash_context in hash_contexts:
+ assert hash_context in expected_hash_contexts
+
+
+def test_hash_contexts_in_return_results():
+ """
+ Given
+ - A hash context to add to the entry context.
+ When
+ - Building the entry context.
+ Then
+ - Validate that the hash context was added.
+ """
+ from ReadPDFFileV2 import build_readpdf_entry_context
+ hashes = {'Hashes': [
+ {'type': 'SHA1', 'value': 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'},
+ {'type': 'MD5', 'value': '5d41402abc4b2a76b9719d911017c592'}]}
+ entry_context = build_readpdf_entry_context(hashes)
+ assert entry_context == hashes
+
+
def test_urls_are_found_correctly(mocker):
"""
Given
diff --git a/Packs/CommonScripts/Scripts/ReadPDFFileV2/test_data/PDFText.txt b/Packs/CommonScripts/Scripts/ReadPDFFileV2/test_data/PDFText.txt
new file mode 100644
index 000000000000..da23228abfc7
--- /dev/null
+++ b/Packs/CommonScripts/Scripts/ReadPDFFileV2/test_data/PDFText.txt
@@ -0,0 +1,11 @@
+Dummy PDF
+SHA256: 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
+MD5: 5d41402abc4b2a76b9719d911017c592
+SHA1: aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d
+SHA1: aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d
+SHA256: 8732331accf45f86a00ca823cb24d9806ec1380846a337ac86b4fe6f9d06f1f5
+some@example.com
+IPs: 123.123.123.123, 107.66.225.91
+URLs: google.com, https://example.com/
+
+
\ No newline at end of file
diff --git a/Packs/CommonScripts/Scripts/ReadPDFFileV2/test_data/pdf-with-hashes.pdf b/Packs/CommonScripts/Scripts/ReadPDFFileV2/test_data/pdf-with-hashes.pdf
new file mode 100644
index 000000000000..a4e480ee09ce
Binary files /dev/null and b/Packs/CommonScripts/Scripts/ReadPDFFileV2/test_data/pdf-with-hashes.pdf differ
diff --git a/Packs/CommonScripts/Scripts/Set/Set.js b/Packs/CommonScripts/Scripts/Set/Set.js
index 053936606221..ba41ccaa510a 100644
--- a/Packs/CommonScripts/Scripts/Set/Set.js
+++ b/Packs/CommonScripts/Scripts/Set/Set.js
@@ -8,12 +8,24 @@ try {
value = JSON.stringify(args.value)
}
} else {
- value = JSON.parse(args.value);
+ if (!isNaN(args.value)) {
+ // This means that the value is a number
+ if (args.value.includes('.')) {
+ // It is a decimal number
+ value = JSON.parse(args.value);
+ }
+ else {
+ // If the number is large
+ value = BigInt(args.value);
+ }
+ }
+ else {
+ value = JSON.parse(args.value);
+ }
}
-} catch(err) {
+} catch (err) {
value = args.value;
}
-
ec[args.key] = value;
var result = {
Type: entryTypes.note,
@@ -27,5 +39,5 @@ if (!args.append || args.append === 'false') {
} else {
result.EntryContext = ec;
}
-
+logDebug(`Setting ${JSON.stringify(result)}`)
return result;
diff --git a/Packs/CommonScripts/Scripts/SetGridField/SetGridField.py b/Packs/CommonScripts/Scripts/SetGridField/SetGridField.py
index 226e6f6d8d37..ba8f35a8e9fb 100644
--- a/Packs/CommonScripts/Scripts/SetGridField/SetGridField.py
+++ b/Packs/CommonScripts/Scripts/SetGridField/SetGridField.py
@@ -180,7 +180,7 @@ def get_current_table(grid_id: str) -> pd.DataFrame:
# in XSOAR the fields exist with empty values.
incident = demisto.incident()
custom_fields = incident.get("CustomFields", {}) or {}
- if (not is_xsiam()) and grid_id not in custom_fields:
+ if (not is_xsiam_or_xsoar_saas()) and grid_id not in custom_fields:
raise ValueError(get_error_message(grid_id))
current_table: list[dict] | None = custom_fields.get(grid_id)
return pd.DataFrame(current_table) if current_table else pd.DataFrame()
@@ -276,18 +276,18 @@ def build_grid(context_path: str, keys: list[str], columns: list[str], unpack_ne
# Handle entry context as dict, with unpacking of nested elements
table = pd.DataFrame(unpack_all_data_from_dict(entry_context_data, keys, columns))
- table.rename(columns=dict(zip(table.columns, columns)), inplace=True)
+ table = table.rename(columns=dict(zip(table.columns, columns)))
elif data_type == 'list':
# Handle entry context as list of value
table = pd.DataFrame(entry_context_data)
- table.rename(columns=dict(zip(table.columns, columns)), inplace=True)
+ table = table.rename(columns=dict(zip(table.columns, columns)))
elif isinstance(entry_context_data, list):
# Handle entry context as list of dicts
entry_context_data = [entry_dicts_to_string(dict_obj=filter_dict(item, keys, len(columns)),
keys_to_choose=keys_from_nested)
for item in entry_context_data]
table = pd.DataFrame(entry_context_data)
- table.rename(columns=dict(zip(table.columns, columns)), inplace=True)
+ table = table.rename(columns=dict(zip(table.columns, columns)))
elif isinstance(entry_context_data, dict):
# Handle entry context key-value
# If the keys arg is * it means we don't know which keys we have in the context - Will create key-value table.
@@ -299,7 +299,7 @@ def build_grid(context_path: str, keys: list[str], columns: list[str], unpack_ne
else:
entry_context_data = entry_context_data
table = pd.DataFrame([entry_context_data])
- table.rename(columns=dict(zip(table.columns, columns)), inplace=True)
+ table = table.rename(columns=dict(zip(table.columns, columns)))
else:
table = []
@@ -354,7 +354,7 @@ def build_grid_command(grid_id: str, context_path: str, keys: list[str], columns
# Sort by column name if specified, support multi columns sort
if sort_by and set(sort_by) <= set(new_table.columns):
- new_table.sort_values(by=sort_by, inplace=True)
+ new_table = new_table.sort_values(by=sort_by)
# filter empty values in the generated table
filtered_table = []
@@ -396,7 +396,7 @@ def main(): # pragma: no cover
data = entry["Contents"]["data"]
custom_fields = data[0].get("CustomFields") if data and data[0].get("CustomFields") else {}
# in the debugger, there is an addition of the "_grid" suffix to the grid_id.
- if is_xsiam() and table and grid_id not in custom_fields and f"{grid_id}_grid" not in custom_fields:
+ if is_xsiam_or_xsoar_saas() and table and grid_id not in custom_fields and f"{grid_id}_grid" not in custom_fields:
raise ValueError(get_error_message(grid_id))
if is_error(res_set):
demisto.error(f'failed to execute "setIncident" with table: {table}.')
diff --git a/Packs/CommonScripts/Scripts/SetGridField/SetGridField.yml b/Packs/CommonScripts/Scripts/SetGridField/SetGridField.yml
index f7e0cd0e6446..dac3a7f2cf86 100644
--- a/Packs/CommonScripts/Scripts/SetGridField/SetGridField.yml
+++ b/Packs/CommonScripts/Scripts/SetGridField/SetGridField.yml
@@ -43,7 +43,7 @@ script: '-'
subtype: python3
timeout: '0'
type: python
-dockerimage: demisto/pandas:1.0.0.83429
+dockerimage: demisto/pandas:1.0.0.86039
fromversion: 5.0.0
tests:
- No tests
diff --git a/Packs/CommonScripts/Scripts/SetGridField/SetGridField_test.py b/Packs/CommonScripts/Scripts/SetGridField/SetGridField_test.py
index 5c4c945913aa..5eab0b1f3798 100644
--- a/Packs/CommonScripts/Scripts/SetGridField/SetGridField_test.py
+++ b/Packs/CommonScripts/Scripts/SetGridField/SetGridField_test.py
@@ -215,7 +215,7 @@ def test_main_does_not_raises_error_in_xsoar(mocker):
"overwrite": "True",
"unpack_nested_elements": "False"})
mocker.patch.object(demisto, 'incident', return_value={"CustomFields": None})
- mocker.patch.object(SetGridField, 'is_xsiam', return_value=False)
+ mocker.patch.object(SetGridField, 'is_xsiam_or_xsoar_saas', return_value=False)
mocker.patch.object(SetGridField, 'get_current_table', return_value=[])
mocker.patch.object(SetGridField, 'build_grid_command', return_value=[{"name": "name", "readable_name": "readable_name"}])
mocker.patch.object(demisto, 'executeCommand', side_effect=side_effect_for_execute_command)
@@ -238,7 +238,7 @@ def test_main_raises_error_in_xsiam(mocker):
'columns': "col1,col2", "sort_by": "col1",
"overwrite": "True",
"unpack_nested_elements": "False"})
- mocker.patch.object(SetGridField, 'is_xsiam', return_value=True)
+ mocker.patch.object(SetGridField, 'is_xsiam_or_xsoar_saas', return_value=True)
mocker.patch.object(demisto, 'incident', return_value={"CustomFields": None})
mocker.patch.object(SetGridField, 'get_current_table', return_value=[])
mocker.patch.object(SetGridField, 'build_grid_command', return_value=[{"name": "name", "readable_name": "readable_name"}])
@@ -259,7 +259,7 @@ def test_get_current_table_exception(mocker):
"""
import SetGridField
- mocker.patch.object(SetGridField, 'is_xsiam', return_value=False)
+ mocker.patch.object(SetGridField, 'is_xsiam_or_xsoar_saas', return_value=False)
mocker.patch.object(demisto, 'incident', return_value={"CustomFields": None, "isPlayground": True})
with pytest.raises(Exception):
SetGridField.get_current_table("grid_id")
diff --git a/Packs/CommonScripts/Scripts/Sleep/Sleep.js b/Packs/CommonScripts/Scripts/Sleep/Sleep.js
index b44d9752eec1..fdb524ccc18a 100644
--- a/Packs/CommonScripts/Scripts/Sleep/Sleep.js
+++ b/Packs/CommonScripts/Scripts/Sleep/Sleep.js
@@ -13,7 +13,7 @@ if (parseInt(args.seconds) >= pollingThreshold) {
Type: entryTypes.note,
Contents: 'Sleep will complete in ' + args.seconds + ' seconds',
PollingCommand: 'Print',
- NextRun: args.seconds,
+ NextRun: args.seconds + '',
PollingArgs: {value: 'Sleep completed in ' + args.seconds + ' seconds'},
Timeout: String(parseInt(args.seconds) + 60)
}
diff --git a/Packs/CommonScripts/TestPlaybooks/playbook-Set_-_Test.yml b/Packs/CommonScripts/TestPlaybooks/playbook-Set_-_Test.yml
index f6c4d9498cfa..7a5036ffcdd6 100644
--- a/Packs/CommonScripts/TestPlaybooks/playbook-Set_-_Test.yml
+++ b/Packs/CommonScripts/TestPlaybooks/playbook-Set_-_Test.yml
@@ -7,23 +7,19 @@ starttaskid: "0"
tasks:
"0":
id: "0"
- ignoreworker: false
+ taskid: 2cf02b47-e47f-4e2b-8c3e-ea0e6940bbca
+ type: start
+ task:
+ id: 2cf02b47-e47f-4e2b-8c3e-ea0e6940bbca
+ version: -1
+ name: ""
+ iscommand: false
+ brand: ""
nexttasks:
'#none#':
- "7"
- note: false
- quietmode: 0
separatecontext: false
- skipunavailable: false
- task:
- brand: ""
- id: d4343368-209f-4fb3-8d32-d417151b7996
- iscommand: false
- name: ""
- version: -1
- taskid: d4343368-209f-4fb3-8d32-d417151b7996
- timertriggers: []
- type: start
+ continueonerrortype: ""
view: |-
{
"position": {
@@ -31,142 +27,151 @@ tasks:
"y": 50
}
}
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"1":
id: "1"
- ignoreworker: false
+ taskid: a08ada0a-294d-4913-844d-abbb55466baa
+ type: regular
+ task:
+ id: a08ada0a-294d-4913-844d-abbb55466baa
+ version: -1
+ name: Set number as number
+ description: Sets a value into the context with the given context key
+ scriptName: Set
+ type: regular
+ iscommand: false
+ brand: ""
nexttasks:
'#none#':
- "6"
- note: false
- quietmode: 0
- reputationcalc: 1
scriptarguments:
- append: {}
key:
simple: NumberSavedAsNumber
- stringify: {}
value:
simple: "14444444444444444444"
+ reputationcalc: 1
separatecontext: false
- skipunavailable: false
- task:
- brand: ""
- description: Sets a value into the context with the given context key
- id: b65c2503-b3c3-447d-8d51-a7f08a8dd62c
- iscommand: false
- name: Set number as number
- script: Set
- type: regular
- version: -1
- taskid: b65c2503-b3c3-447d-8d51-a7f08a8dd62c
- timertriggers: []
- type: regular
+ continueonerrortype: ""
view: |-
{
"position": {
- "x": 750,
+ "x": 1160,
"y": 350
}
}
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"2":
id: "2"
- ignoreworker: false
+ taskid: 5c3a99c1-8b8f-49ac-8308-f5b7808c74e3
+ type: regular
+ task:
+ id: 5c3a99c1-8b8f-49ac-8308-f5b7808c74e3
+ version: -1
+ name: Set number as string
+ description: Sets a value into the context with the given context key
+ scriptName: Set
+ type: regular
+ iscommand: false
+ brand: ""
nexttasks:
'#none#':
- "3"
- note: false
- quietmode: 0
- reputationcalc: 1
scriptarguments:
- append: {}
key:
simple: NumberSavedAsString
stringify:
simple: "true"
value:
simple: "14444444444444444444"
+ reputationcalc: 1
separatecontext: false
- skipunavailable: false
- task:
- brand: ""
- description: Sets a value into the context with the given context key
- id: c1295684-04ff-4b37-8512-72c7c27e0d1c
- iscommand: false
- name: Set number as string
- script: Set
- type: regular
- version: -1
- taskid: c1295684-04ff-4b37-8512-72c7c27e0d1c
- timertriggers: []
- type: regular
+ continueonerrortype: ""
view: |-
{
"position": {
- "x": 150,
+ "x": -350,
"y": 350
}
}
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"3":
- conditions:
- - condition:
- - - left:
- iscontext: true
- value:
- complex:
- root: NumberSavedAsString
- operator: isEqualString
- right:
- value:
- simple: "14444444444444444444"
- label: "yes"
id: "3"
- ignoreworker: false
+ taskid: 84e845bf-0441-407f-89e6-c68c7ce0a6e7
+ type: condition
+ task:
+ id: 84e845bf-0441-407f-89e6-c68c7ce0a6e7
+ version: -1
+ name: Was the number saved as string?
+ type: condition
+ iscommand: false
+ brand: ""
nexttasks:
'#default#':
- "5"
"yes":
- "4"
- note: false
- quietmode: 0
separatecontext: false
- skipunavailable: false
- task:
- brand: ""
- id: ff36914c-f04a-4bbb-87a0-a52db41471f6
- iscommand: false
- name: Was the number saved as string?
- type: condition
- version: -1
- taskid: ff36914c-f04a-4bbb-87a0-a52db41471f6
- timertriggers: []
- type: condition
+ conditions:
+ - label: "yes"
+ condition:
+ - - operator: isEqualString
+ left:
+ value:
+ complex:
+ root: NumberSavedAsString
+ iscontext: true
+ right:
+ value:
+ simple: "14444444444444444444"
+ continueonerrortype: ""
view: |-
{
"position": {
- "x": 150,
- "y": 520
+ "x": -350,
+ "y": 550
}
}
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"4":
id: "4"
- ignoreworker: false
+ taskid: be26e01a-5cf7-4605-80bd-5212fd91ce41
+ type: title
+ task:
+ id: be26e01a-5cf7-4605-80bd-5212fd91ce41
+ version: -1
+ name: Test DT
+ type: title
+ iscommand: false
+ brand: ""
nexttasks:
'#none#':
- "8"
- note: false
- quietmode: 0
separatecontext: false
- skipunavailable: false
- task:
- brand: ""
- id: cae5408c-78aa-4a22-8a26-82ce42940895
- iscommand: false
- name: Test DT
- type: title
- version: -1
- taskid: cae5408c-78aa-4a22-8a26-82ce42940895
- timertriggers: []
- type: title
+ continueonerrortype: ""
view: |-
{
"position": {
@@ -174,35 +179,38 @@ tasks:
"y": 1090
}
}
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"5":
id: "5"
- ignoreworker: false
+ taskid: d19c74d8-c2c1-4153-89c0-b0531fbe099a
+ type: regular
+ task:
+ id: d19c74d8-c2c1-4153-89c0-b0531fbe099a
+ version: -1
+ name: Fail the playbook
+ description: Prints an error entry with a given message
+ scriptName: PrintErrorEntry
+ type: regular
+ iscommand: false
+ brand: ""
nexttasks:
'#none#':
- "4"
- note: false
- quietmode: 0
- reputationcalc: 1
scriptarguments:
message:
simple: The number was not saved as string, or the number was saved as string
instead of a number. Does your PR change the Stringify parameter of the
Set command? Or was Demisto suddenly able to save the number 14444444444444444444
as the actual number it is?
+ reputationcalc: 1
separatecontext: false
- skipunavailable: false
- task:
- brand: ""
- description: Prints an error entry with a given message
- id: bb4843aa-8f84-4e7f-8991-1d3a5df2f225
- iscommand: false
- name: Fail the playbook
- script: PrintErrorEntry
- type: regular
- version: -1
- taskid: bb4843aa-8f84-4e7f-8991-1d3a5df2f225
- timertriggers: []
- type: regular
+ continueonerrortype: ""
view: |-
{
"position": {
@@ -210,78 +218,82 @@ tasks:
"y": 750
}
}
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"6":
- conditions:
- - condition:
- - - left:
- iscontext: true
- value:
- complex:
- root: NumberSavedAsNumber
- operator: isEqualNumber
- right:
- value:
- simple: "14444444444444444444"
- label: "Yes"
id: "6"
- ignoreworker: false
+ taskid: 7ad9f973-49ca-4c4a-8dd5-78b5fe883e2f
+ type: condition
+ task:
+ id: 7ad9f973-49ca-4c4a-8dd5-78b5fe883e2f
+ version: -1
+ name: Was the number saved as number?
+ type: condition
+ iscommand: false
+ brand: ""
nexttasks:
'#default#':
- "5"
"Yes":
- "4"
- note: false
- quietmode: 0
separatecontext: false
- skipunavailable: false
- task:
- brand: ""
- id: 5285e6b8-ef58-4c47-89b2-345111bdb5f5
- iscommand: false
- name: Was the number saved as number?
- type: condition
- version: -1
- taskid: 5285e6b8-ef58-4c47-89b2-345111bdb5f5
- timertriggers: []
- type: condition
+ conditions:
+ - label: "Yes"
+ condition:
+ - - operator: isEqualNumber
+ left:
+ value:
+ complex:
+ root: NumberSavedAsNumber
+ iscontext: true
+ right:
+ value:
+ simple: "14444444444444444444"
+ continueonerrortype: ""
view: |-
{
"position": {
- "x": 750,
- "y": 530
+ "x": 1160,
+ "y": 540
}
}
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"7":
id: "7"
- ignoreworker: false
+ taskid: 695f15b7-3249-4cd3-88e5-4d83744b08ae
+ type: regular
+ task:
+ id: 695f15b7-3249-4cd3-88e5-4d83744b08ae
+ version: -1
+ name: Clear the context
+ description: Delete field from context
+ scriptName: DeleteContext
+ type: regular
+ iscommand: false
+ brand: ""
nexttasks:
'#none#':
- "1"
- "2"
- note: false
- quietmode: 0
- reputationcalc: 1
+ - "15"
+ - "17"
scriptarguments:
all:
simple: "yes"
- index: {}
- key: {}
- keysToKeep: {}
- subplaybook: {}
+ reputationcalc: 1
separatecontext: false
- skipunavailable: false
- task:
- brand: ""
- description: Delete field from context
- id: 9fdfda53-6c3a-44bf-81c2-d2f5cd82ae2d
- iscommand: false
- name: Clear the context
- script: DeleteContext
- type: regular
- version: -1
- taskid: 9fdfda53-6c3a-44bf-81c2-d2f5cd82ae2d
- timertriggers: []
- type: regular
+ continueonerrortype: ""
view: |-
{
"position": {
@@ -289,36 +301,38 @@ tasks:
"y": 180
}
}
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"8":
id: "8"
- ignoreworker: false
+ taskid: 033e6ecd-c3fa-40d0-8857-a26e6fe0ac71
+ type: regular
+ task:
+ id: 033e6ecd-c3fa-40d0-8857-a26e6fe0ac71
+ version: -1
+ name: Set Object
+ description: Set a value in context under the key you entered.
+ scriptName: Set
+ type: regular
+ iscommand: false
+ brand: ""
nexttasks:
'#none#':
- "9"
- note: false
- quietmode: 0
scriptarguments:
append:
simple: "true"
key:
simple: Data
- stringify: {}
value:
simple: '{"ID": "test_id", "Value": "test_value"}'
separatecontext: false
- skipunavailable: false
- task:
- brand: ""
- description: Set a value in context under the key you entered.
- id: 450a4b59-dc85-42b3-88e0-287bcba194ae
- iscommand: false
- name: Set Object
- script: Set
- type: regular
- version: -1
- taskid: 450a4b59-dc85-42b3-88e0-287bcba194ae
- timertriggers: []
- type: regular
+ continueonerrortype: ""
view: |-
{
"position": {
@@ -326,36 +340,41 @@ tasks:
"y": 1230
}
}
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"9":
id: "9"
- ignoreworker: false
+ taskid: 36c3c8c1-1fb9-4508-8080-1f2f3573b285
+ type: condition
+ task:
+ id: 36c3c8c1-1fb9-4508-8080-1f2f3573b285
+ version: -1
+ name: Verify Context
+ description: Check whether the values provided in arguments are equal. If either
+ of the arguments are missing, no is returned.
+ scriptName: AreValuesEqual
+ type: condition
+ iscommand: false
+ brand: ""
nexttasks:
"no":
- "10"
"yes":
- "11"
- note: false
- quietmode: 0
scriptarguments:
left:
simple: ${Data.Value}
right:
simple: test_value
+ results:
+ - AreValuesEqual
separatecontext: false
- skipunavailable: false
- task:
- brand: ""
- description: Check whether the values provided in arguments are equal. If either
- of the arguments are missing, no is returned.
- id: 4c2c20e6-0549-4345-8093-9d61ff74ab53
- iscommand: false
- name: Verify Context
- script: AreValuesEqual
- type: condition
- version: -1
- taskid: 4c2c20e6-0549-4345-8093-9d61ff74ab53
- timertriggers: []
- type: condition
+ continueonerrortype: ""
view: |-
{
"position": {
@@ -363,28 +382,31 @@ tasks:
"y": 1400
}
}
- "10":
- id: "10"
- ignoreworker: false
note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "10":
+ id: "10"
+ taskid: 46df44e3-8391-4d83-8d07-a8eff2ebf33f
+ type: regular
+ task:
+ id: 46df44e3-8391-4d83-8d07-a8eff2ebf33f
+ version: -1
+ name: Fail
+ description: Prints an error entry with a given message
+ scriptName: PrintErrorEntry
+ type: regular
+ iscommand: false
+ brand: ""
scriptarguments:
message:
simple: Context was not set as expected to test_value
separatecontext: false
- skipunavailable: false
- task:
- brand: ""
- description: Prints an error entry with a given message
- id: c30ec61c-6ef4-4c3d-8c47-eb8ac5cd1f56
- iscommand: false
- name: Fail
- script: PrintErrorEntry
- type: regular
- version: -1
- taskid: c30ec61c-6ef4-4c3d-8c47-eb8ac5cd1f56
- timertriggers: []
- type: regular
+ continueonerrortype: ""
view: |-
{
"position": {
@@ -392,36 +414,38 @@ tasks:
"y": 1610
}
}
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"11":
id: "11"
- ignoreworker: false
+ taskid: 59836edb-a29e-40ac-82c9-9159edf4ccdc
+ type: regular
+ task:
+ id: 59836edb-a29e-40ac-82c9-9159edf4ccdc
+ version: -1
+ name: Set - Update Object
+ description: Set a value in context under the key you entered.
+ scriptName: Set
+ type: regular
+ iscommand: false
+ brand: ""
nexttasks:
'#none#':
- "12"
- note: false
- quietmode: 0
scriptarguments:
append:
simple: "true"
key:
simple: Data(val.ID == obj.ID)
- stringify: {}
value:
simple: '{"ID": "test_id", "Value": "test_val2"}'
separatecontext: false
- skipunavailable: false
- task:
- brand: ""
- description: Set a value in context under the key you entered.
- id: 53c58f20-8f46-472a-8a13-2fc4721475fb
- iscommand: false
- name: Set - Update Object
- script: Set
- type: regular
- version: -1
- taskid: 53c58f20-8f46-472a-8a13-2fc4721475fb
- timertriggers: []
- type: regular
+ continueonerrortype: ""
view: |-
{
"position": {
@@ -429,36 +453,41 @@ tasks:
"y": 1630
}
}
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"12":
id: "12"
- ignoreworker: false
+ taskid: dc69dffc-68c8-4f08-8093-86afb3ff9677
+ type: condition
+ task:
+ id: dc69dffc-68c8-4f08-8093-86afb3ff9677
+ version: -1
+ name: Verify Context
+ description: Check whether the values provided in arguments are equal. If either
+ of the arguments are missing, no is returned.
+ scriptName: AreValuesEqual
+ type: condition
+ iscommand: false
+ brand: ""
nexttasks:
"no":
- "13"
"yes":
- "14"
- note: false
- quietmode: 0
scriptarguments:
left:
simple: ${Data.Value}
right:
simple: test_val2
+ results:
+ - AreValuesEqual
separatecontext: false
- skipunavailable: false
- task:
- brand: ""
- description: Check whether the values provided in arguments are equal. If either
- of the arguments are missing, no is returned.
- id: 9ea6f6f3-245c-4b8b-8899-11f5f58b438d
- iscommand: false
- name: Verify Context
- script: AreValuesEqual
- type: condition
- version: -1
- taskid: 9ea6f6f3-245c-4b8b-8899-11f5f58b438d
- timertriggers: []
- type: condition
+ continueonerrortype: ""
view: |-
{
"position": {
@@ -466,28 +495,31 @@ tasks:
"y": 1830
}
}
- "13":
- id: "13"
- ignoreworker: false
note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "13":
+ id: "13"
+ taskid: 6a049e8f-3718-4f48-83b5-2611600949d7
+ type: regular
+ task:
+ id: 6a049e8f-3718-4f48-83b5-2611600949d7
+ version: -1
+ name: Fail
+ description: Prints an error entry with a given message
+ scriptName: PrintErrorEntry
+ type: regular
+ iscommand: false
+ brand: ""
scriptarguments:
message:
simple: Failed verifying context value of test_val2
separatecontext: false
- skipunavailable: false
- task:
- brand: ""
- description: Prints an error entry with a given message
- id: 5dbdb574-8e38-47e2-8e92-04d45b49e065
- iscommand: false
- name: Fail
- script: PrintErrorEntry
- type: regular
- version: -1
- taskid: 5dbdb574-8e38-47e2-8e92-04d45b49e065
- timertriggers: []
- type: regular
+ continueonerrortype: ""
view: |-
{
"position": {
@@ -495,28 +527,193 @@ tasks:
"y": 2030
}
}
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
"14":
id: "14"
- ignoreworker: false
+ taskid: 0f055861-68f5-4080-8e23-7a05a8f25de1
+ type: title
+ task:
+ id: 0f055861-68f5-4080-8e23-7a05a8f25de1
+ version: -1
+ name: All Good
+ type: title
+ iscommand: false
+ brand: ""
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 440,
+ "y": 2050
+ }
+ }
note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "15":
+ id: "15"
+ taskid: ff62230d-fe4f-4134-8051-a1cac5547c8f
+ type: regular
+ task:
+ id: ff62230d-fe4f-4134-8051-a1cac5547c8f
+ version: -1
+ name: Set large number
+ description: Set a value in context under the key you entered.
+ scriptName: Set
+ type: regular
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "16"
+ scriptarguments:
+ key:
+ simple: LargeNumber
+ value:
+ simple: "1234567890123456789012345"
separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 170,
+ "y": 360
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "16":
+ id: "16"
+ taskid: 3d0f60f1-5c8c-4a47-859f-85748efc75d2
+ type: condition
task:
- brand: ""
- id: 552f24cc-4df2-45ed-82a4-7f4bd438449c
+ id: 3d0f60f1-5c8c-4a47-859f-85748efc75d2
+ version: -1
+ name: Was the large number saved?
+ type: condition
iscommand: false
- name: All Good
- type: title
+ brand: ""
+ nexttasks:
+ '#default#':
+ - "5"
+ "yes":
+ - "4"
+ separatecontext: false
+ conditions:
+ - label: "yes"
+ condition:
+ - - operator: isEqualNumber
+ left:
+ value:
+ complex:
+ root: LargeNumber
+ iscontext: true
+ right:
+ value:
+ simple: "1234567890123456789012345"
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 170,
+ "y": 510
+ }
+ }
+ note: false
+ timertriggers: []
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "17":
+ id: "17"
+ taskid: 52bca13e-34cd-42a5-81cd-ad9d58db2fa8
+ type: regular
+ task:
+ id: 52bca13e-34cd-42a5-81cd-ad9d58db2fa8
version: -1
- taskid: 552f24cc-4df2-45ed-82a4-7f4bd438449c
+ name: Set decimal number
+ description: Set a value in context under the key you entered.
+ scriptName: Set
+ type: regular
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#none#':
+ - "18"
+ scriptarguments:
+ key:
+ simple: DecimalNumber
+ value:
+ simple: "12345.6789"
+ separatecontext: false
+ continueonerrortype: ""
+ view: |-
+ {
+ "position": {
+ "x": 740,
+ "y": 360
+ }
+ }
+ note: false
timertriggers: []
- type: title
+ ignoreworker: false
+ skipunavailable: false
+ quietmode: 0
+ isoversize: false
+ isautoswitchedtoquietmode: false
+ "18":
+ id: "18"
+ taskid: 319a9b4f-062e-48fa-8173-33d24136973c
+ type: condition
+ task:
+ id: 319a9b4f-062e-48fa-8173-33d24136973c
+ version: -1
+ name: Was the decimal number saved?
+ type: condition
+ iscommand: false
+ brand: ""
+ nexttasks:
+ '#default#':
+ - "5"
+ "yes":
+ - "4"
+ separatecontext: false
+ conditions:
+ - label: "yes"
+ condition:
+ - - operator: isEqualNumber
+ left:
+ value:
+ complex:
+ root: DecimalNumber
+ iscontext: true
+ right:
+ value:
+ simple: "12345.6789"
+ continueonerrortype: ""
view: |-
{
"position": {
- "x": 440,
- "y": 2050
+ "x": 740,
+ "y": 530
}
}
version: -1
diff --git a/Packs/CommonScripts/pack_metadata.json b/Packs/CommonScripts/pack_metadata.json
index 6b28e23a4328..ee06809a1c02 100644
--- a/Packs/CommonScripts/pack_metadata.json
+++ b/Packs/CommonScripts/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Common Scripts",
"description": "Frequently used scripts pack.",
"support": "xsoar",
- "currentVersion": "1.13.19",
+ "currentVersion": "1.13.26",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CommonTypes/ReleaseNotes/3_4_0.md b/Packs/CommonTypes/ReleaseNotes/3_4_0.md
new file mode 100644
index 000000000000..8c84669cd029
--- /dev/null
+++ b/Packs/CommonTypes/ReleaseNotes/3_4_0.md
@@ -0,0 +1,3 @@
+## Common Types
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/CommonTypes/pack_metadata.json b/Packs/CommonTypes/pack_metadata.json
index 7f1cf2ff3ca6..73dd885f9f0d 100644
--- a/Packs/CommonTypes/pack_metadata.json
+++ b/Packs/CommonTypes/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Common Types",
"description": "This Content Pack will get you up and running in no-time and provide you with the most commonly used incident & indicator fields and types.",
"support": "xsoar",
- "currentVersion": "3.3.99",
+ "currentVersion": "3.4.0",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CommonWidgets/ReleaseNotes/1_2_46.md b/Packs/CommonWidgets/ReleaseNotes/1_2_46.md
new file mode 100644
index 000000000000..141145e251a9
--- /dev/null
+++ b/Packs/CommonWidgets/ReleaseNotes/1_2_46.md
@@ -0,0 +1,3 @@
+## Common Widgets
+
+- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release.
diff --git a/Packs/CommonWidgets/pack_metadata.json b/Packs/CommonWidgets/pack_metadata.json
index 3f8ce79e54b4..89bd44a64306 100644
--- a/Packs/CommonWidgets/pack_metadata.json
+++ b/Packs/CommonWidgets/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Common Widgets",
"description": "Frequently used widgets pack.",
"support": "xsoar",
- "currentVersion": "1.2.45",
+ "currentVersion": "1.2.46",
"author": "Cortex XSOAR",
"url": "https://www.paloaltonetworks.com/cortex",
"email": "",
diff --git a/Packs/CommunityCommonScripts/ReleaseNotes/1_1_5.md b/Packs/CommunityCommonScripts/ReleaseNotes/1_1_5.md
new file mode 100644
index 000000000000..828feacfa9a4
--- /dev/null
+++ b/Packs/CommunityCommonScripts/ReleaseNotes/1_1_5.md
@@ -0,0 +1,24 @@
+
+#### Scripts
+
+##### New: GetListDatawithKeyword
+
+- This transformer will get list of array elements by providing keyword
+[
+ {
+ "folder": "abc",
+ "username": "test"
+ },
+ {
+ "folder": "def",
+ "username": "test123"
+ },
+ {
+ "folder": "ghi",
+ "username": "admin"
+ }
+] (Available from Cortex XSOAR 6.8.0).
+##### New: GetIndexOfArrayValue
+
+- This transformer will get an index of an element from an array.
+ex:["phishing","Malware"], if we provide "Malware" to array_value argument, will get index as 1. (Available from Cortex XSOAR 6.8.0).
diff --git a/Packs/CommunityCommonScripts/Scripts/GetIndexOfArrayValue/GetIndexOfArrayValue.py b/Packs/CommunityCommonScripts/Scripts/GetIndexOfArrayValue/GetIndexOfArrayValue.py
new file mode 100644
index 000000000000..ef5d84c6ff71
--- /dev/null
+++ b/Packs/CommunityCommonScripts/Scripts/GetIndexOfArrayValue/GetIndexOfArrayValue.py
@@ -0,0 +1,31 @@
+import demistomock as demisto # noqa: F401
+from CommonServerPython import * # noqa: F401
+
+
+import traceback
+
+
+''' MAIN FUNCTION '''
+
+
+def get_index(json_data, array_val):
+ element_index = json_data.index(array_val)
+ return element_index
+
+
+def main():
+ try:
+ json_data = argToList(demisto.args()['value'])
+ array_val = demisto.args()['array_value']
+ res = get_index(json_data, array_val)
+ demisto.results(res)
+ except Exception as ex:
+ demisto.error(traceback.format_exc()) # print the traceback
+ return_error(f'Failed to execute BaseScript. Error: {str(ex)}')
+
+
+''' ENTRY POINT '''
+
+
+if __name__ in ('__main__', '__builtin__', 'builtins'):
+ main()
diff --git a/Packs/CommunityCommonScripts/Scripts/GetIndexOfArrayValue/GetIndexOfArrayValue.yml b/Packs/CommunityCommonScripts/Scripts/GetIndexOfArrayValue/GetIndexOfArrayValue.yml
new file mode 100644
index 000000000000..c3cc416229b7
--- /dev/null
+++ b/Packs/CommunityCommonScripts/Scripts/GetIndexOfArrayValue/GetIndexOfArrayValue.yml
@@ -0,0 +1,30 @@
+args:
+- description: The value is an array of elements.
+ isArray: true
+ name: value
+ required: true
+- description: Provide the value from the array.
+ name: array_value
+ required: true
+comment: |-
+ This transformer will get an index of an element from an array.
+ ex:["phishing","Malware"], if we provide "Malware" to array_value argument, will get index as 1.
+commonfields:
+ id: GetIndexOfArrayValue
+ version: -1
+contentitemexportablefields:
+ contentitemfields:
+ fromServerVersion: ""
+dockerimage: demisto/python3:3.10.13.84405
+enabled: true
+name: GetIndexOfArrayValue
+runas: DBotWeakRole
+runonce: false
+script: ''
+scripttarget: 0
+subtype: python3
+tags: []
+type: python
+fromversion: 6.8.0
+tests:
+- No tests (auto formatted)
diff --git a/Packs/CommunityCommonScripts/Scripts/GetIndexOfArrayValue/README.md b/Packs/CommunityCommonScripts/Scripts/GetIndexOfArrayValue/README.md
new file mode 100644
index 000000000000..9bc7e2636b3f
--- /dev/null
+++ b/Packs/CommunityCommonScripts/Scripts/GetIndexOfArrayValue/README.md
@@ -0,0 +1,25 @@
+This transformer will get an index of an element from an array.
+ex:["phishing","Malware"], if we provide "Malware" to array_value argument, will get index as 1.
+
+## Script Data
+
+---
+
+| **Name** | **Description** |
+| --- | --- |
+| Script Type | python3 |
+| Cortex XSOAR Version | 6.8.0 |
+
+## Inputs
+
+---
+
+| **Argument Name** | **Description** |
+| --- | --- |
+| value | The value is an array of elements |
+| array_value | Provide the value from the array |
+
+## Outputs
+
+---
+There are no outputs for this script.
diff --git a/Packs/CommunityCommonScripts/Scripts/GetListDatawithKeyword/GetListDatawithKeyword.py b/Packs/CommunityCommonScripts/Scripts/GetListDatawithKeyword/GetListDatawithKeyword.py
new file mode 100644
index 000000000000..847130881b7b
--- /dev/null
+++ b/Packs/CommunityCommonScripts/Scripts/GetListDatawithKeyword/GetListDatawithKeyword.py
@@ -0,0 +1,47 @@
+import demistomock as demisto # noqa: F401
+from CommonServerPython import * # noqa: F401
+
+
+import traceback
+
+result_data = []
+
+
+def get_data(key_word, json_data):
+ for i in range(len(json_data)):
+ for _key, value in json_data[i].items():
+ if key_word in value:
+ result_data.append(json_data[i])
+ break
+
+ return result_data
+
+
+''' MAIN FUNCTION '''
+
+
+def main():
+ try:
+ key_word = demisto.args()['Keyword']
+ json_data = argToList(demisto.args()['value'])
+ res = get_data(key_word, json_data)
+
+ md = tableToMarkdown("List Data", res)
+ demisto.results({
+ 'Type': entryTypes['note'],
+ 'Contents': res,
+ 'ContentsFormat': formats['json'],
+ 'HumanReadable': md,
+ 'ReadableContentsFormat': formats['markdown'],
+ 'EntryContext': {'ListData': res}
+ })
+ except Exception as ex:
+ demisto.error(traceback.format_exc()) # print the traceback
+ return_error(f'Failed to execute BaseScript. Error: {str(ex)}')
+
+
+''' ENTRY POINT '''
+
+
+if __name__ in ('__main__', '__builtin__', 'builtins'):
+ main()
diff --git a/Packs/CommunityCommonScripts/Scripts/GetListDatawithKeyword/GetListDatawithKeyword.yml b/Packs/CommunityCommonScripts/Scripts/GetListDatawithKeyword/GetListDatawithKeyword.yml
new file mode 100644
index 000000000000..ff640d2abf17
--- /dev/null
+++ b/Packs/CommunityCommonScripts/Scripts/GetListDatawithKeyword/GetListDatawithKeyword.yml
@@ -0,0 +1,45 @@
+args:
+- description: Provide a key word to search the data.
+ name: Keyword
+ required: true
+- description: The value is an array of elements.
+ isArray: true
+ name: value
+ required: true
+comment: |-
+ This transformer will get list of array elements by providing keyword. List data format
+ [
+ {
+ "folder": "abc",
+ "username": "test"
+ },
+ {
+ "folder": "def",
+ "username": "test123"
+ },
+ {
+ "folder": "ghi",
+ "username": "admin"
+ }
+ ]
+commonfields:
+ id: GetListDatawithKeyword
+ version: -1
+contentitemexportablefields:
+ contentitemfields:
+ fromServerVersion: ""
+dockerimage: demisto/python3:3.10.13.84405
+enabled: true
+name: GetListDatawithKeyword
+runas: DBotWeakRole
+runonce: false
+script: ''
+scripttarget: 0
+subtype: python3
+tags:
+- general
+- transformer
+type: python
+fromversion: 6.8.0
+tests:
+- No tests (auto formatted)
diff --git a/Packs/CommunityCommonScripts/Scripts/GetListDatawithKeyword/README.md b/Packs/CommunityCommonScripts/Scripts/GetListDatawithKeyword/README.md
new file mode 100644
index 000000000000..b274bfb174ca
--- /dev/null
+++ b/Packs/CommunityCommonScripts/Scripts/GetListDatawithKeyword/README.md
@@ -0,0 +1,39 @@
+This transformer will get list of array elements by providing keyword. List data format
+[
+ {
+ "folder": "abc",
+ "username": "test"
+ },
+ {
+ "folder": "def",
+ "username": "test123"
+ },
+ {
+ "folder": "ghi",
+ "username": "admin"
+ }
+]
+
+## Script Data
+
+---
+
+| **Name** | **Description** |
+| --- | --- |
+| Script Type | python3 |
+| Tags | general, transformer |
+| Cortex XSOAR Version | 6.8.0 |
+
+## Inputs
+
+---
+
+| **Argument Name** | **Description** |
+| --- | --- |
+| Keyword | Provide a key word to search the data |
+| value | The value is an array of elements |
+
+## Outputs
+
+---
+There are no outputs for this script.
diff --git a/Packs/CommunityCommonScripts/pack_metadata.json b/Packs/CommunityCommonScripts/pack_metadata.json
index 2e158b6a3f57..ae329608b4ff 100644
--- a/Packs/CommunityCommonScripts/pack_metadata.json
+++ b/Packs/CommunityCommonScripts/pack_metadata.json
@@ -2,7 +2,7 @@
"name": "Community Common Scripts",
"description": "A pack that contains community scripts",
"support": "community",
- "currentVersion": "1.1.4",
+ "currentVersion": "1.1.5",
"author": "",
"url": "https://live.paloaltonetworks.com/t5/cortex-xsoar-discussions/bd-p/Cortex_XSOAR_Discussions",
"email": "",
diff --git a/Packs/Core/Integrations/CortexCoreIR/CortexCoreIR.py b/Packs/Core/Integrations/CortexCoreIR/CortexCoreIR.py
index a589ca246f87..94efba870da0 100644
--- a/Packs/Core/Integrations/CortexCoreIR/CortexCoreIR.py
+++ b/Packs/Core/Integrations/CortexCoreIR/CortexCoreIR.py
@@ -461,11 +461,8 @@ def main(): # pragma: no cover
elif command == 'core-list-user-groups':
return_results(list_user_groups_command(client, args))
- elif command == 'core-list-roles':
- return_results(list_roles_command(client, args))
-
- elif command in ('core-set-user-role', 'core-remove-user-role'):
- return_results(change_user_role_command(client, args))
+ elif command == 'core-get-incidents':
+ return_outputs(*get_incidents_command(client, args))
elif command in PREVALENCE_COMMANDS:
return_results(handle_prevalence_command(client, command, args))
diff --git a/Packs/Core/Integrations/CortexCoreIR/CortexCoreIR.yml b/Packs/Core/Integrations/CortexCoreIR/CortexCoreIR.yml
index 259fdd09fbdf..172f42a88385 100644
--- a/Packs/Core/Integrations/CortexCoreIR/CortexCoreIR.yml
+++ b/Packs/Core/Integrations/CortexCoreIR/CortexCoreIR.yml
@@ -34,6 +34,119 @@ display: Investigation & Response
name: Cortex Core - IR
script:
commands:
+ - arguments:
+ - description: A date in the format 2019-12-31T23:59:00. Only incidents that were created on or before the specified date/time will be retrieved.
+ name: lte_creation_time
+ - description: A date in the format 2019-12-31T23:59:00. Only incidents that were created on or after the specified date/time will be retrieved.
+ name: gte_creation_time
+ - description: Filters returned incidents that were created on or before the specified date/time, in the format 2019-12-31T23:59:00.
+ name: lte_modification_time
+ - description: Filters returned incidents that were modified on or after the specified date/time, in the format 2019-12-31T23:59:00.
+ name: gte_modification_time
+ - description: An array or CSV string of incident IDs.
+ isArray: true
+ name: incident_id_list
+ - description: Filters returned incidents that were created on or after the specified date/time range, for example, 1 month, 2 days, 1 hour, and so on.
+ name: since_creation_time
+ - description: Filters returned incidents that were modified on or after the specified date/time range, for example, 1 month, 2 days, 1 hour, and so on.
+ name: since_modification_time
+ - auto: PREDEFINED
+ description: Sorts returned incidents by the date/time that the incident was last modified ("asc" - ascending, "desc" - descending).
+ name: sort_by_modification_time
+ predefined:
+ - asc
+ - desc
+ - auto: PREDEFINED
+ description: Sorts returned incidents by the date/time that the incident was created ("asc" - ascending, "desc" - descending).
+ name: sort_by_creation_time
+ predefined:
+ - asc
+ - desc
+ - defaultValue: '0'
+ description: Page number (for pagination). The default is 0 (the first page).
+ name: page
+ - defaultValue: '100'
+ description: Maximum number of incidents to return per page. The default and maximum is 100.
+ name: limit
+    - description: 'Filters only incidents in the specified status. The options are: new, under_investigation, resolved_known_issue, resolved_false_positive, resolved_true_positive, resolved_security_testing, resolved_other, resolved_auto.'
+ name: status
+ - auto: PREDEFINED
+ description: 'Whether the incident is starred (Boolean value: true or false).'
+ name: starred
+ predefined:
+ - 'true'
+ - 'false'
+ - description: Starred fetch window timestamp (