
Xdr change command results list (#31956)
* save change to command results

* precommit fixes

* revert get_script_execution_results_command

* fix tests

* rn

* fix bug found by the test playbook

* Bump pack from version CortexXDR to 6.1.3.

* Bump pack from version ctf01 to 1.0.5.

* Apply suggestions from code review

Co-authored-by: Dan Tavori <38749041+dantavori@users.noreply.github.com>

* Apply suggestions from code review

Co-authored-by: Dan Tavori <38749041+dantavori@users.noreply.github.com>

* Bump pack from version Core to 3.0.7.

* save changes from cr in rn

* rn

* Bump pack from version Core to 3.0.8.

---------

Co-authored-by: Content Bot <bot@demisto.com>
Co-authored-by: Dan Tavori <38749041+dantavori@users.noreply.github.com>
3 people committed Jan 10, 2024
1 parent a76a587 commit 7181d66
Showing 8 changed files with 91 additions and 54 deletions.
98 changes: 54 additions & 44 deletions Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule.py
@@ -1407,10 +1407,9 @@ def run_polling_command(client: CoreClient,
     if command_decision_field not in args:
         # create new command run
         command_results = command_function(client, args)
-        if isinstance(command_results, CommandResults):
-            outputs = [command_results.raw_response] if command_results.raw_response else []
-        else:
-            outputs = [c.raw_response for c in command_results]
+        outputs = command_results.raw_response
+        if outputs and not isinstance(outputs, list):
+            outputs = [outputs]
         command_decision_values = [o.get(command_decision_field) for o in outputs] if outputs else []  # type: ignore
         if outputs and command_decision_values:
             polling_args = {
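The normalization above is easiest to see in isolation. A minimal standalone sketch of the new behaviour, using a hypothetical `FakeCommandResults` stand-in instead of the real `CommandResults` class from CommonServerPython:

```python
from dataclasses import dataclass
from typing import Any, List


@dataclass
class FakeCommandResults:
    """Hypothetical stand-in for CommandResults; only raw_response matters here."""
    raw_response: Any = None


def decision_values(command_results: FakeCommandResults, decision_field: str) -> List[Any]:
    # Mirrors the new logic: raw_response may be a single reply dict or a list of reply dicts.
    outputs = command_results.raw_response
    if outputs and not isinstance(outputs, list):
        outputs = [outputs]
    return [o.get(decision_field) for o in outputs] if outputs else []


print(decision_values(FakeCommandResults({'action_id': 1}), 'action_id'))                      # [1]
print(decision_values(FakeCommandResults([{'action_id': 1}, {'action_id': 2}]), 'action_id'))  # [1, 2]
print(decision_values(FakeCommandResults(None), 'action_id'))                                  # []
```

Either shape of `raw_response` now yields a flat list, so every decision value is available for polling.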
@@ -2050,65 +2049,72 @@ def run_script_execute_commands_command(client: CoreClient, args: Dict) -> Comma
     )


-def run_script_kill_process_command(client: CoreClient, args: Dict) -> List[CommandResults]:
+def run_script_kill_process_command(client: CoreClient, args: Dict) -> CommandResults:
     endpoint_ids = argToList(args.get('endpoint_ids'))
     incident_id = arg_to_number(args.get('incident_id'))
     timeout = arg_to_number(args.get('timeout', 600)) or 600
     processes_names = argToList(args.get('process_name'))
-    all_processes_response = []
+    replies = []
+
     for process_name in processes_names:
         parameters = {'process_name': process_name}
         response = client.run_script('fd0a544a99a9421222b4f57a11839481', endpoint_ids, parameters, timeout, incident_id)
         reply = response.get('reply')
-        all_processes_response.append(CommandResults(
-            readable_output=tableToMarkdown(f'Run Script Kill Process on {process_name}', reply),
-            outputs_prefix=f'{args.get("integration_context_brand", "CoreApiModule")}.ScriptRun',
-            outputs_key_field='action_id',
-            outputs=reply,
-            raw_response=reply,
-        ))
+        replies.append(reply)

-    return all_processes_response
+    command_result = CommandResults(
+        readable_output=tableToMarkdown("Run Script Kill Process Results", replies),
+        outputs_prefix=f'{args.get("integration_context_brand", "CoreApiModule")}.ScriptRun',
+        outputs_key_field='action_id',
+        outputs=replies,
+        raw_response=replies,
+    )
+
+    return command_result


-def run_script_file_exists_command(client: CoreClient, args: Dict) -> List[CommandResults]:
+def run_script_file_exists_command(client: CoreClient, args: Dict) -> CommandResults:
     endpoint_ids = argToList(args.get('endpoint_ids'))
     incident_id = arg_to_number(args.get('incident_id'))
     timeout = arg_to_number(args.get('timeout', 600)) or 600
     file_paths = argToList(args.get('file_path'))
-    all_files_response = []
+    replies = []
     for file_path in file_paths:
         parameters = {'path': file_path}
         response = client.run_script('414763381b5bfb7b05796c9fe690df46', endpoint_ids, parameters, timeout, incident_id)
         reply = response.get('reply')
-        all_files_response.append(CommandResults(
-            readable_output=tableToMarkdown(f'Run Script File Exists on {file_path}', reply),
-            outputs_prefix=f'{args.get("integration_context_brand", "CoreApiModule")}.ScriptRun',
-            outputs_key_field='action_id',
-            outputs=reply,
-            raw_response=reply,
-        ))
-    return all_files_response
+        replies.append(reply)
+
+    command_result = CommandResults(
+        readable_output=tableToMarkdown(f'Run Script File Exists on {",".join(file_paths)}', replies),
+        outputs_prefix=f'{args.get("integration_context_brand", "CoreApiModule")}.ScriptRun',
+        outputs_key_field='action_id',
+        outputs=replies,
+        raw_response=replies,
+    )
+    return command_result


-def run_script_delete_file_command(client: CoreClient, args: Dict) -> List[CommandResults]:
+def run_script_delete_file_command(client: CoreClient, args: Dict) -> CommandResults:
     endpoint_ids = argToList(args.get('endpoint_ids'))
     incident_id = arg_to_number(args.get('incident_id'))
     timeout = arg_to_number(args.get('timeout', 600)) or 600
     file_paths = argToList(args.get('file_path'))
-    all_files_response = []
+    replies = []
     for file_path in file_paths:
         parameters = {'file_path': file_path}
         response = client.run_script('548023b6e4a01ec51a495ba6e5d2a15d', endpoint_ids, parameters, timeout, incident_id)
         reply = response.get('reply')
-        all_files_response.append(CommandResults(
-            readable_output=tableToMarkdown(f'Run Script Delete File on {file_path}', reply),
-            outputs_prefix=f'{args.get("integration_context_brand", "CoreApiModule")}.ScriptRun',
-            outputs_key_field='action_id',
-            outputs=reply,
-            raw_response=reply,
-        ))
-    return all_files_response
+        replies.append(reply)
+
+    command_result = CommandResults(
+        readable_output=tableToMarkdown(f'Run Script Delete File on {",".join(file_paths)}', replies),
+        outputs_prefix=f'{args.get("integration_context_brand", "CoreApiModule")}.ScriptRun',
+        outputs_key_field='action_id',
+        outputs=replies,
+        raw_response=replies,
+    )
+    return command_result


 def quarantine_files_command(client, args):
@@ -3064,21 +3070,25 @@ def run_script_command(client: CoreClient, args: Dict) -> CommandResults:
     )


-def get_script_execution_status_command(client: CoreClient, args: Dict) -> List[CommandResults]:
+def get_script_execution_status_command(client: CoreClient, args: Dict) -> CommandResults:
     action_ids = argToList(args.get('action_id', ''))
-    command_results = []
+    replies = []
+    raw_responses = []
     for action_id in action_ids:
         response = client.get_script_execution_status(action_id)
         reply = response.get('reply')
         reply['action_id'] = int(action_id)
-        command_results.append(CommandResults(
-            readable_output=tableToMarkdown(f'Script Execution Status - {action_id}', reply),
-            outputs_prefix=f'{args.get("integration_context_brand", "CoreApiModule")}.ScriptStatus',
-            outputs_key_field='action_id',
-            outputs=reply,
-            raw_response=response,
-        ))
-    return command_results
+        replies.append(reply)
+        raw_responses.append(response)
+
+    command_result = CommandResults(
+        readable_output=tableToMarkdown(f'Script Execution Status - {",".join(str(i) for i in action_ids)}', replies),
+        outputs_prefix=f'{args.get("integration_context_brand", "CoreApiModule")}.ScriptStatus',
+        outputs_key_field='action_id',
+        outputs=replies,
+        raw_response=raw_responses,
+    )
+    return command_result


 def parse_get_script_execution_results(results: List[Dict]) -> List[Dict]:
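Each rewritten command now follows the same aggregation pattern: loop over the list argument, collect every reply, and wrap the collected list in a single result. A standalone, hedged sketch of that pattern (`FakeClient` and `SimpleResult` are hypothetical stand-ins for `CoreClient` and `CommandResults`, used only to keep the example runnable):

```python
from typing import Any, Dict, List


class FakeClient:
    """Hypothetical stand-in for CoreClient: returns a canned reply per call."""

    def __init__(self) -> None:
        self._next_action_id = 0

    def run_script(self, script_uid: str, endpoint_ids: List[str], parameters: Dict[str, Any],
                   timeout: int, incident_id: Any) -> Dict[str, Any]:
        self._next_action_id += 1
        return {'reply': {'action_id': self._next_action_id, **parameters}}


class SimpleResult:
    """Hypothetical stand-in for CommandResults; only the outputs list matters here."""

    def __init__(self, outputs: List[Dict[str, Any]]):
        self.outputs = outputs


def kill_processes(client: FakeClient, process_names: List[str]) -> SimpleResult:
    replies = []
    for process_name in process_names:
        response = client.run_script('<script-uid>', ['<endpoint-id>'], {'process_name': process_name}, 600, None)
        replies.append(response.get('reply'))
    return SimpleResult(outputs=replies)  # one result object for the whole batch


result = kill_processes(FakeClient(), ['chrome.exe', 'notepad.exe'])
print(len(result.outputs))  # 2 -> no reply is dropped when several arguments are passed
```

Returning one object whose outputs are the full list, rather than one object per item, is what lets downstream polling and context handling see every reply.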
14 changes: 7 additions & 7 deletions Packs/ApiModules/Scripts/CoreIRApiModule/CoreIRApiModule_test.py
@@ -1752,7 +1752,7 @@ def test_get_script_execution_status_command(requests_mock):
     response = get_script_execution_status_command(client, args)

     api_response['reply']['action_id'] = int(action_id)
-    assert response[0].outputs == api_response.get('reply')
+    assert response.outputs[0] == api_response.get('reply')
     assert requests_mock.request_history[0].json() == {
         'request_data': {
             'action_id': action_id
@@ -1931,7 +1931,7 @@ def test_run_script_delete_file_command(requests_mock):

     response = run_script_delete_file_command(client, args)

-    assert response[0].outputs == api_response.get('reply')
+    assert response.outputs[0] == api_response.get('reply')
     assert requests_mock.request_history[0].json() == {
         'request_data': {
             'script_uid': '548023b6e4a01ec51a495ba6e5d2a15d',
@@ -1978,7 +1978,7 @@ def test_run_script_delete_multiple_files_command(requests_mock):

     response = run_script_delete_file_command(client, args)

-    assert response[0].outputs == api_response.get('reply')
+    assert response.outputs[0] == api_response.get('reply')
     assert requests_mock.request_history[0].json() == {
         'request_data': {
             'script_uid': '548023b6e4a01ec51a495ba6e5d2a15d',
@@ -2038,7 +2038,7 @@ def test_run_script_file_exists_command(requests_mock):

     response = run_script_file_exists_command(client, args)

-    assert response[0].outputs == api_response.get('reply')
+    assert response.outputs[0] == api_response.get('reply')
     assert requests_mock.request_history[0].json() == {
         'request_data': {
             'script_uid': '414763381b5bfb7b05796c9fe690df46',
@@ -2085,7 +2085,7 @@ def test_run_script_file_exists_multiple_files_command(requests_mock):

     response = run_script_file_exists_command(client, args)

-    assert response[0].outputs == api_response.get('reply')
+    assert response.outputs[0] == api_response.get('reply')
     assert requests_mock.request_history[0].json() == {
         'request_data': {
             'script_uid': '414763381b5bfb7b05796c9fe690df46',
@@ -2145,7 +2145,7 @@ def test_run_script_kill_process_command(requests_mock):

     response = run_script_kill_process_command(client, args)

-    assert response[0].outputs == api_response.get('reply')
+    assert response.outputs[0] == api_response.get('reply')
     assert requests_mock.request_history[0].json() == {
         'request_data': {
             'script_uid': 'fd0a544a99a9421222b4f57a11839481',
@@ -2192,7 +2192,7 @@ def test_run_script_kill_multiple_processes_command(requests_mock):

     response = run_script_kill_process_command(client, args)

-    assert response[0].outputs == api_response.get('reply')
+    assert response.outputs[0] == api_response.get('reply')
     assert requests_mock.request_history[0].json() == {
         'request_data': {
             'script_uid': 'fd0a544a99a9421222b4f57a11839481',
9 changes: 9 additions & 0 deletions Packs/Core/ReleaseNotes/3_0_8.md
@@ -0,0 +1,9 @@

#### Integrations

##### Investigation & Response

- Fixed an issue where the following polling commands retrieved partial results when a list of arguments was provided:
- ***core-run-script-kill-process***
- ***core-run-script-file-exists***
- ***core-run-script-delete-file***
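As an illustration (not part of the release note), a "list of arguments" reaches these commands as a comma-separated value; a hypothetical war-room invocation with placeholder endpoint ID and paths:

```
!core-run-script-file-exists endpoint_ids="<endpoint_id>" file_path="C:\temp\a.txt,C:\temp\b.txt"
```

After the fix, both paths should contribute a reply to the single returned result rather than only the first being reflected.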
2 changes: 1 addition & 1 deletion Packs/Core/pack_metadata.json
@@ -2,7 +2,7 @@
     "name": "Core - Investigation and Response",
     "description": "Automates incident response",
     "support": "xsoar",
-    "currentVersion": "3.0.7",
+    "currentVersion": "3.0.8",
     "author": "Cortex XSOAR",
     "url": "https://www.paloaltonetworks.com/cortex",
     "email": "",
10 changes: 10 additions & 0 deletions Packs/CortexXDR/ReleaseNotes/6_1_3.md
@@ -0,0 +1,10 @@

#### Integrations

##### Palo Alto Networks Cortex XDR - Investigation and Response

- Fixed an issue where the following polling commands retrieved partial results when a list of arguments was provided:
- ***xdr-kill-process-script-execute***
- ***xdr-file-exist-script-execute***
- ***xdr-get-script-execution-status***
- ***xdr-file-delete-script-execute***
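For example (illustrative only, with placeholder action IDs), querying the status of several script runs in one call:

```
!xdr-get-script-execution-status action_id="1,2,3"
```

With this fix, all requested statuses should come back in one table and one context list keyed by `action_id`, instead of partial results.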
2 changes: 1 addition & 1 deletion Packs/CortexXDR/pack_metadata.json
@@ -2,7 +2,7 @@
     "name": "Cortex XDR by Palo Alto Networks",
     "description": "Automates Cortex XDR incident response, and includes custom Cortex XDR incident views and layouts to aid analyst investigations.",
     "support": "xsoar",
-    "currentVersion": "6.1.2",
+    "currentVersion": "6.1.3",
     "author": "Cortex XSOAR",
     "url": "https://www.paloaltonetworks.com/cortex",
     "email": "",
8 changes: 8 additions & 0 deletions Packs/ctf01/ReleaseNotes/1_0_5.md
@@ -0,0 +1,8 @@

<!--
#### Integrations
##### Cortex XDR - IR CTF
- No changes related directly to this integration.
-->
2 changes: 1 addition & 1 deletion Packs/ctf01/pack_metadata.json
@@ -2,7 +2,7 @@
     "name": "Capture The Flag - 01",
     "description": "XSOAR's Capture the flag (CTF)",
     "support": "xsoar",
-    "currentVersion": "1.0.4",
+    "currentVersion": "1.0.5",
     "serverMinVersion": "8.2.0",
     "author": "Cortex XSOAR",
     "url": "https://www.paloaltonetworks.com/cortex",
