@@ -247,6 +247,11 @@ def delete_sample(a1000):
except Exception as e:
return_error(str(e))

results, file_result = delete_sample_output(response_json=response_json)
return [results, file_result]


def delete_sample_output(response_json):
res = response_json.get('results')
markdown = f'''## ReversingLabs A1000 delete sample\n **Message:** {res.get('message')}
**MD5:** {demisto.get(res, 'detail.md5')}
@@ -262,7 +267,7 @@ def delete_sample(a1000):
file_result = fileResult('Delete sample report file', json.dumps(response_json, indent=4),
file_type=EntryType.ENTRY_INFO_FILE)

- return [command_result, file_result]
+ return command_result, file_result


def reanalyze(a1000):
@@ -283,6 +288,11 @@ def reanalyze(a1000):
except Exception as e:
return_error(str(e))

results, file_result = reanalyze_output(response_json=response_json)
return [results, file_result]


def reanalyze_output(response_json):
try:
result = response_json.get("results")[0]
except Exception as e:
@@ -302,7 +312,7 @@ def reanalyze(a1000):
file_result = fileResult('ReAnalyze sample report file', json.dumps(response_json, indent=4),
file_type=EntryType.ENTRY_INFO_FILE)

- return [command_result, file_result]
+ return command_result, file_result


def list_extracted_files(a1000):
@@ -483,6 +493,11 @@ def advanced_search(a1000):
except Exception as e:
return_error(str(e))

results, file_result = advanced_search_output(result_list=result_list)
return [results, file_result]


def advanced_search_output(result_list):
command_result = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'a1000_advanced_search_report': result_list},
@@ -492,7 +507,7 @@ def advanced_search(a1000):
file_result = fileResult('Advanced search report file', json.dumps(result_list, indent=4),
file_type=EntryType.ENTRY_INFO_FILE)

- return [command_result, file_result]
+ return command_result, file_result


def get_url_report(a1000):
@@ -1148,7 +1163,7 @@ def sample_classification_command(a1000: A1000):

elif action == "SET CLASSIFICATION":
classification = demisto.getArg("classification")
- risk_score = demisto.getArg("risk_score")
+ risk_score = arg_to_number(demisto.getArg("risk_score"))
threat_platform = demisto.getArg("threat_platform")
threat_name = demisto.getArg("threat_name")
threat_type = demisto.getArg("threat_type")
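The pattern this PR applies to delete_sample, reanalyze, and advanced_search (and, judging by the new tests below, to the other commands as well) is to split each command into a thin wrapper that performs the API call and a standalone *_output() builder that turns the response JSON into a (CommandResults, file_result) tuple; the wrapper returns the pair as a list for the War Room, while unit tests can call the builder directly on canned JSON. The only other functional change in this file is wrapping demisto.getArg("risk_score") in arg_to_number, the CommonServerPython helper that coerces the string argument to an int (returning None when the argument is absent). A minimal sketch of the wrapper/builder split, using hypothetical example_* names and assuming the integration's existing imports (json and the CommonServerPython helpers already used above):

def example_output(response_json):
    # Pure formatting over the API response: unit-testable on a JSON fixture, no network calls.
    command_result = CommandResults(
        outputs_prefix='ReversingLabs',
        outputs={'a1000_example_report': response_json},
        readable_output='## ReversingLabs A1000 example'
    )
    file_result = fileResult('Example report file', json.dumps(response_json, indent=4),
                             file_type=EntryType.ENTRY_INFO_FILE)
    return command_result, file_result


def example_command(a1000):
    # Thin wrapper: hypothetical client call, then delegate formatting to the builder.
    response_json = a1000.example_api_call().json()
    results, file_result = example_output(response_json=response_json)
    return [results, file_result]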
@@ -1,6 +1,9 @@
import json
from ReversingLabsA1000v2 import a1000_report_output, list_extracted_files_output, get_classification_output, \
- classification_to_score, url_report_output, domain_report_output, ip_report_output, format_proxy
+ classification_to_score, url_report_output, domain_report_output, ip_report_output, format_proxy, \
+ file_analysis_status_output, pdf_report_output, static_analysis_report_output, dynamic_analysis_report_output, \
+ sample_classification_output, yara_output, yara_retro_output, list_containers_output, upload_from_url_output, \
+ delete_sample_output, reanalyze_output, advanced_search_output, VERSION, USER_AGENT, RELIABILITY, return_proxies
import demistomock as demisto
import pytest

@@ -66,6 +69,123 @@ def test_ip_report_output():
assert result.to_context() == test_context


def test_file_analysis_status_output():
test_response = util_load_json("test_data/a1000_analysis_status.json")

result = file_analysis_status_output(resp_json=test_response)

for k, v in result.to_context().items():
if k == "hash_value":
assert v == "d1aff4d205b59b1ae3edf152603fa2ae5a7c6cc5"


def test_pdf_report_output():
test_response = util_load_json("test_data/a1000_pdf_report.json")

result = pdf_report_output(resp=test_response, sample_hash="d1aff4d205b59b1ae3edf152603fa2ae5a7c6cc5", action="CHECK STATUS")

for k, v in result[0].to_context().items():
if k == "status":
assert v == 2


def test_static_analysis_report_output():
test_response = util_load_json("test_data/a1000_static_analysis.json")

result = static_analysis_report_output(resp_json=test_response, sample_hash="d1aff4d205b59b1ae3edf152603fa2ae5a7c6cc5")

for k, v in result.to_context().items():
if k == "Contents":
assert "a1000_static_analysis_report" in v


def test_dynamic_analysis_report_output():
test_response = util_load_json("test_data/a1000_dynamic_analysis.json")

result = dynamic_analysis_report_output(resp=test_response, action="CHECK STATUS", report_format="pdf",
sample_hash="d1aff4d205b59b1ae3edf152603fa2ae5a7c6cc5")

for k, v in result[0].to_context().items():
if k == "status":
assert v == 1


def test_sample_classification_output():
test_response = util_load_json("test_data/a1000_sample_classification.json")

result = sample_classification_output(resp_json=test_response, action="GET CLASSIFICATION", av_scanners=False,
sample_hash="d1aff4d205b59b1ae3edf152603fa2ae5a7c6cc5")

for k, v in result.to_context().items():
if k == "Contents":
assert "a1000_sample_classification" in v


def test_yara_output():
rulesets = util_load_json("test_data/a1000_yara_get_rulesets.json")
contents = util_load_json("test_data/a1000_yara_get_contents.json")

result_rulesets = yara_output(resp_json=rulesets, action="GET RULESETS")
result_contents = yara_output(resp_json=contents, action="GET CONTENTS")

assert result_rulesets.to_context().get("Contents").get("a1000_yara").get("count") == 4
assert result_contents.to_context().get("Contents").get("a1000_yara").get("detail").get("name") == "test_yara_rule"


def test_yara_retro_output():
local = util_load_json("test_data/a1000_yara_retro_local.json")
cloud = util_load_json("test_data/a1000_yara_retro_cloud.json")

result_local = yara_retro_output(resp_json=local, action="LOCAL SCAN STATUS")
result_cloud = yara_retro_output(resp_json=cloud, action="CLOUD SCAN STATUS")

assert result_local.to_context().get("Contents").get("a1000_yara_retro").get("status").get("state") == "COMPLETED"
assert result_cloud.to_context().get("Contents").get("a1000_yara_retro").get("status").get("cloud_status") == "ACTIVE"


def test_list_containers_output():
containers = util_load_json("test_data/a1000_list_containers.json")
result = list_containers_output(resp_json=containers)

assert result.to_context().get("Contents").get("a1000_list_containers").get("count") == 0


def test_upload_from_url_output():
upload = util_load_json("test_data/a1000_upload_from_url.json")
report = util_load_json("test_data/a1000_report_from_url.json")
check = util_load_json("test_data/a1000_check_from_url.json")

result_upload = upload_from_url_output(resp_json=upload, action="UPLOAD")
result_report = upload_from_url_output(resp_json=report, action="GET REPORT")
result_check = upload_from_url_output(resp_json=check, action="CHECK ANALYSIS STATUS")

assert result_upload.to_context().get("Contents").get("a1000_upload_from_url_actions").get("message") == "Done."
assert result_report.to_context().get("Contents").get("a1000_upload_from_url_actions").get("processing_status") == "complete"
assert result_check.to_context().get("Contents").get("a1000_upload_from_url_actions").get("processing_status") == "complete"


def test_delete_sample_output():
report = util_load_json("test_data/a1000_delete_sample.json")
result = delete_sample_output(response_json=report)

assert result[0].to_context().get("Contents").get("a1000_delete_report").get("results").get("code") == 200


def test_reanalyze_output():
report = util_load_json("test_data/a1000_reanalyze.json")
result = reanalyze_output(response_json=report)

assert (result[0].to_context().get("Contents").get("a1000_reanalyze_report").get("results")[0].get("detail").get("sha1") ==
"d1aff4d205b59b1ae3edf152603fa2ae5a7c6cc5")


def test_advanced_search_output():
report = util_load_json("test_data/a1000_advanced_search.json")
result = advanced_search_output(result_list=report)

assert result[0].to_context().get("Contents").get("a1000_advanced_search_report")[0].get("available")


def test_classification_to_score():
assert classification_to_score("MALICIOUS") == 3

@@ -77,9 +197,21 @@ def test_format_proxy():
password="pass1"
)

formatted_http = format_proxy(
addr="http://proxy-address.com",
username="user1",
password="pass1"
)

correct_expected = "https://user1:pass1@proxy-address.com"

assert formatted_correctly == correct_expected
assert formatted_http != correct_expected
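The added negative assertion pins down that format_proxy preserves the scheme of the address it is given, so an http:// proxy address can never produce the https:// expectation. One plausible shape of the helper, inferred only from these assertions (the actual implementation in ReversingLabsA1000v2.py may differ):

def format_proxy(addr, username=None, password=None):
    # Split e.g. "https://proxy-address.com" into scheme and host, then embed the credentials.
    scheme, host = addr.split('://', 1)
    if username and password:
        return f'{scheme}://{username}:{password}@{host}'
    return addr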


def test_vars():
assert USER_AGENT == "ReversingLabs XSOAR A1000 " + VERSION
assert RELIABILITY is not None


def util_load_json(path):
@@ -0,0 +1 @@
[{"available": true, "sha1": "3d615604bf4979f86befbe1358028198d9123a59", "classification": "malicious", "threatname": "Document-PDF.Phishing.Generic", "size": 171465, "sampletype": "Document/None/PDF", "antivirus": 5, "filename": "alldata-v1040w-import-disc-5-19902011-free-download.pdf", "lastseen": "2024-06-11T13:41:15Z", "filecount": 0, "factor": 10, "sha256": "877b8bab995bcb4b7ad52166c07897d1c79d03d4abf6278c78645e74bb86a8c8", "firstseen": "2024-06-11T13:25:14Z", "md5": "bb40a4830252928b43802405a1a03997"}, {"available": true, "sha1": "2954c5702057e44569f8a912ddfcd00e24b6909f", "classification": "malicious", "threatname": "Document-HTML.Trojan.Heuristic", "size": 55236, "sampletype": "Text/HTML/HTML", "antivirus": 5, "lastseen": "2024-06-11T13:38:13Z", "filecount": 0, "factor": 7, "sha256": "65394a2065064c768b44acb77c5b303b0ffdd906bc34d6b7f882f5b72ae2a6bb", "firstseen": "2024-06-11T13:24:37Z", "md5": "2c88feef1f8869a5c42f8f9e48f70cf5"}, {"available": true, "sha1": "12352dcd0a22c2a995d110aa7cb1730bb77bc4d7", "classification": "malicious", "threatname": "Document-HTML.Trojan.Heuristic", "size": 77862, "sampletype": "Text/HTML/HTML", "antivirus": 5, "lastseen": "2024-06-11T13:33:03Z", "filecount": 0, "factor": 7, "sha256": "a8ae9a80d2e77d644bcb979b5969d5033a9d3d57b033471243ebbb1bc0f06796", "firstseen": "2024-06-11T13:24:26Z", "md5": "5ab6250a891abaf4c50a882cd838b07a"}]
@@ -0,0 +1 @@
{"hash_type":"sha1","results":[{"hash_value":"d1aff4d205b59b1ae3edf152603fa2ae5a7c6cc5","status":"processed"}]}

Large diffs are not rendered by default.

@@ -0,0 +1 @@
{"results": {"code": 200, "detail": {"sha1": "e296c0ded887bcfc4c8a38e1328875d6e7caef83", "sha256": "cc027133e6f4190a5202327b20fe4f89490b8f6d8e32bc489a3a9947015486eb", "sha512": "3eb2c074dbad561e5972620b1b5af1732d49ccc5ec6f76a7bbacb3ca8333146e3c358bb6f7273f402546916f26ae17d24931b6e07783b2092a20c5e7c337c22e", "md5": "fb098286a31ec260a8e05da98e35a5cf"}, "message": "Sample deleted successfully."}}
@@ -0,0 +1 @@
{"status": 1, "message": "PDF does not exist and must be created."}
@@ -0,0 +1 @@
{"count":0,"next":null,"previous":null,"results":[]}
@@ -0,0 +1 @@
{"status": 2, "status_message": "PDF is ready for download."}
@@ -0,0 +1 @@
{"results": [{"detail": {"sha1": "d1aff4d205b59b1ae3edf152603fa2ae5a7c6cc5", "sha256": "121c3fc43774abf1054ca9b7c10ebb49677889a527f34c7268872e7d670a2233", "sha512": "bc32ea53671eb549fb994a6c9188e7beed03e87a647c73c73a23818ff3d848778b138f8f6e177ae6089a9b8372253f6176d6d54e9c57c2c2c8d4a01234c4b08f", "md5": "debaf7021f49695eadcaaab58d502eb9", "imphash": "feb9bebf646137f4ff73e503cbcb6361"}, "analysis": [{"name": "cloud", "code": 201, "message": "Sample is queued for analysis."}, {"name": "core", "code": 201, "message": "Sample is queued for core analysis."}, {"name": "rl_cloud_sandbox", "code": 200, "message": "Sample is already queued for analysis."}, {"name": "fireeye", "code": 405, "message": "Sandbox integration is not configured."}, {"name": "cape", "code": 405, "message": "Sandbox integration is not configured."}, {"name": "cuckoo", "code": 405, "message": "Sandbox integration is not configured."}, {"name": "joe", "code": 405, "message": "Sandbox integration is not configured."}]}]}

Large diffs are not rendered by default.

@@ -0,0 +1 @@
{"classification":"malicious","riskscore":10,"first_seen":"2024-01-08T16:36:03Z","last_seen":"2024-06-10T10:31:18Z","classification_result":"Win32.Ransomware.Petya","classification_reason":"Antivirus","classification_origin":null,"cloud_last_lookup":"2024-06-11T00:38:52Z","data_source":"LOCAL","sha1":"d1aff4d205b59b1ae3edf152603fa2ae5a7c6cc5","sha256":"121c3fc43774abf1054ca9b7c10ebb49677889a527f34c7268872e7d670a2233","md5":"debaf7021f49695eadcaaab58d502eb9"}

Large diffs are not rendered by default.

@@ -0,0 +1 @@
{"code":201,"message":"Done.","detail":{"id":421,"user":1,"created":"2024-06-11T11:51:17.795939Z","filename":"https://download.sublimetext.com/sublime_text_build_4169_x64_setup.exe"}}
@@ -0,0 +1 @@
{"code":200,"detail":{"name":"test_yara_rule","content":"rule example_rule{condition:false}"}}
@@ -0,0 +1 @@
{"count":4,"next":null,"previous":null,"results":[{"status":"pending","suspicious_match_count":0,"malicious_match_count":1,"goodware_match_count":27,"unknown_match_count":1,"name":"get_money3","owner":"admin","last_matched":"2024-06-05T15:52:25.785993Z","system_ruleset":false,"cloud_synced":false},{"status":"pending","suspicious_match_count":0,"malicious_match_count":0,"goodware_match_count":2,"unknown_match_count":0,"name":"Rule_Find_PDF_with_URLs","owner":"admin","last_matched":"2024-05-24T16:00:19.220946Z","system_ruleset":false,"cloud_synced":false},{"status":"pending","suspicious_match_count":0,"malicious_match_count":0,"goodware_match_count":0,"unknown_match_count":0,"name":"MislavTesting","owner":"admin","last_matched":null,"system_ruleset":false,"cloud_synced":false},{"status":"active","suspicious_match_count":0,"malicious_match_count":0,"goodware_match_count":0,"unknown_match_count":0,"name":"test_yara_rule","owner":"admin","last_matched":null,"system_ruleset":false,"cloud_synced":true}],"type":"my","status":"all","source":"all"}
@@ -0,0 +1 @@
{"success": true, "message": "Ruleset is saved on the cloud.", "ruleset_name": "test_yara_rule", "status": {"cloud_status": "ACTIVE", "scale_status": "ACTIVE", "retrohunt_status": "CANCELLED", "retrohunt_start_time": null, "retrohunt_finish_time": null, "retrohunt_estimated_finish_time": null, "retrohunt_progress": null, "reason": null}}
@@ -0,0 +1 @@
{"success": true, "message": null, "status": {"state": "COMPLETED", "started": "2024-05-24T15:58:55.075337+00:00", "stopped": "2024-05-24T16:28:13.110974+00:00", "samples": 281, "processed": 373, "history": [{"state": "COMPLETED", "started": "2024-05-24T15:58:55.075337+00:00", "stopped": "2024-05-24T16:28:13.110974+00:00", "samples": 281, "started_username": "admin", "stopped_username": null}, {"state": "COMPLETED", "started": "2022-11-15T10:14:16.515681+00:00", "stopped": "2022-11-15T10:14:20.687855+00:00", "samples": 11, "started_username": "admin", "stopped_username": null}, {"state": "COMPLETED", "started": "2022-11-11T15:02:00.683418+00:00", "stopped": "2022-11-11T15:02:07.011490+00:00", "samples": 11, "started_username": "admin", "stopped_username": null}]}}