Use black to autoformat code locally
KingAkeem committed Oct 26, 2023
1 parent d24503f commit af64fd6
Showing 9 changed files with 225 additions and 147 deletions.
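
Black can be run locally before committing; the usual workflow is to install it with pip install black and then run black . from the repository root. As a rough illustration of the kind of rewrite it applies (a hypothetical sketch, not part of this commit), black's Python API normalizes quote style the same way the hunks below do:

    # Hypothetical illustration only; requires black (pip install black).
    import black

    source = "soup = BeautifulSoup(resp.text, 'html.parser')\n"

    # format_str applies the same rules as the black CLI; FileMode() uses the
    # defaults (88-character lines, double quotes preferred).
    print(black.format_str(source, mode=black.FileMode()))
    # prints: soup = BeautifulSoup(resp.text, "html.parser")
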
2 changes: 1 addition & 1 deletion torbot/modules/api.py
@@ -17,7 +17,7 @@ def get_ip(client: httpx.Client) -> dict:
     Returns the IP address of the current Tor client the service is using.
     """
     resp = client.get("https://check.torproject.org/")
-    soup = BeautifulSoup(resp.text, 'html.parser')
+    soup = BeautifulSoup(resp.text, "html.parser")
 
     # Get the content of check tor project, this contains the header and body
     content = soup.find("div", {"class": "content"})
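
For context, a minimal sketch (not part of the diff) of the lookup this hunk belongs to: fetch check.torproject.org and read its content div with BeautifulSoup. The proxy address is an assumption, since TorBot normally routes requests through a local Tor SOCKS proxy, and httpx needs the socks extra installed for this to work:

    import httpx
    from bs4 import BeautifulSoup

    # Assumes a Tor SOCKS proxy on 127.0.0.1:9050 and httpx[socks] installed.
    with httpx.Client(proxies="socks5://127.0.0.1:9050") as client:
        resp = client.get("https://check.torproject.org/")
        soup = BeautifulSoup(resp.text, "html.parser")
        content = soup.find("div", {"class": "content"})
        print(content.get_text(strip=True) if content else "no content div found")
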
36 changes: 18 additions & 18 deletions torbot/modules/color.py
@@ -2,23 +2,23 @@
 Module containing class with colors
 """
 COLORS = {
-    'white': "\033[1;37m",
-    'yellow': "\033[1;33m",
-    'green': "\033[1;32m",
-    'blue': "\033[1;34m",
-    'cyan': "\033[1;36m",
-    'red': "\033[1;31m",
-    'magenta': "\033[1;35m",
-    'black': "\033[1;30m",
-    'darkwhite': "\033[0;37m",
-    'darkyellow': "\033[0;33m",
-    'darkgreen': "\033[0;32m",
-    'darkblue': "\033[0;34m",
-    'darkcyan': "\033[0;36m",
-    'darkred': "\033[0;31m",
-    'darkmagenta': "\033[0;35m",
-    'darkblack': "\033[0;30m",
-    'end': "\033[0;0m"
+    "white": "\033[1;37m",
+    "yellow": "\033[1;33m",
+    "green": "\033[1;32m",
+    "blue": "\033[1;34m",
+    "cyan": "\033[1;36m",
+    "red": "\033[1;31m",
+    "magenta": "\033[1;35m",
+    "black": "\033[1;30m",
+    "darkwhite": "\033[0;37m",
+    "darkyellow": "\033[0;33m",
+    "darkgreen": "\033[0;32m",
+    "darkblue": "\033[0;34m",
+    "darkcyan": "\033[0;36m",
+    "darkred": "\033[0;31m",
+    "darkmagenta": "\033[0;35m",
+    "darkblack": "\033[0;30m",
+    "end": "\033[0;0m",
 }
 
 
@@ -38,7 +38,7 @@ def __init__(self, message, selected):
         self._color = COLORS[selected]
 
     def __str__(self):
-        return self._color + self._msg + COLORS['end']
+        return self._color + self._msg + COLORS["end"]
 
     def __add__(self, other):
         return str(self) + other
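
For context, a minimal usage sketch of the class this module defines (not part of the diff); the class name Color and the import path are assumptions, since the class statement itself lies outside the hunks shown:

    from torbot.modules.color import Color  # assumed name and path, not shown above

    # The second argument must be a key in COLORS; __str__ wraps the message in the
    # ANSI code and resets it with COLORS["end"], while __add__ concatenates the
    # colored string with plain text.
    warning = Color("onion service unreachable", "red")
    print(warning)
    print(warning + " (see log for details)")
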
120 changes: 75 additions & 45 deletions torbot/modules/info.py
@@ -26,13 +26,35 @@
 everything = []
 bad_intel = set() # unclean intel urls
 bad_scripts = set() # unclean javascript file urls
-datasets = [files, intel, robots, custom, failed, scripts, external, fuzzable, endpoints, keys]
+datasets = [
+    files,
+    intel,
+    robots,
+    custom,
+    failed,
+    scripts,
+    external,
+    fuzzable,
+    endpoints,
+    keys,
+]
 dataset_names = [
-    'files', 'intel', 'robots', 'custom', 'failed', 'scripts', 'external', 'fuzzable', 'endpoints', 'keys'
+    "files",
+    "intel",
+    "robots",
+    "custom",
+    "failed",
+    "scripts",
+    "external",
+    "fuzzable",
+    "endpoints",
+    "keys",
 ]
 
 
-def execute_all(client: httpx.Client, link: str, *, display_status: bool = False) -> None:
+def execute_all(
+    client: httpx.Client, link: str, *, display_status: bool = False
+) -> None:
     """Initialise datasets and functions to retrieve data, and execute
     each for a given link.
@@ -43,141 +65,149 @@ def execute_all(client: httpx.Client, link: str, *, display_status: bool = False
     """
 
     resp = client.get(url=link)
-    soup = BeautifulSoup(resp.text, 'html.parser')
+    soup = BeautifulSoup(resp.text, "html.parser")
     validation_functions = [
-        get_robots_txt, get_dot_git, get_dot_svn, get_dot_git, get_intel, get_dot_htaccess, get_bitcoin_address
+        get_robots_txt,
+        get_dot_git,
+        get_dot_svn,
+        get_dot_git,
+        get_intel,
+        get_dot_htaccess,
+        get_bitcoin_address,
     ]
     for validate_func in validation_functions:
         try:
             validate_func(client, link, resp)
         except Exception as e:
             logging.debug(e)
-            cprint('Error', 'red')
+            cprint("Error", "red")
 
     display_webpage_description(soup)
     # display_headers(response)


 def display_headers(response):
-    """ Print all headers in response object.
+    """Print all headers in response object.
     Args:
         response (object): Response object.
     """
-    print('''
+    print(
+        """
         RESPONSE HEADERS
         __________________
-        ''')
+        """
+    )
     for key, val in response.headers.items():
-        print('*', key, ':', val)
+        print("*", key, ":", val)
 
 
 def get_robots_txt(client: httpx.Client, target: str, response: str) -> None:
-    """ Check link for Robot.txt, and if found, add link to robots dataset.
+    """Check link for Robot.txt, and if found, add link to robots dataset.
     Args:
         target (str): URL to be checked.
         response (object): Response object containing data to check.
     """
-    cprint("[*]Checking for Robots.txt", 'yellow')
+    cprint("[*]Checking for Robots.txt", "yellow")
     url = target
     target = "{0.scheme}://{0.netloc}/".format(urlsplit(url))
     client.get(target + "robots.txt")
     print(target + "robots.txt")
-    matches = re.findall(r'Allow: (.*)|Disallow: (.*)', response)
+    matches = re.findall(r"Allow: (.*)|Disallow: (.*)", response)
     for match in matches:
-        match = ''.join(match)
-        if '*' not in match:
+        match = "".join(match)
+        if "*" not in match:
             url = target + match
             robots.add(url)
-    cprint("Robots.txt found", 'blue')
+    cprint("Robots.txt found", "blue")
     print(robots)
 
 
 def get_intel(client: httpx.Client, url: str, response: str) -> None:
-    """ Check link for intel, and if found, add link to intel dataset,
+    """Check link for intel, and if found, add link to intel dataset,
     including but not limited to website accounts and AWS buckets.
     Args:
         target (str): URL to be checked.
         response (object): Response object containing data to check.
     """
     intel = set()
-    regex = r'''([\w\.-]+s[\w\.-]+\.amazonaws\.com)|([\w\.-]+@[\w\.-]+\.[\.\w]+)'''
+    regex = r"""([\w\.-]+s[\w\.-]+\.amazonaws\.com)|([\w\.-]+@[\w\.-]+\.[\.\w]+)"""
     matches = re.findall(regex, response)
     print("Intel\n--------\n\n")
     for match in matches:
         intel.add(match)
 
 
 def get_dot_git(client: httpx.Client, target: str, response: str) -> None:
-    """ Check link for .git folders exposed on public domain.
+    """Check link for .git folders exposed on public domain.
     Args:
         target (str): URL to be checked.
         response (object): Response object containing data to check.
     """
-    cprint("[*]Checking for .git folder", 'yellow')
+    cprint("[*]Checking for .git folder", "yellow")
     url = target
     target = "{0.scheme}://{0.netloc}/".format(urlsplit(url))
     resp = client.get(target + "/.git/config")
     if not resp.text.__contains__("404"):
-        cprint("Alert!", 'red')
-        cprint(".git folder exposed publicly", 'red')
+        cprint("Alert!", "red")
+        cprint(".git folder exposed publicly", "red")
     else:
-        cprint("NO .git folder found", 'blue')
+        cprint("NO .git folder found", "blue")
 
 
 def get_bitcoin_address(client: httpx.Client, target: str, response: str) -> None:
-    """ Check link for Bitcoin addresses, and if found, print.
+    """Check link for Bitcoin addresses, and if found, print.
     Args:
         target (str): URL to be checked.
        response (object): Response object containing data to check.
     """
-    bitcoins = re.findall(r'^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$', response)
+    bitcoins = re.findall(r"^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$", response)
     print("BTC FOUND: ", len(bitcoins))
     for bitcoin in bitcoins:
         print("BTC: ", bitcoin)


 def get_dot_svn(client: httpx.Client, target: str, response: str) -> None:
-    """ Check link for .svn folders exposed on public domain=.
+    """Check link for .svn folders exposed on public domain=.
     Args:
         target (str): URL to be checked.
         response (object): Response object containing data to check.
     """
-    cprint("[*]Checking for .svn folder", 'yellow')
+    cprint("[*]Checking for .svn folder", "yellow")
     url = target
     target = "{0.scheme}://{0.netloc}/".format(urlsplit(url))
-    resp = httpx.get(target + "/.svn/entries", proxies='socks5://127.0.0.1:9050')
+    resp = httpx.get(target + "/.svn/entries", proxies="socks5://127.0.0.1:9050")
     if not resp.text.__contains__("404"):
-        cprint("Alert!", 'red')
-        cprint(".SVN folder exposed publicly", 'red')
+        cprint("Alert!", "red")
+        cprint(".SVN folder exposed publicly", "red")
     else:
-        cprint("NO .SVN folder found", 'blue')
+        cprint("NO .SVN folder found", "blue")
 
 
 def get_dot_htaccess(client: httpx.Client, target: str, response: str) -> None:
-    """ Check link for .htaccess files on public domain.
+    """Check link for .htaccess files on public domain.
     Args:
         target (str): URL to be checked.
         response (object): Response object containing data to check.
     """
-    cprint("[*]Checking for .htaccess", 'yellow')
+    cprint("[*]Checking for .htaccess", "yellow")
     url = target
     target = "{0.scheme}://{0.netloc}/".format(urlsplit(url))
-    resp = httpx.get(target + "/.htaccess", proxies='socks5://127.0.0.1:9050')
+    resp = httpx.get(target + "/.htaccess", proxies="socks5://127.0.0.1:9050")
     if resp.text.__contains__("403"):
-        cprint("403 Forbidden", 'blue')
+        cprint("403 Forbidden", "blue")
     elif not resp.text.__contains__("404") or resp.text.__contains__("500"):
-        cprint("Alert!!", 'blue')
-        cprint(".htaccess file found!", 'blue')
+        cprint("Alert!!", "blue")
+        cprint(".htaccess file found!", "blue")
     else:
-        cprint("Response", 'blue')
-        cprint(resp, 'blue')
+        cprint("Response", "blue")
+        cprint(resp, "blue")
 
 
 def display_webpage_description(soup: BeautifulSoup) -> None:
@@ -186,8 +216,8 @@ def display_webpage_description(soup: BeautifulSoup) -> None:
     Args:
         soup (object): Processed HTML object.
     """
-    cprint("[*]Checking for meta tag", 'yellow')
-    metatags = soup.find_all('meta')
+    cprint("[*]Checking for meta tag", "yellow")
+    metatags = soup.find_all("meta")
     for meta in metatags:
         print("Meta : ", meta)
 
@@ -202,11 +232,11 @@ def writer(datasets, dataset_names, output_dir):
     """
     for dataset, dataset_name in zip(datasets, dataset_names):
         if dataset:
-            filepath = output_dir + '/' + dataset_name + '.txt'
+            filepath = output_dir + "/" + dataset_name + ".txt"
 
-            with open(filepath, 'w+', encoding='utf8') as f:
-                f.write(str('\n'.join(dataset)))
-                f.write('\n')
+            with open(filepath, "w+", encoding="utf8") as f:
+                f.write(str("\n".join(dataset)))
+                f.write("\n")
         # else:
         #     with open(filepath, 'w+') as f:
        #         joined = '\n'.join(dataset)
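
As a usage sketch (not part of the commit), the reformatted execute_all could be driven with an httpx client routed through the same local Tor SOCKS proxy this module already points at; the onion address below is a placeholder:

    import httpx
    from torbot.modules.info import execute_all

    # Assumes Tor is listening on 127.0.0.1:9050 and httpx[socks] is installed.
    # The URL is a placeholder, not a real hidden service.
    with httpx.Client(proxies="socks5://127.0.0.1:9050", timeout=60) as client:
        execute_all(client, "http://example.onion", display_status=True)
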