Skip to content
Permalink
Browse files

Added the search mixin to support Google web search functionality.

Refactored the google_site_web module to leverage the new Google search mixin.

Fixed a bug in the profiler module. Resolves issue #148.

Added gzip response detection and deflation to the internal requests library.

Updated the gitignore file.

Fixed a bug in the core framework module.

Added the ghdb module.

Added the Google Hacking Database for use with the ghdb module.

Fixed a bug in the config loading process when initializing a workspace.

Fixed a bug in the csv_file import module. (@EthanR)

Updated the banner.

Added the necessary components to support repositories as a new data source/sink.

Added several module methods for querying and searching the Github API.

Added the github_miner, github_repos, and github_dorks modules.

Added the gists_search module.

Added a custom list of google dorks for use with the ghdb module.

Fixed several bugs in the bing_linkedin, linkedin, and linkedin_crawl modules. Resolves issue #149. (@fullmetalcache)

Fixed a bug in the leaks_dump module caused by a change in the API.

Optimized the is_hash hash detection method.

Fixed a bug in the hashes_org module caused by a change in the API.

Removed an unneeded comment from the salesmaple module.

Added the metacrawler module and dependencies.

Removed the facebook module. Resolves issue #151.

Removed the youtube module due to the API no longer being available. Resolves issue #153.
  • Loading branch information...
lanmaster53 committed Jul 14, 2015
1 parent a340236 commit 55c604b23e3985ec0b3dbd043be532d9eedc1e87
@@ -1,3 +1,4 @@
*.pyc
*sublime*
venv/
scripts/
@@ -5,3 +5,5 @@ lxml==3.4.4
mechanize==0.2.5
slowaes==0.1a1
XlsxWriter==0.7.3
olefile==0.42.1
PyPDF2==1.25.1
@@ -1,4 +1,4 @@
__version__ = '4.6.3'
__version__ = '4.7.0'

# ex. x.y.z
# x - Incremented for changes requiring migration. (major revision)
@@ -5,11 +5,10 @@
_/ _/ _/ _/ _/ _/ _/ _/_/ _/ _/_/ _/ _/
_/ _/ _/_/_/_/ _/_/_/ _/_/_/ _/ _/ _/ _/ _/_/_/

+---------------------------------------------------------------------------+
| _ ___ _ __ |
| |_)| _ _|_ |_|.|| _ | _ |_ _ _ _ _ _|_o _ _ (_ _ _ _o_|_ |
| |_)|(_|(_|\ | ||||_\ _|_| || (_)| |||(_| | |(_)| | __)(/_(_|_|| | | \/ |
| / |
| Consulting | Research | Development | Training |
| http://www.blackhillsinfosec.com |
+---------------------------------------------------------------------------+

/\
/ \\ /\
Sponsored by... /\ /\/ \\V \/\
/ \\/ // \\\\\ \\ \/\
// // BLACK HILLS \/ \\
www.blackhillsinfosec.com

Large diffs are not rendered by default.

@@ -0,0 +1 @@
password
@@ -0,0 +1,34 @@
# https://twitter.com/egyp7/status/628955613528109056
# rails secret token
filename:secret_token.rb config
language:ruby secret_token

# private keys
path:.ssh/id_rsa BEGIN

# https://twitter.com/TekDefense/status/294556153151647744
# md5 hash of most used password 123456
e10adc3949ba59abbe56e057f20f883e

# http://seclists.org/fulldisclosure/2014/Mar/343
# database passwords
mysql.binero.se
define("DB_PASSWORD"

# http://seclists.org/fulldisclosure/2013/Jun/15
# possible SQL injection
extension:php mysql_query $_GET

# http://blog.conviso.com.br/2013/06/github-hacking-for-fun-and-sensitive.html
# private keys
extension:pem private
extension:conf FTP server configuration
# email addresses
extension:xls mail
extension:sql mysql dump
# possible PHP backdoor
stars:>1000 forks:>100 extension:php "eval(preg_replace("

# https://twitter.com/lanmaster53/status/629102944252772356
# Flask apps with possible SSTI vulns
extension:py flask render_template_string
@@ -0,0 +1,20 @@
# directory indexing
intitle:index.of

# config files
ext:xml | ext:conf | ext:cnf | ext:reg | ext:inf | ext:rdp | ext:cfg | ext:txt | ext:ora | ext:ini

# db files
ext:sql | ext:dbf | ext:mdb

# logs
ext:log

# backups
ext:bkf | ext:bkp | ext:bak | ext:old | ext:backup

# sql errors
intext:"sql syntax near" | intext:"syntax error has occurred" | intext:"incorrect syntax near" | intext:"unexpected end of SQL command" | intext:"Warning: mysql_connect()" | intext:"Warning: mysql_query()" | intext:"Warning: pg_connect()"

# docs
ext:doc | ext:docx | ext:odt | ext:pdf | ext:rtf | ext:sxw | ext:psw | ext:ppt | ext:pptx | ext:pps | ext:csv
@@ -128,7 +128,7 @@ def __parse_file(self):
quote = self.options['quote_character']
values = []

with open(filename, 'rb') as infile:
with open(filename, 'rU') as infile:
# if sep is not a one character string, csv.reader will raise a TypeError
if not quote:
csvreader = csv.reader(infile, delimiter=str(sep), quoting=csv.QUOTE_NONE)

This file was deleted.

@@ -0,0 +1,41 @@
from recon.core.module import BaseModule
from urllib import quote_plus

class Module(BaseModule):
    meta = {
        'name': 'Github Resource Miner',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Uses the Github API to enumerate repositories and member profiles associated with a company search string. Updates the respective tables with the results.',
        'query': 'SELECT DISTINCT company FROM companies WHERE company IS NOT NULL',
    }

    def module_run(self, companies):
        # mine each organization in two passes: member profiles, then repositories
        for org in companies:
            self.heading(org, level=0)
            self._mine_members(org)
            self._mine_repos(org)

    def _mine_members(self, org):
        # harvest the organization's member profiles into the profiles table
        self.heading('Members', level=1)
        endpoint = '/orgs/' + quote_plus(org) + '/members'
        for member in self.query_github_api(endpoint):
            row = {
                'username': member['login'],
                'url': member['html_url'],
                'notes': org,
                'resource': 'Github',
                'category': 'coding',
            }
            self.output('%s (%s)' % (row['username'], row['url']))
            self.add_profiles(**row)

    def _mine_repos(self, org):
        # harvest the organization's repositories into the repositories table
        self.heading('Repositories', level=1)
        endpoint = '/orgs/' + quote_plus(org) + '/repos'
        for repo in self.query_github_api(endpoint):
            row = {
                'name': repo['name'],
                'owner': repo['owner']['login'],
                'description': repo['description'],
                'url': repo['html_url'],
                'resource': 'Github',
                'category': 'repo',
            }
            self.output('%s - %s' % (row['name'], row['description']))
            self.add_repositories(**row)
@@ -5,13 +5,14 @@
import time

class Module(BaseModule):

meta = {
'name': 'Bing Linkedin Profile Harvester',
'author':'Mike Larch and Brian Fehrman (@fullmetalcache)',
'description': 'Harvests contacts from linkedin.com by querying Bing for Linkedin pages related to the given companies, parsing the profiles, and adding them to the \'profiles\' table',
'query': 'SELECT DISTINCT company FROM companies WHERE company IS NOT NULL ORDER BY company',
'description': 'Harvests profiles from linkedin.com by querying Bing for Linkedin pages related to the given companies, parsing the profiles, and adding them to the \'profiles\' table.',
'query': 'SELECT DISTINCT company FROM companies WHERE company IS NOT NULL',
'options': (
('limit', 2, False, 'number of pages to use from bing search'),
('limit', 2, True, 'number of pages to use from bing search (0 = unlimited)'),
('previous', False, True, 'include previous employees'),
),
}
@@ -48,16 +49,29 @@ def get_urls(self, company):

def get_info(self, company, url):
time.sleep(1)

self.verbose('Parsing \'%s\'...' % (url))

resp = self.request(url)

retries = 5
resp = None

while 0 < retries:
try:
retries -= 1
resp = self.request(url)
break
except Exception as e:
self.error('{0}, {1} retries left'.format(e, retries))

if resp is None:
return

tree = fromstring(resp.text)

company_found = self.parse_company(tree, resp.text, company)

if company_found is None:
self.error('No company found on profile page.')
self.error('No match for {0} found on the page or person is not a current employee'.format(company))
return

# output the results
@@ -97,23 +111,34 @@ def parse_company_exp(self, resp, company):
try:
experiences = resp.split('<div id="experience-', 1)[1]
experiences = experiences.split('-view">', 1)[1]
experiences = experiences.split('<script>', 1)[0]
experiences = experiences.split('</div></div>')

except IndexError:
return None


if (experiences is None) or (company is None):
return None

total = len(experiences)
for idx, experience in enumerate(experiences):
if idx == (total - 1):
break

experience = experience.split("</span>", 1)[0]
exp_lower = experience.lower()
if company.lower() in exp_lower:
if 'present' in exp_lower or previous:

try:
time_exp = experience.split('date-locale',1)[1]
time_exp = time_exp.split('</span>', 1)[0]
except IndexError:
continue

time_exp = time_exp.lower()
experience = experience.lower()

if (company.lower() in experience) or (company.replace(" ","").lower() in experience):
if 'present' in time_exp or previous:
company_found = company
break

return company_found

def parse_company_tree(self, tree, company):
@@ -131,7 +156,7 @@ def parse_company_tree(self, tree, company):
except:
pass

if company_found is not None:
if (company_found is not None) and (company is not None):
if company.lower() not in company_found.lower():
company_found = None

0 comments on commit 55c604b

Please sign in to comment.
You can’t perform that action at this time.