Commit

Public release
0x09AL committed Aug 22, 2017
1 parent b9c3017 commit 76ce46e
Showing 12 changed files with 409 additions and 0 deletions.
Binary file added bin/geckodriver-v0.18.0-linux64.tar.gz
Binary file not shown.
Empty file added modules/__init__.py
Empty file.
80 changes: 80 additions & 0 deletions modules/mailfunctions.py
@@ -0,0 +1,80 @@
import time
import requests
import json

class MailFunctions(object):

    Persons = []

    def __init__(self, persons):
        self.Persons = persons
        print "[+] Mail functions initialized [+]"

    def generateEmails(self, domain, format):
        # Supported email formats:
        # 1 - {firstname}.{lastname}@{domain}
        # 2 - {lastname}.{firstname}@{domain}
        # 3 - {firstname}-{lastname}@{domain}
        # 4 - {firstname[0]}{lastname}@{domain}
        # 5 - {lastname}{firstname[0]}@{domain}
        # 6 - {lastname[0]}{firstname}@{domain}
        # 7 - {firstname}{lastname[0]}@{domain}
        # e.g. format 1 for "John Doe" at example.com -> john.doe@example.com
        for person in self.Persons:
            try:
                firstname = person[0].split(" ")[0]
                lastname = person[0].split(" ")[1]
            except Exception, error:
                print "[-] Error: %s [-]" % error
                continue

            if(format == 1):
                email = "%s.%s@%s" % (firstname, lastname, domain)
            elif(format == 2):
                email = "%s.%s@%s" % (lastname, firstname, domain)
            elif(format == 3):
                email = "%s-%s@%s" % (firstname, lastname, domain)
            elif(format == 4):
                email = "%s%s@%s" % (firstname[0], lastname, domain)
            elif(format == 5):
                email = "%s%s@%s" % (lastname, firstname[0], domain)
            elif(format == 6):
                email = "%s%s@%s" % (lastname[0], firstname, domain)
            elif(format == 7):
                email = "%s%s@%s" % (firstname, lastname[0], domain)
            else:
                print "[-] Invalid Option [-]"  # Normally we should never get here
                # return
            person.append(email.lower())
        return self.Persons

    def saveOutput(self, emailList):
        # Writes the results to a timestamped CSV file in the output/ directory
        filename = "%s%s%s" % ("output/", str(int(time.time())), '.csv')
        print "[+] Saving output to %s" % filename
        output = open(filename, "w")
        head = "First Name,Last Name,Position,Email\n"
        output.write(head)

        for person in range(0, len(emailList)):
            line = "%s,%s,%s,%s\n" % (emailList[person][0].split(" ")[0], emailList[person][0].split(" ")[1], emailList[person][1], emailList[person][3])
            output.write(line)

        output.close()

    def checkPwned(self, emailList):
        # Queries the Have I Been Pwned API for every generated email address
        for email in emailList:
            url = "https://haveibeenpwned.com/api/v2/breachedaccount/%s?truncateResponse=true" % email[3]
            time.sleep(1)  # Sleep between requests to avoid being rate limited by the server

            r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0'})
            if(r.status_code == 200):
                try:
                    pwnedSites = json.loads(r.text)
                    for site in pwnedSites:
                        print "[+] %s pwned in the %s breach [+]" % (email[3], site["Name"])
                except Exception:
                    pass
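
A minimal usage sketch for MailFunctions, assuming each entry in the persons list has the form [full name, position, company]; the names, domain, and data below are placeholders:

from modules.mailfunctions import MailFunctions

persons = [["John Doe", "Security Engineer", "Example Corp"]]   # placeholder data
mailer = MailFunctions(persons)

emails = mailer.generateEmails("example.com", 1)   # format 1 -> john.doe@example.com
mailer.saveOutput(emails)                          # writes output/<timestamp>.csv (the output/ directory must exist)
mailer.checkPwned(emails)                          # queries Have I Been Pwned for each address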


82 changes: 82 additions & 0 deletions modules/parser.py
@@ -0,0 +1,82 @@
from bs4 import BeautifulSoup
import requests

class Parser(object):

    company = ""
    linkedInUrl = ""
    htmlData = ""
    linkedinURLS = []

    def __init__(self, country):
        self.linkedInUrl = "https://%s.linkedin.com/in/" % country
        #self.company = company


    def readHTMLFile(self, htmlData):
        # Stores the HTML data that will be parsed
        self.htmlData = htmlData

    def getCompany(self):
        # Returns the company name
        return self.company

    def getExtractedLinks(self):
        # Returns the LinkedIn profile links extracted from the HTML data
        print "[+] Parsing data from HTML file [+]"

        if(self.htmlData == ""):
            print "[-] There is no HTML data, exiting [-]"
            exit()
        soupParser = BeautifulSoup(self.htmlData, 'html.parser')

        for link in soupParser.find_all('a'):
            temp = str(link.get('href'))
            if(temp.startswith(self.linkedInUrl)):
                if(temp not in self.linkedinURLS):
                    self.linkedinURLS.append(temp)

        return self.linkedinURLS



    def getEmployeeInformation(self):
        # Will return the dictionary that contains employee data (not implemented yet)
        return

    # The response parameter is the HTML page returned for each visited link,
    # i.e. the person's LinkedIn profile page.

    def extractName(self, response):
        # Returns the name and surname from the response
        soupParser = BeautifulSoup(response, 'html.parser')
        name = soupParser.findAll("h1", class_="pv-top-card-section__name")[0].string
        return name

    def extractPosition(self, response):
        # Returns the current position (headline) from the response
        soupParser = BeautifulSoup(response, 'html.parser')
        position = soupParser.findAll("h2", class_="pv-top-card-section__headline")[0].string
        return position

    def extractCompany(self, response):
        # Returns the company from the response
        soupParser = BeautifulSoup(response, 'html.parser')
        company = soupParser.findAll("h3", class_="pv-top-card-section__company")[0].string
        return company

    def extractPhone(self, response):
        # Will return the phone number if found (not implemented yet)
        return
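
A minimal sketch of how Parser is meant to be driven; the two HTML files below are hypothetical stand-ins for pages that the Requester class fetches:

from modules.parser import Parser

searchPage = open("google_results.html").read()    # hypothetical saved Google results page
profilePage = open("profile.html").read()          # hypothetical saved LinkedIn profile page

parser = Parser("al")                   # builds the https://al.linkedin.com/in/ prefix
parser.readHTMLFile(searchPage)
links = parser.getExtractedLinks()      # unique profile URLs found in the search page

print parser.extractName(profilePage)       # h1.pv-top-card-section__name
print parser.extractPosition(profilePage)   # h2.pv-top-card-section__headline
print parser.extractCompany(profilePage)    # h3.pv-top-card-section__company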





68 changes: 68 additions & 0 deletions modules/requester.py
@@ -0,0 +1,68 @@
from selenium import webdriver
from pyvirtualdisplay import Display
from time import sleep
import sys


RED = "\033[1;31m"
BLUE = "\033[1;34m"
CYAN = "\033[1;36m"
GREEN = "\033[0;32m"
RESET = "\033[0;0m"
BOLD = "\033[;1m"
REVERSE = "\033[;7m"

class Requester(object):

    timeout = 10

    def __init__(self):
        # Start a virtual display so Firefox can run without a visible window
        display = Display(visible=0, size=(1600, 1024))
        display.start()
        self.driver = webdriver.Firefox()
        self.driver.delete_all_cookies()


    def doLogin(self, username, password):
        self.driver.get("https://www.linkedin.com/uas/login")
        self.driver.execute_script('localStorage.clear();')
        if(str(self.driver.title).startswith("Sign In")):
            print "[+] Login Page loaded successfully [+]"
        lnkUsername = self.driver.find_element_by_id("session_key-login")
        lnkUsername.send_keys(username)
        lnkPassword = self.driver.find_element_by_id("session_password-login")
        lnkPassword.send_keys(password)
        self.driver.find_element_by_id("btn-primary").click()
        sleep(5)
        if(str(self.driver.title) == "LinkedIn"):
            sys.stdout.write(CYAN)
            print "[+] Login Success [+]"
            sys.stdout.write(RESET)
        else:
            sys.stdout.write(RED)
            print "[-] Login Failed [-]"
            sys.stdout.write(RESET)



    def doGetLinkedin(self, url):
        self.driver.get(url)
        sleep(3)
        # TODO: add better error handling here
        return self.driver.page_source.encode('ascii', 'replace')

    def getLinkedinLinks(self, state, company):
        print "[+] Getting profiles from Google [+]"
        dork = "site:%s.linkedin.com Current: %s" % (state, company)
        self.driver.get("https://www.google.al/search?q=%s&t=h_&ia=web" % dork)
        # Extend the search - commented out during testing
        #self.driver.execute_script("window.scrollTo(0,document.body.scrollHeight);")
        #sleep(5)
        return self.driver.page_source.encode('ascii', 'replace')

    def kill(self):
        self.driver.quit()
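
A rough end-to-end sketch of how the three classes could be wired together; the credentials, country code, company, domain, and email format below are placeholders, not values from the commit:

from modules.requester import Requester
from modules.parser import Parser
from modules.mailfunctions import MailFunctions

requester = Requester()
requester.doLogin("user@example.com", "password")          # placeholder credentials

parser = Parser("al")
parser.readHTMLFile(requester.getLinkedinLinks("al", "Example Corp"))

persons = []
for link in parser.getExtractedLinks():
    page = requester.doGetLinkedin(link)
    persons.append([parser.extractName(page), parser.extractPosition(page), parser.extractCompany(page)])

mailer = MailFunctions(persons)
emails = mailer.generateEmails("example.com", 1)
mailer.saveOutput(emails)
mailer.checkPwned(emails)
requester.kill()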

