
Commit 7420881
Version 2, Features Added
hash3liZer committed Jan 24, 2019
1 parent 7135696 commit 7420881
Showing 12 changed files with 143 additions and 8,338 deletions.
27 changes: 16 additions & 11 deletions handlers/ask.py
@@ -13,19 +13,23 @@ class ASK:
     TIMEOUT = 10
     RESPONSE = ""
     SUBDOMAINS = []
+    AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246"
+    HEADERS = {
+        'User-Agent' : '',
+        'Referer' : '',
+    }
 
-    def __init__(self, _class, _dm, _hd, _ag):
+    def __init__(self, _class, _dm):
         self.session = requests.Session()
         self.baseclass = _class
         self.domain = _dm
-        self.URL = self.URL % (self.domain)
-        self.REGEXP = self.REGEXP % (self.domain)
-        self.headers = self.headerer(_hd, _ag)
-        self.agent = _ag
+        self.url = self.URL % (self.domain)
+        self.regexp = self.REGEXP % (self.domain)
+        self.headers = self.headerer( self.HEADERS, self.AGENT )
 
     def headerer(self, headers, _ag):
         headers['User-Agent'] = _ag
-        headers['Referer'] = self.URL
+        headers['Referer'] = self.url
         return headers
 
     def execute(self):
@@ -37,7 +41,7 @@ def request(self):
         self.baseclass.THREADS += 1
 
         try:
-            req = self.session.get(self.URL, headers=self.headers, cookies=self.COOKIES, timeout=self.TIMEOUT)
+            req = self.session.get(self.url, headers=self.headers, cookies=self.COOKIES, timeout=self.TIMEOUT)
             if req.status_code < 400:
                 self.RESPONSE = req.text
                 self.extract()
@@ -50,13 +54,14 @@ def request(self):
 
     def append(self, error=False):
         self.LOCK.acquire()
-        self.baseclass.add( self.SUBDOMAINS, self.SERVICE )
-        self.baseclass.pushtoscreen( self.SUBDOMAINS, self.SERVICE, error )
+        self.baseclass.move( self.SERVICE, self.SUBDOMAINS )
         self.LOCK.release()
 
     def extract(self):
         _html = soup(self.RESPONSE)
         for cite in _html.findAll("p", attrs={'class': 'PartialSearchResults-item-url'}):
-            sub = re.search(self.REGEXP, cite.text, re.IGNORECASE)
+            sub = re.search(self.regexp, cite.text, re.IGNORECASE)
             if sub:
-                self.SUBDOMAINS.append(sub.group())
+                _sub = sub.group()
+                if _sub not in self.SUBDOMAINS:
+                    self.SUBDOMAINS.append(_sub)
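The de-duplication guard added to each extract() above is easiest to see in isolation. Below is a minimal standalone sketch of the Ask handler's new extraction logic; it assumes the project's soup() helper wraps BeautifulSoup, borrows the REGEXP template visible in handlers/google.py (the Ask template itself is collapsed in this diff), and uses a fabricated result snippet:

import re
from bs4 import BeautifulSoup

# Fabricated sample of Ask result HTML, for illustration only.
RESPONSE = (
    '<p class="PartialSearchResults-item-url">mail.example.com/path</p>'
    '<p class="PartialSearchResults-item-url">mail.example.com/other</p>'
)
regexp = "([a-z0-9]+[.])+%s" % "example.com"

subdomains = []
_html = BeautifulSoup(RESPONSE, "html.parser")
for cite in _html.findAll("p", attrs={'class': 'PartialSearchResults-item-url'}):
    sub = re.search(regexp, cite.text, re.IGNORECASE)
    if sub:
        _sub = sub.group()
        if _sub not in subdomains:  # the guard this commit introduces
            subdomains.append(_sub)

print(subdomains)  # ['mail.example.com'] -- listed once despite two hits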
27 changes: 16 additions & 11 deletions handlers/baidu.py
@@ -13,19 +13,23 @@ class BAIDU:
     TIMEOUT = 10
     RESPONSE = ""
     SUBDOMAINS = []
+    AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246"
+    HEADERS = {
+        'User-Agent' : '',
+        'Referer' : '',
+    }
 
-    def __init__(self, _class, _dm, _hd, _ag):
+    def __init__(self, _class, _dm):
         self.session = requests.Session()
         self.baseclass = _class
         self.domain = _dm
-        self.URL = self.URL % (self.domain, self.domain)
-        self.REGEXP = self.REGEXP % (self.domain)
-        self.headers = self.headerer(_hd, _ag)
-        self.agent = _ag
+        self.url = self.URL % (self.domain, self.domain)
+        self.regexp = self.REGEXP % (self.domain)
+        self.headers = self.headerer( self.HEADERS, self.AGENT )
 
     def headerer(self, headers, _ag):
         headers['User-Agent'] = _ag
-        headers['Referer'] = self.URL
+        headers['Referer'] = self.url
         return headers
 
     def execute(self):
@@ -37,7 +41,7 @@ def request(self):
         self.baseclass.THREADS += 1
 
         try:
-            req = self.session.get(self.URL, headers=self.headers, cookies=self.COOKIES, timeout=self.TIMEOUT)
+            req = self.session.get(self.url, headers=self.headers, cookies=self.COOKIES, timeout=self.TIMEOUT)
             if req.status_code < 400:
                 self.RESPONSE = req.text
                 self.extract()
@@ -50,13 +54,14 @@ def request(self):
 
     def append(self, error=False):
         self.LOCK.acquire()
-        self.baseclass.add( self.SUBDOMAINS, self.SERVICE )
-        self.baseclass.pushtoscreen( self.SUBDOMAINS, self.SERVICE, error )
+        self.baseclass.move( self.SERVICE, self.SUBDOMAINS )
         self.LOCK.release()
 
     def extract(self):
         _html = soup(self.RESPONSE)
         for cite in _html.findAll("a", attrs={'class': 'c-showurl'}):
-            sub = re.search(self.REGEXP, cite.text, re.IGNORECASE)
+            sub = re.search(self.regexp, cite.text, re.IGNORECASE)
             if sub:
-                self.SUBDOMAINS.append(sub.group())
+                _sub = sub.group()
+                if _sub not in self.SUBDOMAINS:
+                    self.SUBDOMAINS.append(_sub)
27 changes: 16 additions & 11 deletions handlers/bing.py
@@ -13,19 +13,23 @@ class BING:
     TIMEOUT = 10
     RESPONSE = ""
     SUBDOMAINS = []
+    AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246"
+    HEADERS = {
+        'User-Agent' : '',
+        'Referer' : '',
+    }
 
-    def __init__(self, _class, _dm, _hd, _ag):
+    def __init__(self, _class, _dm):
         self.session = requests.Session()
         self.baseclass = _class
         self.domain = _dm
-        self.URL = self.URL % (self.domain)
-        self.REGEXP = self.REGEXP % (self.domain)
-        self.headers = self.headerer(_hd, _ag)
-        self.agent = _ag
+        self.url = self.URL % (self.domain)
+        self.regexp = self.REGEXP % (self.domain)
+        self.headers = self.headerer(self.HEADERS, self.AGENT)
 
     def headerer(self, headers, _ag):
         headers['User-Agent'] = _ag
-        headers['Referer'] = self.URL
+        headers['Referer'] = self.url
         return headers
 
     def execute(self):
@@ -37,7 +41,7 @@ def request(self):
         self.baseclass.THREADS += 1
 
         try:
-            req = self.session.get(self.URL, headers=self.headers, cookies=self.COOKIES, timeout=self.TIMEOUT)
+            req = self.session.get(self.url, headers=self.headers, cookies=self.COOKIES, timeout=self.TIMEOUT)
             if req.status_code < 400:
                 self.RESPONSE = req.text
                 self.extract()
@@ -50,13 +54,14 @@ def request(self):
 
     def append(self, error=False):
         self.LOCK.acquire()
-        self.baseclass.add( self.SUBDOMAINS, self.SERVICE )
-        self.baseclass.pushtoscreen( self.SUBDOMAINS, self.SERVICE, error )
+        self.baseclass.move( self.SERVICE, self.SUBDOMAINS )
         self.LOCK.release()
 
     def extract(self):
         _html = soup(self.RESPONSE)
         for cite in _html.findAll("cite"):
-            sub = re.search(self.REGEXP, cite.text, re.IGNORECASE)
+            sub = re.search(self.regexp, cite.text, re.IGNORECASE)
             if sub:
-                self.SUBDOMAINS.append(sub.group())
+                _sub = sub.group()
+                if _sub not in self.SUBDOMAINS:
+                    self.SUBDOMAINS.append(_sub)
27 changes: 16 additions & 11 deletions handlers/crt.py
@@ -13,19 +13,23 @@ class CRTSEARCH:
     TIMEOUT = 10
     RESPONSE = ""
     SUBDOMAINS = []
+    AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246"
+    HEADERS = {
+        'User-Agent' : '',
+        'Referer' : '',
+    }
 
-    def __init__(self, _class, _dm, _hd, _ag):
+    def __init__(self, _class, _dm):
         self.session = requests.Session()
         self.baseclass = _class
         self.domain = _dm
-        self.URL = self.URL % ("%25", self.domain)
-        self.REGEXP = self.REGEXP % (self.domain)
-        self.headers = self.headerer(_hd, _ag)
-        self.agent = _ag
+        self.url = self.URL % ("%25", self.domain)
+        self.regexp = self.REGEXP % (self.domain)
+        self.headers = self.headerer( self.HEADERS, self.AGENT )
 
     def headerer(self, headers, _ag):
         headers['User-Agent'] = _ag
-        headers['Referer'] = self.URL
+        headers['Referer'] = self.url
         return headers
 
     def execute(self):
@@ -37,7 +41,7 @@ def request(self):
         self.baseclass.THREADS += 1
 
         try:
-            req = self.session.get(self.URL, headers=self.headers, cookies=self.COOKIES, timeout=self.TIMEOUT)
+            req = self.session.get(self.url, headers=self.headers, cookies=self.COOKIES, timeout=self.TIMEOUT)
             if req.status_code < 400:
                 self.RESPONSE = req.text
                 self.extract()
@@ -50,13 +54,14 @@ def request(self):
 
     def append(self, error=True):
         self.LOCK.acquire()
-        self.baseclass.add( self.SUBDOMAINS, self.SERVICE )
-        self.baseclass.pushtoscreen( self.SUBDOMAINS, self.SERVICE, error )
+        self.baseclass.move( self.SERVICE, self.SUBDOMAINS )
         self.LOCK.release()
 
     def extract(self):
         _html = soup(self.RESPONSE)
         for cite in _html.findAll("td"):
-            sub = re.match(self.REGEXP, cite.text, re.IGNORECASE)
+            sub = re.match(self.regexp, cite.text, re.IGNORECASE)
             if sub:
-                self.SUBDOMAINS.append(sub.group())
+                _sub = sub.group()
+                if _sub not in self.SUBDOMAINS:
+                    self.SUBDOMAINS.append(_sub)
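One detail in the CRTSEARCH constructor deserves a note: the first value substituted into the URL template is the literal string "%25", the percent-encoded "%" character, which crt.sh interprets as a wildcard so that the query covers every name under the target domain. The template itself is collapsed in this diff, so the format string in this short sketch is an assumption:

# Assumed crt.sh template; only the ("%25", domain) substitution is visible above.
URL = "https://crt.sh/?q=%s.%s"
print(URL % ("%25", "example.com"))
# https://crt.sh/?q=%25.example.com  (the server decodes the query to %.example.com)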
18 changes: 11 additions & 7 deletions handlers/dnsdumpster.py
@@ -15,15 +15,19 @@ class DNSDUMPSTER:
     TIMEOUT = 10
     RESPONSE = ""
     SUBDOMAINS = []
+    AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246"
+    HEADERS = {
+        'User-Agent' : '',
+        'Referer' : '',
+    }
 
-    def __init__(self, _class, _dm, _hd, _ag):
+    def __init__(self, _class, _dm):
         self.session = requests.Session()
         self.baseclass = _class
         self.domain = _dm
         self.DATA['targettip'] = self.domain
-        self.REGEXP = self.REGEXP % (self.domain)
-        self.headers = self.headerer(_hd, _ag)
-        self.agent = _ag
+        self.regexp = self.REGEXP % (self.domain)
+        self.headers = self.headerer( self.HEADERS, self.AGENT )
 
     def headerer(self, headers, _ag):
         headers['User-Agent'] = _ag
@@ -62,12 +66,12 @@ def request(self):
 
     def append(self, error=False):
         self.LOCK.acquire()
-        self.baseclass.add( self.SUBDOMAINS, self.SERVICE )
-        self.baseclass.pushtoscreen( self.SUBDOMAINS, self.SERVICE, error )
+        self.baseclass.move( self.SERVICE, self.SUBDOMAINS )
         self.LOCK.release()
 
     def extract(self):
         links = re.findall(r"<td class=\"col-md-4\">(.*?)<br>", self.RESPONSE)
         for link in links:
             if link.endswith(self.domain):
-                self.SUBDOMAINS.append(link)
+                if link not in self.SUBDOMAINS:
+                    self.SUBDOMAINS.append(link)
33 changes: 19 additions & 14 deletions handlers/google.py
@@ -7,25 +7,29 @@ class GOOGLE:
 
     COOKIES = None
     SERVICE = "Google"
-    LOCK = threading.Semaphore(value=1)
-    URL = "http://www.google.com/search?q=site:%s"
-    REGEXP = "([a-z0-9]+[.])+%s"
     TIMEOUT = 10
     RESPONSE = ""
     SUBDOMAINS = []
+    LOCK = threading.Semaphore(value=1)
+    URL = "http://www.google.com/search?q=site:%s"
+    REGEXP = "([a-z0-9]+[.])+%s"
+    AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246"
+    HEADERS = {
+        'User-Agent' : '',
+        'Referer' : '',
+    }
 
-    def __init__(self, _class, _dm, _hd, _ag):
+    def __init__(self, _class, _dm):
         self.session = requests.Session()
         self.baseclass = _class
         self.domain = _dm
-        self.URL = self.URL % (self.domain)
-        self.REGEXP = self.REGEXP % (self.domain)
-        self.headers = self.headerer(_hd, _ag)
-        self.agent = _ag
+        self.url = self.URL % (self.domain)
+        self.regexp = self.REGEXP % (self.domain)
+        self.headers = self.headerer(self.HEADERS, self.AGENT)
 
     def headerer(self, headers, _ag):
         headers['User-Agent'] = _ag
-        headers['Referer'] = self.URL
+        headers['Referer'] = self.url
         return headers
 
     def execute(self):
@@ -37,7 +41,7 @@ def request(self):
         self.baseclass.THREADS += 1
 
         try:
-            req = self.session.get(self.URL, headers=self.headers, cookies=self.COOKIES, timeout=self.TIMEOUT)
+            req = self.session.get(self.url, headers=self.headers, cookies=self.COOKIES, timeout=self.TIMEOUT)
             if req.status_code < 400:
                 self.RESPONSE = req.text
                 self.extract()
@@ -50,13 +54,14 @@ def request(self):
 
     def append(self, error=False):
         self.LOCK.acquire()
-        self.baseclass.add( self.SUBDOMAINS, self.SERVICE )
-        self.baseclass.pushtoscreen( self.SUBDOMAINS , self.SERVICE, error )
+        self.baseclass.move( self.SERVICE, self.SUBDOMAINS )
         self.LOCK.release()
 
     def extract(self):
         _html = soup(self.RESPONSE)
         for cite in _html.findAll("cite"):
-            sub = re.search(self.REGEXP, cite.text, re.IGNORECASE)
+            sub = re.search(self.regexp, cite.text, re.IGNORECASE)
             if sub:
-                self.SUBDOMAINS.append(sub.group())
+                _sub = sub.group()
+                if _sub not in self.SUBDOMAINS:
+                    self.SUBDOMAINS.append(_sub)
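After this change a handler needs only the base class and the target domain. Below is a hedged driver sketch, runnable from the repository root with its dependencies installed; the Base stand-in models just the two members the handlers visibly touch (a THREADS counter and move()), and it assumes the collapsed part of request() ends by calling append():

from handlers.google import GOOGLE   # module path as it appears in this commit

class Base:
    THREADS = 0                      # incremented by each handler's request()

    def __init__(self):
        self.results = {}

    def move(self, service, subdomains):
        # Receives each service's findings; the handler holds its lock here.
        self.results[service] = list(subdomains)

base = Base()
google = GOOGLE(base, "example.com") # new two-argument constructor
google.request()                     # fetch, extract(), then append() -> base.move()
print(base.results.get("Google", []))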
