First commit

0 parents commit af0b02e0c19b268ab1c6fd79c86bcbc38189df4f SensePost Pty Ltd committed Feb 10, 2012
Showing with 2,171 additions and 0 deletions.
  1. +127 −0 BiLE-weigh.pl
  2. +238 −0 BiLE.pl
  3. +249 −0 README.markdown
  4. +162 −0 common
  5. +117 −0 jarf-dnsbrute
  6. +137 −0 jarf-rev
  7. +217 −0 qtrace.pl
  8. +302 −0 readme.txt
  9. +274 −0 tld-expand.pl
  10. +147 −0 vet-IPrange.pl
  11. +201 −0 vet-mx.pl
127 BiLE-weigh.pl
@@ -0,0 +1,127 @@
+#!/usr/bin/perl
+
+## perl BiLE-weigh.pl domain.com outputfile.mine
+## Takes output file *.mine from BiLE.pl
+## domain.com is the website domain
+
+$|=1;
+
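+# Well-known, high-traffic sites whose links would skew the weighting;
+# any link pair touching one of these is dropped.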
+@exceptionlist=("microsoft.com","216.239.5","yahoo.com",
+ "ultraseek.com","ananzi.co.za","macromedia.com",
+ "clickstream","w3.org","adobe.com","google.com");
+
+if ($#ARGV < 1){die "perl BiLE-weigh.pl domain.com output.file.from.bile.mine\n";}
+
+#load and init
+`cat @ARGV[1] | sort | uniq > @ARGV[1].2`;
+`mv @ARGV[1].2 @ARGV[1]`;
+open (IN,"@ARGV[1]") || die "Cant open data file\n";
+while (<IN>){
+ chomp;
+ ($src,$dst,$cellid)=split(/:/,$_);
+ if ($src ne $dst){
+ $flag=0;
+ foreach $except (@exceptionlist){
+ if (($src =~ /$except/) || ($dst =~ /$except/)) {$flag=1;}
+ }
+ if ($flag == 0){push @structure,$_;}
+ }
+}
+close(IN);
+
+
+$sites{@ARGV[0]}=300;
+
+
+####################compute first cell node values
+print "compute nodes\n";
+print "Nodes alone\n";
+$ws=weight(@ARGV[0],"s");
+$wd=weight(@ARGV[0],"d");
+print "src $ws dst $wd\n";
+foreach $piece (@structure){
+
+
+ ($src,$dst,$cellid)=split(/:/,$piece);
+
+ ## link -from- X to node
+ if ($src eq @ARGV[0]){
+ $newsites{$dst}=$newsites{$dst}+($sites{$src}*(1/$ws));
+ }
+
+ ## link -to- X from node
+ if ($dst eq @ARGV[0]){
+ $newsites{$src}=$newsites{$src}+($sites{$dst}*(0.6/$wd));
+ }
+}
+
+&writenodes;
+
+
+undef $sites;
+undef %sites;
+&loadnodes;
+
+#between nodes
+foreach $blah (keys %sites){
+ print "\n[Testing with node $blah]\n";
+ $ws=weight($blah,"s");
+ $wd=weight($blah,"d");
+ print "src $ws dst $wd\n";
+ foreach $piece (@structure){
+
+ ($src,$dst,$cellid)=split(/:/,$piece);
+
+ ## link -from- node to other node (2/3)
+ if ($src eq $blah){
+ $newsites{$dst}=$newsites{$dst}+($sites{$src}*(1/$ws));
+ $add=($sites{$src}*(1/$ws));
+ $orig=$sites{$src};
+ }
+
+ ## link -to- node from nodes (1/3)
+ if ($dst eq $blah){
+ $newsites{$src}=$newsites{$src}+($sites{$dst}*(0.6/$wd));
+
+ $add=($sites{$dst}*(0.6/$wd));
+ $orig=$sites{$dst};
+ }
+ }
+
+}
+
+&writenodes;
+
+`cat temp | sort -r -t ":" -k 2 -n > @ARGV[1].sorted`;
+
+
+sub loadnodes{
+ $sites="";
+ open (IN,"temp") || die "cant open temp file\n";
+ while (<IN>){
+ chomp;
+ ($node,$value)=split(/:/,$_);
+ $sites{$node}=$value;
+ }
+ close (IN);
+}
+
+sub writenodes{
+ open (OUT,">temp") || die "Cant write\n";
+ foreach $blah (keys %newsites){
+ print OUT "$blah:$newsites{$blah}\n";
+ }
+ close OUT;
+}
+
+sub weight{
+ ($site,$mode)=@_;
+ # Counts how often $site appears as source ("s" mode) or destination
+ # ("d" mode) in the link pairs. (The $from/$to names are swapped
+ # relative to their meaning, but the returned values are correct.)
+ $from=0; $to=0;
+ foreach $piece (@structure){
+ ($src,$dst,$cellid)=split(/:/,$piece);
+ if ($dst eq $site){$from++;}
+ if ($src eq $site){$to++;}
+ }
+ if ($mode eq "s"){return $to;}
+ if ($mode eq "d"){return $from;}
+}
238 BiLE.pl
@@ -0,0 +1,238 @@
+#!/usr/bin/perl
+
+### perl BiLE.pl web.site.com output.name
+### BiLE will output two files *.mine and *.walrus
+### *.walrus can be ignored for now
+
+use Socket;
+$|=1;
+
+if ($#ARGV<1){die "Usage BiLE.pl <site> <outfile>\n";}
+$tocheck=@ARGV[0];
+
+@links=getlinks($tocheck,4,0);
+push @links,&linkto($tocheck,0);
+
+undef @lotsoflinks;
+
+foreach $link (@links){
+ if ($link ne $tocheck){
+ push @lotsoflinks,&linkto($link,1);
+ push @lotsoflinks,&getlinks($link,3,1);
+ }
+}
+
+
+
+###
+##SubRoutines
+###
+
+sub linkto{
+ my ($tocheck,$cellid)=@_;
+ if (length($tocheck)<3){return "";}
+ my @returns=("");
+
+ undef @global;
+
+ @global=dedupe(returngoogle("link:$tocheck","web"));
+ foreach $taa (@global){print "[$taa]\n";}
+
+ open (OUT,">>@ARGV[1].mine") || die "cant open out file\n";
+ open (OUTT,">>@ARGV[1].walrus") || die "cant open walrus file\n";
+ print OUT "----> Links to: [$tocheck]\n";
+ foreach $site (@global){
+ ($site,$crap)=split(/[\`\!\@\#\$\%\^\&\*\(\)\=\\\|\+\[\]\'\>\<\/\?\,\"\' ]/,$site);
+ if (($site =~ /\./) && (length($site)>2) && ($site !~ /shdocvw/)) {
+ print OUT "$site:$tocheck\n";
+ print OUTT "$tocheck:$site\n";
+ push @returns,$site;
+ }
+ }
+ close (OUT);
+ close(OUTT);
+ return (@returns);
+}
+
+#################################
+sub getlinks{
+
+ my @return=("");
+ my @global=("");
+
+ ($site,$depth,$cellid)=@_;
+ if (length($site)<3){return "";}
+ print "mirroring $tocheck\n";
+ $rc=system("rm -Rf work");
+ $mc="httrack $site --max-size=350000 --max-time=600 -I0 --quiet --do-not-log -O work.$site --depth=$depth -%v-K -*.gif -*.jpg -*.pdf -*.zip -*.dat -*.exe -*.doc -*.avi -*.pps -*.ppt 2>&1";
+ $rc=system ($mc);
+
+
+ #HTTP hrefs
+ @res=`grep -ri "://" work.$site/*`;
+
+ foreach $line (@res){
+ ($file,$crap,$stuff)=split(/:/,$line);
+ ($crap,$getit,$crap)=split(/\//,$file);
+ ($crap,$want)=split(/\/\//,$stuff);
+
+ ($want,$crap)=split(/\//,$want);
+ ($want,$crap)=split(/\"/,$want);
+ ($want,$crap)=split(/\>/,$want);
+ ($want,$crap)=split(/\</,$want);
+ ($want,$crap)=split(/[\`\!\@\#\$\%\^\&\*\(\)\=\\\|\+\[\]\'\>\<\/\?\,\"\']/,$want);
+ $want =~ s/\[\]\;//g;
+ if ((length($want)>0) && ($getit ne $want)) {
+ if (($want =~ /\./) && (length($want)>2) && ($want !~/shdocvw/) &&
+ ($want !~ /[\`!\@\#\$\%\^\&\*\(\)\=\\\|\+\[\]\'\>\<\/\?\,\"]/)) {
+ $store="$site:$want";
+ push @global,$store;
+ push @return,$want;
+ }
+ }
+ }
+
+ ## To get mailtos:
+ @res=`grep -ri "\@" work.$site/*`;
+ foreach $line (@res){
+ ($crap,$want)=split(/\@/,$line);
+ ($want,$crap)=split(/[ ">\n?<']/,$want);
+ ($left,$right)=split(/\./,$want);
+ if ( ($want =~ /\./) && (length($want)>3) && (length($right)> 1) && ($want !~/shdocvw/)){
+ ($want,$crap)=split(/[\`\!\@\#\$\%\^\&\*\(\)\=\\\|\+\[\]\'\>\<\/\?\,\"\']/,$want);
+ $store="$site:$want";
+ push @global,$store;
+ push @return,$want;
+ }
+ }
+
+
+ @global=dedupe(@global);
+
+ open (OUT,">>@ARGV[1].mine") || die "cant open out file\n";
+ open (OUTT,">>@ARGV[1].walrus") || die "cant open walrus file\n";
+ print OUT "====> Link from: [$site]\n";
+ foreach $site (@global){
+ print OUT "$site\n";
+ print OUTT "$site\n";
+ }
+ close (OUT);
+ close(OUTT);
+
+# $rc=system("rm -Rf work");
+ return (dedupe(@return));
+}
+
+
+
+#############Putting it together.
+sub returngoogle{
+ ($term,$type)=@_;
+ if ($type eq "web") {$gtype="search"; $host="www.google.com";}
+ if ($type eq "news") {$gtype="groups"; $host="groups.google.com";};
+ if ($term !~ /link\:/){
+ $term="%2b".$term;
+ $term=~s/\./\.\%2b/g;
+ $term=~s/ /+/g;
+ }
+ $port=80; $target = inet_aton($host);
+ $enough=numg($term,$gtype);
+ print "The number is $enough\n";
+ undef @rglobal;
+ for ($i=0; $i<=$enough; $i=$i+100){
+ print "tick $i\n";
+ @response=sendraw("GET /$gtype?q=$term&num=100&hl=en&safe=off&start=$i&sa=N&filter=0 HTTP/1.0\r\n\r\n");
+
+ undef @collect;
+
+ @collect=googleparseweb(@response);
+ foreach (@collect){
+ print "[$_]\n";
+ push @rglobal,$_;
+ }
+ }
+ return @rglobal;
+}
+
+############find out how many requests we must do
+sub numg{
+ ($theterm,$gtype)=@_;
+ @response=sendraw("GET /$gtype?q=$theterm&num=10&hl=en&safe=off&start=10&sa=N&filter=0 HTTP/1.0\r\n\r\n");
+ $flag=0;
+ foreach $line (@response){
+ if ($line =~ /of about/){
+ ($duh,$one)=split(/of about \<b\>/,$line);
+ ($two,$duh)=split(/\</,$one);
+ $flag=1;
+ last;
+ }
+ #single reply
+ if ($line =~ /of \<b\>/){
+ ($duh,$one)=split(/of \<b\>/,$line);
+ ($two,$duh)=split(/\</,$one);
+ $flag=1;
+ last;
+ }
+ }
+ if ($flag==0){return 0;}
+ for ($r=0; $r<=1000; $r=$r+100){
+ if (($two>$r) && ($two<100+$r)) {$top=$r+100;}
+ }
+ if (($two>1000) || ($two =~ /\,/)) {
+ $top=1000;
+ print "Over 1000 hits..\n";
+ }
+
+ print "Received $two Hits - Google for $top returns\n";
+ return $top;
+}
+
+###########Parse for web stuff
+sub googleparseweb{
+
+ my @googles;
+
+ foreach $line (@_){
+ if ($line =~ /http/){
+ (@stuffs)=split(/\/\//,$line);
+ foreach $stuff (@stuffs){
+ ($want,$crap)=split(/\//,$stuff);
+ if (($want !~ /</) && ($want !~ /google/)){push @googles,$want;}
+ }
+ }
+ }
+ return dedupe(@googles);
+}
+
+
+###########Good old old sendraw
+sub sendraw {
+ my ($pstr)=@_;
+ socket(S,PF_INET,SOCK_STREAM,getprotobyname('tcp')||0) || return "";
+ if(connect(S,pack "SnA4x8",2,$port,$target)){
+ my @in="";
+ select(S); $|=1; print $pstr;
+ while(<S>) {
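+ # $line below is never assigned, so the early-exit match never fires
+ # and the entire response (headers and body) is collected.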
+ push @in,$_; last if ($line=~ /^[\n\r]+$/ );
+ }
+ select(STDOUT);
+ return @in;
+ } else { return ""; }
+}
+
+
+#########################-- dedupe
+sub dedupe
+{
+ (@keywords) = @_;
+ my %hash = ();
+ foreach (@keywords) {
+ $_ =~ tr/[A-Z]/[a-z]/;
+ chomp;
+ if (length($_)>1){$hash{$_} = $_;}
+ }
+ return keys %hash;
+} #dedupe
+
+
+
249 README.markdown
@@ -0,0 +1,249 @@
+#1. Name
+BiLE Suite
+#2. Author
+Roelof Temmingh
+#3. License, version & release date
+License : GPLv2
+Version : v1.0
+Release Date : Unknown
+
+#4. Description
+##4.1 BiLE.pl
+The Bi-directional Link Extractor. BiLE leans on Google and HTTrack to
+automate the collection of links to and from the target site, and then
+applies a simple statistical weighing algorithm to deduce which Web sites
+have the strongest "relationships" with the target site.
+
+We run BiLE.pl against the target Web site by simply specifying the Website
+address and a name for the output file.
+##4.2 BiLE-weigh.pl
+The next tool used in the collection is BiLE-weigh, which takes the output
+of BiLE and calculates the significance of each site found. The weighing
+algorithm is complex and the details will not be discussed; what should be
+noted is:
+
+The target site that was given as an input parameter does not need to end
+up with the highest weight. If it does not, that is a good sign that the
+provided target site is not the central site of the organization.
+
+A link to a site with many links to the site weighs less than a link to a
+site with fewer links to the site.
+A link from a site with many links weighs less than a link from a site
+with fewer links.
+A link from a site weighs more than a link to a site.
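+
+As a rough illustration of how these rules become the score propagation
+inside BiLE-weigh.pl, here is a simplified, runnable sketch (site names
+are invented; the real script seeds the target with a score of 300 and
+repeats the pass for every node):
+
+    #!/usr/bin/perl
+    use strict;
+    use warnings;
+
+    # Hypothetical link pairs in BiLE's "source:destination" format.
+    my @structure = ("www.target.com:www.partner.com",
+                     "www.partner.com:www.target.com",
+                     "www.other.com:www.target.com");
+
+    my (%out_links, %in_links);
+    for (@structure) {
+        my ($src, $dst) = split /:/;
+        $out_links{$src}++;
+        $in_links{$dst}++;
+    }
+
+    # Each link passes on a share of its endpoint's score: a link *from*
+    # a node carries weight 1, a link *to* a node only 0.6, and both are
+    # diluted by the node's total link count.
+    my %score = ("www.target.com" => 300);
+    my %new;
+    for (@structure) {
+        my ($src, $dst) = split /:/;
+        $new{$dst} += $score{$src} * (1.0 / $out_links{$src}) if $score{$src};
+        $new{$src} += $score{$dst} * (0.6 / $in_links{$dst})  if $score{$dst};
+    }
+    print "$_:$new{$_}\n" for sort { $new{$b} <=> $new{$a} } keys %new;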
+
+##4.3 tld-expand.pl
+The tld-expand.pl script is used to find the target's domains in other TLDs.
+##4.4 vet-IPrange.pl
+The output of BiLE-weigh now lists a number of domains with a relevance
+number for our target Web site. Sites that rank much further down the list
+are not as important as the top sites. The next step is to take the list of
+sites and match their domain names to IPs.
+
+For this, we use vet-IPrange. The vet-IPrange tool performs DNS lookups for
+a supplied list of DNS names. It will then write the IP address of each
+lookup into a file, and then perform a lookup on a second set of names. If
+the IP address matches any of the IP addresses obtained from the first
+step, the tool will add the DNS name to the output file.
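+
+The range test itself is a plain numeric comparison; a minimal, runnable
+sketch (the real script shells out to nslookup and converts addresses
+with its own dec2bin/bin2dec helpers, but the matching logic is the same):
+
+    use strict;
+    use warnings;
+    use Socket qw(inet_aton);
+
+    # Dotted quad -> 32-bit integer, for numeric distance comparison.
+    sub ip2long { return unpack("N", inet_aton($_[0])); }
+
+    # A candidate matches when it lies within $range addresses of any IP
+    # already confirmed for the target (the script defaults to 32).
+    sub in_range {
+        my ($candidate, $known, $range) = @_;
+        for my $k (@$known) {
+            return 1 if abs(ip2long($candidate) - ip2long($k)) < $range;
+        }
+        return 0;
+    }
+
+    print in_range("192.168.1.20", ["192.168.1.10"], 32), "\n";  # prints 1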
+##4.5 qtrace.pl
+qtrace is used to plot the boundaries of networks. It uses a heavily
+modified traceroute, built on a custom-compiled hping, to perform multiple
+traceroutes to boundary sections of a class C network. qtrace uses a list
+of single IP addresses to test the network size. Output is written to a
+specified file.
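+
+The boundary decision reduces to comparing repeated trace results; a
+small sketch of that test, mirroring the logic in qtrace.pl:
+
+    use strict;
+    use warnings;
+
+    # qtrace probes each side of a candidate boundary three times and
+    # records the last responding router; when none of the sorted hops
+    # agree, the two addresses are assumed to sit in different networks.
+    sub looks_like_boundary {
+        my ($hops_a, $hops_b) = @_;   # refs to three last-hop IPs each
+        my @one  = sort @$hops_a;
+        my @two  = sort @$hops_b;
+        my $diff = grep { $one[$_] ne $two[$_] } 0 .. 2;
+        return $diff >= 3;
+    }
+
+    print looks_like_boundary(["10.0.0.1","10.0.0.1","10.0.0.1"],
+                              ["10.9.9.9","10.9.9.9","10.9.9.9"]), "\n";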
+##4.6 vet-mx.pl
+The MX records of a company can also be used to group domains together.
+For this process, we use the vet-mx tool. The tool performs MX lookups for
+a list of domains, and stores each IP it gets in a file. vet-mx then
+performs a second run of lookups on a list of domains, and if any of the
+IPs of the MX records matches any of the first-phase IPs found, the domain
+is added to the output file.
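+
+A minimal sketch of that MX comparison (using Net::DNS, which vet-mx.pl
+itself relies on; domain names are illustrative and error handling is
+omitted):
+
+    use strict;
+    use warnings;
+    use Net::DNS;
+
+    my $res = Net::DNS::Resolver->new();
+
+    # Resolve a domain's MX exchangers to their A record addresses.
+    sub mx_ips {
+        my ($domain) = @_;
+        my @ips;
+        for my $mx (mx($res, $domain)) {
+            my $q = $res->search($mx->exchange) or next;
+            push @ips, map { $_->address }
+                       grep { $_->type eq "A" } $q->answer;
+        }
+        return @ips;
+    }
+
+    # A candidate domain is grouped with the target when any of its MX
+    # addresses overlaps the confirmed set.
+    my %known = map { $_ => 1 } mx_ips("sensepost.com");
+    for my $candidate (qw(hackrack.com example.org)) {
+        print "$candidate\n" if grep { $known{$_} } mx_ips($candidate);
+    }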
+
+##4.7 jarf-rev
+jarf-rev is used to perform a reverse DNS lookup on an IP range. All reverse
+entries that match the filter file are displayed to screen (STDOUT). The
+output displayed is the DNS name followed by IP address.
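+
+A serial sketch of such a sweep (the real script forks up to 10 workers
+with Parallel::ForkManager and shells out to nslookup; here Perl's
+built-in resolver calls are used instead):
+
+    use strict;
+    use warnings;
+    use Socket;   # inet_aton, inet_ntoa, AF_INET
+
+    # Walk an inclusive IP range and reverse-resolve every address,
+    # printing "name;ip" (the name stays empty without a PTR record).
+    my ($start, $stop) = ("192.168.37.1", "192.168.37.5");
+    for (my $n = unpack("N", inet_aton($start));
+         $n <= unpack("N", inet_aton($stop)); $n++) {
+        my $ip   = inet_ntoa(pack("N", $n));
+        my $name = gethostbyaddr(inet_aton($ip), AF_INET) || "";
+        print "$name;$ip\n";
+    }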
+
+##4.8 jarf-dnsbrute
+The jarf-dnsbrute script is a DNS brute forcer, for when DNS zone transfers
+are not allowed. jarf-dnsbrute will perform forward DNS lookups using a
+specified domain name with a list of names for hosts. The script is
+parallelized, forking up to six lookup processes at a time.
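+
+The essence of the brute force, including the wildcard pre-check the
+script performs first, can be sketched like this (serial and abbreviated;
+the real script forks workers and reads names from the "common" file):
+
+    use strict;
+    use warnings;
+    use Net::DNS;
+
+    my $res = Net::DNS::Resolver->new();
+
+    sub a_records {
+        my ($host) = @_;
+        my $q = $res->search($host) or return ();
+        return map { $_->address } grep { $_->type eq "A" } $q->answer;
+    }
+
+    # If a name that should not exist resolves, the zone wildcards
+    # everything and brute forcing would only return noise.
+    my $domain = "example.com";   # illustrative target
+    die "wildcard DNS detected\n"
+        if a_records("clearly-not-a-real-host.$domain");
+
+    # Try each candidate hostname from the wordlist.
+    for my $word (qw(www mail ftp ns)) {
+        print "$word.$domain;$_\n" for a_records("$word.$domain");
+    }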
+
+#5. Usage
+##5.1 BiLE.pl
+> perl BiLE.pl www.sensepost.com sp\_bile\_out.txt
+Two output files are produced, *.mine and *.walrus; for now, *.mine is the
+important file we will use later.
+
+This command will run for some time. BiLE will use HTTrack to download and
+analyze the entire site, extracting links to other sites that will also be
+downloaded, analyzed, and so forth. BiLE will also run a series of Google
+searches using the link: directive to see what external sites have HTTP
+links toward our target site.
+
+The output of this is a file containing all the link pairs in the format:
+
+Source\_site:Destination\_site
+
+BiLE produces output that only contains the source and destination sites
+for each link, but tells us nothing about the relevance of each site. Once
+you have a list of all the "relationships" (links to and from your chosen
+target Web site), you want to sort them according to relevance. The tool we
+use here, BiLE-weigh.pl, uses a complex formula to sort the relationships
+so you can easily see which are most important.
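+
+For orientation, a hypothetical *.mine fragment (site names invented)
+matching the record formats BiLE writes; the "====>" and "---->" lines
+are separators BiLE emits between blocks of link pairs:
+
+    ====> Link from: [www.target.com]
+    www.target.com:www.partner.com
+    www.target.com:www.cdn-host.net
+    ----> Links to: [www.target.com]
+    www.blogger-a.org:www.target.com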
+
+##5.2 BiLE-weigh.pl
+> perl BiLE-weigh.pl www.sensepost.com sp\_bile\_out.txt.mine
+
+Input fields:
+< website > is a Web site name; for example, www.sensepost.com
+< input file > is typically the *.mine output file from BiLE
+
+Output:
+Creates a file called < input file name >.sorted, sorted by weight with
+higher weights first.
+
+Output format:
+Site name:weight
+
+The list you get should look something like:
+
+www.sensepost.com:378.69
+www.redpay.com:91.15
+www.hackrack.com:65.71
+www.condyn.net:76.15
+www.nmrc.org:38.08
+www.nanoteq.co.za:38.08
+www.2computerguys.com:38.08
+www.securityfocus.com:35.10
+www.marcusevans.com:30.00
+www.convmgmt.com:24.00
+www.sqlsecurity.com:23.08
+www.scmagazine.com:23.08
+www.osvdb.org:23.08
+
+The number you see next to each site is the "weight" that BiLE has
+assigned. The weight in itself is an arbitrary value and of no real use to
+us. What is interesting, however, is the relationship between the values of
+the sites. The rate at which the sites discovered become less relevant is
+referred to as the "rate of decay". A slow rate of decay means there are
+many sites with a high relevance, an indication of widespread
+cross-linking. A steep descent shows us that the site is fairly unknown and
+unconnected, a stand-alone site. It is in the latter case that HTML Link
+Analysis becomes interesting to us, as these links are likely to reflect
+actual business relationships.
+
+##5.3 tld-expand.pl
+> perl tld-expand.pl [input file] [output file]
+
+Input fields:
+Input file: the file containing a list of domains
+
+Output:
+Output file: the output file containing domains expanded by TLD
+
+Note:
+tld-expand will run for a while, depending on how many domains are listed
+in the input file. One can monitor the output with: tail -f outputfilename
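+
+A condensed sketch of the expansion loop (abbreviated TLD list; the real
+script also resolves wildcard baseline names and compares A/MX netblocks
+to discard registrar catch-all answers):
+
+    use strict;
+    use warnings;
+
+    my $name = "sensepost";               # domain with its TLD stripped
+    my @tlds = qw(com net org za uk);     # the script carries a far longer list
+    my @subs = ("", "co", "com", "ac", "org", "net", "gov", "mil", "mod");
+
+    for my $tld (@tlds) {
+        for my $sub (@subs) {
+            my $candidate = $sub ? "$name.$sub.$tld" : "$name.$tld";
+            my $answer = `nslookup -timeout=3 -retry=2 -query=ANY $candidate 2>&1`;
+            print "$candidate\n"
+                if $answer =~ /answer/i || $answer =~ /internet address/;
+        }
+    }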
+##5.4 vet-IPrange.pl
+> perl vet-IPrange.pl [input file] [true domain file] [output file] < range >
+
+Input fields:
+Input file: file containing a list of domains
+True domain file: list of domains to be compared against
+
+Output:
+Output file: a file containing matched domains
+
+##5.5 qtrace.pl
+> perl qtrace.pl [ip_address_file] [output_file]
+
+Input fields:
+Input file: full IP addresses, one per line
+Output file: results are written to this file
+
+Typical use:
+perl qtrace.pl ip\_list.txt outputfile.txt
+
+Output format:
+Network range 10.10.1.1-10.10.28
+
+##5.6 vet-mx.pl
+> perl vet-mx.pl [input file] [true domain file] [output file]
+
+Input fields:
+Input file: the file containing a list of domains
+True domain file: list of domains to be compared against
+
+Output:
+Output file: an output file containing matched domains
+
+##5.7 jarf-rev
+> perl jarf-rev [subnetblock]
+
+Input fields:
+Subnetblock: the IP range to sweep, given as start and end IP address
+
+Typical use:
+> perl jarf-rev 192.168.37.1-192.168.37.118
+Output format:
+DNS name ; IP number
+DNS name is blank if no reverse entry could be discovered.
+
+##5.8 jarf-dnsbrute.pl
+
+> perl jarf-dnsbrute [domain_name] [file_with_names]
+
+
+Input fields:
+Domain name: the domain name to brute force
+File\_with\_names: the full path to the file containing common DNS names
+
+Typical use:
+> perl jarf-dnsbrute syngress.com common
+
+Output format:
+DNS name ; IP number
+
+#6. Requirements
+##6.1 BiLE.pl
+
+In order for BiLE.pl to run correctly httrack needs to be installed on the
+operating system. Line 67 of BiLE.pl can be modified to point to the httrack
+executable:
+
+> $mc="httrack $site......
+
+to
+
+> $mc="/home/sensepost/tools/httrack $site......
+
+##6.2 BiLE-weigh.pl
+
+There are no real requirements, except that the script requires the *.mine
+output file from the BiLE.pl script.
+
+##6.3 tld-expand.pl
+N/A
+
+##6.4 vet-IPrange.pl
+N/A
+
+##6.5 qtrace.pl
+
+NB! hping-1s is a recompiled hping with setuid support and a 1-second
+timeout.
+Note: remember to allow ICMP type 11 (time exceeded) into your network!
+
+Line 59 of qtrace.pl can be modified to point to the hping-1s executable:
+
+> my @res=`hping-1s -2......
+
+to
+
+> my @res=`/home/sensepost/tools/modified/hping-1s -2.....
+
+##6.6 vet-mx.pl
+N/A
+
+
162 common
@@ -0,0 +1,162 @@
+www
+ftp
+ns
+mail
+3com
+aix
+apache
+back
+bastion
+bind
+border
+bsd
+business
+chains
+cisco
+content
+corporate
+cvp
+debian
+dns
+domino
+dominoserver
+download
+e-bus
+e-business
+e-mail
+e-safe
+email
+esafe
+external
+extranet
+firebox
+firewall
+freebsd
+front
+ftp
+fw
+fw-
+fwe
+fwi
+gate
+gatekeeper
+gateway
+gauntlet
+group
+help
+hop
+hp
+hp-ux
+hpjet
+hpux
+http
+https
+hub
+ibm
+ids
+info
+inside
+internal
+internet
+intranet
+ipchains
+ipfw
+irix
+jet
+list
+lotus
+lotusdomino
+lotusnotes
+lotusserver
+mail
+mailfeed
+mailgate
+mailgateway
+mailgroup
+mailhost
+maillist
+mailmarshall
+mailpop
+mailrelay
+mandrake
+mimesweeper
+ms
+msproxy
+mx
+nameserver
+news
+newsdesk
+newsfeed
+newsgroup
+newsroom
+newsserver
+nntp
+notes
+noteserver
+notesserver
+ns
+nt
+openbsd
+outside
+pix
+pop
+pop3
+pophost
+popmail
+popserver
+print
+printer
+printspool
+private
+proxy
+proxyserver
+public
+qpop
+raptor
+read
+redcreek
+redhat
+route
+router
+router
+scanner
+screen
+screening
+secure
+seek
+slackware
+smail
+smap
+smtp
+smtpgateway
+smtpgw
+sniffer
+snort
+solaris
+sonic
+spool
+squid
+sun
+sunos
+suse
+switch
+transfer
+trend
+trendmicro
+unseen
+vlan
+wall
+web
+webmail
+webserver
+webswitch
+win2000
+win2k
+win31
+win95
+win98
+winnt
+write
+ww
+www
+xfer
117 jarf-dnsbrute
@@ -0,0 +1,117 @@
+#!/usr/bin/perl
+ use Data::Dumper; # Vital debugging tool
+ use Net::DNS;
+my $res= Net::DNS::Resolver->new();
+
+##############
+#Sub Routines
+##############
+sub uniq(@){ my %seen = (); return grep { !$seen{$_}++ } @_; }
+sub dedupe(@){return uniq map { lc $_ } @_; }
+
+# ----------------------------
+sub forward($;$) {my( $host,$mode, )= @_;
+ $mode ||= 0;
+# ----------------------------
+ my @results = ();
+ my $query = $res->search($host);
+ return () unless $query;
+ foreach my $rr ($query->answer) {
+ next unless ( $rr->type eq "A" );
+ my $address = $rr->{"address"};
+ my $data = $address;
+ $data = "$host;$data" if ( $mode >= 1 );
+ $data = "$data;FL" if ( $mode >= 2 );
+ $data = "$data;$host" if ( $mode >= 3 );
+ push @results, $data; }
+ return uniq sort @results;
+ }
+
+# ----------------------------
+sub nslookup($;$){my( $host,$mode, ) = @_;
+ $mode ||= 0;
+# ----------------------------
+ my @results = ();
+ my $query = $res->query($host,"NS");
+ return () unless $query;
+ foreach my $rr ($query->answer) {
+ next unless ( $rr->type eq "NS" );
+ my $address = $rr->{"nsdname"};
+ my @addresses = forward($address);
+ foreach my $data (@addresses){
+ $data = "$host;$data" if ( $mode >= 1 );
+ $data = "$data;NS" if ( $mode >= 2 );
+ $data = "$data;$host" if ( $mode >= 3 );
+ push @results, $data; }
+ }
+ return uniq sort @results;
+ }
+
+# ----------------------------
+###
+#Main Program starts here
+###
+$|=1;
+
+use Parallel::ForkManager;
+
+my $MAX_PROCESSES = 6;
+my $pm=new Parallel::ForkManager($MAX_PROCESSES);
+
+################# POPULATE @stuff ####################
+if ($#ARGV < 1) {die "perl jarf-dnsbrute domain.com commonnames.txt\n\n";}
+
+my $domain=$ARGV[0];
+my $common=$ARGV[1];
+my @stuff = ();
+
+if ( -r $common ) {
+ open DATA, $common;
+ @stuff = <DATA>;
+ close DATA;
+ chomp @stuff;
+ }
+else {
+ print STDERR "Couldn't read file '$common'\n";
+ exit 1;
+ }
+
+unless ( scalar @stuff ) {
+ print STDERR "Couldn't load any data\n";
+ exit 2;
+ }
+
+@stuff = map "$_.$domain", @stuff;
+
+
+my $brutel=0;
+my $mode=2;
+
+if ($mode==0){$mode=1;}
+
+my ($bullshit)=forward("name.$domain");
+if (length($bullshit)>0){
+ if ($mode==2){
+ print "-any*non*valid-.$domain;$bullshit;CRAP\n";
+ } else {print "-any*non*valid-.$domain;$bullshit;\n";}
+exit 3;
+}
+
+############## OK ready to launch!
+foreach $thing (@stuff){
+ my $pid = $pm->start("id: $thing") and next;
+ do_work($thing);
+ $pm->finish();
+}
+$pm->wait_all_children();
+
+##############
+sub do_work{
+ my ($passed)=@_;
+ @nslookupout=forward($passed,$mode);
+ foreach $out (@nslookupout){
+ my ($testname,$testip)=split(/;/,$out);
+ if ($testip ne $bullshit){
+ print "$out\n";
+ }
+ }
+}
137 jarf-rev
@@ -0,0 +1,137 @@
+#!/usr/bin/perl
+
+###
+### USAGE: perl jarf-rev 10.10.1.1-10.10.1.234
+###
+
+if (scalar @ARGV < 1) {die "perl jarf-rev 10.10.1.1-10.10.1.234 \r\n";}
+
+use Parallel::ForkManager;
+
+###
+#Sub Routines
+###
+
+####
+sub do_work{
+ my ($search)=@_;
+ @nslookupout=`nslookup -timeout=3 -retry=2 $search $ns 2>&1`;
+ foreach $line (@nslookupout){
+ if ($line =~ /Name:/){
+ ($duh,$returner,@crap)=split(/Name: /,$line);
+ }
+ }
+ $returner=~s/ //g;
+ chomp $returner;
+ # All three modes currently build the same "name;ip;RL" record.
+ $tosave=$returner.";".$search.";RL";
+ print "$tosave\n";
+}
+
+####
+sub uniq(@){ my %seen = (); return grep { !$seen{$_}++ } @_; }
+sub dedupe(@){return uniq map { lc $_ } @_;}
+
+####
+
+sub ip2long
+{
+ my @ips = split (/\./, $_[0]);
+ my $binNum = "";
+ foreach $tuple (@ips) {
+ $binNum = $binNum.dec2bin($tuple);
+ }
+ $BigNum = bin2dec($binNum);
+ return ($BigNum);
+}
+
+####
+sub dec2bin
+{
+ my $str = unpack("B32", pack("N", shift));
+ $str =~ s/^0+(?=\d)//;
+ my $RetStr = "";
+ for ($i=0; $i< 8 - length($str); $i++) {
+ $RetStr=$RetStr."0";
+ }
+ $RetStr = $RetStr.$str;
+ return $RetStr;
+}
+
+####
+sub bin2dec
+{
+ return unpack("N", pack("B32", substr("0" x 32 . shift, -32)));
+}
+
+####
+sub long2ip
+{
+ my $binNum = dec2bin($_[0]);
+ my $ipNum = "";
+ my $i;
+ my $ln;
+ if ( ($ln = length($binNum)) < 32) {
+ my $Pad = "";
+ for ($p=0; $p<32-$ln; $p++) {
+ $Pad.="0";
+ }
+ $binNum=$Pad.$binNum;
+ }
+ for ($i=0; $i<32; $i+=8) {
+ my $tuple = substr($binNum, $i, 8);
+ my $des = bin2dec($tuple);
+ if ($ipNum eq "") {
+ $ipNum = $ipNum.$des;
+ } else {
+ $ipNum = $ipNum.".".$des;
+ }
+ }
+ return ($ipNum);
+}
+
+###
+#Main Program starts here
+###
+
+$|=1;
+
+my $MAX_PROCESSES = 10;
+my $pm=new Parallel::ForkManager($MAX_PROCESSES);
+
+my $subnet=@ARGV[0];
+my $ns=@ARGV[1];
+my $mode=2;
+
+##get the real length of the range
+my ($StartIP, $StopIP) = split (/-/,$subnet);
+my $Start=ip2long($StartIP);
+my $Stop=ip2long($StopIP)+1;
+my $Dif=$Stop-$Start;
+
+if ($StartIP eq $StopIP){$Dif=1;}
+
+## load it up
+for (my $z=0; $z < $Dif; $z++) {
+ $Longval=$Start+$z;
+ $NewIP = long2ip($Longval);
+ push @stuff,$NewIP;
+}
+
+## fire away!
+foreach $thing (@stuff){
+ my $pid = $pm->start("id: $thing") and next;
+ do_work($thing);
+ $pm->finish();
+}
+$pm->wait_all_children();
+
+
+
+
217 qtrace.pl
@@ -0,0 +1,217 @@
+#!/usr/bin/perl
+
+## REQUIREMENTS:
+## NB! NB! NB! hping-1s (must be recompiled hping with setuid support,1sec timeout - setuid)
+## Note: remember to allow icmp type 11 into your network!
+##
+
+###
+#Sub Routines
+###
+
+###############
+sub ip2long
+{
+ my @ips = split (/\./, $_[0]);
+ my $binNum = "";
+ foreach $tuple (@ips) {
+ $binNum = $binNum.dec2bin($tuple);
+ }
+ $BigNum = bin2dec($binNum);
+ return ($BigNum);
+}
+######################
+sub dec2bin
+{
+ my $str = unpack("B32", pack("N", shift));
+ $str =~ s/^0+(?=\d)//;
+ my $RetStr = "";
+ for ($i=0; $i< 8 - length($str); $i++) {
+ $RetStr=$RetStr."0";
+ }
+ $RetStr = $RetStr.$str;
+ return $RetStr;
+}
+########################
+sub bin2dec
+{
+ return unpack("N", pack("B32", substr("0" x 32 . shift, -32)));
+}
+########################
+sub findnet
+{
+ $classc = "";
+ ($iptouse) = @_;
+ if (!($iptouse =~ /127.0.0.1/))
+ {
+ @splitter=split(/\./,$iptouse);
+ $classc=@splitter[0].".".@splitter[1].".".@splitter[2];
+ }
+ return ($classc);
+} # findnet
+########################
+sub rampup{
+ ($passed,$ttl,$top)=@_;
+ my $flag=0;
+ my $i=$ttl;
+ if ($ttl==0){$i=$top;}
+ # Walk the TTL downward until a router other than the target answers,
+ # i.e. the last hop in front of $passed.
+ while (($flag==0) && ($i>=1)){
+ my @res=`hping-1s -2 -t $i $passed -n -c 2 -p 53 2>&1`;
+ foreach my $line (@res){
+ if ($line =~ /TTL/){
+ ($crap,$want)=split(/=/,$line);
+ $want =~ s/ //g; chomp $want;
+ if ($want ne $passed) {
+ return ($i,$want);
+ }
+ }
+ }
+ $i--;
+ }
+return 0;
+}
+# ------------------------
+
+###
+#Main Program starts here
+###
+$|=1;
+
+ if($#ARGV<1){die "qtrace.pl <inputfile_with_ips> <outputfile>\n"; }
+ $file = @ARGV[0];
+ $deel="32";
+ $acc="2";
+ $outfile = @ARGV[1];
+
+open (IN,"$file") || die "Cant open input file please check\n";
+
+ ##command line mode
+ open (IN,"$file") || die "Cant open the IP file\n";
+ while (<IN>){
+ chomp;
+ if ($_ !~ /\./){print "$_ is not an IP number\n";}
+ else {push @IPS,$_;}
+ }
+close (IN);
+
+##ok rest is pretty generic..
+
+#check for usage problems
+if (($deel !=4 ) && ($deel != 8) && ($deel != 16) && ($deel != 32) && ($deel != 64)){
+ die "Duh - i said 4,8,16,32 or 64!!\n";
+}
+if (($acc > 4) || ($acc<0)) {
+ die "Duh - accuracy is 0-4! Go away! LEave! Shoo!!\n";
+}
+
+#first ramp up...
+foreach $ip (@IPS){
+ #defaults
+ $lowerbound=&findnet($ip).".0";
+ $upperbound=&findnet($ip).".255";
+
+ #check the file if our IP falls within a range we already had
+ if (open (NETS,"$outfile")){
+ $exitflag=0;
+ while (<NETS>){
+ chomp;
+ #for wrapper..
+ $_ =~ s/[\>\<\#]//g;
+ ($startip,$endip)=split(/\-/,$_);
+ $startiplong = ip2long($startip);
+ $endiplong = ip2long($endip);
+ $ouriplong= ip2long($ip);
+
+ if (($startiplong <= $ouriplong) && ($endiplong >= $ouriplong)){
+ $exitflag=1;
+ }
+
+ }
+ }
+
+ ##it doesn't..we have to test..
+ if ($exitflag==0){
+
+ $thing=&findnet($ip);
+ ($rampup,$duh)=rampup("$thing.1",0,25);
+ print "Done ramping - [$rampup]\n";
+ #### go down from here.
+ my (undef,undef,undef,$want)=split(/\./,$ip);
+
+ $count=0;
+ for ($i = $deel*int($want/$deel); $i >= 0; $i=$i-$deel){
+
+ $value=$i+1;
+ $totrace=&findnet($ip).".".$value;
+
+ $pieceres="";
+ for (1..3){
+ ($duh,$lh)=rampup($totrace,$rampup+$acc,0);
+ chomp $lh;
+ $pieceres=$pieceres.$lh." ";
+ }
+
+ @allres[$count]=$pieceres;
+
+ if ($count > 0){
+ @one=sort(split(/ /,@allres[$count]));
+ @two=sort(split(/ /,@allres[$count-1]));
+
+ $neqsum=0;
+ for (0..2){
+ if (@one[$_] ne @two[$_]) {$neqsum++;}
+ }
+ if ($neqsum >= 3){
+ $boundary=&findnet($ip).".".($i+$deel);
+ $lowerbound=$boundary;
+ last;
+ }
+ }
+ $count++;
+ }
+
+ print "$ip - lower boundary is $lowerbound\n";
+
+ ## find upper boundary
+
+ $count=1;
+ for ($i = $deel*(1+(int($want/$deel))); $i < 256; $i=$i+$deel){
+
+ $value=$i+1;
+ $totrace=&findnet($ip).".".$value;
+
+ $pieceres="";
+ for (1..3){
+ ($duh,$lh)=rampup($totrace,$rampup+$acc,25);
+ chomp $lh;
+ $pieceres=$pieceres.$lh." ";
+ }
+
+ @allres[$count]=$pieceres;
+
+ #we can test anyhow..we have boundary from previous step
+ @one=sort(split(/ /,@allres[$count]));
+ @two=sort(split(/ /,@allres[$count-1]));
+
+ $neqsum=0;
+ for (0..2){
+ if (@one[$_] ne @two[$_]) {$neqsum++;}
+ }
+ if ($neqsum >= 3){
+ $boundary=&findnet($ip).".".($i);
+ $upperbound=$boundary;
+ last;
+ }
+ $count++;
+ }
+
+ print "$ip - upper bound is $upperbound\n\n";
+
+ open (OUT,"+>>$outfile") || die "Cant create output file\n";
+ print OUT "$lowerbound\-$upperbound\n";
+ close (OUT);
+ }
+}
+close (OUT);
+print("Sleeping 10\n");
+print("close 2\n");
302 readme.txt
@@ -0,0 +1,302 @@
+!!!Please read carefully through this readme file, as there are various
+!!!changes one will have to make to get the scripts to work correctly on
+!!!your operating system.
+
+
+#########
+#BiLE.pl#
+#########
+
+The Bi-directional Link Extractor. BiLE leans on Google and HTTrack to
+automate the collection of links to and from the target site, and then
+applies a simple statistical weighing algorithm to deduce which Web sites
+have the strongest "relationships" with the target site.
+
+We run BiLE.pl against the target Web site by simply specifying the Website
+address and a name for the output file.
+
+How to use:
+###########
+
+>perl BiLE.pl www.sensepost.com sp_bile_out.txt
+
+Two output files are produced, *.mine and *.walrus; for now, *.mine is the
+important file we will use later.
+
+This command will run for some time. BiLE will use HTTrack to download and
+analyze the entire site, extracting links to other sites that will also be
+downloaded, analyzed, and so forth. BiLE will also run a series of Google
+searches using the link: directive to see what external sites have HTTP
+links toward our target site.
+
+The output of this is a file containing all the link pairs in the format:
+
+Source_site:Destination_site
+
+BiLE produces output that only contains the source and destination sites
+for each link, but tells us nothing about the relevance of each site. Once
+you have a list of all the "relationships" (links to and from your chosen
+target Web site), you want to sort them according to relevance. The tool we
+use here, BiLE-weigh.pl, uses a complex formula to sort the relationships
+so you can easily see which are most important.
+
+Requirements:
+#############
+
+In order for BiLE.pl to run correctly httrack needs to be installed on the
+operating system. Line 67 of BiLE.pl can be modified to point to the httrack
+executable:
+
+ $mc="httrack $site......
+
+ to
+
+ $mc="/home/sensepost/tools/httrack $site......
+
+
+
+
+###############
+#BiLE-weigh.pl#
+###############
+
+The next tool used in the collection is BiLE-weigh, which takes the output
+of BiLE and calculates the significance of each site found. The weighing
+algorithm is complex and the details will not be discussed; what should be
+noted is:
+
+ The target site that was given as an input parameter does not need to end
+ up with the highest weight. If it does not, that is a good sign that the
+ provided target site is not the central site of the organization.
+
+ A link to a site with many links to the site weighs less than a link to a
+ site with fewer links to the site.
+
+ A link from a site with many links weighs less than a link from a site
+ with fewer links.
+
+ A link from a site weighs more than a link to a site.
+
+
+How to use:
+###########
+
+>perl BiLE-weigh.pl www.sensepost.com sp_bile_out.txt.mine
+
+Input fields:
+<website> is a Web site name; for example, www.sensepost.com
+<input file> is typically the *.mine output file from BiLE
+
+Output:
+Creates a file called <input file name>.sorted, sorted by weight with
+higher weights first.
+
+Output format:
+Site name:weight
+
+The list you get should look something like:
+
+www.sensepost.com:378.69
+www.redpay.com:91.15
+www.hackrack.com:65.71
+www.condyn.net:76.15
+www.nmrc.org:38.08
+www.nanoteq.co.za:38.08
+www.2computerguys.com:38.08
+www.securityfocus.com:35.10
+www.marcusevans.com:30.00
+www.convmgmt.com:24.00
+www.sqlsecurity.com:23.08
+www.scmagazine.com:23.08
+www.osvdb.org:23.08
+
+The number you see next to each site is the "weight" that BiLE has
+assigned. The weight in itself is an arbitrary value and of no real use to
+us. What is interesting, however, is the relationship between the values of
+the sites. The rate at which the sites discovered become less relevant is
+referred to as the "rate of decay". A slow rate of decay means there are
+many sites with a high relevance, an indication of widespread
+cross-linking. A steep descent shows us that the site is fairly unknown and
+unconnected, a stand-alone site. It is in the latter case that HTML Link
+Analysis becomes interesting to us, as these links are likely to reflect
+actual business relationships.
+
+Requirements:
+#############
+
+There are no real requirements, except that the script requires the *.mine
+output file from the BiLE.pl script.
+
+
+
+
+###############
+#tld-expand.pl#
+###############
+
+The tld-expand.pl script is used to find the target's domains in other TLDs.
+
+How to use:
+###########
+>perl tld-expand.pl [input file] [output file]
+
+Input fields:
+Input file: the file containing a list of domains
+
+Output:
+Output file: the output file containing domains expanded by TLD
+
+
+Note:
+#####
+tld-expand will run for a while, depending on how many domains are listed
+in the input file. One can monitor the output with: tail -f outputfilename
+
+
+
+
+################
+#vet-IPrange.pl#
+################
+
+The output of BiLE-weigh now lists a number of domains with a relevance
+number for our target Web site. Sites that rank much further down the list
+are not as important as the top sites. The next step is to take the list of
+sites and match their domain names to IPs.
+
+For this, we use vet-IPrange. The vet-IPrange tool performs DNS lookups
+for a supplied list of DNS names. It will then write the IP address of
+each lookup into a file, and then perform a lookup on a second set of
+names. If the IP address matches any of the IP addresses obtained from the
+first step, the tool will add the DNS name to the output file.
+
+How to use:
+###########
+>perl vet-IPrange.pl [input file] [true domain file] [output file] <range>
+
+Input fields:
+Input file: file containing a list of domains
+True domain file: list of domains to be compared against
+
+Output:
+Output file: a file containing matched domains
+
+
+
+
+###########
+#qtrace.pl#
+###########
+
+qtrace is used to plot the boundaries of networks. It uses a heavily
+modified traceroute, built on a custom-compiled hping, to perform multiple
+traceroutes to boundary sections of a class C network. qtrace uses a list
+of single IP addresses to test the network size. Output is written to a
+specified file.
+
+How to use:
+###########
+>perl qtrace.pl [ip_address_file] [output_file]
+
+Input fields:
+Input file: full IP addresses, one per line
+Output file: results are written to this file
+
+Typical use:
+perl qtrace.pl ip_list.txt outputfile.txt
+
+Output format:
+Network range 10.10.1.1-10.10.28
+
+Requirements:
+#############
+
+NB! hping-1s is a recompiled hping with setuid support and a 1-second
+timeout.
+Note: remember to allow ICMP type 11 (time exceeded) into your network!
+
+Line 59 of qtrace.pl can be modified to point to the hping-1s executable:
+
+ my @res=`hping-1s -2......
+
+ to
+
+ my @res=`/home/sensepost/tools/modified/hping-1s -2.....
+
+
+
+###########
+#vet-mx.pl#
+###########
+
+The MX records of a company can also be used to group domains together.
+For this process, we use the vet-mx tool. The tool performs MX lookups for
+a list of domains, and stores each IP it gets in a file. vet-mx then
+performs a second run of lookups on a list of domains, and if any of the
+IPs of the MX records matches any of the first-phase IPs found, the domain
+is added to the output file.
+
+How to use:
+###########
+>perl vet-mx.pl [input file] [true domain file] [output file]
+
+Input fields:
+Input file: the file containing a list of domains
+True domain file: list of domains to be compared against
+
+Output:
+Output file: an output file containing matched domains
+
+
+
+##########
+#jarf-rev#
+##########
+
+jarf-rev is used to perform a reverse DNS lookup on an IP range. All reverse
+entries that match the filter file are displayed to screen (STDOUT). The
+output displayed is the DNS name followed by IP address.
+
+How to use:
+###########
+>perl jarf-rev [subnetblock]
+
+Input fields:
+Subnetblock: the IP range to sweep, given as start and end IP address
+
+
+Typical use:
+>perl jarf-rev 192.168.37.1-192.168.37.118
+
+Output format:
+DNS name ; IP number
+DNS name is blank if no reverse entry could be discovered.
+
+
+
+
+###############
+#jarf-dnsbrute#
+###############
+
+The jarf-dnsbrute script is a DNS brute forcer, for when DNS zone transfers
+are not allowed. jarf-dnsbrute will perform forward DNS lookups using a
+specified domain name with a list of names for hosts. The script is
+parallelized, forking up to six lookup processes at a time.
+
+How to use:
+###########
+>perl jarf-dnsbrute [domain_name] [file_with_names]
+
+Input fields:
+Domain name: the domain name to brute force
+File_with_names: the full path to the file containing common DNS names
+
+Typical use:
+>perl jarf-dnsbrute syngress.com common
+
+Output format:
+DNS name ; IP number
+
+
+
274 tld-expand.pl
@@ -0,0 +1,274 @@
+#!/usr/bin/perl
+
+#use diagnostics;
+#use Data::Dumper; # Data struct debugging
+#use Getopt::Long qw( HelpMessage VersionMessage :config default no_ignore_case );
+use File::Basename;
+use Net::DNS;
+
+if ($#ARGV<1){die "Usage tld-expand.pl input.file output.file\n";}
+
+# ----------------------------
+# Load TLDS
+#
+# DNS/whois manupilations
+# ----------------------------
+sub loadTLDS() {
+ return dedupe(qw( com org net edu mil gov uk af al dz as
+ ad ao ai aq ag ar am aw ac au at
+ az bs bh bd bb by be bz bj bm bt
+ bo ba bw bv br io bn bg bf bi kh
+ cm ca cv ky cf td cl cn cx cc co
+ km cd cg ck cr ci hr cu cy cz dk
+ dj dm do tp ec eg sv gq er ee et
+ fk fo fj fi fr gf pf tf ga gm ge
+ de gh gi gr gl gd gp gu gt gg gn
+ gw gy ht hm va hn hk hu is in id
+ ir iq ie im il it jm jp je jo kz
+ ke ki kp kr kw kg la lv lb ls lr
+ ly li lt lu mo mk mg mw my mv ml
+ mt mh mq mr mu yt mx fm md mc mn
+ ms ma mz mm na nr np nl an nc nz
+ ni ne ng nu nf mp no om pk pw pa
+ pg py pe ph pn pl pt pr qa re ro
+ ru rw kn lc vc ws sm st sa sn sc
+ sl sg sk si sb so za gz es lk sh
+ pm sd sr sj sz se ch sy tw tj tz
+ th tg tk to tt tn tr tm tc tv ug
+ ua ae gb us um uy uz vu ve vn vg
+ vi wf eh ye yu za zr zm zw int gs
+ info biz su name coop aero ));
+ } # end sub loadTLDS
+
+
+
+# ----------------------------
+sub loadsubTLDS() {
+# ----------------------------
+ return ( "", "com", "co", "ac", "org", "net", "gov", "mil", "mod" );
+ }
+
+
+
+# ----------------------------
+# Unique
+#
+# Returns the unique elements from the passed list.
+# ----------------------------
+sub uniq(@) {
+# ----------------------------
+ my %seen = ();
+ return grep { ! $seen{$_} ++ } @_; }
+
+
+
+# ----------------------------
+# De-Duplicate
+#
+# Returns the (case insensitive) unique elements from the passed list.
+# ---------------------------
+sub dedupe(@) {
+# ----------------------------
+ return uniq map { lc $_ } @_; }
+# ----------------------------
+sub getHostIP($;$$){my($host,$timeout,$server, ) = @_;
+ $timeout ||= 3;
+ my $maxcount = 3;
+ $maxcount = $timeout if ($timeout > 0 && $timeout < 11);
+
+ my $count = 0;
+ my @results;
+ my $result;
+
+ while ($count < $timeout && !$result) { my $query = $res->search($host);
+ if ($query) {
+ foreach my $rr ($query->answer()) {
+ next unless $rr->type() eq "A";
+ $result = $rr->address();
+ push @results, $result; }
+ return @results; }
+ $count ++;
+ }
+ return undef;
+ }
+
+
+# ----------------------------
+sub mxlookup($;$) {my( $domain, $mode, ) = @_;
+ $mode ||= 0;
+# ----------------------------
+ my @results = ();
+ my @mxs = mx($res, $domain);
+ my $timeout = 0;
+ foreach my $mx (@mxs) {
+ next unless ( $timeout < 10);
+ my $exchange = $mx->{"exchange"};
+ my @addresses = getHostIP($exchange);
+ my $address = $addresses[0];
+ my $data = $address;
+ $data = "$exchange;$data" if ( $mode >= 1 );
+ $data = "$data;MX" if ( $mode >= 2 );
+ $data = "$data;$domain" if ( $mode >= 3 );
+ push @results, $data; }
+ return uniq sort grep $_, @results;
+ }
+
+
+
+# ----------------------------
+# getting domain back 1
+# ----------------------------
+sub piecedomain($) { my( $passed, ) = @_;
+# ----------------------------
+ #find what TLD we are in and remove it
+ my @parts = split /\./, $passed;
+ my $lastpart = $parts[$#parts];
+ my $yeah;
+
+ foreach my $tld (loadTLDS()) {
+ if ( $tld eq $lastpart ) {
+ ( $yeah, undef ) = split /\.$tld/, $passed;
+ last; }} # end foreach my $tld (@TLDS)
+ @parts = split /\./, $yeah;
+ $lastpart = $parts[$#parts];
+ foreach my $sub_tld (loadsubTLDS()) {
+ if ( $sub_tld eq $lastpart ) {
+ ( $yeah, undef ) = split /\.$sub_tld/, $yeah;
+ last; }} # end foreach my $sub_tld (@OTHERS)
+ return "$yeah";
+ } # end sub piecedomain
+
+
+
+# ----------------------------
+# find_net
+# ----------------------------
+sub findnet($) {my($iptouse, ) = @_;
+# ----------------------------
+ my @splitter = split /\./, $iptouse;
+ return join ".", @splitter[0..2];
+ } # findnet
+
+# ----------------------------
+sub forward($;$){my( $host,$mode, )= @_;
+ $mode ||= 0;
+# ----------------------------
+ my @results = ();
+ my $query = $res->search($host);
+ return () unless $query;
+ foreach my $rr ($query->answer) {
+ next unless ( $rr->type eq "A" );
+ my $address = $rr->{"address"};
+ my $data = $address;
+ $data = "$host;$data" if ( $mode >= 1 );
+ $data = "$data;FL" if ( $mode >= 2 );
+ $data = "$data;$host" if ( $mode >= 3 );
+ push @results, $data; }
+ return uniq sort grep $_, @results;
+ }
+
+# ----------------------------
+sub get_array_from_filename($;$) { my ($filename, $comment_char, ) = @_;
+ $comment_char ||= "#";
+# ----------------------------
+ open FILE, $filename or die "Cannot open file $filename";
+ my @file_array = <FILE>;
+ close FILE;
+ chomp @file_array;
+ return grep !/^\s*$comment_char/, @file_array;
+ }
+
+# ----------------------------
+sub exp_tld($$$) { my( $fh, $wrapper_mode, $domains_ref, ) = @_;
+# ----------------------------
+ my $baselinedomain = "bigred-control-sp";
+ my $baselinedomain2 = "redbig-control-sp";
+ my @TLDS = loadTLDS();
+ #my @TLDS = qw(cc br fr za nz cz);
+ my @SUB_TLDS = loadsubTLDS();
+ my @domains = map piecedomain($_), @$domains_ref;
+ foreach my $tld (sort @TLDS) {
+ foreach my $domain (@domains) {
+ foreach my $sub (@SUB_TLDS) {
+ my $workdomain;
+ my $fwork;
+ my $fwork2;
+ if ( $sub ) {
+ $workdomain = "$domain.$sub.$tld";
+ $fwork = "$baselinedomain.$sub.$tld";
+ $fwork2 = "$baselinedomain2.$sub.$tld"; }
+ else {
+ $workdomain = "$domain.$tld";
+ $fwork = "$baselinedomain.$tld";
+ $fwork2 = "$baselinedomain2.$tld"; }
+
+
+ my $result = `nslookup -timeout=3 -retry=2 -query=ANY $workdomain 2>&1`;
+
+ if (($result =~ /answer/i) || ($result =~ /internet address/ )) {
+
+ my @mxes = mxlookup($fwork, 0 );
+ push @mxes, mxlookup($fwork2, 0 );
+ @mxes = dedupe(@mxes);
+ my @fakemxnet = map findnet($_), @mxes;
+
+# Determine the real MX records
+ my @realmxes = mxlookup($workdomain);
+ my @realmxnet = map findnet($_), @realmxes;
+
+# Check if there's a match
+ my $mxflag = 0;
+ foreach my $rmxnet (@realmxnet) {
+ foreach my $fmxnet (@fakemxnet) {
+ if ( $rmxnet eq $fmxnet ) {
+ $mxflag = 1;
+ last;
+ }
+ }
+ last if $mxflag; }
+
+# Determine the networks of the fake A records
+ my @aaes = forward("www.$fwork", 0);
+ push @aaes, &forward( $fwork2, 0 );
+ @aaes = dedupe(@aaes);
+ my @fakeaanet = map findnet($_), @aaes;
+
+# Determine the real A records
+ my @realaaes = forward("www.$workdomain");
+ my @realaanet = map findnet($_), @realaaes;
+
+# Check if there's a match
+ my $aflag = 0;
+ foreach my $ranet (@realaanet) {
+ foreach my $fanet (@fakeaanet) {
+ if ( $ranet eq $fanet ) {
+ $aflag = 1;
+ last; }}
+ last if $aflag; }
+
+ if ( ( $aflag == 0 ) && ( $mxflag == 0 ) ) {
+ my $output = $workdomain;
+ print $fh $output, "\n";
+ print $workdomain, "\n"; }}
+ } # next $sub
+ } # next $domain
+ } # next $tld
+ }
+
+
+
+# ============================================================================
+
+
+
+my $input_file = $ARGV[0];
+my $output_file = $ARGV[1];
+$res = Net::DNS::Resolver->new();
+
+@input = get_array_from_filename($input_file);
+$wrapper_mode =1;
+open OUT, ">$output_file" or die "Cant write output file '$output_file'\n";
+
+ select OUT; $| = 1;
+ exp_tld(*OUT, 0, \@input);
+ close OUT;
147 vet-IPrange.pl
@@ -0,0 +1,147 @@
+#!/usr/bin/perl
+
+####
+#### Usage: perl vet-IPrange.pl Input_file True_file Output_file [range]
+#### Input_File: list of websites to be checked
+#### True_File: list of websites to be checked against
+#### Out_file: list of websites where the A records match (within range)
+#### [Range: optional (defaults to 32). Range of match]
+####
+
+use Net::DNS;
+$res= Net::DNS::Resolver->new();
+
+###
+#Sub Routines
+###
+sub uniq(@){
+# ----------------------------
+ my %seen = ();
+ return grep { ! $seen{$_} ++ } @_; }
+# -----------------------------
+sub ip2long
+{
+ my @ips = split (/\./, $_[0]);
+ my $binNum = "";
+ foreach $tuple (@ips) {
+ $binNum = $binNum.dec2bin($tuple);
+ }
+ $BigNum = bin2dec($binNum);
+ return ($BigNum);
+}
+#########
+sub dedupe(@){return uniq map { lc $_ } @_;}
+
+# ----------------------------
+sub dec2bin{
+# ----------------------------
+ my $str = unpack( "B32", pack( "N", shift ) );
+ $str =~ s/^0+(?=\d)//;
+ my $RetStr = "";
+ for ( my $i=0 ; $i < 8-length $str ; $i++ ) {
+ $RetStr= $RetStr . "0";}
+ $RetStr= $RetStr . $str;
+ return $RetStr;
+ }
+
+sub bin2dec{
+# ----------------------------
+ return unpack "N", pack "B32", substr "0" x 32 . shift, -32;
+ }
+
+# ----------------------------
+
+sub forward {
+ ($passed,$mode)=@_;
+ undef @returns;
+ @nslookupout=`nslookup -timeout=$DNSTIMEOUT -retry=$DNSRETRY $passed $nameserver 2>&1;`;
+ my $flag=0;
+ foreach $line (@nslookupout){
+ if (($line =~ /$passed/) && ($line !~ /can't find/)){$flag=1;}
+ if ($line =~ /Address/){
+ ($duh,$returner,@crap)=split(/s: /,$line);
+ }
+ }
+ $returner=~s/ //g;
+ if ($flag==1){
+ @ips=split(/,/,$returner);
+ foreach $ips (@ips){
+ chomp $ips;
+ $ips=~s/ //g;
+ $passed=~s/ //g;
+ $tosave=$ips;
+ if ($mode==0){push @returns,$tosave;}
+ if ($mode==1){
+ $work=$passed.";".$tosave;
+ push @returns,$work;
+ }
+ if ($mode==2){
+ $work=$passed.";".$tosave.";FL";
+ push @returns,$work;
+ }
+
+ }
+ }
+ return @returns;
+}
+# --------------
+###
+#Main Program starts here
+###
+
+$|=1;
+$DNSTIMEOUT=3; $DNSRETRY=2; # nslookup timeout/retry used by forward()
+
+if ($#ARGV<2){die "vet-IPrange.pl inputfile truefile output [range]\n";}
+
+if (@ARGV[3]==0){$range=32;} else {$range=@ARGV[3];}
+
+open (IN,"@ARGV[0]") || die "Cant read input file\n";
+open (TRUE,"@ARGV[1]") || die "Cant read true file\n";
+
+##load files
+while (<IN>){chomp; push @list,$_;}
+close (IN);
+
+while (<TRUE>){ chomp; push @trues,$_;}
+close (TRUE);
+
+
+#### get a list of the confirmed IPs
+foreach $domain (@trues){
+ @arecords=forward($domain,0);
+ foreach $quick (@arecords){push @realips,$quick;}
+}
+@realips=dedupe(@realips);
+print "All IPs are [@realips]\n";
+
+#### now compare the others
+foreach $entry (@list){
+
+ @ips=forward($entry,0);
+ print "Working on [$entry]\n";
+
+ #check it
+ $flag=0;
+ foreach $quick (@realips){
+ $realinbig=&ip2long($quick);
+ foreach $ip (@ips){
+ $ipinbig=&ip2long($ip);
+ if (abs($ipinbig-$realinbig) < $range){
+ $flag=1;
+ }
+ }
+ }
+ if ($flag==1){
+ print "The host $entry match!\n";
+ push @results,$entry;
+ }
+}
+
+@results=dedupe(@results);
+open (OUT,">@ARGV[2]") || die "Cant write output file\n";
+foreach $entry (@results){
+ print OUT "$entry\n";
+}
+close (OUT);
+
+
201 vet-mx.pl
@@ -0,0 +1,201 @@
+#!/usr/bin/perl
+
+####
+#### Usage: perl vet-mx.pl input.file1 input.true.file2 output.file
+#### Input_File: list of domains to be checked
+#### True_File: list of domains to be checked against
+#### Out_file: list of domains where the record matched
+####
+
+# ----------------------------
+# Modules
+# ----------------------------
+use File::Basename;
+use strict;
+use Net::DNS;
+my $res= Net::DNS::Resolver->new();
+
+if ($#ARGV<2){die "Usage vet-mx.pl input.file1 input.true.file2 output.file\n";}
+
+
+###
+#Sub Routines
+###
+
+# ----------------------------
+sub uniq(@) {
+# ----------------------------
+ my %seen = ();
+ return grep { ! $seen{$_}++ } @_ }
+
+# ----------------------------
+sub dedupe(@) {
+# ----------------------------
+ return uniq map { lc $_ } @_; }
+
+
+#######################
+sub get_array_from_filename($;$) {
+ my ($filename, $comment_char,) = @_;
+ open FILE, $filename or die "Cannot open file $filename";
+ my @file_array = ();
+ while (<FILE>) {
+ chomp;
+ next if ( $comment_char && $_ =~ /^\s*$comment_char/ );
+ last if ( $_ eq "exit" );
+ push @file_array, $_; }
+ close FILE;
+ chomp @file_array;
+ return @file_array;
+ }
+
+########################
+# ----------------------------
+sub mxlookup($;$) {my( $domain,$mode, )= @_;
+ $mode ||= 0;
+# ----------------------------
+ my $res = Net::DNS::Resolver->new();
+ my @results = ();
+ my @mxs = mx($res, $domain);
+ my $timeout = 0;
+ foreach my $mx (@mxs) {
+ next unless ( $timeout < 10);
+ my $exchange = $mx->{"exchange"};
+ my @addresses = getHostIP($exchange);
+ my $address = $addresses[0];
+ my $data = $address;
+ $data = "$exchange;$data" if ( $mode >= 1 );
+ $data = "$data;MX" if ( $mode >= 2 );
+ $data = "$data;$domain" if ( $mode >= 3 );
+ push @results, $data; }
+ return uniq sort grep $_, @results;
+ }
+
+#########
+sub getHostIP($;$$) {my( $host,$timeout,$server, )= @_;
+ $timeout||= 3;
+
+ my $maxcount = 3;
+ $maxcount = $timeout if ($timeout > 0 && $timeout < 11);
+#Here we do the actual lookup.
+ my $count = 0;
+ my @results;
+ my $result;
+#Continue looping until we get a result or exceed the retry count
+ while ($count < $timeout && !$result) {
+ my $query = $res->search($host);
+ if ($query) {
+ foreach my $rr ($query->answer()) {
+ next unless $rr->type() eq "A";
+#Return the address.
+ $result = $rr->address();
+ push @results, $result; }
+ return @results; }
+ $count ++;
+ }
+#If we haven't found anything yet, return an error.
+ return undef;
+ }
+
+# ----------------------------
+sub loadsubTLDS() {
+# ----------------------------
+ return ( "", "com", "co", "ac", "org", "net", "gov", "mil", "mod" );
+ }
+
+# ----------------------------
+sub loadTLDS() {
+ return dedupe(qw( com org net edu mil gov uk af al dz as
+ ad ao ai aq ag ar am aw ac au at
+ az bs bh bd bb by be bz bj bm bt
+ bo ba bw bv br io bn bg bf bi kh
+ cm ca cv ky cf td cl cn cx cc co
+ km cd cg ck cr ci hr cu cy cz dk
+ dj dm do tp ec eg sv gq er ee et
+ fk fo fj fi fr gf pf tf ga gm ge
+ de gh gi gr gl gd gp gu gt gg gn
+ gw gy ht hm va hn hk hu is in id
+ ir iq ie im il it jm jp je jo kz
+ ke ki kp kr kw kg la lv lb ls lr
+ ly li lt lu mo mk mg mw my mv ml
+ mt mh mq mr mu yt mx fm md mc mn
+ ms ma mz mm na nr np nl an nc nz
+ ni ne ng nu nf mp no om pk pw pa
+ pg py pe ph pn pl pt pr qa re ro
+ ru rw kn lc vc ws sm st sa sn sc
+ sl sg sk si sb so za gz es lk sh
+ pm sd sr sj sz se ch sy tw tj tz
+ th tg tk to tt tn tr tm tc tv ug
+ ua ae gb us um uy uz vu ve vn vg
+ vi wf eh ye yu za zr zm zw int gs
+ info biz su name coop aero ));
+ } # end sub loadTLDS
+
+# ----------------------------
+# Getting domain back 2
+# ----------------------------
+sub realdomain($) { my( $passed, ) = @_;
+# ----------------------------
+ #find what TLD we are in and remove it
+ my @parts = split /\./, $passed;
+ my $lastpart = $parts[$#parts];
+ my $yeah;
+ foreach my $tld (loadTLDS()){
+ if ( $tld eq $lastpart ){
+ ( $yeah, undef )= split /\.$tld\z/, $passed;
+ last; }}
+ # end foreach my $tld (@TLDS)
+
+ # check if theres a CO or COM in
+ @parts = split /\./, $yeah;
+ $lastpart = $parts[$#parts];
+ foreach my $sub_tld (loadsubTLDS()){
+ if ( $sub_tld eq $lastpart ){
+ ( $yeah, undef )= split /\.$sub_tld\z/, $yeah;
+ last; }}
+ # end foreach my $sub_tld (@OTHERS)
+
+ # ok - now if there are two or more things left - we chop the last
+ # else we just take it
+ @parts = split /\./, $yeah;
+ $lastpart = $parts[$#parts];
+ # we add the stuff we chopped off
+ ( undef, $yeah )= split /$lastpart\./, $passed;
+ return "$lastpart.$yeah";
+ }
+ # end sub realdomain
+
+
+my $input_files0 = $ARGV[0];
+my $input_files1 = $ARGV[1];
+my $output_file = $ARGV[2];
+
+ my @good = get_array_from_filename($input_files0);
+ my @vet = get_array_from_filename($input_files1);
+
+# Establish Output
+open OUT, ">$output_file" or die "Cant write output file '$output_file'\n";
+ select OUT; $| = 1;
+
+# ======================>
+# Stuff happens here
+# ======================>
+ @good = map realdomain($_), @good;
+
+ my @realmxes = dedupe( map mxlookup($_, 0), @good );
+
+ my @results = ();
+
+ foreach my $entry (@vet) {
+ my $romain = realdomain($entry);
+ my @mxes = mxlookup($romain, 0);
+
+ my $flag = 0;
+ foreach my $quick (@realmxes) {
+ foreach my $mx (@mxes) {
+ $flag = 1 if ( $mx eq $quick ); }}
+ push @results, $romain if $flag;
+ } # next $entry (@list)
+ @results = dedupe(@results);
+ print OUT join("\n", @results), "\n";
+ close OUT;
