diff --git a/clipper/Makefile b/clipper/Makefile index 2c083f2..dad4a7f 100644 --- a/clipper/Makefile +++ b/clipper/Makefile @@ -42,7 +42,7 @@ getrel: $(PKG).spec: perl -pe 's/%RELEASE%/${REL}/' $(PKG).spex > $(PKG).spec -$(PKG).tar.gz: Makefile $(TOOLS) $(SRC) $(PKG).spec fastq-lib.cpp fastq-lib.h sam-stats.cpp fastq-stats.cpp gcModel.cpp gcModel.h varcall.cpp utils.h README CHANGES sparsehash-2.0.2 samtools/*.c t +$(PKG).tar.gz: Makefile $(TOOLS) $(SRC) $(PKG).spec fastq-lib.cpp fastq-lib.h sam-stats.cpp fastq-stats.cpp gcModel.cpp gcModel.h varcall.cpp utils.h README CHANGES sparsehash-2.0.3 samtools/*.c t rm -rf $(PKG).${VER}-${REL} mkdir $(PKG).${VER}-${REL} mkdir $(PKG).${VER}-${REL}/tidx @@ -68,10 +68,10 @@ $(PKG).${VER}-${REL}.tar.gz: $(PKG).tar.gz %: %.cpp fastq-lib.cpp fastq-lib.h sparsehash $(CC) $(CFLAGS) $< fastq-lib.cpp -o $@ -sparsehash: sparsehash-2.0.2 - cd sparsehash-2.0.2; ./configure; make +sparsehash: sparsehash-2.0.3 + cd sparsehash-2.0.3; ./configure; make mkdir sparsehash - cp -r sparsehash-2.0.2/src/sparsehash/* sparsehash/ + cp -r sparsehash-2.0.3/src/sparsehash/* sparsehash/ # why the libbam.a doesn't work? not sure... 
*.o works sam-stats: sam-stats.cpp samtools/libbam.a samtools/bam.h fastq-lib.h sparsehash diff --git a/clipper/ea-bcl2fastq.cpp b/clipper/ea-bcl2fastq.cpp index ece28c0..5bc0a1e 100644 --- a/clipper/ea-bcl2fastq.cpp +++ b/clipper/ea-bcl2fastq.cpp @@ -290,7 +290,7 @@ int main (int argc, char **argv) { ++output_fnum; // output file number is sequential masks[i].rnum=output_fnum; // save file number as "read number" if (usegz) { - outtmp = string_format("gzip -2 --rsyncable -c > %s.%d.fq.gz",out.c_str(),output_fnum); + outtmp = string_format("gzip -2 -c > %s.%d.fq.gz",out.c_str(),output_fnum); fo=popenordie(outtmp.c_str(),"w"); } else { outtmp = string_format("%s.%d.fq",out.c_str(),output_fnum); diff --git a/clipper/fastq-lib.cpp b/clipper/fastq-lib.cpp index e4e4321..df092c6 100644 --- a/clipper/fastq-lib.cpp +++ b/clipper/fastq-lib.cpp @@ -160,7 +160,7 @@ FILE *gzopen(const char *f, const char *m, bool*isgz) { if (!strcmp(ext,".gz")) { char *tmp=(char *)malloc(strlen(f)+100); if (strchr(m,'w')) { - strcpy(tmp, "gzip -3 --rsyncable > '"); + strcpy(tmp, "gzip -3 > '"); strcat(tmp, f); strcat(tmp, "'"); } else { @@ -361,7 +361,7 @@ int getstr (char ** lineptr, size_t *n, FILE * stream, char terminator, int offs NUL-terminate the line buffer. */ assert(*n - nchars_avail == read_pos - *lineptr); - if (nchars_avail < 1) + if (nchars_avail < 2) { if (*n > 64) *n *= 2; diff --git a/clipper/fastq-mcf.cpp b/clipper/fastq-mcf.cpp index af59e57..e508925 100644 --- a/clipper/fastq-mcf.cpp +++ b/clipper/fastq-mcf.cpp @@ -31,7 +31,7 @@ See "void usage" below for usage. 
#include "fastq-lib.h" -#define VERSION "1.04.807" +#define VERSION "1.05" #define MAX_ADAPTER_NUM 1000 #define SCANLEN 15 @@ -174,7 +174,7 @@ class inbuffer { fq->qual.s[--fq->qual.n] = '\0'; } - return fq->qual.n > 0; + return fq->qual.n >= 0; // github issue 46, 53 } else { return ::read_fq(fin, rno, fq, name); } @@ -220,6 +220,8 @@ int main (int argc, char **argv) { int ilv3 = -1; int duplen = 0; int dupskip = 0; + int min_start_trim = 0; + int min_end_trim = 0; bool noexec = 0; bool hompol_filter = 0; bool lowcom_filter = 0; @@ -258,6 +260,8 @@ int main (int argc, char **argv) { {"mate-min-len", 1, 0, 0}, {"homopolymer-pct", 1, 0, 0}, {"lowcomplex-pct", 1, 0, 0}, + {"min-start-trim", 1, 0, 0}, + {"min-end-trim", 1, 0, 0}, {0, 0, 0, 0} }; @@ -275,6 +279,10 @@ int main (int argc, char **argv) { keeponlyclip=1; } else if(!strcmp(oname, "mate-qual-mean")) { qf2_mean=atoi(optarg); + } else if (!strcmp(oname, "min-start-trim")) { + min_start_trim = atoi(optarg); + } else if (!strcmp(oname, "min-end-trim")) { + min_end_trim = atoi(optarg); } else if(!strcmp(oname, "homopolymer-pct")) { hompol_pct=atof(optarg)/100.0; hompol_filter=1; @@ -631,6 +639,10 @@ int main (int argc, char **argv) { --nq; --ns; // don't count newline for read len // skip poor quals/lots of N's when doing sampling (otherwise you'll miss some) + if (ns == 0) { // github issue 46, 53 + ++skipped; + continue; + } if ((st.st_size > (sampcnt * 500)) && (skipped < sampcnt) && poorqual(i, ns, s, q)) { ++skipped; continue; @@ -970,6 +982,15 @@ int main (int argc, char **argv) { } } ++nrec; + if (fq[0].qual.n == 0) { // github issue 46, 53 + ++nfiltered; + continue; + } else if (i_n > 1) { + if (fq[1].qual.n == 0) { + ++nfiltered; + continue; + } + } if (read_ok < 0) { ++nerr; continue; @@ -1003,6 +1024,9 @@ int main (int argc, char **argv) { for (f=0;f1) { fprintf(stderr, "Option -v requires a single character argument"); @@ -801,6 +803,13 @@ int main (int argc, char **argv) { // TODO: output barcode 
read ...but only for unmatched? int b; for (b=0;b<=bcnt;++b) { + size_t nameseq_len = strlen(bc[b].id.s); + if ((b < bcnt) && seqnames) { + nameseq_len = strlen(bc[b].seq.s); + if (bc[b].dual) + nameseq_len += bc[b].dual_n + 1; + } + for (i=0;i, using BCFIL as a master list\n" "-B BCFIL Use barcodes from BCFIL, no determination step, codes in \n" "-H Use barcodes from illumina's header, instead of a read\n" +"-s Substitute barcode sequence instead of barcode label into output file names\n" "-b Force beginning of line (5') for barcode matching\n" "-e Force end of line (3') for batcode matching\n" "-t NUM Divide threshold for auto-determine by factor NUM (1), > 1 = more sensitive\n" diff --git a/clipper/fastq-stats.cpp b/clipper/fastq-stats.cpp index 5710bbd..d0f5f31 100644 --- a/clipper/fastq-stats.cpp +++ b/clipper/fastq-stats.cpp @@ -153,7 +153,7 @@ int window = 2000000; int cyclemax = 35; int gcCyclemax = 100; // to compare with fastqc, seq is rounded to nearest 100 to reduce # of gc models; for < 200 length, this is teh same as max=100 float gcSum; -int gcTotal; +uint64_t gcTotal; int show_max = 10; bool debug = 0; @@ -565,6 +565,8 @@ int main( int argc, char**argv ) { if(gc) { // put these where they belong + if (debug) + printf("gcTotal\t%lu\tgcSum\t%f\n\n", gcTotal, gcSum); printf("pct-gc cycle-max\t%d\n", gcCyclemax); printf("pct-gc mean\t%.2f\n", 100.0 * gcSum / gcTotal); } diff --git a/clipper/mirna-quant.cpp b/clipper/mirna-quant.cpp index d609dbc..98eae13 100644 --- a/clipper/mirna-quant.cpp +++ b/clipper/mirna-quant.cpp @@ -681,7 +681,7 @@ FILE *gzopen(const char *f, const char *m, bool*isgz) { if (!strcmp(fext(f),".gz")) { char *tmp=(char *)malloc(strlen(f)+100); if (strchr(m,'w')) { - strcpy(tmp, "gzip --rsyncable > '"); + strcpy(tmp, "gzip > '"); strcat(tmp, f); strcat(tmp, "'"); } else { diff --git a/clipper/sparsehash-2.0.2/AUTHORS b/clipper/sparsehash-2.0.3/AUTHORS similarity index 100% rename from clipper/sparsehash-2.0.2/AUTHORS rename to 
clipper/sparsehash-2.0.3/AUTHORS diff --git a/clipper/sparsehash-2.0.2/COPYING b/clipper/sparsehash-2.0.3/COPYING similarity index 100% rename from clipper/sparsehash-2.0.2/COPYING rename to clipper/sparsehash-2.0.3/COPYING diff --git a/clipper/sparsehash-2.0.2/ChangeLog b/clipper/sparsehash-2.0.3/ChangeLog similarity index 98% rename from clipper/sparsehash-2.0.2/ChangeLog rename to clipper/sparsehash-2.0.3/ChangeLog index 4cbe701..fd53c6f 100644 --- a/clipper/sparsehash-2.0.2/ChangeLog +++ b/clipper/sparsehash-2.0.3/ChangeLog @@ -1,3 +1,8 @@ +Mon Oct 12 21:00:00 2015 Google Inc. + + * sparsehash: version 2.0.3 + * Fix compilation on modern compilers and operating systems + Thu Feb 23 23:47:18 2012 Google Inc. * sparsehash: version 2.0.2 diff --git a/clipper/sparsehash-2.0.2/INSTALL b/clipper/sparsehash-2.0.3/INSTALL similarity index 100% rename from clipper/sparsehash-2.0.2/INSTALL rename to clipper/sparsehash-2.0.3/INSTALL diff --git a/clipper/sparsehash-2.0.2/Makefile.am b/clipper/sparsehash-2.0.3/Makefile.am similarity index 100% rename from clipper/sparsehash-2.0.2/Makefile.am rename to clipper/sparsehash-2.0.3/Makefile.am diff --git a/clipper/sparsehash-2.0.2/Makefile.in b/clipper/sparsehash-2.0.3/Makefile.in similarity index 100% rename from clipper/sparsehash-2.0.2/Makefile.in rename to clipper/sparsehash-2.0.3/Makefile.in diff --git a/clipper/sparsehash-2.0.2/NEWS b/clipper/sparsehash-2.0.3/NEWS similarity index 98% rename from clipper/sparsehash-2.0.2/NEWS rename to clipper/sparsehash-2.0.3/NEWS index 589c709..4af929c 100644 --- a/clipper/sparsehash-2.0.2/NEWS +++ b/clipper/sparsehash-2.0.3/NEWS @@ -1,4 +1,9 @@ -== 23 Ferbruary 2012 == +== 12 October 2015 == + +Various small fixes to ensure compilation on modern compilers and operating +systems. Tagged as 2.0.3 + +== 23 February 2012 == A backwards incompatibility arose from flattening the include headers structure for the folder. 
diff --git a/clipper/sparsehash-2.0.2/README b/clipper/sparsehash-2.0.3/README similarity index 100% rename from clipper/sparsehash-2.0.2/README rename to clipper/sparsehash-2.0.3/README diff --git a/clipper/sparsehash-2.0.2/README_windows.txt b/clipper/sparsehash-2.0.3/README_windows.txt similarity index 97% rename from clipper/sparsehash-2.0.2/README_windows.txt rename to clipper/sparsehash-2.0.3/README_windows.txt index 47c1b35..54df6f8 100644 --- a/clipper/sparsehash-2.0.2/README_windows.txt +++ b/clipper/sparsehash-2.0.3/README_windows.txt @@ -1,25 +1,25 @@ -This project has been ported to Windows. A working solution file -exists in this directory: - sparsehash.sln - -You can load this solution file into either VC++ 7.1 (Visual Studio -2003) or VC++ 8.0 (Visual Studio 2005) -- in the latter case, it will -automatically convert the files to the latest format for you. - -When you build the solution, it will create a number of -unittests,which you can run by hand (or, more easily, under the Visual -Studio debugger) to make sure everything is working properly on your -system. The binaries will end up in a directory called "debug" or -"release" in the top-level directory (next to the .sln file). - -Note that these systems are set to build in Debug mode by default. -You may want to change them to Release mode. - -I have little experience with Windows programming, so there may be -better ways to set this up than I've done! If you run across any -problems, please post to the google-sparsehash Google Group, or report -them on the sparsehash Google Code site: - http://groups.google.com/group/google-sparsehash - http://code.google.com/p/sparsehash/issues/list - --- craig +This project has been ported to Windows. 
A working solution file +exists in this directory: + sparsehash.sln + +You can load this solution file into either VC++ 7.1 (Visual Studio +2003) or VC++ 8.0 (Visual Studio 2005) -- in the latter case, it will +automatically convert the files to the latest format for you. + +When you build the solution, it will create a number of +unittests,which you can run by hand (or, more easily, under the Visual +Studio debugger) to make sure everything is working properly on your +system. The binaries will end up in a directory called "debug" or +"release" in the top-level directory (next to the .sln file). + +Note that these systems are set to build in Debug mode by default. +You may want to change them to Release mode. + +I have little experience with Windows programming, so there may be +better ways to set this up than I've done! If you run across any +problems, please post to the google-sparsehash Google Group, or report +them on the sparsehash Google Code site: + http://groups.google.com/group/google-sparsehash + http://code.google.com/p/sparsehash/issues/list + +-- craig diff --git a/clipper/sparsehash-2.0.2/TODO b/clipper/sparsehash-2.0.3/TODO similarity index 100% rename from clipper/sparsehash-2.0.2/TODO rename to clipper/sparsehash-2.0.3/TODO diff --git a/clipper/sparsehash-2.0.2/aclocal.m4 b/clipper/sparsehash-2.0.3/aclocal.m4 similarity index 100% rename from clipper/sparsehash-2.0.2/aclocal.m4 rename to clipper/sparsehash-2.0.3/aclocal.m4 diff --git a/clipper/sparsehash-2.0.3/autogen.sh b/clipper/sparsehash-2.0.3/autogen.sh new file mode 100755 index 0000000..11b085d --- /dev/null +++ b/clipper/sparsehash-2.0.3/autogen.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +# Before using, you should figure out all the .m4 macros that your +# configure.m4 script needs and make sure they exist in the m4/ +# directory. 
+# +# These are the files that this script might edit: +# aclocal.m4 configure Makefile.in src/config.h.in \ +# compile config.guess config.sub depcomp install-sh missing mkinstalldirs +# +# Here's a command you can run to see what files aclocal will import: +# aclocal -I ../autoconf --output=- | sed -n 's/^m4_include..\([^]]*\).*/\1/p' + +set -ex +rm -rf autom4te.cache + +aclocal --force -I m4 +autoconf -f -W all,no-obsolete +autoheader -f -W all +automake -a -c -f -W all + +rm -rf autom4te.cache +exit 0 diff --git a/clipper/sparsehash-2.0.2/config.guess b/clipper/sparsehash-2.0.3/config.guess similarity index 100% rename from clipper/sparsehash-2.0.2/config.guess rename to clipper/sparsehash-2.0.3/config.guess diff --git a/clipper/sparsehash-2.0.2/config.sub b/clipper/sparsehash-2.0.3/config.sub similarity index 100% rename from clipper/sparsehash-2.0.2/config.sub rename to clipper/sparsehash-2.0.3/config.sub diff --git a/clipper/sparsehash-2.0.2/configure b/clipper/sparsehash-2.0.3/configure similarity index 100% rename from clipper/sparsehash-2.0.2/configure rename to clipper/sparsehash-2.0.3/configure diff --git a/clipper/sparsehash-2.0.2/configure.ac b/clipper/sparsehash-2.0.3/configure.ac similarity index 100% rename from clipper/sparsehash-2.0.2/configure.ac rename to clipper/sparsehash-2.0.3/configure.ac diff --git a/clipper/sparsehash-2.0.2/depcomp b/clipper/sparsehash-2.0.3/depcomp similarity index 100% rename from clipper/sparsehash-2.0.2/depcomp rename to clipper/sparsehash-2.0.3/depcomp diff --git a/clipper/sparsehash-2.0.2/doc/dense_hash_map.html b/clipper/sparsehash-2.0.3/doc/dense_hash_map.html similarity index 100% rename from clipper/sparsehash-2.0.2/doc/dense_hash_map.html rename to clipper/sparsehash-2.0.3/doc/dense_hash_map.html diff --git a/clipper/sparsehash-2.0.2/doc/dense_hash_set.html b/clipper/sparsehash-2.0.3/doc/dense_hash_set.html similarity index 100% rename from clipper/sparsehash-2.0.2/doc/dense_hash_set.html rename to 
clipper/sparsehash-2.0.3/doc/dense_hash_set.html diff --git a/clipper/sparsehash-2.0.2/doc/designstyle.css b/clipper/sparsehash-2.0.3/doc/designstyle.css similarity index 100% rename from clipper/sparsehash-2.0.2/doc/designstyle.css rename to clipper/sparsehash-2.0.3/doc/designstyle.css diff --git a/clipper/sparsehash-2.0.2/doc/implementation.html b/clipper/sparsehash-2.0.3/doc/implementation.html similarity index 100% rename from clipper/sparsehash-2.0.2/doc/implementation.html rename to clipper/sparsehash-2.0.3/doc/implementation.html diff --git a/clipper/sparsehash-2.0.2/doc/index.html b/clipper/sparsehash-2.0.3/doc/index.html similarity index 100% rename from clipper/sparsehash-2.0.2/doc/index.html rename to clipper/sparsehash-2.0.3/doc/index.html diff --git a/clipper/sparsehash-2.0.2/doc/performance.html b/clipper/sparsehash-2.0.3/doc/performance.html similarity index 100% rename from clipper/sparsehash-2.0.2/doc/performance.html rename to clipper/sparsehash-2.0.3/doc/performance.html diff --git a/clipper/sparsehash-2.0.2/doc/sparse_hash_map.html b/clipper/sparsehash-2.0.3/doc/sparse_hash_map.html similarity index 100% rename from clipper/sparsehash-2.0.2/doc/sparse_hash_map.html rename to clipper/sparsehash-2.0.3/doc/sparse_hash_map.html diff --git a/clipper/sparsehash-2.0.2/doc/sparse_hash_set.html b/clipper/sparsehash-2.0.3/doc/sparse_hash_set.html similarity index 100% rename from clipper/sparsehash-2.0.2/doc/sparse_hash_set.html rename to clipper/sparsehash-2.0.3/doc/sparse_hash_set.html diff --git a/clipper/sparsehash-2.0.2/doc/sparsetable.html b/clipper/sparsehash-2.0.3/doc/sparsetable.html similarity index 100% rename from clipper/sparsehash-2.0.2/doc/sparsetable.html rename to clipper/sparsehash-2.0.3/doc/sparsetable.html diff --git a/clipper/sparsehash-2.0.3/experimental/Makefile b/clipper/sparsehash-2.0.3/experimental/Makefile new file mode 100644 index 0000000..aa997f7 --- /dev/null +++ b/clipper/sparsehash-2.0.3/experimental/Makefile @@ -0,0 +1,9 
@@ +example: example.o libchash.o + $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $^ + +.SUFFIXES: .c .o .h +.c.o: + $(CC) -c $(CPPFLAGS) $(CFLAGS) -o $@ $< + +example.o: example.c libchash.h +libchash.o: libchash.c libchash.h diff --git a/clipper/sparsehash-2.0.3/experimental/README b/clipper/sparsehash-2.0.3/experimental/README new file mode 100644 index 0000000..150161d --- /dev/null +++ b/clipper/sparsehash-2.0.3/experimental/README @@ -0,0 +1,14 @@ +This is a C version of sparsehash (and also, maybe, densehash) that I +wrote way back when, and served as the inspiration for the C++ +version. The API for the C version is much uglier than the C++, +because of the lack of template support. I believe the class works, +but I'm not convinced it's really flexible or easy enough to use. + +It would be nice to rework this C class to follow the C++ API as +closely as possible (eg have a set_deleted_key() instead of using a +#define like this code does now). I believe the code compiles and +runs, if anybody is interested in using it now, but it's subject to +major change in the future, as people work on it. 
+ +Craig Silverstein +20 March 2005 diff --git a/clipper/sparsehash-2.0.3/experimental/example.c b/clipper/sparsehash-2.0.3/experimental/example.c new file mode 100644 index 0000000..47b9dba --- /dev/null +++ b/clipper/sparsehash-2.0.3/experimental/example.c @@ -0,0 +1,55 @@ +#include +#include +#include +#include +#include "libchash.h" + +static void TestInsert() { + struct HashTable* ht; + HTItem* bck; + + ht = AllocateHashTable(1, 0); /* value is 1 byte, 0: don't copy keys */ + + HashInsert(ht, PTR_KEY(ht, "January"), 31); /* 0: don't overwrite old val */ + bck = HashInsert(ht, PTR_KEY(ht, "February"), 28); + bck = HashInsert(ht, PTR_KEY(ht, "March"), 31); + + bck = HashFind(ht, PTR_KEY(ht, "February")); + assert(bck); + assert(bck->data == 28); + + FreeHashTable(ht); +} + +static void TestFindOrInsert() { + struct HashTable* ht; + int i; + int iterations = 1000000; + int range = 30; /* random number between 1 and 30 */ + + ht = AllocateHashTable(4, 0); /* value is 4 bytes, 0: don't copy keys */ + + /* We'll test how good rand() is as a random number generator */ + for (i = 0; i < iterations; ++i) { + int key = rand() % range; + HTItem* bck = HashFindOrInsert(ht, key, 0); /* initialize to 0 */ + bck->data++; /* found one more of them */ + } + + for (i = 0; i < range; ++i) { + HTItem* bck = HashFind(ht, i); + if (bck) { + printf("%3d: %d\n", bck->key, bck->data); + } else { + printf("%3d: 0\n", i); + } + } + + FreeHashTable(ht); +} + +int main(int argc, char** argv) { + TestInsert(); + TestFindOrInsert(); + return 0; +} diff --git a/clipper/sparsehash-2.0.3/experimental/libchash.c b/clipper/sparsehash-2.0.3/experimental/libchash.c new file mode 100644 index 0000000..761cf24 --- /dev/null +++ b/clipper/sparsehash-2.0.3/experimental/libchash.c @@ -0,0 +1,1538 @@ +/* Copyright (c) 1998 - 2005, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * --- + * Author: Craig Silverstein + * + * This library is intended to be used for in-memory hash tables, + * though it provides rudimentary permanent-storage capabilities. + * It attempts to be fast, portable, and small. The best algorithm + * to fulfill these goals is an internal probing hashing algorithm, + * as in Knuth, _Art of Computer Programming_, vol III. 
Unlike + * chained (open) hashing, it doesn't require a pointer for every + * item, yet it is still constant time lookup in practice. + * + * Also to save space, we let the contents (both data and key) that + * you insert be a union: if the key/data is small, we store it + * directly in the hashtable, otherwise we store a pointer to it. + * To keep you from having to figure out which, use KEY_PTR and + * PTR_KEY to convert between the arguments to these functions and + * a pointer to the real data. For instance: + * char key[] = "ab", *key2; + * HTItem *bck; HashTable *ht; + * HashInsert(ht, PTR_KEY(ht, key), 0); + * bck = HashFind(ht, PTR_KEY(ht, "ab")); + * key2 = KEY_PTR(ht, bck->key); + * + * There are a rich set of operations supported: + * AllocateHashTable() -- Allocates a hashtable structure and + * returns it. + * cchKey: if it's a positive number, then each key is a + * fixed-length record of that length. If it's 0, + * the key is assumed to be a \0-terminated string. + * fSaveKey: normally, you are responsible for allocating + * space for the key. If this is 1, we make a + * copy of the key for you. + * ClearHashTable() -- Removes everything from a hashtable + * FreeHashTable() -- Frees memory used by a hashtable + * + * HashFind() -- takes a key (use PTR_KEY) and returns the + * HTItem containing that key, or NULL if the + * key is not in the hashtable. + * HashFindLast() -- returns the item found by last HashFind() + * HashFindOrInsert() -- inserts the key/data pair if the key + * is not already in the hashtable, or + * returns the appropraite HTItem if it is. + * HashFindOrInsertItem() -- takes key/data as an HTItem. + * HashInsert() -- adds a key/data pair to the hashtable. What + * it does if the key is already in the table + * depends on the value of SAMEKEY_OVERWRITE. + * HashInsertItem() -- takes key/data as an HTItem. + * HashDelete() -- removes a key/data pair from the hashtable, + * if it's there. RETURNS 1 if it was there, + * 0 else. 
+ * If you use sparse tables and never delete, the full data + * space is available. Otherwise we steal -2 (maybe -3), + * so you can't have data fields with those values. + * HashDeleteLast() -- deletes the item returned by the last Find(). + * + * HashFirstBucket() -- used to iterate over the buckets in a + * hashtable. DON'T INSERT OR DELETE WHILE + * ITERATING! You can't nest iterations. + * HashNextBucket() -- RETURNS NULL at the end of iterating. + * + * HashSetDeltaGoalSize() -- if you're going to insert 1000 items + * at once, call this fn with arg 1000. + * It grows the table more intelligently. + * + * HashSave() -- saves the hashtable to a file. It saves keys ok, + * but it doesn't know how to interpret the data field, + * so if the data field is a pointer to some complex + * structure, you must send a function that takes a + * file pointer and a pointer to the structure, and + * write whatever you want to write. It should return + * the number of bytes written. If the file is NULL, + * it should just return the number of bytes it would + * write, without writing anything. + * If your data field is just an integer, not a + * pointer, just send NULL for the function. + * HashLoad() -- loads a hashtable. It needs a function that takes + * a file and the size of the structure, and expects + * you to read in the structure and return a pointer + * to it. You must do memory allocation, etc. If + * the data is just a number, send NULL. + * HashLoadKeys() -- unlike HashLoad(), doesn't load the data off disk + * until needed. This saves memory, but if you look + * up the same key a lot, it does a disk access each + * time. + * You can't do Insert() or Delete() on hashtables that were loaded + * from disk. + * + * See libchash.h for parameters you can modify. Make sure LOG_WORD_SIZE + * is defined correctly for your machine! (5 for 32 bit words, 6 for 64). 
+ */ + +#include +#include +#include +#include /* for strcmp, memcmp, etc */ +#include /* ULTRIX needs this for in.h */ +#include /* for reading/writing hashtables */ +#include +#include "libchash.h" /* all the types */ + + /* if keys are stored directly but cchKey is less than sizeof(ulong), */ + /* this cuts off the bits at the end */ +char grgKeyTruncMask[sizeof(ulong)][sizeof(ulong)]; +#define KEY_TRUNC(ht, key) \ + ( STORES_PTR(ht) || (ht)->cchKey == sizeof(ulong) \ + ? (key) : ((key) & *(ulong *)&(grgKeyTruncMask[(ht)->cchKey][0])) ) + + /* round num up to a multiple of wordsize. (LOG_WORD_SIZE-3 is in bytes) */ +#define WORD_ROUND(num) ( ((num-1) | ((1<<(LOG_WORD_SIZE-3))-1)) + 1 ) +#define NULL_TERMINATED 0 /* val of cchKey if keys are null-term strings */ + + /* Useful operations we do to keys: compare them, copy them, free them */ + +#define KEY_CMP(ht, key1, key2) ( !STORES_PTR(ht) ? (key1) - (key2) : \ + (key1) == (key2) ? 0 : \ + HashKeySize(ht) == NULL_TERMINATED ? \ + strcmp((char *)key1, (char *)key2) :\ + memcmp((void *)key1, (void *)key2, \ + HashKeySize(ht)) ) + +#define COPY_KEY(ht, keyTo, keyFrom) do \ + if ( !STORES_PTR(ht) || !(ht)->fSaveKeys ) \ + (keyTo) = (keyFrom); /* just copy pointer or info */\ + else if ( (ht)->cchKey == NULL_TERMINATED ) /* copy 0-term.ed str */\ + { \ + (keyTo) = (ulong)HTsmalloc( WORD_ROUND(strlen((char *)(keyFrom))+1) ); \ + strcpy((char *)(keyTo), (char *)(keyFrom)); \ + } \ + else \ + { \ + (keyTo) = (ulong) HTsmalloc( WORD_ROUND((ht)->cchKey) ); \ + memcpy( (char *)(keyTo), (char *)(keyFrom), (ht)->cchKey); \ + } \ + while ( 0 ) + +#define FREE_KEY(ht, key) do \ + if ( STORES_PTR(ht) && (ht)->fSaveKeys ) \ + if ( (ht)->cchKey == NULL_TERMINATED ) \ + HTfree((char *)(key), WORD_ROUND(strlen((char *)(key))+1)); \ + else \ + HTfree((char *)(key), WORD_ROUND((ht)->cchKey)); \ + while ( 0 ) + + /* the following are useful for bitmaps */ + /* Format is like this (if 1 word = 4 bits): 3210 7654 ba98 fedc ... 
*/ +typedef ulong HTBitmapPart; /* this has to be unsigned, for >> */ +typedef HTBitmapPart HTBitmap[1<> LOG_WORD_SIZE) << (LOG_WORD_SIZE-3) ) +#define MOD2(i, logmod) ( (i) & ((1<<(logmod))-1) ) +#define DIV_NUM_ENTRIES(i) ( (i) >> LOG_WORD_SIZE ) +#define MOD_NUM_ENTRIES(i) ( MOD2(i, LOG_WORD_SIZE) ) +#define MODBIT(i) ( ((ulong)1) << MOD_NUM_ENTRIES(i) ) + +#define TEST_BITMAP(bm, i) ( (bm)[DIV_NUM_ENTRIES(i)] & MODBIT(i) ? 1 : 0 ) +#define SET_BITMAP(bm, i) (bm)[DIV_NUM_ENTRIES(i)] |= MODBIT(i) +#define CLEAR_BITMAP(bm, i) (bm)[DIV_NUM_ENTRIES(i)] &= ~MODBIT(i) + + /* the following are useful for reading and writing hashtables */ +#define READ_UL(fp, data) \ + do { \ + long _ul; \ + fread(&_ul, sizeof(_ul), 1, (fp)); \ + data = ntohl(_ul); \ + } while (0) + +#define WRITE_UL(fp, data) \ + do { \ + long _ul = htonl((long)(data)); \ + fwrite(&_ul, sizeof(_ul), 1, (fp)); \ + } while (0) + + /* Moves data from disk to memory if necessary. Note dataRead cannot be * + * NULL, because then we might as well (and do) load the data into memory */ +#define LOAD_AND_RETURN(ht, loadCommand) /* lC returns an HTItem * */ \ + if ( !(ht)->fpData ) /* data is stored in memory */ \ + return (loadCommand); \ + else /* must read data off of disk */ \ + { \ + int cchData; \ + HTItem *bck; \ + if ( (ht)->bckData.data ) free((char *)(ht)->bckData.data); \ + ht->bckData.data = (ulong)NULL; /* needed if loadCommand fails */ \ + bck = (loadCommand); \ + if ( bck == NULL ) /* loadCommand failed: key not found */ \ + return NULL; \ + else \ + (ht)->bckData = *bck; \ + fseek(ht->fpData, (ht)->bckData.data, SEEK_SET); \ + READ_UL((ht)->fpData, cchData); \ + (ht)->bckData.data = (ulong)(ht)->dataRead((ht)->fpData, cchData); \ + return &((ht)->bckData); \ + } + + +/* ======================================================================== */ +/* UTILITY ROUTINES */ +/* ---------------------- */ + +/* HTsmalloc() -- safe malloc + * allocates memory, or crashes if the allocation fails. 
+ */ +static void *HTsmalloc(unsigned long size) +{ + void *retval; + + if ( size == 0 ) + return NULL; + retval = (void *)malloc(size); + if ( !retval ) + { + fprintf(stderr, "HTsmalloc: Unable to allocate %lu bytes of memory\n", + size); + exit(1); + } + return retval; +} + +/* HTscalloc() -- safe calloc + * allocates memory and initializes it to 0, or crashes if + * the allocation fails. + */ +static void *HTscalloc(unsigned long size) +{ + void *retval; + + retval = (void *)calloc(size, 1); + if ( !retval && size > 0 ) + { + fprintf(stderr, "HTscalloc: Unable to allocate %lu bytes of memory\n", + size); + exit(1); + } + return retval; +} + +/* HTsrealloc() -- safe calloc + * grows the amount of memory from a source, or crashes if + * the allocation fails. + */ +static void *HTsrealloc(void *ptr, unsigned long new_size, long delta) +{ + if ( ptr == NULL ) + return HTsmalloc(new_size); + ptr = realloc(ptr, new_size); + if ( !ptr && new_size > 0 ) + { + fprintf(stderr, "HTsrealloc: Unable to reallocate %lu bytes of memory\n", + new_size); + exit(1); + } + return ptr; +} + +/* HTfree() -- keep track of memory use + * frees memory using free, but updates count of how much memory + * is being used. + */ +static void HTfree(void *ptr, unsigned long size) +{ + if ( size > 0 ) /* some systems seem to not like freeing NULL */ + free(ptr); +} + +/*************************************************************************\ +| HTcopy() | +| Sometimes we interpret data as a ulong. But ulongs must be | +| aligned on some machines, so instead of casting we copy. | +\*************************************************************************/ + +unsigned long HTcopy(char *ul) +{ + unsigned long retval; + + memcpy(&retval, ul, sizeof(retval)); + return retval; +} + +/*************************************************************************\ +| HTSetupKeyTrunc() | +| If keys are stored directly but cchKey is less than | +| sizeof(ulong), this cuts off the bits at the end. 
| +\*************************************************************************/ + +static void HTSetupKeyTrunc(void) +{ + int i, j; + + for ( i = 0; i < sizeof(unsigned long); i++ ) + for ( j = 0; j < sizeof(unsigned long); j++ ) + grgKeyTruncMask[i][j] = j < i ? 255 : 0; /* chars have 8 bits */ +} + + +/* ======================================================================== */ +/* TABLE ROUTINES */ +/* -------------------- */ + +/* The idea is that a hashtable with (logically) t buckets is divided + * into t/M groups of M buckets each. (M is a constant set in + * LOG_BM_WORDS for efficiency.) Each group is stored sparsely. + * Thus, inserting into the table causes some array to grow, which is + * slow but still constant time. Lookup involves doing a + * logical-position-to-sparse-position lookup, which is also slow but + * constant time. The larger M is, the slower these operations are + * but the less overhead (slightly). + * + * To store the sparse array, we store a bitmap B, where B[i] = 1 iff + * bucket i is non-empty. Then to look up bucket i we really look up + * array[# of 1s before i in B]. This is constant time for fixed M. + * + * Terminology: the position of an item in the overall table (from + * 1 .. t) is called its "location." The logical position in a group + * (from 1 .. M ) is called its "position." The actual location in + * the array (from 1 .. # of non-empty buckets in the group) is + * called its "offset." 
+ * + * The following operations are supported: + * o Allocate an array with t buckets, all empty + * o Free a array (but not whatever was stored in the buckets) + * o Tell whether or not a bucket is empty + * o Return a bucket with a given location + * o Set the value of a bucket at a given location + * o Iterate through all the buckets in the array + * o Read and write an occupancy bitmap to disk + * o Return how much memory is being allocated by the array structure + */ + +#ifndef SparseBucket /* by default, each bucket holds an HTItem */ +#define SparseBucket HTItem +#endif + +typedef struct SparseBin { + SparseBucket *binSparse; + HTBitmap bmOccupied; /* bmOccupied[i] is 1 if bucket i has an item */ + short cOccupied; /* size of binSparse; useful for iterators, eg */ +} SparseBin; + +typedef struct SparseIterator { + long posGroup; + long posOffset; + SparseBin *binSparse; /* state info, to avoid args for NextBucket() */ + ulong cBuckets; +} SparseIterator; + +#define LOG_LOW_BIN_SIZE ( LOG_BM_WORDS+LOG_WORD_SIZE ) +#define SPARSE_GROUPS(cBuckets) ( (((cBuckets)-1) >> LOG_LOW_BIN_SIZE) + 1 ) + + /* we need a small function to figure out # of items set in the bm */ +static HTOffset EntriesUpto(HTBitmapPart *bm, int i) +{ /* returns # of set bits in 0..i-1 */ + HTOffset retval = 0; + static HTOffset rgcBits[256] = /* # of bits set in one char */ + {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 
3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8}; + + if ( i == 0 ) return 0; + for ( ; i > sizeof(*bm)*8; i -= sizeof(*bm)*8, bm++ ) + { /* think of it as loop unrolling */ +#if LOG_WORD_SIZE >= 3 /* 1 byte per word, or more */ + retval += rgcBits[*bm & 255]; /* get the low byte */ +#if LOG_WORD_SIZE >= 4 /* at least 2 bytes */ + retval += rgcBits[(*bm >> 8) & 255]; +#if LOG_WORD_SIZE >= 5 /* at least 4 bytes */ + retval += rgcBits[(*bm >> 16) & 255]; + retval += rgcBits[(*bm >> 24) & 255]; +#if LOG_WORD_SIZE >= 6 /* 8 bytes! */ + retval += rgcBits[(*bm >> 32) & 255]; + retval += rgcBits[(*bm >> 40) & 255]; + retval += rgcBits[(*bm >> 48) & 255]; + retval += rgcBits[(*bm >> 56) & 255]; +#if LOG_WORD_SIZE >= 7 /* not a concern for a while... */ +#error Need to rewrite EntriesUpto to support such big words +#endif /* >8 bytes */ +#endif /* 8 bytes */ +#endif /* 4 bytes */ +#endif /* 2 bytes */ +#endif /* 1 byte */ + } + switch ( i ) { /* from 0 to 63 */ + case 0: + return retval; +#if LOG_WORD_SIZE >= 3 /* 1 byte per word, or more */ + case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 8: + return (retval + rgcBits[*bm & ((1 << i)-1)]); +#if LOG_WORD_SIZE >= 4 /* at least 2 bytes */ + case 9: case 10: case 11: case 12: case 13: case 14: case 15: case 16: + return (retval + rgcBits[*bm & 255] + + rgcBits[(*bm >> 8) & ((1 << (i-8))-1)]); +#if LOG_WORD_SIZE >= 5 /* at least 4 bytes */ + case 17: case 18: case 19: case 20: case 21: case 22: case 23: case 24: + return (retval + rgcBits[*bm & 255] + rgcBits[(*bm >> 8) & 255] + + rgcBits[(*bm >> 16) & ((1 << (i-16))-1)]); + case 25: case 26: case 27: case 28: case 29: case 30: case 31: case 32: + return (retval + rgcBits[*bm & 255] + rgcBits[(*bm >> 8) & 255] + + rgcBits[(*bm >> 16) & 255] + + rgcBits[(*bm >> 24) & ((1 << (i-24))-1)]); +#if LOG_WORD_SIZE >= 6 /* 8 bytes! 
*/ + case 33: case 34: case 35: case 36: case 37: case 38: case 39: case 40: + return (retval + rgcBits[*bm & 255] + rgcBits[(*bm >> 8) & 255] + + rgcBits[(*bm >> 16) & 255] + rgcBits[(*bm >> 24) & 255] + + rgcBits[(*bm >> 32) & ((1 << (i-32))-1)]); + case 41: case 42: case 43: case 44: case 45: case 46: case 47: case 48: + return (retval + rgcBits[*bm & 255] + rgcBits[(*bm >> 8) & 255] + + rgcBits[(*bm >> 16) & 255] + rgcBits[(*bm >> 24) & 255] + + rgcBits[(*bm >> 32) & 255] + + rgcBits[(*bm >> 40) & ((1 << (i-40))-1)]); + case 49: case 50: case 51: case 52: case 53: case 54: case 55: case 56: + return (retval + rgcBits[*bm & 255] + rgcBits[(*bm >> 8) & 255] + + rgcBits[(*bm >> 16) & 255] + rgcBits[(*bm >> 24) & 255] + + rgcBits[(*bm >> 32) & 255] + rgcBits[(*bm >> 40) & 255] + + rgcBits[(*bm >> 48) & ((1 << (i-48))-1)]); + case 57: case 58: case 59: case 60: case 61: case 62: case 63: case 64: + return (retval + rgcBits[*bm & 255] + rgcBits[(*bm >> 8) & 255] + + rgcBits[(*bm >> 16) & 255] + rgcBits[(*bm >> 24) & 255] + + rgcBits[(*bm >> 32) & 255] + rgcBits[(*bm >> 40) & 255] + + rgcBits[(*bm >> 48) & 255] + + rgcBits[(*bm >> 56) & ((1 << (i-56))-1)]); +#endif /* 8 bytes */ +#endif /* 4 bytes */ +#endif /* 2 bytes */ +#endif /* 1 byte */ + } + assert("" == "word size is too big in EntriesUpto()"); + return -1; +} +#define SPARSE_POS_TO_OFFSET(bm, i) ( EntriesUpto(&((bm)[0]), i) ) +#define SPARSE_BUCKET(bin, location) \ + ( (bin)[(location) >> LOG_LOW_BIN_SIZE].binSparse + \ + SPARSE_POS_TO_OFFSET((bin)[(location)>>LOG_LOW_BIN_SIZE].bmOccupied, \ + MOD2(location, LOG_LOW_BIN_SIZE)) ) + + +/*************************************************************************\ +| SparseAllocate() | +| SparseFree() | +| Allocates, sets-to-empty, and frees a sparse array. All you need | +| to tell me is how many buckets you want. I return the number of | +| buckets I actually allocated, setting the array as a parameter. 
| +| Note that you have to set auxilliary parameters, like cOccupied. | +\*************************************************************************/ + +static ulong SparseAllocate(SparseBin **pbinSparse, ulong cBuckets) +{ + int cGroups = SPARSE_GROUPS(cBuckets); + + *pbinSparse = (SparseBin *) HTscalloc(sizeof(**pbinSparse) * cGroups); + return cGroups << LOG_LOW_BIN_SIZE; +} + +static SparseBin *SparseFree(SparseBin *binSparse, ulong cBuckets) +{ + ulong iGroup, cGroups = SPARSE_GROUPS(cBuckets); + + for ( iGroup = 0; iGroup < cGroups; iGroup++ ) + HTfree(binSparse[iGroup].binSparse, (sizeof(*binSparse[iGroup].binSparse) + * binSparse[iGroup].cOccupied)); + HTfree(binSparse, sizeof(*binSparse) * cGroups); + return NULL; +} + +/*************************************************************************\ +| SparseIsEmpty() | +| SparseFind() | +| You give me a location (ie a number between 1 and t), and I | +| return the bucket at that location, or NULL if the bucket is | +| empty. It's OK to call Find() on an empty table. | +\*************************************************************************/ + +static int SparseIsEmpty(SparseBin *binSparse, ulong location) +{ + return !TEST_BITMAP(binSparse[location>>LOG_LOW_BIN_SIZE].bmOccupied, + MOD2(location, LOG_LOW_BIN_SIZE)); +} + +static SparseBucket *SparseFind(SparseBin *binSparse, ulong location) +{ + if ( SparseIsEmpty(binSparse, location) ) + return NULL; + return SPARSE_BUCKET(binSparse, location); +} + +/*************************************************************************\ +| SparseInsert() | +| You give me a location, and contents to put there, and I insert | +| into that location and RETURN a pointer to the location. If | +| bucket was already occupied, I write over the contents only if | +| *pfOverwrite is 1. We set *pfOverwrite to 1 if there was someone | +| there (whether or not we overwrote) and 0 else. 
| +\*************************************************************************/ + +static SparseBucket *SparseInsert(SparseBin *binSparse, SparseBucket *bckInsert, + ulong location, int *pfOverwrite) +{ + SparseBucket *bckPlace; + HTOffset offset; + + bckPlace = SparseFind(binSparse, location); + if ( bckPlace ) /* means we replace old contents */ + { + if ( *pfOverwrite ) + *bckPlace = *bckInsert; + *pfOverwrite = 1; + return bckPlace; + } + + binSparse += (location >> LOG_LOW_BIN_SIZE); + offset = SPARSE_POS_TO_OFFSET(binSparse->bmOccupied, + MOD2(location, LOG_LOW_BIN_SIZE)); + binSparse->binSparse = (SparseBucket *) + HTsrealloc(binSparse->binSparse, + sizeof(*binSparse->binSparse) * ++binSparse->cOccupied, + sizeof(*binSparse->binSparse)); + memmove(binSparse->binSparse + offset+1, + binSparse->binSparse + offset, + (binSparse->cOccupied-1 - offset) * sizeof(*binSparse->binSparse)); + binSparse->binSparse[offset] = *bckInsert; + SET_BITMAP(binSparse->bmOccupied, MOD2(location, LOG_LOW_BIN_SIZE)); + *pfOverwrite = 0; + return binSparse->binSparse + offset; +} + +/*************************************************************************\ +| SparseFirstBucket() | +| SparseNextBucket() | +| SparseCurrentBit() | +| Iterate through the occupied buckets of a dense hashtable. You | +| must, of course, have allocated space yourself for the iterator. | +\*************************************************************************/ + +static SparseBucket *SparseNextBucket(SparseIterator *iter) +{ + if ( iter->posOffset != -1 && /* not called from FirstBucket()? 
*/ + (++iter->posOffset < iter->binSparse[iter->posGroup].cOccupied) ) + return iter->binSparse[iter->posGroup].binSparse + iter->posOffset; + + iter->posOffset = 0; /* start the next group */ + for ( iter->posGroup++; iter->posGroup < SPARSE_GROUPS(iter->cBuckets); + iter->posGroup++ ) + if ( iter->binSparse[iter->posGroup].cOccupied > 0 ) + return iter->binSparse[iter->posGroup].binSparse; /* + 0 */ + return NULL; /* all remaining groups were empty */ +} + +static SparseBucket *SparseFirstBucket(SparseIterator *iter, + SparseBin *binSparse, ulong cBuckets) +{ + iter->binSparse = binSparse; /* set it up for NextBucket() */ + iter->cBuckets = cBuckets; + iter->posOffset = -1; /* when we advance, we're at 0 */ + iter->posGroup = -1; + return SparseNextBucket(iter); +} + +/*************************************************************************\ +| SparseWrite() | +| SparseRead() | +| These are routines for storing a sparse hashtable onto disk. We | +| store the number of buckets and a bitmap indicating which buckets | +| are allocated (occupied). The actual contents of the buckets | +| must be stored separately. 
| +\*************************************************************************/ + +static void SparseWrite(FILE *fp, SparseBin *binSparse, ulong cBuckets) +{ + ulong i, j; + + WRITE_UL(fp, cBuckets); + for ( i = 0; i < SPARSE_GROUPS(cBuckets); i++ ) + for ( j = 0; j < (1<rgBuckets, cBuckets); +} + +static ulong DenseAllocate(DenseBin **pbin, ulong cBuckets) +{ + *pbin = (DenseBin *) HTsmalloc(sizeof(*pbin)); + (*pbin)->rgBuckets = (DenseBucket *) HTsmalloc(sizeof(*(*pbin)->rgBuckets) + * cBuckets); + DenseClear(*pbin, cBuckets); + return cBuckets; +} + +static DenseBin *DenseFree(DenseBin *bin, ulong cBuckets) +{ + HTfree(bin->rgBuckets, sizeof(*bin->rgBuckets) * cBuckets); + HTfree(bin, sizeof(*bin)); + return NULL; +} + +static int DenseIsEmpty(DenseBin *bin, ulong location) +{ + return DENSE_IS_EMPTY(bin->rgBuckets, location); +} + +static DenseBucket *DenseFind(DenseBin *bin, ulong location) +{ + if ( DenseIsEmpty(bin, location) ) + return NULL; + return bin->rgBuckets + location; +} + +static DenseBucket *DenseInsert(DenseBin *bin, DenseBucket *bckInsert, + ulong location, int *pfOverwrite) +{ + DenseBucket *bckPlace; + + bckPlace = DenseFind(bin, location); + if ( bckPlace ) /* means something is already there */ + { + if ( *pfOverwrite ) + *bckPlace = *bckInsert; + *pfOverwrite = 1; /* set to 1 to indicate someone was there */ + return bckPlace; + } + else + { + bin->rgBuckets[location] = *bckInsert; + *pfOverwrite = 0; + return bin->rgBuckets + location; + } +} + +static DenseBucket *DenseNextBucket(DenseIterator *iter) +{ + for ( iter->pos++; iter->pos < iter->cBuckets; iter->pos++ ) + if ( !DenseIsEmpty(iter->bin, iter->pos) ) + return iter->bin->rgBuckets + iter->pos; + return NULL; /* all remaining groups were empty */ +} + +static DenseBucket *DenseFirstBucket(DenseIterator *iter, + DenseBin *bin, ulong cBuckets) +{ + iter->bin = bin; /* set it up for NextBucket() */ + iter->cBuckets = cBuckets; + iter->pos = -1; /* thus the next bucket will be 0 */ + 
return DenseNextBucket(iter); +} + +static void DenseWrite(FILE *fp, DenseBin *bin, ulong cBuckets) +{ + ulong pos = 0, bit, bm; + + WRITE_UL(fp, cBuckets); + while ( pos < cBuckets ) + { + bm = 0; + for ( bit = 0; bit < 8*sizeof(ulong); bit++ ) + { + if ( !DenseIsEmpty(bin, pos) ) + SET_BITMAP(&bm, bit); /* in fks-hash.h */ + if ( ++pos == cBuckets ) + break; + } + WRITE_UL(fp, bm); + } +} + +static ulong DenseRead(FILE *fp, DenseBin **pbin) +{ + ulong pos = 0, bit, bm, cBuckets; + + READ_UL(fp, cBuckets); + cBuckets = DenseAllocate(pbin, cBuckets); + while ( pos < cBuckets ) + { + READ_UL(fp, bm); + for ( bit = 0; bit < 8*sizeof(ulong); bit++ ) + { + if ( TEST_BITMAP(&bm, bit) ) /* in fks-hash.h */ + DENSE_SET_OCCUPIED((*pbin)->rgBuckets, pos); + else + DENSE_SET_EMPTY((*pbin)->rgBuckets, pos); + if ( ++pos == cBuckets ) + break; + } + } + return cBuckets; +} + +static ulong DenseMemory(ulong cBuckets, ulong cOccupied) +{ + return cBuckets * sizeof(DenseBucket); +} + + +/* ======================================================================== */ +/* HASHING ROUTINES */ +/* ---------------------- */ + +/* Implements a simple quadratic hashing scheme. We have a single hash + * table of size t and a single hash function h(x). When inserting an + * item, first we try h(x) % t. If it's occupied, we try h(x) + + * i*(i-1)/2 % t for increasing values of i until we hit a not-occupied + * space. To make this dynamic, we double the size of the hash table as + * soon as more than half the cells are occupied. When deleting, we can + * choose to shrink the hashtable when less than a quarter of the + * cells are occupied, or we can choose never to shrink the hashtable. + * For lookup, we check h(x) + i*(i-1)/2 % t (starting with i=0) until + * we get a match or we hit an empty space. Note that as a result, + * we can't make a cell empty on deletion, or lookups may end prematurely. + * Instead we mark the cell as "deleted." 
We thus steal the value + * DELETED as a possible "data" value. As long as data are pointers, + * that's ok. + * The hash increment we use, i(i-1)/2, is not the standard quadratic + * hash increment, which is i^2. i(i-1)/2 covers the entire bucket space + * when the hashtable size is a power of two, as it is for us. In fact, + * the first n probes cover n distinct buckets; then it repeats. This + * guarantees insertion will always succeed. + * If you linear hashing, set JUMP in chash.h. You can also change + * various other parameters there. + */ + +/*************************************************************************\ +| Hash() | +| The hash function I use is due to Bob Jenkins (see | +| http://burtleburtle.net/bob/hash/evahash.html | +| According to http://burtleburtle.net/bob/c/lookup2.c, | +| his implementation is public domain.) | +| It takes 36 instructions, in 18 cycles if you're lucky. | +| hashing depends on the fact the hashtable size is always a | +| power of 2. cBuckets is probably ht->cBuckets. 
| +\*************************************************************************/ + +#if LOG_WORD_SIZE == 5 /* 32 bit words */ + +#define mix(a,b,c) \ +{ \ + a -= b; a -= c; a ^= (c>>13); \ + b -= c; b -= a; b ^= (a<<8); \ + c -= a; c -= b; c ^= (b>>13); \ + a -= b; a -= c; a ^= (c>>12); \ + b -= c; b -= a; b ^= (a<<16); \ + c -= a; c -= b; c ^= (b>>5); \ + a -= b; a -= c; a ^= (c>>3); \ + b -= c; b -= a; b ^= (a<<10); \ + c -= a; c -= b; c ^= (b>>15); \ +} +#ifdef WORD_HASH /* play with this on little-endian machines */ +#define WORD_AT(ptr) ( *(ulong *)(ptr) ) +#else +#define WORD_AT(ptr) ( (ptr)[0] + ((ulong)(ptr)[1]<<8) + \ + ((ulong)(ptr)[2]<<16) + ((ulong)(ptr)[3]<<24) ) +#endif + +#elif LOG_WORD_SIZE == 6 /* 64 bit words */ + +#define mix(a,b,c) \ +{ \ + a -= b; a -= c; a ^= (c>>43); \ + b -= c; b -= a; b ^= (a<<9); \ + c -= a; c -= b; c ^= (b>>8); \ + a -= b; a -= c; a ^= (c>>38); \ + b -= c; b -= a; b ^= (a<<23); \ + c -= a; c -= b; c ^= (b>>5); \ + a -= b; a -= c; a ^= (c>>35); \ + b -= c; b -= a; b ^= (a<<49); \ + c -= a; c -= b; c ^= (b>>11); \ + a -= b; a -= c; a ^= (c>>12); \ + b -= c; b -= a; b ^= (a<<18); \ + c -= a; c -= b; c ^= (b>>22); \ +} +#ifdef WORD_HASH /* alpha is little-endian, btw */ +#define WORD_AT(ptr) ( *(ulong *)(ptr) ) +#else +#define WORD_AT(ptr) ( (ptr)[0] + ((ulong)(ptr)[1]<<8) + \ + ((ulong)(ptr)[2]<<16) + ((ulong)(ptr)[3]<<24) + \ + ((ulong)(ptr)[4]<<32) + ((ulong)(ptr)[5]<<40) + \ + ((ulong)(ptr)[6]<<48) + ((ulong)(ptr)[7]<<56) ) +#endif + +#else /* neither 32 or 64 bit words */ +#error This hash function can only hash 32 or 64 bit words. Sorry. +#endif + +static ulong Hash(HashTable *ht, char *key, ulong cBuckets) +{ + ulong a, b, c, cchKey, cchKeyOrig; + + cchKeyOrig = ht->cchKey == NULL_TERMINATED ? 
strlen(key) : ht->cchKey; + a = b = c = 0x9e3779b9; /* the golden ratio; an arbitrary value */ + + for ( cchKey = cchKeyOrig; cchKey >= 3 * sizeof(ulong); + cchKey -= 3 * sizeof(ulong), key += 3 * sizeof(ulong) ) + { + a += WORD_AT(key); + b += WORD_AT(key + sizeof(ulong)); + c += WORD_AT(key + sizeof(ulong)*2); + mix(a,b,c); + } + + c += cchKeyOrig; + switch ( cchKey ) { /* deal with rest. Cases fall through */ +#if LOG_WORD_SIZE == 5 + case 11: c += (ulong)key[10]<<24; + case 10: c += (ulong)key[9]<<16; + case 9 : c += (ulong)key[8]<<8; + /* the first byte of c is reserved for the length */ + case 8 : b += WORD_AT(key+4); a+= WORD_AT(key); break; + case 7 : b += (ulong)key[6]<<16; + case 6 : b += (ulong)key[5]<<8; + case 5 : b += key[4]; + case 4 : a += WORD_AT(key); break; + case 3 : a += (ulong)key[2]<<16; + case 2 : a += (ulong)key[1]<<8; + case 1 : a += key[0]; + /* case 0 : nothing left to add */ +#elif LOG_WORD_SIZE == 6 + case 23: c += (ulong)key[22]<<56; + case 22: c += (ulong)key[21]<<48; + case 21: c += (ulong)key[20]<<40; + case 20: c += (ulong)key[19]<<32; + case 19: c += (ulong)key[18]<<24; + case 18: c += (ulong)key[17]<<16; + case 17: c += (ulong)key[16]<<8; + /* the first byte of c is reserved for the length */ + case 16: b += WORD_AT(key+8); a+= WORD_AT(key); break; + case 15: b += (ulong)key[14]<<48; + case 14: b += (ulong)key[13]<<40; + case 13: b += (ulong)key[12]<<32; + case 12: b += (ulong)key[11]<<24; + case 11: b += (ulong)key[10]<<16; + case 10: b += (ulong)key[ 9]<<8; + case 9: b += (ulong)key[ 8]; + case 8: a += WORD_AT(key); break; + case 7: a += (ulong)key[ 6]<<48; + case 6: a += (ulong)key[ 5]<<40; + case 5: a += (ulong)key[ 4]<<32; + case 4: a += (ulong)key[ 3]<<24; + case 3: a += (ulong)key[ 2]<<16; + case 2: a += (ulong)key[ 1]<<8; + case 1: a += (ulong)key[ 0]; + /* case 0: nothing left to add */ +#endif + } + mix(a,b,c); + return c & (cBuckets-1); +} + + 
+/*************************************************************************\ +| Rehash() | +| You give me a hashtable, a new size, and a bucket to follow, and | +| I resize the hashtable's bin to be the new size, rehashing | +| everything in it. I keep particular track of the bucket you pass | +| in, and RETURN a pointer to where the item in the bucket got to. | +| (If you pass in NULL, I return an arbitrary pointer.) | +\*************************************************************************/ + +static HTItem *Rehash(HashTable *ht, ulong cNewBuckets, HTItem *bckWatch) +{ + Table *tableNew; + ulong iBucketFirst; + HTItem *bck, *bckNew = NULL; + ulong offset; /* the i in h(x) + i*(i-1)/2 */ + int fOverwrite = 0; /* not an issue: there can be no collisions */ + + assert( ht->table ); + cNewBuckets = Table(Allocate)(&tableNew, cNewBuckets); + /* Since we RETURN the new position of bckWatch, we want * + * to make sure it doesn't get moved due to some table * + * rehashing that comes after it's inserted. Thus, we * + * have to put it in last. This makes the loop weird. 
*/ + for ( bck = HashFirstBucket(ht); ; bck = HashNextBucket(ht) ) + { + if ( bck == NULL ) /* we're done iterating, so look at bckWatch */ + { + bck = bckWatch; + if ( bck == NULL ) /* I guess bckWatch wasn't specified */ + break; + } + else if ( bck == bckWatch ) + continue; /* ignore if we see it during the iteration */ + + offset = 0; /* a new i for a new bucket */ + for ( iBucketFirst = Hash(ht, KEY_PTR(ht, bck->key), cNewBuckets); + !Table(IsEmpty)(tableNew, iBucketFirst); + iBucketFirst = (iBucketFirst + JUMP(KEY_PTR(ht,bck->key), offset)) + & (cNewBuckets-1) ) + ; + bckNew = Table(Insert)(tableNew, bck, iBucketFirst, &fOverwrite); + if ( bck == bckWatch ) /* we're done with the last thing to do */ + break; + } + Table(Free)(ht->table, ht->cBuckets); + ht->table = tableNew; + ht->cBuckets = cNewBuckets; + ht->cDeletedItems = 0; + return bckNew; /* new position of bckWatch, which was inserted last */ +} + +/*************************************************************************\ +| Find() | +| Does the quadratic searching stuff. RETURNS NULL if we don't | +| find an object with the given key, and a pointer to the Item | +| holding the key, if we do. Also sets posLastFind. If piEmpty is | +| non-NULL, we set it to the first open bucket we pass; helpful for | +| doing a later insert if the search fails, for instance. 
| +\*************************************************************************/ + +static HTItem *Find(HashTable *ht, ulong key, ulong *piEmpty) +{ + ulong iBucketFirst; + HTItem *item; + ulong offset = 0; /* the i in h(x) + i*(i-1)/2 */ + int fFoundEmpty = 0; /* set when we pass over an empty bucket */ + + ht->posLastFind = NULL; /* set up for failure: a new find starts */ + if ( ht->table == NULL ) /* empty hash table: find is bound to fail */ + return NULL; + + iBucketFirst = Hash(ht, KEY_PTR(ht, key), ht->cBuckets); + while ( 1 ) /* now try all i > 0 */ + { + item = Table(Find)(ht->table, iBucketFirst); + if ( item == NULL ) /* it's not in the table */ + { + if ( piEmpty && !fFoundEmpty ) *piEmpty = iBucketFirst; + return NULL; + } + else + { + if ( IS_BCK_DELETED(item) ) /* always 0 ifdef INSERT_ONLY */ + { + if ( piEmpty && !fFoundEmpty ) + { + *piEmpty = iBucketFirst; + fFoundEmpty = 1; + } + } else + if ( !KEY_CMP(ht, key, item->key) ) /* must be occupied */ + { + ht->posLastFind = item; + return item; /* we found it! */ + } + } + iBucketFirst = ((iBucketFirst + JUMP(KEY_PTR(ht, key), offset)) + & (ht->cBuckets-1)); + } +} + +/*************************************************************************\ +| Insert() | +| If an item with the key already exists in the hashtable, RETURNS | +| a pointer to the item (replacing its data if fOverwrite is 1). | +| If not, we find the first place-to-insert (which Find() is nice | +| enough to set for us) and insert the item there, RETURNing a | +| pointer to the item. We might grow the hashtable if it's getting | +| full. Note we include buckets holding DELETED when determining | +| fullness, because they slow down searching. 
| +\*************************************************************************/ + +static ulong NextPow2(ulong x) /* returns next power of 2 > x, or 2^31 */ +{ + if ( ((x << 1) >> 1) != x ) /* next power of 2 overflows */ + x >>= 1; /* so we return highest power of 2 we can */ + while ( (x & (x-1)) != 0 ) /* blacks out all but the top bit */ + x &= (x-1); + return x << 1; /* makes it the *next* power of 2 */ +} + +static HTItem *Insert(HashTable *ht, ulong key, ulong data, int fOverwrite) +{ + HTItem *item, bckInsert; + ulong iEmpty; /* first empty bucket key probes */ + + if ( ht->table == NULL ) /* empty hash table: find is bound to fail */ + return NULL; + item = Find(ht, key, &iEmpty); + ht->posLastFind = NULL; /* last operation is insert, not find */ + if ( item ) + { + if ( fOverwrite ) + item->data = data; /* key already matches */ + return item; + } + + COPY_KEY(ht, bckInsert.key, key); /* make our own copy of the key */ + bckInsert.data = data; /* oh, and the data too */ + item = Table(Insert)(ht->table, &bckInsert, iEmpty, &fOverwrite); + if ( fOverwrite ) /* we overwrote a deleted bucket */ + ht->cDeletedItems--; + ht->cItems++; /* insert couldn't have overwritten */ + if ( ht->cDeltaGoalSize > 0 ) /* closer to our goal size */ + ht->cDeltaGoalSize--; + if ( ht->cItems + ht->cDeletedItems >= ht->cBuckets * OCCUPANCY_PCT + || ht->cDeltaGoalSize < 0 ) /* we must've overestimated # of deletes */ + item = Rehash(ht, + NextPow2((ulong)(((ht->cDeltaGoalSize > 0 ? + ht->cDeltaGoalSize : 0) + + ht->cItems) / OCCUPANCY_PCT)), + item); + return item; +} + +/*************************************************************************\ +| Delete() | +| Removes the item from the hashtable, and if fShrink is 1, will | +| shrink the hashtable if it's too small (ie even after halving, | +| the ht would be less than half full, though in order to avoid | +| oscillating table size, we insist that after halving the ht would | +| be less than 40% full). 
RETURNS 1 if the item was found, 0 else. | +| If fLastFindSet is true, then this function is basically | +| DeleteLastFind. | +\*************************************************************************/ + +static int Delete(HashTable *ht, ulong key, int fShrink, int fLastFindSet) +{ + if ( !fLastFindSet && !Find(ht, key, NULL) ) + return 0; + SET_BCK_DELETED(ht, ht->posLastFind); /* find set this, how nice */ + ht->cItems--; + ht->cDeletedItems++; + if ( ht->cDeltaGoalSize < 0 ) /* heading towards our goal of deletion */ + ht->cDeltaGoalSize++; + + if ( fShrink && ht->cItems < ht->cBuckets * OCCUPANCY_PCT*0.4 + && ht->cDeltaGoalSize >= 0 /* wait until we're done deleting */ + && (ht->cBuckets >> 1) >= MIN_HASH_SIZE ) /* shrink */ + Rehash(ht, + NextPow2((ulong)((ht->cItems+ht->cDeltaGoalSize)/OCCUPANCY_PCT)), + NULL); + ht->posLastFind = NULL; /* last operation is delete, not find */ + return 1; +} + + +/* ======================================================================== */ +/* USER-VISIBLE API */ +/* ---------------------- */ + +/*************************************************************************\ +| AllocateHashTable() | +| ClearHashTable() | +| FreeHashTable() | +| Allocate() allocates a hash table and sets up size parameters. | +| Free() frees it. Clear() deletes all the items from the hash | +| table, but frees not. | +| cchKey is < 0 if the keys you send me are meant to be pointers | +| to \0-terminated strings. Then -cchKey is the maximum key size. | +| If cchKey < one word (ulong), the keys you send me are the keys | +| themselves; else the keys you send me are pointers to the data. | +| If fSaveKeys is 1, we copy any keys given to us to insert. We | +| also free these keys when freeing the hash table. If it's 0, the | +| user is responsible for key space management. | +| AllocateHashTable() RETURNS a hash table; the others TAKE one. 
| +\*************************************************************************/ + +HashTable *AllocateHashTable(int cchKey, int fSaveKeys) +{ + HashTable *ht; + + ht = (HashTable *) HTsmalloc(sizeof(*ht)); /* set everything to 0 */ + ht->cBuckets = Table(Allocate)(&ht->table, MIN_HASH_SIZE); + ht->cchKey = cchKey <= 0 ? NULL_TERMINATED : cchKey; + ht->cItems = 0; + ht->cDeletedItems = 0; + ht->fSaveKeys = fSaveKeys; + ht->cDeltaGoalSize = 0; + ht->iter = HTsmalloc( sizeof(TableIterator) ); + + ht->fpData = NULL; /* set by HashLoad, maybe */ + ht->bckData.data = (ulong) NULL; /* this must be done */ + HTSetupKeyTrunc(); /* in util.c */ + return ht; +} + +void ClearHashTable(HashTable *ht) +{ + HTItem *bck; + + if ( STORES_PTR(ht) && ht->fSaveKeys ) /* need to free keys */ + for ( bck = HashFirstBucket(ht); bck; bck = HashNextBucket(ht) ) + { + FREE_KEY(ht, bck->key); + if ( ht->fSaveKeys == 2 ) /* this means key stored in one block */ + break; /* ...so only free once */ + } + Table(Free)(ht->table, ht->cBuckets); + ht->cBuckets = Table(Allocate)(&ht->table, MIN_HASH_SIZE); + + ht->cItems = 0; + ht->cDeletedItems = 0; + ht->cDeltaGoalSize = 0; + ht->posLastFind = NULL; + ht->fpData = NULL; /* no longer HashLoading */ + if ( ht->bckData.data ) free( (char *)(ht)->bckData.data); + ht->bckData.data = (ulong) NULL; +} + +void FreeHashTable(HashTable *ht) +{ + ClearHashTable(ht); + if ( ht->iter ) HTfree(ht->iter, sizeof(TableIterator)); + if ( ht->table ) Table(Free)(ht->table, ht->cBuckets); + free(ht); +} + +/*************************************************************************\ +| HashFind() | +| HashFindLast() | +| HashFind(): looks in h(x) + i(i-1)/2 % t as i goes up from 0 | +| until we either find the key or hit an empty bucket. RETURNS a | +| pointer to the item in the hit bucket, if we find it, else | +| RETURNS NULL. | +| HashFindLast() returns the item returned by the last | +| HashFind(), which may be NULL if the last HashFind() failed. 
| +| LOAD_AND_RETURN reads the data from off disk, if necessary. | +\*************************************************************************/ + +HTItem *HashFind(HashTable *ht, ulong key) +{ + LOAD_AND_RETURN(ht, Find(ht, KEY_TRUNC(ht, key), NULL)); +} + +HTItem *HashFindLast(HashTable *ht) +{ + LOAD_AND_RETURN(ht, ht->posLastFind); +} + +/*************************************************************************\ +| HashFindOrInsert() | +| HashFindOrInsertItem() | +| HashInsert() | +| HashInsertItem() | +| HashDelete() | +| HashDeleteLast() | +| Pretty obvious what these guys do. Some take buckets (items), | +| some take keys and data separately. All things RETURN the bucket | +| (a pointer into the hashtable) if appropriate. | +\*************************************************************************/ + +HTItem *HashFindOrInsert(HashTable *ht, ulong key, ulong dataInsert) +{ + /* This is equivalent to Insert without samekey-overwrite */ + return Insert(ht, KEY_TRUNC(ht, key), dataInsert, 0); +} + +HTItem *HashFindOrInsertItem(HashTable *ht, HTItem *pItem) +{ + return HashFindOrInsert(ht, pItem->key, pItem->data); +} + +HTItem *HashInsert(HashTable *ht, ulong key, ulong data) +{ + return Insert(ht, KEY_TRUNC(ht, key), data, SAMEKEY_OVERWRITE); +} + +HTItem *HashInsertItem(HashTable *ht, HTItem *pItem) +{ + return HashInsert(ht, pItem->key, pItem->data); +} + +int HashDelete(HashTable *ht, ulong key) +{ + return Delete(ht, KEY_TRUNC(ht, key), !FAST_DELETE, 0); +} + +int HashDeleteLast(HashTable *ht) +{ + if ( !ht->posLastFind ) /* last find failed */ + return 0; + return Delete(ht, 0, !FAST_DELETE, 1); /* no need to specify a key */ +} + +/*************************************************************************\ +| HashFirstBucket() | +| HashNextBucket() | +| Iterates through the items in the hashtable by iterating through | +| the table. 
Since we know about deleted buckets and loading data | +| off disk, and the table doesn't, our job is to take care of these | +| things. RETURNS a bucket, or NULL after the last bucket. | +\*************************************************************************/ + +HTItem *HashFirstBucket(HashTable *ht) +{ + HTItem *retval; + + for ( retval = Table(FirstBucket)(ht->iter, ht->table, ht->cBuckets); + retval; retval = Table(NextBucket)(ht->iter) ) + if ( !IS_BCK_DELETED(retval) ) + LOAD_AND_RETURN(ht, retval); + return NULL; +} + +HTItem *HashNextBucket(HashTable *ht) +{ + HTItem *retval; + + while ( (retval=Table(NextBucket)(ht->iter)) ) + if ( !IS_BCK_DELETED(retval) ) + LOAD_AND_RETURN(ht, retval); + return NULL; +} + +/*************************************************************************\ +| HashSetDeltaGoalSize() | +| If we're going to insert 100 items, set the delta goal size to | +| 100 and we take that into account when inserting. Likewise, if | +| we're going to delete 10 items, set it to -100 and we won't | +| rehash until all 100 have been done. It's ok to be wrong, but | +| it's efficient to be right. Returns the delta value. | +\*************************************************************************/ + +int HashSetDeltaGoalSize(HashTable *ht, int delta) +{ + ht->cDeltaGoalSize = delta; +#if FAST_DELETE == 1 || defined INSERT_ONLY + if ( ht->cDeltaGoalSize < 0 ) /* for fast delete, we never */ + ht->cDeltaGoalSize = 0; /* ...rehash after deletion */ +#endif + return ht->cDeltaGoalSize; +} + + +/*************************************************************************\ +| HashSave() | +| HashLoad() | +| HashLoadKeys() | +| Routines for saving and loading the hashtable from disk. We can | +| then use the hashtable in two ways: loading it back into memory | +| (HashLoad()) or loading only the keys into memory, in which case | +| the data for a given key is loaded off disk when the key is | +| retrieved. 
The data is freed when something new is retrieved in | +| its place, so this is not a "lazy-load" scheme. | +| The key is saved automatically and restored upon load, but the | +| user needs to specify a routine for reading and writing the data. | +| fSaveKeys is of course set to 1 when you read in a hashtable. | +| HashLoad RETURNS a newly allocated hashtable. | +| DATA_WRITE() takes an fp and a char * (representing the data | +| field), and must perform two separate tasks. If fp is NULL, | +| return the number of bytes written. If not, writes the data to | +| disk at the place the fp points to. | +| DATA_READ() takes an fp and the number of bytes in the data | +| field, and returns a char * which points to wherever you've | +| written the data. Thus, you must allocate memory for the data. | +| Both dataRead and dataWrite may be NULL if you just wish to | +| store the data field directly, as an integer. | +\*************************************************************************/ + +void HashSave(FILE *fp, HashTable *ht, int (*dataWrite)(FILE *, char *)) +{ + long cchData, posStart; + HTItem *bck; + + /* File format: magic number (4 bytes) + : cchKey (one word) + : cItems (one word) + : cDeletedItems (one word) + : table info (buckets and a bitmap) + : cchAllKeys (one word) + Then the keys, in a block. If cchKey is NULL_TERMINATED, the keys + are null-terminated too, otherwise this takes up cchKey*cItems bytes. + Note that keys are not written for DELETED buckets. + Then the data: + : EITHER DELETED (one word) to indicate it's a deleted bucket, + : OR number of bytes for this (non-empty) bucket's data + (one word). This is not stored if dataWrite == NULL + since the size is known to be sizeof(ul). Plus: + : the data for this bucket (variable length) + All words are in network byte order. 
*/ + + fprintf(fp, "%s", MAGIC_KEY); + WRITE_UL(fp, ht->cchKey); /* WRITE_UL, READ_UL, etc in fks-hash.h */ + WRITE_UL(fp, ht->cItems); + WRITE_UL(fp, ht->cDeletedItems); + Table(Write)(fp, ht->table, ht->cBuckets); /* writes cBuckets too */ + + WRITE_UL(fp, 0); /* to be replaced with sizeof(key block) */ + posStart = ftell(fp); + for ( bck = HashFirstBucket(ht); bck; bck = HashNextBucket(ht) ) + fwrite(KEY_PTR(ht, bck->key), 1, + (ht->cchKey == NULL_TERMINATED ? + strlen(KEY_PTR(ht, bck->key))+1 : ht->cchKey), fp); + cchData = ftell(fp) - posStart; + fseek(fp, posStart - sizeof(unsigned long), SEEK_SET); + WRITE_UL(fp, cchData); + fseek(fp, 0, SEEK_END); /* done with our sojourn at the header */ + + /* Unlike HashFirstBucket, TableFirstBucket iters through deleted bcks */ + for ( bck = Table(FirstBucket)(ht->iter, ht->table, ht->cBuckets); + bck; bck = Table(NextBucket)(ht->iter) ) + if ( dataWrite == NULL || IS_BCK_DELETED(bck) ) + WRITE_UL(fp, bck->data); + else /* write cchData followed by the data */ + { + WRITE_UL(fp, (*dataWrite)(NULL, (char *)bck->data)); + (*dataWrite)(fp, (char *)bck->data); + } +} + +static HashTable *HashDoLoad(FILE *fp, char * (*dataRead)(FILE *, int), + HashTable *ht) +{ + ulong cchKey; + char szMagicKey[4], *rgchKeys; + HTItem *bck; + + fread(szMagicKey, 1, 4, fp); + if ( strncmp(szMagicKey, MAGIC_KEY, 4) ) + { + fprintf(stderr, "ERROR: not a hash table (magic key is %4.4s, not %s)\n", + szMagicKey, MAGIC_KEY); + exit(3); + } + Table(Free)(ht->table, ht->cBuckets); /* allocated in AllocateHashTable */ + + READ_UL(fp, ht->cchKey); + READ_UL(fp, ht->cItems); + READ_UL(fp, ht->cDeletedItems); + ht->cBuckets = Table(Read)(fp, &ht->table); /* next is the table info */ + + READ_UL(fp, cchKey); + rgchKeys = (char *) HTsmalloc( cchKey ); /* stores all the keys */ + fread(rgchKeys, 1, cchKey, fp); + /* We use the table iterator so we don't try to LOAD_AND_RETURN */ + for ( bck = Table(FirstBucket)(ht->iter, ht->table, ht->cBuckets); + bck; 
bck = Table(NextBucket)(ht->iter) ) + { + READ_UL(fp, bck->data); /* all we need if dataRead is NULL */ + if ( IS_BCK_DELETED(bck) ) /* always 0 if defined(INSERT_ONLY) */ + continue; /* this is why we read the data first */ + if ( dataRead != NULL ) /* if it's null, we're done */ + if ( !ht->fpData ) /* load data into memory */ + bck->data = (ulong)dataRead(fp, bck->data); + else /* store location of data on disk */ + { + fseek(fp, bck->data, SEEK_CUR); /* bck->data held size of data */ + bck->data = ftell(fp) - bck->data - sizeof(unsigned long); + } + + if ( ht->cchKey == NULL_TERMINATED ) /* now read the key */ + { + bck->key = (ulong) rgchKeys; + rgchKeys = strchr(rgchKeys, '\0') + 1; /* read past the string */ + } + else + { + if ( STORES_PTR(ht) ) /* small keys stored directly */ + bck->key = (ulong) rgchKeys; + else + memcpy(&bck->key, rgchKeys, ht->cchKey); + rgchKeys += ht->cchKey; + } + } + if ( !STORES_PTR(ht) ) /* keys are stored directly */ + HTfree(rgchKeys - cchKey, cchKey); /* we've advanced rgchK to end */ + return ht; +} + +HashTable *HashLoad(FILE *fp, char * (*dataRead)(FILE *, int)) +{ + HashTable *ht; + ht = AllocateHashTable(0, 2); /* cchKey set later, fSaveKey should be 2! */ + return HashDoLoad(fp, dataRead, ht); +} + +HashTable *HashLoadKeys(FILE *fp, char * (*dataRead)(FILE *, int)) +{ + HashTable *ht; + + if ( dataRead == NULL ) + return HashLoad(fp, NULL); /* no reason not to load the data here */ + ht = AllocateHashTable(0, 2); /* cchKey set later, fSaveKey should be 2! */ + ht->fpData = fp; /* tells HashDoLoad() to only load keys */ + ht->dataRead = dataRead; + return HashDoLoad(fp, dataRead, ht); +} + +/*************************************************************************\ +| PrintHashTable() | +| A debugging tool. Prints the entire contents of the hash table, | +| like so: : key of the contents. Returns number of bytes | +| allocated. If time is not -1, we print it as the time required | +| for the hash. 
If iForm is 0, we just print the stats. If it's | +| 1, we print the keys and data too, but the keys are printed as | +| ulongs. If it's 2, we print the keys correctly (as long numbers | +| or as strings). | +\*************************************************************************/ + +ulong PrintHashTable(HashTable *ht, double time, int iForm) +{ + ulong cbData = 0, cbBin = 0, cItems = 0, cOccupied = 0; + HTItem *item; + + printf("HASH TABLE.\n"); + if ( time > -1.0 ) + { + printf("----------\n"); + printf("Time: %27.2f\n", time); + } + + for ( item = Table(FirstBucket)(ht->iter, ht->table, ht->cBuckets); + item; item = Table(NextBucket)(ht->iter) ) + { + cOccupied++; /* this includes deleted buckets */ + if ( IS_BCK_DELETED(item) ) /* we don't need you for anything else */ + continue; + cItems++; /* this is for a sanity check */ + if ( STORES_PTR(ht) ) + cbData += ht->cchKey == NULL_TERMINATED ? + WORD_ROUND(strlen((char *)item->key)+1) : ht->cchKey; + else + cbBin -= sizeof(item->key), cbData += sizeof(item->key); + cbBin -= sizeof(item->data), cbData += sizeof(item->data); + if ( iForm != 0 ) /* we want the actual contents */ + { + if ( iForm == 2 && ht->cchKey == NULL_TERMINATED ) + printf("%s/%lu\n", (char *)item->key, item->data); + else if ( iForm == 2 && STORES_PTR(ht) ) + printf("%.*s/%lu\n", + (int)ht->cchKey, (char *)item->key, item->data); + else /* either key actually is a ulong, or iForm == 1 */ + printf("%lu/%lu\n", item->key, item->data); + } + } + assert( cItems == ht->cItems ); /* sanity check */ + cbBin = Table(Memory)(ht->cBuckets, cOccupied); + + printf("----------\n"); + printf("%lu buckets (%lu bytes). %lu empty. %lu hold deleted items.\n" + "%lu items (%lu bytes).\n" + "%lu bytes total. 
%lu bytes (%2.1f%%) of this is ht overhead.\n", + ht->cBuckets, cbBin, ht->cBuckets - cOccupied, cOccupied - ht->cItems, + ht->cItems, cbData, + cbData + cbBin, cbBin, cbBin*100.0/(cbBin+cbData)); + + return cbData + cbBin; +} diff --git a/clipper/sparsehash-2.0.3/experimental/libchash.h b/clipper/sparsehash-2.0.3/experimental/libchash.h new file mode 100644 index 0000000..8c8ac30 --- /dev/null +++ b/clipper/sparsehash-2.0.3/experimental/libchash.h @@ -0,0 +1,253 @@ +/* Copyright (c) 1998 - 2005, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * --- + * Author: Craig Silverstein + * + * This library is intended to be used for in-memory hash tables, + * though it provides rudimentary permanent-storage capabilities. + * It attempts to be fast, portable, and small. The best algorithm + * to fulfill these goals is an internal probing hashing algorithm, + * as in Knuth, _Art of Computer Programming_, vol III. Unlike + * chained (open) hashing, it doesn't require a pointer for every + * item, yet it is still constant time lookup in practice. + * + * Also to save space, we let the contents (both data and key) that + * you insert be a union: if the key/data is small, we store it + * directly in the hashtable, otherwise we store a pointer to it. + * To keep you from having to figure out which, use KEY_PTR and + * PTR_KEY to convert between the arguments to these functions and + * a pointer to the real data. For instance: + * char key[] = "ab", *key2; + * HTItem *bck; HashTable *ht; + * HashInsert(ht, PTR_KEY(ht, key), 0); + * bck = HashFind(ht, PTR_KEY(ht, "ab")); + * key2 = KEY_PTR(ht, bck->key); + * + * There are a rich set of operations supported: + * AllocateHashTable() -- Allocates a hashtable structure and + * returns it. + * cchKey: if it's a positive number, then each key is a + * fixed-length record of that length. If it's 0, + * the key is assumed to be a \0-terminated string. + * fSaveKey: normally, you are responsible for allocating + * space for the key. 
If this is 1, we make a + * copy of the key for you. + * ClearHashTable() -- Removes everything from a hashtable + * FreeHashTable() -- Frees memory used by a hashtable + * + * HashFind() -- takes a key (use PTR_KEY) and returns the + * HTItem containing that key, or NULL if the + * key is not in the hashtable. + * HashFindLast() -- returns the item found by last HashFind() + * HashFindOrInsert() -- inserts the key/data pair if the key + * is not already in the hashtable, or + * returns the appropriate HTItem if it is. + * HashFindOrInsertItem() -- takes key/data as an HTItem. + * HashInsert() -- adds a key/data pair to the hashtable. What + * it does if the key is already in the table + * depends on the value of SAMEKEY_OVERWRITE. + * HashInsertItem() -- takes key/data as an HTItem. + * HashDelete() -- removes a key/data pair from the hashtable, + * if it's there. RETURNS 1 if it was there, + * 0 else. + * If you use sparse tables and never delete, the full data + * space is available. Otherwise we steal -2 (maybe -3), + * so you can't have data fields with those values. + * HashDeleteLast() -- deletes the item returned by the last Find(). + * + * HashFirstBucket() -- used to iterate over the buckets in a + * hashtable. DON'T INSERT OR DELETE WHILE + * ITERATING! You can't nest iterations. + * HashNextBucket() -- RETURNS NULL at the end of iterating. + * + * HashSetDeltaGoalSize() -- if you're going to insert 1000 items + * at once, call this fn with arg 1000. + * It grows the table more intelligently. + * + * HashSave() -- saves the hashtable to a file. It saves keys ok, + * but it doesn't know how to interpret the data field, + * so if the data field is a pointer to some complex + * structure, you must send a function that takes a + * file pointer and a pointer to the structure, and + * write whatever you want to write. It should return + * the number of bytes written. 
If the file is NULL, + * it should just return the number of bytes it would + * write, without writing anything. + * If your data field is just an integer, not a + * pointer, just send NULL for the function. + * HashLoad() -- loads a hashtable. It needs a function that takes + * a file and the size of the structure, and expects + * you to read in the structure and return a pointer + * to it. You must do memory allocation, etc. If + * the data is just a number, send NULL. + * HashLoadKeys() -- unlike HashLoad(), doesn't load the data off disk + * until needed. This saves memory, but if you look + * up the same key a lot, it does a disk access each + * time. + * You can't do Insert() or Delete() on hashtables that were loaded + * from disk. + */ + +#include +#include /* includes definition of "ulong", we hope */ +#define ulong u_long + +#define MAGIC_KEY "CHsh" /* when we save the file */ + +#ifndef LOG_WORD_SIZE /* 5 for 32 bit words, 6 for 64 */ +#if defined (__LP64__) || defined (_LP64) +#define LOG_WORD_SIZE 6 /* log_2(sizeof(ulong)) [in bits] */ +#else +#define LOG_WORD_SIZE 5 /* log_2(sizeof(ulong)) [in bits] */ +#endif +#endif + + /* The following gives a speed/time tradeoff: how many buckets are * + * in each bin. 0 gives 32 buckets/bin, which is a good number. */ +#ifndef LOG_BM_WORDS +#define LOG_BM_WORDS 0 /* each group has 2^L_B_W * 32 buckets */ +#endif + + /* The following are all parameters that affect performance. */ +#ifndef JUMP +#define JUMP(key, offset) ( ++(offset) ) /* ( 1 ) for linear hashing */ +#endif +#ifndef Table +#define Table(x) Sparse##x /* Dense##x for dense tables */ +#endif +#ifndef FAST_DELETE +#define FAST_DELETE 0 /* if it's 1, we never shrink the ht */ +#endif +#ifndef SAMEKEY_OVERWRITE +#define SAMEKEY_OVERWRITE 1 /* overwrite item with our key on insert? 
*/ +#endif +#ifndef OCCUPANCY_PCT +#define OCCUPANCY_PCT 0.5 /* large PCT means smaller and slower */ +#endif +#ifndef MIN_HASH_SIZE +#define MIN_HASH_SIZE 512 /* ht size when first created */ +#endif + /* When deleting a bucket, we can't just empty it (future hashes * + * may fail); instead we set the data field to DELETED. Thus you * + * should set DELETED to a data value you never use. Better yet, * + * if you don't need to delete, define INSERT_ONLY. */ +#ifndef INSERT_ONLY +#define DELETED -2UL +#define IS_BCK_DELETED(bck) ( (bck) && (bck)->data == DELETED ) +#define SET_BCK_DELETED(ht, bck) do { (bck)->data = DELETED; \ + FREE_KEY(ht, (bck)->key); } while ( 0 ) +#else +#define IS_BCK_DELETED(bck) 0 +#define SET_BCK_DELETED(ht, bck) \ + do { fprintf(stderr, "Deletion not supported for insert-only hashtable\n");\ + exit(2); } while ( 0 ) +#endif + + /* We need the following only for dense buckets (Dense##x above). * + * If you need to, set this to a value you'll never use for data. */ +#define EMPTY -3UL /* steal more of the bck->data space */ + + + /* This is what an item is. Either can be cast to a pointer. */ +typedef struct { + ulong data; /* 4 bytes for data: either a pointer or an integer */ + ulong key; /* 4 bytes for the key: either a pointer or an int */ +} HTItem; + +struct Table(Bin); /* defined in chash.c, I hope */ +struct Table(Iterator); +typedef struct Table(Bin) Table; /* Expands to SparseBin, etc */ +typedef struct Table(Iterator) TableIterator; + + /* for STORES_PTR to work ok, cchKey MUST BE DEFINED 1st, cItems 2nd! 
*/ +typedef struct HashTable { + ulong cchKey; /* the length of the key, or if it's \0 terminated */ + ulong cItems; /* number of items currently in the hashtable */ + ulong cDeletedItems; /* # of buckets holding DELETE in the hashtable */ + ulong cBuckets; /* size of the table */ + Table *table; /* The actual contents of the hashtable */ + int fSaveKeys; /* 1 if we copy keys locally; 2 if keys in one block */ + int cDeltaGoalSize; /* # of coming inserts (or deletes, if <0) we expect */ + HTItem *posLastFind; /* position of last Find() command */ + TableIterator *iter; /* used in First/NextBucket */ + + FILE *fpData; /* if non-NULL, what item->data points into */ + char * (*dataRead)(FILE *, int); /* how to load data from disk */ + HTItem bckData; /* holds data after being loaded from disk */ +} HashTable; + + /* Small keys are stored and passed directly, but large keys are + * stored and passed as pointers. To make it easier to remember + * what to pass, we provide two functions: + * PTR_KEY: give it a pointer to your data, and it returns + * something appropriate to send to Hash() functions or + * be stored in a data field. + * KEY_PTR: give it something returned by a Hash() routine, and + * it returns a (char *) pointer to the actual data. + */ +#define HashKeySize(ht) ( ((ulong *)(ht))[0] ) /* this is how we inline */ +#define HashSize(ht) ( ((ulong *)(ht))[1] ) /* ...a la C++ :-) */ + +#define STORES_PTR(ht) ( HashKeySize(ht) == 0 || \ + HashKeySize(ht) > sizeof(ulong) ) +#define KEY_PTR(ht, key) ( STORES_PTR(ht) ? (char *)(key) : (char *)&(key) ) +#ifdef DONT_HAVE_TO_WORRY_ABOUT_BUS_ERRORS +#define PTR_KEY(ht, ptr) ( STORES_PTR(ht) ? (ulong)(ptr) : *(ulong *)(ptr) ) +#else +#define PTR_KEY(ht, ptr) ( STORES_PTR(ht) ? 
(ulong)(ptr) : HTcopy((char *)ptr)) +#endif + + + /* Function prototypes */ +unsigned long HTcopy(char *pul); /* for PTR_KEY, not for users */ + +struct HashTable *AllocateHashTable(int cchKey, int fSaveKeys); +void ClearHashTable(struct HashTable *ht); +void FreeHashTable(struct HashTable *ht); + +HTItem *HashFind(struct HashTable *ht, ulong key); +HTItem *HashFindLast(struct HashTable *ht); +HTItem *HashFindOrInsert(struct HashTable *ht, ulong key, ulong dataInsert); +HTItem *HashFindOrInsertItem(struct HashTable *ht, HTItem *pItem); + +HTItem *HashInsert(struct HashTable *ht, ulong key, ulong data); +HTItem *HashInsertItem(struct HashTable *ht, HTItem *pItem); + +int HashDelete(struct HashTable *ht, ulong key); +int HashDeleteLast(struct HashTable *ht); + +HTItem *HashFirstBucket(struct HashTable *ht); +HTItem *HashNextBucket(struct HashTable *ht); + +int HashSetDeltaGoalSize(struct HashTable *ht, int delta); + +void HashSave(FILE *fp, struct HashTable *ht, int (*write)(FILE *, char *)); +struct HashTable *HashLoad(FILE *fp, char * (*read)(FILE *, int)); +struct HashTable *HashLoadKeys(FILE *fp, char * (*read)(FILE *, int)); diff --git a/clipper/sparsehash-2.0.3/google-sparsehash.sln b/clipper/sparsehash-2.0.3/google-sparsehash.sln new file mode 100755 index 0000000..4e57c62 --- /dev/null +++ b/clipper/sparsehash-2.0.3/google-sparsehash.sln @@ -0,0 +1,71 @@ +Microsoft Visual Studio Solution File, Format Version 8.00 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "type_traits_unittest", "vsprojects\type_traits_unittest\type_traits_unittest.vcproj", "{008CCFED-7D7B-46F8-8E13-03837A2258B3}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "template_util_unittest", "vsprojects\template_util_unittest\template_util_unittest.vcproj", "{F08CCFED-7D7B-46F8-8E13-03837A2258B3}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject 
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sparsetable_unittest", "vsprojects\sparsetable_unittest\sparsetable_unittest.vcproj", "{E420867B-8BFA-4739-99EC-E008AB762FF9}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "hashtable_test", "vsprojects\hashtable_test\hashtable_test.vcproj", "{FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "simple_test", "vsprojects\simple_test\simple_test.vcproj", "{FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libc_allocator_with_realloc_test", "vsprojects\libc_allocator_with_realloc_test\libc_allocator_with_realloc_test.vcproj", "{FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "time_hash_map", "vsprojects\time_hash_map\time_hash_map.vcproj", "{A74E5DB8-5295-487A-AB1D-23859F536F45}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject +Global + GlobalSection(SolutionConfiguration) = preSolution + Debug = Debug + Release = Release + EndGlobalSection + GlobalSection(ProjectDependencies) = postSolution + EndGlobalSection + GlobalSection(ProjectConfiguration) = postSolution + {008CCFED-7D7B-46F8-8E13-03837A2258B3}.Debug.ActiveCfg = Debug|Win32 + {008CCFED-7D7B-46F8-8E13-03837A2258B3}.Debug.Build.0 = Debug|Win32 + {008CCFED-7D7B-46F8-8E13-03837A2258B3}.Release.ActiveCfg = Release|Win32 + {008CCFED-7D7B-46F8-8E13-03837A2258B3}.Release.Build.0 = Release|Win32 + {F08CCFED-7D7B-46F8-8E13-03837A2258B3}.Debug.ActiveCfg = Debug|Win32 + {F08CCFED-7D7B-46F8-8E13-03837A2258B3}.Debug.Build.0 = Debug|Win32 + 
{F08CCFED-7D7B-46F8-8E13-03837A2258B3}.Release.ActiveCfg = Release|Win32 + {F08CCFED-7D7B-46F8-8E13-03837A2258B3}.Release.Build.0 = Release|Win32 + {E420867B-8BFA-4739-99EC-E008AB762FF9}.Debug.ActiveCfg = Debug|Win32 + {E420867B-8BFA-4739-99EC-E008AB762FF9}.Debug.Build.0 = Debug|Win32 + {E420867B-8BFA-4739-99EC-E008AB762FF9}.Release.ActiveCfg = Release|Win32 + {E420867B-8BFA-4739-99EC-E008AB762FF9}.Release.Build.0 = Release|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}.Debug.ActiveCfg = Debug|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}.Debug.Build.0 = Debug|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}.Release.ActiveCfg = Release|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}.Release.Build.0 = Release|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}.Debug.ActiveCfg = Debug|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}.Debug.Build.0 = Debug|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}.Release.ActiveCfg = Release|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}.Release.Build.0 = Release|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}.Debug.ActiveCfg = Debug|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}.Debug.Build.0 = Debug|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}.Release.ActiveCfg = Release|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}.Release.Build.0 = Release|Win32 + {A74E5DB8-5295-487A-AB1D-23859F536F45}.Debug.ActiveCfg = Debug|Win32 + {A74E5DB8-5295-487A-AB1D-23859F536F45}.Debug.Build.0 = Debug|Win32 + {A74E5DB8-5295-487A-AB1D-23859F536F45}.Release.ActiveCfg = Release|Win32 + {A74E5DB8-5295-487A-AB1D-23859F536F45}.Release.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + EndGlobalSection + GlobalSection(ExtensibilityAddIns) = postSolution + EndGlobalSection +EndGlobal diff --git a/clipper/sparsehash-2.0.2/install-sh b/clipper/sparsehash-2.0.3/install-sh similarity index 100% rename from clipper/sparsehash-2.0.2/install-sh rename to clipper/sparsehash-2.0.3/install-sh diff --git 
a/clipper/sparsehash-2.0.2/m4/acx_pthread.m4 b/clipper/sparsehash-2.0.3/m4/acx_pthread.m4 similarity index 100% rename from clipper/sparsehash-2.0.2/m4/acx_pthread.m4 rename to clipper/sparsehash-2.0.3/m4/acx_pthread.m4 diff --git a/clipper/sparsehash-2.0.2/m4/google_namespace.m4 b/clipper/sparsehash-2.0.3/m4/google_namespace.m4 similarity index 100% rename from clipper/sparsehash-2.0.2/m4/google_namespace.m4 rename to clipper/sparsehash-2.0.3/m4/google_namespace.m4 diff --git a/clipper/sparsehash-2.0.2/m4/namespaces.m4 b/clipper/sparsehash-2.0.3/m4/namespaces.m4 similarity index 100% rename from clipper/sparsehash-2.0.2/m4/namespaces.m4 rename to clipper/sparsehash-2.0.3/m4/namespaces.m4 diff --git a/clipper/sparsehash-2.0.2/m4/stl_hash.m4 b/clipper/sparsehash-2.0.3/m4/stl_hash.m4 similarity index 100% rename from clipper/sparsehash-2.0.2/m4/stl_hash.m4 rename to clipper/sparsehash-2.0.3/m4/stl_hash.m4 diff --git a/clipper/sparsehash-2.0.2/m4/stl_hash_fun.m4 b/clipper/sparsehash-2.0.3/m4/stl_hash_fun.m4 similarity index 100% rename from clipper/sparsehash-2.0.2/m4/stl_hash_fun.m4 rename to clipper/sparsehash-2.0.3/m4/stl_hash_fun.m4 diff --git a/clipper/sparsehash-2.0.2/missing b/clipper/sparsehash-2.0.3/missing similarity index 100% rename from clipper/sparsehash-2.0.2/missing rename to clipper/sparsehash-2.0.3/missing diff --git a/clipper/sparsehash-2.0.2/packages/deb.sh b/clipper/sparsehash-2.0.3/packages/deb.sh similarity index 100% rename from clipper/sparsehash-2.0.2/packages/deb.sh rename to clipper/sparsehash-2.0.3/packages/deb.sh diff --git a/clipper/sparsehash-2.0.3/packages/deb/README b/clipper/sparsehash-2.0.3/packages/deb/README new file mode 100644 index 0000000..57becfd --- /dev/null +++ b/clipper/sparsehash-2.0.3/packages/deb/README @@ -0,0 +1,7 @@ +The list of files here isn't complete. 
For a step-by-step guide on +how to set this package up correctly, check out + http://www.debian.org/doc/maint-guide/ + +Most of the files that are in this directory are boilerplate. +However, you may need to change the list of binary-arch dependencies +in 'rules'. diff --git a/clipper/sparsehash-2.0.3/packages/deb/changelog b/clipper/sparsehash-2.0.3/packages/deb/changelog new file mode 100644 index 0000000..5792e32 --- /dev/null +++ b/clipper/sparsehash-2.0.3/packages/deb/changelog @@ -0,0 +1,173 @@ +sparsehash (2.0.2-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. and others Thu, 23 Feb 2012 23:47:18 +0000 + +sparsehash (2.0.1-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. and others Wed, 01 Feb 2012 02:57:48 +0000 + +sparsehash (2.0-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. and others Tue, 31 Jan 2012 11:33:04 -0800 + +sparsehash (1.12-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Tue, 20 Dec 2011 21:04:04 -0800 + +sparsehash (1.11-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Thu, 23 Jun 2011 21:12:58 -0700 + +sparsehash (1.10-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Thu, 20 Jan 2011 16:07:39 -0800 + +sparsehash (1.9-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Fri, 24 Sep 2010 11:37:50 -0700 + +sparsehash (1.8.1-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Thu, 29 Jul 2010 15:01:29 -0700 + +sparsehash (1.8-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Thu, 29 Jul 2010 09:53:26 -0700 + +sparsehash (1.7-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Wed, 31 Mar 2010 12:32:03 -0700 + +sparsehash (1.6-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Fri, 08 Jan 2010 14:47:55 -0800 + +sparsehash (1.5.2-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. 
Tue, 12 May 2009 14:16:38 -0700 + +sparsehash (1.5.1-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Fri, 08 May 2009 15:23:44 -0700 + +sparsehash (1.5-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Wed, 06 May 2009 11:28:49 -0700 + +sparsehash (1.4-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Wed, 28 Jan 2009 17:11:31 -0800 + +sparsehash (1.3-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Thu, 06 Nov 2008 15:06:09 -0800 + +sparsehash (1.2-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Thu, 18 Sep 2008 13:53:20 -0700 + +sparsehash (1.1-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Mon, 11 Feb 2008 16:30:11 -0800 + +sparsehash (1.0-1) unstable; urgency=low + + * New upstream release. We are now out of beta. + + -- Google Inc. Tue, 13 Nov 2007 15:15:46 -0800 + +sparsehash (0.9.1-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Fri, 12 Oct 2007 12:35:24 -0700 + +sparsehash (0.9-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Tue, 09 Oct 2007 14:15:21 -0700 + +sparsehash (0.8-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Tue, 03 Jul 2007 12:55:04 -0700 + +sparsehash (0.7-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Mon, 11 Jun 2007 11:33:41 -0700 + +sparsehash (0.6-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Tue, 20 Mar 2007 17:29:34 -0700 + +sparsehash (0.5-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Sat, 21 Oct 2006 13:47:47 -0700 + +sparsehash (0.4-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Sun, 23 Apr 2006 22:42:35 -0700 + +sparsehash (0.3-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. Thu, 03 Nov 2005 20:12:31 -0800 + +sparsehash (0.2-1) unstable; urgency=low + + * New upstream release. + + -- Google Inc. 
Mon, 02 May 2005 07:04:46 -0700 + +sparsehash (0.1-1) unstable; urgency=low + + * Initial release. + + -- Google Inc. Tue, 15 Feb 2005 07:17:02 -0800 diff --git a/clipper/sparsehash-2.0.3/packages/deb/compat b/clipper/sparsehash-2.0.3/packages/deb/compat new file mode 100644 index 0000000..b8626c4 --- /dev/null +++ b/clipper/sparsehash-2.0.3/packages/deb/compat @@ -0,0 +1 @@ +4 diff --git a/clipper/sparsehash-2.0.3/packages/deb/control b/clipper/sparsehash-2.0.3/packages/deb/control new file mode 100644 index 0000000..f647979 --- /dev/null +++ b/clipper/sparsehash-2.0.3/packages/deb/control @@ -0,0 +1,17 @@ +Source: sparsehash +Section: libdevel +Priority: optional +Maintainer: Google Inc. and others +Build-Depends: debhelper (>= 4.0.0) +Standards-Version: 3.6.1 + +Package: sparsehash +Section: libs +Architecture: any +Description: hash_map and hash_set classes with minimal space overhead + This package contains several hash-map implementations, similar + in API to SGI's hash_map class, but with different performance + characteristics. sparse_hash_map uses very little space overhead: 1-2 + bits per entry. dense_hash_map is typically faster than the default + SGI STL implementation. This package also includes hash-set analogues + of these classes. diff --git a/clipper/sparsehash-2.0.3/packages/deb/copyright b/clipper/sparsehash-2.0.3/packages/deb/copyright new file mode 100644 index 0000000..948b744 --- /dev/null +++ b/clipper/sparsehash-2.0.3/packages/deb/copyright @@ -0,0 +1,36 @@ +This package was debianized by Donovan Hide +on Wed, Thu, 23 Feb 2012 23:47:18 +0000. + +It was downloaded from +http://code.google.com/p/sparsehash/downloads/list + +Upstream Author: google-sparsehash@googlegroups.com + +Copyright (c) 2005, Google Inc. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/clipper/sparsehash-2.0.3/packages/deb/docs b/clipper/sparsehash-2.0.3/packages/deb/docs new file mode 100644 index 0000000..752adb4 --- /dev/null +++ b/clipper/sparsehash-2.0.3/packages/deb/docs @@ -0,0 +1,16 @@ +AUTHORS +COPYING +ChangeLog +INSTALL +NEWS +README +TODO +doc/dense_hash_map.html +doc/dense_hash_set.html +doc/sparse_hash_map.html +doc/sparse_hash_set.html +doc/sparsetable.html +doc/implementation.html +doc/performance.html +doc/index.html +doc/designstyle.css diff --git a/clipper/sparsehash-2.0.3/packages/deb/rules b/clipper/sparsehash-2.0.3/packages/deb/rules new file mode 100755 index 0000000..f520bef --- /dev/null +++ b/clipper/sparsehash-2.0.3/packages/deb/rules @@ -0,0 +1,117 @@ +#!/usr/bin/make -f +# -*- makefile -*- +# Sample debian/rules that uses debhelper. +# This file was originally written by Joey Hess and Craig Small. +# As a special exception, when this file is copied by dh-make into a +# dh-make output file, you may use that output file without restriction. +# This special exception was added by Craig Small in version 0.37 of dh-make. + +# Uncomment this to turn on verbose mode. 
+#export DH_VERBOSE=1 + + +# These are used for cross-compiling and for saving the configure script +# from having to guess our platform (since we know it already) +DEB_HOST_GNU_TYPE ?= $(shell dpkg-architecture -qDEB_HOST_GNU_TYPE) +DEB_BUILD_GNU_TYPE ?= $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE) + + +CFLAGS = -Wall -g + +ifneq (,$(findstring noopt,$(DEB_BUILD_OPTIONS))) + CFLAGS += -O0 +else + CFLAGS += -O2 +endif +ifeq (,$(findstring nostrip,$(DEB_BUILD_OPTIONS))) + INSTALL_PROGRAM += -s +endif + +# shared library versions, option 1 +#version=2.0.5 +#major=2 +# option 2, assuming the library is created as src/.libs/libfoo.so.2.0.5 or so +version=`ls src/.libs/lib*.so.* | \ + awk '{if (match($$0,/[0-9]+\.[0-9]+\.[0-9]+$$/)) print substr($$0,RSTART)}'` +major=`ls src/.libs/lib*.so.* | \ + awk '{if (match($$0,/\.so\.[0-9]+$$/)) print substr($$0,RSTART+4)}'` + +config.status: configure + dh_testdir + # Add here commands to configure the package. + CFLAGS="$(CFLAGS)" ./configure --host=$(DEB_HOST_GNU_TYPE) --build=$(DEB_BUILD_GNU_TYPE) --prefix=/usr --mandir=\$${prefix}/share/man --infodir=\$${prefix}/share/info + + +build: build-stamp +build-stamp: config.status + dh_testdir + + # Add here commands to compile the package. + $(MAKE) + + touch build-stamp + +clean: + dh_testdir + dh_testroot + rm -f build-stamp + + # Add here commands to clean up after the build process. + -$(MAKE) distclean +ifneq "$(wildcard /usr/share/misc/config.sub)" "" + cp -f /usr/share/misc/config.sub config.sub +endif +ifneq "$(wildcard /usr/share/misc/config.guess)" "" + cp -f /usr/share/misc/config.guess config.guess +endif + + + dh_clean + +install: build + dh_testdir + dh_testroot + dh_clean -k + dh_installdirs + + # Add here commands to install the package into debian/tmp + $(MAKE) install DESTDIR=$(CURDIR)/debian/tmp + + +# Build architecture-independent files here. +binary-indep: build install +# We have nothing to do by default. + +# Build architecture-dependent files here. 
+binary-arch: build install + dh_testdir + dh_testroot + dh_installchangelogs ChangeLog + dh_installdocs + dh_installexamples + dh_install --sourcedir=debian/tmp +# dh_installmenu +# dh_installdebconf +# dh_installlogrotate +# dh_installemacsen +# dh_installpam +# dh_installmime +# dh_installinit +# dh_installcron +# dh_installinfo + dh_installman + dh_link + dh_strip + dh_compress + dh_fixperms +# dh_perl +# dh_python + dh_makeshlibs + dh_installdeb + dh_shlibdeps + dh_gencontrol + dh_md5sums + dh_builddeb + +binary: binary-indep binary-arch +.PHONY: build clean binary-indep binary-arch binary install diff --git a/clipper/sparsehash-2.0.3/packages/deb/sparsehash.dirs b/clipper/sparsehash-2.0.3/packages/deb/sparsehash.dirs new file mode 100644 index 0000000..f4d02ef --- /dev/null +++ b/clipper/sparsehash-2.0.3/packages/deb/sparsehash.dirs @@ -0,0 +1,5 @@ +usr/include +usr/include/google +usr/include/sparsehash +usr/lib +usr/lib/pkgconfig diff --git a/clipper/sparsehash-2.0.3/packages/deb/sparsehash.install b/clipper/sparsehash-2.0.3/packages/deb/sparsehash.install new file mode 100644 index 0000000..b6ee103 --- /dev/null +++ b/clipper/sparsehash-2.0.3/packages/deb/sparsehash.install @@ -0,0 +1,6 @@ +usr/include/google/* +usr/include/sparsehash/* +usr/lib/pkgconfig/* +debian/tmp/usr/include/google/* +debian/tmp/usr/include/sparsehash/* +debian/tmp/usr/lib/pkgconfig/* diff --git a/clipper/sparsehash-2.0.2/packages/rpm.sh b/clipper/sparsehash-2.0.3/packages/rpm.sh similarity index 100% rename from clipper/sparsehash-2.0.2/packages/rpm.sh rename to clipper/sparsehash-2.0.3/packages/rpm.sh diff --git a/clipper/sparsehash-2.0.2/packages/rpm/rpm.spec b/clipper/sparsehash-2.0.3/packages/rpm/rpm.spec similarity index 100% rename from clipper/sparsehash-2.0.2/packages/rpm/rpm.spec rename to clipper/sparsehash-2.0.3/packages/rpm/rpm.spec diff --git a/clipper/sparsehash-2.0.2/sparsehash.sln b/clipper/sparsehash-2.0.3/sparsehash.sln similarity index 98% rename from 
clipper/sparsehash-2.0.2/sparsehash.sln rename to clipper/sparsehash-2.0.3/sparsehash.sln index 9b0b371..f2d2ea2 100755 --- a/clipper/sparsehash-2.0.2/sparsehash.sln +++ b/clipper/sparsehash-2.0.3/sparsehash.sln @@ -1,63 +1,63 @@ -Microsoft Visual Studio Solution File, Format Version 8.00 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "type_traits_unittest", "vsprojects\type_traits_unittest\type_traits_unittest.vcproj", "{008CCFED-7D7B-46F8-8E13-03837A2258B3}" - ProjectSection(ProjectDependencies) = postProject - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sparsetable_unittest", "vsprojects\sparsetable_unittest\sparsetable_unittest.vcproj", "{E420867B-8BFA-4739-99EC-E008AB762FF9}" - ProjectSection(ProjectDependencies) = postProject - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "hashtable_test", "vsprojects\hashtable_test\hashtable_test.vcproj", "{FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}" - ProjectSection(ProjectDependencies) = postProject - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "simple_test", "vsprojects\simple_test\simple_test.vcproj", "{FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}" - ProjectSection(ProjectDependencies) = postProject - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libc_allocator_with_realloc_test", "vsprojects\libc_allocator_with_realloc_test\libc_allocator_with_realloc_test.vcproj", "{FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}" - ProjectSection(ProjectDependencies) = postProject - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "time_hash_map", "vsprojects\time_hash_map\time_hash_map.vcproj", "{A74E5DB8-5295-487A-AB1D-23859F536F45}" - ProjectSection(ProjectDependencies) = postProject - EndProjectSection -EndProject -Global - GlobalSection(SolutionConfiguration) = preSolution - Debug = Debug - Release = Release - EndGlobalSection - 
GlobalSection(ProjectDependencies) = postSolution - EndGlobalSection - GlobalSection(ProjectConfiguration) = postSolution - {008CCFED-7D7B-46F8-8E13-03837A2258B3}.Debug.ActiveCfg = Debug|Win32 - {008CCFED-7D7B-46F8-8E13-03837A2258B3}.Debug.Build.0 = Debug|Win32 - {008CCFED-7D7B-46F8-8E13-03837A2258B3}.Release.ActiveCfg = Release|Win32 - {008CCFED-7D7B-46F8-8E13-03837A2258B3}.Release.Build.0 = Release|Win32 - {E420867B-8BFA-4739-99EC-E008AB762FF9}.Debug.ActiveCfg = Debug|Win32 - {E420867B-8BFA-4739-99EC-E008AB762FF9}.Debug.Build.0 = Debug|Win32 - {E420867B-8BFA-4739-99EC-E008AB762FF9}.Release.ActiveCfg = Release|Win32 - {E420867B-8BFA-4739-99EC-E008AB762FF9}.Release.Build.0 = Release|Win32 - {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}.Debug.ActiveCfg = Debug|Win32 - {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}.Debug.Build.0 = Debug|Win32 - {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}.Release.ActiveCfg = Release|Win32 - {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}.Release.Build.0 = Release|Win32 - {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}.Debug.ActiveCfg = Debug|Win32 - {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}.Debug.Build.0 = Debug|Win32 - {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}.Release.ActiveCfg = Release|Win32 - {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}.Release.Build.0 = Release|Win32 - {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}.Debug.ActiveCfg = Debug|Win32 - {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}.Debug.Build.0 = Debug|Win32 - {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}.Release.ActiveCfg = Release|Win32 - {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}.Release.Build.0 = Release|Win32 - {A74E5DB8-5295-487A-AB1D-23859F536F45}.Debug.ActiveCfg = Debug|Win32 - {A74E5DB8-5295-487A-AB1D-23859F536F45}.Debug.Build.0 = Debug|Win32 - {A74E5DB8-5295-487A-AB1D-23859F536F45}.Release.ActiveCfg = Release|Win32 - {A74E5DB8-5295-487A-AB1D-23859F536F45}.Release.Build.0 = Release|Win32 - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - EndGlobalSection - GlobalSection(ExtensibilityAddIns) = postSolution - 
EndGlobalSection -EndGlobal +Microsoft Visual Studio Solution File, Format Version 8.00 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "type_traits_unittest", "vsprojects\type_traits_unittest\type_traits_unittest.vcproj", "{008CCFED-7D7B-46F8-8E13-03837A2258B3}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sparsetable_unittest", "vsprojects\sparsetable_unittest\sparsetable_unittest.vcproj", "{E420867B-8BFA-4739-99EC-E008AB762FF9}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "hashtable_test", "vsprojects\hashtable_test\hashtable_test.vcproj", "{FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "simple_test", "vsprojects\simple_test\simple_test.vcproj", "{FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libc_allocator_with_realloc_test", "vsprojects\libc_allocator_with_realloc_test\libc_allocator_with_realloc_test.vcproj", "{FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "time_hash_map", "vsprojects\time_hash_map\time_hash_map.vcproj", "{A74E5DB8-5295-487A-AB1D-23859F536F45}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject +Global + GlobalSection(SolutionConfiguration) = preSolution + Debug = Debug + Release = Release + EndGlobalSection + GlobalSection(ProjectDependencies) = postSolution + EndGlobalSection + GlobalSection(ProjectConfiguration) = postSolution + {008CCFED-7D7B-46F8-8E13-03837A2258B3}.Debug.ActiveCfg = Debug|Win32 + 
{008CCFED-7D7B-46F8-8E13-03837A2258B3}.Debug.Build.0 = Debug|Win32 + {008CCFED-7D7B-46F8-8E13-03837A2258B3}.Release.ActiveCfg = Release|Win32 + {008CCFED-7D7B-46F8-8E13-03837A2258B3}.Release.Build.0 = Release|Win32 + {E420867B-8BFA-4739-99EC-E008AB762FF9}.Debug.ActiveCfg = Debug|Win32 + {E420867B-8BFA-4739-99EC-E008AB762FF9}.Debug.Build.0 = Debug|Win32 + {E420867B-8BFA-4739-99EC-E008AB762FF9}.Release.ActiveCfg = Release|Win32 + {E420867B-8BFA-4739-99EC-E008AB762FF9}.Release.Build.0 = Release|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}.Debug.ActiveCfg = Debug|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}.Debug.Build.0 = Debug|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}.Release.ActiveCfg = Release|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0535}.Release.Build.0 = Release|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}.Debug.ActiveCfg = Debug|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}.Debug.Build.0 = Debug|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}.Release.ActiveCfg = Release|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0538}.Release.Build.0 = Release|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}.Debug.ActiveCfg = Debug|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}.Debug.Build.0 = Debug|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}.Release.ActiveCfg = Release|Win32 + {FCDB3718-F01C-4DE4-B9F5-E10F2C5C0539}.Release.Build.0 = Release|Win32 + {A74E5DB8-5295-487A-AB1D-23859F536F45}.Debug.ActiveCfg = Debug|Win32 + {A74E5DB8-5295-487A-AB1D-23859F536F45}.Debug.Build.0 = Debug|Win32 + {A74E5DB8-5295-487A-AB1D-23859F536F45}.Release.ActiveCfg = Release|Win32 + {A74E5DB8-5295-487A-AB1D-23859F536F45}.Release.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + EndGlobalSection + GlobalSection(ExtensibilityAddIns) = postSolution + EndGlobalSection +EndGlobal diff --git a/clipper/sparsehash-2.0.2/src/config.h.in b/clipper/sparsehash-2.0.3/src/config.h.in similarity index 100% rename from clipper/sparsehash-2.0.2/src/config.h.in 
rename to clipper/sparsehash-2.0.3/src/config.h.in diff --git a/clipper/sparsehash-2.0.2/src/config.h.include b/clipper/sparsehash-2.0.3/src/config.h.include similarity index 100% rename from clipper/sparsehash-2.0.2/src/config.h.include rename to clipper/sparsehash-2.0.3/src/config.h.include diff --git a/clipper/sparsehash-2.0.2/src/google/dense_hash_map b/clipper/sparsehash-2.0.3/src/google/dense_hash_map similarity index 100% rename from clipper/sparsehash-2.0.2/src/google/dense_hash_map rename to clipper/sparsehash-2.0.3/src/google/dense_hash_map diff --git a/clipper/sparsehash-2.0.2/src/google/dense_hash_set b/clipper/sparsehash-2.0.3/src/google/dense_hash_set similarity index 100% rename from clipper/sparsehash-2.0.2/src/google/dense_hash_set rename to clipper/sparsehash-2.0.3/src/google/dense_hash_set diff --git a/clipper/sparsehash-2.0.2/src/google/sparse_hash_map b/clipper/sparsehash-2.0.3/src/google/sparse_hash_map similarity index 100% rename from clipper/sparsehash-2.0.2/src/google/sparse_hash_map rename to clipper/sparsehash-2.0.3/src/google/sparse_hash_map diff --git a/clipper/sparsehash-2.0.2/src/google/sparse_hash_set b/clipper/sparsehash-2.0.3/src/google/sparse_hash_set similarity index 100% rename from clipper/sparsehash-2.0.2/src/google/sparse_hash_set rename to clipper/sparsehash-2.0.3/src/google/sparse_hash_set diff --git a/clipper/sparsehash-2.0.2/src/google/sparsehash/densehashtable.h b/clipper/sparsehash-2.0.3/src/google/sparsehash/densehashtable.h similarity index 100% rename from clipper/sparsehash-2.0.2/src/google/sparsehash/densehashtable.h rename to clipper/sparsehash-2.0.3/src/google/sparsehash/densehashtable.h diff --git a/clipper/sparsehash-2.0.2/src/google/sparsehash/hashtable-common.h b/clipper/sparsehash-2.0.3/src/google/sparsehash/hashtable-common.h similarity index 100% rename from clipper/sparsehash-2.0.2/src/google/sparsehash/hashtable-common.h rename to clipper/sparsehash-2.0.3/src/google/sparsehash/hashtable-common.h diff 
--git a/clipper/sparsehash-2.0.2/src/google/sparsehash/libc_allocator_with_realloc.h b/clipper/sparsehash-2.0.3/src/google/sparsehash/libc_allocator_with_realloc.h similarity index 100% rename from clipper/sparsehash-2.0.2/src/google/sparsehash/libc_allocator_with_realloc.h rename to clipper/sparsehash-2.0.3/src/google/sparsehash/libc_allocator_with_realloc.h diff --git a/clipper/sparsehash-2.0.2/src/google/sparsehash/sparsehashtable.h b/clipper/sparsehash-2.0.3/src/google/sparsehash/sparsehashtable.h similarity index 100% rename from clipper/sparsehash-2.0.2/src/google/sparsehash/sparsehashtable.h rename to clipper/sparsehash-2.0.3/src/google/sparsehash/sparsehashtable.h diff --git a/clipper/sparsehash-2.0.2/src/google/sparsetable b/clipper/sparsehash-2.0.3/src/google/sparsetable similarity index 100% rename from clipper/sparsehash-2.0.2/src/google/sparsetable rename to clipper/sparsehash-2.0.3/src/google/sparsetable diff --git a/clipper/sparsehash-2.0.2/src/google/template_util.h b/clipper/sparsehash-2.0.3/src/google/template_util.h similarity index 100% rename from clipper/sparsehash-2.0.2/src/google/template_util.h rename to clipper/sparsehash-2.0.3/src/google/template_util.h diff --git a/clipper/sparsehash-2.0.2/src/google/type_traits.h b/clipper/sparsehash-2.0.3/src/google/type_traits.h similarity index 100% rename from clipper/sparsehash-2.0.2/src/google/type_traits.h rename to clipper/sparsehash-2.0.3/src/google/type_traits.h diff --git a/clipper/sparsehash-2.0.2/src/hash_test_interface.h b/clipper/sparsehash-2.0.3/src/hash_test_interface.h similarity index 99% rename from clipper/sparsehash-2.0.2/src/hash_test_interface.h rename to clipper/sparsehash-2.0.3/src/hash_test_interface.h index 294d4dd..98b4662 100644 --- a/clipper/sparsehash-2.0.2/src/hash_test_interface.h +++ b/clipper/sparsehash-2.0.3/src/hash_test_interface.h @@ -433,7 +433,7 @@ class HashtableInterface_SparseHashMap bool supports_num_table_copies() const { return false; } bool 
supports_serialization() const { return true; } - void set_empty_key(const typename p::key_type& k) { } + void set_empty_key(const typename p::key_type&) { } void clear_empty_key() { } typename p::key_type empty_key() const { return typename p::key_type(); } @@ -540,7 +540,7 @@ class HashtableInterface_SparseHashSet bool supports_num_table_copies() const { return false; } bool supports_serialization() const { return true; } - void set_empty_key(const typename p::key_type& k) { } + void set_empty_key(const typename p::key_type&) { } void clear_empty_key() { } typename p::key_type empty_key() const { return typename p::key_type(); } @@ -656,7 +656,7 @@ class HashtableInterface_SparseHashtable bool supports_num_table_copies() const { return true; } bool supports_serialization() const { return true; } - void set_empty_key(const typename p::key_type& k) { } + void set_empty_key(const typename p::key_type&) { } void clear_empty_key() { } typename p::key_type empty_key() const { return typename p::key_type(); } diff --git a/clipper/sparsehash-2.0.2/src/hashtable_test.cc b/clipper/sparsehash-2.0.3/src/hashtable_test.cc similarity index 99% rename from clipper/sparsehash-2.0.2/src/hashtable_test.cc rename to clipper/sparsehash-2.0.3/src/hashtable_test.cc index 21c60a7..193138b 100644 --- a/clipper/sparsehash-2.0.2/src/hashtable_test.cc +++ b/clipper/sparsehash-2.0.3/src/hashtable_test.cc @@ -586,8 +586,8 @@ TYPED_TEST(HashtableIntTest, Typedefs) { typename TypeParam::const_pointer cp; // I can't declare variables of reference-type, since I have nothing // to point them to, so I just make sure that these types exist. 
- typedef typename TypeParam::reference r; - typedef typename TypeParam::const_reference cf; + __attribute__((unused)) typedef typename TypeParam::reference r; + __attribute__((unused)) typedef typename TypeParam::const_reference cf; typename TypeParam::iterator i; typename TypeParam::const_iterator ci; @@ -901,7 +901,7 @@ TYPED_TEST(HashtableAllTest, Swap) { #ifdef _MSC_VER other_ht.swap(this->ht_); #else - swap(this->ht_, other_ht); + std::swap(this->ht_, other_ht); #endif EXPECT_EQ(this->UniqueKey(1), this->ht_.deleted_key()); diff --git a/clipper/sparsehash-2.0.2/src/libc_allocator_with_realloc_test.cc b/clipper/sparsehash-2.0.3/src/libc_allocator_with_realloc_test.cc similarity index 100% rename from clipper/sparsehash-2.0.2/src/libc_allocator_with_realloc_test.cc rename to clipper/sparsehash-2.0.3/src/libc_allocator_with_realloc_test.cc diff --git a/clipper/sparsehash-2.0.2/src/simple_compat_test.cc b/clipper/sparsehash-2.0.3/src/simple_compat_test.cc similarity index 100% rename from clipper/sparsehash-2.0.2/src/simple_compat_test.cc rename to clipper/sparsehash-2.0.3/src/simple_compat_test.cc diff --git a/clipper/sparsehash-2.0.2/src/simple_test.cc b/clipper/sparsehash-2.0.3/src/simple_test.cc similarity index 100% rename from clipper/sparsehash-2.0.2/src/simple_test.cc rename to clipper/sparsehash-2.0.3/src/simple_test.cc diff --git a/clipper/sparsehash-2.0.2/src/sparsehash/dense_hash_map b/clipper/sparsehash-2.0.3/src/sparsehash/dense_hash_map similarity index 100% rename from clipper/sparsehash-2.0.2/src/sparsehash/dense_hash_map rename to clipper/sparsehash-2.0.3/src/sparsehash/dense_hash_map diff --git a/clipper/sparsehash-2.0.2/src/sparsehash/dense_hash_set b/clipper/sparsehash-2.0.3/src/sparsehash/dense_hash_set similarity index 100% rename from clipper/sparsehash-2.0.2/src/sparsehash/dense_hash_set rename to clipper/sparsehash-2.0.3/src/sparsehash/dense_hash_set diff --git a/clipper/sparsehash-2.0.2/src/sparsehash/internal/densehashtable.h 
b/clipper/sparsehash-2.0.3/src/sparsehash/internal/densehashtable.h similarity index 100% rename from clipper/sparsehash-2.0.2/src/sparsehash/internal/densehashtable.h rename to clipper/sparsehash-2.0.3/src/sparsehash/internal/densehashtable.h diff --git a/clipper/sparsehash-2.0.2/src/sparsehash/internal/hashtable-common.h b/clipper/sparsehash-2.0.3/src/sparsehash/internal/hashtable-common.h similarity index 99% rename from clipper/sparsehash-2.0.2/src/sparsehash/internal/hashtable-common.h rename to clipper/sparsehash-2.0.3/src/sparsehash/internal/hashtable-common.h index 1224e0a..bac2b88 100644 --- a/clipper/sparsehash-2.0.2/src/sparsehash/internal/hashtable-common.h +++ b/clipper/sparsehash-2.0.3/src/sparsehash/internal/hashtable-common.h @@ -51,7 +51,7 @@ _START_GOOGLE_NAMESPACE_ template struct SparsehashCompileAssert { }; #define SPARSEHASH_COMPILE_ASSERT(expr, msg) \ - typedef SparsehashCompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] + __attribute__((unused)) typedef SparsehashCompileAssert<(bool(expr))> msg[bool(expr) ? 
1 : -1] namespace sparsehash_internal { diff --git a/clipper/sparsehash-2.0.2/src/sparsehash/internal/libc_allocator_with_realloc.h b/clipper/sparsehash-2.0.3/src/sparsehash/internal/libc_allocator_with_realloc.h similarity index 100% rename from clipper/sparsehash-2.0.2/src/sparsehash/internal/libc_allocator_with_realloc.h rename to clipper/sparsehash-2.0.3/src/sparsehash/internal/libc_allocator_with_realloc.h diff --git a/clipper/sparsehash-2.0.2/src/sparsehash/internal/sparsehashtable.h b/clipper/sparsehash-2.0.3/src/sparsehash/internal/sparsehashtable.h similarity index 99% rename from clipper/sparsehash-2.0.2/src/sparsehash/internal/sparsehashtable.h rename to clipper/sparsehash-2.0.3/src/sparsehash/internal/sparsehashtable.h index 7ee1391..f54ea51 100644 --- a/clipper/sparsehash-2.0.2/src/sparsehash/internal/sparsehashtable.h +++ b/clipper/sparsehash-2.0.3/src/sparsehash/internal/sparsehashtable.h @@ -165,7 +165,7 @@ struct sparse_hashtable_iterator { public: typedef sparse_hashtable_iterator iterator; typedef sparse_hashtable_const_iterator const_iterator; - typedef typename sparsetable::nonempty_iterator + typedef typename sparsetable::nonempty_iterator st_iterator; typedef std::forward_iterator_tag iterator_category; // very little defined! @@ -217,7 +217,7 @@ struct sparse_hashtable_const_iterator { public: typedef sparse_hashtable_iterator iterator; typedef sparse_hashtable_const_iterator const_iterator; - typedef typename sparsetable::const_nonempty_iterator + typedef typename sparsetable::const_nonempty_iterator st_iterator; typedef std::forward_iterator_tag iterator_category; // very little defined! @@ -271,7 +271,7 @@ struct sparse_hashtable_destructive_iterator { public: typedef sparse_hashtable_destructive_iterator iterator; - typedef typename sparsetable::destructive_iterator + typedef typename sparsetable::destructive_iterator st_iterator; typedef std::forward_iterator_tag iterator_category; // very little defined! 
diff --git a/clipper/sparsehash-2.0.2/src/sparsehash/sparse_hash_map b/clipper/sparsehash-2.0.3/src/sparsehash/sparse_hash_map similarity index 100% rename from clipper/sparsehash-2.0.2/src/sparsehash/sparse_hash_map rename to clipper/sparsehash-2.0.3/src/sparsehash/sparse_hash_map diff --git a/clipper/sparsehash-2.0.2/src/sparsehash/sparse_hash_set b/clipper/sparsehash-2.0.3/src/sparsehash/sparse_hash_set similarity index 100% rename from clipper/sparsehash-2.0.2/src/sparsehash/sparse_hash_set rename to clipper/sparsehash-2.0.3/src/sparsehash/sparse_hash_set diff --git a/clipper/sparsehash-2.0.2/src/sparsehash/sparsetable b/clipper/sparsehash-2.0.3/src/sparsehash/sparsetable similarity index 100% rename from clipper/sparsehash-2.0.2/src/sparsehash/sparsetable rename to clipper/sparsehash-2.0.3/src/sparsehash/sparsetable diff --git a/clipper/sparsehash-2.0.2/src/sparsehash/template_util.h b/clipper/sparsehash-2.0.3/src/sparsehash/template_util.h similarity index 100% rename from clipper/sparsehash-2.0.2/src/sparsehash/template_util.h rename to clipper/sparsehash-2.0.3/src/sparsehash/template_util.h diff --git a/clipper/sparsehash-2.0.2/src/sparsehash/type_traits.h b/clipper/sparsehash-2.0.3/src/sparsehash/type_traits.h similarity index 100% rename from clipper/sparsehash-2.0.2/src/sparsehash/type_traits.h rename to clipper/sparsehash-2.0.3/src/sparsehash/type_traits.h diff --git a/clipper/sparsehash-2.0.2/src/sparsetable_unittest.cc b/clipper/sparsehash-2.0.3/src/sparsetable_unittest.cc similarity index 100% rename from clipper/sparsehash-2.0.2/src/sparsetable_unittest.cc rename to clipper/sparsehash-2.0.3/src/sparsetable_unittest.cc diff --git a/clipper/sparsehash-2.0.2/src/template_util_unittest.cc b/clipper/sparsehash-2.0.3/src/template_util_unittest.cc similarity index 100% rename from clipper/sparsehash-2.0.2/src/template_util_unittest.cc rename to clipper/sparsehash-2.0.3/src/template_util_unittest.cc diff --git a/clipper/sparsehash-2.0.2/src/testutil.h 
b/clipper/sparsehash-2.0.3/src/testutil.h similarity index 100% rename from clipper/sparsehash-2.0.2/src/testutil.h rename to clipper/sparsehash-2.0.3/src/testutil.h diff --git a/clipper/sparsehash-2.0.2/src/time_hash_map.cc b/clipper/sparsehash-2.0.3/src/time_hash_map.cc similarity index 100% rename from clipper/sparsehash-2.0.2/src/time_hash_map.cc rename to clipper/sparsehash-2.0.3/src/time_hash_map.cc diff --git a/clipper/sparsehash-2.0.2/src/type_traits_unittest.cc b/clipper/sparsehash-2.0.3/src/type_traits_unittest.cc similarity index 100% rename from clipper/sparsehash-2.0.2/src/type_traits_unittest.cc rename to clipper/sparsehash-2.0.3/src/type_traits_unittest.cc diff --git a/clipper/sparsehash-2.0.3/src/windows/config.h b/clipper/sparsehash-2.0.3/src/windows/config.h new file mode 100644 index 0000000..2040758 --- /dev/null +++ b/clipper/sparsehash-2.0.3/src/windows/config.h @@ -0,0 +1,149 @@ +#ifndef GOOGLE_SPARSEHASH_WINDOWS_CONFIG_H_ +#define GOOGLE_SPARSEHASH_WINDOWS_CONFIG_H_ + +/* src/config.h.in. Generated from configure.ac by autoheader. */ + +/* Namespace for Google classes */ +#define GOOGLE_NAMESPACE ::google + +/* the location of the header defining hash functions */ +#define HASH_FUN_H + +/* the location of or */ +#define HASH_MAP_H + +/* the namespace of the hash<> function */ +#define HASH_NAMESPACE stdext + +/* the location of or */ +#define HASH_SET_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_GOOGLE_MALLOC_EXTENSION_H + +/* define if the compiler has hash_map */ +#define HAVE_HASH_MAP 1 + +/* define if the compiler has hash_set */ +#define HAVE_HASH_SET 1 + +/* Define to 1 if you have the header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if the system has the type `long long'. */ +#define HAVE_LONG_LONG 1 + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the `memmove' function. */ +#define HAVE_MEMMOVE 1 + +/* Define to 1 if you have the header file. 
*/ +#undef HAVE_MEMORY_H + +/* define if the compiler implements namespaces */ +#define HAVE_NAMESPACES 1 + +/* Define if you have POSIX threads libraries and header files. */ +#undef HAVE_PTHREAD + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_RESOURCE_H + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TIME_H + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_UTSNAME_H + +/* Define to 1 if the system has the type `uint16_t'. */ +#undef HAVE_UINT16_T + +/* Define to 1 if you have the header file. */ +#undef HAVE_UNISTD_H + +/* define if the compiler supports unordered_{map,set} */ +#undef HAVE_UNORDERED_MAP + +/* Define to 1 if the system has the type `u_int16_t'. */ +#undef HAVE_U_INT16_T + +/* Define to 1 if the system has the type `__uint16'. */ +#define HAVE___UINT16 1 + +/* Name of package */ +#undef PACKAGE + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the home page for this package. */ +#undef PACKAGE_URL + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* Define to necessary symbol if this constant uses a non-standard name on + your system. 
*/ +#undef PTHREAD_CREATE_JOINABLE + +/* The system-provided hash function including the namespace. */ +#define SPARSEHASH_HASH HASH_NAMESPACE::hash_compare + +/* The system-provided hash function, in namespace HASH_NAMESPACE. */ +#define SPARSEHASH_HASH_NO_NAMESPACE hash_compare + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Version number of package */ +#undef VERSION + +/* Stops putting the code inside the Google namespace */ +#define _END_GOOGLE_NAMESPACE_ } + +/* Puts following code inside the Google namespace */ +#define _START_GOOGLE_NAMESPACE_ namespace google { + + +// --------------------------------------------------------------------- +// Extra stuff not found in config.h.in + +#define HAVE_WINDOWS_H 1 // used in time_hash_map + +// This makes sure the definitions in config.h and sparseconfig.h match +// up. If they don't, the compiler will complain about redefinition. +#include + +// TODO(csilvers): include windows/port.h in every relevant source file instead? +#include "windows/port.h" + +#endif /* GOOGLE_SPARSEHASH_WINDOWS_CONFIG_H_ */ diff --git a/clipper/sparsehash-2.0.3/src/windows/google/sparsehash/sparseconfig.h b/clipper/sparsehash-2.0.3/src/windows/google/sparsehash/sparseconfig.h new file mode 100644 index 0000000..3091559 --- /dev/null +++ b/clipper/sparsehash-2.0.3/src/windows/google/sparsehash/sparseconfig.h @@ -0,0 +1,49 @@ +/* + * NOTE: This file is for internal use only. + * Do not use these #defines in your own program! + */ + +/* Namespace for Google classes */ +#define GOOGLE_NAMESPACE ::google + +/* the location of the header defining hash functions */ +#define HASH_FUN_H + +/* the namespace of the hash<> function */ +#define HASH_NAMESPACE stdext + +/* Define to 1 if you have the header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if the system has the type `long long'. */ +#define HAVE_LONG_LONG 1 + +/* Define to 1 if you have the `memcpy' function. 
*/ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if the system has the type `uint16_t'. */ +#undef HAVE_UINT16_T + +/* Define to 1 if the system has the type `u_int16_t'. */ +#undef HAVE_U_INT16_T + +/* Define to 1 if the system has the type `__uint16'. */ +#define HAVE___UINT16 1 + +/* The system-provided hash function including the namespace. */ +#define SPARSEHASH_HASH HASH_NAMESPACE::hash_compare + +/* The system-provided hash function, in namespace HASH_NAMESPACE. */ +#define SPARSEHASH_HASH_NO_NAMESPACE hash_compare + +/* Stops putting the code inside the Google namespace */ +#define _END_GOOGLE_NAMESPACE_ } + +/* Puts following code inside the Google namespace */ +#define _START_GOOGLE_NAMESPACE_ namespace google { diff --git a/clipper/sparsehash-2.0.3/src/windows/port.cc b/clipper/sparsehash-2.0.3/src/windows/port.cc new file mode 100644 index 0000000..d46ffb9 --- /dev/null +++ b/clipper/sparsehash-2.0.3/src/windows/port.cc @@ -0,0 +1,64 @@ +/* Copyright (c) 2007, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * --- + * Author: Craig Silverstein + */ + +#include +#ifndef WIN32 +# error You should only be including windows/port.cc in a windows environment! +#endif + +#include "config.h" +#include // for va_list, va_start, va_end +#include "port.h" + +// Calls the windows _vsnprintf, but always NUL-terminate. +int snprintf(char *str, size_t size, const char *format, ...) { + if (size == 0) // not even room for a \0? 
+ return -1; // not what C99 says to do, but what windows does + str[size-1] = '\0'; + va_list ap; + va_start(ap, format); + const int r = _vsnprintf(str, size-1, format, ap); + va_end(ap); + return r; +} + +std::string TmpFile(const char* basename) { + char tmppath_buffer[1024]; + int tmppath_len = GetTempPathA(sizeof(tmppath_buffer), tmppath_buffer); + if (tmppath_len <= 0 || tmppath_len >= sizeof(tmppath_buffer)) { + return basename; // an error, so just bail on tmppath + } + snprintf(tmppath_buffer + tmppath_len, sizeof(tmppath_buffer) - tmppath_len, + "\\%s", basename); + return tmppath_buffer; +} diff --git a/clipper/sparsehash-2.0.3/src/windows/port.h b/clipper/sparsehash-2.0.3/src/windows/port.h new file mode 100644 index 0000000..0ce8184 --- /dev/null +++ b/clipper/sparsehash-2.0.3/src/windows/port.h @@ -0,0 +1,72 @@ +/* Copyright (c) 2007, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * --- + * Author: Craig Silverstein + * + * These are some portability typedefs and defines to make it a bit + * easier to compile this code -- in particular, unittests -- under VC++. + * Other portability code is found in windows/sparsehash/internal/sparseconfig.h. + * + * Several of these are taken from glib: + * http://developer.gnome.org/doc/API/glib/glib-windows-compatability-functions.html + */ + +#ifndef SPARSEHASH_WINDOWS_PORT_H_ +#define SPARSEHASH_WINDOWS_PORT_H_ + +#include +#include "config.h" + +#ifdef WIN32 + +#define WIN32_LEAN_AND_MEAN /* We always want minimal includes */ +#include +#include /* because we so often use open/close/etc */ +#include + +// 4996: Yes, we're ok using the "unsafe" functions like _vsnprintf and fopen +// 4127: We use "while (1)" sometimes: yes, we know it's a constant +// 4181: type_traits_test is explicitly testing 'qualifier applied to reference' +#pragma warning(disable:4996 4127 4181) + + +// file I/O +#define unlink _unlink +#define strdup _strdup + +// We can't just use _snprintf as a drop-in replacement, because it +// doesn't always NUL-terminate. 
:-( +extern int snprintf(char *str, size_t size, const char *format, ...); + +extern std::string TmpFile(const char* basename); // used in hashtable_unittest + +#endif /* WIN32 */ + +#endif /* SPARSEHASH_WINDOWS_PORT_H_ */ diff --git a/clipper/sparsehash-2.0.3/src/windows/sparsehash/internal/sparseconfig.h b/clipper/sparsehash-2.0.3/src/windows/sparsehash/internal/sparseconfig.h new file mode 100644 index 0000000..3091559 --- /dev/null +++ b/clipper/sparsehash-2.0.3/src/windows/sparsehash/internal/sparseconfig.h @@ -0,0 +1,49 @@ +/* + * NOTE: This file is for internal use only. + * Do not use these #defines in your own program! + */ + +/* Namespace for Google classes */ +#define GOOGLE_NAMESPACE ::google + +/* the location of the header defining hash functions */ +#define HASH_FUN_H + +/* the namespace of the hash<> function */ +#define HASH_NAMESPACE stdext + +/* Define to 1 if you have the header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if the system has the type `long long'. */ +#define HAVE_LONG_LONG 1 + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if the system has the type `uint16_t'. */ +#undef HAVE_UINT16_T + +/* Define to 1 if the system has the type `u_int16_t'. */ +#undef HAVE_U_INT16_T + +/* Define to 1 if the system has the type `__uint16'. */ +#define HAVE___UINT16 1 + +/* The system-provided hash function including the namespace. */ +#define SPARSEHASH_HASH HASH_NAMESPACE::hash_compare + +/* The system-provided hash function, in namespace HASH_NAMESPACE. 
*/ +#define SPARSEHASH_HASH_NO_NAMESPACE hash_compare + +/* Stops putting the code inside the Google namespace */ +#define _END_GOOGLE_NAMESPACE_ } + +/* Puts following code inside the Google namespace */ +#define _START_GOOGLE_NAMESPACE_ namespace google { diff --git a/clipper/sparsehash-2.0.2/vsprojects/hashtable_test/hashtable_test.vcproj b/clipper/sparsehash-2.0.3/vsprojects/hashtable_test/hashtable_test.vcproj similarity index 96% rename from clipper/sparsehash-2.0.2/vsprojects/hashtable_test/hashtable_test.vcproj rename to clipper/sparsehash-2.0.3/vsprojects/hashtable_test/hashtable_test.vcproj index 9ecc139..94eea19 100755 --- a/clipper/sparsehash-2.0.2/vsprojects/hashtable_test/hashtable_test.vcproj +++ b/clipper/sparsehash-2.0.3/vsprojects/hashtable_test/hashtable_test.vcproj @@ -1,197 +1,197 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/clipper/sparsehash-2.0.2/vsprojects/libc_allocator_with_realloc_test/libc_allocator_with_realloc_test.vcproj b/clipper/sparsehash-2.0.3/vsprojects/libc_allocator_with_realloc_test/libc_allocator_with_realloc_test.vcproj similarity index 96% rename from clipper/sparsehash-2.0.2/vsprojects/libc_allocator_with_realloc_test/libc_allocator_with_realloc_test.vcproj rename to clipper/sparsehash-2.0.3/vsprojects/libc_allocator_with_realloc_test/libc_allocator_with_realloc_test.vcproj index 1d2ba69..1d35d04 100755 --- a/clipper/sparsehash-2.0.2/vsprojects/libc_allocator_with_realloc_test/libc_allocator_with_realloc_test.vcproj +++ b/clipper/sparsehash-2.0.3/vsprojects/libc_allocator_with_realloc_test/libc_allocator_with_realloc_test.vcproj @@ -1,161 +1,161 @@ - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/clipper/sparsehash-2.0.2/vsprojects/simple_test/simple_test.vcproj b/clipper/sparsehash-2.0.3/vsprojects/simple_test/simple_test.vcproj similarity index 96% rename from clipper/sparsehash-2.0.2/vsprojects/simple_test/simple_test.vcproj rename to clipper/sparsehash-2.0.3/vsprojects/simple_test/simple_test.vcproj index 5e3b05a..afa0c69 100755 --- a/clipper/sparsehash-2.0.2/vsprojects/simple_test/simple_test.vcproj +++ b/clipper/sparsehash-2.0.3/vsprojects/simple_test/simple_test.vcproj @@ -1,188 +1,188 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/clipper/sparsehash-2.0.2/vsprojects/sparsetable_unittest/sparsetable_unittest.vcproj b/clipper/sparsehash-2.0.3/vsprojects/sparsetable_unittest/sparsetable_unittest.vcproj similarity index 96% rename from clipper/sparsehash-2.0.2/vsprojects/sparsetable_unittest/sparsetable_unittest.vcproj rename to clipper/sparsehash-2.0.3/vsprojects/sparsetable_unittest/sparsetable_unittest.vcproj index 2e8429a..ecece4a 100755 --- a/clipper/sparsehash-2.0.2/vsprojects/sparsetable_unittest/sparsetable_unittest.vcproj +++ b/clipper/sparsehash-2.0.3/vsprojects/sparsetable_unittest/sparsetable_unittest.vcproj @@ -1,170 +1,170 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/clipper/sparsehash-2.0.3/vsprojects/template_util_unittest/template_util_unittest.vcproj b/clipper/sparsehash-2.0.3/vsprojects/template_util_unittest/template_util_unittest.vcproj new file mode 100755 index 0000000..58a9a1f --- /dev/null +++ b/clipper/sparsehash-2.0.3/vsprojects/template_util_unittest/template_util_unittest.vcproj @@ -0,0 +1,167 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/clipper/sparsehash-2.0.2/vsprojects/time_hash_map/time_hash_map.vcproj b/clipper/sparsehash-2.0.3/vsprojects/time_hash_map/time_hash_map.vcproj similarity index 96% rename from clipper/sparsehash-2.0.2/vsprojects/time_hash_map/time_hash_map.vcproj rename to clipper/sparsehash-2.0.3/vsprojects/time_hash_map/time_hash_map.vcproj index c89f1e9..bf40c4c 100755 --- a/clipper/sparsehash-2.0.2/vsprojects/time_hash_map/time_hash_map.vcproj +++ b/clipper/sparsehash-2.0.3/vsprojects/time_hash_map/time_hash_map.vcproj @@ -1,188 +1,188 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/clipper/sparsehash-2.0.2/vsprojects/type_traits_unittest/type_traits_unittest.vcproj b/clipper/sparsehash-2.0.3/vsprojects/type_traits_unittest/type_traits_unittest.vcproj similarity index 96% rename from clipper/sparsehash-2.0.2/vsprojects/type_traits_unittest/type_traits_unittest.vcproj rename to clipper/sparsehash-2.0.3/vsprojects/type_traits_unittest/type_traits_unittest.vcproj index 5d9b75b..b74e57d 100755 --- a/clipper/sparsehash-2.0.2/vsprojects/type_traits_unittest/type_traits_unittest.vcproj +++ 
b/clipper/sparsehash-2.0.3/vsprojects/type_traits_unittest/type_traits_unittest.vcproj @@ -1,167 +1,167 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +