diff --git a/Makefile b/Makefile index 858c2e0..a2f182d 100644 --- a/Makefile +++ b/Makefile @@ -100,16 +100,17 @@ SEARCH_FRAGMENTS = $(wildcard search_*_phase*.c) VERSION = $(shell cat VERSION) BITS=32 -# cygwin will stay 32 bit for now. -ifeq (1,$(MINGW)) - # msys will always be 32 bit so look at the cpu arch. - ifneq (,$(findstring AMD64,$(PROCESSOR_ARCHITEW6432))) - BITS=64 - else - ifneq (,$(findstring AMD64,$(PROCESSOR_ARCHITECTURE))) - BITS=64 - endif - endif +ifeq (x86_64,$(shell uname -m)) + BITS=64 +endif +# msys will always be 32 bit so look at the cpu arch instead. +ifneq (,$(findstring AMD64,$(PROCESSOR_ARCHITEW6432))) + ifeq (1,$(MINGW)) + BITS=64 + endif +endif +ifeq (32,$(BITS)) + $(error bowtie2 compilation requires a 64-bit platform ) endif ifeq (1,$(LINUX)) diff --git a/SeqAn-1.1/seqan/index/index_sa_lss.h b/SeqAn-1.1/seqan/index/index_sa_lss.h index cb9c58a..7c7b938 100644 --- a/SeqAn-1.1/seqan/index/index_sa_lss.h +++ b/SeqAn-1.1/seqan/index/index_sa_lss.h @@ -57,7 +57,7 @@ struct _Context_LSS { TValue g; - g=pm-I; /* group number.*/ + g=(TValue)(pm-I); /* group number.*/ V[*pl]=g; /* update group number of first position.*/ if (pl==pm) *pl=-1; /* one element, sorted group.*/ @@ -90,7 +90,7 @@ struct _Context_LSS pa=pb; /* continue sorting rest of the subarray.*/ } if (pa==pn) { /* check if last part is single element.*/ - V[*pa]=pa-I; + V[*pa]=(TValue)(pa-I); *pa=-1; /* sorted group.*/ } } @@ -157,17 +157,17 @@ struct _Context_LSS --pc; } pn=p+n; - if ((s=pa-p)>(t=pb-pa)) + if ((s=(TValue)(pa-p))>(t=(TValue)(pb-pa))) s=t; for (pl=p, pm=pb-s; s; --s, ++pl, ++pm) SEQAN_LSSSWAP(pl, pm); - if ((s=pd-pc)>(t=pn-pd-1)) + if ((s=(TValue)(pd-pc))>(t=(TValue)(pn-pd-1))) s=t; for (pl=pb, pm=pn-s; s; --s, ++pl, ++pm) SEQAN_LSSSWAP(pl, pm); - s=pb-pa; - t=pd-pc; + s=(TValue)(pb-pa); + t=(TValue)(pd-pc); if (s>0) sort_split(p, s); update_group(p+s, p+n-t-1); @@ -197,13 +197,13 @@ struct _Context_LSS for (pi=p+k-1, i=n; pi>=p; --pi) { d=x[c=*pi]; /* c is 
position, d is next in list.*/ x[c]=g=i; /* last position equals group number.*/ - if (d>=0) { /* if more than one element in group.*/ + if (d == 0 || d > 0) { /* if more than one element in group.*/ p[i--]=c; /* p is permutation for the sorted x.*/ do { d=x[c=d]; /* next in linked list.*/ x[c]=g; /* group number in x.*/ p[i--]=c; /* permutation in p.*/ - } while (d>=0); + } while (d == 0 || d > 0); } else p[i--]=-1; /* one element, sorted group.*/ } @@ -305,7 +305,7 @@ struct _Context_LSS pi=I; /* pi is first position of group.*/ sl=0; /* sl is negated length of sorted groups.*/ do { - if ((s=*pi)<0) { + if ((s=*pi) <= 0 && (s=*pi) != 0) { pi-=s; /* skip over sorted group.*/ sl+=s; /* add negated length to sl.*/ } else { @@ -314,7 +314,7 @@ struct _Context_LSS sl=0; } pk=I+V[s]+1; /* pk-1 is last position of unsorted group.*/ - sort_split(pi, pk-pi); + sort_split(pi, (TValue)(pk-pi)); pi=pk; /* next group.*/ } } while (pi<=I+n); diff --git a/aligner.h b/aligner.h index 93903e2..0eff0ff 100644 --- a/aligner.h +++ b/aligner.h @@ -944,7 +944,7 @@ class PairedBWAlignerV1 : public Aligner { patsrc_->bufa().qual) : (off1 ? patsrc_->bufb().qualRev : patsrc_->bufa().qualRev); - uint32_t qlen = seqan::length(seq); // length of outstanding mate + uint32_t qlen = (uint32_t)seqan::length(seq); // length of outstanding mate uint32_t alen = (off1 ? patsrc_->bufa().length() : patsrc_->bufb().length()); int minins = minInsert_; @@ -1894,7 +1894,7 @@ class PairedBWAlignerV2 : public Aligner { patsrc_->bufa().qual) : (range.mate1 ? patsrc_->bufb().qualRev : patsrc_->bufa().qualRev); - uint32_t qlen = seqan::length(seq); // length of outstanding mate + uint32_t qlen = (uint32_t)seqan::length(seq); // length of outstanding mate uint32_t alen = (range.mate1 ? 
patsrc_->bufa().length() : patsrc_->bufb().length()); int minins = minInsert_; diff --git a/binary_sa_search.h b/binary_sa_search.h index 8de0328..fd754ce 100644 --- a/binary_sa_search.h +++ b/binary_sa_search.h @@ -30,7 +30,7 @@ TIndexOffU binarySASearch(const TStr& host, const String& sa) { TIndexOffU lLcp = 0, rLcp = 0; // greatest observed LCPs on left and right - TIndexOffU l = 0, r = length(sa)+1; // binary-search window + TIndexOffU l = 0, r = (TIndexOffU)length(sa)+1; // binary-search window TIndexOffU hostLen = TIndexOffU(length(host)); while(true) { assert_gt(r, l); diff --git a/blockwise_sa.h b/blockwise_sa.h index 929e504..2dcf765 100644 --- a/blockwise_sa.h +++ b/blockwise_sa.h @@ -25,19 +25,19 @@ using namespace seqan; // Helpers for printing verbose messages #ifndef VMSG_NL -#define VMSG_NL(args...) \ +#define VMSG_NL(...) \ if(this->verbose()) { \ stringstream tmp; \ - tmp << args << endl; \ + tmp << __VA_ARGS__ << endl; \ this->verbose(tmp.str()); \ } #endif #ifndef VMSG -#define VMSG(args...) \ +#define VMSG(...) 
\ if(this->verbose()) { \ stringstream tmp; \ - tmp << args; \ + tmp << __VA_ARGS__; \ this->verbose(tmp.str()); \ } #endif @@ -49,7 +49,7 @@ template class BlockwiseSA { public: BlockwiseSA(const TStr& __text, - TIndexOffU __bucketSz, + TIndexOffU __bucketSz, bool __sanityCheck = false, bool __passMemExc = false, bool __verbose = false, @@ -130,7 +130,7 @@ class BlockwiseSA { bool sanityCheck() const { return _sanityCheck; } bool verbose() const { return _verbose; } ostream& log() const { return _logger; } - uint32_t size() const { return length(_text)+1; } + size_t size() const { return length(_text)+1; } protected: /// Reset back to the first block @@ -148,7 +148,7 @@ class BlockwiseSA { /// Optionally output a verbose message void verbose(const string& s) const { if(this->verbose()) { - this->log() << s; + this->log() << s.c_str(); this->log().flush(); } } @@ -172,7 +172,7 @@ template class InorderBlockwiseSA : public BlockwiseSA { public: InorderBlockwiseSA(const TStr& __text, - TIndexOffU __bucketSz, + TIndexOffU __bucketSz, bool __sanityCheck = false, bool __passMemExc = false, bool __verbose = false, @@ -191,7 +191,7 @@ class KarkkainenBlockwiseSA : public InorderBlockwiseSA { typedef DifferenceCoverSample TDC; KarkkainenBlockwiseSA(const TStr& __text, - TIndexOffU __bucketSz, + TIndexOffU __bucketSz, uint32_t __dcV, uint32_t __seed = 0, bool __sanityCheck = false, @@ -288,15 +288,15 @@ class KarkkainenBlockwiseSA : public InorderBlockwiseSA { * Defined in blockwise_sa.cpp */ inline bool tieBreakingLcp(TIndexOffU aOff, - TIndexOffU bOff, - TIndexOffU& lcp, + TIndexOffU bOff, + TIndexOffU& lcp, bool& lcpIsSoft); /** * Compare two suffixes using the difference-cover sample. 
*/ inline bool suffixCmp(TIndexOffU cmp, - TIndexOffU i, + TIndexOffU i, int64_t& j, int64_t& k, bool& kSoft, @@ -320,8 +320,8 @@ void KarkkainenBlockwiseSA::qsort(String& bucket) { typedef typename Value::Type TAlphabet; const TStr& t = this->text(); TIndexOffU *s = begin(bucket); - uint32_t slen = seqan::length(bucket); - TIndexOffU len = seqan::length(t); + TIndexOffU slen = (TIndexOffU)seqan::length(bucket); + TIndexOffU len = (TIndexOffU)seqan::length(t); if(_dc != NULL) { // Use the difference cover as a tie-breaker if we have it VMSG_NL(" (Using difference cover)"); @@ -350,8 +350,8 @@ template<> void KarkkainenBlockwiseSA > >::qsort(String& bucket) { const String >& t = this->text(); TIndexOffU *s = begin(bucket); - uint32_t slen = (uint32_t)seqan::length(bucket); - TIndexOffU len = seqan::length(t); + TIndexOffU slen = (TIndexOffU)seqan::length(bucket); + TIndexOffU len = (TIndexOffU)seqan::length(t); if(_dc != NULL) { // Use the difference cover as a tie-breaker if we have it VMSG_NL(" (Using difference cover)"); @@ -555,7 +555,7 @@ void KarkkainenBlockwiseSA::buildSamples() { // VMSG_NL("Iterated too many times; trying again..."); // buildSamples(); // } - VMSG_NL("Avg bucket size: " << ((float)(len-length(_sampleSuffs)) / (length(_sampleSuffs)+1)) << " (target: " << bsz << ")"); + VMSG_NL("Avg bucket size: " << ((double)(len-length(_sampleSuffs)) / (length(_sampleSuffs)+1)) << " (target: " << bsz << ")"); } /** @@ -579,8 +579,8 @@ static TIndexOffU suffixLcp(const T& t, TIndexOffU aOff, TIndexOffU bOff) { */ template inline bool KarkkainenBlockwiseSA::tieBreakingLcp(TIndexOffU aOff, - TIndexOffU bOff, - TIndexOffU& lcp, + TIndexOffU bOff, + TIndexOffU& lcp, bool& lcpIsSoft) { const TStr& t = this->text(); @@ -619,10 +619,11 @@ bool KarkkainenBlockwiseSA::tieBreakingLcp(TIndexOffU aOff, * filled in then calculate it from scratch. 
*/ template -static TIndexOffU lookupSuffixZ(const T& t, - TIndexOffU zOff, - TIndexOffU off, - const String& z) +static TIndexOffU lookupSuffixZ( + const T& t, + TIndexOffU zOff, + TIndexOffU off, + const String& z) { if(zOff < length(z)) { TIndexOffU ret = z[zOff]; @@ -638,12 +639,13 @@ static TIndexOffU lookupSuffixZ(const T& t, * false -> i > cmp */ template inline -bool KarkkainenBlockwiseSA::suffixCmp(TIndexOffU cmp, - TIndexOffU i, - int64_t& j, - int64_t& k, - bool& kSoft, - const String& z) +bool KarkkainenBlockwiseSA::suffixCmp( + TIndexOffU cmp, + TIndexOffU i, + int64_t& j, + int64_t& k, + bool& kSoft, + const String& z) { const TStr& t = this->text(); TIndexOffU len = TIndexOffU(length(t)); diff --git a/bowtie_build_main.cpp b/bowtie_build_main.cpp index e4252f8..402ac1b 100644 --- a/bowtie_build_main.cpp +++ b/bowtie_build_main.cpp @@ -43,7 +43,7 @@ int main(int argc, const char **argv) { myargs[i] = args[i].c_str(); } if(args.size() == 1) continue; - lastret = bowtie_build(args.size(), myargs); + lastret = bowtie_build((int)args.size(), myargs); free(myargs); } if(lastret == -1) { diff --git a/bowtie_inspect.cpp b/bowtie_inspect.cpp index bb96fce..c47d83d 100644 --- a/bowtie_inspect.cpp +++ b/bowtie_inspect.cpp @@ -397,7 +397,7 @@ void print_index_summary( cout << "FTab-Chars" << '\t' << ebwt.eh().ftabChars() << endl; for(size_t i = 0; i < ebwt.nPat(); i++) { cout << "Sequence-" << (i+1) - << '\t' << p_refnames[refs.expandIdx(i)] + << '\t' << p_refnames[refs.expandIdx((uint32_t)i)] << '\t' << (ebwt.plen()[i] + (color ? 
1 : 0)) << endl; } diff --git a/bowtie_main.cpp b/bowtie_main.cpp index 80678d0..f2468cb 100644 --- a/bowtie_main.cpp +++ b/bowtie_main.cpp @@ -43,7 +43,7 @@ int main(int argc, const char **argv) { myargs[i] = args[i].c_str(); } if(args.size() == 1) continue; - lastret = bowtie(args.size(), myargs); + lastret = bowtie((int)args.size(), myargs); free(myargs); } if(lastret == -1) { diff --git a/color_dec.cpp b/color_dec.cpp index 947f4a2..9386ff5 100644 --- a/color_dec.cpp +++ b/color_dec.cpp @@ -190,7 +190,7 @@ void decodeHit( assert_geq(readc, 0); omin = INT_MAX; // t <- index of column in dynamic programming table - t = c - readi + 1; + t = (int)(c - readi + 1); const int refc = ref[refi + t]; int from[] = { table[0][4][t-1], table[1][4][t-1], table[2][4][t-1], table[3][4][t-1] }; diff --git a/diff_sample.h b/diff_sample.h index be81465..8762cd1 100644 --- a/diff_sample.h +++ b/diff_sample.h @@ -14,19 +14,19 @@ using namespace std; using namespace seqan; #ifndef VMSG_NL -#define VMSG_NL(args...) \ +#define VMSG_NL(...) \ if(this->verbose()) { \ stringstream tmp; \ - tmp << args << endl; \ + tmp << __VA_ARGS__ << endl; \ this->verbose(tmp.str()); \ } #endif #ifndef VMSG -#define VMSG(args...) \ +#define VMSG(...) 
\ if(this->verbose()) { \ stringstream tmp; \ - tmp << args; \ + tmp << __VA_ARGS__; \ this->verbose(tmp.str()); \ } #endif @@ -496,7 +496,7 @@ template static unsigned int myLog2(T i) { assert_eq(1, popCount(i)); // must be power of 2 for(size_t j = 0; j < sizeof(T)*8; j++) { - if(i & 1) return j; + if(i & 1) return (int)j; i >>= 1; } assert(false); @@ -521,7 +521,7 @@ class DifferenceCoverSample { _sanity(__sanity), _ds(getDiffCover(_v, _verbose, _sanity)), _dmap(getDeltaMap(_v, _ds)), - _d(length(_ds)), + _d((uint32_t)length(_ds)), _doffs(), _isaPrime(), _dInv(), @@ -533,7 +533,10 @@ class DifferenceCoverSample { assert_eq(1, popCount(_v)); // must be power of 2 // Build map from d's to idx's fill(_dInv, _v, 0xffffffff, Exact()); - for(size_t i = 0; i < length(_ds); i++) _dInv[_ds[i]] = i; + uint32_t lim = (uint32_t)length(_ds); + for(uint32_t i = 0; i < lim; i++) { + _dInv[_ds[i]] = i; + } } /** @@ -600,7 +603,7 @@ class DifferenceCoverSample { void verbose(const string& s) const { if(this->verbose()) { - this->log() << s; + this->log() << s.c_str(); this->log().flush(); } } @@ -625,10 +628,10 @@ class DifferenceCoverSample { * string 'host' are identical up to depth 'v'. 
*/ template -static inline bool suffixLt(const TStr& host, uint32_t suf1, uint32_t suf2) { - uint32_t hlen = length(host); +static inline bool suffixLt(const TStr& host, TIndexOffU suf1, TIndexOffU suf2) { + TIndexOffU hlen = (TIndexOffU)length(host); assert_neq(suf1, suf2); - uint32_t i = 0; + TIndexOffU i = 0; while(suf1 + i < hlen && suf2 + i < hlen) { if(host[suf1+i] < host[suf2+i]) return true; if(host[suf1+i] > host[suf2+i]) return false; @@ -652,7 +655,7 @@ void DifferenceCoverSample::doBuiltSanityCheck() const { assert(built()); VMSG_NL(" Doing sanity check"); TIndexOffU added = 0; - String sorted; + String sorted; fill(sorted, length(_isaPrime), OFF_MASK, Exact()); for(size_t di = 0; di < this->d(); di++) { uint32_t d = _ds[di]; @@ -682,7 +685,7 @@ template void DifferenceCoverSample::buildSPrime(String& sPrime) { const TStr& t = this->text(); const String& ds = this->ds(); - TIndexOffU tlen = length(t); + TIndexOffU tlen = (TIndexOffU)length(t); uint32_t v = this->v(); uint32_t d = this->d(); assert_gt(v, 2); @@ -702,7 +705,7 @@ void DifferenceCoverSample::buildSPrime(String& sPrime) { sPrimeSz += sz; } appendValue(_doffs, sPrimeSz); - #ifndef NDEBUG +#ifndef NDEBUG if(tlenDivV > 0) { for(size_t i = 0; i < d; i++) { assert_gt(_doffs[i+1], _doffs[i]); @@ -710,7 +713,7 @@ void DifferenceCoverSample::buildSPrime(String& sPrime) { assert(diff == tlenDivV || diff == tlenDivV+1); } } - #endif +#endif assert_eq(length(_doffs), d+1); // Size sPrime appropriately reserve(sPrime, sPrimeSz+1, Exact()); // reserve extra slot for LS @@ -721,7 +724,7 @@ void DifferenceCoverSample::buildSPrime(String& sPrime) { TIndexOffU i = 0; for(uint64_t ti = 0; ti <= tlen; ti += v) { for(uint32_t di = 0; di < d; di++) { - TIndexOffU tti = ti + ds[di]; + TIndexOffU tti = (TIndexOffU)(ti + ds[di]); if(tti > tlen) break; TIndexOffU spi = _doffs[di] + i; assert_lt(spi, _doffs[di+1]); @@ -740,10 +743,11 @@ void DifferenceCoverSample::buildSPrime(String& sPrime) { * string 'host' are 
identical up to depth 'v'. */ template -static inline bool suffixSameUpTo(const TStr& host, - TIndexOffU suf1, - TIndexOffU suf2, - TIndexOffU v) +static inline bool suffixSameUpTo( + const TStr& host, + TIndexOffU suf1, + TIndexOffU suf2, + TIndexOffU v) { for(TIndexOffU i = 0; i < v; i++) { bool endSuf1 = suf1+i >= length(host); @@ -780,7 +784,7 @@ void DifferenceCoverSample::build() { String sPrimeOrder; reserve(sPrimeOrder, length(sPrime)+1, Exact()); // reserve extra slot for LS resize(sPrimeOrder, length(sPrime), Exact()); - for(size_t i = 0; i < length(sPrimeOrder); i++) { + for(TIndexOffU i = 0; i < (TIndexOffU)length(sPrimeOrder); i++) { sPrimeOrder[i] = i; } // sPrime now holds suffix-offsets for DC samples. @@ -849,7 +853,13 @@ void DifferenceCoverSample::build() { { Timer timer(cout, " Invoking Larsson-Sadakane on ranks time: ", this->verbose()); VMSG_NL(" Invoking Larsson-Sadakane on ranks"); - createSuffixArray(sPrime, _isaPrime, LarssonSadakane(), length(_isaPrime)); + _Context_LSS c; + c.suffixsort( + (TIndexOff*)begin(_isaPrime, Standard()), + (TIndexOff*)begin(sPrime, Standard()), + length(sPrime) - 1, + (unsigned)length(_isaPrime), + 0); } // sPrime now contains the suffix array (which we ignore) assert_eq(length(_isaPrime), length(sPrime)); diff --git a/ebwt.h b/ebwt.h index e62426f..14a3e66 100644 --- a/ebwt.h +++ b/ebwt.h @@ -1443,8 +1443,8 @@ class EbwtSearchParams { hit.quals[destpos] = '!'; } if(nmm[i] != 'M') { - uint32_t off = i - (colExEnds? 1:0); - if(!_fw) off = nqlen - off - 1; + uint32_t off = (uint32_t)i - (colExEnds? 
1:0); + if(!_fw) off = (uint32_t)nqlen - off - 1; assert_lt(off, nqlen); hit.mms.set(off); hit.refcs[off] = "ACGT"[ref->getBase(h.first, h.second+i)]; @@ -1493,7 +1493,7 @@ class EbwtSearchParams { assert_neq(4, (int)_texts[h.first][h.second + i]); // Forward pattern appears at h if((int)hit.patSeq[i] != (int)_texts[h.first][h.second + i]) { - uint32_t qoff = i; + uint32_t qoff = (uint32_t)i; // if ebwtFw != _fw the 3' end is on on the // left end of the pattern, but the diff vector // should encode mismatches w/r/t the 5' end, diff --git a/ebwt_search.cpp b/ebwt_search.cpp index 136a8ae..946e671 100644 --- a/ebwt_search.cpp +++ b/ebwt_search.cpp @@ -1012,39 +1012,6 @@ static const char *argv0 = NULL; } \ skipped = false; -static inline void finishReadWithHitmask(PatternSourcePerThread* p, - HitSinkPerThread* sink, - SyncBitset& hitMask, - bool r, - bool& skipped) -{ - /* Don't do finishRead if the read isn't legit */ - if(!p->empty()) { - /* r = whether to consider reporting the read as unaligned */ - bool reportUnAl = r; - if(reportUnAl) { - /* If the done-mask already shows the read as done, */ - /* then we already reported the unaligned read and */ - /* should refrain from re-reporting*/ - reportUnAl = !skipped; - if(reportUnAl) { - /* If there hasn't been a hit reported, then report */ - /* read as unaligned */ - reportUnAl = !hitMask.test(p->patid()); - } - } - if(sink->finishRead(*p, true, reportUnAl) > 0) { - /* We reported a hit for the read, so we set the */ - /* appropriate bit in the hitMask to prevent it from */ - /* being reported as unaligned. */ - if(!reportUnAl && sink->dumpsReads()) { - hitMask.setOver(p->patid()); - } - } - } - skipped = false; -} - /// Macro for getting the next read, possibly aborting depending on /// whether the result is empty or the patid exceeds the limit, and /// marshaling the read into convenient variables. 
@@ -2089,7 +2056,7 @@ static void seededQualSearchWorkerFull(void *vp) { while(true) { FINISH_READ(patsrc); GET_READ(patsrc); - size_t plen = length(patFw); + uint32_t plen = (uint32_t)length(patFw); uint32_t s = seedLen; uint32_t s3 = (s >> 1); /* length of 3' half of seed */ uint32_t s5 = (s >> 1) + (s & 1); /* length of 5' half of seed */ diff --git a/ebwt_search_backtrack.h b/ebwt_search_backtrack.h index c2945d4..3766874 100644 --- a/ebwt_search_backtrack.h +++ b/ebwt_search_backtrack.h @@ -220,7 +220,7 @@ class GreedyDFSRangeSource { */ void setQlen(uint32_t qlen) { assert(_qry != NULL); - _qlen = min(length(*_qry), qlen); + _qlen = min((uint32_t)length(*_qry), qlen); } /// Return the maximum number of allowed backtracks in a given call @@ -254,7 +254,7 @@ class GreedyDFSRangeSource { bool ret; // m = depth beyond which ftab must not extend or else we might // miss some legitimate paths - uint32_t m = min(_unrevOff, _qlen); + uint32_t m = min(_unrevOff, (uint32_t)_qlen); if(nsInFtab == 0 && m >= (uint32_t)ftabChars) { uint32_t ftabOff = calcFtabOff(); TIndexOffU top = ebwt.ftabHi(ftabOff); @@ -313,7 +313,7 @@ class GreedyDFSRangeSource { if(_partialsBuf.size() > 0) { #ifndef NDEBUG for(size_t i = 0; i < _partialsBuf.size(); i++) { - assert(_partialsBuf[i].repOk(_qualThresh, _qlen, (*_qual), _maqPenalty)); + assert(_partialsBuf[i].repOk(_qualThresh, (uint32_t)_qlen, (*_qual), _maqPenalty)); } #endif _partials->addPartials(_params.patId(), _partialsBuf); @@ -456,7 +456,7 @@ class GreedyDFSRangeSource { // eligible uint8_t lowAltQual = 0xff; uint32_t d = depth; - uint32_t cur = _qlen - d - 1; // current offset into _qry + uint32_t cur = (uint32_t)_qlen - d - 1; // current offset into _qry while(cur < _qlen) { // Try to advance further given that if(_verbose) { @@ -696,7 +696,7 @@ class GreedyDFSRangeSource { uint32_t loHalfMms = 0, hiHalfMms = 0; assert_geq(_mms.size(), stackDepth); for(size_t i = 0; i < stackDepth; i++) { - uint32_t d = _qlen - _mms[i] - 1; + 
uint32_t d = (uint32_t)_qlen - _mms[i] - 1; if (d < _5depth) hiHalfMms++; else if(d < _3depth) loHalfMms++; else assert(false); @@ -773,7 +773,7 @@ class GreedyDFSRangeSource { // Walk from left to right for(; i >= depth; i--) { assert_geq(i, unrevOff); - icur = _qlen - i - 1; // current offset into _qry + icur = (uint32_t)(_qlen - i - 1); // current offset into _qry uint8_t qi = qualAt(icur); assert_lt(elims[i], 16); if((qi == lowAltQual || !_considerQuals) && elims[i] != 15) { @@ -802,7 +802,7 @@ class GreedyDFSRangeSource { bttop = pairTop(pairs, i, j); btbot = pairBot(pairs, i, j); btham += mmPenalty(_maqPenalty, qi); - btcint = j; + btcint = (uint32_t)j; btchar = "acgt"[j]; assert_leq(btham, _qualThresh); break; // found our target; we can stop @@ -841,7 +841,7 @@ class GreedyDFSRangeSource { SideLocus::initFromTopBot(bttop, btbot, ebwt._eh, ebwt._ebwt, _preLtop, _preLbot); - icur = _qlen - i - 1; // current offset into _qry + icur = (uint32_t)(_qlen - i - 1); // current offset into _qry // Slide over to the next backtacking frame within // pairs and elims; won't interfere with our frame or // any of our parents' frames @@ -919,7 +919,7 @@ class GreedyDFSRangeSource { // so we can go ahead and use it // Rightmost char gets least significant bit-pairs int ftabChars = ebwt._eh._ftabChars; - TIndexOffU ftabOff = (*_qry)[_qlen - ftabChars]; + TIndexOffU ftabOff = (TIndexOffU)(int)(*_qry)[_qlen - ftabChars]; assert_lt(ftabOff, 4); assert_lt(ftabOff, ebwt._eh._ftabLen-1); for(int j = ftabChars - 1; j > 0; j--) { @@ -927,8 +927,8 @@ class GreedyDFSRangeSource { if(_qlen-j == icur) { ftabOff |= btcint; } else { - assert_lt((TIndexOffU)(*_qry)[_qlen-j], 4); - ftabOff |= (TIndexOffU)(*_qry)[_qlen-j]; + assert_lt((int)(*_qry)[_qlen-j], 4); + ftabOff |= (int)(*_qry)[_qlen-j]; } assert_lt(ftabOff, ebwt._eh._ftabLen-1); } @@ -961,7 +961,7 @@ class GreedyDFSRangeSource { assert_leq(iham, _qualThresh); // Continue from selected alternative range ret = 
backtrack(stackDepth+1,// added 1 mismatch to alignment - i+1, // start from next position after + (uint32_t)i+1, // start from next position after btUnrevOff, // new unrevisitable boundary btOneRevOff, // new 1-revisitable boundary btTwoRevOff, // new 2-revisitable boundary @@ -1042,7 +1042,7 @@ class GreedyDFSRangeSource { // recalculate them eligibleNum = 0; eligibleSz = 0; - eli = k; + eli = (uint32_t)k; eltop = pairTop(pairs, k, l); elbot = pairBot(pairs, k, l); assert_eq(elbot-eltop, spread); @@ -1141,7 +1141,7 @@ class GreedyDFSRangeSource { // Count the mismatches in the lo and hi halves uint32_t loHalfMms = 0, hiHalfMms = 0; for(size_t i = 0; i < stackDepth; i++) { - uint32_t depth = _qlen - mms[i] - 1; + uint32_t depth = (uint32_t)(_qlen - mms[i] - 1); if (depth < _5depth) hiHalfMms++; else if(depth < _3depth) loHalfMms++; else assert(false); @@ -1249,7 +1249,7 @@ class GreedyDFSRangeSource { int loHalfMms = 0, hiHalfMms = 0; assert_geq(mms.size(), stackDepth); for(size_t i = 0; i < stackDepth; i++) { - TIndexOffU d = _qlen - mms[i] - 1; + TIndexOffU d = (TIndexOffU)(_qlen - mms[i] - 1); if (d < _5depth) hiHalfMms++; else if(d < _3depth) loHalfMms++; else assert(false); @@ -1503,7 +1503,7 @@ class GreedyDFSRangeSource { cost |= (stratum << 14); assert_geq(cost, (uint32_t)(stratum << 14)); // Report the range of full alignments - hit = reportFullAlignment(stackDepth + numMuts, top, bot, stratum, cost); + hit = reportFullAlignment((uint32_t)(stackDepth + numMuts), top, bot, stratum, cost); // Re-apply partial-alignment mutations applyPartialMutations(); assert_eq(tmp, (*_qry)); @@ -1551,8 +1551,8 @@ class GreedyDFSRangeSource { _color, _primer, _trimc, colorExEnds, snpPhred, _refs, _mms, _refcs, stackDepth, ri, top, bot, - _qlen, stratum, cost, _patid, _seed, - _params)) + (uint32_t)_qlen, stratum, cost, _patid, + _seed, _params)) { // Return value of true means that we can stop return true; @@ -1599,7 +1599,7 @@ class GreedyDFSRangeSource { al.entry.pos0 = 
(uint16_t)_mms[0]; // pos ASSERT_ONLY(uint8_t qual0 = mmPenalty(_maqPenalty, phredCharToPhredQual((*_qual)[_mms[0]]))); ASSERT_ONLY(qualTot += qual0); - uint32_t ci = _qlen - _mms[0] - 1; + uint32_t ci = (uint32_t)(_qlen - _mms[0] - 1); // _chars[] is index in terms of RHS-relative depth int c = (int)(Dna5)_chars[ci]; assert_lt(c, 4); @@ -1615,7 +1615,7 @@ class GreedyDFSRangeSource { al.entry.pos1 = (uint16_t)_mms[1]; // pos ASSERT_ONLY(uint8_t qual1 = mmPenalty(_maqPenalty, phredCharToPhredQual((*_qual)[_mms[1]]))); ASSERT_ONLY(qualTot += qual1); - ci = _qlen - _mms[1] - 1; + ci = (uint32_t)(_qlen - _mms[1] - 1); // _chars[] is index in terms of RHS-relative depth c = (int)(Dna5)_chars[ci]; assert_lt(c, 4); @@ -1630,7 +1630,7 @@ class GreedyDFSRangeSource { al.entry.pos2 = (uint16_t)_mms[2]; // pos ASSERT_ONLY(uint8_t qual2 = mmPenalty(_maqPenalty, phredCharToPhredQual((*_qual)[_mms[2]]))); ASSERT_ONLY(qualTot += qual2); - ci = _qlen - _mms[2] - 1; + ci = (uint32_t)(_qlen - _mms[2] - 1); // _chars[] is index in terms of RHS-relative depth c = (int)(Dna5)_chars[ci]; assert_lt(c, 4); @@ -1649,7 +1649,7 @@ class GreedyDFSRangeSource { assert_leq(qualTot, _qualThresh); assert(validPartialAlignment(al)); #ifndef NDEBUG - assert(al.repOk(_qualThresh, _qlen, (*_qual), _maqPenalty)); + assert(al.repOk(_qualThresh, (uint32_t)_qlen, (*_qual), _maqPenalty)); for(size_t i = 0; i < _partialsBuf.size(); i++) { assert(validPartialAlignment(_partialsBuf[i])); assert(!samePartialAlignment(_partialsBuf[i], al)); @@ -1679,7 +1679,7 @@ class GreedyDFSRangeSource { uint32_t cumSz = 0; uint32_t eligiblesVisited = 0; for(; i <= d; i++) { - uint32_t icur = _qlen - i - 1; // current offset into _qry + uint32_t icur = (uint32_t)(_qlen - i - 1); // current offset into _qry uint8_t qi = qualAt(icur); assert_lt(elims[i], 16); if((qi == lowAltQual || !_considerQuals) && elims[i] != 15) { @@ -1931,7 +1931,7 @@ class EbwtRangeSource : public RangeSource { */ void setQlen(uint32_t qlen) { 
assert(qry_ != NULL); - qlen_ = min(length(*qry_), qlen); + qlen_ = min((uint32_t)length(*qry_), qlen); } /** @@ -1985,7 +1985,7 @@ class EbwtRangeSource : public RangeSource { assert_leq(iham, qualLim_); // m = depth beyond which ftab must not extend or else we might // miss some legitimate paths - uint32_t m = min(offRev0_, qlen_); + uint32_t m = min(offRev0_, (uint32_t)qlen_); // Let skipInvalidExact = true if using the ftab would be a // waste because it would jump directly to an alignment we // couldn't use. @@ -2029,7 +2029,7 @@ class EbwtRangeSource : public RangeSource { return; } if(!b->init( - pm.rpool, pm.epool, pm.bpool.lastId(), qlen_, + pm.rpool, pm.epool, pm.bpool.lastId(), (uint32_t)qlen_, offRev0_, offRev1_, offRev2_, offRev3_, 0, ftabChars, icost, iham, top, bot, ebwt._eh, ebwt._ebwt)) @@ -2056,7 +2056,7 @@ class EbwtRangeSource : public RangeSource { assert(pm.empty()); return; } - if(!b->init(pm.rpool, pm.epool, pm.bpool.lastId(), qlen_, + if(!b->init(pm.rpool, pm.epool, pm.bpool.lastId(), (uint32_t)qlen_, offRev0_, offRev1_, offRev2_, offRev3_, 0, 0, icost, iham, 0, 0, ebwt._eh, ebwt._ebwt)) { @@ -2118,7 +2118,7 @@ class EbwtRangeSource : public RangeSource { } } - assert(br->repOk(qlen_)); + assert(br->repOk((uint32_t)qlen_)); ASSERT_ONLY(int stratum = br->cost_ >> 14); // shift the stratum over assert_lt(stratum, 4); @@ -2141,11 +2141,11 @@ class EbwtRangeSource : public RangeSource { // Stop extending this branch because it violates a half- // and-half constraint if(metrics_ != NULL) metrics_->curBacktracks_++; - pm.curtail(br, qlen_, depth3_, qualOrder_); + pm.curtail(br, (uint32_t)qlen_, depth3_, qualOrder_); goto bail; } - cur = qlen_ - depth - 1; // current offset into qry_ + cur = (uint32_t)(qlen_ - depth - 1); // current offset into qry_ if(depth < qlen_) { // Determine whether ranges at this location are candidates // for backtracking @@ -2291,7 +2291,7 @@ class EbwtRangeSource : public RangeSource { // Check whether we've obtained an 
exact alignment when // we've been instructed not to report exact alignments - nedits = br->edits_.size(); + nedits = (uint32_t)br->edits_.size(); invalidExact = (hit && nedits == 0 && !reportExacts_); assert_leq(br->ham_, qualLim_); @@ -2301,7 +2301,7 @@ class EbwtRangeSource : public RangeSource { // This alignment doesn't satisfy the half-and-half // requirements; reject it if(metrics_ != NULL) metrics_->curBacktracks_++; - pm.curtail(br, qlen_, depth3_, qualOrder_); + pm.curtail(br, (uint32_t)qlen_, depth3_, qualOrder_); goto bail; } @@ -2333,7 +2333,7 @@ class EbwtRangeSource : public RangeSource { curRange_.mms.clear(); curRange_.refcs.clear(); for(size_t i = 0; i < nedits; i++) { - curRange_.mms.push_back(qlen_ - br->edits_.get(i).pos - 1); + curRange_.mms.push_back((uint32_t)(qlen_ - br->edits_.get(i).pos - 1)); curRange_.refcs.push_back((char)br->edits_.get(i).chr); } addPartialEdits(); @@ -2349,11 +2349,11 @@ class EbwtRangeSource : public RangeSource { assert(curRange_.repOk()); // Must curtail because we've consumed the whole pattern if(metrics_ != NULL) metrics_->curBacktracks_++; - pm.curtail(br, qlen_, depth3_, qualOrder_); + pm.curtail(br, (uint32_t)qlen_, depth3_, qualOrder_); } else if(empty || cur == 0) { // The branch couldn't be extended further if(metrics_ != NULL) metrics_->curBacktracks_++; - pm.curtail(br, qlen_, depth3_, qualOrder_); + pm.curtail(br, (uint32_t)qlen_, depth3_, qualOrder_); } else { // Extend the branch by one position; no change to its cost // so there's no need to reconsider where it lies in the @@ -2364,7 +2364,7 @@ class EbwtRangeSource : public RangeSource { bail: // Make sure the front element of the priority queue is // extendable (i.e. not curtailed) and then prep it. 
- if(!pm.splitAndPrep(rand_, qlen_, qualLim_, depth3_, + if(!pm.splitAndPrep(rand_, (uint32_t)qlen_, qualLim_, depth3_, qualOrder_, fuzzy_, ebwt_->_eh, ebwt_->_ebwt, ebwt_->_fw)) { @@ -2407,7 +2407,7 @@ class EbwtRangeSource : public RangeSource { if(seedRange_.valid()) { const size_t srSz = seedRange_.mms.size(); for(size_t i = 0; i < srSz; i++) { - curRange_.mms.push_back(qlen_ - seedRange_.mms[i] - 1); + curRange_.mms.push_back((uint32_t)(qlen_ - seedRange_.mms[i] - 1)); curRange_.refcs.push_back(seedRange_.refcs[i]); } curRange_.numMms += srSz; @@ -2423,7 +2423,7 @@ class EbwtRangeSource : public RangeSource { * true. */ bool hhCheck(Branch *b, uint32_t depth, bool empty) { - const uint32_t nedits = b->edits_.size(); + const uint32_t nedits = (uint32_t)b->edits_.size(); ASSERT_ONLY(uint32_t lim3 = (offRev3_ == offRev2_)? 2 : 3); ASSERT_ONLY(uint32_t lim5 = (offRev1_ == offRev0_)? 2 : 1); if((depth == (depth5_-1)) && !empty) { @@ -2477,7 +2477,7 @@ class EbwtRangeSource : public RangeSource { // Crossing from the hi-half into the lo-half ASSERT_ONLY(uint32_t lim3 = (offRev3_ == offRev2_)? 2 : 3); ASSERT_ONLY(uint32_t lim5 = (offRev1_ == offRev0_)? 
2 : 1); - const uint32_t nedits = b->edits_.size(); + const uint32_t nedits = (uint32_t)b->edits_.size(); if(d == depth5_) { assert_leq(nedits, lim5); if(nedits == 0) { @@ -2768,7 +2768,7 @@ class EbwtRangeSourceDriver : uint32_t rev2Off = cextToDepth(rev2Off_, sRight, s, len_); uint32_t rev3Off = cextToDepth(rev3Off_, sRight, s, len_); // Truncate the pattern if necessary - uint32_t qlen = seqan::length(qual); + uint32_t qlen = (uint32_t)seqan::length(qual); if(seed_) { if(len_ > s) { rs_->setQlen(s); diff --git a/ebwt_search_util.h b/ebwt_search_util.h index 5a1e3e2..8adf0c0 100644 --- a/ebwt_search_util.h +++ b/ebwt_search_util.h @@ -181,7 +181,7 @@ class PartialAlignmentManager { */ void addPartials(uint32_t patid, const vector& ps) { if(ps.size() == 0) return; - tthread::lock_guard guard(mutex_m); + tthread::lock_guard guard(mutex_m); size_t origPlSz = _partialsList.size(); // Assert that the entry doesn't exist yet assert(_partialsMap.find(patid) == _partialsMap.end()); @@ -238,7 +238,7 @@ class PartialAlignmentManager { */ void getPartials(uint32_t patid, vector& ps) { assert_eq(0, ps.size()); - tthread::lock_guard guard(mutex_m); + tthread::lock_guard guard(mutex_m); getPartialsUnsync(patid, ps); } @@ -263,7 +263,7 @@ class PartialAlignmentManager { } else { // list assert_eq(1, type); - uint32_t off = al.off.off; + uint32_t off = (uint32_t)al.off.off; do { assert_lt(off, _partialsList.size()); ASSERT_ONLY(type = _partialsList[off].entry.type); @@ -314,7 +314,7 @@ class PartialAlignmentManager { { reserve(muts, 4); assert_eq(0, length(muts)); - uint32_t plen = length(seq); + uint32_t plen = (uint32_t)length(seq); assert_gt(plen, 0); assert_neq(1, pal.unk.type); // Do first mutation diff --git a/hit.cpp b/hit.cpp index 3bcb979..34356fd 100644 --- a/hit.cpp +++ b/hit.cpp @@ -40,7 +40,7 @@ void VerboseHitSink::reportMaxed(vector& hs, PatternSourcePerThread& p) { int strat = min(hs[i].stratum, hs[i+1].stratum); if(strat == bestStratum) { if(num == r) { - 
hs[i].oms = hs[i+1].oms = hs.size()/2; + hs[i].oms = hs[i+1].oms = (uint32_t)(hs.size()/2); reportHits(hs, i, i+2); break; } @@ -57,7 +57,7 @@ void VerboseHitSink::reportMaxed(vector& hs, PatternSourcePerThread& p) { assert_leq(num, hs.size()); uint32_t r = rand.nextU32() % num; Hit& h = hs[r]; - h.oms = hs.size(); + h.oms = (uint32_t)hs.size(); reportHit(h, false); } } @@ -97,7 +97,7 @@ void VerboseHitSink::append(ostream& ss, bool firstfield = true; if(partition != 0) { int pospart = abs(partition); - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; // Output a partitioning key @@ -126,7 +126,7 @@ void VerboseHitSink::append(ostream& ss, // output another alignment for that partition spill = true; } - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; // Print partition id with leading 0s so that Hadoop @@ -144,7 +144,7 @@ void VerboseHitSink::append(ostream& ss, } ss << s2.c_str(); } - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; // Print offset with leading 0s @@ -155,7 +155,7 @@ void VerboseHitSink::append(ostream& ss, } ss << s3; } - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; ss << (h.fw? "+":"-"); @@ -163,17 +163,17 @@ void VerboseHitSink::append(ostream& ss, // end if(partition != 0) } else { assert(!dospill); - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; ss << h.patName; } - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; ss << (h.fw? 
'+' : '-'); } - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; // .first is text id, .second is offset @@ -185,33 +185,33 @@ void VerboseHitSink::append(ostream& ss, ss << h.h.first; } } - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; ss << (h.h.second + offBase); } // end else clause of if(partition != 0) } - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; const String* pat = &h.patSeq; if(h.color && colorSeq) pat = &h.colSeq; ss << *pat; } - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; const String* qual = &h.quals; if(h.color && colorQual) qual = &h.colQuals; ss << *qual; } - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; ss << h.oms; } - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; // Look for SNP annotations falling within the alignment @@ -235,7 +235,7 @@ void VerboseHitSink::append(ostream& ss, } size_t off = ai->first.second - h.h.second; if(!h.fw) off = len - off - 1; - snpAnnots[off] = ai->second.second; + snpAnnots[(int)off] = ai->second.second; } } // Output mismatch column @@ -263,13 +263,13 @@ void VerboseHitSink::append(ostream& ss, } if(partition != 0) { // Fields addded as of Crossbow 0.1.4 - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; ss << (int)h.mate; } // Print label, or whole read name if label isn't found - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; int labelOff = -1; @@ -296,13 +296,13 @@ void VerboseHitSink::append(ostream& 
ss, } if(cost) { // Stratum - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; ss << (int)h.stratum; } // Cost - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; ss << (int)h.cost; @@ -310,7 +310,7 @@ void VerboseHitSink::append(ostream& ss, } if(showSeed) { // Seed - if(!suppress.test(field++)) { + if(!suppress.test((uint32_t)field++)) { if(firstfield) firstfield = false; else ss << '\t'; ss << h.seed; diff --git a/hit.h b/hit.h index a940ab1..bf6d8d3 100644 --- a/hit.h +++ b/hit.h @@ -27,7 +27,6 @@ using namespace std; using namespace seqan; -using tthread::lock_guard; /// Constants for the various output modes enum output_types { @@ -201,7 +200,7 @@ class RecalTable { for(int i = 0; i < (int)h.length(); i++) { int ii = i; if(!h.fw) { - ii = h.length() - ii - 1; + ii = (int)(h.length() - ii - 1); } int qc = (int)h.patSeq[ii]; int rc = qc; @@ -438,7 +437,7 @@ class HitSink { out(h.h.first).writeChars(buf, ss.tellp()); } unlock(hs[end-1].h.first); - lock_guard guard(main_mutex_m); + tthread::lock_guard guard(main_mutex_m); commitHits(hs); first_ = false; numAligned_++; @@ -610,7 +609,7 @@ class HitSink { if(!p.paired() || onePairFile_) { // Dump unpaired read to an aligned-read file of the same format if(!dumpAlBase_.empty()) { - lock_guard guard(dumpAlignLock_); + tthread::lock_guard guard(dumpAlignLock_); if(dumpAl_ == NULL) { assert(dumpAlQv_ == NULL); dumpAl_ = openOf(dumpAlBase_, 0, ""); @@ -629,7 +628,7 @@ class HitSink { // Dump paired-end read to an aligned-read file (or pair of // files) of the same format if(!dumpAlBase_.empty()) { - lock_guard guard(dumpAlignLockPE_); + tthread::lock_guard guard(dumpAlignLockPE_); if(dumpAl_1_ == NULL) { assert(dumpAlQv_1_ == NULL); assert(dumpAlQv_2_ == NULL); @@ -664,7 +663,7 @@ class HitSink { if(!p.paired() || onePairFile_) { // Dump unpaired read to an unaligned-read 
file of the same format if(!dumpUnalBase_.empty()) { - lock_guard guard(dumpUnalLock_); + tthread::lock_guard guard(dumpUnalLock_); if(dumpUnal_ == NULL) { assert(dumpUnalQv_ == NULL); dumpUnal_ = openOf(dumpUnalBase_, 0, ""); @@ -683,7 +682,7 @@ class HitSink { // Dump paired-end read to an unaligned-read file (or pair // of files) of the same format if(!dumpUnalBase_.empty()) { - lock_guard guard(dumpUnalLockPE_); + tthread::lock_guard guard(dumpUnalLockPE_); if(dumpUnal_1_ == NULL) { assert(dumpUnal_1_ == NULL); assert(dumpUnal_2_ == NULL); @@ -719,7 +718,7 @@ class HitSink { if(!p.paired() || onePairFile_) { // Dump unpaired read to an maxed-out-read file of the same format if(!dumpMaxBase_.empty()) { - lock_guard guard(dumpMaxLock_); + tthread::lock_guard guard(dumpMaxLock_); if(dumpMax_ == NULL) { dumpMax_ = openOf(dumpMaxBase_, 0, ""); assert(dumpMax_ != NULL); @@ -736,7 +735,7 @@ class HitSink { // Dump paired-end read to a maxed-out-read file (or pair // of files) of the same format if(!dumpMaxBase_.empty()) { - lock_guard guard(dumpMaxLockPE_); + tthread::lock_guard guard(dumpMaxLockPE_); if(dumpMax_1_ == NULL) { assert(dumpMaxQv_1_ == NULL); assert(dumpMaxQv_2_ == NULL); @@ -764,7 +763,7 @@ class HitSink { * want to print a placeholder when output is chained. */ virtual void reportMaxed(vector& hs, PatternSourcePerThread& p) { - lock_guard guard(main_mutex_m); + tthread::lock_guard guard(main_mutex_m); numMaxed_++; } @@ -773,7 +772,7 @@ class HitSink { * want to print a placeholder when output is chained. 
*/ virtual void reportUnaligned(PatternSourcePerThread& p) { - lock_guard guard(main_mutex_m); + tthread::lock_guard guard(main_mutex_m); numUnaligned_++; } @@ -782,7 +781,7 @@ class HitSink { /// Implementation of hit-report virtual void reportHit(const Hit& h) { assert(h.repOk()); - lock_guard guard(main_mutex_m); + tthread::lock_guard guard(main_mutex_m); commitHit(h); first_ = false; if(h.mate > 0) numReportedPaired_++; @@ -831,7 +830,7 @@ class HitSink { int _numWrappers; /// # threads owning a wrapper for this HitSink vector _locks; /// pthreads mutexes for per-file critical sections MUTEX_T main_mutex_m; /// pthreads mutexes for fields of this object - MUTEX_T numWrapper_mutex_m; + MUTEX_T numWrapper_mutex_m; // Output filenames for dumping std::string dumpAlBase_; @@ -1018,7 +1017,7 @@ class HitSinkPerThread { } _sink.reportHits(_bufferedHits); _sink.dumpAlign(p); - ret = _bufferedHits.size(); + ret = (uint32_t)_bufferedHits.size(); _bufferedHits.clear(); } assert_eq(0, _bufferedHits.size()); @@ -1333,7 +1332,7 @@ class NBestFirstStratHitSinkPerThread : public HitSinkPerThread { for(size_t i = 0; i < sz; i++) { // Set 'oms' according to the number of other alignments // at this stratum - _bufferedHits[i].oms = (sz / mult_) - 1; + _bufferedHits[i].oms = ((uint32_t)sz / mult_) - 1; } return ret; } diff --git a/hit_set.h b/hit_set.h index e6795df..40df01b 100644 --- a/hit_set.h +++ b/hit_set.h @@ -42,13 +42,13 @@ struct HitSetEnt { assert_eq(stratum, (cost >> 14)); fb.writeChars((const char*)&cost, 2); fb.writeChars((const char*)&oms, 4); - uint32_t sz = edits.size(); + uint32_t sz = (uint32_t)edits.size(); fb.writeChars((const char*)&sz, 4); std::vector::const_iterator it; for(it = edits.begin(); it != edits.end(); it++) { it->serialize(fb); } - sz = cedits.size(); + sz = (uint32_t)cedits.size(); fb.writeChars((const char*)&sz, 4); for(it = cedits.begin(); it != cedits.end(); it++) { it->serialize(fb); @@ -242,11 +242,11 @@ struct HitSet { */ void 
serialize(OutFileBuf& fb) const { fb.write(color ? 1 : 0); - uint32_t i = seqan::length(name); + uint32_t i = (uint32_t)seqan::length(name); assert_gt(i, 0); fb.writeChars((const char*)&i, 4); fb.writeChars(seqan::begin(name), i); - i = seqan::length(seq); + i = (uint32_t)seqan::length(seq); assert_gt(i, 0); assert_lt(i, 1024); fb.writeChars((const char*)&i, 4); @@ -254,7 +254,7 @@ struct HitSet { fb.write("ACGTN"[(int)seq[j]]); } fb.writeChars(seqan::begin(qual), i); - i = ents.size(); + i = (uint32_t)ents.size(); fb.writeChars((const char*)&i, 4); std::vector::const_iterator it; for(it = ents.begin(); it != ents.end(); it++) { diff --git a/log.h b/log.h index a1287e5..20e5066 100644 --- a/log.h +++ b/log.h @@ -4,20 +4,18 @@ #include #include "threading.h" -using tthread::lock_guard; - class SyncLogger { public: SyncLogger() { } void msg(const char *s) { - lock_guard guard(mutex_m); + tthread::lock_guard guard(mutex_m); std::cout << s << std::endl; } void msg(const std::string& s) { - lock_guard guard(mutex_m); + tthread::lock_guard guard(mutex_m); std::cout << s << std::endl; } diff --git a/multikey_qsort.h b/multikey_qsort.h index de970de..d28b775 100644 --- a/multikey_qsort.h +++ b/multikey_qsort.h @@ -226,14 +226,15 @@ static inline void vecswap2(TVal* s, size_t slen, TVal* s2, TPos i, TPos j, TPos * (*prior to* swapping the = regions to the center) */ template -bool assertPartitionedSuf(const THost& host, - TIndexOffU *s, - size_t slen, - int hi, - int pivot, - size_t begin, - size_t end, - size_t depth) +bool assertPartitionedSuf( + const THost& host, + TIndexOffU *s, + size_t slen, + int hi, + int pivot, + size_t begin, + size_t end, + size_t depth) { size_t hlen = length(host); int state = 0; // 0 -> 1st = section, 1 -> < section, 2 -> > section, 3 -> 2nd = section @@ -264,14 +265,15 @@ bool assertPartitionedSuf(const THost& host, * (*after* swapping the = regions to the center) */ template -bool assertPartitionedSuf2(const THost& host, - TIndexOffU *s, - 
size_t slen, - int hi, - int pivot, - size_t begin, - size_t end, - size_t depth) +bool assertPartitionedSuf2( + const THost& host, + TIndexOffU *s, + size_t slen, + int hi, + int pivot, + size_t begin, + size_t end, + size_t depth) { size_t hlen = length(host); int state = 0; // 0 -> < section, 1 -> = section, 2 -> > section @@ -297,7 +299,7 @@ bool assertPartitionedSuf2(const THost& host, * 'host' is a seemingly legitimate suffix-offset list (at this time, * we just check that it doesn't list any suffix twice). */ -static void sanityCheckInputSufs(TIndexOffU *s, size_t slen) { +static inline void sanityCheckInputSufs(TIndexOffU *s, size_t slen) { assert_gt(slen, 0); for(size_t i = 0; i < slen; i++) { // Actually, it's convenient to allow the caller to provide @@ -329,17 +331,17 @@ void sanityCheckOrderedSufs(const T& host, // Allow s[i+t] to point off the end of the string; this is // convenient for some callers if(s[i+1] >= hlen) continue; +#ifndef NDEBUG if(upto == OFF_MASK) { assert(dollarLt(suffix(host, s[i]), suffix(host, s[i+1]))); } else { -#ifndef NDEBUG if(prefix(suffix(host, s[i]), upto) > prefix(suffix(host, s[i+1]), upto)) { // operator > treats shorter strings as // lexicographically smaller, but we want to opposite - assert(isPrefix(suffix(host, s[i+1]), suffix(host, s[i]))); + //assert(isPrefix(suffix(host, s[i+1]), suffix(host, s[i]))); } -#endif } +#endif } } @@ -366,15 +368,16 @@ void sanityCheckOrderedSufs(const T& host, * keys share a long prefix. */ template -void mkeyQSortSuf(const T& host, - size_t hlen, - TIndexOffU *s, - size_t slen, - int hi, - size_t begin, - size_t end, - size_t depth, - size_t upto = OFF_MASK) +void mkeyQSortSuf( + const T& host, + size_t hlen, + TIndexOffU *s, + size_t slen, + int hi, + size_t begin, + size_t end, + size_t depth, + size_t upto = OFF_MASK) { // Helper for making the recursive call; sanity-checks arguments to // make sure that the problem actually got smaller. 
@@ -455,13 +458,14 @@ void mkeyQSortSuf(const T& host, * Toplevel function for multikey quicksort over suffixes. */ template -void mkeyQSortSuf(const T& host, - TIndexOffU *s, - size_t slen, - int hi, - bool verbose = false, - bool sanityCheck = false, - size_t upto = OFF_MASK) +void mkeyQSortSuf( + const T& host, + TIndexOffU *s, + size_t slen, + int hi, + bool verbose = false, + bool sanityCheck = false, + size_t upto = OFF_MASK) { size_t hlen = length(host); assert(!empty(s)); @@ -478,16 +482,17 @@ void mkeyQSortSuf(const T& host, * as s and s2[i] = i). */ template -void mkeyQSortSuf2(const T& host, - size_t hlen, - TIndexOffU *s, - size_t slen, - TIndexOffU *s2, - int hi, - size_t begin, - size_t end, - size_t depth, - size_t upto = OFF_MASK) +void mkeyQSortSuf2( + const T& host, + size_t hlen, + TIndexOffU *s, + size_t slen, + TIndexOffU *s2, + int hi, + size_t begin, + size_t end, + size_t depth, + size_t upto = OFF_MASK) { // Helper for making the recursive call; sanity-checks arguments to // make sure that the problem actually got smaller. @@ -570,14 +575,15 @@ void mkeyQSortSuf2(const T& host, * swapping. 
*/ template -void mkeyQSortSuf2(const T& host, - TIndexOffU *s, - size_t slen, - TIndexOffU *s2, - int hi, - bool verbose = false, - bool sanityCheck = false, - size_t upto = OFF_MASK) +void mkeyQSortSuf2( + const T& host, + TIndexOffU *s, + size_t slen, + TIndexOffU *s2, + int hi, + bool verbose = false, + bool sanityCheck = false, + size_t upto = OFF_MASK) { size_t hlen = length(host); if(sanityCheck) sanityCheckInputSufs(s, slen); @@ -592,6 +598,7 @@ void mkeyQSortSuf2(const T& host, for(size_t i = 0; i < slen; i++) { assert_eq(s[i], sOrig[s2[i]]); } + delete[] sOrig; } } @@ -610,12 +617,12 @@ bool sufDcLt(const T1& host, const DifferenceCoverSample& dc, bool sanityCheck = false) { - uint32_t diff = dc.tieBreakOff(s1, s2); + size_t diff = dc.tieBreakOff(s1, s2); assert_lt(diff, dc.v()); assert_lt(diff, length(host)-s1); assert_lt(diff, length(host)-s2); if(sanityCheck) { - for(uint32_t i = 0; i < diff; i++) { + for(size_t i = 0; i < diff; i++) { assert_eq(host[s1+i], host[s2+i]); } } @@ -674,7 +681,7 @@ void qsortSufDc(const T& host, * Toplevel function for multikey quicksort over suffixes. */ template -void mkeyQSortSufDcU8(const T1& seqanHost, +void mkeyQSortSufDcU8(const T1& host1, const T2& host, size_t hlen, TIndexOffU* s, @@ -685,8 +692,8 @@ void mkeyQSortSufDcU8(const T1& seqanHost, bool sanityCheck = false) { if(sanityCheck) sanityCheckInputSufs(s, slen); - mkeyQSortSufDcU8(seqanHost, host, hlen, s, slen, dc, hi, 0, slen, 0, sanityCheck); - if(sanityCheck) sanityCheckOrderedSufs(seqanHost, hlen, s, slen, OFF_MASK); + mkeyQSortSufDcU8(host1, host, hlen, s, slen, dc, hi, 0, slen, 0, sanityCheck); + if(sanityCheck) sanityCheckOrderedSufs(host1, hlen, s, slen, OFF_MASK); } /** @@ -694,7 +701,7 @@ void mkeyQSortSufDcU8(const T1& seqanHost, * cover to break the tie. 
 */ template inline -bool sufDcLtU8(const T1& seqanHost, +bool sufDcLtU8(const T1& host1, const T2& host, size_t hlen, size_t s1, @@ -708,15 +715,15 @@ bool sufDcLtU8(const T1& seqanHost, assert_lt(diff, hlen-s1); assert_lt(diff, hlen-s2); if(sanityCheck) { - for(uint32_t i = 0; i < diff; i++) { - assert_eq(host[s1+i], host[s2+i]); + for(size_t i = 0; i < diff; i++) { + assert_eq(host[s1+i], host[s2+i]); } } - bool ret = dc.breakTie(s1+diff, s2+diff) < 0; + bool ret = dc.breakTie((TIndexOffU)(s1+diff), (TIndexOffU)(s2+diff)) < 0; // Sanity-check return value using dollarLt #ifndef NDEBUG if(sanityCheck && - ret != dollarLt(suffix(seqanHost, s1), suffix(seqanHost, s2))) + ret != dollarLt(suffix(host1, s1), suffix(host1, s2))) { assert(false); } @@ -728,7 +735,7 @@ bool sufDcLtU8(const T1& seqanHost, * k log(k) */ template inline -void qsortSufDcU8(const T1& seqanHost, +void qsortSufDcU8(const T1& host1, const T2& host, size_t hlen, TIndexOffU* s, @@ -752,10 +759,10 @@ void qsortSufDcU8(const T1& seqanHost, SWAP(s, end-1, a); // move pivot to end size_t cur = 0; for(size_t i = begin; i < end-1; i++) { - if(sufDcLtU8(seqanHost, host, hlen, s[i], s[end-1], dc, sanityCheck)) { + if(sufDcLtU8(host1, host, hlen, s[i], s[end-1], dc, sanityCheck)) { #ifndef NDEBUG if(sanityCheck) - assert(dollarLt(suffix(seqanHost, s[i]), suffix(seqanHost, s[end-1]))); + assert(dollarLt(suffix(host1, s[i]), suffix(host1, s[end-1]))); assert_lt(begin + cur, end-1); #endif SWAP(s, i, begin + cur); @@ -765,8 +772,8 @@ void qsortSufDcU8(const T1& seqanHost, // Put pivot into place assert_lt(cur, end-begin); SWAP(s, end-1, begin+cur); - if(begin+cur > begin) qsortSufDcU8(seqanHost, host, hlen, s, slen, dc, begin, begin+cur); - if(end > begin+cur+1) qsortSufDcU8(seqanHost, host, hlen, s, slen, dc, begin+cur+1, end); + if(begin+cur > begin) qsortSufDcU8(host1, host, hlen, s, slen, dc, begin, begin+cur); + if(end > begin+cur+1) qsortSufDcU8(host1, host, hlen, s, slen, dc, begin+cur+1, end); }
#define BUCKET_SORT_CUTOFF (4 * 1024 * 1024) @@ -780,7 +787,7 @@ static TIndexOffU bkts[4][4 * 1024 * 1024]; * works fine as long as TStr is not packed. */ template -inline uint8_t get_uint8(const TStr& t, uint32_t off) { +inline uint8_t get_uint8(const TStr& t, size_t off) { return t[off]; } @@ -790,7 +797,7 @@ inline uint8_t get_uint8(const TStr& t, uint32_t off) { * to Dna then to uint8_t. */ template<> -inline uint8_t get_uint8(const String >& t, uint32_t off) { +inline uint8_t get_uint8(const String >& t, size_t off) { return (uint8_t)(Dna)t[off]; } @@ -800,10 +807,10 @@ inline uint8_t get_uint8(const String >& t, uint32_t off) { */ template static inline int char_at_suf_u8(const TStr& host, - uint32_t hlen, + size_t hlen, TIndexOffU* s, - uint32_t si, - uint32_t off, + size_t si, + size_t off, uint8_t hi) { return ((off+s[si]) < hlen) ? get_uint8(host, off+s[si]) : (hi); @@ -811,7 +818,7 @@ static inline int char_at_suf_u8(const TStr& host, template static void selectionSortSufDcU8( - const T1& seqanHost, + const T1& host1, const T2& host, size_t hlen, TIndexOffU* s, @@ -825,19 +832,19 @@ static void selectionSortSufDcU8( { #define ASSERT_SUF_LT(l, r) \ if(sanityCheck && \ - !dollarLt(suffix(seqanHost, s[l]), \ - suffix(seqanHost, s[r]))) { \ - cout << "l: " << suffixStr(seqanHost, s[l]) << endl; \ - cout << "r: " << suffixStr(seqanHost, s[r]) << endl; \ + !dollarLt(suffix(host1, s[l]), \ + suffix(host1, s[r]))) { \ + cout << "l: " << suffixStr(host1, s[l]) << endl; \ + cout << "r: " << suffixStr(host1, s[r]) << endl; \ assert(false); \ } assert_gt(end, begin+1); assert_leq(end-begin, SELECTION_SORT_CUTOFF); assert_eq(hi, 4); - TIndexOffU v = dc.v(); + size_t v = dc.v(); if(end == begin+2) { - TIndexOffU off = dc.tieBreakOff(s[begin], s[begin+1]); + size_t off = dc.tieBreakOff(s[begin], s[begin+1]); if(off + s[begin] >= hlen || off + s[begin+1] >= hlen) { @@ -845,11 +852,11 @@ static void selectionSortSufDcU8( } if(off != OFF_MASK) { if(off < depth) { - 
qsortSufDcU8(seqanHost, host, hlen, s, slen, dc, + qsortSufDcU8(host1, host, hlen, s, slen, dc, begin, end, sanityCheck); // It's helpful for debugging if we call this here if(sanityCheck) { - sanityCheckOrderedSufs(seqanHost, hlen, s, slen, + sanityCheckOrderedSufs(host1, hlen, s, slen, OFF_MASK, begin, end); } return; @@ -910,14 +917,14 @@ static void selectionSortSufDcU8( // difference cover if(k == lim+1) { assert_neq(j, targ); - if(sufDcLtU8(seqanHost, host, hlen, s[j], s[targ], dc, sanityCheck)) { + if(sufDcLtU8(host1, host, hlen, s[j], s[targ], dc, sanityCheck)) { // j < targ - assert(!sufDcLtU8(seqanHost, host, hlen, s[targ], s[j], dc, sanityCheck)); + assert(!sufDcLtU8(host1, host, hlen, s[targ], s[j], dc, sanityCheck)); ASSERT_SUF_LT(j, targ); targ = j; targoff = joff; } else { - assert(sufDcLtU8(seqanHost, host, hlen, s[targ], s[j], dc, sanityCheck)); + assert(sufDcLtU8(host1, host, hlen, s[targ], s[j], dc, sanityCheck)); ASSERT_SUF_LT(targ, j); // ! } } @@ -934,14 +941,13 @@ static void selectionSortSufDcU8( } } if(sanityCheck) { - sanityCheckOrderedSufs(seqanHost, hlen, s, slen, - OFF_MASK, begin, end); + sanityCheckOrderedSufs(host1, hlen, s, slen, OFF_MASK, begin, end); } } template static void bucketSortSufDcU8( - const T1& seqanHost, + const T1& host1, const T2& host, size_t hlen, TIndexOffU* s, @@ -953,9 +959,9 @@ static void bucketSortSufDcU8( size_t depth, bool sanityCheck = false) { - uint32_t cnts[] = { 0, 0, 0, 0, 0 }; + size_t cnts[] = { 0, 0, 0, 0, 0 }; #define BKT_RECURSE_SUF_DC_U8(nbegin, nend) { \ - bucketSortSufDcU8(seqanHost, host, hlen, s, slen, dc, hi, \ + bucketSortSufDcU8(host1, host, hlen, s, slen, dc, hi, \ (nbegin), (nend), depth+1, sanityCheck); \ } assert_gt(end, begin); @@ -966,21 +972,21 @@ static void bucketSortSufDcU8( // Quicksort the remaining suffixes using difference cover // for constant-time comparisons; this is O(k*log(k)) where // k=(end-begin) - qsortSufDcU8(seqanHost, host, hlen, s, slen, dc, begin, end, 
sanityCheck); + qsortSufDcU8(host1, host, hlen, s, slen, dc, begin, end, sanityCheck); return; } if(end-begin <= SELECTION_SORT_CUTOFF) { // Bucket sort remaining items - selectionSortSufDcU8(seqanHost, host, hlen, s, slen, dc, hi, + selectionSortSufDcU8(host1, host, hlen, s, slen, dc, hi, begin, end, depth, sanityCheck); if(sanityCheck) { - sanityCheckOrderedSufs(seqanHost, hlen, s, slen, + sanityCheckOrderedSufs(host1, hlen, s, slen, OFF_MASK, begin, end); } return; } for(size_t i = begin; i < end; i++) { - uint32_t off = depth+s[i]; + size_t off = depth + s[i]; uint8_t c = (off < hlen) ? get_uint8(host, off) : hi; assert_leq(c, 4); if(c == 0) { @@ -1034,7 +1040,7 @@ static void bucketSortSufDcU8( * bounds the depth to which the function sorts. */ template -void mkeyQSortSufDcU8(const T1& seqanHost, +void mkeyQSortSufDcU8(const T1& host1, const T2& host, size_t hlen, TIndexOffU* s, @@ -1050,7 +1056,7 @@ void mkeyQSortSufDcU8(const T1& seqanHost, // make sure that the problem actually got smaller. 
#define MQS_RECURSE_SUF_DC_U8(nbegin, nend, ndepth) { \ assert(nbegin > begin || nend < end || ndepth > depth); \ - mkeyQSortSufDcU8(seqanHost, host, hlen, s, slen, dc, hi, nbegin, nend, ndepth, sanityCheck); \ + mkeyQSortSufDcU8(host1, host, hlen, s, slen, dc, hi, nbegin, nend, ndepth, sanityCheck); \ } assert_leq(begin, slen); assert_leq(end, slen); @@ -1060,18 +1066,18 @@ void mkeyQSortSufDcU8(const T1& seqanHost, // Quicksort the remaining suffixes using difference cover // for constant-time comparisons; this is O(k*log(k)) where // k=(end-begin) - qsortSufDcU8(seqanHost, host, hlen, s, slen, dc, begin, end, sanityCheck); + qsortSufDcU8(host1, host, hlen, s, slen, dc, begin, end, sanityCheck); if(sanityCheck) { - sanityCheckOrderedSufs(seqanHost, hlen, s, slen, OFF_MASK, begin, end); + sanityCheckOrderedSufs(host1, hlen, s, slen, OFF_MASK, begin, end); } return; } if(n <= BUCKET_SORT_CUTOFF) { // Bucket sort remaining items - bucketSortSufDcU8(seqanHost, host, hlen, s, slen, dc, + bucketSortSufDcU8(host1, host, hlen, s, slen, dc, (uint8_t)hi, begin, end, depth, sanityCheck); if(sanityCheck) { - sanityCheckOrderedSufs(seqanHost, hlen, s, slen, OFF_MASK, begin, end); + sanityCheckOrderedSufs(host1, hlen, s, slen, OFF_MASK, begin, end); } return; } @@ -1105,14 +1111,12 @@ void mkeyQSortSufDcU8(const T1& seqanHost, // Invariant: everything after d is = pivot, everything // between c and d is > int cc = 0; // shouldn't have to init but gcc on Mac complains - bool hiLatch = true; + //bool hiLatch = true; while(b <= c && v <= (cc = CHAR_AT_SUF_U8(c, depth))) { if(v == cc) { SWAP(s, c, d); d--; } - else if(hiLatch && cc == hi) { - - } + //else if(hiLatch && cc == hi) { } c--; } if(b > c) break; @@ -1123,10 +1127,8 @@ void mkeyQSortSufDcU8(const T1& seqanHost, assert(a > begin || c < end-1); // there was at least one =s assert_lt(d-c, n); // they can't all have been > pivot assert_lt(b-a, n); // they can't all have been < pivot - //assert(assertPartitionedSuf(host, s, 
slen, hi, v, begin, end, depth)); // check pre-=-swap invariant r = min(a-begin, b-a); VECSWAP(s, begin, b-r, r); // swap left = to center r = min(d-c, end-d-1); VECSWAP(s, b, end-r, r); // swap right = to center - //assert(assertPartitionedSuf2(host, s, slen, hi, v, begin, end, depth)); // check post-=-swap invariant r = b-a; // r <- # of <'s if(r > 0) { MQS_RECURSE_SUF_DC_U8(begin, begin + r, depth); // recurse on <'s diff --git a/pat.h b/pat.h index 8e36de6..ed52b13 100644 --- a/pat.h +++ b/pat.h @@ -164,7 +164,7 @@ struct ReadBuf { /// Return length of the read in the buffer uint32_t length() const { - return seqan::length(patFw); + return (uint32_t)seqan::length(patFw); } /** @@ -1084,7 +1084,7 @@ class RandomPatternSource : public PatternSource { return; } uint32_t ra = rand_.nextU32(); - patid = readCnt_; + patid = (uint32_t)readCnt_; readCnt_++; unlock(); fillRandomRead(r, ra, length_, patid); @@ -1102,7 +1102,7 @@ class RandomPatternSource : public PatternSource { } uint32_t rna = rand_.nextU32(); uint32_t rnb = rand_.nextU32(); - patid = readCnt_; + patid = (uint32_t)readCnt_; readCnt_++; unlock(); fillRandomRead(ra, rna, length_, patid); @@ -1422,7 +1422,7 @@ class VectorPatternSource : public TrimmingPatternSource { r.name = os.str(); cur_++; readCnt_++; - patid = readCnt_; + patid = (uint32_t)readCnt_; unlock(); } /** @@ -1464,7 +1464,7 @@ class VectorPatternSource : public TrimmingPatternSource { ra.color = rb.color = color_; cur_++; readCnt_++; - patid = readCnt_; + patid = (uint32_t)readCnt_; unlock(); } virtual void reset() { @@ -1884,7 +1884,7 @@ class FastaPatternSource : public BufferedFilePatternSource { fb_.resetLastN(); // Count the read readCnt_++; - patid = readCnt_-1; + patid = (uint32_t)readCnt_-1; return; } } @@ -1892,14 +1892,14 @@ class FastaPatternSource : public BufferedFilePatternSource { _setLength(r.qual, dstLen); // Set up a default name if one hasn't been set if(nameLen == 0) { - itoa10(readCnt_, r.nameBuf); + 
itoa10((int)readCnt_, r.nameBuf); _setBegin(r.name, r.nameBuf); - nameLen = strlen(r.nameBuf); + nameLen = (int)strlen(r.nameBuf); _setLength(r.name, nameLen); } assert_gt(nameLen, 0); readCnt_++; - patid = readCnt_-1; + patid = (uint32_t)(readCnt_-1); r.readOrigBufLen = fb_.copyLastN(r.readOrigBuf); fb_.resetLastN(); if(doquals) { @@ -2028,7 +2028,7 @@ class TabbedPatternSource : public BufferedFilePatternSource { // '\n' readCnt_++; - patid = readCnt_-1; + patid = (uint32_t)(readCnt_-1); } /// Read another pair of patterns from a FASTA input file @@ -2078,7 +2078,7 @@ class TabbedPatternSource : public BufferedFilePatternSource { ra.readOrigBufLen = fb_.copyLastN(ra.readOrigBuf); fb_.resetLastN(); readCnt_++; - patid = readCnt_-1; + patid = (uint32_t)(readCnt_-1); return; } assert_neq('\t', fb_.peek()); @@ -2121,7 +2121,7 @@ class TabbedPatternSource : public BufferedFilePatternSource { // '\n' readCnt_++; - patid = readCnt_-1; + patid = (uint32_t)(readCnt_-1); } /** @@ -2169,12 +2169,12 @@ class TabbedPatternSource : public BufferedFilePatternSource { } // Set up a default name if one hasn't been set if(nameLen == 0) { - itoa10(readCnt_, r.nameBuf); + itoa10((int)readCnt_, r.nameBuf); _setBegin(r.name, r.nameBuf); - nameLen = strlen(r.nameBuf); + nameLen = (int)strlen(r.nameBuf); _setLength(r.name, nameLen); if(r2 != NULL) { - itoa10(readCnt_, (*r2).nameBuf); + itoa10((int)readCnt_, (*r2).nameBuf); _setBegin((*r2).name, (*r2).nameBuf); _setLength((*r2).name, nameLen); } @@ -2405,13 +2405,13 @@ class FastaContinuousPatternSource : public BufferedFilePatternSource { for(size_t i = 0; i < nameChars_; i++) { r.nameBuf[i] = nameBuf_[i]; } - itoa10(readCnt_ - subReadCnt_, &r.nameBuf[nameChars_]); + itoa10((int)(readCnt_ - subReadCnt_), &r.nameBuf[nameChars_]); _setBegin(r.name, r.nameBuf); _setLength(r.name, strlen(r.nameBuf)); eat_ = freq_-1; readCnt_++; beginning_ = false; - patid = readCnt_-1; + patid = (uint32_t)(readCnt_-1); break; } } @@ -2810,16 +2810,16 @@ 
class FastqPatternSource : public BufferedFilePatternSource { // Set up a default name if one hasn't been set if(nameLen == 0) { - itoa10(readCnt_, r.nameBuf); + itoa10((int)readCnt_, r.nameBuf); _setBegin(r.name, r.nameBuf); - nameLen = strlen(r.nameBuf); + nameLen = (int)strlen(r.nameBuf); _setLength(r.name, nameLen); } r.trimmed3 = this->trim3_; r.trimmed5 = mytrim5; assert_gt(nameLen, 0); readCnt_++; - patid = readCnt_-1; + patid = (uint32_t)(readCnt_-1); return; } } @@ -2967,13 +2967,13 @@ class RawPatternSource : public BufferedFilePatternSource { fb_.resetLastN(); // Set up name - itoa10(readCnt_, r.nameBuf); + itoa10((int)readCnt_, r.nameBuf); _setBegin(r.name, r.nameBuf); - nameLen = strlen(r.nameBuf); + nameLen = (int)strlen(r.nameBuf); _setLength(r.name, nameLen); readCnt_++; - patid = readCnt_-1; + patid = (uint32_t)(readCnt_-1); } /// Read another read pair from a FASTQ input file virtual void readPair(ReadBuf& ra, ReadBuf& rb, uint32_t& patid) { @@ -3059,7 +3059,7 @@ class ChainPatternSource : public BufferedFilePatternSource { fb_.resetLastN(); readCnt_++; - patid = readCnt_-1; + patid = (uint32_t)(readCnt_-1); } /// Read another read pair diff --git a/range_cache.h b/range_cache.h index 447ae38..b00a0b2 100644 --- a/range_cache.h +++ b/range_cache.h @@ -506,7 +506,7 @@ class RangeCache { uint32_t jumps = 0; if(tops.size() > 0) { entTop = tops.back(); - jumps = tops.size(); + jumps = (uint32_t)tops.size(); } // Cache the entry for the end of the tunnel assert(map_.find(entTop) == map_.end()); diff --git a/range_source.h b/range_source.h index 8291adb..af40a54 100644 --- a/range_source.h +++ b/range_source.h @@ -53,7 +53,7 @@ struct EditList { } else if(sz_ == (numEdits + numMoreEdits)) { assert(moreEdits_ != NULL); assert(yetMoreEdits_ == NULL); - yetMoreEdits_ = pool.alloc(qlen + 10 - numMoreEdits - numEdits); + yetMoreEdits_ = pool.alloc((uint32_t)qlen + 10 - numMoreEdits - numEdits); if(yetMoreEdits_ == NULL) { return false; } @@ -135,7 +135,7 @@ 
struct EditList { */ void free(AllocOnlyPool& epool, size_t qlen) { if(yetMoreEdits_ != NULL) - epool.free(yetMoreEdits_, qlen + 10 - numMoreEdits - numEdits); + epool.free(yetMoreEdits_, (uint32_t)qlen + 10 - numMoreEdits - numEdits); if(moreEdits_ != NULL) epool.free(moreEdits_, numMoreEdits); } @@ -636,7 +636,7 @@ class Branch { // jumping over a bunch of unrevisitable positions. for(size_t i = 0; i < len_; i++) { ranges_[i].eliminated_ = true; - assert(eliminated(i)); + assert(eliminated((int)i)); } assert(repOk(qlen)); return true; diff --git a/ref_aligner.h b/ref_aligner.h index 541a9eb..2e6fcde 100644 --- a/ref_aligner.h +++ b/ref_aligner.h @@ -93,7 +93,7 @@ class RefAligner { } } // Look for alignments - ASSERT_ONLY(uint32_t irsz = ranges.size()); + ASSERT_ONLY(uint32_t irsz = (uint32_t)ranges.size()); anchor64Find(numToFind, tidx, buf, qry, quals, begin, end, ranges, results, pairs, aoff, seedOnLeft); #ifndef NDEBUG @@ -194,10 +194,10 @@ class ExactRefAligner : public RefAligner { void naiveFind(uint32_t numToFind, size_t tidx, uint8_t* ref, - const TDna5Str& qry, - const TCharStr& quals, - TIndexOffU begin, - TIndexOffU end, + const TDna5Str& qry, + const TCharStr& quals, + TIndexOffU begin, + TIndexOffU end, TRangeVec& ranges, std::vector& results, TSetPairs* pairs, @@ -205,17 +205,17 @@ class ExactRefAligner : public RefAligner { bool seedOnLeft) const { assert_gt(numToFind, 0); - const uint32_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_geq(end - begin, qlen); // caller should have checked this assert_gt(end, begin); assert_gt(qlen, 0); - size_t qend = end - qlen; - size_t lim = qend - begin; - size_t halfway = begin + (lim >> 1); + TIndexOffU qend = end - qlen; + TIndexOffU lim = qend - begin; + TIndexOffU halfway = begin + (lim >> 1); bool hi = false; - for(size_t i = 1; i <= lim+1; i++) { - size_t ri; // leftmost position in candidate alignment - size_t rir; // same, minus begin; for indexing into ref[] + 
for(TIndexOffU i = 1; i <= lim+1; i++) { + TIndexOffU ri; // leftmost position in candidate alignment + TIndexOffU rir; // same, minus begin; for indexing into ref[] if(hi) { ri = halfway + (i >> 1); rir = ri - begin; assert_leq(ri, qend); @@ -290,10 +290,10 @@ class ExactRefAligner : public RefAligner { bool seedOnLeft) const { assert_gt(numToFind, 0); - ASSERT_ONLY(const uint32_t rangesInitSz = ranges.size()); + ASSERT_ONLY(const uint32_t rangesInitSz = (uint32_t)ranges.size()); ASSERT_ONLY(uint32_t duplicates = 0); ASSERT_ONLY(uint32_t r2i = 0); - const uint32_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_geq(end - begin, qlen); // caller should have checked this assert_gt(end, begin); assert_gt(qlen, 0); @@ -305,9 +305,9 @@ class ExactRefAligner : public RefAligner { #endif const uint32_t anchorBitPairs = min(qlen, 32); // anchorOverhang = # read bases not included in the anchor - const size_t anchorOverhang = qlen <= 32 ? 0 : qlen - 32; - const size_t lim = end - qlen - begin; - const size_t halfway = begin + (lim >> 1); + const TIndexOffU anchorOverhang = qlen <= 32 ? 0 : qlen - 32; + const TIndexOffU lim = end - qlen - begin; + const TIndexOffU halfway = begin + (lim >> 1); uint64_t anchor = 0llu; uint64_t buffw = 0llu; // Set up a mask that we'll apply to the two bufs every round @@ -361,12 +361,12 @@ class ExactRefAligner : public RefAligner { // were, we might need to make the 'anchorOverhang' adjustment on // the left end of the range rather than the right. 
bool hi = false; - size_t riHi = halfway; - size_t rirHi = halfway - begin; - size_t rirHiAnchor = rirHi + anchorBitPairs - 1; - size_t riLo = halfway + 1; - size_t rirLo = halfway - begin + 1; - for(size_t i = 1; i <= lim + 1; i++) { + TIndexOffU riHi = halfway; + TIndexOffU rirHi = halfway - begin; + TIndexOffU rirHiAnchor = rirHi + anchorBitPairs - 1; + TIndexOffU riLo = halfway + 1; + TIndexOffU rirLo = halfway - begin + 1; + for(TIndexOffU i = 1; i <= lim + 1; i++) { int r; // new reference char assert_lt(skipLeftToRights, qlen); assert_leq(skipRightToLefts, qlen); @@ -417,8 +417,8 @@ class ExactRefAligner : public RefAligner { } // Seed hit! bool foundHit = true; - size_t ri = hi ? riLo : riHi; - size_t rir = hi ? rirLo : rirHi; + TIndexOffU ri = hi ? riLo : riHi; + TIndexOffU rir = hi ? rirLo : rirHi; if(anchorOverhang > 0) { // Does the non-anchor part of the alignment (the // "overhang") ruin it? @@ -539,17 +539,17 @@ class OneMMRefAligner : public RefAligner { bool seedOnLeft) const { assert_gt(numToFind, 0); - const uint32_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_geq(end - begin, qlen); // caller should have checked this assert_gt(end, begin); assert_gt(qlen, 0); - size_t qend = end - qlen; - size_t lim = qend - begin; - size_t halfway = begin + (lim >> 1); + TIndexOffU qend = end - qlen; + TIndexOffU lim = qend - begin; + TIndexOffU halfway = begin + (lim >> 1); bool hi = false; - for(size_t i = 1; i <= lim+1; i++) { - size_t ri; // leftmost position in candidate alignment - size_t rir; // same, minus begin; for indexing into ref[] + for(TIndexOffU i = 1; i <= lim+1; i++) { + TIndexOffU ri; // leftmost position in candidate alignment + TIndexOffU rir; // same, minus begin; for indexing into ref[] if(hi) { ri = halfway + (i >> 1); rir = ri - begin; assert_leq(ri, qend); @@ -593,7 +593,7 @@ class OneMMRefAligner : public RefAligner { } else { // First one; remember offset and ref char refc = "ACGTN"[r]; - 
mmOff = j; + mmOff = (uint32_t)j; } } } @@ -634,10 +634,10 @@ class OneMMRefAligner : public RefAligner { bool seedOnLeft = false) const { assert_gt(numToFind, 0); - ASSERT_ONLY(const uint32_t rangesInitSz = ranges.size()); + ASSERT_ONLY(const uint32_t rangesInitSz = (uint32_t)ranges.size()); ASSERT_ONLY(uint32_t duplicates = 0); ASSERT_ONLY(uint32_t r2i = 0); - const uint32_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_geq(end - begin, qlen); // caller should have checked this assert_gt(end, begin); assert_gt(qlen, 0); @@ -652,8 +652,8 @@ class OneMMRefAligner : public RefAligner { const uint32_t anchorCushion = 32 - anchorBitPairs; // anchorOverhang = # read bases not included in the anchor const size_t anchorOverhang = (qlen <= 32 ? 0 : (qlen - 32)); - const size_t lim = end - qlen - begin; - const size_t halfway = begin + (lim >> 1); + const TIndexOffU lim = end - qlen - begin; + const TIndexOffU halfway = begin + (lim >> 1); uint64_t anchor = 0llu; uint64_t buffw = 0llu; // rotating ref sequence buffer // OR the 'diff' buffer with this so that we can always count @@ -721,12 +721,12 @@ class OneMMRefAligner : public RefAligner { // were, we might need to make the 'anchorOverhang' adjustment on // the left end of the range rather than the right. 
bool hi = false; - size_t riHi = halfway; - size_t rirHi = halfway - begin; - size_t rirHiAnchor = rirHi + anchorBitPairs - 1; - size_t riLo = halfway + 1; - size_t rirLo = halfway - begin + 1; - for(size_t i = 1; i <= lim + 1; i++) { + TIndexOffU riHi = halfway; + TIndexOffU rirHi = halfway - begin; + TIndexOffU rirHiAnchor = rirHi + anchorBitPairs - 1; + TIndexOffU riLo = halfway + 1; + TIndexOffU rirLo = halfway - begin + 1; + for(TIndexOffU i = 1; i <= lim + 1; i++) { int r; // new reference char uint64_t diff; assert_lt(skipLeftToRights, qlen); @@ -777,8 +777,8 @@ class OneMMRefAligner : public RefAligner { } if((diff & 0xffffffff00000000llu) && (diff & 0x00000000ffffffffllu)) continue; - size_t ri = hi ? riLo : riHi; - size_t rir = hi ? rirLo : rirHi; + TIndexOffU ri = hi ? riLo : riHi; + TIndexOffU rir = hi ? rirLo : rirHi; // Could use pop count uint8_t *diff8 = reinterpret_cast(&diff); // As a first cut, see if there are too many mismatches in @@ -846,7 +846,7 @@ class OneMMRefAligner : public RefAligner { foundHit = false; break; } else { - mmpos = 32 + j; + mmpos = (uint32_t)(32 + j); refc = "ACGT"[(int)ref[rir + 32 + j]]; } } @@ -943,15 +943,15 @@ class TwoMMRefAligner : public RefAligner { bool seedOnLeft) const { assert_gt(numToFind, 0); - const uint32_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_geq(end - begin, qlen); // caller should have checked this assert_gt(end, begin); assert_gt(qlen, 0); - size_t qend = end - qlen; - size_t lim = qend - begin; - size_t halfway = begin + (lim >> 1); + TIndexOffU qend = end - qlen; + TIndexOffU lim = qend - begin; + TIndexOffU halfway = begin + (lim >> 1); bool hi = false; - for(size_t i = 1; i <= lim+1; i++) { + for(TIndexOffU i = 1; i <= lim+1; i++) { TIndexOffU ri; // leftmost position in candidate alignment TIndexOffU rir; // same, minus begin; for indexing into ref[] if(hi) { @@ -999,12 +999,12 @@ class TwoMMRefAligner : public RefAligner { } else if(mms == 2) { 
// Second one; remember offset and ref char refc2 = "ACGTN"[r]; - mmOff2 = j; + mmOff2 = (uint32_t)j; } else { assert_eq(1, mms); // First one; remember offset and ref char refc1 = "ACGTN"[r]; - mmOff1 = j; + mmOff1 = (uint32_t)j; } } } @@ -1053,10 +1053,10 @@ class TwoMMRefAligner : public RefAligner { bool seedOnLeft = false) const { assert_gt(numToFind, 0); - ASSERT_ONLY(const uint32_t rangesInitSz = ranges.size()); + ASSERT_ONLY(const uint32_t rangesInitSz = (uint32_t)ranges.size()); ASSERT_ONLY(uint32_t duplicates = 0); ASSERT_ONLY(uint32_t r2i = 0); - const size_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_geq(end - begin, qlen); // caller should have checked this assert_gt(end, begin); assert_gt(qlen, 0); @@ -1066,13 +1066,13 @@ class TwoMMRefAligner : public RefAligner { naiveFind(numToFind, tidx, ref, qry, quals, begin, end, r2, re2, pairs, aoff, seedOnLeft); #endif - const uint32_t anchorBitPairs = min(qlen, 32); + const uint32_t anchorBitPairs = min((int)qlen, 32); const int lhsShift = ((anchorBitPairs - 1) << 1); const uint32_t anchorCushion = 32 - anchorBitPairs; // anchorOverhang = # read bases not included in the anchor - const uint32_t anchorOverhang = (qlen <= 32 ? 0 : (qlen - 32)); - const size_t lim = end - qlen - begin; - const size_t halfway = begin + (lim >> 1); + const uint32_t anchorOverhang = (uint32_t)(qlen <= 32 ? 0 : (qlen - 32)); + const TIndexOffU lim = end - qlen - begin; + const TIndexOffU halfway = begin + (lim >> 1); uint64_t anchor = 0llu; uint64_t buffw = 0llu; // rotating ref sequence buffer // OR the 'diff' buffer with this so that we can always count @@ -1147,12 +1147,12 @@ class TwoMMRefAligner : public RefAligner { // were, we might need to make the 'anchorOverhang' adjustment on // the left end of the range rather than the right. 
bool hi = false; - size_t riHi = halfway; - size_t rirHi = halfway - begin; - size_t rirHiAnchor = rirHi + anchorBitPairs - 1; - size_t riLo = halfway + 1; - size_t rirLo = halfway - begin + 1; - size_t i; + TIndexOffU riHi = halfway; + TIndexOffU rirHi = halfway - begin; + TIndexOffU rirHiAnchor = rirHi + anchorBitPairs - 1; + TIndexOffU riLo = halfway + 1; + TIndexOffU rirLo = halfway - begin + 1; + TIndexOffU i; for(i = 1; i <= lim + 1; i++) { int r; // new reference char uint64_t diff; @@ -1205,8 +1205,8 @@ class TwoMMRefAligner : public RefAligner { if((diff & 0xfffff00000000000llu) && (diff & 0x00000ffffff00000llu) && (diff & 0x00000000000fffffllu)) continue; - size_t ri = hi ? riLo : riHi; - size_t rir = hi ? rirLo : rirHi; + TIndexOffU ri = hi ? riLo : riHi; + TIndexOffU rir = hi ? rirLo : rirHi; // Could use pop count uint8_t *diff8 = reinterpret_cast(&diff); // As a first cut, see if there are too many mismatches in @@ -1421,7 +1421,7 @@ class ThreeMMRefAligner : public RefAligner { bool seedOnLeft) const { assert_gt(numToFind, 0); - const uint32_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_geq(end - begin, qlen); // caller should have checked this assert_gt(end, begin); assert_gt(qlen, 0); @@ -1429,9 +1429,9 @@ class ThreeMMRefAligner : public RefAligner { TIndexOffU lim = qend - begin; TIndexOffU halfway = begin + (lim >> 1); bool hi = false; - for(size_t i = 1; i <= lim+1; i++) { - size_t ri; // leftmost position in candidate alignment - size_t rir; // same, minus begin; for indexing into ref[] + for(TIndexOffU i = 1; i <= lim+1; i++) { + TIndexOffU ri; // leftmost position in candidate alignment + TIndexOffU rir; // same, minus begin; for indexing into ref[] if(hi) { ri = halfway + (i >> 1); rir = ri - begin; assert_leq(ri, qend); @@ -1479,16 +1479,16 @@ class ThreeMMRefAligner : public RefAligner { } else if(mms == 3) { // Second one; remember offset and ref char refc3 = "ACGTN"[r]; - mmOff3 = j; + mmOff3 
= (uint32_t)j; } else if(mms == 2) { // Second one; remember offset and ref char refc2 = "ACGTN"[r]; - mmOff2 = j; + mmOff2 = (uint32_t)j; } else { assert_eq(1, mms); // First one; remember offset and ref char refc1 = "ACGTN"[r]; - mmOff1 = j; + mmOff1 = (uint32_t)j; } } } @@ -1543,10 +1543,10 @@ class ThreeMMRefAligner : public RefAligner { bool seedOnLeft = false) const { assert_gt(numToFind, 0); - ASSERT_ONLY(const uint32_t rangesInitSz = ranges.size()); + ASSERT_ONLY(const uint32_t rangesInitSz = (uint32_t)ranges.size()); ASSERT_ONLY(uint32_t duplicates = 0); ASSERT_ONLY(uint32_t r2i = 0); - const size_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_geq(end - begin, qlen); // caller should have checked this assert_gt(end, begin); assert_gt(qlen, 0); @@ -1556,13 +1556,13 @@ class ThreeMMRefAligner : public RefAligner { naiveFind(numToFind, tidx, ref, qry, quals, begin, end, r2, re2, pairs, aoff, seedOnLeft); #endif - const uint32_t anchorBitPairs = min(qlen, 32); + const uint32_t anchorBitPairs = min((int)qlen, 32); const int lhsShift = ((anchorBitPairs - 1) << 1); const uint32_t anchorCushion = 32 - anchorBitPairs; // anchorOverhang = # read bases not included in the anchor - const uint32_t anchorOverhang = (qlen <= 32 ? 0 : (qlen - 32)); - const size_t lim = end - qlen - begin; - const size_t halfway = begin + (lim >> 1); + const uint32_t anchorOverhang = (uint32_t)(qlen <= 32 ? 0 : (qlen - 32)); + const TIndexOffU lim = end - qlen - begin; + const TIndexOffU halfway = begin + (lim >> 1); uint64_t anchor = 0llu; uint64_t buffw = 0llu; // rotating ref sequence buffer // OR the 'diff' buffer with this so that we can always count @@ -1643,12 +1643,12 @@ class ThreeMMRefAligner : public RefAligner { // were, we might need to make the 'anchorOverhang' adjustment on // the left end of the range rather than the right. 
bool hi = false; - size_t riHi = halfway; - size_t rirHi = halfway - begin; - size_t rirHiAnchor = rirHi + anchorBitPairs - 1; - size_t riLo = halfway + 1; - size_t rirLo = halfway - begin + 1; - for(size_t i = 1; i <= lim + 1; i++) { + TIndexOffU riHi = halfway; + TIndexOffU rirHi = halfway - begin; + TIndexOffU rirHiAnchor = rirHi + anchorBitPairs - 1; + TIndexOffU riLo = halfway + 1; + TIndexOffU rirLo = halfway - begin + 1; + for(TIndexOffU i = 1; i <= lim + 1; i++) { int r; // new reference char uint64_t diff; assert_lt(skipLeftToRights, qlen); @@ -1701,8 +1701,8 @@ class ThreeMMRefAligner : public RefAligner { (diff & 0x0000ffff00000000llu) && (diff & 0x00000000ffff0000llu) && (diff & 0x000000000000ffffllu)) continue; - size_t ri = hi ? riLo : riHi; - size_t rir = hi ? rirLo : rirHi; + TIndexOffU ri = hi ? riLo : riHi; + TIndexOffU rir = hi ? rirLo : rirHi; // Could use pop count uint8_t *diff8 = reinterpret_cast(&diff); // As a first cut, see if there are too many mismatches in @@ -1997,7 +1997,7 @@ class Seed0RefAligner : public RefAligner { { assert_gt(numToFind, 0); assert_gt(end, begin); - const uint32_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_gt(qlen, 0); assert_geq(end - begin, qlen); // caller should have checked this assert_gt(this->seedLen_, 0); @@ -2018,14 +2018,14 @@ class Seed0RefAligner : public RefAligner { const TIndexOffU lim = qend - qbegin; // halfway = position in the reference to start at (and then // we work our way out to the right and to the left). 
- const size_t halfway = qbegin + (lim >> 1); + const TIndexOffU halfway = qbegin + (lim >> 1); // Vectors for holding edit information std::vector nonSeedMms; std::vector nonSeedRefcs; bool hi = false; - for(size_t i = 1; i <= lim+1; i++) { - size_t ri; // leftmost position in candidate alignment - size_t rir; // same, minus begin; for indexing into ref[] + for(TIndexOffU i = 1; i <= lim+1; i++) { + TIndexOffU ri; // leftmost position in candidate alignment + TIndexOffU rir; // same, minus begin; for indexing into ref[] if(hi) { ri = halfway + (i >> 1); rir = ri - begin; assert_leq(ri, qend); @@ -2049,10 +2049,10 @@ class Seed0RefAligner : public RefAligner { } else { // Go left-to-right } - size_t rirj = rir + j; + TIndexOffU rirj = (TIndexOffU)(rir + j); if(!seedOnLeft) { assert_geq(rir, jj); - rirj = rir - jj - 1; + rirj = (TIndexOffU)(rir - jj - 1); } #if 0 // Count Ns in the reference as mismatches @@ -2090,7 +2090,7 @@ class Seed0RefAligner : public RefAligner { } else { // Legal mismatch outside of the anchor; record it mms++; - nonSeedMms.push_back(j); + nonSeedMms.push_back((uint32_t)j); assert_leq(nonSeedMms.size(), (size_t)mms); nonSeedRefcs.push_back("ACGTN"[r]); } @@ -2173,10 +2173,10 @@ class Seed0RefAligner : public RefAligner { bool seedOnLeft = false) const { assert_gt(numToFind, 0); - ASSERT_ONLY(const uint32_t rangesInitSz = ranges.size()); + ASSERT_ONLY(const uint32_t rangesInitSz = (uint32_t)ranges.size()); ASSERT_ONLY(uint32_t duplicates = 0); ASSERT_ONLY(uint32_t r2i = 0); - const uint32_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_gt(qlen, 0); assert_gt(end, begin); assert_geq(end - begin, qlen); // caller should have checked this @@ -2188,13 +2188,13 @@ class Seed0RefAligner : public RefAligner { naiveFind(numToFind, tidx, ref, qry, quals, begin, end, r2, re2, pairs, aoff, seedOnLeft); #endif - const uint32_t anchorBitPairs = min(slen, 32); + const uint32_t anchorBitPairs = min((int)slen, 32); 
const int lhsShift = ((anchorBitPairs - 1) << 1); ASSERT_ONLY(const uint32_t anchorCushion = 32 - anchorBitPairs); // seedAnchorOverhang = # seed bases not included in the anchor - const uint32_t seedAnchorOverhang = (slen <= anchorBitPairs ? 0 : (slen - anchorBitPairs)); + const uint32_t seedAnchorOverhang = (uint32_t)(slen <= anchorBitPairs ? 0 : (slen - anchorBitPairs)); // seedAnchorOverhang = # seed bases not included in the anchor - const uint32_t readSeedOverhang = (slen == qlen ? 0 : (qlen - slen)); + const uint32_t readSeedOverhang = (uint32_t)(slen == qlen ? 0 : (qlen - slen)); assert(anchorCushion == 0 || seedAnchorOverhang == 0); assert_eq(qlen, readSeedOverhang + slen); TIndexOffU qend = end; @@ -2209,9 +2209,9 @@ class Seed0RefAligner : public RefAligner { qend -= slen; } // lim = # possible alignments in the range - const size_t lim = qend - qbegin; + const TIndexOffU lim = qend - qbegin; // halfway = point on the genome to radiate out from - const size_t halfway = qbegin + (lim >> 1); + const TIndexOffU halfway = qbegin + (lim >> 1); uint64_t anchor = 0llu; uint64_t buffw = 0llu; // rotating ref sequence buffer // Set up a mask that we'll apply to the two bufs every round @@ -2291,13 +2291,13 @@ class Seed0RefAligner : public RefAligner { // between right-to-left and left-to-right shifts, until all of // the positions from qbegin to qend have been covered. 
bool hi = false; - size_t riHi = halfway; - size_t rirHi = halfway - begin; - size_t rirHiAnchor = rirHi + anchorBitPairs - 1; - size_t riLo = halfway + 1; - size_t rirLo = halfway - begin + 1; - size_t lrSkips = anchorBitPairs; - size_t rlSkips = qlen; + TIndexOffU riHi = halfway; + TIndexOffU rirHi = halfway - begin; + TIndexOffU rirHiAnchor = rirHi + anchorBitPairs - 1; + TIndexOffU riLo = halfway + 1; + TIndexOffU rirLo = halfway - begin + 1; + TIndexOffU lrSkips = anchorBitPairs; + TIndexOffU rlSkips = qlen; if(!seedOnLeft && readSeedOverhang) { lrSkips += readSeedOverhang; assert_geq(rlSkips, readSeedOverhang); @@ -2353,8 +2353,8 @@ class Seed0RefAligner : public RefAligner { diff = bufbw ^ anchor; } if(diff) continue; - size_t ri = hi ? riLo : riHi; - size_t rir = hi ? rirLo : rirHi; + TIndexOffU ri = hi ? riLo : riHi; + TIndexOffU rir = hi ? rirLo : rirHi; unsigned int ham = 0; // If the seed is longer than the anchor, then scan the // rest of the seed characters @@ -2389,7 +2389,7 @@ class Seed0RefAligner : public RefAligner { foundHit = false; // Skip this candidate break; } - TIndexOffU qoff = anchorBitPairs + j; + TIndexOffU qoff = (TIndexOffU)(anchorBitPairs + j); if(!seedOnLeft) { qoff += readSeedOverhang; } @@ -2411,12 +2411,12 @@ class Seed0RefAligner : public RefAligner { if((qlen - slen) > 0) { // Going left-to-right for(size_t j = 0; j < readSeedOverhang; j++) { - TIndexOffU roff = rir + slen + j; - TIndexOffU qoff = slen + j; + TIndexOffU roff = (TIndexOffU)(rir + slen + j); + TIndexOffU qoff = (TIndexOffU)(slen + j); if(!seedOnLeft) { assert_geq(roff, qlen); roff -= qlen; - qoff = j; + qoff = (uint32_t)j; } int rc = (int)ref[roff]; if(rc == 4) { @@ -2600,7 +2600,7 @@ class Seed1RefAligner : public RefAligner { { assert_gt(numToFind, 0); assert_gt(end, begin); - const uint32_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_gt(qlen, 0); assert_geq(end - begin, qlen); // caller should have checked this 
assert_gt(this->seedLen_, 0); @@ -2618,19 +2618,19 @@ class Seed1RefAligner : public RefAligner { qbegin += qlen; } // lim = number of alignments to try - const size_t lim = qend - qbegin; + const TIndexOffU lim = qend - qbegin; // halfway = position in the reference to start at (and then // we work our way out to the right and to the left). - const size_t halfway = qbegin + (lim >> 1); + const TIndexOffU halfway = qbegin + (lim >> 1); // Vectors for holding edit information std::vector nonSeedMms; assert_eq(0, nonSeedMms.size()); std::vector nonSeedRefcs; assert_eq(0, nonSeedRefcs.size()); bool hi = false; - for(size_t i = 1; i <= lim+1; i++) { - size_t ri; // leftmost position in candidate alignment - size_t rir; // same, minus begin; for indexing into ref[] + for(TIndexOffU i = 1; i <= lim+1; i++) { + TIndexOffU ri; // leftmost position in candidate alignment + TIndexOffU rir; // same, minus begin; for indexing into ref[] if(hi) { ri = halfway + (i >> 1); rir = ri - begin; assert_leq(ri, qend); @@ -2657,10 +2657,10 @@ class Seed1RefAligner : public RefAligner { } else { // Go left-to-right } - size_t rirj = rir + j; + TIndexOffU rirj = (TIndexOffU)(rir + j); if(!seedOnLeft) { assert_geq(rir, jj); - rirj = rir - jj - 1; + rirj = (TIndexOffU)(rir - jj - 1); } #if 0 // Count Ns in the reference as mismatches @@ -2700,11 +2700,11 @@ class Seed1RefAligner : public RefAligner { // First mismatch in the anchor; remember offset // and ref char refc = "ACGTN"[r]; - mmOff = j; + mmOff = (uint32_t)j; seedMms = 1; } else { // Legal mismatch outside of the anchor; record it - nonSeedMms.push_back(j); + nonSeedMms.push_back((uint32_t)j); assert_leq(nonSeedMms.size(), (size_t)mms); nonSeedRefcs.push_back("ACGTN"[r]); } @@ -2798,10 +2798,10 @@ class Seed1RefAligner : public RefAligner { bool seedOnLeft = false) const { assert_gt(numToFind, 0); - ASSERT_ONLY(const uint32_t rangesInitSz = ranges.size()); + ASSERT_ONLY(const uint32_t rangesInitSz = (uint32_t)ranges.size()); 
ASSERT_ONLY(uint32_t duplicates = 0); ASSERT_ONLY(uint32_t r2i = 0); - const uint32_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_gt(qlen, 0); assert_gt(end, begin); assert_geq(end - begin, qlen); // caller should have checked this @@ -2813,13 +2813,13 @@ class Seed1RefAligner : public RefAligner { naiveFind(numToFind, tidx, ref, qry, quals, begin, end, r2, re2, pairs, aoff, seedOnLeft); #endif - const uint32_t anchorBitPairs = min(slen, 32); + const uint32_t anchorBitPairs = min((int)slen, 32); const int lhsShift = ((anchorBitPairs - 1) << 1); const uint32_t anchorCushion = 32 - anchorBitPairs; // seedAnchorOverhang = # seed bases not included in the anchor - const uint32_t seedAnchorOverhang = (slen <= anchorBitPairs ? 0 : (slen - anchorBitPairs)); + const uint32_t seedAnchorOverhang = (uint32_t)(slen <= anchorBitPairs ? 0 : (slen - anchorBitPairs)); // seedAnchorOverhang = # seed bases not included in the anchor - const uint32_t readSeedOverhang = (slen == qlen ? 0 : (qlen - slen)); + const uint32_t readSeedOverhang = (uint32_t)(slen == qlen ? 
0 : (qlen - slen)); assert(anchorCushion == 0 || seedAnchorOverhang == 0); assert_eq(qlen, readSeedOverhang + slen); TIndexOffU qend = end; @@ -2834,9 +2834,9 @@ class Seed1RefAligner : public RefAligner { qend -= slen; } // lim = # possible alignments in the range - const size_t lim = qend - qbegin; + const TIndexOffU lim = qend - qbegin; // halfway = point on the genome to radiate out from - const size_t halfway = qbegin + (lim >> 1); + const TIndexOffU halfway = qbegin + (lim >> 1); uint64_t anchor = 0llu; uint64_t buffw = 0llu; // rotating ref sequence buffer // OR the 'diff' buffer with this so that we can always count @@ -2854,7 +2854,7 @@ class Seed1RefAligner : public RefAligner { int nPos = -1; size_t skipLeftToRights = 0; size_t skipRightToLefts = 0; - const size_t halfwayRi = halfway - begin; + const TIndexOffU halfwayRi = halfway - begin; // Construct the 'anchor' 64-bit buffer so that it holds all of // the first 'anchorBitPairs' bit pairs of the query. for(size_t ii = 0; ii < anchorBitPairs; ii++) { @@ -2931,13 +2931,13 @@ class Seed1RefAligner : public RefAligner { // between right-to-left and left-to-right shifts, until all of // the positions from qbegin to qend have been covered. 
bool hi = false; - size_t riHi = halfway; - size_t rirHi = halfway - begin; - size_t rirHiAnchor = rirHi + anchorBitPairs - 1; - size_t riLo = halfway + 1; - size_t rirLo = halfway - begin + 1; - size_t lrSkips = anchorBitPairs; - size_t rlSkips = qlen; + TIndexOffU riHi = halfway; + TIndexOffU rirHi = halfway - begin; + TIndexOffU rirHiAnchor = rirHi + anchorBitPairs - 1; + TIndexOffU riLo = halfway + 1; + TIndexOffU rirLo = halfway - begin + 1; + TIndexOffU lrSkips = anchorBitPairs; + TIndexOffU rlSkips = qlen; if(!seedOnLeft && readSeedOverhang) { lrSkips += readSeedOverhang; assert_geq(rlSkips, readSeedOverhang); @@ -2994,8 +2994,8 @@ class Seed1RefAligner : public RefAligner { } if((diff & 0xffffffff00000000llu) && (diff & 0x00000000ffffffffllu)) continue; - size_t ri = hi ? riLo : riHi; - size_t rir = hi ? rirLo : rirHi; + TIndexOffU ri = hi ? riLo : riHi; + TIndexOffU rir = hi ? rirLo : rirHi; // Could use pop count uint8_t *diff8 = reinterpret_cast(&diff); // As a first cut, see if there are too many mismatches in @@ -3090,7 +3090,7 @@ class Seed1RefAligner : public RefAligner { foundHit = false; // Skip this candidate break; } - TIndexOffU qoff = anchorBitPairs + j; + TIndexOffU qoff = (TIndexOffU)(anchorBitPairs + j); if(!seedOnLeft) { qoff += readSeedOverhang; } @@ -3126,12 +3126,12 @@ class Seed1RefAligner : public RefAligner { if((qlen - slen) > 0) { // Going left-to-right for(size_t j = 0; j < readSeedOverhang; j++) { - TIndexOffU roff = rir + slen + j; - TIndexOffU qoff = slen + j; + TIndexOffU roff = (TIndexOffU)(rir + slen + j); + TIndexOffU qoff = (TIndexOffU)(slen + j); if(!seedOnLeft) { assert_geq(roff, qlen); roff -= qlen; - qoff = j; + qoff = (TIndexOffU)(j); } int rc = (int)ref[roff]; if(rc == 4) { @@ -3339,7 +3339,7 @@ class Seed2RefAligner : public RefAligner { { assert_gt(numToFind, 0); assert_gt(end, begin); - const uint32_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_gt(qlen, 0); assert_geq(end 
- begin, qlen); // caller should have checked this assert_gt(this->seedLen_, 0); @@ -3357,17 +3357,17 @@ class Seed2RefAligner : public RefAligner { qbegin += qlen; } // lim = number of alignments to try - const size_t lim = qend - qbegin; + const TIndexOffU lim = qend - qbegin; // halfway = position in the reference to start at (and then // we work our way out to the right and to the left). - const size_t halfway = qbegin + (lim >> 1); + const TIndexOffU halfway = qbegin + (lim >> 1); // Vectors for holding edit information std::vector nonSeedMms; std::vector nonSeedRefcs; bool hi = false; - for(size_t i = 1; i <= lim+1; i++) { - size_t ri; // leftmost position in candidate alignment - size_t rir; // same, minus begin; for indexing into ref[] + for(TIndexOffU i = 1; i <= lim+1; i++) { + TIndexOffU ri; // leftmost position in candidate alignment + TIndexOffU rir; // same, minus begin; for indexing into ref[] if(hi) { ri = halfway + (i >> 1); rir = ri - begin; assert_leq(ri, qend); @@ -3396,10 +3396,10 @@ class Seed2RefAligner : public RefAligner { } else { // Go left-to-right } - size_t rirj = rir + j; + TIndexOffU rirj = (TIndexOffU)(rir + j); if(!seedOnLeft) { assert_geq(rir, jj); - rirj = rir - jj - 1; + rirj = (TIndexOffU)(rir - jj - 1); } #if 0 // Count Ns in the reference as mismatches @@ -3439,17 +3439,17 @@ class Seed2RefAligner : public RefAligner { // First mismatch in the anchor; remember offset // and ref char refc1 = "ACGTN"[r]; - mmOff1 = j; + mmOff1 = (uint32_t)j; seedMms = 1; } else if(mms == 2 && jj < slen) { // Second mismatch in the anchor; remember offset // and ref char refc2 = "ACGTN"[r]; - mmOff2 = j; + mmOff2 = (uint32_t)j; seedMms = 2; } else { // Legal mismatch outside of the anchor; record it - nonSeedMms.push_back(j); + nonSeedMms.push_back((uint32_t)j); assert_leq(nonSeedMms.size(), (size_t)mms); nonSeedRefcs.push_back("ACGTN"[r]); } @@ -3555,10 +3555,10 @@ class Seed2RefAligner : public RefAligner { bool seedOnLeft = false) const { 
assert_gt(numToFind, 0); - ASSERT_ONLY(const uint32_t rangesInitSz = ranges.size()); + ASSERT_ONLY(const uint32_t rangesInitSz = (uint32_t)ranges.size()); ASSERT_ONLY(uint32_t duplicates = 0); ASSERT_ONLY(uint32_t r2i = 0); - const uint32_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_gt(qlen, 0); assert_gt(end, begin); assert_geq(end - begin, qlen); // caller should have checked this @@ -3570,13 +3570,13 @@ class Seed2RefAligner : public RefAligner { naiveFind(numToFind, tidx, ref, qry, quals, begin, end, r2, re2, pairs, aoff, seedOnLeft); #endif - const uint32_t anchorBitPairs = min(slen, 32); + const uint32_t anchorBitPairs = min((int)slen, 32); const int lhsShift = ((anchorBitPairs - 1) << 1); const uint32_t anchorCushion = 32 - anchorBitPairs; // seedAnchorOverhang = # seed bases not included in the anchor - const uint32_t seedAnchorOverhang = (slen <= anchorBitPairs ? 0 : (slen - anchorBitPairs)); + const uint32_t seedAnchorOverhang = (uint32_t)(slen <= anchorBitPairs ? 0 : (slen - anchorBitPairs)); // seedAnchorOverhang = # seed bases not included in the anchor - const uint32_t readSeedOverhang = (slen == qlen ? 0 : (qlen - slen)); + const uint32_t readSeedOverhang = (uint32_t)(slen == qlen ? 
0 : (qlen - slen)); assert(anchorCushion == 0 || seedAnchorOverhang == 0); assert_eq(qlen, readSeedOverhang + slen); TIndexOffU qend = end; @@ -3591,9 +3591,9 @@ class Seed2RefAligner : public RefAligner { qend -= slen; } // lim = # possible alignments in the range - const size_t lim = qend - qbegin; + const TIndexOffU lim = qend - qbegin; // halfway = point on the genome to radiate out from - const size_t halfway = qbegin + (lim >> 1); + const TIndexOffU halfway = qbegin + (lim >> 1); uint64_t anchor = 0llu; uint64_t buffw = 0llu; // rotating ref sequence buffer // OR the 'diff' buffer with this so that we can always count @@ -3613,7 +3613,7 @@ class Seed2RefAligner : public RefAligner { int nPos2 = -1; size_t skipLeftToRights = 0; size_t skipRightToLefts = 0; - const size_t halfwayRi = halfway - begin; + const TIndexOffU halfwayRi = halfway - begin; assert_leq(anchorBitPairs, slen); // Construct the 'anchor' 64-bit buffer so that it holds all of // the first 'anchorBitPairs' bit pairs of the query. @@ -3699,13 +3699,13 @@ class Seed2RefAligner : public RefAligner { // between right-to-left and left-to-right shifts, until all of // the positions from qbegin to qend have been covered. 
bool hi = false; - size_t riHi = halfway; - size_t rirHi = halfway - begin; - size_t rirHiAnchor = rirHi + anchorBitPairs - 1; - size_t riLo = halfway + 1; - size_t rirLo = halfway - begin + 1; - size_t lrSkips = anchorBitPairs; - size_t rlSkips = qlen; + TIndexOffU riHi = halfway; + TIndexOffU rirHi = halfway - begin; + TIndexOffU rirHiAnchor = rirHi + anchorBitPairs - 1; + TIndexOffU riLo = halfway + 1; + TIndexOffU rirLo = halfway - begin + 1; + TIndexOffU lrSkips = anchorBitPairs; + TIndexOffU rlSkips = qlen; if(!seedOnLeft && readSeedOverhang) { lrSkips += readSeedOverhang; assert_geq(rlSkips, readSeedOverhang); @@ -3919,7 +3919,7 @@ class Seed2RefAligner : public RefAligner { foundHit = false; // Skip this candidate break; } - TIndexOffU qoff = anchorBitPairs + j; + TIndexOffU qoff = (TIndexOffU)(anchorBitPairs + j); if(!seedOnLeft) { qoff += readSeedOverhang; } @@ -3962,12 +3962,12 @@ class Seed2RefAligner : public RefAligner { if((qlen - slen) > 0) { // Going left-to-right for(size_t j = 0; j < readSeedOverhang; j++) { - TIndexOffU roff = rir + slen + j; - TIndexOffU qoff = slen + j; + TIndexOffU roff = (TIndexOffU)(rir + slen + j); + TIndexOffU qoff = (TIndexOffU)(slen + j); if(!seedOnLeft) { assert_geq(roff, qlen); roff -= qlen; - qoff = j; + qoff = (TIndexOffU)j; } int rc = (int)ref[roff]; if(rc == 4) { @@ -4199,7 +4199,7 @@ class Seed3RefAligner : public RefAligner { { assert_gt(numToFind, 0); assert_gt(end, begin); - const uint32_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_gt(qlen, 0); assert_geq(end - begin, qlen); // caller should have checked this assert_gt(this->seedLen_, 0); @@ -4217,15 +4217,15 @@ class Seed3RefAligner : public RefAligner { qbegin += qlen; } // lim = number of alignments to try - const size_t lim = qend - qbegin; + const TIndexOffU lim = qend - qbegin; // halfway = position in the reference to start at (and then // we work our way out to the right and to the left). 
- const size_t halfway = qbegin + (lim >> 1); + const TIndexOffU halfway = qbegin + (lim >> 1); // Vectors for holding edit information std::vector nonSeedMms; std::vector nonSeedRefcs; bool hi = false; - for(size_t i = 1; i <= lim+1; i++) { + for(TIndexOffU i = 1; i <= lim+1; i++) { TIndexOffU ri; // leftmost position in candidate alignment TIndexOffU rir; // same, minus begin; for indexing into ref[] if(hi) { @@ -4258,10 +4258,10 @@ class Seed3RefAligner : public RefAligner { } else { // Go left-to-right } - TIndexOffU rirj = rir + j; + TIndexOffU rirj = (TIndexOffU)(rir + j); if(!seedOnLeft) { assert_geq(rir, jj); - rirj = rir - jj - 1; + rirj = (TIndexOffU)(rir - jj - 1); } #if 0 // Count Ns in the reference as mismatches @@ -4301,23 +4301,23 @@ class Seed3RefAligner : public RefAligner { // First mismatch in the anchor; remember offset // and ref char refc1 = "ACGTN"[r]; - mmOff1 = j; + mmOff1 = (uint32_t)j; seedMms = 1; } else if(mms == 2 && jj < slen) { // Second mismatch in the anchor; remember offset // and ref char refc2 = "ACGTN"[r]; - mmOff2 = j; + mmOff2 = (uint32_t)j; seedMms = 2; } else if(mms == 3 && jj < slen) { // Third mismatch in the anchor; remember offset // and ref char refc3 = "ACGTN"[r]; - mmOff3 = j; + mmOff3 = (uint32_t)j; seedMms = 3; } else { // Legal mismatch outside of the anchor; record it - nonSeedMms.push_back(j); + nonSeedMms.push_back((uint32_t)j); assert_leq(nonSeedMms.size(), (size_t)mms); nonSeedRefcs.push_back("ACGTN"[r]); } @@ -4435,28 +4435,28 @@ class Seed3RefAligner : public RefAligner { bool seedOnLeft = false) const { assert_gt(numToFind, 0); - ASSERT_ONLY(const uint32_t rangesInitSz = ranges.size()); + ASSERT_ONLY(const uint32_t rangesInitSz = (uint32_t)ranges.size()); ASSERT_ONLY(uint32_t duplicates = 0); ASSERT_ONLY(uint32_t r2i = 0); - const uint32_t qlen = seqan::length(qry); + const uint32_t qlen = (uint32_t)seqan::length(qry); assert_gt(qlen, 0); assert_gt(end, begin); assert_geq(end - begin, qlen); // caller 
should have checked this assert_gt(this->seedLen_, 0); - size_t slen = min(qlen, this->seedLen_); + uint32_t slen = min(qlen, this->seedLen_); #ifndef NDEBUG // Get results from the naive matcher for sanity-checking TRangeVec r2; std::vector re2; naiveFind(numToFind, tidx, ref, qry, quals, begin, end, r2, re2, pairs, aoff, seedOnLeft); #endif - const uint32_t anchorBitPairs = min(slen, 32); + const uint32_t anchorBitPairs = min((int)slen, 32); const int lhsShift = ((anchorBitPairs - 1) << 1); const uint32_t anchorCushion = 32 - anchorBitPairs; // seedAnchorOverhang = # seed bases not included in the anchor - const uint32_t seedAnchorOverhang = (slen <= anchorBitPairs ? 0 : (slen - anchorBitPairs)); + const uint32_t seedAnchorOverhang = (uint32_t)(slen <= anchorBitPairs ? 0 : (slen - anchorBitPairs)); // seedAnchorOverhang = # seed bases not included in the anchor - const uint32_t readSeedOverhang = (slen == qlen ? 0 : (qlen - slen)); + const uint32_t readSeedOverhang = (uint32_t)(slen == qlen ? 0 : (qlen - slen)); assert(anchorCushion == 0 || seedAnchorOverhang == 0); assert_eq(qlen, readSeedOverhang + slen); TIndexOffU qend = end; @@ -4471,7 +4471,7 @@ class Seed3RefAligner : public RefAligner { qend -= slen; } // lim = # possible alignments in the range - const size_t lim = qend - qbegin; + const TIndexOffU lim = qend - qbegin; // halfway = point on the genome to radiate out from const TIndexOffU halfway = qbegin + (lim >> 1); uint64_t anchor = 0llu; @@ -4583,13 +4583,13 @@ class Seed3RefAligner : public RefAligner { // between right-to-left and left-to-right shifts, until all of // the positions from qbegin to qend have been covered. 
bool hi = false; - size_t riHi = halfway; - size_t rirHi = halfway - begin; - size_t rirHiAnchor = rirHi + anchorBitPairs - 1; - size_t riLo = halfway + 1; - size_t rirLo = halfway - begin + 1; - size_t lrSkips = anchorBitPairs; - size_t rlSkips = qlen; + TIndexOffU riHi = halfway; + TIndexOffU rirHi = halfway - begin; + TIndexOffU rirHiAnchor = rirHi + anchorBitPairs - 1; + TIndexOffU riLo = halfway + 1; + TIndexOffU rirLo = halfway - begin + 1; + TIndexOffU lrSkips = anchorBitPairs; + TIndexOffU rlSkips = qlen; if(!seedOnLeft && readSeedOverhang) { lrSkips += readSeedOverhang; assert_geq(rlSkips, readSeedOverhang); @@ -4652,8 +4652,8 @@ class Seed3RefAligner : public RefAligner { (diff & 0x3c003c003c003c00llu) && (diff & 0x03c003c003c003c0llu) && (diff & 0x003c003c003c003cllu)) continue; - size_t ri = hi ? riLo : riHi; - size_t rir = hi ? rirLo : rirHi; + TIndexOffU ri = hi ? riLo : riHi; + TIndexOffU rir = hi ? rirLo : rirHi; // Could use pop count uint8_t *diff8 = reinterpret_cast(&diff); // As a first cut, see if there are too many mismatches in @@ -4788,7 +4788,7 @@ class Seed3RefAligner : public RefAligner { assert_lt(mmpos1, mmpos2); if(diffs > 2) { // Figure out the second mismatched position - ASSERT_ONLY(uint32_t origDiff3 = diff3); + ASSERT_ONLY(uint64_t origDiff3 = diff3); diff3 &= ~(0xc000000000000000llu >> (uint64_t)((savedMmpos2) << 1)); assert_neq(diff3, origDiff3); mmpos3 = 31; @@ -4872,7 +4872,7 @@ class Seed3RefAligner : public RefAligner { foundHit = false; // Skip this candidate break; } - TIndexOffU qoff = anchorBitPairs + j; + TIndexOffU qoff = (TIndexOffU)(anchorBitPairs + j); if(!seedOnLeft) { qoff += readSeedOverhang; } @@ -4920,12 +4920,12 @@ class Seed3RefAligner : public RefAligner { if((qlen - slen) > 0) { // Going left-to-right for(size_t j = 0; j < readSeedOverhang; j++) { - TIndexOffU roff = rir + slen + j; - TIndexOffU qoff = slen + j; + TIndexOffU roff = (TIndexOffU)(rir + slen + j); + TIndexOffU qoff = (TIndexOffU)(slen + j); 
if(!seedOnLeft) { assert_geq(roff, qlen); roff -= qlen; - qoff = j; + qoff = (TIndexOffU)j; } int rc = (int)ref[roff]; if(rc == 4) { diff --git a/ref_read.cpp b/ref_read.cpp index 63d8490..e2309e0 100644 --- a/ref_read.cpp +++ b/ref_read.cpp @@ -184,7 +184,7 @@ void reverseRefRecords(const vector& src, dst.clear(); { vector cur; - for(int i = src.size()-1; i >= 0; i--) { + for(int64_t i = (int64_t)src.size()-1; i >= 0; i--) { bool first = (i == (int)src.size()-1 || src[i+1].first); if(src[i].len) { cur.push_back(RefRecord(0, src[i].len, first)); @@ -192,9 +192,9 @@ void reverseRefRecords(const vector& src, } if(src[i].off) cur.push_back(RefRecord(src[i].off, 0, first)); } - for(int i = 0; i < (int)cur.size(); i++) { + for(int64_t i = 0; i < (int64_t)cur.size(); i++) { assert(cur[i].off == 0 || cur[i].len == 0); - if(i < (int)cur.size()-1 && cur[i].off != 0 && !cur[i+1].first) { + if(i < (int64_t)cur.size()-1 && cur[i].off != 0 && !cur[i+1].first) { dst.push_back(RefRecord(cur[i].off, cur[i+1].len, cur[i].first)); i++; } else { diff --git a/sam.cpp b/sam.cpp index 05eb9b7..1daf5bb 100644 --- a/sam.cpp +++ b/sam.cpp @@ -175,7 +175,7 @@ void SAMHitSink::appendAligned(ostream& ss, } } } else { - for (int i = len-1; i >= 0; -- i) { + for (int i = (int)len-1; i >= 0; -- i) { if(mms->test(i)) { nm++; // There's a mismatch at this position @@ -384,7 +384,7 @@ void SAMHitSink::reportMaxed(vector& hs, PatternSourcePerThread& p) { int strat = min(hs[i].stratum, hs[i+1].stratum); if(strat == bestStratum) { if(num == r) { - reportSamHits(hs, i, i+2, 0, hs.size()/2+1); + reportSamHits(hs, i, i+2, 0, (int)(hs.size()/2)+1); break; } num++; @@ -399,7 +399,7 @@ void SAMHitSink::reportMaxed(vector& hs, PatternSourcePerThread& p) { } assert_leq(num, hs.size()); uint32_t r = rand.nextU32() % num; - reportSamHit(hs[r], /*MAPQ*/0, /*XM:I*/hs.size()+1); + reportSamHit(hs[r], /*MAPQ*/0, /*XM:I*/(int)hs.size()+1); } } else { reportUnOrMax(p, &hs, false); diff --git a/search_exact.c 
b/search_exact.c index ca0fad2..fc5ed00 100644 --- a/search_exact.c +++ b/search_exact.c @@ -5,7 +5,7 @@ * paired and unpaired alignment. */ { - uint32_t plen = length(patsrc->bufa().patFw); + uint32_t plen = (uint32_t)length(patsrc->bufa().patFw); if(!nofw) { // Match against forward strand params.setFw(true); diff --git a/timer.h b/timer.h index c2db053..01e2a4c 100644 --- a/timer.h +++ b/timer.h @@ -31,9 +31,9 @@ class Timer { time_t passed = elapsed(); // Print the message supplied at construction time followed // by time elapsed formatted HH:MM:SS - unsigned int hours = (passed / 60) / 60; - unsigned int minutes = (passed / 60) % 60; - unsigned int seconds = (passed % 60); + unsigned int hours = (unsigned int)((passed / 60) / 60); + unsigned int minutes = (unsigned int)((passed / 60) % 60); + unsigned int seconds = (unsigned int)((passed % 60)); out << _msg << setfill ('0') << setw (2) << hours << ":" << setfill ('0') << setw (2) << minutes << ":" << setfill ('0') << setw (2) << seconds << endl;