diff --git a/src/JBrowse/Model/BAIIndex.js b/src/JBrowse/Model/BAIIndex.js
deleted file mode 100644
index d5bb7ac955..0000000000
--- a/src/JBrowse/Model/BAIIndex.js
+++ /dev/null
@@ -1,281 +0,0 @@
-define([
-           'dojo/_base/declare',
-           'JBrowse/has',
-           'JBrowse/Util',
-           'JBrowse/Model/DataView',
-           'JBrowse/Model/TabixIndex',
-           'JBrowse/Model/BGZip/VirtualOffset',
-           'JBrowse/Store/SeqFeature/BAM/Util'
-
-       ],
-       function(
-           declare,
-           has,
-           Util,
-           jDataView,
-           TabixIndex,
-           VirtualOffset,
-           BAMUtil
-       ) {
-
-var dlog = function(){ console.error.apply(console, arguments); };
-var readInt = BAMUtil.readInt;
-var readVirtualOffset = BAMUtil.readVirtualOffset;
-
-
-var BAI_MAGIC = 21578050;
-
-function lshift(num, bits) {
-    return num * Math.pow(2, bits);
-}
-function rshift(num, bits) {
-    return Math.floor(num / Math.pow(2,bits));
-}
-
-// inner class representing a chunk
-var Chunk = Util.fastDeclare({
-    constructor: function(minv,maxv,bin) {
-        this.minv = minv;
-        this.maxv = maxv;
-        this.bin = bin;
-    },
-    toUniqueString: function() {
-        return this.minv+'..'+this.maxv+' (bin '+this.bin+')';
-    },
-    toString: function() {
-        return this.toUniqueString();
-    },
-    compareTo: function( b ) {
-        return this.minv.compareTo(b.minv) || this.maxv.compareTo(b.maxv) || this.bin - b.bin;
-    },
-    compare: function( b ) {
-        return this.compareTo( b );
-    },
-    fetchedSize: function() {
-        return this.maxv.block + (1<<16) - this.minv.block + 1;
-    }
-});
-
-return declare( TabixIndex, {
-
-    _parseIndex: function( header, deferred ) {
-        if (!header) {
-            dlog("No data read from BAM index (BAI) file");
-            deferred.reject("No data read from BAM index (BAI) file");
-            return;
-        }
-
-        if( ! has('typed-arrays') ) {
-            dlog('Web browser does not support typed arrays');
-            deferred.reject('Web browser does not support typed arrays');
-            return;
-        }
-
-        var uncba = new Uint8Array(header);
-        if( readInt(uncba, 0) != BAI_MAGIC) {
-            dlog('Not a BAI file');
-            deferred.reject('Not a BAI file');
-            return;
-        }
-
-        var nref = readInt(uncba, 4);
-
-        this.indices = [];
-
-        var p = 8;
-
-        for (var ref = 0; ref < nref; ++ref) {
-            var blockStart = p;
-            var nbin = readInt(uncba, p); p += 4;
-            for (var b = 0; b < nbin; ++b) {
-                var bin = readInt(uncba, p);
-                var nchnk = readInt(uncba, p+4);
-                p += 8;
-                for( var chunkNum = 0; chunkNum < nchnk; chunkNum++ ) {
-                    var vo = readVirtualOffset( uncba, p );
-                    this._findMinAlignment( vo );
-                    p += 16;
-                }
-            }
-            var nintv = readInt(uncba, p); p += 4;
-            // as we're going through the linear index, figure out
-            // the smallest virtual offset in the indexes, which
-            // tells us where the BAM header ends
-            this._findMinAlignment( nintv ? readVirtualOffset(uncba,p) : null );
-
-            p += nintv * 8;
-            if( nbin > 0 || nintv > 0 ) {
-                this.indices[ref] = new Uint8Array(header, blockStart, p - blockStart);
-            }
-        }
-
-        this.empty = ! this.indices.length;
-        deferred.resolve();
-    },
-
-    featureCount: function(tid) {
-        var index = this.indices[tid];
-        if (!index) {
-            return -1;
-        }
-        var p = 4;
-        var nbin = readInt(index, 0);
-        var overlappingBins = function() {
-            var intBins = {};
-            var intBinsL = [this._bin_limit()+1];
-            for (var i = 0; i < intBinsL.length; ++i) {
-                intBins[intBinsL[i]] = true;
-            }
-            return intBins;
-        }.call(this);
-        for (var b = 0; b < nbin; ++b) {
-            var bin = readInt(index, p );
-            var nchnk = readInt(index, p+4);
-            p += 8;
-            if( overlappingBins[bin] ) {
-                p += 16;
-                var cs = readVirtualOffset( index, p );
-                var ce = readVirtualOffset( index, p + 8 );
-                var ch = new Chunk(cs, ce, bin);
-                return ch.minv.offset;
-            } else {
-                p += nchnk * 16;
-            }
-        }
-
-        return 0;
-    },
-
-    /**
-     * Get an array of Chunk objects for the given ref seq id and range.
-     */
-    blocksForRange: function(refId, min, max) {
-        var index = this.indices[refId];
-        if (!index) {
-            return [];
-        }
-
-        // object as { <binNumber>: true, ... } containing the bin numbers
-        // that overlap this range
-        var overlappingBins = function() {
-            var intBins = {};
-            var intBinsL = this._reg2bins(min, max);
-            for (var i = 0; i < intBinsL.length; ++i) {
-                intBins[intBinsL[i]] = true;
-            }
-            return intBins;
-        }.call(this);
-
-        // parse the chunks for the overlapping bins out of the index
-        // for this ref seq, keeping a distinction between chunks from
-        // leaf (lowest-level, smallest) bins, and chunks from other,
-        // larger bins
-        var leafChunks  = [];
-        var otherChunks = [];
-        var nbin = readInt(index, 0);
-        var p = 4;
-        for (var b = 0; b < nbin; ++b) {
-            var bin   = readInt(index, p   );
-            var nchnk = readInt(index, p+4 );
-            p += 8;
-            if( overlappingBins[bin] ) {
-                for (var c = 0; c < nchnk; ++c) {
-                    var cs = readVirtualOffset( index, p     );
-                    var ce = readVirtualOffset( index, p + 8 );
-                    ( bin < 4681 ? otherChunks : leafChunks ).push( new Chunk(cs, ce, bin) );
-                    p += 16;
-                }
-            } else {
-                p += nchnk * 16;
-            }
-        }
-
-        var lowest = function() {
-            var lowest = null;
-            var nintv  = readInt(index, p);
-            var minLin = Math.min(min>>14, nintv - 1);
-            var maxLin = Math.min(max>>14, nintv - 1);
-            for (var i = minLin; i <= maxLin; ++i) {
-                var lb = readVirtualOffset(index, p + 4 + (i * 8));
-                if( !lb )
-                    continue;
-
-                if ( ! lowest || lb.cmp( lowest ) > 0 )
-                    lowest = lb;
-            }
-            return lowest;
-        }();
-
-        // discard any chunks that come before the lowest
-        // virtualOffset that we got from the linear index
-        if( lowest ) {
-            otherChunks = function( otherChunks ) {
-                var relevantOtherChunks = [];
-                for (var i = 0; i < otherChunks.length; ++i) {
-                    var chnk = otherChunks[i];
-                    if( chnk.maxv.block >= lowest.block ) {
-                        relevantOtherChunks.push(chnk);
-                    }
-                }
-                return relevantOtherChunks;
-            }(otherChunks);
-        }
-
-        // add the leaf chunks in, and sort the chunks ascending by virtual offset
-        var allChunks = otherChunks
-            .concat( leafChunks )
-            .sort( function(c0, c1) {
-                       return c0.minv.block - c1.minv.block || c0.minv.offset - c1.minv.offset;
-                   });
-
-        // merge chunks from the same block together
-        var mergedChunks = [];
-        if( allChunks.length ) {
-            var cur = allChunks[0];
-            for (var i = 1; i < allChunks.length; ++i) {
-                var nc = allChunks[i];
-                if (nc.minv.block == cur.maxv.block /* && nc.minv.offset == cur.maxv.offset */) { // no point splitting mid-block
-                    cur = new Chunk(cur.minv, nc.maxv, 'merged');
-                } else {
-                    mergedChunks.push(cur);
-                    cur = nc;
-                }
-            }
-            mergedChunks.push(cur);
-        }
-
-        return mergedChunks;
-    },
-
-    _findMinAlignment: function( candidate ) {
-        if( candidate && ( ! this.minAlignmentVO || this.minAlignmentVO.cmp( candidate ) < 0 ) )
-            this.minAlignmentVO = candidate;
-    },
-
-    /* calculate bin given an alignment covering [beg,end) (zero-based, half-closed-half-open) */
-    _reg2bin: function( beg, end ) {
-        --end;
-        if (beg>>14 == end>>14) return ((1<<15)-1)/7 + (beg>>14);
-        if (beg>>17 == end>>17) return ((1<<12)-1)/7 + (beg>>17);
-        if (beg>>20 == end>>20) return ((1<<9)-1)/7  + (beg>>20);
-        if (beg>>23 == end>>23) return ((1<<6)-1)/7  + (beg>>23);
-        if (beg>>26 == end>>26) return ((1<<3)-1)/7  + (beg>>26);
-        return 0;
-    },
-
-    /* calculate the list of bins that may overlap with region [beg,end) (zero-based) */
-    MAX_BIN: (((1<<18)-1)/7),
-    _reg2bins: function( beg, end ) {
-        var k, list = [ 0 ];
-        --end;
-        for (k = 1    + (beg>>26); k <= 1    + (end>>26); ++k) list.push(k);
-        for (k = 9    + (beg>>23); k <= 9    + (end>>23); ++k) list.push(k);
-        for (k = 73   + (beg>>20); k <= 73   + (end>>20); ++k) list.push(k);
-        for (k = 585  + (beg>>17); k <= 585  + (end>>17); ++k) list.push(k);
-        for (k = 4681 + (beg>>14); k <= 4681 + (end>>14); ++k) list.push(k);
-        return list;
-    },
-
-    _bin_limit: function(min_shift, depth=5) {
-        return ((1 << (depth+1)*3) - 1) / 7;
-    }
-
-});
-});
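The deleted `_reg2bin`/`_reg2bins` above implement the UCSC binning scheme that BAI indices are built on: the reference is tiled by bins of 512Mbp, 64Mbp, 8Mbp, 1Mbp, 128kbp, and 16kbp, numbered 0, 1-8, 9-72, 73-584, 585-4680, and 4681-37448 respectively, and each alignment is indexed under the smallest bin that wholly contains it. A minimal standalone sketch of the forward mapping (the function name and example coordinates below are illustrative, not part of the codebase):

    // Standalone version of _reg2bin: find the smallest bin that wholly
    // contains the zero-based, half-open interval [beg, end).
    function reg2bin(beg, end) {
        --end; // convert half-open end to an inclusive coordinate
        if (beg >> 14 === end >> 14) return ((1 << 15) - 1) / 7 + (beg >> 14); // 16kbp leaf bins
        if (beg >> 17 === end >> 17) return ((1 << 12) - 1) / 7 + (beg >> 17); // 128kbp
        if (beg >> 20 === end >> 20) return ((1 << 9)  - 1) / 7 + (beg >> 20); // 1Mbp
        if (beg >> 23 === end >> 23) return ((1 << 6)  - 1) / 7 + (beg >> 23); // 8Mbp
        if (beg >> 26 === end >> 26) return ((1 << 3)  - 1) / 7 + (beg >> 26); // 64Mbp
        return 0;                                                              // 512Mbp root bin
    }

    console.log(reg2bin(10000, 16000));   // 4681: fits inside the first 16kbp leaf bin
    console.log(reg2bin(10000, 2000000)); // 9: only an 8Mbp bin contains both ends

`_reg2bins` is the inverse used at query time: it lists every bin, at every level, that could hold an alignment overlapping the query region, which is why `blocksForRange` checks `overlappingBins[bin]` while scanning the per-reference index.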
diff --git a/src/JBrowse/Model/CSIIndex.js b/src/JBrowse/Model/CSIIndex.js
index 6836cd6f16..5f37ac0a2f 100644
--- a/src/JBrowse/Model/CSIIndex.js
+++ b/src/JBrowse/Model/CSIIndex.js
@@ -3,21 +3,16 @@ define([
     'JBrowse/Util',
     'JBrowse/Model/DataView',
     'JBrowse/Model/TabixIndex',
-    'JBrowse/Model/BGZip/VirtualOffset',
-    'JBrowse/Store/SeqFeature/BAM/Util'
+    'JBrowse/Model/BGZip/VirtualOffset'
 ],
 function(
     declare,
     Util,
     jDataView,
     TabixIndex,
-    VirtualOffset,
-    BAMUtil
+    VirtualOffset
 ) {
 
-var readInt = BAMUtil.readInt;
-var readVirtualOffset = BAMUtil.readVirtualOffset;
-
 function lshift(num, bits) {
     return num * Math.pow(2, bits);
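The `lshift`/`rshift` helpers retained in CSIIndex.js (and deleted with BAIIndex.js above) exist because JavaScript's bitwise operators coerce their operands to 32-bit integers, while a BGZF virtual offset packs a 48-bit compressed-block address above a 16-bit within-block offset. Multiplying or dividing by powers of two stays exact in double precision up to 2^53. A minimal sketch of the difference (the example offset is illustrative):

    function lshift(num, bits) { return num * Math.pow(2, bits); }
    function rshift(num, bits) { return Math.floor(num / Math.pow(2, bits)); }

    var blockAddress = 5000000000;         // a plausible block address in a >4GB BAM
    console.log(blockAddress << 16);       // garbage: the value is first truncated to 32 bits
    console.log(lshift(blockAddress, 16)); // 327680000000000: the intended virtual offset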
diff --git a/src/JBrowse/Store/SeqFeature/BAM.js b/src/JBrowse/Store/SeqFeature/BAM.js
index 95755c70dd..f864dd7430 100644
--- a/src/JBrowse/Store/SeqFeature/BAM.js
+++ b/src/JBrowse/Store/SeqFeature/BAM.js
@@ -21,7 +21,7 @@ class BamSlightlyLazyFeature {
     _get_cigar() { return this.record.get('cigar')}
     _get_seq_id() { return this._store._refIdToName(this.record.sequenceId)}
     _get_qc_failed() { return this.record.isFailedQc()}
-    _get_secondary_alignment() { return this.record.isSecondary()}
+    _get_secondary_alignment() { console.log('here',this.record.isSecondary());return this.record.isSecondary()}
     _get_supplementary_alignment() { return this.record.isSupplementary()}
     _get_multi_segment_template() { return this.record.isPaired()}
     _get_multi_segment_all_correctly_aligned() { return this.record.isProperlyPaired()}
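The `record.is*()` predicates used in the hunk above (`isSecondary()`, `isSupplementary()`, `isPaired()`, and so on) each test one bit of the standard SAM FLAG field. A minimal sketch of the same checks done by hand on a raw flag value; the constant names are illustrative, but the bit values come from the SAM specification:

    var FLAGS = {
        multiSegment:    0x1,   // template has multiple segments (paired)
        properlyAligned: 0x2,   // each segment properly aligned
        secondary:       0x100, // secondary alignment
        qcFailed:        0x200, // not passing quality controls
        supplementary:   0x800  // supplementary alignment
    };

    function isSecondary(flag) { return (flag & FLAGS.secondary) !== 0; }

    console.log(isSecondary(0x163)); // true: 0x163 has the 0x100 bit set
    console.log(isSecondary(0x63));  // false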
diff --git a/src/JBrowse/Store/SeqFeature/BAM/File.js b/src/JBrowse/Store/SeqFeature/BAM/File.js
deleted file mode 100644
index eac0cea0cb..0000000000
--- a/src/JBrowse/Store/SeqFeature/BAM/File.js
+++ /dev/null
@@ -1,318 +0,0 @@
-define( [
-            'dojo/_base/declare',
-            'dojo/_base/array',
-            'JBrowse/has',
-            'JBrowse/Util',
-            'JBrowse/Errors',
-            'JBrowse/Store/LRUCache',
-            'JBrowse/Model/BAIIndex',
-            'JBrowse/Model/CSIIndex',
-            'JBrowse/Model/BGZip/BGZBlob',
-            './Util',
-            './LazyFeature'
-        ],
-        function(
-            declare,
-            array,
-            has,
-            Util,
-            Errors,
-            LRUCache,
-            BAIIndex,
-            CSIIndex,
-            BGZBlob,
-            BAMUtil,
-            BAMFeature
-        ) {
-
-var BAM_MAGIC = 21840194;
-
-var dlog = function(){ console.error.apply(console, arguments); };
-
-
-var readInt = BAMUtil.readInt;
-var readVirtualOffset = BAMUtil.readVirtualOffset;
-
-var BamFile = declare( null,
-
-
-/**
- * @lends JBrowse.Store.SeqFeature.BAM.File
- */
-{
-
-    /**
-     * Low-level BAM file reading code.
-     *
-     * Adapted by Robert Buels from bam.js in the Dalliance Genome
-     * Explorer which is copyright Thomas Down 2006-2010
-     * @constructs
-     */
-    constructor: function( args ) {
-        this.store = args.store;
-        this.data  = args.data;
-
-        if(args.bai) {
-            this.index = new BAIIndex({ blob: args.bai, browser: args.browser });
-        } else if(args.csi) {
-            this.index = new CSIIndex({ blob: new BGZBlob( args.csi ), browser: args.browser } );
-        }
-
-        this.chunkSizeLimit = args.chunkSizeLimit || 5000000;
-    },
-
-    init: function( args ) {
-        var bam = this;
-        var successCallback = args.success || function() {};
-        var failCallback = args.failure || function(e) { console.error(e, e.stack); };
-
-        this.index.load().then(function() {
-            bam._readBAMheader( function() {
-                successCallback();
-            }, failCallback );
-        }, failCallback);
-    },
-
-    _readBAMheader: function( successCallback, failCallback ) {
-        var thisB = this;
-        // We have the virtual offset of the first alignment
-        // in the file.  Cannot completely determine how
-        // much of the first part of the file to fetch to get just
-        // up to that, since the file is compressed.  Thus, fetch
-        // up to the start of the BGZF block that the first
-        // alignment is in, plus 64KB, which should get us that whole
-        // BGZF block, assuming BGZF blocks are no bigger than 64KB.
-        thisB.data.read(
-            0,
-            thisB.index.minAlignmentVO ? thisB.index.minAlignmentVO.block + 65535 : undefined,
-            function(r) {
-                try {
-                    var uncba;
-                    try {
-                        uncba = new Uint8Array( BAMUtil.unbgzf(r) );
-                    } catch(e) {
-                        throw new Error( "Could not uncompress BAM data. Is it compressed correctly?" );
-                    }
-
-                    if( readInt(uncba, 0) != BAM_MAGIC)
-                        throw new Error('Not a BAM file');
-
-                    var headLen = readInt(uncba, 4);
-
-                    thisB._readRefSeqs( headLen+8, 65536*4, successCallback, failCallback );
-                } catch(e) {
-                    dlog( ''+e );
-                    failCallback( ''+e );
-                }
-            },
-            failCallback
-        );
-    },
-
-    _readRefSeqs: function( start, refSeqBytes, successCallback, failCallback ) {
-        var thisB = this;
-        // have to do another request, because sometimes the
-        // minAlignmentVO is just flat wrong.
-        // if headLen is not too big, this will just be in the
-        // global file cache
-        thisB.data.read( 0, start+refSeqBytes,
-            function(r) {
-                var unc = BAMUtil.unbgzf(r);
-                var uncba = new Uint8Array(unc);
-
-                var nRef = readInt(uncba, start );
-                var p = start + 4;
-
-                thisB.chrToIndex = {};
-                thisB.indexToChr = [];
-                for (var i = 0; i < nRef; ++i) {
-                    var lName = readInt(uncba, p);
-                    var name = '';
-                    for (var j = 0; j < lName-1; ++j) {
-                        name += String.fromCharCode(uncba[p + 4 + j]);
-                    }
-
-                    var lRef = readInt(uncba, p + lName + 4);
-                    //console.log(name + ': ' + lRef);
-                    thisB.chrToIndex[ thisB.store.browser.regularizeReferenceName( name ) ] = i;
-                    thisB.indexToChr.push({ name: name, length: lRef });
-
-                    p = p + 8 + lName;
-                    if( p > uncba.length ) {
-                        // we've gotten to the end of the data without
-                        // finishing reading the ref seqs, need to fetch a
-                        // bigger chunk and try again.  :-(
-                        refSeqBytes *= 2;
-                        console.warn( 'BAM header is very big. Re-fetching '+refSeqBytes+' bytes.' );
-                        thisB._readRefSeqs( start, refSeqBytes, successCallback, failCallback );
-                        return;
-                    }
-                }
-
-                successCallback();
-
-            }, failCallback );
-    },
-
-    fetch: function(chr, min, max, featCallback, endCallback, errorCallback ) {
-
-        chr = this.store.browser.regularizeReferenceName( chr );
-
-        var chrId = this.chrToIndex && this.chrToIndex[chr];
-        var chunks;
-        if( !( chrId >= 0 ) ) {
-            chunks = [];
-        } else {
-            chunks = this.index.blocksForRange(chrId, min, max, true);
-            if (!chunks) {
-                errorCallback( new Errors.Fatal('Error in index fetch') );
-            }
-        }
-
-        // toString function is used by the cache for making cache keys
-        chunks.toString = function() {
-            return this.join(', ');
-        };
-
-        try {
-            this._fetchChunkFeatures(
-                chunks,
-                chrId,
-                min,
-                max,
-                featCallback,
-                endCallback,
-                errorCallback
-            );
-        } catch( e ) {
-            errorCallback( e );
-        }
-    },
-
-    _fetchChunkFeatures: function( chunks, chrId, min, max, featCallback, endCallback, errorCallback ) {
-        var thisB = this;
-
-        if( ! chunks.length ) {
-            endCallback();
-            return;
-        }
-
-        var chunksProcessed = 0;
-
-        var cache = this.featureCache = this.featureCache || new LRUCache({
-            name: 'bamFeatureCache',
-            fillCallback: dojo.hitch( this, '_readChunk' ),
-            sizeFunction: function( features ) {
-                return features.length;
-            },
-            maxSize: 100000 // cache up to 100,000 BAM features
-        });
-
-        // check the chunks for any that are over the size limit.  if
-        // any are, don't fetch any of them
-        for( var i = 0; i < chunks.length; i++ ) {
-            var size = chunks[i].fetchedSize();
-            if( size > this.chunkSizeLimit ) {
-                errorCallback( new Errors.DataOverflow('Too many BAM features. BAM chunk size '+Util.commifyNumber(size)+' bytes exceeds chunkSizeLimit of '+Util.commifyNumber(this.chunkSizeLimit)+'.' ) );
-                return;
-            }
-        }
-
-        var haveError;
-        var pastStart;
-        array.forEach( chunks, function( c ) {
-            cache.get( c, function( f, e ) {
-                if( e && !haveError )
-                    errorCallback(e);
-                if(( haveError = haveError || e )) {
-                    return;
-                }
-
-                for( var i = 0; i < f.length; i++ ) {
-                    var feature = f[i];
-                    if( feature._refID == chrId ) {
-                        // on the right ref seq
-                        if( feature.get('start') > max ) // past end of range, can stop iterating
-                            break;
-                        else if( feature.get('end') >= min ) // must be in range
-                            featCallback( feature );
-                    }
-                }
-                if( ++chunksProcessed == chunks.length ) {
-                    endCallback();
-                }
-            });
-        });
-
-    },
-
-    _readChunk: function( chunk, callback ) {
-        var thisB = this;
-        var features = [];
-        // console.log('chunk '+chunk+' size ',Util.humanReadableNumber(size));
-
-        thisB.data.read( chunk.minv.block, chunk.fetchedSize(), function(r) {
-            try {
-                var data = BAMUtil.unbgzf(r, chunk.maxv.block - chunk.minv.block + 1);
-                thisB.readBamFeatures( new Uint8Array(data), chunk.minv.offset, features, callback );
-            } catch( e ) {
-                callback( null, new Errors.Fatal(e) );
-            }
-        }, function( e ) {
-            callback( null, new Errors.Fatal(e) );
-        });
-    },
-
-    readBamFeatures: function(ba, blockStart, sink, callback ) {
-        var that = this;
-        var featureCount = 0;
-
-        var maxFeaturesWithoutYielding = 300;
-
-        while ( true ) {
-            if( blockStart >= ba.length ) {
-                // if we're done, call the callback and return
-                callback( sink );
-                return;
-            }
-            else if( featureCount <= maxFeaturesWithoutYielding ) {
-                // if we haven't yet read too many features this cycle, read another one
-                var blockSize = readInt(ba, blockStart);
-                var blockEnd = blockStart + 4 + blockSize - 1;
-
-                // only try to read the feature if we have all the bytes for it
-                if( blockEnd < ba.length ) {
-                    var feature = new BAMFeature({
-                        store: this.store,
-                        file: this,
-                        bytes: { byteArray: ba, start: blockStart, end: blockEnd }
-                    });
-                    sink.push(feature);
-                    featureCount++;
-                }
-
-                blockStart = blockEnd+1;
-            }
-            else {
-                // if we're not done but we've read a good chunk of
-                // features, put the rest of our work into a timeout to continue
-                // later, avoiding blocking any UI stuff that's going on
-                window.setTimeout( function() {
-                    that.readBamFeatures( ba, blockStart, sink, callback );
-                }, 1);
-                return;
-            }
-        }
-    }
-
-});
-
-return BamFile;
-
-});
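The `readBamFeatures` loop above uses a classic cooperative-yielding pattern: parse a bounded batch of records, then reschedule the remainder with `setTimeout` so the browser's event loop (and the UI) stays responsive. A minimal sketch of the same pattern, independent of BAM; `processInBatches` and `parseOne` are illustrative stand-ins for the per-record work, not functions from this codebase:

    function processInBatches(items, parseOne, done, batchSize) {
        batchSize = batchSize || 300;
        var i = 0;
        function batch() {
            var stop = Math.min(i + batchSize, items.length);
            for (; i < stop; i++)
                parseOne(items[i]);
            if (i < items.length)
                setTimeout(batch, 1); // yield to the event loop, then continue
            else
                done();
        }
        batch();
    }

    processInBatches([1, 2, 3], function(x) { console.log(x); },
                     function() { console.log('done'); });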
diff --git a/src/JBrowse/Store/SeqFeature/BAM/LazyFeature.js b/src/JBrowse/Store/SeqFeature/BAM/LazyFeature.js
deleted file mode 100644
index 694bd53311..0000000000
--- a/src/JBrowse/Store/SeqFeature/BAM/LazyFeature.js
+++ /dev/null
@@ -1,481 +0,0 @@
-define( ['dojo/_base/array',
-         'JBrowse/Util',
-         'JBrowse/Digest/Crc32',
-         './Util',
-         'JBrowse/Model/SimpleFeature'
-        ],
-        function( array, Util, Crc32, BAMUtil, SimpleFeature ) {
-
-var SEQRET_DECODER = ['=', 'A', 'C', 'x', 'G', 'x', 'x', 'x', 'T', 'x', 'x', 'x', 'x', 'x', 'x', 'N'];
-var CIGAR_DECODER  = ['M', 'I', 'D', 'N', 'S', 'H', 'P', '=', 'X', '?', '?', '?', '?', '?', '?', '?'];
-
-var readInt   = BAMUtil.readInt;
-var readShort = BAMUtil.readShort;
-var readFloat = BAMUtil.readFloat;
-var readByte  = BAMUtil.readByte;
-
-var Feature = Util.fastDeclare(
-{
-    constructor: function( args ) {
-        this.file = args.file;
-        this.data = {
-            type: 'match',
-            source: args.store.source
-        };
-        this.bytes = {
-            start: args.bytes.start,
-            end: args.bytes.end,
-            byteArray: args.bytes.byteArray
-        };
-
-        this._coreParse();
-    },
-
-    get: function( field ) {
-        return this._get( field.toLowerCase() );
-    },
-
-    // same as get(), except requires lower-case arguments.  used
-    // internally to save lots of calls to field.toLowerCase()
-    _get: function( field ) {
-        return field in this.data ? this.data[field] : // have we already parsed it out?
-            function(field) {
-                var v = this.data[field] =
-                    this[field]            ? this[field]()            : // maybe we have a special parser for it
-                    this._flagMasks[field] ? this._parseFlag( field ) : // or is it a flag?
-                                             this._parseTag( field );   // otherwise, look for it in the tags
-                return v;
-            }.call(this,field);
-    },
-
-    tags: function() {
-        return this._get('_tags');
-    },
-
-    _tags: function() {
-        this._parseAllTags();
-
-        var tags = [ 'seq', 'seq_reverse_complemented', 'unmapped','qc_failed','duplicate','secondary_alignment','supplementary_alignment' ];
-
-        if( ! this._get('unmapped') )
-            tags.push( 'start', 'end', 'strand', 'score', 'qual', 'MQ', 'CIGAR', 'length_on_ref', 'template_length' );
-        if( this._get('multi_segment_template') ) {
-            tags.push( 'multi_segment_all_correctly_aligned',
-                       'multi_segment_next_segment_unmapped',
-                       'multi_segment_next_segment_reversed',
-                       'multi_segment_first',
-                       'multi_segment_last',
-                       'next_segment_position'
-                     );
-        }
-        tags = tags.concat( this._tagList || [] );
-
-        var d = this.data;
-        for( var k in d ) {
-            if( d.hasOwnProperty( k ) && k[0] != '_'
-                && k != 'multi_segment_all_aligned'
-                && k != 'next_seq_id')
-                tags.push( k );
-        }
-
-        var seen = {};
-        tags = array.filter( tags, function(t) {
-            if( t in this.data && this.data[t] === undefined )
-                return false;
-
-            var lt = t.toLowerCase();
-            var s = seen[lt];
-            seen[lt] = true;
-            return ! s;
-        },this);
-
-        return tags;
-    },
-
-    parent: function() {
-        return undefined;
-    },
-
-    children: function() {
-        return this._get('subfeatures');
-    },
-
-    id: function() {
-        return Crc32.crc32_raw(this.bytes.byteArray, this.bytes.start, this.bytes.end);
-    },
-
-    multi_segment_all_aligned: function() {
-        return this._get('multi_segment_all_correctly_aligned');
-    },
-
-    // special parsers
- */ - mq: function() { - var mq = (this._get('_bin_mq_nl') & 0xff00) >> 8; - return mq == 255 ? undefined : mq; - }, - score: function() { - return this._get('mq'); - }, - qual: function() { - if( this._get('unmapped') ) - return undefined; - - var qseq = []; - var byteArray = this.bytes.byteArray; - var p = this.bytes.start + 36 + this._get('_l_read_name') + this._get('_n_cigar_op')*4 + this._get('_seq_bytes'); - var lseq = this._get('seq_length'); - for (var j = 0; j < lseq; ++j) { - qseq.push( byteArray[p + j] ); - } - return qseq.join(' '); - }, - strand: function() { - return this._get('seq_reverse_complemented') ? -1 : 1; - }, - multi_segment_next_segment_strand: function() { - if(this._get('multi_segment_next_segment_unmapped')) - return undefined; - return this._get('multi_segment_next_segment_reversed') ? -1 : 1; - }, - - /** - * Length in characters of the read name. - */ - _l_read_name: function() { - return this._get('_bin_mq_nl') & 0xff; - }, - /** - * number of bytes in the sequence field - */ - _seq_bytes: function() { - return (this._get('seq_length') + 1) >> 1; - }, - seq: function() { - var seq = ''; - var byteArray = this.bytes.byteArray; - var p = this.bytes.start + 36 + this._get('_l_read_name') + this._get('_n_cigar_op')*4; - var seqBytes = this._get('_seq_bytes'); - for (var j = 0; j < seqBytes; ++j) { - var sb = byteArray[p + j]; - seq += SEQRET_DECODER[(sb & 0xf0) >> 4]; - if (seq.length < this.get('seq_length')) - seq += SEQRET_DECODER[(sb & 0x0f)]; - } - return seq; - }, - name: function() { - return this._get('_read_name'); - }, - _read_name: function() { - var byteArray = this.bytes.byteArray; - var readName = ''; - var nl = this._get('_l_read_name'); - var p = this.bytes.start + 36; - for (var j = 0; j < nl-1; ++j) { - readName += String.fromCharCode(byteArray[p+j]); - } - return readName; - }, - _n_cigar_op: function() { - return this._get('_flag_nc') & 0xffff; - }, - cigar: function() { - if( this._get('unmapped') ) - return undefined; - - var byteArray = this.bytes.byteArray; - var numCigarOps = this._get('_n_cigar_op'); - var p = this.bytes.start + 36 + this._get('_l_read_name'); - var cigar = ''; - var lref = 0; - for (var c = 0; c < numCigarOps; ++c) { - var cigop = readInt(byteArray, p); - var lop = cigop >> 4; - var op = CIGAR_DECODER[cigop & 0xf]; - cigar += lop + op; - - // soft clip, hard clip, and insertion don't count toward - // the length on the reference - if( op != 'H' && op != 'S' && op != 'I' ) - lref += lop; - - p += 4; - } - - this.data.length_on_ref = lref; - return cigar; - }, - next_segment_position: function() { - // NOTE: next_segment_position is a JBrowse location string, so - // it is in 1-based coordinates. Thus, we add 1 to the position. 
-        var nextSegment = this.file.indexToChr[this._get('_next_refid')];
-        if( nextSegment )
-            return nextSegment.name+':'+(parseInt(this._get('_next_pos'))+1);
-        else
-            return undefined;
-    },
-    subfeatures: function() {
-        var cigar = this._get('cigar');
-        if( cigar )
-            return this._cigarToSubfeats( cigar );
-
-        return undefined;
-    },
-    length_on_ref: function() {
-        var c = this._get('cigar'); // the length_on_ref is set as a
-                                    // side effect of the CIGAR parsing
-        return this.data.length_on_ref;
-    },
-    _flags: function() {
-        return (this.get('_flag_nc') & 0xffff0000) >> 16;
-    },
-    end: function() {
-        return this._get('start') + ( this._get('length_on_ref') || this._get('seq_length') || undefined );
-    },
-
-    seq_id: function() {
-        if( this._get('unmapped') )
-            return undefined;
-
-        return ( this.file.indexToChr[ this._refID ] || {} ).name;
-    },
-
-    next_seq_id: function() {
-        if( this._get('multi_segment_next_segment_unmapped') )
-            return undefined;
-        return ( this.file.indexToChr[this._get('_next_refid')] || {} ).name;
-    },
-
-    _bin_mq_nl: function() {
-        return readInt( this.bytes.byteArray, this.bytes.start + 12 );
-    },
-    _flag_nc: function() {
-        return readInt( this.bytes.byteArray, this.bytes.start + 16 );
-    },
-    seq_length: function() {
-        return readInt( this.bytes.byteArray, this.bytes.start + 20 );
-    },
-    _next_refid: function() {
-        return readInt( this.bytes.byteArray, this.bytes.start + 24 );
-    },
-    _next_pos: function() {
-        return readInt( this.bytes.byteArray, this.bytes.start + 28 );
-    },
-    template_length: function() {
-        return readInt( this.bytes.byteArray, this.bytes.start + 32 );
-    },
-
-    /**
-     * parse the core data: ref ID and start
-     */
-    _coreParse: function() {
-        this._refID     = readInt( this.bytes.byteArray, this.bytes.start + 4 );
-        this.data.start = readInt( this.bytes.byteArray, this.bytes.start + 8 );
-    },
-
-    /**
-     * Get the value of a tag, parsing the tags as far as necessary.
-     * Only called if we have not already parsed that field.
-     */
-    _parseTag: function( tagName ) {
-        // if all of the tags have been parsed and we're still being
-        // called, we already know that we have no such tag, because
-        // it would already have been cached.
-        if( this._allTagsParsed )
-            return undefined;
-
-        this._tagList = this._tagList || [];
-        var byteArray = this.bytes.byteArray;
-        var p = this._tagOffset || this.bytes.start + 36 + this._get('_l_read_name') + this._get('_n_cigar_op')*4 + this._get('_seq_bytes') + this._get('seq_length');
-
-        var blockEnd = this.bytes.end;
-        while( p < blockEnd && lcTag != tagName ) {
-            var tag   = String.fromCharCode( byteArray[p], byteArray[ p+1 ] );
-            var lcTag = tag.toLowerCase();
-            var type  = String.fromCharCode( byteArray[ p+2 ] );
-            p += 3;
-
-            var value;
-            switch( type.toLowerCase() ) {
-            case 'a':
-                value = String.fromCharCode( byteArray[p] );
-                p += 1;
-                break;
-            case 'i':
-                value = readInt(byteArray, p );
-                p += 4;
-                break;
-            case 'c':
-                value = byteArray[p];
-                p += 1;
-                break;
-            case 's':
-                value = readShort(byteArray, p);
-                p += 2;
-                break;
-            case 'f':
-                value = readFloat( byteArray, p );
-                p += 4;
-                break;
-            case 'z':
-            case 'h':
-                value = '';
-                while( p <= blockEnd ) {
-                    var cc = byteArray[p++];
-                    if( cc == 0 ) {
-                        break;
-                    }
-                    else {
-                        value += String.fromCharCode(cc);
-                    }
-                }
-                break;
-            case 'b':
-                value = '';
-                var cc = byteArray[p++];
-                var Btype = String.fromCharCode(cc);
-                if( Btype == 'i' || Btype == 'I' ) {
-                    var limit = readInt( byteArray, p );
-                    p += 4;
-                    for( var k = 0; k < limit; k++ ) {
-                        value += readInt( byteArray, p );
-                        if(k+1