Commit db0f900

Merge a957395 into beec466
tdanford committed Oct 14, 2014
2 parents beec466 + a957395

Showing 43 changed files with 88 additions and 88 deletions.
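As far as the hunks shown below go, this is one mechanical rename applied across all 43 files: the redundant ADAM prefix is dropped from the context and RDD-function classes under org.bdgenomics.adam.rdd (ADAMAlignmentRecordContext becomes AlignmentRecordContext, ADAMVariationContext becomes VariationContext, ADAMFeaturesContext becomes FeaturesContext, and so on), the adam prefix is dropped from several Rod methods, and every import, implicit, and call site is updated to match. The symmetric 88/88 line count reflects that only names change.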
@@ -19,7 +19,7 @@ package org.bdgenomics.adam.apis.java

 import org.apache.spark.api.java.JavaRDD
 import org.bdgenomics.adam.rdd.ADAMContext._
-import org.bdgenomics.adam.rdd.read.ADAMAlignmentRecordContext._
+import org.bdgenomics.adam.rdd.read.AlignmentRecordContext._
 import org.bdgenomics.formats.avro._
 import parquet.hadoop.metadata.CompressionCodecName
@@ -19,7 +19,7 @@ package org.bdgenomics.adam.cli

 import org.bdgenomics.formats.avro.Genotype
 import org.bdgenomics.adam.rdd.ADAMContext._
-import org.bdgenomics.adam.rdd.variation.ADAMVariationContext._
+import org.bdgenomics.adam.rdd.variation.VariationContext._
 import org.kohsuke.args4j.{ Option => Args4jOption, Argument }
 import org.apache.spark.rdd.RDD
 import org.apache.spark.{ Logging, SparkContext }
@@ -22,7 +22,7 @@ import org.apache.hadoop.mapreduce.Job
 import org.apache.spark.{ SparkContext, Logging }
 import org.apache.spark.rdd.RDD
 import org.bdgenomics.adam.rdd.ADAMContext._
-import org.bdgenomics.adam.rdd.read.ADAMAlignmentRecordContext._
+import org.bdgenomics.adam.rdd.read.AlignmentRecordContext._
 import org.bdgenomics.adam.util.ParquetLogger
 import org.bdgenomics.formats.avro.AlignmentRecord
 import org.kohsuke.args4j.{ Argument, Option => Args4jOption }
@@ -21,8 +21,8 @@ import org.apache.hadoop.mapreduce.Job
 import org.apache.spark.{ Logging, SparkContext }
 import org.bdgenomics.adam.rdd.ADAMContext._
 import org.bdgenomics.adam.rdd.ADAMContext
-import org.bdgenomics.adam.rdd.contig.ADAMNucleotideContigFragmentContext._
-import org.bdgenomics.adam.rdd.contig.ADAMNucleotideContigFragmentContext
+import org.bdgenomics.adam.rdd.contig.NucleotideContigFragmentContext._
+import org.bdgenomics.adam.rdd.contig.NucleotideContigFragmentContext
 import org.bdgenomics.formats.avro.AlignmentRecord
 import org.kohsuke.args4j.{ Argument, Option => Args4jOption }
@@ -53,7 +53,7 @@ class Fasta2ADAM(protected val args: Fasta2ADAMArgs) extends ADAMSparkCommand[Fa

   def run(sc: SparkContext, job: Job) {
     log.info("Loading FASTA data from disk.")
-    val adamFasta = new ADAMNucleotideContigFragmentContext(sc).adamSequenceLoad(args.fastaFile, args.fragmentLength)
+    val adamFasta = new NucleotideContigFragmentContext(sc).adamSequenceLoad(args.fastaFile, args.fragmentLength)
     if (args.verbose) {
       println("FASTA contains:")
       println(adamFasta.adamGetSequenceDictionary())
@@ -22,7 +22,7 @@ import org.apache.spark.SparkContext
 import org.apache.spark.rdd.RDD
 import org.bdgenomics.adam.models.BaseFeature
 import org.bdgenomics.adam.rdd.ADAMContext._
-import org.bdgenomics.adam.rdd.features.ADAMFeaturesContext._
+import org.bdgenomics.adam.rdd.features.FeaturesContext._
 import org.bdgenomics.formats.avro.Feature
 import org.kohsuke.args4j.Argument
@@ -22,7 +22,7 @@ import org.apache.spark.SparkContext
 import org.apache.spark.rdd.RDD
 import org.bdgenomics.adam.projections.{ Projection, AlignmentRecordField }
 import org.bdgenomics.adam.rdd.ADAMContext._
-import org.bdgenomics.adam.rdd.read.ADAMAlignmentRecordContext._
+import org.bdgenomics.adam.rdd.read.AlignmentRecordContext._
 import org.bdgenomics.formats.avro.AlignmentRecord
 import org.kohsuke.args4j.Argument
@@ -25,7 +25,7 @@ import org.kohsuke.args4j.{ Argument, Option => Args4jOption }
 import org.bdgenomics.adam.predicates.GenotypeRecordPASSPredicate
 import org.bdgenomics.adam.projections.GenotypeField
 import org.bdgenomics.adam.rdd.ADAMContext._
-import org.bdgenomics.adam.rdd.variation.ADAMVariationContext._
+import org.bdgenomics.adam.rdd.variation.VariationContext._
 import org.bdgenomics.adam.rdd.variation.ConcordanceTable
 import org.bdgenomics.formats.avro.Genotype
@@ -20,7 +20,7 @@ package org.bdgenomics.adam.cli
 import org.apache.hadoop.mapreduce.Job
 import org.apache.spark.rdd.RDD
 import org.bdgenomics.adam.models._
-import org.bdgenomics.adam.rdd.features.ADAMFeaturesContext._
+import org.bdgenomics.adam.rdd.features.FeaturesContext._
 import org.bdgenomics.adam.models.GeneContext._
 import org.bdgenomics.adam.rdd.features.GeneFeatureRDD._
 import org.bdgenomics.formats.avro.Feature
@@ -23,7 +23,7 @@ import org.apache.spark.rdd.RDD
 import org.bdgenomics.adam.projections.AlignmentRecordField._
 import org.bdgenomics.adam.projections.Projection
 import org.bdgenomics.adam.rdd.ADAMContext._
-import org.bdgenomics.adam.rdd.read.ADAMAlignmentRecordContext._
+import org.bdgenomics.adam.rdd.read.AlignmentRecordContext._
 import org.bdgenomics.formats.avro.AlignmentRecord
 import org.kohsuke.args4j.{ Argument, Option }
@@ -22,8 +22,8 @@ import org.apache.spark.SparkContext
 import org.apache.spark.rdd.RDD
 import org.bdgenomics.adam.predicates.UniqueMappedReadPredicate
 import org.bdgenomics.adam.rdd.ADAMContext._
-import org.bdgenomics.adam.rdd.pileup.ADAMPileupContext._
-import org.bdgenomics.adam.rdd.read.ADAMAlignmentRecordContext._
+import org.bdgenomics.adam.rdd.pileup.PileupContext._
+import org.bdgenomics.adam.rdd.read.AlignmentRecordContext._
 import org.bdgenomics.formats.avro.{ AlignmentRecord, Pileup }
 import org.kohsuke.args4j.{ Option => option, Argument }
@@ -22,8 +22,8 @@ import org.apache.spark.{ SparkContext, Logging }
 import org.apache.spark.rdd.RDD
 import org.bdgenomics.adam.models.SnpTable
 import org.bdgenomics.adam.rdd.ADAMContext._
-import org.bdgenomics.adam.rdd.read.ADAMAlignmentRecordContext._
-import org.bdgenomics.adam.rdd.variation.ADAMVariationContext._
+import org.bdgenomics.adam.rdd.read.AlignmentRecordContext._
+import org.bdgenomics.adam.rdd.variation.VariationContext._
 import org.bdgenomics.adam.rich.RichVariant
 import org.bdgenomics.formats.avro.AlignmentRecord
 import org.kohsuke.args4j.{ Argument, Option => Args4jOption }
@@ -19,7 +19,7 @@ package org.bdgenomics.adam.cli

 import org.bdgenomics.adam.models.{ SequenceDictionary, VariantContext }
 import org.bdgenomics.adam.rdd.ADAMContext._
-import org.bdgenomics.adam.rdd.variation.ADAMVariationContext._
+import org.bdgenomics.adam.rdd.variation.VariationContext._
 import org.apache.hadoop.mapreduce.Job
 import org.apache.spark.{ Logging, SparkContext }
 import org.apache.spark.rdd.RDD
@@ -36,7 +36,7 @@ package org.bdgenomics.adam.cli
 import org.apache.hadoop.mapreduce.Job
 import org.apache.spark.{ Logging, SparkContext }
 import org.kohsuke.args4j.{ Argument, Option => Args4jOption }
-import org.bdgenomics.adam.rdd.variation.ADAMVariationContext._
+import org.bdgenomics.adam.rdd.variation.VariationContext._
 import org.bdgenomics.adam.rdd.ADAMContext._
 import org.bdgenomics.formats.avro._
 import org.apache.spark.rdd.RDD
@@ -24,7 +24,7 @@ import org.bdgenomics.adam.models.{ OrderedTrackedLayout, ReferenceRegion }
 import org.bdgenomics.adam.projections.AlignmentRecordField._
 import org.bdgenomics.adam.projections.Projection
 import org.bdgenomics.adam.rdd.ADAMContext._
-import org.bdgenomics.adam.rdd.read.ADAMAlignmentRecordContext._
+import org.bdgenomics.adam.rdd.read.AlignmentRecordContext._
 import org.bdgenomics.adam.rich.ReferenceMappingContext.AlignmentRecordReferenceMapping
 import org.bdgenomics.formats.avro.AlignmentRecord
 import org.fusesource.scalate.TemplateEngine
@@ -22,7 +22,7 @@ import org.apache.spark.rdd.RDD
 import org.bdgenomics.adam.models._
 import org.bdgenomics.adam.rdd.ADAMContext._
 import org.bdgenomics.adam.rdd.read.realignment.IndelRealignmentTarget
-import org.bdgenomics.adam.rdd.variation.ADAMVariationContext._
+import org.bdgenomics.adam.rdd.variation.VariationContext._
 import org.bdgenomics.adam.rich.RichAlignmentRecord

 class ConsensusGeneratorFromKnowns(file: String, sc: SparkContext) extends ConsensusGenerator {
@@ -20,7 +20,7 @@ package org.bdgenomics.adam.models
 import org.apache.spark.{ Logging, SparkContext }
 import org.apache.spark.SparkContext._
 import org.apache.spark.rdd.RDD
-import org.bdgenomics.adam.rdd.variation.ADAMVariationContext._
+import org.bdgenomics.adam.rdd.variation.VariationContext._
 import org.bdgenomics.formats.avro.Variant

 class IndelTable(private val table: Map[String, Iterable[Consensus]]) extends Serializable with Logging {
@@ -23,7 +23,7 @@ import org.bdgenomics.adam.rich.RichVariant._

 /**
  * Note: VariantContext inherits its name from the Picard VariantContext, and is not related to the SparkContext object.
- * If you're looking for the latter, see [[org.bdgenomics.adam.rdd.variation.ADAMVariationContext]]
+ * If you're looking for the latter, see [[org.bdgenomics.adam.rdd.variation.VariationContext]]
  */

 object VariantContext {
@@ -33,7 +33,7 @@ import org.bdgenomics.adam.instrumentation.ADAMMetricsListener
 import org.bdgenomics.adam.models._
 import org.bdgenomics.adam.predicates.ADAMPredicate
 import org.bdgenomics.adam.projections.{ AlignmentRecordField, NucleotideContigFragmentField, Projection }
-import org.bdgenomics.adam.rdd.read.ADAMAlignmentRecordContext
+import org.bdgenomics.adam.rdd.read.AlignmentRecordContext
 import org.bdgenomics.adam.rich.RichAlignmentRecord
 import org.bdgenomics.adam.util.HadoopUtil
 import org.bdgenomics.formats.avro.{ AlignmentRecord, NucleotideContigFragment, Pileup }
@@ -195,7 +195,7 @@ class ADAMContext(val sc: SparkContext) extends Serializable with Logging {
       if (projection.isDefined) {
         log.warn("Projection is ignored when loading a BAM file")
       }
-      val reads = ADAMAlignmentRecordContext.adamBamLoad(sc, filePath).asInstanceOf[RDD[T]]
+      val reads = AlignmentRecordContext.adamBamLoad(sc, filePath).asInstanceOf[RDD[T]]
       if (predicate.isDefined) {
         val predicateClass = predicate.get
         val filter = predicateClass.newInstance()
@@ -208,7 +208,7 @@ class ADAMContext(val sc: SparkContext) extends Serializable with Logging {
       if (projection.isDefined) {
         log.warn("Projection is ignored when loading an interleaved FASTQ file")
       }
-      val reads = ADAMAlignmentRecordContext.adamInterleavedFastqLoad(sc, filePath).asInstanceOf[RDD[T]]
+      val reads = AlignmentRecordContext.adamInterleavedFastqLoad(sc, filePath).asInstanceOf[RDD[T]]
       if (predicate.isDefined) {
         val predicateClass = predicate.get
         val filter = predicateClass.newInstance()
@@ -221,7 +221,7 @@ class ADAMContext(val sc: SparkContext) extends Serializable with Logging {
       if (projection.isDefined) {
         log.warn("Projection is ignored when loading a FASTQ file")
       }
-      val reads = ADAMAlignmentRecordContext.adamUnpairedFastqLoad(sc, filePath).asInstanceOf[RDD[T]]
+      val reads = AlignmentRecordContext.adamUnpairedFastqLoad(sc, filePath).asInstanceOf[RDD[T]]
       if (predicate.isDefined) {
         val predicateClass = predicate.get
         val filter = predicateClass.newInstance()
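The three truncated hunks above share one load-then-filter shape: the renamed AlignmentRecordContext loader produces the RDD, then any configured ADAMPredicate class is instantiated reflectively and applied. A minimal sketch of that shape, with a hypothetical helper name, assuming ADAMPredicate[T] exposes an apply(RDD[T]): RDD[T] method (the hunks elide the application line):

import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.predicates.ADAMPredicate

// Hypothetical helper mirroring the pattern in the hunks above:
// instantiate the optional predicate class reflectively, then filter.
def filterIfDefined[T](reads: RDD[T],
                       predicate: Option[Class[_ <: ADAMPredicate[T]]]): RDD[T] =
  predicate match {
    case Some(predicateClass) =>
      val filter = predicateClass.newInstance() // reflective instantiation, as in the diff
      filter(reads)                             // assumes apply(RDD[T]): RDD[T]
    case None => reads
  }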
@@ -25,15 +25,15 @@ import org.bdgenomics.adam.converters.FastaConverter
 import org.bdgenomics.adam.rdd.ADAMContext._
 import org.bdgenomics.formats.avro.NucleotideContigFragment

-object ADAMNucleotideContigFragmentContext {
+object NucleotideContigFragmentContext {
   // Add ADAM Spark context methods
-  implicit def sparkContextToADAMContext(sc: SparkContext): ADAMNucleotideContigFragmentContext = new ADAMNucleotideContigFragmentContext(sc)
+  implicit def sparkContextToADAMContext(sc: SparkContext): NucleotideContigFragmentContext = new NucleotideContigFragmentContext(sc)

   // Add methods specific to the ADAMNucleotideContig RDDs
-  implicit def rddToContigFragmentRDD(rdd: RDD[NucleotideContigFragment]) = new ADAMNucleotideContigFragmentRDDFunctions(rdd)
+  implicit def rddToContigFragmentRDD(rdd: RDD[NucleotideContigFragment]) = new NucleotideContigFragmentRDDFunctions(rdd)
 }

-class ADAMNucleotideContigFragmentContext(val sc: SparkContext) extends Serializable with Logging {
+class NucleotideContigFragmentContext(val sc: SparkContext) extends Serializable with Logging {

   def adamSequenceLoad(filePath: String, fragmentLength: Long): RDD[NucleotideContigFragment] = {
     if (filePath.endsWith(".fasta") || filePath.endsWith(".fa")) {
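For reference, a minimal usage sketch of the renamed context: importing the companion object brings sparkContextToADAMContext into implicit scope, so adamSequenceLoad can be called directly on a SparkContext. The path and fragment length below are placeholders.

import org.apache.spark.SparkContext
import org.bdgenomics.adam.rdd.contig.NucleotideContigFragmentContext._

def loadReference(sc: SparkContext) =
  // implicit conversion: SparkContext => NucleotideContigFragmentContext
  sc.adamSequenceLoad("hdfs:///data/reference.fa", 10000L)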
@@ -37,7 +37,7 @@ import parquet.hadoop.util.ContextUtil
 import scala.math.max
 import scala.Some

-class ADAMNucleotideContigFragmentRDDFunctions(rdd: RDD[NucleotideContigFragment]) extends ADAMSequenceDictionaryRDDAggregator[NucleotideContigFragment](rdd) {
+class NucleotideContigFragmentRDDFunctions(rdd: RDD[NucleotideContigFragment]) extends ADAMSequenceDictionaryRDDAggregator[NucleotideContigFragment](rdd) {

   /**
    * Rewrites the contig IDs of a FASTA reference set to match the contig IDs present in a
@@ -21,11 +21,11 @@ import org.apache.spark.{ SparkContext, Logging }
 import org.apache.spark.rdd.RDD
 import org.bdgenomics.formats.avro.Feature

-object ADAMFeaturesContext {
-  implicit def sparkContextToADAMFeaturesContext(sc: SparkContext): ADAMFeaturesContext = new ADAMFeaturesContext(sc)
+object FeaturesContext {
+  implicit def sparkContextToADAMFeaturesContext(sc: SparkContext): FeaturesContext = new FeaturesContext(sc)
 }

-class ADAMFeaturesContext(sc: SparkContext) extends Serializable with Logging {
+class FeaturesContext(sc: SparkContext) extends Serializable with Logging {

   def adamGTFFeatureLoad(filePath: String): RDD[Feature] = {
     sc.textFile(filePath).flatMap(new GTFParser().parse)
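Likewise for the feature context: with the companion import in scope, a SparkContext gains adamGTFFeatureLoad through the implicit conversion. The GTF path below is a placeholder.

import org.apache.spark.SparkContext
import org.bdgenomics.adam.rdd.features.FeaturesContext._

def loadGenes(sc: SparkContext) =
  // implicit conversion: SparkContext => FeaturesContext
  sc.adamGTFFeatureLoad("hdfs:///annotations/genes.gtf")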
@@ -21,11 +21,11 @@ import org.apache.spark.rdd.RDD
 import org.bdgenomics.adam.models._
 import org.bdgenomics.formats.avro.Pileup

-object ADAMPileupContext {
+object PileupContext {

   // Add methods specific to the Pileup RDDs
-  implicit def rddToADAMPileupRDD(rdd: RDD[Pileup]) = new ADAMPileupRDDFunctions(rdd)
+  implicit def rddToPileupRDD(rdd: RDD[Pileup]) = new PileupRDDFunctions(rdd)

   // Add methods specific to the Rod RDDs
-  implicit def rddToRodRDD(rdd: RDD[Rod]) = new ADAMRodRDDFunctions(rdd)
+  implicit def rddToRodRDD(rdd: RDD[Rod]) = new RodRDDFunctions(rdd)
 }
@@ -22,7 +22,7 @@ import org.apache.spark.rdd.RDD
 import org.bdgenomics.adam.models._
 import org.bdgenomics.formats.avro._

-class ADAMPileupRDDFunctions(rdd: RDD[Pileup]) extends Serializable with Logging {
+class PileupRDDFunctions(rdd: RDD[Pileup]) extends Serializable with Logging {

   /**
    * Converts ungrouped pileup bases into reference grouped bases.
@@ -37,14 +37,14 @@ class ADAMPileupRDDFunctions(rdd: RDD[Pileup]) extends Serializable with Logging
   }
 }

-class ADAMRodRDDFunctions(rdd: RDD[Rod]) extends Serializable with Logging {
+class RodRDDFunctions(rdd: RDD[Rod]) extends Serializable with Logging {
   /**
    * Given an RDD of rods, splits the rods up by the specific sample they correspond to.
    * Returns a flat RDD.
    *
    * @return Rods split up by samples and _not_ grouped together.
    */
-  def adamSplitRodsBySamples(): RDD[Rod] = {
+  def splitRodsBySamples(): RDD[Rod] = {
     rdd.flatMap(_.splitBySamples())
   }

@@ -54,7 +54,7 @@ class ADAMRodRDDFunctions(rdd: RDD[Rod]) extends Serializable with Logging {
    *
    * @return Rods split up by samples and grouped together by position.
    */
-  def adamDivideRodsBySamples(): RDD[(ReferencePosition, List[Rod])] = {
+  def divideRodsBySamples(): RDD[(ReferencePosition, List[Rod])] = {
     rdd.keyBy(_.position).map(r => (r._1, r._2.splitBySamples()))
   }

@@ -68,7 +68,7 @@ class ADAMRodRDDFunctions(rdd: RDD[Rod]) extends Serializable with Logging {
    *
    * @return Average coverage across mapped loci.
    */
-  def adamRodCoverage(): Double = {
+  def rodCoverage(): Double = {
     val totalBases: Long = rdd.map(_.pileups.length.toLong).reduce(_ + _)

     // coverage is the total count of bases, over the total number of loci
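A sketch of how the renamed Rod methods read at a call site, assuming an RDD[Rod] built elsewhere and assuming Rod lives in org.bdgenomics.adam.models (as the wildcard import above suggests); the PileupContext import pulls in the rddToRodRDD implicit:

import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.models.Rod
import org.bdgenomics.adam.rdd.pileup.PileupContext._

def summarizeRods(rods: RDD[Rod]): Unit = {
  val perSample = rods.splitRodsBySamples()   // flat RDD, rods split per sample
  val byLocus   = rods.divideRodsBySamples()  // keyed by ReferencePosition
  println(s"mean coverage: ${rods.rodCoverage()}")
}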
@@ -42,12 +42,12 @@ import org.seqdoop.hadoop_bam.{
 import org.seqdoop.hadoop_bam.util.SAMHeaderReader
 import parquet.hadoop.util.ContextUtil

-object ADAMAlignmentRecordContext extends Serializable with Logging {
+object AlignmentRecordContext extends Serializable with Logging {
   // Add ADAM Spark context methods
-  implicit def adamContextToADAMContext(ac: ADAMContext): ADAMAlignmentRecordContext = new ADAMAlignmentRecordContext(ac.sc)
+  implicit def adamContextToADAMContext(ac: ADAMContext): AlignmentRecordContext = new AlignmentRecordContext(ac.sc)

   // Add methods specific to Read RDDs
-  implicit def rddToADAMRecordRDD(rdd: RDD[AlignmentRecord]) = new ADAMAlignmentRecordRDDFunctions(rdd)
+  implicit def rddToADAMRecordRDD(rdd: RDD[AlignmentRecord]) = new AlignmentRecordRDDFunctions(rdd)

   private[rdd] def adamBamLoad(sc: SparkContext,
                                filePath: String): RDD[AlignmentRecord] = {
@@ -90,14 +90,14 @@ object ADAMAlignmentRecordContext extends Serializable with Logging {
   }
 }

-class ADAMAlignmentRecordContext(val sc: SparkContext) extends Serializable with Logging {
+class AlignmentRecordContext(val sc: SparkContext) extends Serializable with Logging {

   def adamFastqLoad(firstPairPath: String,
                     secondPairPath: String,
                     fixPairs: Boolean = false): RDD[AlignmentRecord] = {
     // load rdds
-    val firstPairRdd = ADAMAlignmentRecordContext.adamUnpairedFastqLoad(sc, firstPairPath)
-    val secondPairRdd = ADAMAlignmentRecordContext.adamUnpairedFastqLoad(sc, secondPairPath)
+    val firstPairRdd = AlignmentRecordContext.adamUnpairedFastqLoad(sc, firstPairPath)
+    val secondPairRdd = AlignmentRecordContext.adamUnpairedFastqLoad(sc, secondPairPath)

     // cache rdds
     firstPairRdd.cache()
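A minimal sketch of loading paired FASTQ through the renamed class, matching the adamFastqLoad signature above; the file paths are placeholders:

import org.apache.spark.SparkContext
import org.bdgenomics.adam.rdd.read.AlignmentRecordContext

def loadPairs(sc: SparkContext) =
  new AlignmentRecordContext(sc)
    .adamFastqLoad("reads_1.fastq", "reads_2.fastq", fixPairs = true)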
@@ -31,15 +31,15 @@ import org.bdgenomics.adam.converters.AlignmentRecordConverter
 import org.bdgenomics.adam.models._
 import org.bdgenomics.adam.rdd.ADAMContext._
 import org.bdgenomics.adam.rdd.ADAMSequenceDictionaryRDDAggregator
-import org.bdgenomics.adam.rdd.read.ADAMAlignmentRecordContext._
+import org.bdgenomics.adam.rdd.read.AlignmentRecordContext._
 import org.bdgenomics.adam.rdd.read.correction.{ ErrorCorrection, TrimReads }
 import org.bdgenomics.adam.rdd.read.realignment.RealignIndels
 import org.bdgenomics.adam.rdd.read.recalibration.BaseQualityRecalibration
 import org.bdgenomics.adam.rich.RichAlignmentRecord
 import org.bdgenomics.adam.util.MapTools
 import org.bdgenomics.formats.avro._

-class ADAMAlignmentRecordRDDFunctions(rdd: RDD[AlignmentRecord]) extends ADAMSequenceDictionaryRDDAggregator[AlignmentRecord](rdd) {
+class AlignmentRecordRDDFunctions(rdd: RDD[AlignmentRecord]) extends ADAMSequenceDictionaryRDDAggregator[AlignmentRecord](rdd) {

   /**
    * Calculates the subset of the RDD whose AlignmentRecords overlap the corresponding
@@ -20,7 +20,7 @@ package org.bdgenomics.adam.rdd.read
 import org.apache.spark.rdd.RDD
 import org.bdgenomics.adam.models.{ ReferencePositionPair, ReferencePositionWithOrientation, SingleReadBucket }
 import org.bdgenomics.adam.rdd.ADAMContext._
-import org.bdgenomics.adam.rdd.read.ADAMAlignmentRecordContext._
+import org.bdgenomics.adam.rdd.read.AlignmentRecordContext._
 import org.bdgenomics.formats.avro.AlignmentRecord

 private[rdd] object MarkDuplicates extends Serializable {
@@ -27,16 +27,16 @@ import org.bdgenomics.adam.metrics.filters.GeneratorFilter
 import org.bdgenomics.adam.models.ReadBucket
 import org.bdgenomics.adam.projections.{ FieldValue, Projection }
 import org.bdgenomics.adam.rdd.ADAMContext._
-import org.bdgenomics.adam.rdd.read.ADAMAlignmentRecordContext._
-import org.bdgenomics.adam.rdd.read.ADAMAlignmentRecordContext
+import org.bdgenomics.adam.rdd.read.AlignmentRecordContext._
+import org.bdgenomics.adam.rdd.read.AlignmentRecordContext
 import org.bdgenomics.formats.avro.AlignmentRecord
 import scala.reflect.ClassTag

 class ComparisonTraversalEngine(schema: Seq[FieldValue], input1: RDD[AlignmentRecord], input2: RDD[AlignmentRecord])(implicit sc: SparkContext) {
   def this(schema: Seq[FieldValue], input1Paths: Seq[Path], input2Paths: Seq[Path])(implicit sc: SparkContext) =
     this(schema,
-      new ADAMAlignmentRecordContext(sc).loadADAMFromPaths(input1Paths),
-      new ADAMAlignmentRecordContext(sc).loadADAMFromPaths(input2Paths))(sc)
+      new AlignmentRecordContext(sc).loadADAMFromPaths(input1Paths),
+      new AlignmentRecordContext(sc).loadADAMFromPaths(input2Paths))(sc)

   lazy val projection = Projection(schema: _*)
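And a sketch of the auxiliary constructor above, which now routes each path list through AlignmentRecordContext.loadADAMFromPaths. The schema fields and paths are placeholders, Path is assumed to be org.apache.hadoop.fs.Path, and the code is assumed to live where ComparisonTraversalEngine is visible (its package is not shown in the hunk):

import org.apache.hadoop.fs.Path
import org.apache.spark.SparkContext
import org.bdgenomics.adam.projections.FieldValue

// Hypothetical builder: the implicit SparkContext satisfies the
// constructor's second parameter list.
def buildEngine(schemaFields: Seq[FieldValue])(implicit sc: SparkContext) =
  new ComparisonTraversalEngine(schemaFields,
    Seq(new Path("sample1.adam")),
    Seq(new Path("sample2.adam")))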