run mvn org.scalariform:scalariform-maven-plugin:format
jey committed Apr 9, 2014
1 parent a7a9dfa · commit 1c2b59b
Showing 45 changed files with 155 additions and 155 deletions.
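
Every hunk below is a whitespace-only rewrite: scalariform re-indents multi-line parameter lists and normalizes the spacing around case arrows, so each removed (-) line reappears immediately below as an added (+) line with only its alignment changed. As a rough sketch of the pattern (the object and method names here are hypothetical, and the exact indentation depends on the scalariform preferences configured in the project's pom.xml rather than on anything shown in this commit):

// Hypothetical before/after sketch of the whitespace-only rewrites in this
// commit; the names and the "after" indentation width are illustrative only.
object FormatSketch {
  // Before: continuation parameters hand-aligned under the opening parenthesis.
  def describeBefore(name: String,
                     count: Int,
                     label: String): String = s"$name/$count/$label"

  // After running mvn org.scalariform:scalariform-maven-plugin:format:
  // continuation lines drop the hand alignment for the plugin's configured indent.
  def describeAfter(name: String,
    count: Int,
    label: String): String = s"$name/$count/$label"

  // Single-line case clauses likewise get consistent spacing around the arrow.
  def classify(n: Option[Int]): String = n match {
    case Some(x) => s"got $x"
    case None    => "nothing"
  }
}

Because the formatter only moves whitespace, every deleted line has a matching added line, which is consistent with the equal addition and deletion counts reported above.
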
@@ -65,7 +65,7 @@ object ADAMMain extends Logging {
printCommands()
} else {
commands.find(_.commandName == args(0)) match {
- case None => printCommands()
+ case None => printCommands()
case Some(cmd) => cmd.apply(args drop 1).run()
}
}
@@ -68,11 +68,11 @@ object CompareADAM extends ADAMCommandCompanion with Serializable {
* @see CompareADAMArgs.recurse1, CompareADAMArgs.recurse2
*/
def setupTraversalEngine(sc: SparkContext,
- input1Path: String,
- recurse1: String,
- input2Path: String,
- recurse2: String,
- generator: BucketComparisons[Any]): ComparisonTraversalEngine = {
+ input1Path: String,
+ recurse1: String,
+ input2Path: String,
+ recurse2: String,
+ generator: BucketComparisons[Any]): ComparisonTraversalEngine = {

val schemas = Seq[FieldValue](
recordGroupId,
@@ -88,7 +88,7 @@ object CompareADAM extends ADAMCommandCompanion with Serializable {
def parseGenerators(nameList: String): Seq[BucketComparisons[Any]] = {
nameList match {
case null => DefaultComparisons.comparisons
- case s => parseGenerators(s.split(","))
+ case s => parseGenerators(s.split(","))
}
}

@@ -146,9 +146,9 @@ class CompareADAM(protected val args: CompareADAMArgs) extends ADAMSparkCommand[
* @param writer The PrintWriter to print the summary with.
*/
def printSummary(engine: ComparisonTraversalEngine,
- generators: Seq[BucketComparisons[Any]],
- aggregateds: Seq[Histogram[Any]],
- writer: PrintWriter) {
+ generators: Seq[BucketComparisons[Any]],
+ aggregateds: Seq[Histogram[Any]],
+ writer: PrintWriter) {

writer.println("%15s: %s".format("INPUT1", args.input1Path))
writer.println("\t%15s: %d".format("total-reads", engine.named1.count()))
@@ -57,7 +57,7 @@ class MpileupCommand(protected val args: MpileupArgs) extends ADAMSparkCommand[M
// The reference base
pileup.referenceBase match {
case Some(base) => print(base)
- case None => print("?")
+ case None => print("?")
}

// The number of reads
@@ -95,19 +95,19 @@ class PluginExecutor(protected val args: PluginExecutorArgs) extends ADAMSparkCo
// see. This is related to Issue #62: Predicate to filter conversion.
val filter = accessControl.predicate match {
case None => plugin.predicate match {
- case None => None
+ case None => None
case Some(predicateFilter) => Some(predicateFilter)
}
case Some(accessControlPredicate) => plugin.predicate match {
- case None => Some(accessControlPredicate)
+ case None => Some(accessControlPredicate)
case Some(predicateFilter) => Some((value: ADAMRecord) => accessControlPredicate(value) && predicateFilter(value))
}
}

val firstRdd: RDD[ADAMRecord] = load[ADAMRecord](sc, args.input, plugin.projection)

val input = filter match {
- case None => firstRdd
+ case None => firstRdd
case Some(filterFunc) => firstRdd.filter(filterFunc)
}

@@ -56,8 +56,8 @@ class SummarizeGenotypes(val args: SummarizeGenotypesArgs) extends ADAMSparkComm
val stats = GenotypesSummary(adamGTs)
val result = args.format match {
case "human" => GenotypesSummaryFormatting.format_human_readable(stats)
case "csv" => GenotypesSummaryFormatting.format_csv(stats)
case _ => throw new IllegalArgumentException("Invalid -format: %s".format(args.format))
case "csv" => GenotypesSummaryFormatting.format_csv(stats)
case _ => throw new IllegalArgumentException("Invalid -format: %s".format(args.format))
}
if (args.out.isEmpty) {
println(result)
@@ -33,10 +33,10 @@ object SmithWatermanConstantGapScoring {
}

abstract class SmithWatermanConstantGapScoring(xSequence: String,
- ySequence: String,
- wMatch: Double,
- wMismatch: Double,
- wInsert: Double,
- wDelete: Double)
+ ySequence: String,
+ wMatch: Double,
+ wMismatch: Double,
+ wInsert: Double,
+ wDelete: Double)
extends SmithWatermanGapScoringFromFn(xSequence, ySequence, SmithWatermanConstantGapScoring.constantGapFn(wMatch, wMismatch, wInsert, wDelete)) {
}
@@ -17,8 +17,8 @@
package org.bdgenomics.adam.algorithms.smithwaterman

abstract class SmithWatermanGapScoringFromFn(xSequence: String,
- ySequence: String,
- scoreFn: (Int, Int, Char, Char) => Double)
+ ySequence: String,
+ scoreFn: (Int, Int, Char, Char) => Double)
extends SmithWaterman(xSequence, ySequence) {

def buildScoringMatrix(): Array[Array[Double]] = {
@@ -43,7 +43,7 @@ private[adam] object FastaConverter {
* @return An RDD of ADAM FASTA data.
*/
def apply(rdd: RDD[(Int, String)],
- maxFragmentLength: Long = 10000L): RDD[ADAMNucleotideContigFragment] = {
+ maxFragmentLength: Long = 10000L): RDD[ADAMNucleotideContigFragment] = {
val filtered = rdd.map(kv => (kv._1, kv._2.trim()))
.filter((kv: (Int, String)) => !kv._2.startsWith(";"))

@@ -164,9 +164,9 @@ private[converters] class FastaConverter(fragmentLength: Long) extends Serializa
* @return The converted ADAM FASTA contig.
*/
def convert(name: Option[String],
- id: Int,
- sequence: Seq[String],
- description: Option[String]): Seq[ADAMNucleotideContigFragment] = {
+ id: Int,
+ sequence: Seq[String],
+ description: Option[String]): Seq[ADAMNucleotideContigFragment] = {

// get sequence length
val sequenceLength = sequence.map(_.length).reduce(_ + _)
@@ -19,7 +19,7 @@ import org.bdgenomics.adam.util._
import scala.math.{ pow, sqrt }

private[adam] class GenotypesToVariantsConverter(validateSamples: Boolean = false,
- failOnValidationError: Boolean = false) extends Serializable {
+ failOnValidationError: Boolean = false) extends Serializable {

/**
* Computes root mean squared (RMS) values for a series of doubles.
@@ -124,7 +124,7 @@ class SAMRecordConverter extends Serializable {
if (recordGroup != null) {
Option(recordGroup.getRunDate) match {
case Some(date) => builder.setRecordGroupRunDateEpoch(date.getTime)
- case None =>
+ case None =>
}
recordGroup.getId
builder.setRecordGroupId(readGroups(recordGroup.getReadGroupId))
@@ -26,18 +26,18 @@ import org.bdgenomics.adam.util.ImplicitJavaConversions._
object VariantAnnotationConverter extends Serializable {

private def attrAsInt(attr: Object): Object = attr match {
- case a: String => java.lang.Integer.valueOf(a)
+ case a: String => java.lang.Integer.valueOf(a)
case a: java.lang.Integer => a
- case a: java.lang.Number => java.lang.Integer.valueOf(a.intValue)
+ case a: java.lang.Number => java.lang.Integer.valueOf(a.intValue)
}
private def attrAsLong(attr: Object): Object = attr match {
- case a: String => java.lang.Long.valueOf(a)
- case a: java.lang.Long => a
+ case a: String => java.lang.Long.valueOf(a)
+ case a: java.lang.Long => a
case a: java.lang.Number => java.lang.Long.valueOf(a.longValue)
}
private def attrAsFloat(attr: Object): Object = attr match {
- case a: String => java.lang.Float.valueOf(a)
- case a: java.lang.Float => a
+ case a: String => java.lang.Float.valueOf(a)
+ case a: java.lang.Float => a
case a: java.lang.Number => java.lang.Float.valueOf(a.floatValue)
}
private def attrAsString(attr: Object): Object = attr match {
@@ -46,7 +46,7 @@ object VariantAnnotationConverter extends Serializable {

private def attrAsBoolean(attr: Object): Object = attr match {
case a: java.lang.Boolean => a
- case a: String => java.lang.Boolean.valueOf(a)
+ case a: String => java.lang.Boolean.valueOf(a)
}

private case class AttrKey(adamKey: String, attrConverter: (Object => Object), hdrLine: VCFCompoundHeaderLine) {
@@ -48,8 +48,8 @@ object VariantContextConverter {
private def convertAlleles(g: ADAMGenotype): java.util.List[Allele] = {
g.getAlleles.map(a => a match {
case ADAMGenotypeAllele.NoCall => Allele.NO_CALL
- case ADAMGenotypeAllele.Ref => Allele.create(g.getVariant.getReferenceAllele.toString, true)
- case ADAMGenotypeAllele.Alt => Allele.create(g.getVariant.getVariantAllele.toString)
+ case ADAMGenotypeAllele.Ref => Allele.create(g.getVariant.getReferenceAllele.toString, true)
+ case ADAMGenotypeAllele.Alt => Allele.create(g.getVariant.getVariantAllele.toString)
})
}
}
@@ -29,12 +29,12 @@ import com.esotericsoftware.kryo.io.{ Input, Output }
* This is useful as this will usually map a single read in any of the sequences.
*/
case class ReadBucket(unpairedPrimaryMappedReads: Seq[ADAMRecord] = Seq.empty,
- pairedFirstPrimaryMappedReads: Seq[ADAMRecord] = Seq.empty,
- pairedSecondPrimaryMappedReads: Seq[ADAMRecord] = Seq.empty,
- unpairedSecondaryMappedReads: Seq[ADAMRecord] = Seq.empty,
- pairedFirstSecondaryMappedReads: Seq[ADAMRecord] = Seq.empty,
- pairedSecondSecondaryMappedReads: Seq[ADAMRecord] = Seq.empty,
- unmappedReads: Seq[ADAMRecord] = Seq.empty) {
+ pairedFirstPrimaryMappedReads: Seq[ADAMRecord] = Seq.empty,
+ pairedSecondPrimaryMappedReads: Seq[ADAMRecord] = Seq.empty,
+ unpairedSecondaryMappedReads: Seq[ADAMRecord] = Seq.empty,
+ pairedFirstSecondaryMappedReads: Seq[ADAMRecord] = Seq.empty,
+ pairedSecondSecondaryMappedReads: Seq[ADAMRecord] = Seq.empty,
+ unmappedReads: Seq[ADAMRecord] = Seq.empty) {
def allReads(): Seq[ADAMRecord] =
unpairedPrimaryMappedReads ++
pairedFirstPrimaryMappedReads ++
@@ -48,7 +48,7 @@ object ReferencePositionPair extends Logging {
}

case class ReferencePositionPair(read1refPos: Option[ReferencePositionWithOrientation],
- read2refPos: Option[ReferencePositionWithOrientation])
+ read2refPos: Option[ReferencePositionWithOrientation])

class ReferencePositionPairSerializer extends Serializer[ReferencePositionPair] {
val rps = new ReferencePositionWithOrientationSerializer()
@@ -122,7 +122,7 @@ case class ReferenceRegion(refId: Int, start: Long, end: Long) extends Ordered[R
*/
def isAdjacent(region: ReferenceRegion): Boolean = distance(region) match {
case Some(d) => d == 1
- case None => false
+ case None => false
}

/**
@@ -208,7 +208,7 @@ class SequenceDictionary(val recordsIn: Array[SequenceRecord]) extends Serializa
def records: Set[SequenceRecord] = recordIndices.values.toSet

private[models] def cleanAndMerge(a1: Array[SequenceRecord],
- a2: Array[SequenceRecord]): Array[SequenceRecord] = {
+ a2: Array[SequenceRecord]): Array[SequenceRecord] = {
val a2filt = a2.filter(k => !a1.contains(k))

a1 ++ a2filt
@@ -36,8 +36,8 @@ object SingleReadBucket extends Logging {
}

case class SingleReadBucket(primaryMapped: Seq[ADAMRecord] = Seq.empty,
- secondaryMapped: Seq[ADAMRecord] = Seq.empty,
- unmapped: Seq[ADAMRecord] = Seq.empty) {
+ secondaryMapped: Seq[ADAMRecord] = Seq.empty,
+ unmapped: Seq[ADAMRecord] = Seq.empty) {
// Note: not a val in order to save serialization/memory cost
def allReads = {
primaryMapped ++ secondaryMapped ++ unmapped
@@ -113,12 +113,12 @@ object ADAMContext {
* @return Returns a properly configured Spark Context.
*/
def createSparkContext(name: String,
- master: String,
- sparkHome: String,
- sparkJars: Seq[String],
- sparkEnvVars: Seq[String],
- sparkAddStatsListener: Boolean = false,
- sparkKryoBufferSize: Int = 4): SparkContext = {
+ master: String,
+ sparkHome: String,
+ sparkJars: Seq[String],
+ sparkEnvVars: Seq[String],
+ sparkAddStatsListener: Boolean = false,
+ sparkKryoBufferSize: Int = 4): SparkContext = {
ADAMKryoProperties.setupContextProperties(sparkKryoBufferSize)
val appName = "adam: " + name
val environment: Map[String, String] = if (sparkEnvVars.isEmpty) {
@@ -53,8 +53,8 @@ import scala.math.{ min, max }
class ADAMRDDFunctions[T <% SpecificRecord: Manifest](rdd: RDD[T]) extends Serializable {

def adamSave(filePath: String, blockSize: Int = 128 * 1024 * 1024,
- pageSize: Int = 1 * 1024 * 1024, compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
- disableDictionaryEncoding: Boolean = false): RDD[T] = {
+ pageSize: Int = 1 * 1024 * 1024, compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
+ disableDictionaryEncoding: Boolean = false): RDD[T] = {
val job = new Job(rdd.context.hadoopConfiguration)
ParquetLogger.hadoopLoggerLevel(Level.SEVERE)
ParquetOutputFormat.setWriteSupportClass(job, classOf[AvroWriteSupport])
@@ -224,7 +224,7 @@ class ADAMRecordRDDFunctions(rdd: RDD[ADAMRecord]) extends ADAMSequenceDictionar
* @return RDD of ADAMRods.
*/
def adamRecords2Rods(bucketSize: Int = 1000,
- secondaryAlignments: Boolean = false): RDD[ADAMRod] = {
+ secondaryAlignments: Boolean = false): RDD[ADAMRod] = {

/**
* Maps a read to one or two buckets. A read maps to a single bucket if both
@@ -459,7 +459,7 @@ class ADAMNucleotideContigFragmentRDDFunctions(rdd: RDD[ADAMNucleotideContigFrag
}

def reducePairs(kv1: (ReferenceRegion, String),
- kv2: (ReferenceRegion, String)): (ReferenceRegion, String) = {
+ kv2: (ReferenceRegion, String)): (ReferenceRegion, String) = {
assert(kv1._1.isAdjacent(kv2._1), "Regions being joined must be adjacent. For: " +
kv1 + ", " + kv2)

@@ -56,10 +56,10 @@ case class DuplicateMetrics(total: Long, bothMapped: Long, onlyReadMapped: Long,
}

case class FlagStatMetrics(total: Long, duplicatesPrimary: DuplicateMetrics, duplicatesSecondary: DuplicateMetrics,
- mapped: Long, pairedInSequencing: Long,
- read1: Long, read2: Long, properlyPaired: Long, withSelfAndMateMapped: Long,
- singleton: Long, withMateMappedToDiffChromosome: Long,
- withMateMappedToDiffChromosomeMapQ5: Long, failedQuality: Boolean) {
+ mapped: Long, pairedInSequencing: Long,
+ read1: Long, read2: Long, properlyPaired: Long, withSelfAndMateMapped: Long,
+ singleton: Long, withMateMappedToDiffChromosome: Long,
+ withMateMappedToDiffChromosomeMapQ5: Long, failedQuality: Boolean) {
def +(that: FlagStatMetrics): FlagStatMetrics = {
assert(failedQuality == that.failedQuality, "Can't reduce passedVendorQuality with different failedQuality values")
new FlagStatMetrics(total + that.total,
@@ -76,10 +76,10 @@ class GenomicRegionPartitioner(val numParts: Int, val seqLengths: Map[Int, Long]
case ReferencePosition.UNMAPPED => parts

// everything else gets assigned normally.
- case refpos: ReferencePosition => getPart(refpos.refId, refpos.pos)
+ case refpos: ReferencePosition => getPart(refpos.refId, refpos.pos)

// only ReferencePosition values are partitioned using this partitioner
- case _ => throw new IllegalArgumentException("Only ReferencePosition values can be partitioned by GenomicRegionPartitioner")
+ case _ => throw new IllegalArgumentException("Only ReferencePosition values can be partitioned by GenomicRegionPartitioner")
}
}

@@ -181,9 +181,9 @@ object GenotypesSummary {
stats1.keySet.union(stats2.keySet).map(sample => {
(stats1.get(sample), stats2.get(sample)) match {
case (Some(statsA), Some(statsB)) => sample -> statsA.combine(statsB)
- case (Some(stats), None) => sample -> stats
- case (None, Some(stats)) => sample -> stats
- case (None, None) => throw new AssertionError("Unreachable")
+ case (Some(stats), None) => sample -> stats
+ case (None, Some(stats)) => sample -> stats
+ case (None, None) => throw new AssertionError("Unreachable")
}
}).toMap
}
@@ -270,7 +270,7 @@ object GenotypesSummaryFormatting {
}
result ++= "\tAverage read depth at called variants: %s\n".format(stats.averageReadDepthAtVariants match {
case Some(depth) => "%1.1f".format(depth)
case None => "[no variant calls, or read depth missing for one or more variant calls]"
case None => "[no variant calls, or read depth missing for one or more variant calls]"
})
result ++= "\tPhased genotypes: %d / %d = %1.3f%%\n".format(
stats.phasedCount,
@@ -296,8 +296,8 @@

private def sortedGenotypeAlleles(stats: GenotypesSummaryCounts): Seq[List[ADAMGenotypeAllele]] = {
def genotypeSortOrder(genotype: List[ADAMGenotypeAllele]): Int = genotype.map({
- case ADAMGenotypeAllele.Ref => 0
- case ADAMGenotypeAllele.Alt => 1
+ case ADAMGenotypeAllele.Ref => 0
+ case ADAMGenotypeAllele.Alt => 1
case ADAMGenotypeAllele.NoCall => 10 // arbitrary large number so any genotype with a NoCall sorts last.
}).sum
stats.genotypesCounts.keySet.toList.sortBy(genotypeSortOrder(_))
@@ -77,7 +77,7 @@ private[rdd] class Reads2PileupProcessor(createSecondaryAlignments: Boolean = fa

val end: Long = record.end match {
case Some(o) => o.asInstanceOf[Long]
- case None => -1L
+ case None => -1L
}

assert(end != -1L, "Read is mapped but has a null end position. Read:\n" + record)
@@ -151,7 +151,7 @@ private[rdd] class Reads2PileupProcessor(createSecondaryAlignments: Boolean = fa
} else {
if (mdTag.isDefined) {
mdTag.get.mismatchedBase(referencePos) match {
- case None => throw new IllegalArgumentException("Cigar match has no MD (mis)match @" + referencePos + " " + record.getCigar + " " + record.getMismatchingPositions) fillInStackTrace ()
+ case None => throw new IllegalArgumentException("Cigar match has no MD (mis)match @" + referencePos + " " + record.getCigar + " " + record.getMismatchingPositions) fillInStackTrace ()
case Some(read) => Some(Base.valueOf(read.toString))
}
} else {
@@ -242,7 +242,7 @@ private[rdd] class Reads2PileupProcessor(createSecondaryAlignments: Boolean = fa

val referenceBase: Option[Base] = if (mdTag.isDefined) {
mdTag.get.mismatchedBase(referencePos) match {
- case None => throw new IllegalArgumentException("Cigar match has no MD (mis)match @" + referencePos + " " + record.getCigar + " " + record.getMismatchingPositions) fillInStackTrace ()
+ case None => throw new IllegalArgumentException("Cigar match has no MD (mis)match @" + referencePos + " " + record.getCigar + " " + record.getMismatchingPositions) fillInStackTrace ()
case Some(read) => Some(Base.valueOf(read.toString))
}
} else {