Replace utils.Logger with grizzled.slf4j.Logger.
heuermh committed Apr 26, 2019
1 parent 122888b commit f132d8c
Showing 52 changed files with 191 additions and 187 deletions.
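
Every Scala change below follows one mechanical pattern: the old
org.bdgenomics.utils.misc.Logging trait exposed a `log` field, while
grizzled.slf4j.Logging mixes the level-named methods into the class
itself, so `log.info(...)` becomes `info(...)`, and `log.warn` /
`log.error` likewise. (In the hunks that follow, `-` marks a removed
line and `+` its replacement.) A minimal sketch of the new style; the
class and messages are hypothetical, not taken from this commit:

    import grizzled.slf4j.Logging

    // Illustrative class only. Mixing in grizzled's Logging trait adds
    // info/debug/warn/error/trace methods directly to the class, backed
    // by an SLF4J logger named after the concrete class.
    class ExampleCommand extends Logging {
      def run(inputPath: String): Unit = {
        info("Loading input from %s".format(inputPath)) // was: log.info(...)
        if (inputPath.isEmpty) {
          warn("Empty input path; nothing to do.")
        }
      }
    }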
@@ -17,11 +17,11 @@
*/
package org.bdgenomics.adam.cli

+import grizzled.slf4j.Logging
import org.apache.spark.SparkContext
import org.bdgenomics.adam.cli.FileSystemUtils._
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.utils.cli._
-import org.bdgenomics.utils.misc.Logging
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }

class ADAM2FastaArgs extends Args4jBase {
@@ -55,10 +55,10 @@ class ADAM2Fasta(val args: ADAM2FastaArgs) extends BDGSparkCommand[ADAM2FastaArg
override def run(sc: SparkContext): Unit = {
checkWriteablePath(args.outputPath, sc.hadoopConfiguration)

log.info("Loading ADAM nucleotide contig fragments from disk.")
info("Loading ADAM nucleotide contig fragments from disk.")
val contigFragments = sc.loadContigFragments(args.inputPath)

log.info("Merging fragments and writing FASTA to disk.")
info("Merging fragments and writing FASTA to disk.")
val contigs = contigFragments.mergeFragments()

val cc = if (args.coalesce > 0) {
@@ -80,7 +80,7 @@ class ADAM2Fastq(val args: ADAM2FastqArgs) extends BDGSparkCommand[ADAM2FastqArg
var reads = sc.loadAlignments(args.inputPath, optProjection = projectionOpt)

if (args.repartition != -1) {
log.info("Repartitioning reads to to '%d' partitions".format(args.repartition))
info("Repartitioning reads to to '%d' partitions".format(args.repartition))
reads = reads.transform(_.repartition(args.repartition))
}

@@ -21,7 +21,7 @@ import java.util.logging.Level._
import javax.inject.Inject
import com.google.inject.AbstractModule
import net.codingwell.scalaguice.ScalaModule
-import org.bdgenomics.utils.misc.Logging
+import grizzled.slf4j.Logging
import org.bdgenomics.adam.util.ParquetLogger
import org.bdgenomics.utils.cli._

@@ -106,7 +106,7 @@ class ADAMMain @Inject() (commandGroups: List[CommandGroup]) extends Logging {
}

def apply(args: Array[String]) {
log.info("ADAM invoked with args: %s".format(argsToString(args)))
info("ADAM invoked with args: %s".format(argsToString(args)))
if (args.length < 1) {
printCommands()
} else if (args.contains("--version") || args.contains("-version")) {
@@ -17,11 +17,11 @@
*/
package org.bdgenomics.adam.cli

+import grizzled.slf4j.Logging
import org.apache.spark.SparkContext
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.cli.FileSystemUtils._
import org.bdgenomics.utils.cli._
-import org.bdgenomics.utils.misc.Logging
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }

object CountContigKmers extends BDGCommandCompanion {
@@ -17,12 +17,12 @@
*/
package org.bdgenomics.adam.cli

+import grizzled.slf4j.Logging
import org.apache.spark.SparkContext
import org.bdgenomics.adam.cli.FileSystemUtils._
import org.bdgenomics.adam.projections.{ AlignmentRecordField, Projection }
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.utils.cli._
-import org.bdgenomics.utils.misc.Logging
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }

object CountReadKmers extends BDGCommandCompanion {
@@ -60,7 +60,7 @@ class CountReadKmers(protected val args: CountReadKmersArgs) extends BDGSparkCom
)

if (args.repartition != -1) {
log.info("Repartitioning reads to '%d' partitions".format(args.repartition))
info("Repartitioning reads to '%d' partitions".format(args.repartition))
adamRecords = adamRecords.transform(_.repartition(args.repartition))
}

@@ -17,11 +17,11 @@
*/
package org.bdgenomics.adam.cli

+import grizzled.slf4j.Logging
import org.apache.spark.SparkContext
import org.bdgenomics.adam.cli.FileSystemUtils._
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.utils.cli._
-import org.bdgenomics.utils.misc.Logging
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }

object Fasta2ADAM extends BDGCommandCompanion {
@@ -54,14 +54,14 @@ class Fasta2ADAM(protected val args: Fasta2ADAMArgs) extends BDGSparkCommand[Fas
def run(sc: SparkContext) {
checkWriteablePath(args.outputPath, sc.hadoopConfiguration)

log.info("Loading FASTA data from disk.")
info("Loading FASTA data from disk.")
val adamFasta = sc.loadFasta(args.fastaFile, maximumLength = args.maximumLength)

if (args.verbose) {
log.info("FASTA contains: %s", adamFasta.sequences.toString)
info("FASTA contains: %s".format(adamFasta.sequences.toString))
}

log.info("Writing records to disk.")
info("Writing records to disk.")
val finalFasta = if (args.partitions > 0) {
adamFasta.transform(_.repartition(args.partitions))
} else {
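
Note the middle hunk above also rewrites a two-argument call,
`log.info("FASTA contains: %s", ...)`, into a single formatted string:
grizzled's logging methods take one by-name (`=> Any`) message, so the
argument is only evaluated if the level is enabled. A hypothetical
sketch of that laziness:

    import grizzled.slf4j.Logging

    object LazyMessageDemo extends Logging {
      // Stands in for an expensive summary, e.g. a sequence dictionary's
      // toString over many records.
      def expensiveSummary(): String = (1 to 1000000).sum.toString

      def main(args: Array[String]): Unit = {
        // The by-name parameter defers this interpolation, including the
        // call to expensiveSummary(), until info() has checked the level.
        info(s"summary: ${expensiveSummary()}")
      }
    }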
@@ -20,12 +20,11 @@ package org.bdgenomics.adam.cli
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapred.FileAlreadyExistsException
-import org.bdgenomics.utils.misc.Logging

/**
* Utility methods for file systems.
*/
-private[cli] object FileSystemUtils extends Logging {
+private[cli] object FileSystemUtils {
private def exists(pathName: String, conf: Configuration): Boolean = {
val p = new Path(pathName)
val fs = p.getFileSystem(conf)
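
FileSystemUtils is the one file here where the mixin is dropped rather
than swapped: nothing in the object's code shown in this diff logs, so
the import and the `extends Logging` clause are removed outright
instead of being migrated.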
@@ -20,6 +20,7 @@ package org.bdgenomics.adam.cli
import java.time.Instant
import java.lang.{ Boolean => JBoolean }

+import grizzled.slf4j.Logging
import htsjdk.samtools.ValidationStringency
import org.apache.parquet.filter2.predicate.FilterApi
import org.apache.parquet.filter2.predicate.Operators.BooleanColumn
@@ -37,7 +38,6 @@ import org.bdgenomics.adam.rdd.read.{ AlignmentRecordDataset, QualityScoreBin }
import org.bdgenomics.adam.rich.RichVariant
import org.bdgenomics.formats.avro.ProcessingStep
import org.bdgenomics.utils.cli._
-import org.bdgenomics.utils.misc.Logging
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }

object TransformAlignments extends BDGCommandCompanion {
@@ -175,7 +175,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
*/
private def maybeRepartition(rdd: AlignmentRecordDataset): AlignmentRecordDataset = {
if (args.repartition != -1) {
log.info("Repartitioning reads to to '%d' partitions".format(args.repartition))
info("Repartitioning reads to to '%d' partitions".format(args.repartition))
rdd.transform(_.repartition(args.repartition))
} else {
rdd
@@ -190,7 +190,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
*/
private def maybeDedupe(rdd: AlignmentRecordDataset): AlignmentRecordDataset = {
if (args.markDuplicates) {
log.info("Marking duplicates")
info("Marking duplicates")
rdd.markDuplicates()
} else {
rdd
@@ -213,7 +213,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
sl: StorageLevel): AlignmentRecordDataset = {
if (args.locallyRealign) {

log.info("Locally realigning indels.")
info("Locally realigning indels.")

// has the user asked us to cache the rdd before multi-pass stages?
if (args.cache) {
@@ -272,7 +272,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
sl: StorageLevel): AlignmentRecordDataset = {
if (args.recalibrateBaseQualities) {

log.info("Recalibrating base qualities")
info("Recalibrating base qualities")

// bqsr is a two pass algorithm, so cache the rdd if requested
val optSl = if (args.cache) {
@@ -311,7 +311,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
*/
private def maybeCoalesce(rdd: AlignmentRecordDataset): AlignmentRecordDataset = {
if (args.coalesce != -1) {
log.info("Coalescing the number of partitions to '%d'".format(args.coalesce))
info("Coalescing the number of partitions to '%d'".format(args.coalesce))
if (args.coalesce > rdd.rdd.partitions.length || args.forceShuffle) {
rdd.transform(_.coalesce(args.coalesce, shuffle = true))
} else {
@@ -341,7 +341,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
rdd.rdd.persist(sl)
}

log.info("Sorting reads")
info("Sorting reads")

// are we sorting lexicographically or using legacy SAM sort order?
val sortedRdd = if (args.sortLexicographically) {
@@ -373,7 +373,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
rdd: AlignmentRecordDataset,
stringencyOpt: Option[ValidationStringency]): AlignmentRecordDataset = {
if (args.mdTagsReferenceFile != null) {
log.info(s"Adding MDTags to reads based on reference file ${args.mdTagsReferenceFile}")
info(s"Adding MDTags to reads based on reference file ${args.mdTagsReferenceFile}")
val referenceFile = sc.loadReferenceFile(args.mdTagsReferenceFile,
maximumLength = args.mdTagsFragmentSize)
rdd.computeMismatchingPositions(
@@ -588,7 +588,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B

if (args.partitionByStartPos) {
if (outputRdd.sequences.isEmpty) {
log.warn("This dataset is not aligned and therefore will not benefit from being saved as a partitioned dataset")
warn("This dataset is not aligned and therefore will not benefit from being saved as a partitioned dataset")
}
outputRdd.saveAsPartitionedParquet(args.outputPath, partitionSize = args.partitionedBinSize)
} else {
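
The maybeCoalesce helper above encodes a Spark detail worth calling
out: `coalesce` without a shuffle can only merge partitions downward,
so when the requested count exceeds the current partition count, or
`args.forceShuffle` is set, the call must pass `shuffle = true`. A
plain-RDD sketch of that decision outside ADAM's types (the helper
name is hypothetical):

    import org.apache.spark.rdd.RDD

    object CoalesceDemo {
      // Mirrors the maybeCoalesce logic used by the transforms above.
      def coalesceTo[T](rdd: RDD[T], target: Int, forceShuffle: Boolean): RDD[T] = {
        if (target > rdd.partitions.length || forceShuffle) {
          // Growing the partition count requires a shuffle; coalesce
          // without one can never increase the number of partitions.
          rdd.coalesce(target, shuffle = true)
        } else {
          // Shrinking can merge partitions locally, avoiding a shuffle.
          rdd.coalesce(target, shuffle = false)
        }
      }
    }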
@@ -17,6 +17,7 @@
*/
package org.bdgenomics.adam.cli

+import grizzled.slf4j.Logging
import org.apache.spark.SparkContext
import org.bdgenomics.adam.cli.FileSystemUtils._
import org.bdgenomics.adam.io.FastqRecordReader
@@ -25,7 +26,6 @@ import org.bdgenomics.adam.rdd.ADAMSaveAnyArgs
import org.bdgenomics.adam.rdd.read.QualityScoreBin
import org.bdgenomics.adam.rdd.fragment.FragmentDataset
import org.bdgenomics.utils.cli._
-import org.bdgenomics.utils.misc.Logging
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }

object TransformFragments extends BDGCommandCompanion {
@@ -99,7 +99,7 @@ class TransformFragments(protected val args: TransformFragmentsArgs) extends BDG
checkWriteablePath(args.outputPath, sc.hadoopConfiguration)

if (args.loadAsReads && args.saveAsReads) {
log.warn("If loading and saving as reads, consider using TransformAlignments instead.")
warn("If loading and saving as reads, consider using TransformAlignments instead.")
}
if (args.sortReads) {
require(args.saveAsReads,
@@ -95,7 +95,7 @@ class TransformGenotypes(val args: TransformGenotypesArgs)
*/
private def maybeCoalesce[U <: GenomicDataset[_, _, U]](rdd: U): U = {
if (args.coalesce != -1) {
log.info("Coalescing the number of partitions to '%d'".format(args.coalesce))
info("Coalescing the number of partitions to '%d'".format(args.coalesce))
if (args.coalesce > rdd.rdd.partitions.length || args.forceShuffle) {
rdd.transform(_.coalesce(args.coalesce, shuffle = true))
} else {
@@ -114,10 +114,10 @@ class TransformGenotypes(val args: TransformGenotypesArgs)
*/
private def maybeSort[U <: GenomicDataset[_, _, U]](rdd: U): U = {
if (args.sort) {
log.info("Sorting before saving")
info("Sorting before saving")
rdd.sort()
} else if (args.sortLexicographically) {
log.info("Sorting lexicographically before saving")
info("Sorting lexicographically before saving")
rdd.sortLexicographically()
} else {
rdd
@@ -131,7 +131,7 @@
"Cannot set both -sort_on_save and -sort_lexicographically_on_save.")

if (args.nestedAnnotations) {
log.info("Populating the variant.annotation field in the Genotype records")
info("Populating the variant.annotation field in the Genotype records")
sc.hadoopConfiguration.setBoolean(VariantContextConverter.nestAnnotationInGenotypesProperty, true)
}

@@ -85,7 +85,7 @@ class TransformVariants(val args: TransformVariantsArgs)
*/
private def maybeCoalesce[U <: GenomicDataset[_, _, U]](rdd: U): U = {
if (args.coalesce != -1) {
log.info("Coalescing the number of partitions to '%d'".format(args.coalesce))
info("Coalescing the number of partitions to '%d'".format(args.coalesce))
if (args.coalesce > rdd.rdd.partitions.length || args.forceShuffle) {
rdd.transform(_.coalesce(args.coalesce, shuffle = true))
} else {
@@ -104,10 +104,10 @@ class TransformVariants(val args: TransformVariantsArgs)
*/
private def maybeSort[U <: GenomicDataset[_, _, U]](rdd: U): U = {
if (args.sort) {
log.info("Sorting before saving")
info("Sorting before saving")
rdd.sort()
} else if (args.sortLexicographically) {
log.info("Sorting lexicographically before saving")
info("Sorting lexicographically before saving")
rdd.sortLexicographically()
} else {
rdd
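
TransformGenotypes and TransformVariants carry identically shaped
helpers; the interesting part is the `U <: GenomicDataset[_, _, U]`
bound, an F-bounded type parameter that lets one generic helper return
whatever concrete dataset type it was given. A self-contained toy
sketch of the same shape (all names hypothetical):

    // Toy stand-in for GenomicDataset: each concrete type U promises
    // that its transforms return another U.
    trait ToyDataset[U <: ToyDataset[U]] {
      def sort(): U
      def sortLexicographically(): U
    }

    object SortDemo {
      // Mirrors maybeSort above: the result type is U, not the bare
      // trait, so callers keep their concrete dataset type.
      def maybeSort[U <: ToyDataset[U]](ds: U, sort: Boolean, lex: Boolean): U = {
        if (sort) ds.sort()
        else if (lex) ds.sortLexicographically()
        else ds
      }
    }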
@@ -19,11 +19,11 @@ package org.bdgenomics.adam.cli

import java.io.File

+import grizzled.slf4j.Logging
import org.apache.avro.Schema
import org.apache.avro.generic.IndexedRecord
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
-import org.bdgenomics.utils.misc.Logging
import org.apache.parquet.avro.AvroReadSupport
import org.apache.parquet.hadoop.ParquetReader

@@ -54,15 +54,15 @@ class ParquetLister[T <: IndexedRecord](projectionSchema: Option[Schema] = None)
materialize(f)
} catch {
case e: IllegalArgumentException =>
logInfo("File %s doesn't appear to be a Parquet file; skipping".format(f))
info("File %s doesn't appear to be a Parquet file; skipping".format(f))
Seq()
}
}.iterator
}
}

private def materialize(file: File): Iterator[T] = {
logInfo("Materializing file %s".format(file))
info("Materializing file %s".format(file))
val conf = new Configuration
if (projectionSchema.isDefined) {
AvroReadSupport.setRequestedProjection(conf, projectionSchema.get)
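
ParquetLister used the old trait's Spark-style `logInfo` spelling,
which migrates to the same bare `info`. When code needs the logger
object itself, the grizzled trait also exposes a `logger` value; a
hypothetical sketch (the explicit guard is redundant for plain
messages, since arguments are by-name, and is shown only to illustrate
the accessor):

    import grizzled.slf4j.Logging

    // Illustrative class only: `logger` is grizzled's wrapper around
    // the underlying SLF4J logger, with explicit is*Enabled checks.
    class ExampleLister extends Logging {
      def describe(files: Seq[String]): Unit = {
        if (logger.isInfoEnabled) {
          info("Materializing %d files: %s".format(files.size, files.mkString(", ")))
        }
      }
    }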
5 changes: 5 additions & 0 deletions adam-core/pom.xml
@@ -232,6 +232,11 @@
<artifactId>avro</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.clapper</groupId>
<artifactId>grizzled-slf4j_${scala.version.prefix}</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
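
The new dependency declares no <version> element; in a Maven
multi-module build the version is normally pinned once in the parent
POM's <dependencyManagement> section (an assumption here, as the
parent POM is not part of this diff). The ${scala.version.prefix}
property keeps the artifact's Scala binary suffix (for example 2.11
or 2.12) in step with the rest of the build.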
@@ -17,13 +17,13 @@
*/
package org.bdgenomics.adam.converters

+import grizzled.slf4j.Logging
import htsjdk.samtools.ValidationStringency
import org.apache.hadoop.io.Text
import org.bdgenomics.formats.avro.{
AlignmentRecord,
Fragment
}
-import org.bdgenomics.utils.misc.Logging
import scala.collection.JavaConversions._

/**
@@ -91,7 +91,7 @@ private[adam] class FastqRecordConverter extends Serializable with Logging {
if (stringency == ValidationStringency.STRICT) {
throw e
} else if (stringency == ValidationStringency.LENIENT) {
log.warn("Read had improper pair suffix: %s".format(e.getMessage))
warn("Read had improper pair suffix: %s".format(e.getMessage))
}
}
}
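
The catch block above shows ADAM's validation-stringency convention:
STRICT rethrows, LENIENT logs a warning, and SILENT, the implicit
remaining case, swallows the error. A compact hypothetical sketch of
that dispatch:

    import grizzled.slf4j.Logging
    import htsjdk.samtools.ValidationStringency

    // Hypothetical helper distilling the pattern from the catch block.
    class StringencyDemo extends Logging {
      def handle(e: Exception, stringency: ValidationStringency): Unit = {
        if (stringency == ValidationStringency.STRICT) {
          throw e // fail fast
        } else if (stringency == ValidationStringency.LENIENT) {
          warn("Skipping malformed record: %s".format(e.getMessage))
        }
        // ValidationStringency.SILENT: ignore and continue
      }
    }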
@@ -17,12 +17,12 @@
*/
package org.bdgenomics.adam.converters

+import grizzled.slf4j.Logging
import htsjdk.samtools.{
SAMReadGroupRecord,
SAMRecord,
SAMUtils
}
-import org.bdgenomics.utils.misc.Logging
import org.bdgenomics.adam.models.Attribute
import org.bdgenomics.adam.util.AttributeUtils
import org.bdgenomics.formats.avro.AlignmentRecord
@@ -212,7 +212,7 @@ private[adam] class SAMRecordConverter extends Serializable with Logging {
builder.build
} catch {
case t: Throwable => {
log.error("Conversion of read: " + samRecord + " failed.")
error("Conversion of read: " + samRecord + " failed.")
throw t
}
}
