diff --git a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/ADAM2Fasta.scala b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/ADAM2Fasta.scala
index 28bd69103f..ebfd6ce3e8 100644
--- a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/ADAM2Fasta.scala
+++ b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/ADAM2Fasta.scala
@@ -17,11 +17,11 @@
*/
package org.bdgenomics.adam.cli
+import grizzled.slf4j.Logging
import org.apache.spark.SparkContext
import org.bdgenomics.adam.cli.FileSystemUtils._
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.utils.cli._
-import org.bdgenomics.utils.misc.Logging
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }
class ADAM2FastaArgs extends Args4jBase {
@@ -55,10 +55,10 @@ class ADAM2Fasta(val args: ADAM2FastaArgs) extends BDGSparkCommand[ADAM2FastaArg
override def run(sc: SparkContext): Unit = {
checkWriteablePath(args.outputPath, sc.hadoopConfiguration)
- log.info("Loading ADAM nucleotide contig fragments from disk.")
+ info("Loading ADAM nucleotide contig fragments from disk.")
val contigFragments = sc.loadContigFragments(args.inputPath)
- log.info("Merging fragments and writing FASTA to disk.")
+ info("Merging fragments and writing FASTA to disk.")
val contigs = contigFragments.mergeFragments()
val cc = if (args.coalesce > 0) {
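[Reviewer note] The pattern applied throughout this patch: the `org.bdgenomics.utils.misc.Logging` mixin (which exposed a `log` field plus Spark-style `logInfo`/`logError` helpers) is replaced by `grizzled.slf4j.Logging`, whose trait-level `info`/`warn`/`error` methods take their message by name. A minimal sketch of the new style, using a hypothetical class that is not part of this patch:
```scala
import grizzled.slf4j.Logging

// Hypothetical class, not part of this patch; it only illustrates the
// logging style the migration adopts.
class ExampleStage extends Logging {
  def run(partitions: Int): Unit = {
    // grizzled-slf4j takes the message by name, so the format call below is
    // evaluated only when INFO is enabled for this class's logger.
    info("Repartitioning reads to '%d' partitions".format(partitions))
    if (partitions < 1) {
      warn("Requested fewer than one partition; leaving input unchanged")
    }
  }
}
```
Because messages are by-name, individual calls need no enabled-level guard; the GenomicPartitioners hunk further down shows the one place a guard is still added, to skip a whole logging loop.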
diff --git a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/ADAM2Fastq.scala b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/ADAM2Fastq.scala
index cc3d843e36..ebae9fb39a 100644
--- a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/ADAM2Fastq.scala
+++ b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/ADAM2Fastq.scala
@@ -80,7 +80,7 @@ class ADAM2Fastq(val args: ADAM2FastqArgs) extends BDGSparkCommand[ADAM2FastqArg
var reads = sc.loadAlignments(args.inputPath, optProjection = projectionOpt)
if (args.repartition != -1) {
- log.info("Repartitioning reads to to '%d' partitions".format(args.repartition))
+ info("Repartitioning reads to to '%d' partitions".format(args.repartition))
reads = reads.transform(_.repartition(args.repartition))
}
diff --git a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/ADAMMain.scala b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/ADAMMain.scala
index 0b6d9ff60c..a816273b77 100644
--- a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/ADAMMain.scala
+++ b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/ADAMMain.scala
@@ -21,7 +21,7 @@ import java.util.logging.Level._
import javax.inject.Inject
import com.google.inject.AbstractModule
import net.codingwell.scalaguice.ScalaModule
-import org.bdgenomics.utils.misc.Logging
+import grizzled.slf4j.Logging
import org.bdgenomics.adam.util.ParquetLogger
import org.bdgenomics.utils.cli._
@@ -106,7 +106,7 @@ class ADAMMain @Inject() (commandGroups: List[CommandGroup]) extends Logging {
}
def apply(args: Array[String]) {
- log.info("ADAM invoked with args: %s".format(argsToString(args)))
+ info("ADAM invoked with args: %s".format(argsToString(args)))
if (args.length < 1) {
printCommands()
} else if (args.contains("--version") || args.contains("-version")) {
diff --git a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/CountContigKmers.scala b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/CountContigKmers.scala
index 677dc99ea9..b664d9c4bf 100644
--- a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/CountContigKmers.scala
+++ b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/CountContigKmers.scala
@@ -17,11 +17,11 @@
*/
package org.bdgenomics.adam.cli
+import grizzled.slf4j.Logging
import org.apache.spark.SparkContext
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.cli.FileSystemUtils._
import org.bdgenomics.utils.cli._
-import org.bdgenomics.utils.misc.Logging
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }
object CountContigKmers extends BDGCommandCompanion {
diff --git a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/CountReadKmers.scala b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/CountReadKmers.scala
index 31258e6ebf..97f4ce1125 100644
--- a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/CountReadKmers.scala
+++ b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/CountReadKmers.scala
@@ -17,12 +17,12 @@
*/
package org.bdgenomics.adam.cli
+import grizzled.slf4j.Logging
import org.apache.spark.SparkContext
import org.bdgenomics.adam.cli.FileSystemUtils._
import org.bdgenomics.adam.projections.{ AlignmentRecordField, Projection }
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.utils.cli._
-import org.bdgenomics.utils.misc.Logging
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }
object CountReadKmers extends BDGCommandCompanion {
@@ -60,7 +60,7 @@ class CountReadKmers(protected val args: CountReadKmersArgs) extends BDGSparkCom
)
if (args.repartition != -1) {
- log.info("Repartitioning reads to '%d' partitions".format(args.repartition))
+ info("Repartitioning reads to '%d' partitions".format(args.repartition))
adamRecords = adamRecords.transform(_.repartition(args.repartition))
}
diff --git a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/Fasta2ADAM.scala b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/Fasta2ADAM.scala
index 74e5bbd2e5..c8aa7d5da5 100644
--- a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/Fasta2ADAM.scala
+++ b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/Fasta2ADAM.scala
@@ -17,11 +17,11 @@
*/
package org.bdgenomics.adam.cli
+import grizzled.slf4j.Logging
import org.apache.spark.SparkContext
import org.bdgenomics.adam.cli.FileSystemUtils._
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.utils.cli._
-import org.bdgenomics.utils.misc.Logging
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }
object Fasta2ADAM extends BDGCommandCompanion {
@@ -54,14 +54,14 @@ class Fasta2ADAM(protected val args: Fasta2ADAMArgs) extends BDGSparkCommand[Fas
def run(sc: SparkContext) {
checkWriteablePath(args.outputPath, sc.hadoopConfiguration)
- log.info("Loading FASTA data from disk.")
+ info("Loading FASTA data from disk.")
val adamFasta = sc.loadFasta(args.fastaFile, maximumLength = args.maximumLength)
if (args.verbose) {
- log.info("FASTA contains: %s", adamFasta.sequences.toString)
+ info("FASTA contains: %s".format(adamFasta.sequences.toString))
}
- log.info("Writing records to disk.")
+ info("Writing records to disk.")
val finalFasta = if (args.partitions > 0) {
adamFasta.transform(_.repartition(args.partitions))
} else {
diff --git a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/FileSystemUtils.scala b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/FileSystemUtils.scala
index c14a97003a..be524230f1 100644
--- a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/FileSystemUtils.scala
+++ b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/FileSystemUtils.scala
@@ -20,12 +20,11 @@ package org.bdgenomics.adam.cli
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapred.FileAlreadyExistsException
-import org.bdgenomics.utils.misc.Logging
/**
* Utility methods for file systems.
*/
-private[cli] object FileSystemUtils extends Logging {
+private[cli] object FileSystemUtils {
private def exists(pathName: String, conf: Configuration): Boolean = {
val p = new Path(pathName)
val fs = p.getFileSystem(conf)
diff --git a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformAlignments.scala b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformAlignments.scala
index 23537471f3..c8b126c3ab 100644
--- a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformAlignments.scala
+++ b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformAlignments.scala
@@ -20,6 +20,7 @@ package org.bdgenomics.adam.cli
import java.time.Instant
import java.lang.{ Boolean => JBoolean }
+import grizzled.slf4j.Logging
import htsjdk.samtools.ValidationStringency
import org.apache.parquet.filter2.predicate.FilterApi
import org.apache.parquet.filter2.predicate.Operators.BooleanColumn
@@ -37,7 +38,6 @@ import org.bdgenomics.adam.rdd.read.{ AlignmentRecordDataset, QualityScoreBin }
import org.bdgenomics.adam.rich.RichVariant
import org.bdgenomics.formats.avro.ProcessingStep
import org.bdgenomics.utils.cli._
-import org.bdgenomics.utils.misc.Logging
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }
object TransformAlignments extends BDGCommandCompanion {
@@ -175,7 +175,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
*/
private def maybeRepartition(rdd: AlignmentRecordDataset): AlignmentRecordDataset = {
if (args.repartition != -1) {
- log.info("Repartitioning reads to to '%d' partitions".format(args.repartition))
+ info("Repartitioning reads to to '%d' partitions".format(args.repartition))
rdd.transform(_.repartition(args.repartition))
} else {
rdd
@@ -190,7 +190,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
*/
private def maybeDedupe(rdd: AlignmentRecordDataset): AlignmentRecordDataset = {
if (args.markDuplicates) {
- log.info("Marking duplicates")
+ info("Marking duplicates")
rdd.markDuplicates()
} else {
rdd
@@ -213,7 +213,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
sl: StorageLevel): AlignmentRecordDataset = {
if (args.locallyRealign) {
- log.info("Locally realigning indels.")
+ info("Locally realigning indels.")
// has the user asked us to cache the rdd before multi-pass stages?
if (args.cache) {
@@ -272,7 +272,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
sl: StorageLevel): AlignmentRecordDataset = {
if (args.recalibrateBaseQualities) {
- log.info("Recalibrating base qualities")
+ info("Recalibrating base qualities")
// bqsr is a two pass algorithm, so cache the rdd if requested
val optSl = if (args.cache) {
@@ -311,7 +311,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
*/
private def maybeCoalesce(rdd: AlignmentRecordDataset): AlignmentRecordDataset = {
if (args.coalesce != -1) {
- log.info("Coalescing the number of partitions to '%d'".format(args.coalesce))
+ info("Coalescing the number of partitions to '%d'".format(args.coalesce))
if (args.coalesce > rdd.rdd.partitions.length || args.forceShuffle) {
rdd.transform(_.coalesce(args.coalesce, shuffle = true))
} else {
@@ -341,7 +341,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
rdd.rdd.persist(sl)
}
- log.info("Sorting reads")
+ info("Sorting reads")
// are we sorting lexicographically or using legacy SAM sort order?
val sortedRdd = if (args.sortLexicographically) {
@@ -373,7 +373,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
rdd: AlignmentRecordDataset,
stringencyOpt: Option[ValidationStringency]): AlignmentRecordDataset = {
if (args.mdTagsReferenceFile != null) {
- log.info(s"Adding MDTags to reads based on reference file ${args.mdTagsReferenceFile}")
+ info(s"Adding MDTags to reads based on reference file ${args.mdTagsReferenceFile}")
val referenceFile = sc.loadReferenceFile(args.mdTagsReferenceFile,
maximumLength = args.mdTagsFragmentSize)
rdd.computeMismatchingPositions(
@@ -588,7 +588,7 @@ class TransformAlignments(protected val args: TransformAlignmentsArgs) extends B
if (args.partitionByStartPos) {
if (outputRdd.sequences.isEmpty) {
- log.warn("This dataset is not aligned and therefore will not benefit from being saved as a partitioned dataset")
+ warn("This dataset is not aligned and therefore will not benefit from being saved as a partitioned dataset")
}
outputRdd.saveAsPartitionedParquet(args.outputPath, partitionSize = args.partitionedBinSize)
} else {
diff --git a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformFragments.scala b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformFragments.scala
index 86369ded1f..5332d95694 100644
--- a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformFragments.scala
+++ b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformFragments.scala
@@ -17,6 +17,7 @@
*/
package org.bdgenomics.adam.cli
+import grizzled.slf4j.Logging
import org.apache.spark.SparkContext
import org.bdgenomics.adam.cli.FileSystemUtils._
import org.bdgenomics.adam.io.FastqRecordReader
@@ -25,7 +26,6 @@ import org.bdgenomics.adam.rdd.ADAMSaveAnyArgs
import org.bdgenomics.adam.rdd.read.QualityScoreBin
import org.bdgenomics.adam.rdd.fragment.FragmentDataset
import org.bdgenomics.utils.cli._
-import org.bdgenomics.utils.misc.Logging
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }
object TransformFragments extends BDGCommandCompanion {
@@ -99,7 +99,7 @@ class TransformFragments(protected val args: TransformFragmentsArgs) extends BDG
checkWriteablePath(args.outputPath, sc.hadoopConfiguration)
if (args.loadAsReads && args.saveAsReads) {
- log.warn("If loading and saving as reads, consider using TransformAlignments instead.")
+ warn("If loading and saving as reads, consider using TransformAlignments instead.")
}
if (args.sortReads) {
require(args.saveAsReads,
diff --git a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformGenotypes.scala b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformGenotypes.scala
index 4d493af0f8..f1696e313d 100644
--- a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformGenotypes.scala
+++ b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformGenotypes.scala
@@ -95,7 +95,7 @@ class TransformGenotypes(val args: TransformGenotypesArgs)
*/
private def maybeCoalesce[U <: GenomicDataset[_, _, U]](rdd: U): U = {
if (args.coalesce != -1) {
- log.info("Coalescing the number of partitions to '%d'".format(args.coalesce))
+ info("Coalescing the number of partitions to '%d'".format(args.coalesce))
if (args.coalesce > rdd.rdd.partitions.length || args.forceShuffle) {
rdd.transform(_.coalesce(args.coalesce, shuffle = true))
} else {
@@ -114,10 +114,10 @@ class TransformGenotypes(val args: TransformGenotypesArgs)
*/
private def maybeSort[U <: GenomicDataset[_, _, U]](rdd: U): U = {
if (args.sort) {
- log.info("Sorting before saving")
+ info("Sorting before saving")
rdd.sort()
} else if (args.sortLexicographically) {
- log.info("Sorting lexicographically before saving")
+ info("Sorting lexicographically before saving")
rdd.sortLexicographically()
} else {
rdd
@@ -131,7 +131,7 @@ class TransformGenotypes(val args: TransformGenotypesArgs)
"Cannot set both -sort_on_save and -sort_lexicographically_on_save.")
if (args.nestedAnnotations) {
- log.info("Populating the variant.annotation field in the Genotype records")
+ info("Populating the variant.annotation field in the Genotype records")
sc.hadoopConfiguration.setBoolean(VariantContextConverter.nestAnnotationInGenotypesProperty, true)
}
diff --git a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformVariants.scala b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformVariants.scala
index 74ed5be834..8e4cd7aa90 100644
--- a/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformVariants.scala
+++ b/adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformVariants.scala
@@ -85,7 +85,7 @@ class TransformVariants(val args: TransformVariantsArgs)
*/
private def maybeCoalesce[U <: GenomicDataset[_, _, U]](rdd: U): U = {
if (args.coalesce != -1) {
- log.info("Coalescing the number of partitions to '%d'".format(args.coalesce))
+ info("Coalescing the number of partitions to '%d'".format(args.coalesce))
if (args.coalesce > rdd.rdd.partitions.length || args.forceShuffle) {
rdd.transform(_.coalesce(args.coalesce, shuffle = true))
} else {
@@ -104,10 +104,10 @@ class TransformVariants(val args: TransformVariantsArgs)
*/
private def maybeSort[U <: GenomicDataset[_, _, U]](rdd: U): U = {
if (args.sort) {
- log.info("Sorting before saving")
+ info("Sorting before saving")
rdd.sort()
} else if (args.sortLexicographically) {
- log.info("Sorting lexicographically before saving")
+ info("Sorting lexicographically before saving")
rdd.sortLexicographically()
} else {
rdd
diff --git a/adam-cli/src/test/scala/org/bdgenomics/adam/cli/ParquetLister.scala b/adam-cli/src/test/scala/org/bdgenomics/adam/cli/ParquetLister.scala
index 1b1da667be..f48a1d3778 100644
--- a/adam-cli/src/test/scala/org/bdgenomics/adam/cli/ParquetLister.scala
+++ b/adam-cli/src/test/scala/org/bdgenomics/adam/cli/ParquetLister.scala
@@ -19,11 +19,11 @@ package org.bdgenomics.adam.cli
import java.io.File
+import grizzled.slf4j.Logging
import org.apache.avro.Schema
import org.apache.avro.generic.IndexedRecord
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
-import org.bdgenomics.utils.misc.Logging
import org.apache.parquet.avro.AvroReadSupport
import org.apache.parquet.hadoop.ParquetReader
@@ -54,7 +54,7 @@ class ParquetLister[T <: IndexedRecord](projectionSchema: Option[Schema] = None)
materialize(f)
} catch {
case e: IllegalArgumentException =>
- logInfo("File %s doesn't appear to be a Parquet file; skipping".format(f))
+ info("File %s doesn't appear to be a Parquet file; skipping".format(f))
Seq()
}
}.iterator
@@ -62,7 +62,7 @@ class ParquetLister[T <: IndexedRecord](projectionSchema: Option[Schema] = None)
}
private def materialize(file: File): Iterator[T] = {
- logInfo("Materializing file %s".format(file))
+ info("Materializing file %s".format(file))
val conf = new Configuration
if (projectionSchema.isDefined) {
AvroReadSupport.setRequestedProjection(conf, projectionSchema.get)
diff --git a/adam-core/pom.xml b/adam-core/pom.xml
index d5b9a04f81..510f92d180 100644
--- a/adam-core/pom.xml
+++ b/adam-core/pom.xml
@@ -232,6 +232,11 @@
       <artifactId>avro</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.clapper</groupId>
+      <artifactId>grizzled-slf4j_${scala.version.prefix}</artifactId>
+      <scope>compile</scope>
+    </dependency>
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-log4j12</artifactId>
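[Reviewer note] For anyone consuming adam-core from sbt rather than Maven, the equivalent of the dependency added above would be roughly the sketch below; the version shown is an assumption for illustration, since the actual version is pinned in the parent POM's dependencyManagement, which this hunk does not show.
```scala
// build.sbt sketch; the grizzled-slf4j version here is illustrative only.
libraryDependencies += "org.clapper" %% "grizzled-slf4j" % "1.3.4"
```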
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/converters/FastqRecordConverter.scala b/adam-core/src/main/scala/org/bdgenomics/adam/converters/FastqRecordConverter.scala
index fe1a2669bc..f035e0d6c1 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/converters/FastqRecordConverter.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/converters/FastqRecordConverter.scala
@@ -17,13 +17,13 @@
*/
package org.bdgenomics.adam.converters
+import grizzled.slf4j.Logging
import htsjdk.samtools.ValidationStringency
import org.apache.hadoop.io.Text
import org.bdgenomics.formats.avro.{
AlignmentRecord,
Fragment
}
-import org.bdgenomics.utils.misc.Logging
import scala.collection.JavaConversions._
/**
@@ -91,7 +91,7 @@ private[adam] class FastqRecordConverter extends Serializable with Logging {
if (stringency == ValidationStringency.STRICT) {
throw e
} else if (stringency == ValidationStringency.LENIENT) {
- log.warn("Read had improper pair suffix: %s".format(e.getMessage))
+ warn("Read had improper pair suffix: %s".format(e.getMessage))
}
}
}
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/converters/SAMRecordConverter.scala b/adam-core/src/main/scala/org/bdgenomics/adam/converters/SAMRecordConverter.scala
index 280e254f8d..dfb832f5da 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/converters/SAMRecordConverter.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/converters/SAMRecordConverter.scala
@@ -17,12 +17,12 @@
*/
package org.bdgenomics.adam.converters
+import grizzled.slf4j.Logging
import htsjdk.samtools.{
SAMReadGroupRecord,
SAMRecord,
SAMUtils
}
-import org.bdgenomics.utils.misc.Logging
import org.bdgenomics.adam.models.Attribute
import org.bdgenomics.adam.util.AttributeUtils
import org.bdgenomics.formats.avro.AlignmentRecord
@@ -212,7 +212,7 @@ private[adam] class SAMRecordConverter extends Serializable with Logging {
builder.build
} catch {
case t: Throwable => {
- log.error("Conversion of read: " + samRecord + " failed.")
+ error("Conversion of read: " + samRecord + " failed.")
throw t
}
}
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/converters/TranscriptEffectConverter.scala b/adam-core/src/main/scala/org/bdgenomics/adam/converters/TranscriptEffectConverter.scala
index 5731c76caf..44ffa840ec 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/converters/TranscriptEffectConverter.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/converters/TranscriptEffectConverter.scala
@@ -17,6 +17,7 @@
*/
package org.bdgenomics.adam.converters
+import grizzled.slf4j.Logging
import htsjdk.samtools.ValidationStringency
import htsjdk.variant.vcf.VCFConstants
import htsjdk.variant.variantcontext.VariantContext
@@ -26,7 +27,6 @@ import org.bdgenomics.formats.avro.{
Variant,
VariantAnnotationMessage
}
-import org.bdgenomics.utils.misc.Logging
import scala.collection.JavaConverters._
/**
@@ -220,7 +220,7 @@ private[adam] object TranscriptEffectConverter extends Serializable with Logging
if (stringency == ValidationStringency.STRICT) {
throw t
} else if (stringency == ValidationStringency.LENIENT) {
- log.warn("Could not convert VCF INFO reserved key ANN value to TranscriptEffect, caught %s.".format(t))
+ warn("Could not convert VCF INFO reserved key ANN value to TranscriptEffect, caught %s.".format(t))
}
None
}
@@ -248,7 +248,7 @@ private[adam] object TranscriptEffectConverter extends Serializable with Logging
"%d".format(n)
}
case (None, Some(d)) => {
- log.warn("Incorrect fractional value ?/%d, missing numerator".format(d))
+ warn("Incorrect fractional value ?/%d, missing numerator".format(d))
""
}
case (Some(n), Some(d)) => {
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/models/IndelTable.scala b/adam-core/src/main/scala/org/bdgenomics/adam/models/IndelTable.scala
index 39e7deeb50..cab2ef9fa5 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/models/IndelTable.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/models/IndelTable.scala
@@ -17,15 +17,15 @@
*/
package org.bdgenomics.adam.models
+import grizzled.slf4j.Logging
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.algorithms.consensus.Consensus
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.formats.avro.Variant
-import org.bdgenomics.utils.misc.Logging
private[adam] class IndelTable(private val table: Map[String, Iterable[Consensus]]) extends Serializable with Logging {
- log.info("Indel table has %s reference sequences and %s entries".format(
+ info("Indel table has %s reference sequences and %s entries".format(
table.size,
table.values.map(_.size).sum
))
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/models/SnpTable.scala b/adam-core/src/main/scala/org/bdgenomics/adam/models/SnpTable.scala
index 0cbf592eb2..e755add5b2 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/models/SnpTable.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/models/SnpTable.scala
@@ -23,7 +23,6 @@ import org.apache.spark.rdd.MetricsContext._
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.instrumentation.Timers._
import org.bdgenomics.adam.rdd.variant.VariantDataset
-import org.bdgenomics.utils.misc.Logging
import scala.annotation.tailrec
import scala.math.{ max, min }
@@ -37,7 +36,7 @@ import scala.math.{ max, min }
*/
class SnpTable private[models] (
private[models] val indices: Map[String, (Int, Int)],
- private[models] val sites: Array[Long]) extends Serializable with Logging {
+ private[models] val sites: Array[Long]) extends Serializable {
private val midpoints: Map[String, Int] = {
@tailrec def pow2ceil(length: Int, i: Int = 1): Int = {
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/ADAMContext.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/ADAMContext.scala
index 60510000c2..21cee931f4 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/ADAMContext.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/ADAMContext.scala
@@ -18,6 +18,7 @@
package org.bdgenomics.adam.rdd
import java.io.{ File, FileNotFoundException, InputStream }
+import grizzled.slf4j.Logging
import htsjdk.samtools.{ SAMFileHeader, SAMProgramRecord, ValidationStringency }
import htsjdk.samtools.util.Locatable
import htsjdk.variant.vcf.{
@@ -105,7 +106,7 @@ import org.bdgenomics.formats.avro.{
}
import org.bdgenomics.utils.instrumentation.Metrics
import org.bdgenomics.utils.io.LocalFileByteAccess
-import org.bdgenomics.utils.misc.{ HadoopUtil, Logging }
+import org.bdgenomics.utils.misc.HadoopUtil
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._
import org.seqdoop.hadoop_bam._
@@ -1300,19 +1301,19 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
throw new IllegalArgumentException("Type inference failed; when loading please specify a specific type. " +
"e.g.:\nval reads: RDD[AlignmentRecord] = ...\nbut not\nval reads = ...\nwithout a return type")
- log.info("Reading the ADAM file at %s to create RDD".format(pathName))
+ info("Reading the ADAM file at %s to create RDD".format(pathName))
val job = HadoopUtil.newJob(sc)
ParquetInputFormat.setReadSupportClass(job, classOf[AvroReadSupport[T]])
AvroParquetInputFormat.setAvroReadSchema(job,
manifest[T].runtimeClass.newInstance().asInstanceOf[T].getSchema)
optPredicate.foreach { (pred) =>
- log.info("Using the specified push-down predicate")
+ info("Using the specified push-down predicate")
ParquetInputFormat.setFilterPredicate(job.getConfiguration, pred)
}
if (optProjection.isDefined) {
- log.info("Using the specified projection schema")
+ info("Using the specified projection schema")
AvroParquetInputFormat.setRequestedProjection(job, optProjection.get)
}
@@ -1473,7 +1474,7 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
samHeader.getGroupOrder == SAMFileHeader.GroupOrder.query)
} catch {
case e: Throwable => {
- log.error(
+ error(
s"Loading header failed for $fp:n${e.getMessage}\n\t${e.getStackTrace.take(25).map(_.toString).mkString("\n\t")}"
)
false
@@ -1496,7 +1497,7 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
if (codec == null) {
pathName
} else {
- log.info(s"Found compression codec $codec for $pathName in Hadoop configuration.")
+ info(s"Found compression codec $codec for $pathName in Hadoop configuration.")
val extension = codec.getDefaultExtension()
CompressionCodecFactory.removeSuffix(pathName, extension)
}
@@ -1540,7 +1541,7 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
// below).
sc.hadoopConfiguration.set(SAMHeaderReader.VALIDATION_STRINGENCY_PROPERTY, stringency.toString)
val samHeader = SAMHeaderReader.readSAMHeaderFrom(fp, sc.hadoopConfiguration)
- log.info("Loaded header from " + fp)
+ info("Loaded header from " + fp)
val sd = loadBamDictionary(samHeader)
val rg = loadBamReadGroups(samHeader)
val pgs = loadBamPrograms(samHeader)
@@ -1550,7 +1551,7 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
if (stringency == ValidationStringency.STRICT) {
throw e
} else if (stringency == ValidationStringency.LENIENT) {
- log.error(
+ error(
s"Loading failed for $fp:\n${e.getMessage}\n\t${e.getStackTrace.take(25).map(_.toString).mkString("\n\t")}"
)
}
@@ -1662,7 +1663,7 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
sc.hadoopConfiguration.set(SAMHeaderReader.VALIDATION_STRINGENCY_PROPERTY, stringency.toString)
val samHeader = SAMHeaderReader.readSAMHeaderFrom(fp, sc.hadoopConfiguration)
- log.info("Loaded header from " + fp)
+ info("Loaded header from " + fp)
val sd = loadBamDictionary(samHeader)
val rg = loadBamReadGroups(samHeader)
val pgs = loadBamPrograms(samHeader)
@@ -1673,7 +1674,7 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
if (stringency == ValidationStringency.STRICT) {
throw e
} else if (stringency == ValidationStringency.LENIENT) {
- log.error(
+ error(
s"Loading failed for $fp:\n${e.getMessage}\n\t${e.getStackTrace.take(25).map(_.toString).mkString("\n\t")}"
)
}
@@ -2057,7 +2058,7 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
throw new IllegalArgumentException(msg)
else {
// ValidationStringency.LENIENT
- logError(msg)
+ warn(msg)
}
}
case ValidationStringency.SILENT =>
@@ -2175,7 +2176,7 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
VariantContextDataset(records.flatMap(p => vcc.convert(p._2.get)),
sd,
samples,
- VariantContextConverter.cleanAndMixInSupportedLines(headers, stringency, log))
+ VariantContextConverter.cleanAndMixInSupportedLines(headers, stringency, logger.logger))
}
/**
@@ -2242,7 +2243,7 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
VariantContextDataset(records.flatMap(p => vcc.convert(p._2.get)),
sd,
samples,
- VariantContextConverter.cleanAndMixInSupportedLines(headers, stringency, log))
+ VariantContextConverter.cleanAndMixInSupportedLines(headers, stringency, logger.logger))
}
/**
@@ -2288,7 +2289,7 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
VariantContextDataset(records.flatMap(p => vcc.convert(p._2.get)),
sd,
samples,
- VariantContextConverter.cleanAndMixInSupportedLines(headers, stringency, log))
+ VariantContextConverter.cleanAndMixInSupportedLines(headers, stringency, logger.logger))
}
/**
@@ -3018,36 +3019,36 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
val trimmedPathName = trimExtensionIfCompressed(pathName)
if (isBedExt(trimmedPathName)) {
- log.info(s"Loading $pathName as BED and converting to Features.")
+ info(s"Loading $pathName as BED and converting to Features.")
loadBed(pathName,
optSequenceDictionary = optSequenceDictionary,
optMinPartitions = optMinPartitions,
stringency = stringency)
} else if (isGff3Ext(trimmedPathName)) {
- log.info(s"Loading $pathName as GFF3 and converting to Features.")
+ info(s"Loading $pathName as GFF3 and converting to Features.")
loadGff3(pathName,
optSequenceDictionary = optSequenceDictionary,
optMinPartitions = optMinPartitions,
stringency = stringency)
} else if (isGtfExt(trimmedPathName)) {
- log.info(s"Loading $pathName as GTF/GFF2 and converting to Features.")
+ info(s"Loading $pathName as GTF/GFF2 and converting to Features.")
loadGtf(pathName,
optSequenceDictionary = optSequenceDictionary,
optMinPartitions = optMinPartitions,
stringency = stringency)
} else if (isNarrowPeakExt(trimmedPathName)) {
- log.info(s"Loading $pathName as NarrowPeak and converting to Features.")
+ info(s"Loading $pathName as NarrowPeak and converting to Features.")
loadNarrowPeak(pathName,
optSequenceDictionary = optSequenceDictionary,
optMinPartitions = optMinPartitions,
stringency = stringency)
} else if (isIntervalListExt(trimmedPathName)) {
- log.info(s"Loading $pathName as IntervalList and converting to Features.")
+ info(s"Loading $pathName as IntervalList and converting to Features.")
loadIntervalList(pathName,
optMinPartitions = optMinPartitions,
stringency = stringency)
} else {
- log.info(s"Loading $pathName as Parquet containing Features.")
+ info(s"Loading $pathName as Parquet containing Features.")
loadParquetFeatures(pathName,
optPredicate = optPredicate,
optProjection = optProjection)
@@ -3098,13 +3099,13 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
def loadSequenceDictionary(pathName: String): SequenceDictionary = LoadSequenceDictionary.time {
val trimmedPathName = trimExtensionIfCompressed(pathName)
if (isDictExt(trimmedPathName)) {
- log.info(s"Loading $pathName as HTSJDK sequence dictionary.")
+ info(s"Loading $pathName as HTSJDK sequence dictionary.")
SequenceDictionaryReader(pathName, sc)
} else if (isGenomeExt(trimmedPathName)) {
- log.info(s"Loading $pathName as Bedtools genome file sequence dictionary.")
+ info(s"Loading $pathName as Bedtools genome file sequence dictionary.")
GenomeFileReader(pathName, sc)
} else if (isTextExt(trimmedPathName)) {
- log.info(s"Loading $pathName as UCSC Genome Browser chromInfo file sequence dictionary.")
+ info(s"Loading $pathName as UCSC Genome Browser chromInfo file sequence dictionary.")
GenomeFileReader(pathName, sc)
} else {
throw new IllegalArgumentException("Path name file extension must be one of .dict, .genome, or .txt")
@@ -3142,13 +3143,13 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
val trimmedPathName = trimExtensionIfCompressed(pathName)
if (isFastaExt(trimmedPathName)) {
- log.info(s"Loading $pathName as FASTA and converting to NucleotideContigFragment.")
+ info(s"Loading $pathName as FASTA and converting to NucleotideContigFragment.")
loadFasta(
pathName,
maximumLength
)
} else {
- log.info(s"Loading $pathName as Parquet containing NucleotideContigFragments.")
+ info(s"Loading $pathName as Parquet containing NucleotideContigFragments.")
loadParquetContigFragments(pathName, optPredicate = optPredicate, optProjection = optProjection)
}
}
@@ -3180,10 +3181,10 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
stringency: ValidationStringency = ValidationStringency.STRICT): GenotypeDataset = LoadGenotypes.time {
if (isVcfExt(pathName)) {
- log.info(s"Loading $pathName as VCF and converting to Genotypes.")
+ info(s"Loading $pathName as VCF and converting to Genotypes.")
loadVcf(pathName, stringency).toGenotypes
} else {
- log.info(s"Loading $pathName as Parquet containing Genotypes. Sequence dictionary for translation is ignored.")
+ info(s"Loading $pathName as Parquet containing Genotypes. Sequence dictionary for translation is ignored.")
loadParquetGenotypes(pathName, optPredicate = optPredicate, optProjection = optProjection)
}
}
@@ -3214,10 +3215,10 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
stringency: ValidationStringency = ValidationStringency.STRICT): VariantDataset = LoadVariants.time {
if (isVcfExt(pathName)) {
- log.info(s"Loading $pathName as VCF and converting to Variants.")
+ info(s"Loading $pathName as VCF and converting to Variants.")
loadVcf(pathName, stringency).toVariants
} else {
- log.info(s"Loading $pathName as Parquet containing Variants. Sequence dictionary for translation is ignored.")
+ info(s"Loading $pathName as Parquet containing Variants. Sequence dictionary for translation is ignored.")
loadParquetVariants(pathName, optPredicate = optPredicate, optProjection = optProjection)
}
}
@@ -3275,19 +3276,19 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
classOf[BGZFEnhancedGzipCodec].getCanonicalName)
val trimmedPathName = trimExtensionIfCompressed(pathName)
if (isBamExt(trimmedPathName)) {
- log.info(s"Loading $pathName as BAM/CRAM/SAM and converting to AlignmentRecords.")
+ info(s"Loading $pathName as BAM/CRAM/SAM and converting to AlignmentRecords.")
loadBam(pathName, stringency)
} else if (isInterleavedFastqExt(trimmedPathName)) {
- log.info(s"Loading $pathName as interleaved FASTQ and converting to AlignmentRecords.")
+ info(s"Loading $pathName as interleaved FASTQ and converting to AlignmentRecords.")
loadInterleavedFastq(pathName)
} else if (isFastqExt(trimmedPathName)) {
- log.info(s"Loading $pathName as unpaired FASTQ and converting to AlignmentRecords.")
+ info(s"Loading $pathName as unpaired FASTQ and converting to AlignmentRecords.")
loadFastq(pathName, optPathName2, optReadGroup, stringency)
} else if (isFastaExt(trimmedPathName)) {
- log.info(s"Loading $pathName as FASTA and converting to AlignmentRecords.")
+ info(s"Loading $pathName as FASTA and converting to AlignmentRecords.")
AlignmentRecordDataset.unaligned(loadFasta(pathName, maximumLength = 10000L).toReads)
} else {
- log.info(s"Loading $pathName as Parquet of AlignmentRecords.")
+ info(s"Loading $pathName as Parquet of AlignmentRecords.")
loadParquetAlignments(pathName, optPredicate = optPredicate, optProjection = optProjection)
}
}
@@ -3334,18 +3335,18 @@ class ADAMContext(@transient val sc: SparkContext) extends Serializable with Log
if (isBamExt(trimmedPathName)) {
// check to see if the input files are all queryname sorted
if (filesAreQueryGrouped(pathName)) {
- log.info(s"Loading $pathName as queryname sorted BAM/CRAM/SAM and converting to Fragments.")
+ info(s"Loading $pathName as queryname sorted BAM/CRAM/SAM and converting to Fragments.")
loadBam(pathName, stringency).transform(RepairPartitions(_))
.querynameSortedToFragments
} else {
- log.info(s"Loading $pathName as BAM/CRAM/SAM and converting to Fragments.")
+ info(s"Loading $pathName as BAM/CRAM/SAM and converting to Fragments.")
loadBam(pathName, stringency).toFragments
}
} else if (isInterleavedFastqExt(trimmedPathName)) {
- log.info(s"Loading $pathName as interleaved FASTQ and converting to Fragments.")
+ info(s"Loading $pathName as interleaved FASTQ and converting to Fragments.")
loadInterleavedFastqAsFragments(pathName)
} else {
- log.info(s"Loading $pathName as Parquet containing Fragments.")
+ info(s"Loading $pathName as Parquet containing Fragments.")
loadParquetFragments(pathName, optPredicate = optPredicate, optProjection = optProjection)
}
}
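[Reviewer note] One subtlety in the hunks above: `VariantContextConverter.cleanAndMixInSupportedLines` expects a raw `org.slf4j.Logger`, so the call sites now unwrap grizzled's wrapper via `logger.logger`. A minimal sketch of that unwrapping, with a hypothetical class:
```scala
import grizzled.slf4j.Logging

// Hypothetical class; `logger` is grizzled's wrapper, and its `logger`
// field is the underlying org.slf4j.Logger that such APIs expect.
class NeedsRawSlf4j extends Logging {
  def underlying: org.slf4j.Logger = logger.logger
}
```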
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/GenomicDataset.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/GenomicDataset.scala
index 79c1e272cd..b0894d4c0a 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/GenomicDataset.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/GenomicDataset.scala
@@ -18,6 +18,7 @@
package org.bdgenomics.adam.rdd
import java.nio.file.Paths
+import grizzled.slf4j.Logging
import htsjdk.samtools.ValidationStringency
import htsjdk.variant.vcf.{
VCFFilterHeaderLine,
@@ -61,7 +62,7 @@ import org.bdgenomics.formats.avro.{
}
import org.bdgenomics.utils.cli.SaveArgs
import org.bdgenomics.utils.interval.array.IntervalArray
-import org.bdgenomics.utils.misc.{ HadoopUtil, Logging }
+import org.bdgenomics.utils.misc.HadoopUtil
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import scala.annotation.tailrec
@@ -376,7 +377,7 @@ trait GenomicDataset[T, U <: Product, V <: GenomicDataset[T, U, V]] extends Logg
def saveAsPartitionedParquet(pathName: String,
compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
partitionSize: Int = 1000000) {
- log.info("Saving directly as Hive-partitioned Parquet from SQL. " +
+ info("Saving directly as Hive-partitioned Parquet from SQL. " +
"Options other than compression codec are ignored.")
val df = toDF()
df.withColumn("positionBin", floor(df("start") / partitionSize))
@@ -614,7 +615,7 @@ trait GenomicDataset[T, U <: Product, V <: GenomicDataset[T, U, V]] extends Logg
case ValidationStringency.STRICT => {
throw new IllegalArgumentException(message)
}
- case ValidationStringency.LENIENT => log.warn(message)
+ case ValidationStringency.LENIENT => warn(message)
case _ =>
}
None
@@ -3102,7 +3103,7 @@ sealed abstract class GenericGenomicDataset[T, U <: Product] extends GenomicData
pageSize: Int = 1 * 1024 * 1024,
compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
disableDictionaryEncoding: Boolean = false) {
- log.warn("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
+ warn("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
dataset.toDF()
.write
.format("parquet")
@@ -3797,7 +3798,7 @@ abstract class AvroGenomicDataset[T <% IndexedRecord: Manifest, U <: Product, V
compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
disableDictionaryEncoding: Boolean = false,
optSchema: Option[Schema] = None): Unit = SaveAsADAM.time {
- log.info("Saving data in ADAM format")
+ info("Saving data in ADAM format")
val job = HadoopUtil.newJob(rdd.context)
ParquetOutputFormat.setCompression(job, compressCodec)
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/GenomicPartitioners.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/GenomicPartitioners.scala
index e123f66fb2..3b845984d7 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/GenomicPartitioners.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/GenomicPartitioners.scala
@@ -17,8 +17,8 @@
*/
package org.bdgenomics.adam.rdd
+import grizzled.slf4j.Logging
import org.bdgenomics.adam.models.{ ReferenceRegion, ReferencePosition, SequenceDictionary }
-import org.bdgenomics.utils.misc.Logging
import org.apache.spark.Partitioner
import scala.math._
@@ -38,8 +38,10 @@ import scala.math._
*/
case class GenomicPositionPartitioner(numParts: Int, seqLengths: Map[String, Long]) extends Partitioner with Logging {
- log.info("Have genomic position partitioner with " + numParts + " partitions, and sequences:")
- seqLengths.foreach(kv => log.info("Contig " + kv._1 + " with length " + kv._2))
+ info("Have genomic position partitioner with " + numParts + " partitions, and sequences:")
+ if (isInfoEnabled) {
+ seqLengths.foreach(kv => info("Contig " + kv._1 + " with length " + kv._2))
+ }
private val names: Seq[String] = seqLengths.keys.toSeq.sortWith(_ < _)
private val lengths: Seq[Long] = names.map(seqLengths(_))
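[Reviewer note] The `isInfoEnabled` guard added in this hunk is the exception to the no-guard rule: each message is still lazy, but the `foreach` itself visits every contig, so the guard skips the whole traversal when INFO is disabled. A sketch of the same idea against a hypothetical reporter:
```scala
import grizzled.slf4j.Logging

// Hypothetical reporter; mirrors the guard added in GenomicPartitioners.
class ContigReporter(seqLengths: Map[String, Long]) extends Logging {
  def report(): Unit = {
    // A single by-name message needs no guard, but skipping the loop
    // entirely avoids traversing the map when INFO is off.
    if (isInfoEnabled) {
      seqLengths.foreach { case (name, length) =>
        info("Contig " + name + " with length " + length)
      }
    }
  }
}
```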
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/OutFormatter.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/OutFormatter.scala
index 06a3d8d404..4876e71702 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/OutFormatter.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/OutFormatter.scala
@@ -20,7 +20,7 @@ package org.bdgenomics.adam.rdd
import java.io.InputStream
import java.lang.Process
import java.util.concurrent.{ Callable, TimeUnit }
-import org.bdgenomics.utils.misc.Logging
+import grizzled.slf4j.Logging
private[rdd] class OutFormatterRunner[T, U <: OutFormatter[T]](formatter: U,
is: InputStream,
@@ -45,7 +45,7 @@ private[rdd] class OutFormatterRunner[T, U <: OutFormatter[T]](formatter: U,
def hasNext: Boolean = {
if (hasTimedOut()) {
- log.warn("Piped command %s timed out after %d seconds.".format(
+ warn("Piped command %s timed out after %d seconds.".format(
finalCmd, optTimeout.get))
process.destroy()
false
@@ -57,7 +57,7 @@ private[rdd] class OutFormatterRunner[T, U <: OutFormatter[T]](formatter: U,
if (exited) {
process.exitValue()
} else {
- log.warn("Piped command %s timed out after %d seconds.".format(
+ warn("Piped command %s timed out after %d seconds.".format(
finalCmd, timeout))
process.destroy()
0
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/contig/NucleotideContigFragmentDataset.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/contig/NucleotideContigFragmentDataset.scala
index fde197fb5a..6e86bfd635 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/contig/NucleotideContigFragmentDataset.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/contig/NucleotideContigFragmentDataset.scala
@@ -155,7 +155,7 @@ case class DatasetBoundNucleotideContigFragmentDataset private[rdd] (
pageSize: Int = 1 * 1024 * 1024,
compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
disableDictionaryEncoding: Boolean = false) {
- log.info("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
+ info("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
dataset.toDF()
.write
.format("parquet")
@@ -259,7 +259,7 @@ sealed abstract class NucleotideContigFragmentDataset extends AvroGenomicDataset
override def saveAsPartitionedParquet(pathName: String,
compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
partitionSize: Int = 1000000) {
- log.info("Saving directly as Hive-partitioned Parquet from SQL. " +
+ info("Saving directly as Hive-partitioned Parquet from SQL. " +
"Options other than compression codec are ignored.")
val df = toDF()
.withColumnRenamed("contigName", "referenceName")
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/BEDInFormatter.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/BEDInFormatter.scala
index f7f1733ab1..d3ce290418 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/BEDInFormatter.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/BEDInFormatter.scala
@@ -25,7 +25,6 @@ import java.io.{
import org.bdgenomics.adam.rdd.{ InFormatter, InFormatterCompanion }
import org.bdgenomics.adam.sql.{ Feature => FeatureProduct }
import org.bdgenomics.formats.avro.Feature
-import org.bdgenomics.utils.misc.Logging
/**
* InFormatter companion that builds a BEDInFormatter to write features in BED format to a pipe.
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/FeatureDataset.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/FeatureDataset.scala
index 87e2a92fc9..ab20ca43f8 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/FeatureDataset.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/FeatureDataset.scala
@@ -322,7 +322,7 @@ case class DatasetBoundFeatureDataset private[rdd] (
pageSize: Int = 1 * 1024 * 1024,
compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
disableDictionaryEncoding: Boolean = false) {
- log.info("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
+ info("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
dataset.toDF()
.write
.format("parquet")
@@ -519,7 +519,7 @@ sealed abstract class FeatureDataset extends AvroGenomicDataset[Feature, Feature
disableFastConcat = disableFastConcat)
} else {
if (asSingleFile) {
- log.warn("asSingleFile = true ignored when saving as Parquet.")
+ warn("asSingleFile = true ignored when saving as Parquet.")
}
saveAsParquet(new JavaSaveArgs(filePath))
}
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/FeatureParser.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/FeatureParser.scala
index 3ca3553efc..82fc1cd457 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/FeatureParser.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/FeatureParser.scala
@@ -17,10 +17,10 @@
*/
package org.bdgenomics.adam.rdd.feature
+import grizzled.slf4j.Logging
import htsjdk.samtools.ValidationStringency
import org.bdgenomics.adam.models.SequenceRecord
import org.bdgenomics.formats.avro.Feature
-import org.bdgenomics.utils.misc.Logging
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
@@ -42,7 +42,7 @@ private[rdd] sealed trait FeatureParser extends Serializable with Logging {
if (stringency == ValidationStringency.STRICT) {
throw new IllegalArgumentException(message.format(line))
} else if (stringency == ValidationStringency.LENIENT) {
- log.warn(message.format(line))
+ warn(message.format(line))
}
None
}
@@ -251,7 +251,7 @@ private[rdd] class IntervalListParser extends FeatureParser {
throw new Exception(s"Expected fields of the form 'key:value' in field $field but got: $x. Line:\n$line")
} else {
if (stringency == ValidationStringency.LENIENT) {
- log.warn(s"Expected fields of the form 'key:value' in field $field but got: $x. Line:\n$line")
+ warn(s"Expected fields of the form 'key:value' in field $field but got: $x. Line:\n$line")
}
None
}
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/GFF3InFormatter.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/GFF3InFormatter.scala
index 734d66f106..41eed1a7fc 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/GFF3InFormatter.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/GFF3InFormatter.scala
@@ -25,7 +25,6 @@ import java.io.{
import org.bdgenomics.adam.rdd.{ InFormatter, InFormatterCompanion }
import org.bdgenomics.adam.sql.{ Feature => FeatureProduct }
import org.bdgenomics.formats.avro.Feature
-import org.bdgenomics.utils.misc.Logging
/**
* InFormatter companion that builds a GFF3InFormatter to write features in GFF3 format to a pipe.
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/GTFInFormatter.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/GTFInFormatter.scala
index c3ae0e1c53..93ea3c61ac 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/GTFInFormatter.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/GTFInFormatter.scala
@@ -25,7 +25,6 @@ import java.io.{
import org.bdgenomics.adam.rdd.{ InFormatter, InFormatterCompanion }
import org.bdgenomics.adam.sql.{ Feature => FeatureProduct }
import org.bdgenomics.formats.avro.Feature
-import org.bdgenomics.utils.misc.Logging
/**
* InFormatter companion that builds a GTFInFormatter to write features in GTF format to a pipe.
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/NarrowPeakInFormatter.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/NarrowPeakInFormatter.scala
index 5a88c2cffd..012c9206ff 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/NarrowPeakInFormatter.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/feature/NarrowPeakInFormatter.scala
@@ -25,7 +25,6 @@ import java.io.{
import org.bdgenomics.adam.rdd.{ InFormatter, InFormatterCompanion }
import org.bdgenomics.adam.sql.{ Feature => FeatureProduct }
import org.bdgenomics.formats.avro.Feature
-import org.bdgenomics.utils.misc.Logging
/**
* InFormatter companion that builds a NarrowPeakInFormatter to write features in NarrowPeak format to a pipe.
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/FragmentDataset.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/FragmentDataset.scala
index 469be95231..829ce1d03e 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/FragmentDataset.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/FragmentDataset.scala
@@ -17,6 +17,7 @@
*/
package org.bdgenomics.adam.rdd.fragment
+import grizzled.slf4j.Logging
import org.apache.parquet.hadoop.metadata.CompressionCodecName
import org.apache.spark.SparkContext
import org.apache.spark.api.java.function.{ Function => JFunction }
@@ -49,7 +50,6 @@ import org.bdgenomics.utils.interval.array.{
IntervalArray,
IntervalArraySerializer
}
-import org.bdgenomics.utils.misc.Logging
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
@@ -203,7 +203,7 @@ case class DatasetBoundFragmentDataset private[rdd] (
pageSize: Int = 1 * 1024 * 1024,
compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
disableDictionaryEncoding: Boolean = false) {
- log.info("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
+ info("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
dataset.toDF()
.write
.format("parquet")
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/InterleavedFASTQInFormatter.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/InterleavedFASTQInFormatter.scala
index c32a4cb600..819e8bec01 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/InterleavedFASTQInFormatter.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/InterleavedFASTQInFormatter.scala
@@ -18,12 +18,12 @@
package org.bdgenomics.adam.rdd.fragment
import java.io.OutputStream
+import grizzled.slf4j.Logging
import org.apache.hadoop.conf.Configuration
import org.bdgenomics.adam.converters.AlignmentRecordConverter
import org.bdgenomics.adam.rdd.{ InFormatter, InFormatterCompanion }
import org.bdgenomics.adam.sql.{ Fragment => FragmentProduct }
import org.bdgenomics.formats.avro.Fragment
-import org.bdgenomics.utils.misc.Logging
/**
* InFormatter companion that creates an InFormatter that writes interleaved
@@ -61,11 +61,11 @@ class InterleavedFASTQInFormatter private (
val reads = converter.convertFragment(frag).toSeq
if (reads.size < 2) {
- log.warn("Fewer than two reads for %s. Dropping...".format(frag))
+ warn("Fewer than two reads for %s. Dropping...".format(frag))
None
} else {
if (reads.size > 2) {
- log.warn("More than two reads for %s. Taking first 2.".format(frag))
+ warn("More than two reads for %s. Taking first 2.".format(frag))
}
Some((reads(0), reads(1)))
}
@@ -91,7 +91,7 @@ class InterleavedFASTQInFormatter private (
os.write(fastq2.getBytes)
os.write(fastq1.getBytes)
} else {
- log.warn("Improper pair of reads in fragment %s. Dropping...".format(p))
+ warn("Improper pair of reads in fragment %s. Dropping...".format(p))
}
})
}
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/Tab5InFormatter.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/Tab5InFormatter.scala
index 5a6374d3a4..f9d024e2cf 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/Tab5InFormatter.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/Tab5InFormatter.scala
@@ -18,12 +18,12 @@
package org.bdgenomics.adam.rdd.fragment
import java.io.OutputStream
+import grizzled.slf4j.Logging
import org.apache.hadoop.conf.Configuration
import org.bdgenomics.adam.converters.AlignmentRecordConverter
import org.bdgenomics.adam.rdd.{ InFormatter, InFormatterCompanion }
import org.bdgenomics.adam.sql.{ Fragment => FragmentProduct }
import org.bdgenomics.formats.avro.Fragment
-import org.bdgenomics.utils.misc.Logging
/**
* InFormatter companion that creates an InFormatter that writes Bowtie tab5 format.
@@ -70,7 +70,7 @@ class Tab5InFormatter private (
reads
} else {
if (reads.size > 2) {
- log.warn("More than two reads for %s. Taking first 2.".format(frag))
+ warn("More than two reads for %s. Taking first 2.".format(frag))
}
reads.take(2)
}
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/Tab6InFormatter.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/Tab6InFormatter.scala
index 2d88e2b05d..a85c4107f5 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/Tab6InFormatter.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/Tab6InFormatter.scala
@@ -18,12 +18,12 @@
package org.bdgenomics.adam.rdd.fragment
import java.io.OutputStream
+import grizzled.slf4j.Logging
import org.apache.hadoop.conf.Configuration
import org.bdgenomics.adam.converters.AlignmentRecordConverter
import org.bdgenomics.adam.rdd.{ InFormatter, InFormatterCompanion }
import org.bdgenomics.adam.sql.{ Fragment => FragmentProduct }
import org.bdgenomics.formats.avro.Fragment
-import org.bdgenomics.utils.misc.Logging
/**
* InFormatter companion that creates an InFormatter that writes Bowtie tab6 format.
@@ -69,7 +69,7 @@ class Tab6InFormatter private (
reads
} else {
if (reads.size > 2) {
- log.warn("More than two reads for %s. Taking first 2.".format(frag))
+ warn("More than two reads for %s. Taking first 2.".format(frag))
}
reads.take(2)
}
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/AlignmentRecordDataset.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/AlignmentRecordDataset.scala
index eb9f4b9085..3e68f2c7f7 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/AlignmentRecordDataset.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/AlignmentRecordDataset.scala
@@ -261,7 +261,7 @@ case class DatasetBoundAlignmentRecordDataset private[rdd] (
pageSize: Int = 1 * 1024 * 1024,
compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
disableDictionaryEncoding: Boolean = false) {
- log.info("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
+ info("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
dataset.toDF()
.write
.format("parquet")
@@ -552,7 +552,7 @@ sealed abstract class AlignmentRecordDataset extends AvroReadGroupGenomicDataset
if (args.outputPath.endsWith(".sam") ||
args.outputPath.endsWith(".bam") ||
args.outputPath.endsWith(".cram")) {
- log.info("Saving data in SAM/BAM/CRAM format")
+ info("Saving data in SAM/BAM/CRAM format")
saveAsSam(
args.outputPath,
isSorted = isSorted,
@@ -882,7 +882,7 @@ sealed abstract class AlignmentRecordDataset extends AvroReadGroupGenomicDataset
// clean up the header after writing
fs.delete(headPath, true)
} else {
- log.info(s"Writing single ${fileType} file (not Hadoop-style directory)")
+ info(s"Writing single ${fileType} file (not Hadoop-style directory)")
val tailPath = new Path(filePath + "_tail")
val outputPath = new Path(filePath)
@@ -942,7 +942,7 @@ sealed abstract class AlignmentRecordDataset extends AvroReadGroupGenomicDataset
* @return Returns a new RDD containing sorted reads.
*/
def sortReadsByReadName(): AlignmentRecordDataset = SortReads.time {
- log.info("Sorting reads by read name")
+ info("Sorting reads by read name")
transformDataset(_.orderBy("readName", "readInFragment"))
}
@@ -959,7 +959,7 @@ sealed abstract class AlignmentRecordDataset extends AvroReadGroupGenomicDataset
* @see sortReadsByReferencePositionAndIndex
*/
def sortReadsByReferencePosition(): AlignmentRecordDataset = SortReads.time {
- log.info("Sorting reads by reference position")
+ info("Sorting reads by reference position")
// NOTE: In order to keep unmapped reads from swamping a single partition
// we sort the unmapped reads by read name. We prefix with tildes ("~";
@@ -986,7 +986,7 @@ sealed abstract class AlignmentRecordDataset extends AvroReadGroupGenomicDataset
* @see sortReadsByReferencePosition
*/
def sortReadsByReferencePositionAndIndex(): AlignmentRecordDataset = SortByIndex.time {
- log.info("Sorting reads by reference index, using %s.".format(sequences))
+ info("Sorting reads by reference index, using %s.".format(sequences))
import scala.math.Ordering.{ Int => ImplicitIntOrdering, _ }
@@ -1431,7 +1431,7 @@ sealed abstract class AlignmentRecordDataset extends AvroReadGroupGenomicDataset
if (validationStringency == ValidationStringency.STRICT)
throw new IllegalArgumentException(msg)
else if (validationStringency == ValidationStringency.LENIENT)
- logError(msg)
+ warn(msg)
}
case ValidationStringency.SILENT =>
}
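The STRICT/LENIENT/SILENT dispatch above recurs throughout ADAM; only the LENIENT branch changes in this hunk (warn instead of the old logError). A minimal sketch of the idiom, with a hypothetical helper name:

```scala
import htsjdk.samtools.ValidationStringency

object StringencySketch {
  // Hypothetical helper: fail under STRICT, log under LENIENT, ignore under SILENT.
  def report(msg: String, stringency: ValidationStringency)(logFn: String => Unit): Unit =
    stringency match {
      case ValidationStringency.STRICT  => throw new IllegalArgumentException(msg)
      case ValidationStringency.LENIENT => logFn(msg)
      case _                            => // SILENT: intentionally ignore
    }
}
```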
@@ -1452,7 +1452,7 @@ sealed abstract class AlignmentRecordDataset extends AvroReadGroupGenomicDataset
maybeUnpersist(pairedRecords)
- log.info(
+ info(
"%d/%d records are properly paired: %d firsts, %d seconds".format(
numPairedRecords,
numRecords,
@@ -1558,7 +1558,7 @@ sealed abstract class AlignmentRecordDataset extends AvroReadGroupGenomicDataset
validationStringency: ValidationStringency = ValidationStringency.LENIENT,
persistLevel: Option[StorageLevel] = None) {
- log.info("Saving data in FASTQ format.")
+ info("Saving data in FASTQ format.")
fileName2Opt match {
case Some(fileName2) =>
saveAsPairedFastq(
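The surrounding save logic dispatches on whether a second output file name was given: two names mean paired FASTQ, one means unpaired. A hedged sketch of that shape, with println stand-ins for ADAM's actual savers:

```scala
object FastqSaveSketch {
  // Hypothetical stand-ins for the real savers, to show the dispatch shape.
  def saveAsPairedFastq(f1: String, f2: String): Unit = println(s"paired: $f1, $f2")
  def saveAsUnpairedFastq(f1: String): Unit = println(s"unpaired: $f1")

  def saveAsFastq(fileName1: String, fileName2Opt: Option[String]): Unit =
    fileName2Opt match {
      case Some(fileName2) => saveAsPairedFastq(fileName1, fileName2)
      case None            => saveAsUnpairedFastq(fileName1)
    }
}
```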
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/FASTQInFormatter.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/FASTQInFormatter.scala
index b70fc4379c..216e2e8e0c 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/FASTQInFormatter.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/FASTQInFormatter.scala
@@ -24,7 +24,6 @@ import org.bdgenomics.adam.rdd.{ InFormatter, InFormatterCompanion }
import org.bdgenomics.adam.rdd.fragment.FragmentDataset
import org.bdgenomics.adam.sql.{ AlignmentRecord => AlignmentRecordProduct }
import org.bdgenomics.formats.avro.AlignmentRecord
-import org.bdgenomics.utils.misc.Logging
/**
* InFormatter companion that creates an InFormatter that writes FASTQ.
@@ -43,7 +42,7 @@ object FASTQInFormatter extends InFormatterCompanion[AlignmentRecord, AlignmentR
}
class FASTQInFormatter private (
- conf: Configuration) extends InFormatter[AlignmentRecord, AlignmentRecordProduct, AlignmentRecordDataset, FASTQInFormatter] with Logging {
+ conf: Configuration) extends InFormatter[AlignmentRecord, AlignmentRecordProduct, AlignmentRecordDataset, FASTQInFormatter] {
protected val companion = FASTQInFormatter
private val converter = new AlignmentRecordConverter
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/MDTagging.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/MDTagging.scala
index f157bb6a2d..4d54b961e2 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/MDTagging.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/MDTagging.scala
@@ -17,12 +17,12 @@
*/
package org.bdgenomics.adam.rdd.read
+import grizzled.slf4j.Logging
import htsjdk.samtools.{ TextCigarCodec, ValidationStringency }
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.models.{ MdTag, ReferenceRegion }
import org.bdgenomics.adam.util.ReferenceFile
import org.bdgenomics.formats.avro.AlignmentRecord
-import org.bdgenomics.utils.misc.Logging
private[read] case class MDTagging(
reads: RDD[AlignmentRecord],
@@ -53,7 +53,7 @@ private[read] case class MDTagging(
if (validationStringency == ValidationStringency.STRICT) {
throw exception
} else if (validationStringency == ValidationStringency.LENIENT) {
- log.warn(exception.getMessage)
+ warn(exception.getMessage)
}
}
}
@@ -79,7 +79,7 @@ private[read] case class MDTagging(
if (validationStringency == ValidationStringency.STRICT) {
throw t
} else if (validationStringency == ValidationStringency.LENIENT) {
- log.warn("Caught exception when processing read %s: %s".format(
+ warn("Caught exception when processing read %s: %s".format(
read.getReferenceName, t))
}
read
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/MarkDuplicates.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/MarkDuplicates.scala
index b369b6ee0c..df1123fbac 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/MarkDuplicates.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/MarkDuplicates.scala
@@ -17,7 +17,7 @@
*/
package org.bdgenomics.adam.rdd.read
-import org.bdgenomics.utils.misc.Logging
+import grizzled.slf4j.Logging
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.instrumentation.Timers._
import org.bdgenomics.adam.models.{ ReadGroupDictionary, ReferencePosition }
@@ -80,12 +80,12 @@ private[rdd] object MarkDuplicates extends Serializable with Logging {
.filter(_.library.isEmpty)
emptyRgs.foreach(rg => {
- log.warn("Library ID is empty for read group %s from sample %s.".format(rg.id,
+ warn("Library ID is empty for read group %s from sample %s.".format(rg.id,
rg.sampleId))
})
if (emptyRgs.nonEmpty) {
- log.warn("For duplicate marking, all reads whose library is unknown will be treated as coming from the same library.")
+ warn("For duplicate marking, all reads whose library is unknown will be treated as coming from the same library.")
}
}
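The two warnings above describe a real behavioral wrinkle: read groups with no library all collapse onto the same key during duplicate marking. A small self-contained illustration of that collapse, with simplified field names:

```scala
object LibrarySketch {
  // Simplified stand-in for ADAM's read group metadata.
  case class ReadGroup(id: String, sampleId: String, library: Option[String])

  val readGroups = Seq(
    ReadGroup("rg1", "sampleA", Some("libX")),
    ReadGroup("rg2", "sampleA", None),
    ReadGroup("rg3", "sampleB", None))

  // rg2 and rg3 share the None key, so for duplicate marking they are
  // treated as coming from one library -- exactly what the warning flags.
  val byLibrary: Map[Option[String], Seq[ReadGroup]] = readGroups.groupBy(_.library)
}
```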
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/ReferencePositionPair.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/ReferencePositionPair.scala
index d412d8b8f1..3ce82a806a 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/ReferencePositionPair.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/ReferencePositionPair.scala
@@ -19,7 +19,6 @@ package org.bdgenomics.adam.rdd.read
import com.esotericsoftware.kryo.{ Kryo, Serializer }
import com.esotericsoftware.kryo.io.{ Input, Output }
-import org.bdgenomics.utils.misc.Logging
import org.bdgenomics.adam.instrumentation.Timers.CreateReferencePositionPair
import org.bdgenomics.adam.models.{
ReferencePosition,
@@ -31,7 +30,7 @@ import org.bdgenomics.formats.avro.AlignmentRecord
/**
* A singleton object for creating reference position pairs.
*/
-private[read] object ReferencePositionPair extends Logging {
+private[read] object ReferencePositionPair {
/**
* Extracts the reference positions from a bucket of reads from a single fragment.
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/SingleReadBucket.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/SingleReadBucket.scala
index 228630ebec..c249b3c937 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/SingleReadBucket.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/SingleReadBucket.scala
@@ -19,7 +19,6 @@ package org.bdgenomics.adam.rdd.read
import com.esotericsoftware.kryo.{ Kryo, Serializer }
import com.esotericsoftware.kryo.io.{ Output, Input }
-import org.bdgenomics.utils.misc.Logging
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.serialization.AvroSerializer
import org.bdgenomics.formats.avro.{
@@ -62,7 +61,7 @@ private class FragmentIterator(
/**
* Companion object for building SingleReadBuckets.
*/
-private[read] object SingleReadBucket extends Logging {
+private[read] object SingleReadBucket {
private def fromGroupedReads(reads: Iterable[AlignmentRecord]): SingleReadBucket = {
// split by mapping
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/realignment/IndelRealignmentTarget.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/realignment/IndelRealignmentTarget.scala
index bb3735d7e4..5a82d7e3d9 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/realignment/IndelRealignmentTarget.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/realignment/IndelRealignmentTarget.scala
@@ -20,7 +20,6 @@ package org.bdgenomics.adam.rdd.read.realignment
import com.esotericsoftware.kryo.io.{ Input, Output }
import com.esotericsoftware.kryo.{ Kryo, Serializer }
import htsjdk.samtools.CigarOperator
-import org.bdgenomics.utils.misc.Logging
import org.bdgenomics.adam.models.ReferenceRegion
import org.bdgenomics.adam.rich.RichAlignmentRecord
import org.bdgenomics.formats.avro.AlignmentRecord
@@ -149,7 +148,7 @@ private[adam] class IndelRealignmentTargetSerializer extends Serializer[IndelRea
private[adam] class IndelRealignmentTarget(
val variation: Option[ReferenceRegion],
- val readRange: ReferenceRegion) extends Logging with Serializable {
+ val readRange: ReferenceRegion) extends Serializable {
assert(variation.map(r => r.referenceName).forall(_ == readRange.referenceName))
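ReferencePositionPair, SingleReadBucket, and IndelRealignmentTarget never call a logging method in these files, so rather than being migrated the unused mixin is removed outright (RealignmentTargetFinder gets the same treatment later in this patch). The change is behavior-preserving; schematically, with hypothetical names:

```scala
import grizzled.slf4j.Logging

// Before: an inert mixin on a class that never logs.
class WithMixin(val range: String) extends Logging with Serializable

// After: same state, same behavior, one less trait linearized in.
class WithoutMixin(val range: String) extends Serializable
```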
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/realignment/RealignIndels.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/realignment/RealignIndels.scala
index 5f72d14fde..549e3f128d 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/realignment/RealignIndels.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/realignment/RealignIndels.scala
@@ -18,7 +18,7 @@
package org.bdgenomics.adam.rdd.read.realignment
import htsjdk.samtools.{ Cigar, CigarElement, CigarOperator }
-import org.bdgenomics.utils.misc.Logging
+import grizzled.slf4j.Logging
import org.apache.spark.SparkContext
import org.apache.spark.rdd.MetricsContext._
import org.apache.spark.rdd.RDD
@@ -148,7 +148,7 @@ private[read] object RealignIndels extends Serializable with Logging {
// group reads by target
val broadcastTargets = rich_rdd.context.broadcast(targets)
val targetSize = targets.length
- log.info("Mapping reads to %d targets.".format(targetSize))
+ info("Mapping reads to %d targets.".format(targetSize))
// identify targets that are covered too highly and drop them
val targetsToDrop = rich_rdd.flatMap(r => {
@@ -162,7 +162,7 @@ private[read] object RealignIndels extends Serializable with Logging {
val targetsToDropSet = targetsToDrop.map(_._1)
.toSet
- log.info("Dropping %d targets whose coverage is too high:\n%s".format(targetsToDrop.length, targetsToDrop.mkString("\n")))
+ info("Dropping %d targets whose coverage is too high:\n%s".format(targetsToDrop.length, targetsToDrop.mkString("\n")))
val bcastTargetsToDrop = rich_rdd.context.broadcast(targetsToDropSet)
val readsMappedToTarget = rich_rdd.groupBy((r: RichAlignmentRecord) => {
@@ -194,7 +194,7 @@ private[read] object RealignIndels extends Serializable with Logging {
// get reference and range from a single read
val readRefs = reads.flatMap((r: RichAlignmentRecord) => {
r.mdTag.fold {
- log.warn("Discarding read " + r.record.getReadName + " during reference re-creation.")
+ warn("Discarding read " + r.record.getReadName + " during reference re-creation.")
tossedReads += 1
(None: Option[(String, NumericRange[Long])])
} { (tag) =>
@@ -366,7 +366,7 @@ private[read] class RealignIndels(
val (bestConsensusMismatchSum, bestConsensus, bestMappings) = bestConsensusTuple
// check for a sufficient improvement in mismatch quality versus threshold
- log.info("On " + refRegion + ", before realignment, sum was " + totalMismatchSumPreCleaning +
+ info("On " + refRegion + ", before realignment, sum was " + totalMismatchSumPreCleaning +
", best realignment is " + bestConsensus + " with " + bestConsensusMismatchSum)
val lodImprovement = (totalMismatchSumPreCleaning - bestConsensusMismatchSum).toDouble / 10.0
if (lodImprovement > lodThreshold) {
@@ -409,7 +409,7 @@ private[read] class RealignIndels(
bestConsensus)
if (newEnd <= newStart) {
- log.warn("Realigning read %s failed because realignment issued an illegal alignment: start %d, end %d, CIGAR %s.".format(r, newStart, newEnd, newCigar))
+ warn("Realigning read %s failed because realignment issued an illegal alignment: start %d, end %d, CIGAR %s.".format(r, newStart, newEnd, newCigar))
r
} else {
builder.setStart(newStart)
@@ -433,19 +433,19 @@ private[read] class RealignIndels(
}
} catch {
case t: Throwable => {
- log.warn("Realigning read %s failed with %s. At:".format(r, t))
+ warn("Realigning read %s failed with %s. At:".format(r, t))
r
}
}
})
- log.info("On " + refRegion + ", realigned " + realignedReadCount + " reads to " +
+ info("On " + refRegion + ", realigned " + realignedReadCount + " reads to " +
bestConsensus + " due to LOD improvement of " + lodImprovement)
cleanedReads
}
} else {
- log.info("On " + refRegion + ", skipping realignment due to insufficient LOD improvement (" +
+ info("On " + refRegion + ", skipping realignment due to insufficient LOD improvement (" +
lodImprovement + "for consensus " + bestConsensus)
reads
}
@@ -454,7 +454,7 @@ private[read] class RealignIndels(
}
// return all reads that we cleaned and all reads that were initially realigned
val endTime = System.nanoTime()
- log.info("TARGET|\t%d\t%d\t%d\t%s\t%d\t%d\t%d\t%d\t%d".format(partitionIdx,
+ info("TARGET|\t%d\t%d\t%d\t%s\t%d\t%d\t%d\t%d\t%d".format(partitionIdx,
targetIdx,
endTime - startTime,
refRegion.referenceName,
@@ -466,7 +466,7 @@ private[read] class RealignIndels(
finalReads
} catch {
case t: Throwable => {
- log.warn("Realigning target %s failed with %s.".format(target, t))
+ warn("Realigning target %s failed with %s.".format(target, t))
reads
}
}
@@ -717,7 +717,7 @@ private[read] class RealignIndels(
richRdd.cache()
// find realignment targets
- log.info("Generating realignment targets...")
+ info("Generating realignment targets...")
val targets: Array[IndelRealignmentTarget] = RealignmentTargetFinder(
richRdd,
consensusGenerator,
@@ -732,14 +732,14 @@ private[read] class RealignIndels(
readRdd
} else {
// map reads to targets
- log.info("Grouping reads by target...")
+ info("Grouping reads by target...")
val readsMappedToTarget = RealignIndels.mapTargets(richRdd,
targets,
maxReadsPerTarget = maxReadsPerTarget)
richRdd.unpersist()
// realign target groups
- log.info("Sorting reads by reference in ADAM RDD")
+ info("Sorting reads by reference in ADAM RDD")
readsMappedToTarget.mapPartitionsWithIndex((idx, iter) => {
iter.flatMap(realignTargetGroup(_, idx))
}).map(r => r.record)
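For readers following the arithmetic in realignTargetGroup above: the mismatch sums read as Phred-scaled base qualities, so dividing their difference by 10 yields a log10-odds (LOD) improvement, which is compared against the threshold. A worked example with made-up numbers:

```scala
object LodSketch {
  // Hypothetical values; the formula is the one used in realignTargetGroup.
  val totalMismatchSumPreCleaning = 120 // summed qualities of mismatching bases before realignment
  val bestConsensusMismatchSum = 45     // best sum across candidate consensus sequences
  val lodThreshold = 5.0

  val lodImprovement = (totalMismatchSumPreCleaning - bestConsensusMismatchSum).toDouble / 10.0
  // lodImprovement = 7.5, which exceeds lodThreshold, so these reads would be realigned.
}
```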
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/realignment/RealignmentTargetFinder.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/realignment/RealignmentTargetFinder.scala
index f878d26fb9..64fca51a90 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/realignment/RealignmentTargetFinder.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/realignment/RealignmentTargetFinder.scala
@@ -17,7 +17,6 @@
*/
package org.bdgenomics.adam.rdd.read.realignment
-import org.bdgenomics.utils.misc.Logging
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.algorithms.consensus.{ ConsensusGenerator, ConsensusGeneratorFromReads }
import org.bdgenomics.adam.rich.RichAlignmentRecord
@@ -43,7 +42,7 @@ private[realignment] object RealignmentTargetFinder {
}
}
-private[realignment] class RealignmentTargetFinder extends Serializable with Logging {
+private[realignment] class RealignmentTargetFinder extends Serializable {
/**
* Joins two sorted sets of targets together. Is tail call recursive.
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/recalibration/BaseQualityRecalibration.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/recalibration/BaseQualityRecalibration.scala
index c9aeeafcdd..71fb6ea847 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/recalibration/BaseQualityRecalibration.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/recalibration/BaseQualityRecalibration.scala
@@ -17,6 +17,7 @@
*/
package org.bdgenomics.adam.rdd.read.recalibration
+import grizzled.slf4j.Logging
import htsjdk.samtools.{
CigarElement,
CigarOperator,
@@ -34,7 +35,6 @@ import org.bdgenomics.adam.models.{
}
import org.bdgenomics.adam.instrumentation.Timers._
import org.bdgenomics.formats.avro.AlignmentRecord
-import org.bdgenomics.utils.misc.Logging
import scala.annotation.tailrec
/**
@@ -81,7 +81,7 @@ private class BaseQualityRecalibration(
})
optStorageLevel.fold(covRdd)(sl => {
- log.info("User requested %s persistance for covariate RDD.".format(sl))
+ info("User requested %s persistance for covariate RDD.".format(sl))
covRdd.persist(sl)
})
}
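The optStorageLevel.fold call above is the standard Option idiom for persisting only on request; a minimal sketch:

```scala
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel

object PersistSketch {
  // Returns the RDD unchanged when no level is requested, otherwise persists it.
  def maybePersist[T](rdd: RDD[T], optLevel: Option[StorageLevel]): RDD[T] =
    optLevel.fold(rdd)(level => rdd.persist(level))
}
```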
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/GenotypeDataset.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/GenotypeDataset.scala
index b6a0def2fa..f36e3005da 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/GenotypeDataset.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/GenotypeDataset.scala
@@ -172,7 +172,7 @@ case class DatasetBoundGenotypeDataset private[rdd] (
pageSize: Int = 1 * 1024 * 1024,
compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
disableDictionaryEncoding: Boolean = false) {
- log.info("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
+ info("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
dataset.toDF()
.write
.format("parquet")
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/VCFOutFormatter.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/VCFOutFormatter.scala
index deb7ace160..35b4ba096c 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/VCFOutFormatter.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/VCFOutFormatter.scala
@@ -17,6 +17,7 @@
*/
package org.bdgenomics.adam.rdd.variant
+import grizzled.slf4j.Logging
import htsjdk.samtools.ValidationStringency
import htsjdk.variant.vcf.{
VCFCodec,
@@ -34,7 +35,6 @@ import org.bdgenomics.adam.converters.VariantContextConverter._
import org.bdgenomics.adam.converters.VariantContextConverter
import org.bdgenomics.adam.models.VariantContext
import org.bdgenomics.adam.rdd.OutFormatter
-import org.bdgenomics.utils.misc.Logging
import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer
@@ -104,7 +104,7 @@ case class VCFOutFormatter(
val header = codec.readActualHeader(lri).asInstanceOf[VCFHeader]
// merge header lines with our supported header lines
- val lines = cleanAndMixInSupportedLines(headerLines(header), stringency, log)
+ val lines = cleanAndMixInSupportedLines(headerLines(header), stringency, logger.logger)
// accumulate header lines if desired
optHeaderLines.map(accumulator => lines.foreach(line => accumulator.add(line)))
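One subtlety in this hunk: cleanAndMixInSupportedLines takes a raw org.slf4j.Logger, while the grizzled mixin supplies a grizzled.slf4j.Logger wrapper, hence the logger.logger reach-through. A short sketch of that relationship, with a hypothetical class name:

```scala
import grizzled.slf4j.Logging

class HeaderExample extends Logging {
  // logger is grizzled.slf4j.Logger; its logger member is the wrapped
  // org.slf4j.Logger, for APIs that want the raw SLF4J type.
  def rawSlf4j: org.slf4j.Logger = logger.logger
}
```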
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/VariantContextDataset.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/VariantContextDataset.scala
index c9a80ff2c2..c24851f94f 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/VariantContextDataset.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/VariantContextDataset.scala
@@ -17,6 +17,7 @@
*/
package org.bdgenomics.adam.rdd.variant
+import grizzled.slf4j.Logging
import htsjdk.samtools.ValidationStringency
import htsjdk.samtools.util.BlockCompressedOutputStream
import htsjdk.variant.vcf.{
@@ -60,7 +61,6 @@ import org.bdgenomics.adam.rdd.{
import org.bdgenomics.adam.sql.{ VariantContext => VariantContextProduct }
import org.bdgenomics.adam.util.{ FileMerger, FileExtensions }
import org.bdgenomics.formats.avro.Sample
-import org.bdgenomics.utils.misc.Logging
import org.bdgenomics.utils.interval.array.{
IntervalArray,
IntervalArraySerializer
@@ -224,7 +224,7 @@ sealed abstract class VariantContextDataset extends MultisampleGenomicDataset[Va
pageSize: Int = 1 * 1024 * 1024,
compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
disableDictionaryEncoding: Boolean = false) {
- log.info("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
+ info("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
dataset.toDF()
.write
.format("parquet")
@@ -359,7 +359,7 @@ sealed abstract class VariantContextDataset extends MultisampleGenomicDataset[Va
.format(filePath))
}
- log.info(s"Writing $vcfFormat file to $filePath")
+ info(s"Writing $vcfFormat file to $filePath")
// map samples to sample ids
val sampleIds = samples.map(_.getId)
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/VariantDataset.scala b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/VariantDataset.scala
index ffa700608c..dd40f55088 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/VariantDataset.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/rdd/variant/VariantDataset.scala
@@ -153,7 +153,7 @@ case class DatasetBoundVariantDataset private[rdd] (
pageSize: Int = 1 * 1024 * 1024,
compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
disableDictionaryEncoding: Boolean = false) {
- log.info("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
+ info("Saving directly as Parquet from SQL. Options other than compression codec are ignored.")
dataset.toDF()
.write
.format("parquet")
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/serialization/ADAMKryoRegistrator.scala b/adam-core/src/main/scala/org/bdgenomics/adam/serialization/ADAMKryoRegistrator.scala
index 697cb971fb..b6a8729a7c 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/serialization/ADAMKryoRegistrator.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/serialization/ADAMKryoRegistrator.scala
@@ -24,12 +24,12 @@ import com.esotericsoftware.kryo.io.{
Output
}
import com.esotericsoftware.kryo.{ Kryo, Serializer }
+import grizzled.slf4j.Logging
import it.unimi.dsi.fastutil.io.{ FastByteArrayInputStream, FastByteArrayOutputStream }
import org.apache.avro.io.{ BinaryDecoder, BinaryEncoder, DecoderFactory, EncoderFactory }
import org.apache.avro.specific.{ SpecificDatumReader, SpecificDatumWriter, SpecificRecord }
import org.apache.hadoop.io.Writable
import org.apache.spark.serializer.KryoRegistrator
-import org.bdgenomics.utils.misc.Logging
import scala.reflect.ClassTag
case class InputStreamWithDecoder(size: Int) {
@@ -287,7 +287,7 @@ class ADAMKryoRegistrator extends KryoRegistrator with Logging {
kryo.register(Class.forName("org.apache.spark.sql.execution.datasources.ExecutedWriteSummary"))
} catch {
case cnfe: java.lang.ClassNotFoundException => {
- if (log.isDebugEnabled) log.debug("Did not find Spark internal class. This is expected for earlier Spark versions.")
+ debug("Did not find Spark internal class. This is expected for earlier Spark versions.")
}
}
kryo.register(classOf[org.apache.spark.sql.catalyst.expressions.UnsafeRow])
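The dropped isDebugEnabled guard is safe because grizzled's logging methods take their message by name, so the argument expression is only evaluated when the level is enabled. A hedged sketch of why the guard became redundant, with hypothetical names:

```scala
import grizzled.slf4j.Logging

class GuardExample extends Logging {
  private def expensive(): String = (1 to 1000000).sum.toString // costly to build

  def report(): Unit = {
    // debug's parameter is by-name (=> Any), so expensive() never runs
    // unless DEBUG is enabled -- no explicit isDebugEnabled check needed.
    debug(s"state: ${expensive()}")
  }
}
```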
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/util/FileMerger.scala b/adam-core/src/main/scala/org/bdgenomics/adam/util/FileMerger.scala
index 8cdbd5d183..c08e7ecca5 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/util/FileMerger.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/util/FileMerger.scala
@@ -18,13 +18,13 @@
package org.bdgenomics.adam.util
import java.io.{ InputStream, OutputStream }
+import grizzled.slf4j.Logging
import htsjdk.samtools.cram.build.CramIO
import htsjdk.samtools.cram.common.CramVersions
import htsjdk.samtools.util.BlockCompressedStreamConstants
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{ FileSystem, Path }
import org.apache.spark.SparkContext
-import org.bdgenomics.utils.misc.Logging
import scala.annotation.tailrec
/**
@@ -184,7 +184,7 @@ private[adam] object FileMerger extends Logging {
// optionally copy the header
optHeaderPath.foreach(p => {
- log.info("Copying header file (%s)".format(p))
+ info("Copying header file (%s)".format(p))
// open our input file
val is = fsIn.open(p)
@@ -202,7 +202,7 @@ private[adam] object FileMerger extends Logging {
tailFiles.toSeq.foreach(p => {
// print a bit of progress logging
- log.info("Copying file %s, file %d of %d.".format(
+ info("Copying file %s, file %d of %d.".format(
p.toString,
filesCopied,
numFiles))
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/util/IndexedFastaFile.scala b/adam-core/src/main/scala/org/bdgenomics/adam/util/IndexedFastaFile.scala
index 0a2926524d..3eb01d242e 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/util/IndexedFastaFile.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/util/IndexedFastaFile.scala
@@ -18,6 +18,7 @@
package org.bdgenomics.adam.util
+import grizzled.slf4j.Logging
import htsjdk.samtools.ValidationStringency
import htsjdk.samtools.reference.{ FastaSequenceIndex, IndexedFastaSequenceFile }
import java.net.URI
@@ -25,7 +26,6 @@ import java.nio.file.Paths
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkContext
import org.bdgenomics.adam.models.{ SequenceDictionary, ReferenceRegion }
-import org.bdgenomics.utils.misc.Logging
/**
* Loads and extracts sequences directly from indexed fasta or fa files. filePath requires fai index in the
@@ -67,7 +67,7 @@ case class IndexedFastaFile(sc: SparkContext,
throw e
} else {
if (stringency == ValidationStringency.LENIENT) {
- log.warn("Caught exception %s when loading FASTA sequence dictionary. Using empty dictionary instead.".format(e))
+ warn("Caught exception %s when loading FASTA sequence dictionary. Using empty dictionary instead.".format(e))
}
SequenceDictionary.empty
}
diff --git a/adam-core/src/main/scala/org/bdgenomics/adam/util/ParallelFileMerger.scala b/adam-core/src/main/scala/org/bdgenomics/adam/util/ParallelFileMerger.scala
index 5cea070821..403ea9ec44 100644
--- a/adam-core/src/main/scala/org/bdgenomics/adam/util/ParallelFileMerger.scala
+++ b/adam-core/src/main/scala/org/bdgenomics/adam/util/ParallelFileMerger.scala
@@ -17,6 +17,7 @@
*/
package org.bdgenomics.adam.util
+import grizzled.slf4j.Logging
import htsjdk.samtools.cram.build.CramIO
import htsjdk.samtools.cram.common.CramVersions
import htsjdk.samtools.util.BlockCompressedStreamConstants
@@ -24,7 +25,6 @@ import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{ FileSystem, Path }
import org.apache.spark.SparkContext
import org.apache.spark.broadcast.Broadcast
-import org.bdgenomics.utils.misc.Logging
import scala.annotation.tailrec
import scala.math.min
diff --git a/pom.xml b/pom.xml
index 8de859fb78..a82c94c398 100644
--- a/pom.xml
+++ b/pom.xml
@@ -28,7 +28,8 @@
7.9.2
<slf4j.version>1.7.25</slf4j.version>
<bdg-formats.version>0.12.0</bdg-formats.version>
- <bdg-utils.version>0.2.13</bdg-utils.version>
+ <bdg-utils.version>0.2.15-SNAPSHOT</bdg-utils.version>
+ <grizzled-slf4j.version>1.3.3</grizzled-slf4j.version>
<htsjdk.version>2.18.2</htsjdk.version>
1.1.1
@@ -126,12 +127,12 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-deploy-plugin</artifactId>
- <version>2.8.2</version>
+ <version>3.0.0-M1</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
- <version>1.4.1</version>
+ <version>3.0.0-M2</version>
<executions>
<execution>
<id>enforce-versions</id>
@@ -141,8 +142,8 @@
<requireMavenVersion>
- <version>[3.1.1,)</version>
- <message>ADAM requires Maven 3.1.1 or greater</message>
+ <version>[3.3.9,)</version>
+ <message>ADAM requires Maven 3.3.9 or greater</message>
</requireMavenVersion>
<requireJavaVersion>
<version>[1.8,)</version>
@@ -156,17 +157,17 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-install-plugin</artifactId>
- <version>2.5.2</version>
+ <version>3.0.0-M1</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
- <version>3.1.0</version>
+ <version>3.1.1</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-resources-plugin</artifactId>
- <version>3.0.1</version>
+ <version>3.1.0</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
@@ -181,7 +182,7 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
- <version>2.22.1</version>
+ <version>3.0.0-M3</version>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
@@ -191,7 +192,7 @@
<groupId>org.scalatest</groupId>
<artifactId>scalatest-maven-plugin</artifactId>
- <version>1.0</version>
+ <version>2.0.0</version>
</plugin>
<plugin>
<groupId>pl.project13.maven</groupId>
@@ -543,14 +544,19 @@
<dependency>
<groupId>args4j</groupId>
<artifactId>args4j</artifactId>
- <version>2.0.31</version>
+ <version>2.33</version>
</dependency>
<dependency>
<groupId>org.scalatest</groupId>
<artifactId>scalatest_${scala.version.prefix}</artifactId>
- <version>2.2.6</version>
+ <version>3.0.7</version>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.clapper</groupId>
+ <artifactId>grizzled-slf4j_${scala.version.prefix}</artifactId>
+ <version>${grizzled-slf4j.version}</version>
+ </dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>jcl-over-slf4j</artifactId>
@@ -574,7 +580,7 @@
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
- <version>4.5.2</version>
+ <version>4.5.7</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
@@ -584,7 +590,7 @@
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
- <version>2.23.0</version>
+ <version>2.25.1</version>
<scope>test</scope>