Skip to content

Commit

Permalink
Add command line support for loading references in TransformFeatures.
Browse files Browse the repository at this point in the history
  • Loading branch information
heuermh committed Jun 12, 2019
1 parent 988897e commit b66e9e1
Showing 1 changed file with 10 additions and 4 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -35,12 +35,16 @@ object TransformFeatures extends BDGCommandCompanion {
class TransformFeaturesArgs extends Args4jBase with ParquetSaveArgs {
@Argument(required = true, metaVar = "INPUT",
usage = "The feature file to convert (e.g., .bed, .gff/.gtf, .gff3, .interval_list, .narrowPeak). If extension is not detected, Parquet is assumed.", index = 0)
var featuresFile: String = _
var featuresPath: String = _

@Argument(required = true, metaVar = "OUTPUT",
usage = "Location to write ADAM feature data. If extension is not detected, Parquet is assumed.", index = 1)
var outputPath: String = null

@Args4jOption(required = false, name = "-reference",
usage = "Load reference for features; .dict as HTSJDK sequence dictionary format, .genome as Bedtools genome file format, .txt as UCSC Genome Browser chromInfo files.")
var referencePath: String = null

@Args4jOption(required = false, name = "-num_partitions",
usage = "Number of partitions to load a text file using.")
var numPartitions: Int = _
Expand All @@ -62,10 +66,12 @@ class TransformFeatures(val args: TransformFeaturesArgs)
def run(sc: SparkContext) {
  // Fail fast: verify the output location is writeable before loading anything.
  checkWriteablePath(args.outputPath, sc.hadoopConfiguration)

  // Optionally load a sequence dictionary to attach to the loaded features.
  // Option(...) maps a null (unset -reference arg) to None, so the reference
  // is only read when the user supplied one (.dict, .genome, or .txt formats,
  // per the -reference option's usage text).
  val optSequenceDictionary = Option(args.referencePath).map(sc.loadSequenceDictionary(_))

  // Load the features (format inferred from the input path extension,
  // Parquet assumed otherwise) and save them to the requested output.
  sc.loadFeatures(
    args.featuresPath,
    optSequenceDictionary = optSequenceDictionary,
    optMinPartitions = Option(args.numPartitions)
  ).save(args.outputPath, args.single, args.disableFastConcat)
}
}

0 comments on commit b66e9e1

Please sign in to comment.