Commit 186556d

Merge baf789d into fc3e5fd

heuermh committed May 26, 2017
2 parents fc3e5fd + baf789d
Showing 23 changed files with 647 additions and 416 deletions.
4 changes: 2 additions & 2 deletions README.md
@@ -72,12 +72,12 @@ ADAM ACTIONS
   countContigKmers : Counts the k-mers/q-mers from a read dataset.
   transformAlignments : Convert SAM/BAM to ADAM format and optionally perform read pre-processing transformations
   transformFeatures : Convert a file with sequence features into corresponding ADAM format and vice versa
+  transformGenotypes : Convert a file with genotypes into corresponding ADAM format and vice versa
+  transformVariants : Convert a file with variants into corresponding ADAM format and vice versa
   mergeShards : Merges the shards of a file
   reads2coverage : Calculate the coverage from a given ADAM file
 CONVERSION OPERATIONS
-  vcf2adam : Convert a VCF file to the corresponding ADAM format
-  adam2vcf : Convert an ADAM variant to the VCF ADAM format
   fasta2adam : Converts a text FASTA sequence file into an ADAMNucleotideContig Parquet file which represents assembled sequences.
   adam2fasta : Convert ADAM nucleotide contig fragments to FASTA files
   adam2fastq : Convert BAM to FASTQ files
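
With this change, conversion in both directions goes through a single action: an OUTPUT path ending in .vcf selects VCF output, anything else is written as Parquet. A hypothetical pair of invocations, assuming the adam-submit launcher shipped with ADAM and illustrative paths:

  adam-submit transformGenotypes sample.vcf sample.genotypes.adam
  adam-submit transformGenotypes -single sample.genotypes.adam sample.vcf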
@@ -34,7 +34,7 @@ public static GenotypeRDD conduit(final GenotypeRDD recordRdd,
         // make temp directory and save file
         Path tempDir = Files.createTempDirectory("javaAC");
         String fileName = tempDir.toString() + "/testRdd.genotype.adam";
-        recordRdd.save(fileName);
+        recordRdd.saveAsParquet(fileName);

         // create a new adam context and load the file
         JavaADAMContext jac = new JavaADAMContext(ac);
@@ -34,7 +34,7 @@ public static VariantRDD conduit(final VariantRDD recordRdd,
         // make temp directory and save file
         Path tempDir = Files.createTempDirectory("javaAC");
         String fileName = tempDir.toString() + "/testRdd.variant.adam";
-        recordRdd.save(fileName);
+        recordRdd.saveAsParquet(fileName);

         // create a new adam context and load the file
         JavaADAMContext jac = new JavaADAMContext(ac);
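
Both conduit tests follow the same save-then-reload pattern; the change swaps the generic save for an explicit saveAsParquet. The equivalent round trip from Scala, as a minimal sketch (assuming a SparkContext sc with the ADAMContext implicits in scope and an existing GenotypeRDD named genotypes; the path is illustrative):

  import java.nio.file.Files
  import org.bdgenomics.adam.rdd.ADAMContext._

  // write the genotypes as Parquet, then load them back through the context
  val tempDir = Files.createTempDirectory("javaAC")
  val fileName = tempDir.toString + "/testRdd.genotype.adam"
  genotypes.saveAsParquet(fileName)
  val reloaded = sc.loadGenotypes(fileName)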
105 changes: 0 additions & 105 deletions adam-cli/src/main/scala/org/bdgenomics/adam/cli/ADAM2Vcf.scala

This file was deleted.

@@ -36,15 +36,15 @@ object ADAMMain {
           CountContigKmers,
           TransformAlignments,
           TransformFeatures,
+          TransformGenotypes,
+          TransformVariants,
           MergeShards,
           Reads2Coverage
         )
       ),
       CommandGroup(
         "CONVERSION OPERATIONS",
         List(
-          Vcf2ADAM,
-          ADAM2Vcf,
           Fasta2ADAM,
           ADAM2Fasta,
           ADAM2Fastq,
132 changes: 132 additions & 0 deletions adam-cli/src/main/scala/org/bdgenomics/adam/cli/TransformGenotypes.scala
@@ -0,0 +1,132 @@
/**
 * Licensed to Big Data Genomics (BDG) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The BDG licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.bdgenomics.adam.cli

import htsjdk.samtools.ValidationStringency
import org.apache.spark.SparkContext
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.rdd.{ ADAMSaveAnyArgs, GenomicRDD }
import org.bdgenomics.utils.cli._
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }

object TransformGenotypes extends BDGCommandCompanion {
  val commandName = "transformGenotypes"
  val commandDescription = "Convert a file with genotypes into corresponding ADAM format and vice versa"

  def apply(cmdLine: Array[String]) = {
    new TransformGenotypes(Args4j[TransformGenotypesArgs](cmdLine))
  }
}

class TransformGenotypesArgs extends Args4jBase with ADAMSaveAnyArgs with ParquetArgs {
  @Argument(required = true, metaVar = "INPUT", usage = "The genotypes file to convert (e.g., .vcf, .vcf.gz, .vcf.bgzf, .vcf.bgz). If extension is not detected, Parquet is assumed.", index = 0)
  var inputPath: String = null

  @Argument(required = true, metaVar = "OUTPUT", usage = "Location to write ADAM genotypes data. If extension is not detected, Parquet is assumed.", index = 1)
  var outputPath: String = null

  @Args4jOption(required = false, name = "-coalesce", usage = "Number of partitions written to the ADAM output directory.")
  var coalesce: Int = -1

  @Args4jOption(required = false, name = "-force_shuffle_coalesce", usage = "Even if the repartitioned RDD has fewer partitions, force a shuffle.")
  var forceShuffle: Boolean = false

  @Args4jOption(required = false, name = "-sort_on_save", usage = "Sort VCF output by contig index.")
  var sort: Boolean = false

  @Args4jOption(required = false, name = "-sort_lexicographically_on_save", usage = "Sort VCF output by lexicographic order. Conflicts with -sort_on_save.")
  var sortLexicographically: Boolean = false

  @Args4jOption(required = false, name = "-single", usage = "Save as a single VCF file.")
  var asSingleFile: Boolean = false

  @Args4jOption(required = false, name = "-defer_merging", usage = "Defers merging single file output.")
  var deferMerging: Boolean = false

  @Args4jOption(required = false, name = "-disable_fast_concat", usage = "Disables the parallel file concatenation engine.")
  var disableFastConcat: Boolean = false

  @Args4jOption(required = false, name = "-stringency", usage = "Stringency level for various checks; can be SILENT, LENIENT, or STRICT. Defaults to STRICT.")
  var stringency: String = "STRICT"

  // must be defined due to ADAMSaveAnyArgs, but unused here
  var sortFastqOutput: Boolean = false
}

/**
 * Convert a file with genotypes into corresponding ADAM format and vice versa.
 */
class TransformGenotypes(val args: TransformGenotypesArgs)
    extends BDGSparkCommand[TransformGenotypesArgs] {
  val companion = TransformGenotypes
  val stringency = ValidationStringency.valueOf(args.stringency)

  /**
   * Coalesce the specified GenomicRDD if requested.
   *
   * @param rdd GenomicRDD to coalesce.
   * @return The specified GenomicRDD coalesced if requested.
   */
  private def maybeCoalesce[U <: GenomicRDD[_, U]](rdd: U): U = {
    if (args.coalesce != -1) {
      log.info("Coalescing the number of partitions to '%d'".format(args.coalesce))
      if (args.coalesce > rdd.rdd.partitions.length || args.forceShuffle) {
        rdd.transform(_.coalesce(args.coalesce, shuffle = true))
      } else {
        rdd.transform(_.coalesce(args.coalesce, shuffle = false))
      }
    } else {
      rdd
    }
  }

  /**
   * Sort the specified GenomicRDD if requested.
   *
   * @param rdd GenomicRDD to sort.
   * @return The specified GenomicRDD sorted if requested.
   */
  private def maybeSort[U <: GenomicRDD[_, U]](rdd: U): U = {
    if (args.sort) {
      log.info("Sorting before saving")
      rdd.sort()
    } else if (args.sortLexicographically) {
      log.info("Sorting lexicographically before saving")
      rdd.sortLexicographically()
    } else {
      rdd
    }
  }

  def run(sc: SparkContext) {
    require(!(args.sort && args.sortLexicographically),
      "Cannot set both -sort_on_save and -sort_lexicographically_on_save.")

    val genotypes = sc.loadGenotypes(
      args.inputPath,
      optPredicate = None,
      optProjection = None,
      stringency = stringency)

    if (args.outputPath.endsWith(".vcf")) {
      maybeSort(maybeCoalesce(genotypes.toVariantContextRDD)).saveAsVcf(args)
    } else {
      maybeSort(maybeCoalesce(genotypes)).saveAsParquet(args)
    }
  }
}
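
End to end, the command parses its arguments via Args4j, loads genotypes at the requested validation stringency, optionally coalesces and sorts, and picks the output format from the OUTPUT extension: a path ending in .vcf goes through toVariantContextRDD and saveAsVcf, anything else is written as Parquet. A minimal sketch of driving the command programmatically, assuming an existing SparkContext sc (the argument values are illustrative):

  import org.bdgenomics.adam.cli.{ TransformGenotypes, TransformGenotypesArgs }
  import org.bdgenomics.utils.cli.Args4j

  // parse CLI-style arguments; "out.adam" does not end in .vcf, so this writes Parquet
  val args = Args4j[TransformGenotypesArgs](Array("in.vcf", "out.adam", "-coalesce", "4"))
  new TransformGenotypes(args).run(sc)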
