
Commit

name
akiezun committed Sep 30, 2015
1 parent 298dc21 commit 9deac96
Showing 2 changed files with 19 additions and 8 deletions.
SparkCommandLineProgram.java
@@ -20,6 +20,14 @@ public abstract class SparkCommandLineProgram extends CommandLineProgram implements Serializable {
     @Argument(fullName = "sparkMaster", doc="URL of the Spark Master to submit jobs to when using the Spark pipeline runner.", optional = true)
     protected String sparkMaster = "local[2]";
 
+    @Argument(
+            doc = "Name of the program running",
+            shortName = "N",
+            fullName = "programName",
+            optional = true
+    )
+    public String programName;
+
     @Override
     protected Object doWork() {
         final JavaSparkContext ctx = SparkContextFactory.getSparkContext(getProgramName(), sparkMaster);
@@ -65,5 +73,13 @@ protected void afterPipeline(final JavaSparkContext ctx) {
         SparkContextFactory.stopSparkContext(ctx);
     }
 
-    protected abstract String getProgramName();
+    /**
+     * Returns the program's name.
+     * If the programName argument is provided, returns that; otherwise, returns the simple name of the class.
+     *
+     * Subclasses can override if desired.
+     */
+    protected String getProgramName() {
+        return programName == null ? getClass().getSimpleName() : programName;
+    }
 }
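
With this change, Spark tools no longer have to implement getProgramName(): the name defaults to the tool's simple class name and can be overridden at invocation via --programName (short name -N). A minimal sketch of how a subclass might now look, assuming runPipeline(JavaSparkContext) is the remaining abstract hook (as in ComputeCoveragePerIntervalSpark below); CountReadsSpark and its body are invented for illustration:

    import org.apache.spark.api.java.JavaSparkContext;

    // Hypothetical tool: no getProgramName() override is needed anymore.
    public final class CountReadsSpark extends SparkCommandLineProgram {
        private static final long serialVersionUID = 1L;

        @Override
        protected void runPipeline(final JavaSparkContext ctx) {
            // getProgramName() resolves to "CountReadsSpark" here unless the
            // user passed --programName (short name -N) on the command line.
        }
    }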
ComputeCoveragePerIntervalSpark.java
@@ -58,7 +58,7 @@ public final class ComputeCoveragePerIntervalSpark extends SparkCommandLineProgram {
     public File targetIntervalFile;
 
     @ArgumentCollection
-    private IntervalArgumentCollection intervalArgumentCollection = new OptionalIntervalArgumentCollection();
+    public IntervalArgumentCollection intervalArgumentCollection = new OptionalIntervalArgumentCollection();
 
     @Argument(doc = "uri for the output file: a local file path",
             shortName = StandardArgumentDefinitions.OUTPUT_SHORT_NAME, fullName = StandardArgumentDefinitions.OUTPUT_LONG_NAME,
@@ -90,7 +90,7 @@ protected void runPipeline(final JavaSparkContext ctx) {
         //so for now we'll write some filters out explicitly.
         // final ReadFilter readFilter = ctx.broadcast(new WellformedReadFilter(readsHeader));
         final JavaRDD<GATKRead> rawReads = readSource.getParallelReads(bam, intervals);
-        final JavaRDD<GATKRead> reads = rawReads.filter(read -> !read.isUnmapped() && read.getStart() <= read.getEnd());//.filter(read -> readFilter.getValue().test(read));
+        final JavaRDD<GATKRead> reads = rawReads.filter(read -> !read.isUnmapped() && read.getStart() <= read.getEnd());
         final Map<Locatable, Long> byKey = reads.flatMap(read -> islBroad.getValue().getOverlapping(new SimpleInterval(read))).countByValue();
 
         final SortedMap<Locatable, Object> byKeySorted = new TreeMap<>(IntervalUtils.LEXICOGRAPHICAL_ORDER_COMPARATOR);
@@ -134,9 +134,4 @@ private TargetCollection<Locatable> getTargetCollection() {
         final List<Locatable> targets = result.targets().stream().collect(Collectors.toList());
         return new HashedListTargetCollection<>(targets);
     }
-
-    @Override
-    protected String getProgramName() {
-        return getClass().getSimpleName();
-    }
 }
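
With its override removed, ComputeCoveragePerIntervalSpark inherits the default and reports its simple class name unless one is supplied at invocation. A hedged usage sketch, assuming the standard CommandLineProgram argument parsing (the launcher and other arguments are illustrative, not part of this commit):

    ComputeCoveragePerIntervalSpark [other tool arguments] --programName CoverageRun
    ComputeCoveragePerIntervalSpark [other tool arguments] -N CoverageRun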
