SPARK-1095 add explicit return types to APIs.
ScrapCodes committed Feb 27, 2014
1 parent 3ddc8bb commit 737819a
Showing 3 changed files with 11 additions and 10 deletions.
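
The commit message is terse, so here is a minimal sketch (plain Scala, not taken from the Spark sources; all names hypothetical) of the problem explicit return types solve: when a public method omits its type annotation, scalac infers the type from the implementation, so a refactor of the body can silently change the signature or leak an implementation type into the API.

object InferredVsExplicit {
  private val jarList = scala.collection.mutable.ArrayBuffer("a.jar", "b.jar")

  // Inferred: the public return type becomes mutable.ArrayBuffer[String],
  // an implementation detail that leaks into the API and its scaladoc.
  def jarsInferred = jarList

  // Explicit: the contract stays Seq[String] even if the backing collection changes.
  def jarsExplicit: Seq[String] = jarList
}

The diffs below apply the same idea to Spark's Java-facing wrappers, pinning each public member to the type Java callers are meant to see.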
@@ -126,7 +126,7 @@ class JavaRDD[T](val rdd: RDD[T])(implicit val classTag: ClassTag[T])
   def subtract(other: JavaRDD[T], p: Partitioner): JavaRDD[T] =
     wrapRDD(rdd.subtract(other, p))
 
-  def generator = rdd.generator
+  def generator: String = rdd.generator
 
   override def toString = rdd.toString
@@ -74,7 +74,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
    * of the original partition.
    */
   def mapPartitionsWithIndex[R: ClassTag](
-      f: JFunction2[Int, java.util.Iterator[T], java.util.Iterator[R]],
+      f: JFunction2[Integer, java.util.Iterator[T], java.util.Iterator[R]],
       preservesPartitioning: Boolean = false): JavaRDD[R] =
     new JavaRDD(rdd.mapPartitionsWithIndex(((a,b) => f(a,asJavaIterator(b))),
       preservesPartitioning))
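
The Int → Integer change above concerns the Java interop boundary: a generic Java functional interface such as Spark's Function2 can only be parameterized with object types, so the partition index has to be exposed as the boxed java.lang.Integer rather than the primitive that scala.Int erases to. A standalone sketch (not part of the diff) of that distinction:

object BoxedIndexSketch {
  def main(args: Array[String]): Unit = {
    val primitive: Int = 7                    // scala.Int erases to the JVM primitive int
    val boxed: java.lang.Integer = primitive  // boxed via Predef.int2Integer at the boundary
    println(classOf[Int])                     // prints: int
    println(boxed.getClass)                   // prints: class java.lang.Integer
  }
}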
@@ -17,6 +17,7 @@
 
 package org.apache.spark.api.java
 
+import java.util
 import java.util.{Map => JMap}
 
 import scala.collection.JavaConversions
@@ -92,23 +93,23 @@ class JavaSparkContext(val sc: SparkContext) extends JavaSparkContextVarargsWorkaround
 
   private[spark] val env = sc.env
 
-  def isLocal = sc.isLocal
+  def isLocal: java.lang.Boolean = sc.isLocal
 
-  def sparkUser = sc.sparkUser
+  def sparkUser: String = sc.sparkUser
 
-  def master = sc.master
+  def master: String = sc.master
 
-  def appName = sc.appName
+  def appName: String = sc.appName
 
-  def jars = JavaConversions.seqAsJavaList(sc.jars)
+  def jars: util.List[String] = sc.jars
 
-  def startTime = sc.startTime
+  def startTime: java.lang.Long = sc.startTime
 
   /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */
-  def defaultParallelism = sc.defaultParallelism
+  def defaultParallelism: Integer = sc.defaultParallelism
 
   /** Default min number of partitions for Hadoop RDDs when not given by user */
-  def defaultMinSplits = sc.defaultMinSplits
+  def defaultMinSplits: Integer = sc.defaultMinSplits
 
   /** Distribute a local Scala collection to form an RDD. */
   def parallelize[T](list: java.util.List[T], numSlices: Int): JavaRDD[T] = {
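
For the JavaSparkContext accessors, the annotations also pin down boxing: without them scalac infers the Scala primitives (Boolean, Long, Int), while a Java-facing getter is generally meant to hand back the boxed java.lang types. A minimal sketch with a hypothetical wrapper class (not the real JavaSparkContext):

class ContextWrapperSketch(isLocalFlag: Boolean, startMillis: Long) {
  // Inferred: Boolean and Long, i.e. JVM primitives after erasure.
  def isLocalInferred = isLocalFlag
  def startTimeInferred = startMillis

  // Explicit: boxed objects, matching what the Java-facing API above returns.
  def isLocal: java.lang.Boolean = isLocalFlag   // boxed via Predef.boolean2Boolean
  def startTime: java.lang.Long = startMillis    // boxed via Predef.long2Long
}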
