diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala
index 816157b0cd85e..d7ce8fdfc23f4 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala
@@ -126,7 +126,7 @@ class JavaRDD[T](val rdd: RDD[T])(implicit val classTag: ClassTag[T])
   def subtract(other: JavaRDD[T], p: Partitioner): JavaRDD[T] =
     wrapRDD(rdd.subtract(other, p))
 
-  def generator = rdd.generator
+  def generator: String = rdd.generator
 
   override def toString = rdd.toString
 
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala b/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
index 24a9925dbd22c..a3157b5bfb472 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
@@ -74,7 +74,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
    * of the original partition.
    */
   def mapPartitionsWithIndex[R: ClassTag](
-      f: JFunction2[Int, java.util.Iterator[T], java.util.Iterator[R]],
+      f: JFunction2[Integer, java.util.Iterator[T], java.util.Iterator[R]],
       preservesPartitioning: Boolean = false): JavaRDD[R] =
     new JavaRDD(rdd.mapPartitionsWithIndex(((a,b) => f(a,asJavaIterator(b))),
         preservesPartitioning))
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala b/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
index 084ec5a8e319b..916259191baa8 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.api.java
 
+import java.util
 import java.util.{Map => JMap}
 
 import scala.collection.JavaConversions
@@ -92,23 +93,23 @@ class JavaSparkContext(val sc: SparkContext) extends JavaSparkContextVarargsWork
 
   private[spark] val env = sc.env
 
-  def isLocal = sc.isLocal
+  def isLocal: java.lang.Boolean = sc.isLocal
 
-  def sparkUser = sc.sparkUser
+  def sparkUser: String = sc.sparkUser
 
-  def master = sc.master
+  def master: String = sc.master
 
-  def appName = sc.appName
+  def appName: String = sc.appName
 
-  def jars = JavaConversions.seqAsJavaList(sc.jars)
+  def jars: util.List[String] = sc.jars
 
-  def startTime = sc.startTime
+  def startTime: java.lang.Long = sc.startTime
 
   /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */
-  def defaultParallelism = sc.defaultParallelism
+  def defaultParallelism: Integer = sc.defaultParallelism
 
   /** Default min number of partitions for Hadoop RDDs when not given by user */
-  def defaultMinSplits = sc.defaultMinSplits
+  def defaultMinSplits: Integer = sc.defaultMinSplits
 
   /** Distribute a local Scala collection to form an RDD. */
   def parallelize[T](list: java.util.List[T], numSlices: Int): JavaRDD[T] = {
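
The `JFunction2[Int, ...]` to `JFunction2[Integer, ...]` change matters because Java generics cannot carry primitives: a Scala `Int` used as a type argument surfaces to Java as `Object`, leaving the partition index untyped for Java implementers, whereas `Integer` keeps it typed. Below is a minimal, hypothetical Java caller sketching the effect; the class name `PartitionIndexExample` and the sample data are invented, and it assumes a build that includes this patch.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;

public class PartitionIndexExample {
  public static void main(String[] args) {
    JavaSparkContext sc = new JavaSparkContext("local[2]", "PartitionIndexExample");
    JavaRDD<String> words = sc.parallelize(Arrays.asList("a", "b", "c", "d"), 2);

    // With the boxed Integer parameter, the partition index arrives as a
    // typed Integer rather than an erased Object.
    JavaRDD<String> tagged = words.mapPartitionsWithIndex(
        new Function2<Integer, Iterator<String>, Iterator<String>>() {
          public Iterator<String> call(Integer index, Iterator<String> it) {
            // Prefix each element with the index of the partition it came from.
            List<String> out = new ArrayList<String>();
            while (it.hasNext()) {
              out.add(index + ":" + it.next());
            }
            return out.iterator();
          }
        },
        false); // Scala's default argument is not visible from Java, so pass it explicitly

    System.out.println(tagged.collect()); // e.g. [0:a, 0:b, 1:c, 1:d]
    sc.stop();
  }
}
```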
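Similarly, the explicit annotations on the `JavaSparkContext` accessors pin down the Java-side types: boxed `Boolean`/`Integer`/`Long` objects and a `java.util.List` rather than a Scala `Seq`. A hypothetical snippet (class name invented, same assumptions as above) showing what Java callers can now bind to directly:

```java
import java.util.List;

import org.apache.spark.api.java.JavaSparkContext;

public class ContextInfoExample {
  public static void main(String[] args) {
    JavaSparkContext sc = new JavaSparkContext("local", "ContextInfoExample");

    Boolean local = sc.isLocal();              // boxed java.lang.Boolean
    Integer parallelism = sc.defaultParallelism(); // boxed java.lang.Integer
    Long started = sc.startTime();             // boxed java.lang.Long
    List<String> jars = sc.jars();             // java.util.List, no Scala Seq in sight

    System.out.println(local + " / " + parallelism + " / " + started + " / " + jars);
    sc.stop();
  }
}
```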