Skip to content

Commit

Permalink
Ensure that HadoopRDD is actually serializable
Browse files · Browse the repository at this point in the history
  • Loading branch information
Andrew Or committed Apr 28, 2015
1 parent c3bfcae commit aa868a9
Showing 1 changed file with 5 additions and 1 deletion.
6 changes: 5 additions & 1 deletion core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ private[spark] class HadoopPartition(rddId: Int, idx: Int, @transient s: InputSp
*/
@DeveloperApi
class HadoopRDD[K, V](
sc: SparkContext,
@transient sc: SparkContext,
broadcastedConf: Broadcast[SerializableWritable[Configuration]],
initLocalJobConfFuncOpt: Option[JobConf => Unit],
inputFormatClass: Class[_ <: InputFormat[K, V]],
Expand All @@ -108,6 +108,10 @@ class HadoopRDD[K, V](
minPartitions: Int)
extends RDD[(K, V)](sc, Nil) with Logging {

// Eagerly run the user-supplied JobConf-initialization closure through
// SparkContext#clean at construction time. NOTE(review): presumably this
// strips non-serializable enclosing references so the RDD itself stays
// serializable (per the commit title) — confirm against SparkContext.clean.
// Idiomatic form: foreach replaces the isDefined/get pair; the cleaned
// result is discarded in both versions, so behavior is unchanged.
initLocalJobConfFuncOpt.foreach(f => sc.clean(f))

def this(
sc: SparkContext,
conf: JobConf,
Expand Down

0 comments on commit aa868a9

Please sign in to comment.