From 25988f377fc0cbeadbddd630f9564697b07ef877 Mon Sep 17 00:00:00 2001
From: Josh Rosen
Date: Mon, 19 Jan 2015 19:22:27 -0800
Subject: [PATCH] Add addSparkListener to JavaSparkContext

---
 .../org/apache/spark/api/java/JavaSparkContext.scala | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala b/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
index 4cbc624ad9cc0..2a55edd2db0d4 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
@@ -34,7 +34,7 @@ import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat}
 
 import org.apache.spark._
 import org.apache.spark.AccumulatorParam._
-import org.apache.spark.annotation.Experimental
+import org.apache.spark.annotation.{DeveloperApi, Experimental}
 import org.apache.spark.api.java.JavaSparkContext.fakeClassTag
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.rdd.{EmptyRDD, HadoopRDD, NewHadoopRDD, RDD}
@@ -688,6 +688,15 @@ class JavaSparkContext(val sc: SparkContext)
     sc.clearFiles()
   }
 
+  /**
+   * :: DeveloperApi ::
+   * Register a listener to receive up-calls from events that happen during execution.
+   */
+  @DeveloperApi
+  def addSparkListener(listener: SparkListener): Unit = {
+    sc.addSparkListener(listener)
+  }
+
   /**
    * Returns the Hadoop configuration used for the Hadoop code (e.g. file systems) we reuse.
    */
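
A minimal usage sketch of the method this patch adds, assuming the Spark 1.x
listener API; the JobEndLogger class and the trivial job are hypothetical
examples for illustration, not part of the patch:

    import org.apache.spark.SparkConf
    import org.apache.spark.api.java.JavaSparkContext
    import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd}

    // Hypothetical listener that logs job completions. SparkListener's other
    // callbacks default to no-ops, so only the event of interest is overridden.
    class JobEndLogger extends SparkListener {
      override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
        println(s"Job ${jobEnd.jobId} finished")
      }
    }

    object ListenerExample {
      def main(args: Array[String]): Unit = {
        val conf = new SparkConf().setAppName("listener-example").setMaster("local[2]")
        val jsc = new JavaSparkContext(conf)
        // The method added by this patch: register the listener directly on
        // the JavaSparkContext instead of going through jsc.sc().
        jsc.addSparkListener(new JobEndLogger)
        // Run a trivial job so the listener's onJobEnd callback fires.
        jsc.parallelize(java.util.Arrays.asList(1, 2, 3)).count()
        jsc.stop()
      }
    }

Since the new method simply forwards to the wrapped SparkContext, callers that
previously unwrapped the Scala context to register a listener can now do so
through the Java-friendly API directly.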