Skip to content

Commit 25988f3

Browse files
committed
Add addSparkListener to JavaSparkContext
1 parent 163ba19 commit 25988f3

File tree

1 file changed

+10
-1
lines changed

1 file changed

+10
-1
lines changed

core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala

Lines changed: 10 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -34,7 +34,7 @@ import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat}
 import org.apache.spark._
 import org.apache.spark.AccumulatorParam._
-import org.apache.spark.annotation.Experimental
+import org.apache.spark.annotation.{DeveloperApi, Experimental}
 import org.apache.spark.api.java.JavaSparkContext.fakeClassTag
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.rdd.{EmptyRDD, HadoopRDD, NewHadoopRDD, RDD}
@@ -688,6 +688,15 @@ class JavaSparkContext(val sc: SparkContext)
     sc.clearFiles()
   }

+  /**
+   * :: DeveloperApi ::
+   * Register a listener to receive up-calls from events that happen during execution.
+   */
+  @DeveloperApi
+  def addSparkListener(listener: SparkListener): Unit = {
+    sc.addSparkListener(listener)
+  }
+
   /**
    * Returns the Hadoop configuration used for the Hadoop code (e.g. file systems) we reuse.
    */

0 commit comments

Comments (0)