20 changes: 12 additions & 8 deletions core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -274,10 +274,17 @@ class SparkContext(config: SparkConf) extends Logging {
 
   // Create and start the scheduler
   private[spark] var taskScheduler = SparkContext.createTaskScheduler(this, master)
-  taskScheduler.start()
+  @volatile private[spark] var dagScheduler: DAGScheduler = _
+  try {
+    dagScheduler = new DAGScheduler(this)
+  } catch {
+    case e: Exception => throw
+      new SparkException("DAGScheduler cannot be initialized due to %s".format(e.getMessage))
+  }
 
-  @volatile private[spark] var dagScheduler = new DAGScheduler(this)
-  dagScheduler.start()
+  // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's
+  // constructor
Contributor:
nit: could you put these 2 in 1 line?

Contributor Author:
I found that the line length would be 103 if I put them together....
+  taskScheduler.start()
 
   private[spark] val cleaner: Option[ContextCleaner] = {
     if (conf.getBoolean("spark.cleaner.referenceTracking", true)) {
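The reordering matters because DAGScheduler's constructor is what hands the TaskScheduler its back-reference, so taskScheduler.start() must not run until construction has finished. A self-contained sketch of the ordering constraint, using simplified stand-in classes rather than the real Spark ones (setDAGScheduler here stands in for the wiring the diff comment describes):

  // Simplified stand-ins, not the real Spark classes.
  class TaskScheduler {
    @volatile var dagScheduler: DAGScheduler = _
    def setDAGScheduler(d: DAGScheduler): Unit = { dagScheduler = d }
    def start(): Unit =
      // Once started, the scheduler may dereference dagScheduler at any time.
      require(dagScheduler != null, "TaskScheduler started before DAGScheduler was wired in")
  }

  class DAGScheduler(ts: TaskScheduler) {
    ts.setDAGScheduler(this) // the constructor performs the wiring
  }

  object InitOrder extends App {
    val ts = new TaskScheduler
    val ds = new DAGScheduler(ts) // construct first: sets the back-reference
    ts.start()                    // now safe to start
  }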
@@ -1007,8 +1014,8 @@ class SparkContext(config: SparkConf) extends Logging {
       partitions: Seq[Int],
       allowLocal: Boolean,
       resultHandler: (Int, U) => Unit) {
-    partitions.foreach{ p =>
-      require(p >= 0 && p < rdd.partitions.size, s"Invalid partition requested: $p")
+    if (dagScheduler == null) {
+      throw new SparkException("SparkContext has been shutdown")
     }
     val callSite = getCallSite
     val cleanedFunc = clean(func)
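Since stop() evidently clears the dagScheduler reference (hence the null check), this guard turns a job submitted after shutdown from a NullPointerException deep in the scheduler into an immediate, descriptive SparkException. A hedged usage sketch (assuming a local-mode conf; any action funnels through runJob):

  val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("guard-demo"))
  val rdd = sc.parallelize(1 to 10)
  sc.stop()
  // After this change the action fails fast with
  // "SparkException: SparkContext has been shutdown" rather than an NPE.
  rdd.count()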
@@ -1116,9 +1123,6 @@
       resultHandler: (Int, U) => Unit,
       resultFunc: => R): SimpleFutureAction[R] =
   {
-    partitions.foreach{ p =>
-      require(p >= 0 && p < rdd.partitions.size, s"Invalid partition requested: $p")
-    }
     val cleanF = clean(processPartition)
     val callSite = getCallSite
     val waiter = dagScheduler.submitJob(
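Both the blocking runJob path above and this asynchronous submitJob path drop their per-call partition validation. Presumably the range check is enforced once at the common entry point, DAGScheduler.submitJob; a sketch of what such a centralized check can look like (names assumed for illustration, not quoted from this PR):

  // Validate requested partitions once, where every submission converges.
  val maxPartitions = rdd.partitions.size
  partitions.find(p => p >= maxPartitions || p < 0).foreach { p =>
    throw new IllegalArgumentException(
      s"Attempting to access a non-existent partition: $p. " +
        s"Total number of partitions: $maxPartitions")
  }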
6 changes: 3 additions & 3 deletions core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -1112,9 +1112,9 @@ abstract class RDD[T: ClassTag](
   @transient private var doCheckpointCalled = false
 
   /**
-   * Performs the checkpointing of this RDD by saving this. It is called by the DAGScheduler
-   * after a job using this RDD has completed (therefore the RDD has been materialized and
-   * potentially stored in memory). doCheckpoint() is called recursively on the parent RDDs.
+   * Performs the checkpointing of this RDD by saving this. It is called after a job using this RDD
+   * has completed (therefore the RDD has been materialized and potentially stored in memory).
+   * doCheckpoint() is called recursively on the parent RDDs.
    */
   private[spark] def doCheckpoint() {
     if (!doCheckpointCalled) {
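The reworded scaladoc drops the "called by the DAGScheduler" attribution while keeping the contract: checkpoint at most once, only after the job has materialized the RDD, recursing into parents. A sketch of that recursion, consistent with the two body lines visible in the hunk (checkpointData and dependencies are assumed members, not shown here):

  private[spark] def doCheckpoint() {
    if (!doCheckpointCalled) {
      doCheckpointCalled = true // at most one checkpoint per RDD
      if (checkpointData.isDefined) {
        checkpointData.get.doCheckpoint() // persist this RDD's data
      } else {
        dependencies.foreach(_.rdd.doCheckpoint()) // recurse into parents
      }
    }
  }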