@@ -42,7 +42,7 @@ import org.apache.spark.util.{AccumulatorV2, ThreadUtils, Utils}
  * up to launch speculative tasks, etc.
  *
  * Clients should first call initialize() and start(), then submit task sets through the
- * runTasks method.
+ * submitTasks method.
  *
  * THREADING: [[SchedulerBackend]]s and task-submitting clients can call this class from multiple
  * threads, so it needs locks in public API methods to maintain its state. In addition, some
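
The doc-comment fix above (runTasks -> submitTasks) names the client lifecycle: call initialize() and start() first, then hand work to submitTasks. A minimal sketch of that ordering with a toy scheduler; every name below is a hypothetical stand-in, not Spark's private[spark] API:

```scala
// Toy model of the lifecycle described in the scaladoc; illustrative only.
object LifecycleSketch {
  // Hypothetical task set: a stage id plus opaque task payloads.
  final case class ToyTaskSet(stageId: Int, tasks: Seq[String])

  final class ToyScheduler {
    private var initialized = false
    private var started = false

    // Wire up state (the real scheduler attaches its SchedulerBackend here).
    def initialize(): Unit = { initialized = true }

    def start(): Unit = {
      require(initialized, "initialize() must be called before start()")
      started = true
    }

    // Clients submit work here, matching the corrected method name.
    def submitTasks(taskSet: ToyTaskSet): Unit = {
      require(started, "start() must be called before submitTasks()")
      println(s"queued stage ${taskSet.stageId} with ${taskSet.tasks.size} tasks")
    }
  }

  def main(args: Array[String]): Unit = {
    val scheduler = new ToyScheduler
    scheduler.initialize()
    scheduler.start()
    scheduler.submitTasks(ToyTaskSet(stageId = 0, tasks = Seq("t0", "t1")))
  }
}
```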
@@ -62,7 +62,7 @@ private[spark] class TaskSchedulerImpl(
     this(sc, sc.conf.get(config.MAX_TASK_FAILURES))
   }
 
-  // Lazily initializing blackListTrackOpt to avoid getting empty ExecutorAllocationClient,
+  // Lazily initializing blacklistTrackerOpt to avoid getting empty ExecutorAllocationClient,
   // because ExecutorAllocationClient is created after this TaskSchedulerImpl.
   private[scheduler] lazy val blacklistTrackerOpt = maybeCreateBlacklistTracker(sc)
 
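
The corrected comment explains why the tracker is a `lazy val`: the ExecutorAllocationClient is created after the TaskSchedulerImpl, so an eager field would capture nothing. A self-contained sketch of that deferral, assuming toy types (Scheduler, Tracker, AllocationClient, and the mutable client slot are all invented for illustration):

```scala
// Sketch of lazy-initialization ordering; names are illustrative, not Spark's.
object LazyInitSketch {
  final class AllocationClient
  final class Tracker(client: Option[AllocationClient]) {
    require(client.nonEmpty, "tracker created before its client existed")
  }

  final class Scheduler {
    // Installed later by startup code, after the Scheduler is constructed.
    var allocationClient: Option[AllocationClient] = None

    // A plain `val` would capture None at construction time; `lazy val`
    // defers evaluation to the first access, when the client is present.
    lazy val trackerOpt: Option[Tracker] = Some(new Tracker(allocationClient))
  }

  def main(args: Array[String]): Unit = {
    val scheduler = new Scheduler
    scheduler.allocationClient = Some(new AllocationClient) // created afterwards
    println(scheduler.trackerOpt.isDefined) // true: lazy init saw the client
  }
}
```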
@@ -228,7 +228,7 @@ private[spark] class TaskSchedulerImpl(
         // 1. The task set manager has been created and some tasks have been scheduled.
         //    In this case, send a kill signal to the executors to kill the task and then abort
         //    the stage.
-        // 2. The task set manager has been created but no tasks has been scheduled. In this case,
+        // 2. The task set manager has been created but no tasks have been scheduled. In this case,
         //    simply abort the stage.
         tsm.runningTasksSet.foreach { tid =>
           taskIdToExecutorId.get(tid).foreach(execId =>
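
The corrected comment enumerates two cancellation cases: kill the already-running tasks and then abort the stage, or abort directly when nothing was scheduled. A sketch of that control flow over toy types (ToyTaskSetManager, killTask, and abortStage are invented here, not Spark's internals):

```scala
// Both cancellation cases from the comment above, on toy data structures.
object KillStageSketch {
  final class ToyTaskSetManager(val stageId: Int, val runningTasksSet: Set[Long])

  def killTask(tid: Long, execId: String): Unit =
    println(s"kill task $tid on executor $execId")

  def abortStage(stageId: Int): Unit =
    println(s"abort stage $stageId")

  def cancel(tsm: ToyTaskSetManager, taskIdToExecutorId: Map[Long, String]): Unit = {
    // Case 1: tasks were scheduled -> signal a kill for each running task.
    tsm.runningTasksSet.foreach { tid =>
      taskIdToExecutorId.get(tid).foreach(execId => killTask(tid, execId))
    }
    // Case 1 after the kills, case 2 immediately: abort the stage.
    abortStage(tsm.stageId)
  }

  def main(args: Array[String]): Unit = {
    cancel(new ToyTaskSetManager(1, Set(7L)), Map(7L -> "exec-0")) // case 1
    cancel(new ToyTaskSetManager(2, Set.empty), Map.empty)         // case 2
  }
}
```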
@@ -694,7 +694,7 @@ private[spark] class TaskSchedulerImpl(
    *
    * After stage failure and retry, there may be multiple TaskSetManagers for the stage.
    * If an earlier attempt of a stage completes a task, we should ensure that the later attempts
-   * do not also submit those same tasks.  That also means that a task completion from an earlier
+   * do not also submit those same tasks. That also means that a task completion from an earlier
    * attempt can lead to the entire stage getting marked as successful.
    */
   private[scheduler] def markPartitionCompletedInAllTaskSets(stageId: Int, partitionId: Int) = {
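
The doc comment above says a completion in one attempt must be propagated to every TaskSetManager for the stage, so retries never re-run a finished partition. A sketch of those semantics, assuming a toy pending-partition map instead of real TaskSetManagers:

```scala
// Marks a partition complete in every attempt of a stage; toy structures only.
object MarkCompletedSketch {
  import scala.collection.mutable

  // stageId -> (stageAttemptId -> partitions still pending)
  val pendingByAttempt: mutable.Map[Int, mutable.Map[Int, mutable.Set[Int]]] =
    mutable.Map(
      0 -> mutable.Map(
        0 -> mutable.Set(0, 1),   // first attempt
        1 -> mutable.Set(0, 1)))  // retry after a stage failure

  def markPartitionCompletedInAllTaskSets(stageId: Int, partitionId: Int): Unit =
    pendingByAttempt.get(stageId).foreach { attempts =>
      attempts.values.foreach(_ -= partitionId) // drop from every attempt
    }

  def main(args: Array[String]): Unit = {
    markPartitionCompletedInAllTaskSets(stageId = 0, partitionId = 1)
    println(pendingByAttempt) // partition 1 removed from both attempts
  }
}
```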