
Commit c859be2

Jacek Laskowski authored and rxin committed
Typo fixes + code readability improvements
Author: Jacek Laskowski <[email protected]>

Closes #9501 from jaceklaskowski/typos-with-style.

(cherry picked from commit 62bb290)
Signed-off-by: Reynold Xin <[email protected]>
1 parent a91d213 commit c859be2


4 files changed: +21 −17 lines


core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala

Lines changed: 6 additions & 8 deletions
@@ -88,8 +88,8 @@ private[spark] class HadoopPartition(rddId: Int, idx: Int, s: InputSplit)
  *
  * @param sc The SparkContext to associate the RDD with.
  * @param broadcastedConf A general Hadoop Configuration, or a subclass of it. If the enclosed
- *   variabe references an instance of JobConf, then that JobConf will be used for the Hadoop job.
- *   Otherwise, a new JobConf will be created on each slave using the enclosed Configuration.
+ *   variable references an instance of JobConf, then that JobConf will be used for the Hadoop job.
+ *   Otherwise, a new JobConf will be created on each slave using the enclosed Configuration.
  * @param initLocalJobConfFuncOpt Optional closure used to initialize any JobConf that HadoopRDD
  *   creates.
  * @param inputFormatClass Storage format of the data to be read.
@@ -123,7 +123,7 @@ class HadoopRDD[K, V](
       sc,
       sc.broadcast(new SerializableConfiguration(conf))
         .asInstanceOf[Broadcast[SerializableConfiguration]],
-      None /* initLocalJobConfFuncOpt */,
+      initLocalJobConfFuncOpt = None,
       inputFormatClass,
       keyClass,
       valueClass,
@@ -184,8 +184,9 @@ class HadoopRDD[K, V](
   protected def getInputFormat(conf: JobConf): InputFormat[K, V] = {
     val newInputFormat = ReflectionUtils.newInstance(inputFormatClass.asInstanceOf[Class[_]], conf)
       .asInstanceOf[InputFormat[K, V]]
-    if (newInputFormat.isInstanceOf[Configurable]) {
-      newInputFormat.asInstanceOf[Configurable].setConf(conf)
+    newInputFormat match {
+      case c: Configurable => c.setConf(conf)
+      case _ =>
     }
     newInputFormat
   }
@@ -195,9 +196,6 @@ class HadoopRDD[K, V](
     // add the credentials here as this can be called before SparkContext initialized
     SparkHadoopUtil.get.addCredentials(jobConf)
     val inputFormat = getInputFormat(jobConf)
-    if (inputFormat.isInstanceOf[Configurable]) {
-      inputFormat.asInstanceOf[Configurable].setConf(jobConf)
-    }
     val inputSplits = inputFormat.getSplits(jobConf, minPartitions)
     val array = new Array[Partition](inputSplits.size)
     for (i <- 0 until inputSplits.size) {
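
The getInputFormat hunk above swaps an isInstanceOf/asInstanceOf pair for a type-pattern match: the check and the cast collapse into one expression, and the "not configurable, do nothing" branch becomes explicit. A minimal, standalone sketch of that idiom follows; the Configurable trait and the two input-format classes are hypothetical stand-ins, not the Hadoop or Spark types.

// Standalone sketch of the pattern-match idiom used in getInputFormat above.
trait Configurable {
  def setConf(conf: Map[String, String]): Unit
}

class TextInputFormat extends Configurable {
  private var conf: Map[String, String] = Map.empty
  override def setConf(c: Map[String, String]): Unit = { conf = c }
  override def toString: String = s"TextInputFormat(conf=$conf)"
}

class BinaryInputFormat  // does not implement Configurable

object ConfigurableExample {
  // Before: if (format.isInstanceOf[Configurable]) format.asInstanceOf[Configurable].setConf(conf)
  // After: one match expression with an explicit "nothing to do" default case.
  def configure(format: AnyRef, conf: Map[String, String]): AnyRef = {
    format match {
      case c: Configurable => c.setConf(conf)
      case _ =>             // not Configurable: leave it untouched
    }
    format
  }

  def main(args: Array[String]): Unit = {
    println(configure(new TextInputFormat, Map("input.dir" -> "/tmp/data")))   // conf applied
    println(configure(new BinaryInputFormat, Map("input.dir" -> "/tmp/data"))) // left as-is
  }
}

The second hunk in this file makes a related call-site change: passing initLocalJobConfFuncOpt = None as a named argument lets the compiler check which parameter the None belongs to, instead of relying on the /* initLocalJobConfFuncOpt */ comment to say so.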

core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala

Lines changed: 9 additions & 3 deletions
@@ -541,8 +541,7 @@ class DAGScheduler(
   }
 
   /**
-   * Submit an action job to the scheduler and get a JobWaiter object back. The JobWaiter object
-   * can be used to block until the the job finishes executing or can be used to cancel the job.
+   * Submit an action job to the scheduler.
    *
    * @param rdd target RDD to run tasks on
    * @param func a function to run on each partition of the RDD
@@ -551,6 +550,11 @@ class DAGScheduler(
    * @param callSite where in the user program this job was called
    * @param resultHandler callback to pass each result to
    * @param properties scheduler properties to attach to this job, e.g. fair scheduler pool name
+   *
+   * @return a JobWaiter object that can be used to block until the job finishes executing
+   *         or can be used to cancel the job.
+   *
+   * @throws IllegalArgumentException when partitions ids are illegal
    */
   def submitJob[T, U](
       rdd: RDD[T],
@@ -584,7 +588,7 @@ class DAGScheduler(
 
   /**
    * Run an action job on the given RDD and pass all the results to the resultHandler function as
-   * they arrive. Throws an exception if the job fials, or returns normally if successful.
+   * they arrive.
    *
    * @param rdd target RDD to run tasks on
    * @param func a function to run on each partition of the RDD
@@ -593,6 +597,8 @@ class DAGScheduler(
    * @param callSite where in the user program this job was called
    * @param resultHandler callback to pass each result to
    * @param properties scheduler properties to attach to this job, e.g. fair scheduler pool name
+   *
+   * @throws Exception when the job fails
    */
   def runJob[T, U](
       rdd: RDD[T],
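
The submitJob and runJob hunks move the return-value and failure descriptions out of the summary sentences and into @return/@throws tags. A small sketch of that Scaladoc style, using an invented scheduler-like method rather than the real DAGScheduler API:

object SubmitJobDocSketch {
  /**
   * Submit a piece of work to a toy scheduler.
   *
   * @param partitions partition ids to run on
   * @param totalPartitions number of partitions that actually exist
   *
   * @return a handle (here just the number of accepted partitions) that the caller
   *         can use to track the submitted work
   *
   * @throws IllegalArgumentException when partition ids are illegal
   */
  def submit(partitions: Seq[Int], totalPartitions: Int): Int = {
    // Validate the partition ids up front, as the @throws tag documents.
    partitions.find(p => p < 0 || p >= totalPartitions).foreach { p =>
      throw new IllegalArgumentException(
        s"Attempting to access a non-existent partition: $p. Total number of partitions: $totalPartitions")
    }
    partitions.size
  }

  def main(args: Array[String]): Unit = {
    println(submit(Seq(0, 1, 2), totalPartitions = 4)) // prints 3
    // submit(Seq(7), totalPartitions = 4)              // would throw IllegalArgumentException
  }
}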

core/src/main/scala/org/apache/spark/scheduler/ShuffleMapTask.scala

Lines changed: 5 additions & 5 deletions
@@ -27,11 +27,11 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.shuffle.ShuffleWriter
 
 /**
- * A ShuffleMapTask divides the elements of an RDD into multiple buckets (based on a partitioner
- * specified in the ShuffleDependency).
- *
- * See [[org.apache.spark.scheduler.Task]] for more information.
- *
+ * A ShuffleMapTask divides the elements of an RDD into multiple buckets (based on a partitioner
+ * specified in the ShuffleDependency).
+ *
+ * See [[org.apache.spark.scheduler.Task]] for more information.
+ *
  * @param stageId id of the stage this task belongs to
  * @param taskBinary broadcast version of the RDD and the ShuffleDependency. Once deserialized,
  *                   the type should be (RDD[_], ShuffleDependency[_, _, _]).

core/src/main/scala/org/apache/spark/scheduler/TaskSet.scala

Lines changed: 1 addition & 1 deletion
@@ -29,7 +29,7 @@ private[spark] class TaskSet(
     val stageAttemptId: Int,
     val priority: Int,
     val properties: Properties) {
-    val id: String = stageId + "." + stageAttemptId
+  val id: String = stageId + "." + stageAttemptId
 
   override def toString: String = "TaskSet " + id
 }
