Commit aed530b

[SPARK-4961] [CORE] Put HadoopRDD.getPartitions forward to reduce DAGScheduler.JobSubmitted processing time
1 parent b535a53 · commit aed530b

1 file changed (+2, -2 lines)

core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala

Lines changed: 2 additions & 2 deletions
@@ -500,10 +500,10 @@ class DAGScheduler(
     // the job submitter sends a message into the DAGScheduler actor.
     // Although getPartitions may be called in rdd.partitions.length before this.
     try {
-      getParentStages(rdd, jobId)
+      getParentStages(rdd, jobId).foreach(_.rdd.partitions)
     } catch {
       case e: Exception =>
-        logWarning("Get or create the list of parent stages failed due to exception - job: "
+        logWarning("Get the partitions of parent stages' rdds failed due to exception - job: "
           + jobId, e)
         waiter.jobFailed(e)
         return waiter
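
The added .foreach(_.rdd.partitions) works because RDD.partitions caches the result of getPartitions after the first call, so the expensive split computation (for HadoopRDD, listing the input splits) is paid on the job-submitting thread before the JobSubmitted message reaches the DAGScheduler actor, and later lookups inside the scheduler's event processing are cache hits. The following is a minimal, self-contained Scala sketch of that memoization effect under simplified assumptions; SlowPartitionedRdd and its timings are illustrative, not the Spark source.

// Illustrative sketch only: a stand-in for RDD.partitions' memoization, not Spark code.
object PartitionsMemoizationSketch {

  class SlowPartitionedRdd {
    // Simulates an expensive getPartitions, e.g. HadoopRDD listing HDFS input splits.
    protected def getPartitions: Array[Int] = {
      Thread.sleep(1000)              // stand-in for the costly split computation
      Array.tabulate(4)(identity)
    }

    // partitions caches the first (expensive) result; later calls return the cached array.
    @volatile private var cached: Array[Int] = _
    final def partitions: Array[Int] = {
      if (cached == null) cached = getPartitions
      cached
    }
  }

  def main(args: Array[String]): Unit = {
    val rdd = new SlowPartitionedRdd

    // What the patch does, in spirit: pay the cost on the job-submitting thread...
    val t0 = System.nanoTime()
    rdd.partitions
    println(f"first call (submitting thread): ${(System.nanoTime() - t0) / 1e6}%.1f ms")

    // ...so a later call inside the scheduler's event handling is just a cache hit.
    val t1 = System.nanoTime()
    rdd.partitions
    println(f"second call (event handling):   ${(System.nanoTime() - t1) / 1e6}%.1f ms")
  }
}

In the actual patch, the eager first call is the added traversal over the parent stages' RDDs in the diff above; the second, cheap call corresponds to the scheduler touching those partitions later while processing JobSubmitted.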
