
Commit f6c13a1

remove debugging code
1 parent: ea016db

File tree

1 file changed: +2 −9 lines changed


core/src/test/scala/org/apache/spark/rdd/PairRDDFunctionsSuite.scala

Lines changed: 2 additions & 9 deletions
@@ -566,20 +566,13 @@ class PairRDDFunctionsSuite extends FunSuite with SharedSparkContext {
     rdd.saveAsSequenceFile(path)

     // this is basically sc.sequenceFile[Int,Int], but with input splits turned off
-    val r1: RDD[(IntWritable,IntWritable)] = sc.hadoopFile(
+    val reloaded: RDD[(Int,Int)] = sc.hadoopFile(
       path,
       classOf[NoSplitSequenceFileInputFormat[IntWritable,IntWritable]],
       classOf[IntWritable],
       classOf[IntWritable],
       nParts
-    )
-    println("hadoop rdd partitions:")
-    r1.partitions.zipWithIndex.foreach{case(part, idx) =>
-      val hp = part.asInstanceOf[HadoopPartition]
-      println(s"hp.split: ${hp.inputSplit}; idx: $idx; hp.idx: ${hp.index}")
-    }
-
-    val reloaded = r1.map{case(k,v) => k.get() -> v.get()}
+    ).map{case(k,v) => k.get() -> v.get()}


     val assumedPartitioned = reloaded.assumePartitionedBy(rdd.partitioner.get)
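For context, the comment in the diff says this read is "basically sc.sequenceFile[Int,Int], but with input splits turned off". A minimal sketch of that standard-API equivalent, assuming a live SparkContext; note that NoSplitSequenceFileInputFormat is this test's helper for disabling splits, which the stock sequenceFile call below does not do:

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

// Hypothetical sketch, not part of the commit: reload the saved pairs with
// the stock API. sc.sequenceFile[Int, Int] unwraps the IntWritable key/value
// types via implicit WritableConverters, but unlike the test's
// NoSplitSequenceFileInputFormat it may split the input files, so the
// original partitioning is not guaranteed to survive the round trip.
def reloadPairs(sc: SparkContext, path: String, nParts: Int): RDD[(Int, Int)] =
  sc.sequenceFile[Int, Int](path, minPartitions = nParts)

That difference is why the test goes through hadoopFile with a no-split input format: assumePartitionedBy only holds if each saved partition comes back as exactly one input partition.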
