core/src/test/scala/org/apache/spark/rdd/PairRDDFunctionsSuite.scala (1 file changed: +2 -9 lines)

@@ -566,20 +566,13 @@ class PairRDDFunctionsSuite extends FunSuite with SharedSparkContext {
     rdd.saveAsSequenceFile(path)

     // this is basically sc.sequenceFile[Int,Int], but with input splits turned off
-    val r1: RDD[(IntWritable, IntWritable)] = sc.hadoopFile(
+    val reloaded: RDD[(Int, Int)] = sc.hadoopFile(
       path,
       classOf[NoSplitSequenceFileInputFormat[IntWritable, IntWritable]],
       classOf[IntWritable],
       classOf[IntWritable],
       nParts
-    )
-    println("hadoop rdd partitions:")
-    r1.partitions.zipWithIndex.foreach { case (part, idx) =>
-      val hp = part.asInstanceOf[HadoopPartition]
-      println(s"hp.split: ${hp.inputSplit}; idx: $idx; hp.idx: ${hp.index}")
-    }
-
-    val reloaded = r1.map { case (k, v) => k.get() -> v.get() }
+    ).map { case (k, v) => k.get() -> v.get() }


     val assumedPartitioned = reloaded.assumePartitionedBy(rdd.partitioner.get)