@@ -18,7 +18,7 @@
 package org.apache.spark.api.java

 import java.util.{Comparator, List => JList, Iterator => JIterator}
-import java.lang.{Iterable => JIterable}
+import java.lang.{Iterable => JIterable, Long => JLong}

 import scala.collection.JavaConversions._
 import scala.reflect.ClassTag
@@ -268,8 +268,8 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
    * 2*n+k, ..., where n is the number of partitions. So there may exist gaps, but this method
    * won't trigger a Spark job, which is different from [[org.apache.spark.rdd.RDD#zipWithIndex]].
    */
-  def zipWithUniqueId[Long](): JavaPairRDD[T, Long] = {
-    JavaPairRDD.fromRDD(rdd.zipWithUniqueId()).asInstanceOf[JavaPairRDD[T, Long]]
+  def zipWithUniqueId(): JavaPairRDD[T, JLong] = {
+    JavaPairRDD.fromRDD(rdd.zipWithUniqueId()).asInstanceOf[JavaPairRDD[T, JLong]]
   }

   /**
@@ -279,8 +279,8 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
    * This is similar to Scala's zipWithIndex but it uses Long instead of Int as the index type.
    * This method needs to trigger a Spark job when this RDD contains more than one partition.
    */
-  def zipWithIndex[Long](): JavaPairRDD[T, Long] = {
-    JavaPairRDD.fromRDD(rdd.zipWithIndex()).asInstanceOf[JavaPairRDD[T, Long]]
+  def zipWithIndex(): JavaPairRDD[T, JLong] = {
+    JavaPairRDD.fromRDD(rdd.zipWithIndex()).asInstanceOf[JavaPairRDD[T, JLong]]
   }

   // Actions (launch a job to return a value to the user program)
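For context, here is a minimal, self-contained sketch (all names hypothetical, not part of the patch) of the bug this change fixes: in the old signatures, `Long` was a method-level type parameter that shadowed `scala.Long`, so it was an unconstrained type variable erasing to `Object` rather than the boxed `java.lang.Long` that Java callers of `JavaPairRDD` expect. Aliasing `java.lang.Long` as `JLong` makes the boxed type explicit:

```scala
// Hypothetical demo (not part of the patch) of why `def zipWithUniqueId[Long]()`
// was wrong: `Long` here is a method type parameter, not scala.Long.
object LongShadowingDemo {
  import java.lang.{Long => JLong}

  // The type parameter shadows the built-in Long, accepts any type at all,
  // and erases to Object in the bytecode seen by Java callers.
  def shadowed[Long](x: Long): Long = x

  // With the JLong alias, the boxed java.lang.Long appears explicitly in
  // the signature, which is what a Java-facing API needs.
  def boxed(x: JLong): JLong = x

  def main(args: Array[String]): Unit = {
    // Compiles even though a String is not a Long, showing that the old
    // signature never constrained the index type.
    println(shadowed("not a long"))
    println(boxed(JLong.valueOf(42L)))
  }
}
```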