
Commit 808d69a

[SPARK-16588][SQL] Deprecate monotonicallyIncreasingId in Scala/Java
This patch deprecates monotonicallyIncreasingId in Scala/Java, as done in Python.

This patch was originally written by HyukjinKwon.

Closes #14236.

(cherry picked from commit 480c870)
Signed-off-by: Reynold Xin <[email protected]>
1 parent a4bf13a commit 808d69a
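
For context on what the rename means for callers (this snippet is not part of the commit; df is assumed to be an existing DataFrame), a minimal before/after sketch in Scala:

import org.apache.spark.sql.functions.monotonically_increasing_id

// Deprecated spelling: still compiles and behaves the same, but now warns at compile time.
//   df.withColumn("id", monotonicallyIncreasingId())

// Preferred spelling going forward, matching the Python API.
val withIds = df.withColumn("id", monotonically_increasing_id())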

File tree

3 files changed: +9 −9 lines changed


mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala

Lines changed: 2 additions & 2 deletions
@@ -37,7 +37,7 @@ import org.apache.spark.mllib.linalg.MatrixImplicits._
 import org.apache.spark.mllib.linalg.VectorImplicits._
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
-import org.apache.spark.sql.functions.{col, monotonicallyIncreasingId, udf}
+import org.apache.spark.sql.functions.{col, monotonically_increasing_id, udf}
 import org.apache.spark.sql.types.StructType


@@ -888,7 +888,7 @@ object LDA extends DefaultParamsReadable[LDA] {
       dataset: Dataset[_],
       featuresCol: String): RDD[(Long, OldVector)] = {
     dataset
-      .withColumn("docId", monotonicallyIncreasingId())
+      .withColumn("docId", monotonically_increasing_id())
       .select("docId", featuresCol)
       .rdd
       .map { case Row(docId: Long, features: Vector) =>

sql/core/src/main/scala/org/apache/spark/sql/functions.scala

Lines changed: 1 addition & 0 deletions
@@ -978,6 +978,7 @@ object functions {
    * @group normal_funcs
    * @since 1.4.0
    */
+  @deprecated("Use monotonically_increasing_id()", "2.0.0")
   def monotonicallyIncreasingId(): Column = monotonically_increasing_id()

   /**
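
Note that the deprecated method is kept as a thin alias that simply forwards to monotonically_increasing_id(), so existing callers get identical behavior and only see a compile-time deprecation warning.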

sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala

Lines changed: 6 additions & 7 deletions
@@ -508,18 +508,17 @@ class ColumnExpressionSuite extends QueryTest with SharedSQLContext {
       Row("ab", "cde"))
   }

-  test("monotonicallyIncreasingId") {
+  test("monotonically_increasing_id") {
     // Make sure we have 2 partitions, each with 2 records.
     val df = sparkContext.parallelize(Seq[Int](), 2).mapPartitions { _ =>
       Iterator(Tuple1(1), Tuple1(2))
     }.toDF("a")
     checkAnswer(
-      df.select(monotonicallyIncreasingId()),
-      Row(0L) :: Row(1L) :: Row((1L << 33) + 0L) :: Row((1L << 33) + 1L) :: Nil
-    )
-    checkAnswer(
-      df.select(expr("monotonically_increasing_id()")),
-      Row(0L) :: Row(1L) :: Row((1L << 33) + 0L) :: Row((1L << 33) + 1L) :: Nil
+      df.select(monotonically_increasing_id(), expr("monotonically_increasing_id()")),
+      Row(0L, 0L) ::
+      Row(1L, 1L) ::
+      Row((1L << 33) + 0L, (1L << 33) + 0L) ::
+      Row((1L << 33) + 1L, (1L << 33) + 1L) :: Nil
     )
   }
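
The expected rows follow from how the generated ID is composed: the partition index occupies the upper bits of the 64-bit value and the record number within the partition the lower 33 bits, so with 2 partitions of 2 records each the IDs are 0, 1, 1L << 33, and (1L << 33) + 1. A minimal sketch of that arithmetic (illustrative only; expectedId is a hypothetical helper, not part of Spark):

// Illustrative only: derives the values asserted in the test above.
def expectedId(partitionIndex: Int, recordIndexInPartition: Long): Long =
  (partitionIndex.toLong << 33) + recordIndexInPartition

assert(expectedId(0, 0) == 0L)
assert(expectedId(0, 1) == 1L)
assert(expectedId(1, 0) == (1L << 33))
assert(expectedId(1, 1) == (1L << 33) + 1L)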
