Skip to content

Commit 18faa58

Browse files
Nick Pentreath authored and mengxr committed
[SPARK-16127][ML][PYSPARK] Audit @Since annotations related to ml.linalg
[SPARK-14615](https://issues.apache.org/jira/browse/SPARK-14615) and #12627 changed `spark.ml` pipelines to use the new `ml.linalg` classes for `Vector`/`Matrix`. Some `Since` annotations for public methods/vals have not been updated accordingly to be `2.0.0`. This PR updates them. ## How was this patch tested? Existing unit tests. Author: Nick Pentreath <[email protected]> Closes #13840 from MLnick/SPARK-16127-ml-linalg-since.
1 parent ea3a12b commit 18faa58

File tree

14 files changed

+41
-37
lines changed

14 files changed

+41
-37
lines changed

mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -482,7 +482,7 @@ object LogisticRegression extends DefaultParamsReadable[LogisticRegression] {
482482
@Experimental
483483
class LogisticRegressionModel private[spark] (
484484
@Since("1.4.0") override val uid: String,
485-
@Since("1.6.0") val coefficients: Vector,
485+
@Since("2.0.0") val coefficients: Vector,
486486
@Since("1.3.0") val intercept: Double)
487487
extends ProbabilisticClassificationModel[Vector, LogisticRegressionModel]
488488
with LogisticRegressionParams with MLWritable {

mllib/src/main/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifier.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -296,7 +296,7 @@ object MultilayerPerceptronClassifier
296296
class MultilayerPerceptronClassificationModel private[ml] (
297297
@Since("1.5.0") override val uid: String,
298298
@Since("1.5.0") val layers: Array[Int],
299-
@Since("1.5.0") val weights: Vector)
299+
@Since("2.0.0") val weights: Vector)
300300
extends PredictionModel[Vector, MultilayerPerceptronClassificationModel]
301301
with Serializable with MLWritable {
302302

mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -130,8 +130,8 @@ object NaiveBayes extends DefaultParamsReadable[NaiveBayes] {
130130
@Experimental
131131
class NaiveBayesModel private[ml] (
132132
@Since("1.5.0") override val uid: String,
133-
@Since("1.5.0") val pi: Vector,
134-
@Since("1.5.0") val theta: Matrix)
133+
@Since("2.0.0") val pi: Vector,
134+
@Since("2.0.0") val theta: Matrix)
135135
extends ProbabilisticClassificationModel[Vector, NaiveBayesModel]
136136
with NaiveBayesParams with MLWritable {
137137

mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,7 @@ class KMeansModel private[ml] (
131131

132132
private[clustering] def predict(features: Vector): Int = parentModel.predict(features)
133133

134-
@Since("1.5.0")
134+
@Since("2.0.0")
135135
def clusterCenters: Array[Vector] = parentModel.clusterCenters.map(_.asML)
136136

137137
/**

mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -432,7 +432,7 @@ sealed abstract class LDAModel private[ml] (
432432
* If Online LDA was used and [[optimizeDocConcentration]] was set to false,
433433
* then this returns the fixed (given) value for the [[docConcentration]] parameter.
434434
*/
435-
@Since("1.6.0")
435+
@Since("2.0.0")
436436
def estimatedDocConcentration: Vector = getModel.docConcentration
437437

438438
/**
@@ -444,7 +444,7 @@ sealed abstract class LDAModel private[ml] (
444444
* the Expectation-Maximization ("em") [[optimizer]], then this method could involve
445445
* collecting a large amount of data to the driver (on the order of vocabSize x k).
446446
*/
447-
@Since("1.6.0")
447+
@Since("2.0.0")
448448
def topicsMatrix: Matrix = oldLocalModel.topicsMatrix.asML
449449

450450
/** Indicates whether this instance is of type [[DistributedLDAModel]] */

mllib/src/main/scala/org/apache/spark/ml/feature/ElementwiseProduct.scala

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,11 +33,11 @@ import org.apache.spark.sql.types.DataType
3333
* multiplier.
3434
*/
3535
@Experimental
36-
@Since("2.0.0")
37-
class ElementwiseProduct @Since("2.0.0") (@Since("2.0.0") override val uid: String)
36+
@Since("1.4.0")
37+
class ElementwiseProduct @Since("1.4.0") (@Since("1.4.0") override val uid: String)
3838
extends UnaryTransformer[Vector, Vector, ElementwiseProduct] with DefaultParamsWritable {
3939

40-
@Since("2.0.0")
40+
@Since("1.4.0")
4141
def this() = this(Identifiable.randomUID("elemProd"))
4242

4343
/**

mllib/src/main/scala/org/apache/spark/ml/feature/Normalizer.scala

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -31,29 +31,29 @@ import org.apache.spark.sql.types.DataType
3131
* Normalize a vector to have unit norm using the given p-norm.
3232
*/
3333
@Experimental
34-
@Since("2.0.0")
35-
class Normalizer @Since("2.0.0") (@Since("2.0.0") override val uid: String)
34+
@Since("1.4.0")
35+
class Normalizer @Since("1.4.0") (@Since("1.4.0") override val uid: String)
3636
extends UnaryTransformer[Vector, Vector, Normalizer] with DefaultParamsWritable {
3737

38-
@Since("2.0.0")
38+
@Since("1.4.0")
3939
def this() = this(Identifiable.randomUID("normalizer"))
4040

4141
/**
4242
* Normalization in L^p^ space. Must be >= 1.
4343
* (default: p = 2)
4444
* @group param
4545
*/
46-
@Since("2.0.0")
46+
@Since("1.4.0")
4747
val p = new DoubleParam(this, "p", "the p norm value", ParamValidators.gtEq(1))
4848

4949
setDefault(p -> 2.0)
5050

5151
/** @group getParam */
52-
@Since("2.0.0")
52+
@Since("1.4.0")
5353
def getP: Double = $(p)
5454

5555
/** @group setParam */
56-
@Since("2.0.0")
56+
@Since("1.4.0")
5757
def setP(value: Double): this.type = set(p, value)
5858

5959
override protected def createTransformFunc: Vector => Vector = {

mllib/src/main/scala/org/apache/spark/ml/feature/PolynomialExpansion.scala

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -35,30 +35,30 @@ import org.apache.spark.sql.types.DataType
3535
* `(x, y)`, if we want to expand it with degree 2, then we get `(x, x * x, y, x * y, y * y)`.
3636
*/
3737
@Experimental
38-
@Since("2.0.0")
39-
class PolynomialExpansion @Since("2.0.0") (@Since("2.0.0") override val uid: String)
38+
@Since("1.4.0")
39+
class PolynomialExpansion @Since("1.4.0") (@Since("1.4.0") override val uid: String)
4040
extends UnaryTransformer[Vector, Vector, PolynomialExpansion] with DefaultParamsWritable {
4141

42-
@Since("2.0.0")
42+
@Since("1.4.0")
4343
def this() = this(Identifiable.randomUID("poly"))
4444

4545
/**
4646
* The polynomial degree to expand, which should be >= 1. A value of 1 means no expansion.
4747
* Default: 2
4848
* @group param
4949
*/
50-
@Since("2.0.0")
50+
@Since("1.4.0")
5151
val degree = new IntParam(this, "degree", "the polynomial degree to expand (>= 1)",
5252
ParamValidators.gtEq(1))
5353

5454
setDefault(degree -> 2)
5555

5656
/** @group getParam */
57-
@Since("2.0.0")
57+
@Since("1.4.0")
5858
def getDegree: Int = $(degree)
5959

6060
/** @group setParam */
61-
@Since("2.0.0")
61+
@Since("1.4.0")
6262
def setDegree(value: Int): this.type = set(degree, value)
6363

6464
override protected def createTransformFunc: Vector => Vector = { v =>

mllib/src/main/scala/org/apache/spark/ml/feature/Word2Vec.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -240,7 +240,7 @@ class Word2VecModel private[ml] (
240240
* of the word. Returns a dataframe with the words and the cosine similarities between the
241241
* synonyms and the given word vector.
242242
*/
243-
@Since("1.5.0")
243+
@Since("2.0.0")
244244
def findSynonyms(word: Vector, num: Int): DataFrame = {
245245
val spark = SparkSession.builder().getOrCreate()
246246
spark.createDataFrame(wordVectors.findSynonyms(word, num)).toDF("word", "similarity")

mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -286,7 +286,7 @@ object AFTSurvivalRegression extends DefaultParamsReadable[AFTSurvivalRegression
286286
@Since("1.6.0")
287287
class AFTSurvivalRegressionModel private[ml] (
288288
@Since("1.6.0") override val uid: String,
289-
@Since("1.6.0") val coefficients: Vector,
289+
@Since("2.0.0") val coefficients: Vector,
290290
@Since("1.6.0") val intercept: Double,
291291
@Since("1.6.0") val scale: Double)
292292
extends Model[AFTSurvivalRegressionModel] with AFTSurvivalRegressionParams with MLWritable {
@@ -307,7 +307,7 @@ class AFTSurvivalRegressionModel private[ml] (
307307
@Since("1.6.0")
308308
def setQuantilesCol(value: String): this.type = set(quantilesCol, value)
309309

310-
@Since("1.6.0")
310+
@Since("2.0.0")
311311
def predictQuantiles(features: Vector): Vector = {
312312
// scale parameter for the Weibull distribution of lifetime
313313
val lambda = math.exp(BLAS.dot(coefficients, features) + intercept)
@@ -319,7 +319,7 @@ class AFTSurvivalRegressionModel private[ml] (
319319
Vectors.dense(quantiles)
320320
}
321321

322-
@Since("1.6.0")
322+
@Since("2.0.0")
323323
def predict(features: Vector): Double = {
324324
math.exp(BLAS.dot(coefficients, features) + intercept)
325325
}

0 commit comments

Comments (0)