Commit 82c7750

Author: DB Tsai
Commit message: use some implicit

1 parent 1602f6f commit 82c7750

File tree: 5 files changed (+16, -14 lines)
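The commit replaces explicit `OldVectors.fromML(...)` and `.asML` conversions between the old `mllib.linalg` and new `ml.linalg` vector types with the implicit conversions brought into scope by importing `org.apache.spark.mllib.linalg.VectorImplicits._`. Below is a minimal, self-contained sketch of that pattern, not Spark's actual implementation; `OldVec`, `NewVec`, and the converter names are hypothetical stand-ins for the real types and for the conversions `VectorImplicits` presumably defines.

import scala.language.implicitConversions

// Hypothetical stand-ins for the old mllib.linalg.Vector and the new
// ml.linalg.Vector; the real Spark types and converter names differ.
case class OldVec(values: Array[Double])
case class NewVec(values: Array[Double])

object VecImplicits {
  // With these in scope, a NewVec can be passed wherever an OldVec is
  // expected (and vice versa) without an explicit fromML/asML call.
  implicit def newToOld(v: NewVec): OldVec = OldVec(v.values)
  implicit def oldToNew(v: OldVec): NewVec = NewVec(v.values)
}

object Demo extends App {
  import VecImplicits._

  // An "old API" method that only accepts OldVec.
  def sumOld(v: OldVec): Double = v.values.sum

  val nv = NewVec(Array(1.0, 2.0, 3.0))
  // The compiler rewrites this call to sumOld(newToOld(nv)).
  println(sumOld(nv)) // prints 6.0
}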

examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala

Lines changed: 2 additions & 2 deletions
@@ -26,7 +26,7 @@ import scopt.OptionParser
 import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.examples.mllib.AbstractParams
 import org.apache.spark.ml.linalg.Vector
-import org.apache.spark.mllib.linalg.{Vectors => OldVectors}
+import org.apache.spark.mllib.linalg.VectorImplicits._
 import org.apache.spark.mllib.stat.MultivariateOnlineSummarizer
 import org.apache.spark.sql.{DataFrame, Row, SQLContext}

@@ -82,7 +82,7 @@ object DataFrameExample {
     // Convert features column to an RDD of vectors.
     val features = df.select("features").rdd.map { case Row(v: Vector) => v }
     val featureSummary = features.aggregate(new MultivariateOnlineSummarizer())(
-      (summary, feat) => summary.add(OldVectors.fromML(feat)),
+      (summary, feat) => summary.add(feat),
      (sum1, sum2) => sum1.merge(sum2))
    println(s"Selected features column with average values:\n ${featureSummary.mean.toString}")
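The summarizer here is still the old mllib MultivariateOnlineSummarizer, whose add method takes an mllib.linalg.Vector; with VectorImplicits._ in scope, the new ml.linalg.Vector feat is converted implicitly instead of via an explicit OldVectors.fromML call.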

mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala

Lines changed: 5 additions & 4 deletions
@@ -23,6 +23,7 @@ import breeze.linalg.{*, axpy => Baxpy, DenseMatrix => BDM, DenseVector => BDV,

 import org.apache.spark.ml.linalg.{Vector, Vectors}
 import org.apache.spark.mllib.linalg.{Vector => OldVector, Vectors => OldVectors}
+import org.apache.spark.mllib.linalg.VectorImplicits._
 import org.apache.spark.mllib.optimization._
 import org.apache.spark.rdd.RDD
 import org.apache.spark.util.random.XORShiftRandom

@@ -583,9 +584,9 @@ private[ann] class ANNGradient(topology: Topology, dataStacker: DataStacker) ext
       label: Double,
       weights: OldVector,
       cumGradient: OldVector): Double = {
-    val (input, target, realBatchSize) = dataStacker.unstack(data.asML)
-    val model = topology.model(weights.asML)
-    model.computeGradient(input, target, cumGradient.asML, realBatchSize)
+    val (input, target, realBatchSize) = dataStacker.unstack(data)
+    val model = topology.model(weights)
+    model.computeGradient(input, target, cumGradient, realBatchSize)
   }
 }

@@ -809,7 +810,7 @@ private[ml] class FeedForwardTrainer(
     // TODO: deprecate standard optimizer because it needs Vector
     val newWeights = optimizer.optimize(dataStacker.stack(data).map { v =>
       (v._1, OldVectors.fromML(v._2))
-    }, OldVectors.fromML(w)).asML
+    }, w)
     topology.model(newWeights)
   }
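Note that the implicits work in both directions: in ANNGradient.compute, the old-style data, weights, and cumGradient arguments are passed directly to methods expecting the new ml.linalg.Vector (dropping the .asML calls), while in FeedForwardTrainer the new-style w is handed to the old optimizer without OldVectors.fromML and the result flows into topology.model unchanged.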

mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala

Lines changed: 2 additions & 3 deletions
@@ -33,7 +33,7 @@ import org.apache.spark.ml.param._
 import org.apache.spark.ml.param.shared._
 import org.apache.spark.ml.util._
 import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
-import org.apache.spark.mllib.linalg.{Vectors => OldVectors}
+import org.apache.spark.mllib.linalg.VectorImplicits._
 import org.apache.spark.mllib.stat.MultivariateOnlineSummarizer
 import org.apache.spark.mllib.util.MLUtils
 import org.apache.spark.rdd.RDD

@@ -280,8 +280,7 @@ class LogisticRegression @Since("1.2.0") (
     val (summarizer, labelSummarizer) = {
       val seqOp = (c: (MultivariateOnlineSummarizer, MultiClassSummarizer),
         instance: Instance) =>
-        (c._1.add(OldVectors.fromML(instance.features), instance.weight),
-          c._2.add(instance.label, instance.weight))
+        (c._1.add(instance.features, instance.weight), c._2.add(instance.label, instance.weight))

       val combOp = (c1: (MultivariateOnlineSummarizer, MultiClassSummarizer),
         c2: (MultivariateOnlineSummarizer, MultiClassSummarizer)) =>

mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala

Lines changed: 4 additions & 3 deletions
@@ -33,6 +33,7 @@ import org.apache.spark.mllib.clustering.{DistributedLDAModel => OldDistributedL
 import org.apache.spark.mllib.impl.PeriodicCheckpointer
 import org.apache.spark.mllib.linalg.{Matrices => OldMatrices, Vector => OldVector,
   Vectors => OldVectors}
+import org.apache.spark.mllib.linalg.VectorImplicits._
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
 import org.apache.spark.sql.functions.{col, monotonicallyIncreasingId, udf}

@@ -427,7 +428,7 @@ sealed abstract class LDAModel private[ml] (
    * then this returns the fixed (given) value for the [[docConcentration]] parameter.
    */
   @Since("1.6.0")
-  def estimatedDocConcentration: Vector = getModel.docConcentration.asML
+  def estimatedDocConcentration: Vector = getModel.docConcentration

   /**
    * Inferred topics, where each topic is represented by a distribution over terms.

@@ -577,7 +578,7 @@ object LocalLDAModel extends MLReadable[LocalLDAModel] {
       val topicConcentration = data.getAs[Double](3)
       val gammaShape = data.getAs[Double](4)
       val oldModel = new OldLocalLDAModel(OldMatrices.fromML(topicsMatrix),
-        OldVectors.fromML(docConcentration), topicConcentration, gammaShape)
+        docConcentration, topicConcentration, gammaShape)
       val model = new LocalLDAModel(metadata.uid, vocabSize, oldModel, sparkSession)
       DefaultParamsReader.getAndSetParams(model, metadata)
       model

@@ -846,7 +847,7 @@ class LDA @Since("1.6.0") (
     transformSchema(dataset.schema, logging = true)
     val oldLDA = new OldLDA()
       .setK($(k))
-      .setDocConcentration(OldVectors.fromML(getOldDocConcentration))
+      .setDocConcentration(getOldDocConcentration)
       .setTopicConcentration(getOldTopicConcentration)
       .setMaxIterations($(maxIter))
       .setSeed($(seed))

mllib/src/main/scala/org/apache/spark/ml/feature/ElementwiseProduct.scala

Lines changed: 3 additions & 2 deletions
@@ -24,6 +24,7 @@ import org.apache.spark.ml.param.Param
 import org.apache.spark.ml.util.{DefaultParamsReadable, DefaultParamsWritable, Identifiable}
 import org.apache.spark.mllib.feature
 import org.apache.spark.mllib.linalg.{Vectors => OldVectors}
+import org.apache.spark.mllib.linalg.VectorImplicits._
 import org.apache.spark.sql.types.DataType

 /**

@@ -52,8 +53,8 @@ class ElementwiseProduct(override val uid: String)

   override protected def createTransformFunc: Vector => Vector = {
     require(params.contains(scalingVec), s"transformation requires a weight vector")
-    val elemScaler = new feature.ElementwiseProduct(OldVectors.fromML($(scalingVec)))
-    vector => elemScaler.transform(OldVectors.fromML(vector)).asML
+    val elemScaler = new feature.ElementwiseProduct($(scalingVec))
+    v => elemScaler.transform(v)
   }

   override protected def outputDataType: DataType = new VectorUDT()
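This last change round-trips through both conversions: $(scalingVec) and the input v are new ml vectors implicitly converted to the old mllib type that feature.ElementwiseProduct expects, and the old-type result of transform is implicitly converted back to satisfy the Vector => Vector signature, replacing the explicit fromML/asML pair.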
