Skip to content

Commit 79f7263

Browse files
chetkhatri authored and srowen committed
[SPARK-22896] Improvement in String interpolation
## What changes were proposed in this pull request? * String interpolation in ml pipeline example has been corrected as per scala standard. ## How was this patch tested? * manually tested. Author: chetkhatri <[email protected]> Closes #20070 from chetkhatri/mllib-chetan-contrib. (cherry picked from commit 9a2b65a) Signed-off-by: Sean Owen <[email protected]>
1 parent 27c949d commit 79f7263

File tree

49 files changed

+94
-96
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

49 files changed

+94
-96
lines changed

examples/src/main/java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ public static void main(String[] args) {
6666
.setNumBuckets(3);
6767

6868
Dataset<Row> result = discretizer.fit(df).transform(df);
69-
result.show();
69+
result.show(false);
7070
// $example off$
7171
spark.stop();
7272
}

examples/src/main/scala/org/apache/spark/examples/SimpleSkewedGroupByTest.scala

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -60,10 +60,6 @@ object SimpleSkewedGroupByTest {
6060
pairs1.count
6161

6262
println(s"RESULT: ${pairs1.groupByKey(numReducers).count}")
63-
// Print how many keys each reducer got (for debugging)
64-
// println("RESULT: " + pairs1.groupByKey(numReducers)
65-
// .map{case (k,v) => (k, v.size)}
66-
// .collectAsMap)
6763

6864
spark.stop()
6965
}

examples/src/main/scala/org/apache/spark/examples/graphx/Analytics.scala

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -145,9 +145,11 @@ object Analytics extends Logging {
145145
// TriangleCount requires the graph to be partitioned
146146
.partitionBy(partitionStrategy.getOrElse(RandomVertexCut)).cache()
147147
val triangles = TriangleCount.run(graph)
148-
println("Triangles: " + triangles.vertices.map {
148+
val triangleTypes = triangles.vertices.map {
149149
case (vid, data) => data.toLong
150-
}.reduce(_ + _) / 3)
150+
}.reduce(_ + _) / 3
151+
152+
println(s"Triangles: ${triangleTypes}")
151153
sc.stop()
152154

153155
case _ =>

examples/src/main/scala/org/apache/spark/examples/graphx/SynthBenchmark.scala

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ object SynthBenchmark {
5252
arg =>
5353
arg.dropWhile(_ == '-').split('=') match {
5454
case Array(opt, v) => (opt -> v)
55-
case _ => throw new IllegalArgumentException("Invalid argument: " + arg)
55+
case _ => throw new IllegalArgumentException(s"Invalid argument: $arg")
5656
}
5757
}
5858

@@ -76,7 +76,7 @@ object SynthBenchmark {
7676
case ("sigma", v) => sigma = v.toDouble
7777
case ("degFile", v) => degFile = v
7878
case ("seed", v) => seed = v.toInt
79-
case (opt, _) => throw new IllegalArgumentException("Invalid option: " + opt)
79+
case (opt, _) => throw new IllegalArgumentException(s"Invalid option: $opt")
8080
}
8181

8282
val conf = new SparkConf()
@@ -86,7 +86,7 @@ object SynthBenchmark {
8686
val sc = new SparkContext(conf)
8787

8888
// Create the graph
89-
println(s"Creating graph...")
89+
println("Creating graph...")
9090
val unpartitionedGraph = GraphGenerators.logNormalGraph(sc, numVertices,
9191
numEPart.getOrElse(sc.defaultParallelism), mu, sigma, seed)
9292
// Repartition the graph

examples/src/main/scala/org/apache/spark/examples/ml/ChiSquareTestExample.scala

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -52,9 +52,9 @@ object ChiSquareTestExample {
5252

5353
val df = data.toDF("label", "features")
5454
val chi = ChiSquareTest.test(df, "features", "label").head
55-
println("pValues = " + chi.getAs[Vector](0))
56-
println("degreesOfFreedom = " + chi.getSeq[Int](1).mkString("[", ",", "]"))
57-
println("statistics = " + chi.getAs[Vector](2))
55+
println(s"pValues = ${chi.getAs[Vector](0)}")
56+
println(s"degreesOfFreedom ${chi.getSeq[Int](1).mkString("[", ",", "]")}")
57+
println(s"statistics ${chi.getAs[Vector](2)}")
5858
// $example off$
5959

6060
spark.stop()

examples/src/main/scala/org/apache/spark/examples/ml/CorrelationExample.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -51,10 +51,10 @@ object CorrelationExample {
5151

5252
val df = data.map(Tuple1.apply).toDF("features")
5353
val Row(coeff1: Matrix) = Correlation.corr(df, "features").head
54-
println("Pearson correlation matrix:\n" + coeff1.toString)
54+
println(s"Pearson correlation matrix:\n $coeff1")
5555

5656
val Row(coeff2: Matrix) = Correlation.corr(df, "features", "spearman").head
57-
println("Spearman correlation matrix:\n" + coeff2.toString)
57+
println(s"Spearman correlation matrix:\n $coeff2")
5858
// $example off$
5959

6060
spark.stop()

examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ object DataFrameExample {
4747
val parser = new OptionParser[Params]("DataFrameExample") {
4848
head("DataFrameExample: an example app using DataFrame for ML.")
4949
opt[String]("input")
50-
.text(s"input path to dataframe")
50+
.text("input path to dataframe")
5151
.action((x, c) => c.copy(input = x))
5252
checkConfig { params =>
5353
success
@@ -93,7 +93,7 @@ object DataFrameExample {
9393
// Load the records back.
9494
println(s"Loading Parquet file with UDT from $outputDir.")
9595
val newDF = spark.read.parquet(outputDir)
96-
println(s"Schema from Parquet:")
96+
println("Schema from Parquet:")
9797
newDF.printSchema()
9898

9999
spark.stop()

examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeClassificationExample.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -83,10 +83,10 @@ object DecisionTreeClassificationExample {
8383
.setPredictionCol("prediction")
8484
.setMetricName("accuracy")
8585
val accuracy = evaluator.evaluate(predictions)
86-
println("Test Error = " + (1.0 - accuracy))
86+
println(s"Test Error = ${(1.0 - accuracy)}")
8787

8888
val treeModel = model.stages(2).asInstanceOf[DecisionTreeClassificationModel]
89-
println("Learned classification tree model:\n" + treeModel.toDebugString)
89+
println(s"Learned classification tree model:\n ${treeModel.toDebugString}")
9090
// $example off$
9191

9292
spark.stop()

examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeRegressionExample.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -73,10 +73,10 @@ object DecisionTreeRegressionExample {
7373
.setPredictionCol("prediction")
7474
.setMetricName("rmse")
7575
val rmse = evaluator.evaluate(predictions)
76-
println("Root Mean Squared Error (RMSE) on test data = " + rmse)
76+
println(s"Root Mean Squared Error (RMSE) on test data = $rmse")
7777

7878
val treeModel = model.stages(1).asInstanceOf[DecisionTreeRegressionModel]
79-
println("Learned regression tree model:\n" + treeModel.toDebugString)
79+
println(s"Learned regression tree model:\n ${treeModel.toDebugString}")
8080
// $example off$
8181

8282
spark.stop()

examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ object DeveloperApiExample {
5353
// Create a LogisticRegression instance. This instance is an Estimator.
5454
val lr = new MyLogisticRegression()
5555
// Print out the parameters, documentation, and any default values.
56-
println("MyLogisticRegression parameters:\n" + lr.explainParams() + "\n")
56+
println(s"MyLogisticRegression parameters:\n ${lr.explainParams()}")
5757

5858
// We may set parameters using setter methods.
5959
lr.setMaxIter(10)
@@ -169,10 +169,10 @@ private class MyLogisticRegressionModel(
169169
Vectors.dense(-margin, margin)
170170
}
171171

172-
/** Number of classes the label can take. 2 indicates binary classification. */
172+
// Number of classes the label can take. 2 indicates binary classification.
173173
override val numClasses: Int = 2
174174

175-
/** Number of features the model was trained on. */
175+
// Number of features the model was trained on.
176176
override val numFeatures: Int = coefficients.size
177177

178178
/**

0 commit comments

Comments (0)