Commit 64d6bc4

Updated DecisionTreeRunner.scala and StreamingKMeans.scala to eliminate compilation warnings, including renaming StreamingKMeans to StreamingKMeansExample.
1 parent 17c162f

File tree

2 files changed, +10 -8 lines changed

examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala

Lines changed: 2 additions & 0 deletions
@@ -17,6 +17,8 @@
 
 package org.apache.spark.examples.mllib
 
+import scala.language.reflectiveCalls
+
 import scopt.OptionParser
 
 import org.apache.spark.{SparkConf, SparkContext}
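
The added import matters because the Scala 2.10+ compiler emits a feature warning whenever a method is invoked on a structural type, since such calls are implemented with runtime reflection; importing scala.language.reflectiveCalls acknowledges the feature and silences the warning. A minimal, hypothetical sketch of the pattern that triggers it (not code from DecisionTreeRunner itself):

    import scala.language.reflectiveCalls // without this import, the compiler warns about the call below

    object ReflectiveCallDemo {
      // Structural type: any value exposing a parameterless `size: Int`.
      def printSize(thing: { def size: Int }): Unit = {
        // This call is resolved via reflection at runtime; it is what the warning points at.
        println(s"size = ${thing.size}")
      }

      def main(args: Array[String]): Unit = {
        printSize(new { def size: Int = 42 }) // anonymous instance satisfying the structural type
      }
    }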

examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeans.scala renamed to examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeansExample.scala

Lines changed: 8 additions & 8 deletions
@@ -17,10 +17,10 @@
 
 package org.apache.spark.examples.mllib
 
+import org.apache.spark.SparkConf
+import org.apache.spark.mllib.clustering.StreamingKMeans
 import org.apache.spark.mllib.linalg.Vectors
 import org.apache.spark.mllib.regression.LabeledPoint
-import org.apache.spark.mllib.clustering.StreamingKMeans
-import org.apache.spark.SparkConf
 import org.apache.spark.streaming.{Seconds, StreamingContext}
 
 /**
@@ -36,28 +36,28 @@ import org.apache.spark.streaming.{Seconds, StreamingContext}
  * `(y,[x1,x2,x3,...,xn])`
  * Where y is some identifier. n must be the same for train and test.
  *
- * Usage: StreamingKmeans <trainingDir> <testDir> <batchDuration> <numClusters> <numDimensions>
+ * Usage:
+ *   StreamingKMeansExample <trainingDir> <testDir> <batchDuration> <numClusters> <numDimensions>
 *
  * To run on your local machine using the two directories `trainingDir` and `testDir`,
  * with updates every 5 seconds, 2 dimensions per data point, and 3 clusters, call:
- *    $ bin/run-example \
- *        org.apache.spark.examples.mllib.StreamingKMeans trainingDir testDir 5 3 2
+ *    $ bin/run-example mllib.StreamingKMeansExample trainingDir testDir 5 3 2
 *
  * As you add text files to `trainingDir` the clusters will continuously update.
  * Anytime you add text files to `testDir`, you'll see predicted labels using the current model.
 *
  */
-object StreamingKMeans {
+object StreamingKMeansExample {
 
   def main(args: Array[String]) {
     if (args.length != 5) {
       System.err.println(
-        "Usage: StreamingKMeans " +
+        "Usage: StreamingKMeansExample " +
           "<trainingDir> <testDir> <batchDuration> <numClusters> <numDimensions>")
       System.exit(1)
     }
 
-    val conf = new SparkConf().setMaster("local").setAppName("StreamingLinearRegression")
+    val conf = new SparkConf().setMaster("local").setAppName("StreamingKMeansExample")
     val ssc = new StreamingContext(conf, Seconds(args(2).toLong))
 
     val trainingData = ssc.textFileStream(args(0)).map(Vectors.parse)
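
The hunk cuts off at the training stream. For orientation, the example roughly continues as below: it parses the test stream, configures a StreamingKMeans model, trains on the training DStream, and prints predictions. This is a sketch of the surrounding, unchanged code rather than a quote of this commit, so treat the exact lines as approximate:

    // Approximate continuation of StreamingKMeansExample.main (unchanged by this commit).
    val testData = ssc.textFileStream(args(1)).map(LabeledPoint.parse)

    val model = new StreamingKMeans()
      .setK(args(3).toInt)                  // <numClusters>
      .setDecayFactor(1.0)                  // weight all batches equally
      .setRandomCenters(args(4).toInt, 0.0) // random initial centers in <numDimensions> dimensions

    model.trainOn(trainingData)             // update cluster centers on every training batch
    model.predictOnValues(testData.map(lp => (lp.label, lp.features))).print()

    ssc.start()
    ssc.awaitTermination()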
