diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
index 98f9d1689c8e..54953adb5f3d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.examples.mllib
 
+import scala.language.reflectiveCalls
+
 import scopt.OptionParser
 
 import org.apache.spark.{SparkConf, SparkContext}
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeansExample.scala
similarity index 90%
rename from examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeans.scala
rename to examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeansExample.scala
index 33e5760aed99..8bb12d2ee9ed 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeansExample.scala
@@ -17,10 +17,10 @@
 
 package org.apache.spark.examples.mllib
 
+import org.apache.spark.SparkConf
+import org.apache.spark.mllib.clustering.StreamingKMeans
 import org.apache.spark.mllib.linalg.Vectors
 import org.apache.spark.mllib.regression.LabeledPoint
-import org.apache.spark.mllib.clustering.StreamingKMeans
-import org.apache.spark.SparkConf
 import org.apache.spark.streaming.{Seconds, StreamingContext}
 
 /**
@@ -36,28 +36,28 @@ import org.apache.spark.streaming.{Seconds, StreamingContext}
  * `(y,[x1,x2,x3,...,xn])`
  * Where y is some identifier. n must be the same for train and test.
  *
- * Usage: StreamingKmeans <trainingDir> <testDir> <batchDuration> <numClusters> <numDimensions>
+ * Usage:
+ *   StreamingKMeansExample <trainingDir> <testDir> <batchDuration> <numClusters> <numDimensions>
  *
  * To run on your local machine using the two directories `trainingDir` and `testDir`,
  * with updates every 5 seconds, 2 dimensions per data point, and 3 clusters, call:
- *    $ bin/run-example \
- *        org.apache.spark.examples.mllib.StreamingKMeans trainingDir testDir 5 3 2
+ *    $ bin/run-example mllib.StreamingKMeansExample trainingDir testDir 5 3 2
 *
 * As you add text files to `trainingDir` the clusters will continuously update.
 * Anytime you add text files to `testDir`, you'll see predicted labels using the current model.
 *
 */
-object StreamingKMeans {
+object StreamingKMeansExample {
 
   def main(args: Array[String]) {
     if (args.length != 5) {
       System.err.println(
-        "Usage: StreamingKMeans " +
+        "Usage: StreamingKMeansExample " +
           "<trainingDir> <testDir> <batchDuration> <numClusters> <numDimensions>")
       System.exit(1)
     }
 
-    val conf = new SparkConf().setMaster("local").setAppName("StreamingLinearRegression")
+    val conf = new SparkConf().setMaster("local").setAppName("StreamingKMeansExample")
     val ssc = new StreamingContext(conf, Seconds(args(2).toLong))
 
     val trainingData = ssc.textFileStream(args(0)).map(Vectors.parse)