@@ -32,8 +32,7 @@
* Logistic regression based classification.
*
* This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
+ * please refer to org.apache.spark.ml.classification.LogisticRegression.
*/
public final class JavaHdfsLR {

@@ -43,8 +42,7 @@ public final class JavaHdfsLR {
static void showWarning() {
String warning = "WARN: This is a naive implementation of Logistic Regression " +
"and is given as an example!\n" +
"Please use either org.apache.spark.mllib.classification.LogisticRegressionWithSGD " +
"or org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS " +
"Please use org.apache.spark.ml.classification.LogisticRegression " +
"for more conventional use.";
System.err.println(warning);
}

examples/src/main/python/als.py (2 additions, 2 deletions)
@@ -17,7 +17,7 @@

"""
This is an example implementation of ALS for learning how to use Spark. Please refer to
- ALS in pyspark.mllib.recommendation for more conventional use.
+ pyspark.ml.recommendation.ALS for more conventional use.

This example requires numpy (http://www.numpy.org/)
"""
@@ -59,7 +59,7 @@ def update(i, vec, mat, ratings):
"""

print("""WARN: This is a naive implementation of ALS and is given as an
- example. Please use the ALS method found in pyspark.mllib.recommendation for more
+ example. Please use pyspark.ml.recommendation.ALS for more
conventional use.""", file=sys.stderr)

sc = SparkContext(appName="PythonALS")
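
For reference, a minimal sketch of the pyspark.ml.recommendation.ALS API that the updated docstring points to; the app name, column names, and toy ratings below are illustrative assumptions, not part of this patch:

    from pyspark.sql import SparkSession
    from pyspark.ml.recommendation import ALS

    spark = SparkSession.builder.appName("ALSSketch").getOrCreate()

    # Toy (user, item, rating) data; the column names are illustrative.
    ratings = spark.createDataFrame(
        [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 5.0)],
        ["userId", "itemId", "rating"])

    als = ALS(maxIter=5, regParam=0.01,
              userCol="userId", itemCol="itemId", ratingCol="rating")
    model = als.fit(ratings)            # ALSModel holding user/item factors
    model.transform(ratings).show()     # adds a "prediction" column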

examples/src/main/python/kmeans.py (4 additions, 4 deletions)
@@ -17,8 +17,8 @@

"""
The K-means algorithm written from scratch against PySpark. In practice,
- one may prefer to use the KMeans algorithm in MLlib, as shown in
- examples/src/main/python/mllib/kmeans.py.
+ one may prefer to use the KMeans algorithm in ML, as shown in
+ examples/src/main/python/ml/kmeans_example.py.

This example requires NumPy (http://www.numpy.org/).
"""
@@ -52,8 +52,8 @@ def closestPoint(p, centers):
exit(-1)

print("""WARN: This is a naive implementation of KMeans Clustering and is given
- as an example! Please refer to examples/src/main/python/mllib/kmeans.py for an example on
- how to use MLlib's KMeans implementation.""", file=sys.stderr)
+ as an example! Please refer to examples/src/main/python/ml/kmeans_example.py for an
+ example on how to use ML's KMeans implementation.""", file=sys.stderr)

sc = SparkContext(appName="PythonKMeans")
lines = sc.textFile(sys.argv[1])
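
For reference, a minimal sketch of pyspark.ml.clustering.KMeans, which the referenced kmeans_example.py demonstrates; the toy vectors and k=2 below are illustrative assumptions, not part of this patch:

    from pyspark.sql import SparkSession
    from pyspark.ml.linalg import Vectors
    from pyspark.ml.clustering import KMeans

    spark = SparkSession.builder.appName("KMeansSketch").getOrCreate()

    # Toy 2-D points forming two loose groups; purely illustrative data.
    df = spark.createDataFrame(
        [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
         (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)],
        ["features"])

    kmeans = KMeans(k=2, seed=1)
    model = kmeans.fit(df)
    print(model.clusterCenters())       # two centroids, one per cluster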

examples/src/main/python/logistic_regression.py (4 additions, 3 deletions)
@@ -20,7 +20,7 @@
to act on batches of input data using efficient matrix operations.

In practice, one may prefer to use the LogisticRegression algorithm in
- MLlib, as shown in examples/src/main/python/mllib/logistic_regression.py.
+ ML, as shown in examples/src/main/python/ml/logistic_regression_with_elastic_net.py.
"""
from __future__ import print_function

@@ -51,8 +51,9 @@ def readPointBatch(iterator):
exit(-1)

print("""WARN: This is a naive implementation of Logistic Regression and is
- given as an example! Please refer to examples/src/main/python/mllib/logistic_regression.py
- to see how MLlib's implementation is used.""", file=sys.stderr)
+ given as an example!
+ Please refer to examples/src/main/python/ml/logistic_regression_with_elastic_net.py
+ to see how ML's implementation is used.""", file=sys.stderr)

sc = SparkContext(appName="PythonLR")
points = sc.textFile(sys.argv[1]).mapPartitions(readPointBatch).cache()
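
For reference, a minimal sketch of pyspark.ml.classification.LogisticRegression with elastic-net regularization, in the spirit of the referenced logistic_regression_with_elastic_net.py; the toy labeled points and parameter values are illustrative assumptions, not part of this patch:

    from pyspark.sql import SparkSession
    from pyspark.ml.linalg import Vectors
    from pyspark.ml.classification import LogisticRegression

    spark = SparkSession.builder.appName("LogRegSketch").getOrCreate()

    # Toy labeled points: (label, feature vector); purely illustrative data.
    training = spark.createDataFrame(
        [(1.0, Vectors.dense([0.0, 1.1, 0.1])),
         (0.0, Vectors.dense([2.0, 1.0, -1.0])),
         (0.0, Vectors.dense([2.0, 1.3, 1.0])),
         (1.0, Vectors.dense([0.0, 1.2, -0.5]))],
        ["label", "features"])

    lr = LogisticRegression(maxIter=10, regParam=0.01, elasticNetParam=0.8)
    model = lr.fit(training)
    print(model.coefficients, model.intercept)
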
@@ -24,7 +24,7 @@ import org.apache.commons.math3.linear._
* Alternating least squares matrix factorization.
*
* This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to org.apache.spark.mllib.recommendation.ALS
+ * please refer to org.apache.spark.ml.recommendation.ALS.
*/
object LocalALS {

@@ -96,7 +96,7 @@ object LocalALS {
def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of ALS and is given as an example!
- |Please use the ALS method found in org.apache.spark.mllib.recommendation
+ |Please use org.apache.spark.ml.recommendation.ALS
|for more conventional use.
""".stripMargin)
}
@@ -26,8 +26,7 @@ import breeze.linalg.{DenseVector, Vector}
* Logistic regression based classification.
*
* This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
+ * please refer to org.apache.spark.ml.classification.LogisticRegression.
*/
object LocalFileLR {
val D = 10 // Number of dimensions
@@ -43,8 +42,7 @@ object LocalFileLR {
def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of Logistic Regression and is given as an example!
- |Please use either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- |org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
+ |Please use org.apache.spark.ml.classification.LogisticRegression
|for more conventional use.
""".stripMargin)
}
@@ -29,7 +29,7 @@ import breeze.linalg.{squaredDistance, DenseVector, Vector}
* K-means clustering.
*
* This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to org.apache.spark.mllib.clustering.KMeans
+ * please refer to org.apache.spark.ml.clustering.KMeans.
*/
object LocalKMeans {
val N = 1000
@@ -66,7 +66,7 @@ object LocalKMeans {
def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of KMeans Clustering and is given as an example!
- |Please use the KMeans method found in org.apache.spark.mllib.clustering
+ |Please use org.apache.spark.ml.clustering.KMeans
|for more conventional use.
""".stripMargin)
}
@@ -26,8 +26,7 @@ import breeze.linalg.{DenseVector, Vector}
* Logistic regression based classification.
*
* This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
+ * please refer to org.apache.spark.ml.classification.LogisticRegression.
*/
object LocalLR {
val N = 10000 // Number of data points
@@ -50,8 +49,7 @@ object LocalLR {
def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of Logistic Regression and is given as an example!
- |Please use either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- |org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
+ |Please use org.apache.spark.ml.classification.LogisticRegression
|for more conventional use.
""".stripMargin)
}
@@ -26,7 +26,7 @@ import org.apache.spark._
* Alternating least squares matrix factorization.
*
* This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to org.apache.spark.mllib.recommendation.ALS
+ * please refer to org.apache.spark.ml.recommendation.ALS.
*/
object SparkALS {

@@ -81,7 +81,7 @@ object SparkALS {
def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of ALS and is given as an example!
- |Please use the ALS method found in org.apache.spark.mllib.recommendation
+ |Please use org.apache.spark.ml.recommendation.ALS
|for more conventional use.
""".stripMargin)
}
@@ -31,8 +31,7 @@ import org.apache.spark._
* Logistic regression based classification.
*
* This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
+ * please refer to org.apache.spark.ml.classification.LogisticRegression.
*/
object SparkHdfsLR {
val D = 10 // Number of dimensions
@@ -54,8 +53,7 @@ object SparkHdfsLR {
def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of Logistic Regression and is given as an example!
- |Please use either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- |org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
+ |Please use org.apache.spark.ml.classification.LogisticRegression
|for more conventional use.
""".stripMargin)
}
@@ -26,7 +26,7 @@ import org.apache.spark.{SparkConf, SparkContext}
* K-means clustering.
*
* This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to org.apache.spark.mllib.clustering.KMeans
+ * please refer to org.apache.spark.ml.clustering.KMeans.
*/
object SparkKMeans {

@@ -52,7 +52,7 @@ object SparkKMeans {
def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of KMeans Clustering and is given as an example!
- |Please use the KMeans method found in org.apache.spark.mllib.clustering
+ |Please use org.apache.spark.ml.clustering.KMeans
|for more conventional use.
""".stripMargin)
}
@@ -31,8 +31,7 @@ import org.apache.spark._
* Usage: SparkLR [slices]
*
* This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
+ * please refer to org.apache.spark.ml.classification.LogisticRegression.
*/
object SparkLR {
val N = 10000 // Number of data points
@@ -55,8 +54,7 @@ object SparkLR {
def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of Logistic Regression and is given as an example!
- |Please use either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- |org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
+ |Please use org.apache.spark.ml.classification.LogisticRegression
|for more conventional use.
""".stripMargin)
}