From 4e96c0153063b35fc03e497f28292a97832e81d4 Mon Sep 17 00:00:00 2001
From: Bernardo Gomez Palacio
Date: Tue, 15 Apr 2014 14:03:30 -0700
Subject: [PATCH 01/23] Add YARN/Stable compiled classes to the CLASSPATH.

The change adds `./yarn/stable/target/scala-$SCALA_VERSION/classes` to the
_classpath_ when a _dependencies_ assembly is available in the assembly
directory.

Why is this change necessary? It eases developing features and bug fixes
for Spark on YARN.

[ticket: X] : NA
Author : bernardo.gomezpalacio@gmail.com
Reviewer : ?
Testing : ?
---
 bin/compute-classpath.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/bin/compute-classpath.sh b/bin/compute-classpath.sh
index 2a2bb376fd71..2efec362b1df 100755
--- a/bin/compute-classpath.sh
+++ b/bin/compute-classpath.sh
@@ -44,6 +44,7 @@ if [ -f "$ASSEMBLY_DIR"/spark-assembly*hadoop*-deps.jar ]; then
   CLASSPATH="$CLASSPATH:$FWDIR/sql/catalyst/target/scala-$SCALA_VERSION/classes"
   CLASSPATH="$CLASSPATH:$FWDIR/sql/core/target/scala-$SCALA_VERSION/classes"
   CLASSPATH="$CLASSPATH:$FWDIR/sql/hive/target/scala-$SCALA_VERSION/classes"
+  CLASSPATH="$CLASSPATH:$FWDIR/yarn/stable/target/scala-$SCALA_VERSION/classes"

   DEPS_ASSEMBLY_JAR=`ls "$ASSEMBLY_DIR"/spark-assembly*hadoop*-deps.jar`
   CLASSPATH="$CLASSPATH:$DEPS_ASSEMBLY_JAR"

From 1342886a396be00eda9449c6d84155dfecf954c8 Mon Sep 17 00:00:00 2001
From: Bernardo Gomez Palacio
Date: Tue, 15 Apr 2014 14:46:44 -0700
Subject: [PATCH 02/23] The `spark-class` shell now ignores non-jar files in
 the assembly directory.

Why is this change necessary?

While developing Spark I found myself rebuilding either the dependencies
assembly or the full Spark assembly, and I kept running into the case of
having both the deps assembly and the full assembly in the same directory,
which produced an error when I called either `spark-shell` or `spark-submit`.
Quick fix: rename one of them to a `.bkp` file, depending on the development
workflow you are executing at the moment, and let `spark-class` ignore
non-jar files. Another option would be to move the "offending" jar to a
different directory, but in my opinion keeping them in place is a bit tidier.

e.g.
```
ll ./assembly/target/scala-2.10
spark-assembly-1.0.0-SNAPSHOT-hadoop2.3.0-deps.jar
spark-assembly-1.0.0-SNAPSHOT-hadoop2.3.0.jar.bkp
```

[ticket: X] : ?
---
 bin/spark-class | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/bin/spark-class b/bin/spark-class
index 1b0d309cc5b1..ff03f1fda6e9 100755
--- a/bin/spark-class
+++ b/bin/spark-class
@@ -109,8 +109,8 @@ export JAVA_OPTS

 if [ ! -f "$FWDIR/RELEASE" ]; then
   # Exit if the user hasn't compiled Spark
-  num_jars=$(ls "$FWDIR"/assembly/target/scala-$SCALA_VERSION/ | grep "spark-assembly.*hadoop.*.jar" | wc -l)
-  jars_list=$(ls "$FWDIR"/assembly/target/scala-$SCALA_VERSION/ | grep "spark-assembly.*hadoop.*.jar")
+  num_jars=$(ls "$FWDIR"/assembly/target/scala-$SCALA_VERSION/ | grep -E "spark-assembly.*hadoop.*.jar$" | wc -l)
+  jars_list=$(ls "$FWDIR"/assembly/target/scala-$SCALA_VERSION/ | grep -E "spark-assembly.*hadoop.*.jar$")
   if [ "$num_jars" -eq "0" ]; then
     echo "Failed to find Spark assembly in $FWDIR/assembly/target/scala-$SCALA_VERSION/" >&2
     echo "You need to build Spark with 'sbt/sbt assembly' before running this program." >&2

From ddf2547aa2aea8155f8d6c0386e2cb37bcf61537 Mon Sep 17 00:00:00 2001
From: Bernardo Gomez Palacio
Date: Tue, 15 Apr 2014 14:53:23 -0700
Subject: [PATCH 03/23] The `spark-shell` option `--log-conf` also enables
 SPARK_PRINT_LAUNCH_COMMAND.

Why is this change necessary?

Most likely, when enabling `--log-conf` through `spark-shell`, you are also
interested in the full invocation of the java command, including the
_classpath_ and extended options, e.g.

```
INFO: Base Directory set to /Users/bernardo/work/github/berngp/spark
INFO: Spark Master is yarn-client
INFO: Spark REPL options -Dspark.logConf=true
Spark Command: /Library/Java/JavaVirtualMachines/jdk1.8.0.jdk/Contents/Home/bin/java -cp :/Users/bernardo/work/github/berngp/spark/conf:/Users/bernardo/work/github/berngp/spark/core/target/scala-2.10/classes:/Users/bernardo/work/github/berngp/spark/repl/target/scala-2.10/classes:/Users/bernardo/work/github/berngp/spark/mllib/target/scala-2.10/classes:/Users/bernardo/work/github/berngp/spark/bagel/target/scala-2.10/classes:/Users/bernardo/work/github/berngp/spark/graphx/target/scala-2.10/classes:/Users/bernardo/work/github/berngp/spark/streaming/target/scala-2.10/classes:/Users/bernardo/work/github/berngp/spark/tools/target/scala-2.10/classes:/Users/bernardo/work/github/berngp/spark/sql/catalyst/target/scala-2.10/classes:/Users/bernardo/work/github/berngp/spark/sql/core/target/scala-2.10/classes:/Users/bernardo/work/github/berngp/spark/sql/hive/target/scala-2.10/classes:/Users/bernardo/work/github/berngp/spark/yarn/stable/target/scala-2.10/classes:/Users/bernardo/work/github/berngp/spark/assembly/target/scala-2.10/spark-assembly-1.0.0-SNAPSHOT-hadoop2.3.0-deps.jar:/usr/local/Cellar/hadoop/2.2.0/libexec/etc/hadoop -XX:ErrorFile=/tmp/spark-shell-hs_err_pid.log -XX:HeapDumpPath=/tmp/spark-shell-java_pid.hprof -XX:-HeapDumpOnOutOfMemoryError -XX:-PrintGC -XX:-PrintGCDetails -XX:-PrintGCTimeStamps -XX:-PrintTenuringDistribution -XX:-PrintAdaptiveSizePolicy -XX:GCLogFileSize=1024K -XX:-UseGCLogFileRotation -Xloggc:/tmp/spark-shell-gc.log -XX:+UseConcMarkSweepGC -Dspark.cleaner.ttl=10000 -Dspark.driver.host=33.33.33.1 -Dspark.logConf=true -Djava.library.path= -Xms400M -Xmx400M org.apache.spark.repl.Main
```

[ticket: X] : ?
---
 bin/spark-shell | 1 +
 1 file changed, 1 insertion(+)

diff --git a/bin/spark-shell b/bin/spark-shell
index ea12d256b23a..cc9d1ea20146 100755
--- a/bin/spark-shell
+++ b/bin/spark-shell
@@ -123,6 +123,7 @@ function set_dm(){

 function set_spark_log_conf(){
   SPARK_REPL_OPTS="$SPARK_REPL_OPTS -Dspark.logConf=$1"
+  export SPARK_PRINT_LAUNCH_COMMAND=1
 }

 function set_spark_master(){

From 22045394955992c2c8dfe0e1040c6bb972be6ce4 Mon Sep 17 00:00:00 2001
From: Bernardo Gomez Palacio
Date: Tue, 15 Apr 2014 15:15:23 -0700
Subject: [PATCH 04/23] Root is now Spark, and qualify the assembly if it was
 built with YARN.

Why is this change necessary?

Renamed the SBT "root" project to "spark" to enhance readability. Currently
the assembly name is qualified with the Hadoop version, but does not indicate
whether YARN support was enabled. This change qualifies the assembly name so
that it is easy to tell whether YARN was enabled, e.g.

```
./make-distribution.sh --hadoop 2.3.0 --with-yarn
ls -l ./assembly/target/scala-2.10
spark-assembly-1.0.0-SNAPSHOT-hadoop2.3.0-yarn.jar
```
vs
```
./make-distribution.sh --hadoop 2.3.0
ls -l ./assembly/target/scala-2.10
spark-assembly-1.0.0-SNAPSHOT-hadoop2.3.0.jar
```

[ticket: X] : ?
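For a quick sanity check of the new naming scheme, a small shell sketch
(illustrative only; it assumes the `scala-2.10` output directory and the jar
suffixes shown above):

```
# Classify the assemblies in the build output by the suffix conventions
# used in this series: "-deps" for dependencies-only, "-yarn" for YARN-enabled.
for jar in ./assembly/target/scala-2.10/spark-assembly-*.jar; do
  case "$jar" in
    *-deps.jar) echo "dependencies-only assembly: $jar" ;;
    *-yarn.jar) echo "YARN-enabled assembly: $jar" ;;
    *)          echo "assembly without YARN: $jar" ;;
  esac
done
```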
---
 project/SparkBuild.scala | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 09b527c76a5a..f6e8a6ade161 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -52,7 +52,7 @@ object SparkBuild extends Build {
   val SCALAC_JVM_VERSION = "jvm-1.6"
   val JAVAC_JVM_VERSION = "1.6"

-  lazy val root = Project("root", file("."), settings = rootSettings) aggregate(allProjects: _*)
+  lazy val root = Project("spark", file("."), settings = rootSettings) aggregate(allProjects: _*)

   lazy val core = Project("core", file("core"), settings = coreSettings)

@@ -409,7 +409,7 @@ object SparkBuild extends Build {
   def catalystSettings = sharedSettings ++ Seq(
     name := "catalyst",
     // The mechanics of rewriting expression ids to compare trees in some test cases makes
-    // assumptions about the the expression ids being contiguious. Running tests in parallel breaks
+    // assumptions about the the expression ids being contiguous. Running tests in parallel breaks
     // this non-deterministically. TODO: FIX THIS.
     parallelExecution in Test := false,
     libraryDependencies ++= Seq(
@@ -516,7 +516,7 @@ object SparkBuild extends Build {
     libraryDependencies += "net.sf.py4j" % "py4j" % "0.8.1",
     name := "spark-assembly",
     assembleDeps in Compile <<= (packageProjects.map(packageBin in Compile in _) ++ Seq(packageDependency in Compile)).dependOn,
-    jarName in assembly <<= version map { v => "spark-assembly-" + v + "-hadoop" + hadoopVersion + ".jar" },
+    jarName in assembly <<= version map { v => s"spark-assembly-${v}-hadoop${hadoopVersion}${if (isYarnEnabled) "-yarn" else ""}.jar" },
     jarName in packageDependency <<= version map { v => "spark-assembly-" + v + "-hadoop" + hadoopVersion + "-deps.jar" }
   ) ++ assemblySettings ++ extraAssemblySettings

From 889bf4ed742ed3d06cb62276ef554f2f37b53ee6 Mon Sep 17 00:00:00 2001
From: Bernardo Gomez Palacio
Date: Tue, 15 Apr 2014 17:08:27 -0700
Subject: [PATCH 05/23] Upgrade the Maven build to YARN 2.3.0.

Upgraded to YARN 2.3.0, removed unnecessary `relativePath` values, and removed
the incorrect version override for the `org.apache.hadoop:hadoop-client`
dependency in `yarn/pom.xml`.
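After this change a build against stable YARN can be driven by the profile
alone, e.g. (a sketch; the flags follow the `building-with-maven.md` examples
updated later in this series):

```
# Build against Hadoop 2.3.0 with the stable "yarn" profile; the profile now
# carries the matching YARN 2.3.0 version, so no explicit yarn.version is needed.
mvn -Pyarn -Dhadoop.version=2.3.0 -DskipTests clean package
```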
--- pom.xml | 3 ++- yarn/alpha/pom.xml | 1 - yarn/pom.xml | 4 +--- yarn/stable/pom.xml | 1 - 4 files changed, 3 insertions(+), 6 deletions(-) diff --git a/pom.xml b/pom.xml index cd204376de5d..0fd3d227a066 100644 --- a/pom.xml +++ b/pom.xml @@ -836,7 +836,8 @@ yarn 2 - 2.2.0 + 2.3.0 + 2.3.0 2.5.0 diff --git a/yarn/alpha/pom.xml b/yarn/alpha/pom.xml index d0aeaceb0d23..1892d2111713 100644 --- a/yarn/alpha/pom.xml +++ b/yarn/alpha/pom.xml @@ -21,7 +21,6 @@ org.apache.spark yarn-parent_2.10 1.0.0-SNAPSHOT - ../pom.xml diff --git a/yarn/pom.xml b/yarn/pom.xml index 3342cb65edcd..f86095896384 100644 --- a/yarn/pom.xml +++ b/yarn/pom.xml @@ -21,14 +21,13 @@ org.apache.spark spark-parent 1.0.0-SNAPSHOT - ../pom.xml org.apache.spark yarn-parent_2.10 pom Spark Project YARN Parent POM - + org.apache.spark @@ -50,7 +49,6 @@ org.apache.hadoop hadoop-client - ${yarn.version} org.scalatest diff --git a/yarn/stable/pom.xml b/yarn/stable/pom.xml index e7915d12aef6..22ba11461213 100644 --- a/yarn/stable/pom.xml +++ b/yarn/stable/pom.xml @@ -21,7 +21,6 @@ org.apache.spark yarn-parent_2.10 1.0.0-SNAPSHOT - ../pom.xml From f1c7535fe6e97e1d5ebf8adcac01d82c794a01f8 Mon Sep 17 00:00:00 2001 From: witgo Date: Wed, 30 Apr 2014 01:48:01 +0800 Subject: [PATCH 06/23] =?UTF-8?q?Improved=20build=20configuration=20?= =?UTF-8?q?=E2=85=A1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/building-with-maven.md | 9 ++- pom.xml | 133 ++++++++++++++++++------------------ 2 files changed, 72 insertions(+), 70 deletions(-) diff --git a/docs/building-with-maven.md b/docs/building-with-maven.md index a5e530346740..5eb85f05a4d4 100644 --- a/docs/building-with-maven.md +++ b/docs/building-with-maven.md @@ -45,17 +45,20 @@ For Apache Hadoop versions 1.x, Cloudera CDH MRv1, and other Hadoop versions wit For Apache Hadoop 2.x, 0.23.x, Cloudera CDH MRv2, and other Hadoop versions with YARN, you can enable the "yarn-alpha" or "yarn" profile and set the "hadoop.version", "yarn.version" property. Note that Hadoop 0.23.X requires a special `-Phadoop-0.23` profile: # Apache Hadoop 2.0.5-alpha - $ mvn -Pyarn-alpha -Dhadoop.version=2.0.5-alpha -Dyarn.version=2.0.5-alpha -DskipTests clean package + $ mvn -Pyarn-alpha -Dhadoop.version=2.0.5-alpha -DskipTests clean package # Cloudera CDH 4.2.0 with MapReduce v2 - $ mvn -Pyarn-alpha -Dhadoop.version=2.0.0-cdh4.2.0 -Dyarn.version=2.0.0-cdh4.2.0 -DskipTests clean package + $ mvn -Pyarn-alpha -Dhadoop.version=2.0.0-cdh4.2.0 -DskipTests clean package # Apache Hadoop 2.2.X (e.g. 2.2.0 as below) and newer - $ mvn -Pyarn -Dhadoop.version=2.2.0 -Dyarn.version=2.2.0 -DskipTests clean package + $ mvn -Pyarn -Dhadoop.version=2.2.0 -DskipTests clean package # Apache Hadoop 0.23.x $ mvn -Pyarn-alpha -Phadoop-0.23 -Dhadoop.version=0.23.7 -Dyarn.version=0.23.7 -DskipTests clean package + # Different versions of HDFS vs YARN. + $ mvn -Pyarn-alpha -Dhadoop.version=2.3.0 -Dyarn.version= 0.23.7 -DskipTests clean package + ## Spark Tests in Maven ## Tests are run by default via the [ScalaTest Maven plugin](http://www.scalatest.org/user_guide/using_the_scalatest_maven_plugin). Some of the require Spark to be packaged first, so always run `mvn package` with `-DskipTests` the first time. You can then run the tests with `mvn -Dhadoop.version=... test`. diff --git a/pom.xml b/pom.xml index 560bd364ff91..73de1cc385c5 100644 --- a/pom.xml +++ b/pom.xml @@ -16,7 +16,8 @@ ~ limitations under the License. 
--> - + 4.0.0 org.apache @@ -119,7 +120,7 @@ 1.2.17 1.0.4 2.4.1 - 0.23.7 + ${hadoop.version} 0.94.6 0.12.0 1.3.2 @@ -135,7 +136,8 @@ - maven-repo + maven-repo + Maven Repository http://repo.maven.apache.org/maven2 @@ -558,64 +560,7 @@ jets3t 0.7.1 - - org.apache.hadoop - hadoop-yarn-api - ${yarn.version} - - - asm - asm - - - org.ow2.asm - asm - - - org.jboss.netty - netty - - - - - org.apache.hadoop - hadoop-yarn-common - ${yarn.version} - - - asm - asm - - - org.ow2.asm - asm - - - org.jboss.netty - netty - - - - - org.apache.hadoop - hadoop-yarn-client - ${yarn.version} - - - asm - asm - - - org.ow2.asm - asm - - - org.jboss.netty - netty - - - org.codehaus.jackson @@ -850,12 +795,6 @@ yarn - - - org.apache.avro - avro - - @@ -896,13 +835,73 @@ 2 2.3.0 - 2.3.0 2.5.0 yarn + + + + org.apache.hadoop + hadoop-yarn-api + ${yarn.version} + + + asm + asm + + + org.ow2.asm + asm + + + org.jboss.netty + netty + + + + + org.apache.hadoop + hadoop-yarn-common + ${yarn.version} + + + asm + asm + + + org.ow2.asm + asm + + + org.jboss.netty + netty + + + + + org.apache.hadoop + hadoop-yarn-client + ${yarn.version} + + + asm + asm + + + org.ow2.asm + asm + + + org.jboss.netty + netty + + + + + From 8540e83420a7d11394501f88872860d32ebc3f1b Mon Sep 17 00:00:00 2001 From: witgo Date: Wed, 30 Apr 2014 09:37:06 +0800 Subject: [PATCH 07/23] review commit --- docs/building-with-maven.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/building-with-maven.md b/docs/building-with-maven.md index 5eb85f05a4d4..e447dfea3bac 100644 --- a/docs/building-with-maven.md +++ b/docs/building-with-maven.md @@ -56,8 +56,8 @@ For Apache Hadoop 2.x, 0.23.x, Cloudera CDH MRv2, and other Hadoop versions with # Apache Hadoop 0.23.x $ mvn -Pyarn-alpha -Phadoop-0.23 -Dhadoop.version=0.23.7 -Dyarn.version=0.23.7 -DskipTests clean package - # Different versions of HDFS vs YARN. - $ mvn -Pyarn-alpha -Dhadoop.version=2.3.0 -Dyarn.version= 0.23.7 -DskipTests clean package + # Different versions of HDFS and YARN. 
+ $ mvn -Pyarn-alpha -Dhadoop.version=2.3.0 -Dyarn.version=0.23.7 -DskipTests clean package ## Spark Tests in Maven ## From c4c6e45ff134663196e07fbd3a812af57b8ea95c Mon Sep 17 00:00:00 2001 From: witgo Date: Wed, 30 Apr 2014 23:23:08 +0800 Subject: [PATCH 08/23] review commit --- project/SparkBuild.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index 45b4a753ef07..f609343dd1ba 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -569,7 +569,7 @@ object SparkBuild extends Build { libraryDependencies += "net.sf.py4j" % "py4j" % "0.8.1", name := "spark-assembly", assembleDeps in Compile <<= (packageProjects.map(packageBin in Compile in _) ++ Seq(packageDependency in Compile)).dependOn, - jarName in assembly <<= version map { v => s"spark-assembly-${v}-hadoop${hadoopVersion}${if (isYarnEnabled) "-yarn" else ""}.jar" }, + jarName in assembly <<= version map { v => "spark-assembly-" + v + "-hadoop" + hadoopVersion + ".jar" } jarName in packageDependency <<= version map { v => "spark-assembly-" + v + "-hadoop" + hadoopVersion + "-deps.jar" } ) ++ assemblySettings ++ extraAssemblySettings From e1a7e0020f3c42b9e7dae07cabd072f0dd38d463 Mon Sep 17 00:00:00 2001 From: witgo Date: Thu, 1 May 2014 14:34:57 +0800 Subject: [PATCH 09/23] improve travis tests coverage --- .gitignore | 1 + .jvmopts | 5 +++++ .travis.yml | 28 +++++++++++++++++++++------- core/pom.xml | 29 ----------------------------- pom.xml | 5 +++++ repl/pom.xml | 29 ----------------------------- yarn/pom.xml | 29 ----------------------------- 7 files changed, 32 insertions(+), 94 deletions(-) create mode 100644 .jvmopts diff --git a/.gitignore b/.gitignore index 857e9feb953b..a509bfbb495e 100644 --- a/.gitignore +++ b/.gitignore @@ -19,6 +19,7 @@ conf/spark-env.sh conf/streaming-env.sh conf/log4j.properties conf/spark-defaults.conf +conf/*.xml docs/_site docs/api target/ diff --git a/.jvmopts b/.jvmopts new file mode 100644 index 000000000000..86db0ab9b6dd --- /dev/null +++ b/.jvmopts @@ -0,0 +1,5 @@ +-Xmx3g +-Xss2M +-XX:+CMSClassUnloadingEnabled +-XX:MaxPermSize=512M +-XX:ReservedCodeCacheSize=512m \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 8ebd0d68429f..4895faed78d2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -15,18 +15,32 @@ language: scala scala: - - "2.10.3" + - "2.10.4" jdk: + - oraclejdk8 - oraclejdk7 - env: - matrix: - - TEST="scalastyle assembly/assembly" - - TEST="catalyst/test sql/test streaming/test mllib/test graphx/test bagel/test" - - TEST=hive/test + - openjdk6 cache: directories: - $HOME/.m2 - $HOME/.ivy2 - $HOME/.sbt + env: + matrix: + - SPARK_HADOOP_VERSION=1.2.1 + - SPARK_HADOOP_VERSION=2.4.0 + before_script: + - sudo apt-get install libgfortran3 script: - - "sbt ++$TRAVIS_SCALA_VERSION $TEST" + # TODO: Cannot get hive/test to pass + > + if [[ $SPARK_HADOOP_VERSION = '2.4.0' ]]; then + mvn clean package -DskipTests -Pyarn -Dhadoop.version=$SPARK_HADOOP_VERSION -Dyarn.version=$SPARK_HADOOP_VERSION + num_jars=$(ls "$FWDIR"/assembly/target/scala-2.10/ | grep "spark-assembly.*hadoop.*.jar" | wc -l) + if [ "$num_jars" -eq "0" ]; then + exit -1 + fi + mvn test -Pyarn -Dhadoop.version=$SPARK_HADOOP_VERSION -Dyarn.version=$SPARK_HADOOP_VERSION -am -pl mllib -pl bagel -pl sql/catalyst -pl yarn + else + SPARK_HADOOP_VERSION=$SPARK_HADOOP_VERSION sbt ++$TRAVIS_SCALA_VERSION assembly/assembly sql/test graphx/test streaming/test + fi \ No newline at end of file diff --git a/core/pom.xml b/core/pom.xml 
index 822b5b1dd7cc..1f71bb95f857 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -254,35 +254,6 @@ target/scala-${scala.binary.version}/classes target/scala-${scala.binary.version}/test-classes - - org.apache.maven.plugins - maven-antrun-plugin - - - test - - run - - - true - - - - - - - - - - - - - - - - - - org.scalatest scalatest-maven-plugin diff --git a/pom.xml b/pom.xml index 73de1cc385c5..e1e0bce3929d 100644 --- a/pom.xml +++ b/pom.xml @@ -682,6 +682,11 @@ ${project.build.directory}/SparkTestSuite.txt -Xmx3g -XX:MaxPermSize=${MaxPermGen} -XX:ReservedCodeCacheSize=512m + + ${basedir}/.. + 1 + ${spark.classpath} + diff --git a/repl/pom.xml b/repl/pom.xml index b761a176ce25..8539a9394ddd 100644 --- a/repl/pom.xml +++ b/repl/pom.xml @@ -92,35 +92,6 @@ target/scala-${scala.binary.version}/classes target/scala-${scala.binary.version}/test-classes - - org.apache.maven.plugins - maven-antrun-plugin - - - test - - run - - - true - - - - - - - - - - - - - - - - - - org.scalatest scalatest-maven-plugin diff --git a/yarn/pom.xml b/yarn/pom.xml index a10436c07d91..35b5090a102a 100644 --- a/yarn/pom.xml +++ b/yarn/pom.xml @@ -112,35 +112,6 @@ - - org.apache.maven.plugins - maven-antrun-plugin - - - test - - run - - - true - - - - - - - - - - - - - - - - - - org.scalatest scalatest-maven-plugin From effe79cd8c018c31be31c53260fad59fa50a77a9 Mon Sep 17 00:00:00 2001 From: witgo Date: Thu, 1 May 2014 14:46:04 +0800 Subject: [PATCH 10/23] missing "," --- project/SparkBuild.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index f609343dd1ba..03e25acf6f1b 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -569,7 +569,7 @@ object SparkBuild extends Build { libraryDependencies += "net.sf.py4j" % "py4j" % "0.8.1", name := "spark-assembly", assembleDeps in Compile <<= (packageProjects.map(packageBin in Compile in _) ++ Seq(packageDependency in Compile)).dependOn, - jarName in assembly <<= version map { v => "spark-assembly-" + v + "-hadoop" + hadoopVersion + ".jar" } + jarName in assembly <<= version map { v => "spark-assembly-" + v + "-hadoop" + hadoopVersion + ".jar" }, jarName in packageDependency <<= version map { v => "spark-assembly-" + v + "-hadoop" + hadoopVersion + "-deps.jar" } ) ++ assemblySettings ++ extraAssemblySettings From 9ea1af926c3ccf36c59e13c103ec6052c032f77e Mon Sep 17 00:00:00 2001 From: witgo Date: Thu, 1 May 2014 15:04:53 +0800 Subject: [PATCH 11/23] add the dependency of commons-lang --- pom.xml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pom.xml b/pom.xml index e1e0bce3929d..76e0e92a4bc1 100644 --- a/pom.xml +++ b/pom.xml @@ -373,6 +373,11 @@ commons-net 2.2 + + commons-lang + commons-lang + 2.5 + io.netty netty-all From 0ed124dc0e453a0a59d3c387651be970859a9a0a Mon Sep 17 00:00:00 2001 From: witgo Date: Thu, 1 May 2014 16:07:45 +0800 Subject: [PATCH 12/23] SPARK-1693: Most of the tests throw a java.lang.SecurityException when spark built for hadoop 2.3.0 , 2.4.0 --- pom.xml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pom.xml b/pom.xml index 76e0e92a4bc1..1a7f590cc333 100644 --- a/pom.xml +++ b/pom.xml @@ -888,6 +888,10 @@ org.jboss.netty netty + + javax.servlet + servlet-api + @@ -908,6 +912,10 @@ org.jboss.netty netty + + javax.servlet + servlet-api + From 03b136fe1ad64ee12ffe90bacfe715a2ad87b2c2 Mon Sep 17 00:00:00 2001 From: witgo Date: Thu, 1 May 2014 16:27:11 +0800 Subject: [PATCH 13/23] revert .travis.yml --- .jvmopts | 5 ----- .travis.yml | 28 
+++++++--------------------- 2 files changed, 7 insertions(+), 26 deletions(-) delete mode 100644 .jvmopts diff --git a/.jvmopts b/.jvmopts deleted file mode 100644 index 86db0ab9b6dd..000000000000 --- a/.jvmopts +++ /dev/null @@ -1,5 +0,0 @@ --Xmx3g --Xss2M --XX:+CMSClassUnloadingEnabled --XX:MaxPermSize=512M --XX:ReservedCodeCacheSize=512m \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 4895faed78d2..a37e958c6809 100644 --- a/.travis.yml +++ b/.travis.yml @@ -15,32 +15,18 @@ language: scala scala: - - "2.10.4" + - "2.10.3" jdk: - - oraclejdk8 - oraclejdk7 - - openjdk6 + env: + matrix: + - TEST="scalastyle assembly/assembly" + - TEST="catalyst/test sql/test streaming/test mllib/test graphx/test bagel/test" + - TEST=hive/test cache: directories: - $HOME/.m2 - $HOME/.ivy2 - $HOME/.sbt - env: - matrix: - - SPARK_HADOOP_VERSION=1.2.1 - - SPARK_HADOOP_VERSION=2.4.0 - before_script: - - sudo apt-get install libgfortran3 script: - # TODO: Cannot get hive/test to pass - > - if [[ $SPARK_HADOOP_VERSION = '2.4.0' ]]; then - mvn clean package -DskipTests -Pyarn -Dhadoop.version=$SPARK_HADOOP_VERSION -Dyarn.version=$SPARK_HADOOP_VERSION - num_jars=$(ls "$FWDIR"/assembly/target/scala-2.10/ | grep "spark-assembly.*hadoop.*.jar" | wc -l) - if [ "$num_jars" -eq "0" ]; then - exit -1 - fi - mvn test -Pyarn -Dhadoop.version=$SPARK_HADOOP_VERSION -Dyarn.version=$SPARK_HADOOP_VERSION -am -pl mllib -pl bagel -pl sql/catalyst -pl yarn - else - SPARK_HADOOP_VERSION=$SPARK_HADOOP_VERSION sbt ++$TRAVIS_SCALA_VERSION assembly/assembly sql/test graphx/test streaming/test - fi \ No newline at end of file + - "sbt ++$TRAVIS_SCALA_VERSION $TEST" \ No newline at end of file From d3488c6e316657ca1436b8cc148cfb9102747658 Mon Sep 17 00:00:00 2001 From: witgo Date: Thu, 1 May 2014 17:19:15 +0800 Subject: [PATCH 14/23] Add the missing yarn dependencies --- pom.xml | 70 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/pom.xml b/pom.xml index 1a7f590cc333..74567d723f32 100644 --- a/pom.xml +++ b/pom.xml @@ -805,6 +805,76 @@ yarn + + + + org.apache.hadoop + hadoop-yarn-api + ${yarn.version} + + + asm + asm + + + org.ow2.asm + asm + + + org.jboss.netty + netty + + + + + org.apache.hadoop + hadoop-yarn-common + ${yarn.version} + + + asm + asm + + + org.ow2.asm + asm + + + org.jboss.netty + netty + + + javax.servlet + servlet-api + + + + + + org.apache.hadoop + hadoop-yarn-client + ${yarn.version} + + + asm + asm + + + org.ow2.asm + asm + + + org.jboss.netty + netty + + + javax.servlet + servlet-api + + + + + From 779ae5d71992da80a20259978b4cd900110c8f30 Mon Sep 17 00:00:00 2001 From: witgo Date: Thu, 1 May 2014 22:58:12 +0800 Subject: [PATCH 15/23] Fix SPARK-1693: Dependent on multiple versions of servlet-api jars lead to throw an SecurityException when Spark built for hadoop 2.3.0 , 2.4.0 --- bagel/pom.xml | 4 ++++ core/pom.xml | 4 ++++ examples/pom.xml | 4 ++++ graphx/pom.xml | 4 ++++ mllib/pom.xml | 4 ++++ pom.xml | 13 +++++++++++++ project/SparkBuild.scala | 22 ++++++++++++---------- repl/pom.xml | 4 ++++ streaming/pom.xml | 4 ++++ 9 files changed, 53 insertions(+), 10 deletions(-) diff --git a/bagel/pom.xml b/bagel/pom.xml index 355f437c5b16..adb691648213 100644 --- a/bagel/pom.xml +++ b/bagel/pom.xml @@ -41,6 +41,10 @@ org.eclipse.jetty jetty-server + + javax.servlet + javax.servlet-api + org.scalatest scalatest_${scala.binary.version} diff --git a/core/pom.xml b/core/pom.xml index 1f71bb95f857..54b53f9d4260 100644 --- a/core/pom.xml +++ 
b/core/pom.xml @@ -65,6 +65,10 @@ org.eclipse.jetty jetty-server + + javax.servlet + javax.servlet-api + com.google.guava guava diff --git a/examples/pom.xml b/examples/pom.xml index e1fc149d87f1..f6b3f8150b96 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -120,6 +120,10 @@ org.eclipse.jetty jetty-server + + javax.servlet + javax.servlet-api + com.twitter algebird-core_${scala.binary.version} diff --git a/graphx/pom.xml b/graphx/pom.xml index dc108d2fe7fb..f8abc7f3c8ac 100644 --- a/graphx/pom.xml +++ b/graphx/pom.xml @@ -46,6 +46,10 @@ org.eclipse.jetty jetty-server + + javax.servlet + javax.servlet-api + org.scalatest scalatest_${scala.binary.version} diff --git a/mllib/pom.xml b/mllib/pom.xml index cdd33dbb7970..947d0d919d64 100644 --- a/mllib/pom.xml +++ b/mllib/pom.xml @@ -41,6 +41,10 @@ org.eclipse.jetty jetty-server + + javax.servlet + javax.servlet-api + org.jblas jblas diff --git a/pom.xml b/pom.xml index 74567d723f32..df40e1808991 100644 --- a/pom.xml +++ b/pom.xml @@ -215,6 +215,19 @@ org.eclipse.jetty jetty-server ${jetty.version} + + + org.eclipse.jetty.orbit + javax.servlet + + + + + + javax.servlet + javax.servlet-api + 3.0.1 com.google.guava diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index 03e25acf6f1b..1b52d596c449 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -266,16 +266,17 @@ object SparkBuild extends Build { */ libraryDependencies ++= Seq( - "io.netty" % "netty-all" % "4.0.17.Final", - "org.eclipse.jetty" % "jetty-server" % jettyVersion, - "org.eclipse.jetty" % "jetty-util" % jettyVersion, - "org.eclipse.jetty" % "jetty-plus" % jettyVersion, - "org.eclipse.jetty" % "jetty-security" % jettyVersion, - "org.scalatest" %% "scalatest" % "1.9.1" % "test", - "org.scalacheck" %% "scalacheck" % "1.10.0" % "test", - "com.novocode" % "junit-interface" % "0.10" % "test", - "org.easymock" % "easymock" % "3.1" % "test", - "org.mockito" % "mockito-all" % "1.8.5" % "test" + "io.netty" % "netty-all" % "4.0.17.Final", + "org.eclipse.jetty" % "jetty-server" % jettyVersion excludeAll(excludeJettyServlet), + "javax.servlet" % "javax.servlet-api" % "3.0.1", + "org.eclipse.jetty" % "jetty-util" % jettyVersion, + "org.eclipse.jetty" % "jetty-plus" % jettyVersion, + "org.eclipse.jetty" % "jetty-security" % jettyVersion, + "org.scalatest" %% "scalatest" % "1.9.1" % "test", + "org.scalacheck" %% "scalacheck" % "1.10.0" % "test", + "com.novocode" % "junit-interface" % "0.10" % "test", + "org.easymock" % "easymock" % "3.1" % "test", + "org.mockito" % "mockito-all" % "1.8.5" % "test" ), testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-a"), @@ -315,6 +316,7 @@ object SparkBuild extends Build { val excludeFastutil = ExclusionRule(organization = "it.unimi.dsi") val excludeJruby = ExclusionRule(organization = "org.jruby") val excludeThrift = ExclusionRule(organization = "org.apache.thrift") + val excludeJettyServlet= ExclusionRule(organization = "org.eclipse.jetty.orbit") def sparkPreviousArtifact(id: String, organization: String = "org.apache.spark", version: String = "0.9.0-incubating", crossVersion: String = "2.10"): Option[sbt.ModuleID] = { diff --git a/repl/pom.xml b/repl/pom.xml index 8539a9394ddd..e9d812ee8030 100644 --- a/repl/pom.xml +++ b/repl/pom.xml @@ -58,6 +58,10 @@ org.eclipse.jetty jetty-server + + javax.servlet + javax.servlet-api + org.scala-lang scala-compiler diff --git a/streaming/pom.xml b/streaming/pom.xml index 6435224a1467..ecbafd833418 100644 --- a/streaming/pom.xml +++ b/streaming/pom.xml @@ -41,6 +41,10 @@ 
org.eclipse.jetty jetty-server + + javax.servlet + javax.servlet-api + org.scala-lang scala-library From 27bd4268a7d0c7300182f43d4edc59ad8154aa0c Mon Sep 17 00:00:00 2001 From: witgo Date: Thu, 1 May 2014 23:01:42 +0800 Subject: [PATCH 16/23] review commit --- yarn/alpha/pom.xml | 1 + yarn/pom.xml | 1 + yarn/stable/pom.xml | 1 + 3 files changed, 3 insertions(+) diff --git a/yarn/alpha/pom.xml b/yarn/alpha/pom.xml index 14e5bfc518b0..e076ca1d44b9 100644 --- a/yarn/alpha/pom.xml +++ b/yarn/alpha/pom.xml @@ -21,6 +21,7 @@ org.apache.spark yarn-parent_2.10 1.0.0-SNAPSHOT + ../pom.xml org.apache.spark diff --git a/yarn/pom.xml b/yarn/pom.xml index 35b5090a102a..1a2729466acd 100644 --- a/yarn/pom.xml +++ b/yarn/pom.xml @@ -21,6 +21,7 @@ org.apache.spark spark-parent 1.0.0-SNAPSHOT + ../pom.xml org.apache.spark diff --git a/yarn/stable/pom.xml b/yarn/stable/pom.xml index 99a04c2ab9cc..0780f251b595 100644 --- a/yarn/stable/pom.xml +++ b/yarn/stable/pom.xml @@ -21,6 +21,7 @@ org.apache.spark yarn-parent_2.10 1.0.0-SNAPSHOT + ../pom.xml org.apache.spark From 54a86b03b5b0b3f90f2b3c78cb9456a0ec81976b Mon Sep 17 00:00:00 2001 From: witgo Date: Fri, 2 May 2014 14:22:36 +0800 Subject: [PATCH 17/23] review commit --- pom.xml | 3 +-- repl/pom.xml | 1 - yarn/pom.xml | 1 - 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/pom.xml b/pom.xml index df40e1808991..e9f51abfc85d 100644 --- a/pom.xml +++ b/pom.xml @@ -701,9 +701,8 @@ -Xmx3g -XX:MaxPermSize=${MaxPermGen} -XX:ReservedCodeCacheSize=512m - ${basedir}/.. + ${session.executionRootDirectory} 1 - ${spark.classpath} diff --git a/repl/pom.xml b/repl/pom.xml index e9d812ee8030..c686b2f978ef 100644 --- a/repl/pom.xml +++ b/repl/pom.xml @@ -102,7 +102,6 @@ ${basedir}/.. - 1 diff --git a/yarn/pom.xml b/yarn/pom.xml index 1a2729466acd..b8714b54502e 100644 --- a/yarn/pom.xml +++ b/yarn/pom.xml @@ -119,7 +119,6 @@ ${basedir}/../.. 
- 1 ${spark.classpath} From 882e35d354e98b8a416583038b72a49d0a331d8e Mon Sep 17 00:00:00 2001 From: witgo Date: Fri, 2 May 2014 14:24:05 +0800 Subject: [PATCH 18/23] review commit --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index e9f51abfc85d..ad9a2ca70e9e 100644 --- a/pom.xml +++ b/pom.xml @@ -926,7 +926,7 @@ yarn 2 - 2.3.0 + 2.2.0 2.5.0 From 31451dfe66e14f6ccf7daa32287190c7ef7780ca Mon Sep 17 00:00:00 2001 From: witgo Date: Sat, 3 May 2014 11:15:32 +0800 Subject: [PATCH 19/23] Compile hive optional --- pom.xml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index ad9a2ca70e9e..71def98165f0 100644 --- a/pom.xml +++ b/pom.xml @@ -94,7 +94,6 @@ streaming sql/catalyst sql/core - sql/hive repl assembly external/twitter @@ -1049,6 +1048,12 @@ + + hive + + sql/hive + + From d9a31db82b30ebfa6c27227507e2e20bb1e8d08a Mon Sep 17 00:00:00 2001 From: witgo Date: Sat, 3 May 2014 14:20:52 +0800 Subject: [PATCH 20/23] SPARK-1699: Python relative should be independence from the core, becomes subprojects --- assembly/pom.xml | 20 +- bagel/pom.xml | 4 + bin/compute-classpath.sh | 1 + bin/spark-class | 4 +- core/pom.xml | 75 +----- .../scala/org/apache/spark/SparkEnv.scala | 22 +- docs/building-with-maven.md | 9 +- examples/pom.xml | 4 + graphx/pom.xml | 4 + make-distribution.sh | 40 ++- mllib/pom.xml | 4 + .../MatrixFactorizationModel.scala | 15 -- pom.xml | 236 +++++++++++++----- project/SparkBuild.scala | 24 +- python-api/pom.xml | 111 ++++++++ .../org/apache/spark/PythonSparkEnv.scala | 80 ++++++ .../spark/api/python/PythonPartitioner.scala | 0 .../apache/spark/api/python/PythonRDD.scala | 4 +- .../api/python/PythonWorkerFactory.scala | 6 +- .../mllib/api/python/PythonMLLibAPI.scala | 14 +- .../MatrixFactorizationModel.scala | 28 +++ .../spark/api/python/PythonRDDSuite.scala | 0 repl/pom.xml | 34 +-- streaming/pom.xml | 4 + yarn/pom.xml | 33 +-- 25 files changed, 501 insertions(+), 275 deletions(-) create mode 100644 python-api/pom.xml create mode 100644 python-api/src/main/scala/org/apache/spark/PythonSparkEnv.scala rename {core => python-api}/src/main/scala/org/apache/spark/api/python/PythonPartitioner.scala (100%) rename {core => python-api}/src/main/scala/org/apache/spark/api/python/PythonRDD.scala (99%) rename {core => python-api}/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala (99%) rename {mllib => python-api}/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala (95%) create mode 100644 python-api/src/main/scala/org/apache/spark/mllib/api/python/recommendation/MatrixFactorizationModel.scala rename {core => python-api}/src/test/scala/org/apache/spark/api/python/PythonRDDSuite.scala (100%) diff --git a/assembly/pom.xml b/assembly/pom.xml index bdb38806492a..057324e88907 100644 --- a/assembly/pom.xml +++ b/assembly/pom.xml @@ -84,11 +84,6 @@ spark-sql_${scala.binary.version} ${project.version} - - net.sf.py4j - py4j - 0.8.1 - @@ -173,6 +168,21 @@ + + python + + + net.sf.py4j + py4j + 0.8.1 + + + org.apache.spark + python-api_${scala.binary.version} + ${project.version} + + + spark-ganglia-lgpl diff --git a/bagel/pom.xml b/bagel/pom.xml index 355f437c5b16..adb691648213 100644 --- a/bagel/pom.xml +++ b/bagel/pom.xml @@ -41,6 +41,10 @@ org.eclipse.jetty jetty-server + + javax.servlet + javax.servlet-api + org.scalatest scalatest_${scala.binary.version} diff --git a/bin/compute-classpath.sh b/bin/compute-classpath.sh index b0218531e9eb..70ac71e45db4 100755 --- 
a/bin/compute-classpath.sh +++ b/bin/compute-classpath.sh @@ -44,6 +44,7 @@ if [ -f "$ASSEMBLY_DIR"/spark-assembly*hadoop*-deps.jar ]; then CLASSPATH="$CLASSPATH:$FWDIR/sql/catalyst/target/scala-$SCALA_VERSION/classes" CLASSPATH="$CLASSPATH:$FWDIR/sql/core/target/scala-$SCALA_VERSION/classes" CLASSPATH="$CLASSPATH:$FWDIR/sql/hive/target/scala-$SCALA_VERSION/classes" + CLASSPATH="$CLASSPATH:$FWDIR/yarn/stable/target/scala-$SCALA_VERSION/classes" DEPS_ASSEMBLY_JAR=`ls "$ASSEMBLY_DIR"/spark-assembly*hadoop*-deps.jar` CLASSPATH="$CLASSPATH:$DEPS_ASSEMBLY_JAR" diff --git a/bin/spark-class b/bin/spark-class index e8160c8af64c..7506b82a48a2 100755 --- a/bin/spark-class +++ b/bin/spark-class @@ -110,8 +110,8 @@ export JAVA_OPTS if [ ! -f "$FWDIR/RELEASE" ]; then # Exit if the user hasn't compiled Spark - num_jars=$(ls "$FWDIR"/assembly/target/scala-$SCALA_VERSION/ | grep "spark-assembly.*hadoop.*.jar" | wc -l) - jars_list=$(ls "$FWDIR"/assembly/target/scala-$SCALA_VERSION/ | grep "spark-assembly.*hadoop.*.jar") + num_jars=$(ls "$FWDIR"/assembly/target/scala-$SCALA_VERSION/ | grep -E "spark-assembly.*hadoop.*.jar$" | wc -l) + jars_list=$(ls "$FWDIR"/assembly/target/scala-$SCALA_VERSION/ | grep -E "spark-assembly.*hadoop.*.jar$") if [ "$num_jars" -eq "0" ]; then echo "Failed to find Spark assembly in $FWDIR/assembly/target/scala-$SCALA_VERSION/" >&2 echo "You need to build Spark with 'sbt/sbt assembly' before running this program." >&2 diff --git a/core/pom.xml b/core/pom.xml index 822b5b1dd7cc..1fc6099a0665 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -65,6 +65,10 @@ org.eclipse.jetty jetty-server + + javax.servlet + javax.servlet-api + com.google.guava guava @@ -254,35 +258,6 @@ target/scala-${scala.binary.version}/classes target/scala-${scala.binary.version}/test-classes - - org.apache.maven.plugins - maven-antrun-plugin - - - test - - run - - - true - - - - - - - - - - - - - - - - - - org.scalatest scalatest-maven-plugin @@ -294,48 +269,6 @@ - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - generate-resources - - exec - - - - - unzip - ../python - - -o - lib/py4j*.zip - -d - build - - - - - - - src/main/resources - - - ../python - - pyspark/*.py - - - - ../python/build - - py4j/*.py - - - diff --git a/core/src/main/scala/org/apache/spark/SparkEnv.scala b/core/src/main/scala/org/apache/spark/SparkEnv.scala index bea435ec34ce..454a6e744e4d 100644 --- a/core/src/main/scala/org/apache/spark/SparkEnv.scala +++ b/core/src/main/scala/org/apache/spark/SparkEnv.scala @@ -26,7 +26,6 @@ import akka.actor._ import com.google.common.collect.MapMaker import org.apache.spark.annotation.DeveloperApi -import org.apache.spark.api.python.PythonWorkerFactory import org.apache.spark.broadcast.BroadcastManager import org.apache.spark.metrics.MetricsSystem import org.apache.spark.network.ConnectionManager @@ -67,15 +66,14 @@ class SparkEnv ( // A mapping of thread ID to amount of memory used for shuffle in bytes // All accesses should be manually synchronized val shuffleMemoryMap = mutable.HashMap[Long, Long]() - - private val pythonWorkers = mutable.HashMap[(String, Map[String, String]), PythonWorkerFactory]() + val closeables = mutable.ListBuffer[java.io.Closeable]() // A general, soft-reference map for metadata needed during HadoopRDD split computation // (e.g., HadoopFileRDD uses this to cache JobConfs and InputFormats). 
private[spark] val hadoopJobMetadata = new MapMaker().softValues().makeMap[String, Any]() private[spark] def stop() { - pythonWorkers.foreach { case(key, worker) => worker.stop() } + closeables.toList.foreach(_.close()) httpFileServer.stop() mapOutputTracker.stop() shuffleFetcher.stop() @@ -89,22 +87,6 @@ class SparkEnv ( // UPDATE: In Akka 2.1.x, this hangs if there are remote actors, so we can't call it. // actorSystem.awaitTermination() } - - private[spark] - def createPythonWorker(pythonExec: String, envVars: Map[String, String]): java.net.Socket = { - synchronized { - val key = (pythonExec, envVars) - pythonWorkers.getOrElseUpdate(key, new PythonWorkerFactory(pythonExec, envVars)).create() - } - } - - private[spark] - def destroyPythonWorker(pythonExec: String, envVars: Map[String, String]) { - synchronized { - val key = (pythonExec, envVars) - pythonWorkers(key).stop() - } - } } object SparkEnv extends Logging { diff --git a/docs/building-with-maven.md b/docs/building-with-maven.md index a5e530346740..e447dfea3bac 100644 --- a/docs/building-with-maven.md +++ b/docs/building-with-maven.md @@ -45,17 +45,20 @@ For Apache Hadoop versions 1.x, Cloudera CDH MRv1, and other Hadoop versions wit For Apache Hadoop 2.x, 0.23.x, Cloudera CDH MRv2, and other Hadoop versions with YARN, you can enable the "yarn-alpha" or "yarn" profile and set the "hadoop.version", "yarn.version" property. Note that Hadoop 0.23.X requires a special `-Phadoop-0.23` profile: # Apache Hadoop 2.0.5-alpha - $ mvn -Pyarn-alpha -Dhadoop.version=2.0.5-alpha -Dyarn.version=2.0.5-alpha -DskipTests clean package + $ mvn -Pyarn-alpha -Dhadoop.version=2.0.5-alpha -DskipTests clean package # Cloudera CDH 4.2.0 with MapReduce v2 - $ mvn -Pyarn-alpha -Dhadoop.version=2.0.0-cdh4.2.0 -Dyarn.version=2.0.0-cdh4.2.0 -DskipTests clean package + $ mvn -Pyarn-alpha -Dhadoop.version=2.0.0-cdh4.2.0 -DskipTests clean package # Apache Hadoop 2.2.X (e.g. 2.2.0 as below) and newer - $ mvn -Pyarn -Dhadoop.version=2.2.0 -Dyarn.version=2.2.0 -DskipTests clean package + $ mvn -Pyarn -Dhadoop.version=2.2.0 -DskipTests clean package # Apache Hadoop 0.23.x $ mvn -Pyarn-alpha -Phadoop-0.23 -Dhadoop.version=0.23.7 -Dyarn.version=0.23.7 -DskipTests clean package + # Different versions of HDFS and YARN. + $ mvn -Pyarn-alpha -Dhadoop.version=2.3.0 -Dyarn.version=0.23.7 -DskipTests clean package + ## Spark Tests in Maven ## Tests are run by default via the [ScalaTest Maven plugin](http://www.scalatest.org/user_guide/using_the_scalatest_maven_plugin). Some of the require Spark to be packaged first, so always run `mvn package` with `-DskipTests` the first time. You can then run the tests with `mvn -Dhadoop.version=... test`. 
diff --git a/examples/pom.xml b/examples/pom.xml index e1fc149d87f1..f6b3f8150b96 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -120,6 +120,10 @@ org.eclipse.jetty jetty-server + + javax.servlet + javax.servlet-api + com.twitter algebird-core_${scala.binary.version} diff --git a/graphx/pom.xml b/graphx/pom.xml index dc108d2fe7fb..f8abc7f3c8ac 100644 --- a/graphx/pom.xml +++ b/graphx/pom.xml @@ -46,6 +46,10 @@ org.eclipse.jetty jetty-server + + javax.servlet + javax.servlet-api + org.scalatest scalatest_${scala.binary.version} diff --git a/make-distribution.sh b/make-distribution.sh index c05dcd89d90a..dc7723e80183 100755 --- a/make-distribution.sh +++ b/make-distribution.sh @@ -55,6 +55,7 @@ SPARK_HADOOP_VERSION=1.0.4 SPARK_YARN=false SPARK_HIVE=false SPARK_TACHYON=false +SPARK_PYTHON=true MAKE_TGZ=false NAME=none @@ -105,6 +106,12 @@ else echo "YARN disabled" fi +if [ "$SPARK_PYTHON" == "true" ]; then + echo "Python enabled" +else + echo "Python disabled" +fi + if [ "$SPARK_TACHYON" == "true" ]; then echo "Tachyon Enabled" else @@ -122,22 +129,31 @@ else MAYBE_HIVE="" fi +if [[ "$SPARK_HADOOP_VERSION" =~ "0.23." ]]; then + MAYBE_HADOOP023="-Phadoop-0.23" +else + MAYBE_HADOOP023="" +fi + if [ "$SPARK_YARN" == "true" ]; then - if [[ "$SPARK_HADOOP_VERSION" =~ "0.23." ]]; then - mvn clean package -DskipTests -Pyarn-alpha -Dhadoop.version=$SPARK_HADOOP_VERSION \ - -Dyarn.version=$SPARK_HADOOP_VERSION $MAYBE_HIVE -Phadoop-0.23 + if [[ "$SPARK_HADOOP_VERSION" =~ "0.23." || "$SPARK_HADOOP_VERSION" =~ "2.0." ]]; then + MAYBE_YARN="-Pyarn-alpha -Dyarn.version=$SPARK_HADOOP_VERSION" else - mvn clean package -DskipTests -Pyarn -Dhadoop.version=$SPARK_HADOOP_VERSION \ - -Dyarn.version=$SPARK_HADOOP_VERSION $MAYBE_HIVE + MAYBE_YARN="-Pyarn -Dyarn.version=$SPARK_HADOOP_VERSION" fi else - if [[ "$SPARK_HADOOP_VERSION" =~ "0.23." 
]]; then - mvn clean package -Phadoop-0.23 -DskipTests -Dhadoop.version=$SPARK_HADOOP_VERSION $MAYBE_HIVE - else - mvn clean package -DskipTests -Dhadoop.version=$SPARK_HADOOP_VERSION $MAYBE_HIVE - fi + MAYBE_YARN="" +fi + +if [ "$SPARK_PYTHON" == "true" ]; then + MAYBE_PYTHON="-Ppython" +else + MAYBE_PYTHON="" fi +mvn package -Dhadoop.version=$SPARK_HADOOP_VERSION \ +-DskipTests $MAYBE_HIVE $MAYBE_HADOOP023 $MAYBE_YARN $MAYBE_PYTHON + # Make directories rm -rf "$DISTDIR" mkdir -p "$DISTDIR/lib" @@ -152,9 +168,11 @@ mkdir "$DISTDIR"/conf cp "$FWDIR"/conf/*.template "$DISTDIR"/conf cp "$FWDIR"/conf/slaves "$DISTDIR"/conf cp -r "$FWDIR/bin" "$DISTDIR" -cp -r "$FWDIR/python" "$DISTDIR" cp -r "$FWDIR/sbin" "$DISTDIR" +if [ "$SPARK_PYTHON" == "true" ]; then + cp -r "$FWDIR/python" "$DISTDIR" +fi # Download and copy in tachyon, if requested if [ "$SPARK_TACHYON" == "true" ]; then diff --git a/mllib/pom.xml b/mllib/pom.xml index cdd33dbb7970..947d0d919d64 100644 --- a/mllib/pom.xml +++ b/mllib/pom.xml @@ -41,6 +41,10 @@ org.eclipse.jetty jetty-server + + javax.servlet + javax.servlet-api + org.jblas jblas diff --git a/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala index 471546cd82c7..b17b95f62dde 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala @@ -23,7 +23,6 @@ import org.apache.spark.annotation.DeveloperApi import org.apache.spark.api.java.JavaRDD import org.apache.spark.rdd.RDD import org.apache.spark.SparkContext._ -import org.apache.spark.mllib.api.python.PythonMLLibAPI /** @@ -68,20 +67,6 @@ class MatrixFactorizationModel( } } - /** - * :: DeveloperApi :: - * Predict the rating of many users for many products. - * This is a Java stub for python predictAll() - * - * @param usersProductsJRDD A JavaRDD with serialized tuples (user, product) - * @return JavaRDD of serialized Rating objects. - */ - def predict(usersProductsJRDD: JavaRDD[Array[Byte]]): JavaRDD[Array[Byte]] = { - val pythonAPI = new PythonMLLibAPI() - val usersProducts = usersProductsJRDD.rdd.map(xBytes => pythonAPI.unpackTuple(xBytes)) - predict(usersProducts).map(rate => pythonAPI.serializeRating(rate)) - } - // TODO: Figure out what other good bulk prediction methods would look like. // Probably want a way to get the top users for a product or vice-versa. } diff --git a/pom.xml b/pom.xml index 646753fe3030..3878c04266c6 100644 --- a/pom.xml +++ b/pom.xml @@ -16,7 +16,8 @@ ~ limitations under the License. 
--> - + 4.0.0 org.apache @@ -93,7 +94,6 @@ streaming sql/catalyst sql/core - sql/hive repl assembly external/twitter @@ -102,6 +102,8 @@ external/zeromq external/mqtt examples + sql/hive + python-api @@ -119,7 +121,7 @@ 1.2.17 1.0.4 2.4.1 - 0.23.7 + ${hadoop.version} 0.94.6 0.12.0 1.3.2 @@ -135,7 +137,8 @@ - maven-repo + maven-repo + Maven Repository http://repo.maven.apache.org/maven2 @@ -213,6 +216,19 @@ org.eclipse.jetty jetty-server ${jetty.version} + + + org.eclipse.jetty.orbit + javax.servlet + + + + + + javax.servlet + javax.servlet-api + 3.0.1 com.google.guava @@ -371,6 +387,11 @@ commons-net 2.2 + + commons-lang + commons-lang + 2.5 + io.netty netty-all @@ -558,64 +579,7 @@ jets3t 0.7.1 - - org.apache.hadoop - hadoop-yarn-api - ${yarn.version} - - - asm - asm - - - org.ow2.asm - asm - - - org.jboss.netty - netty - - - - - org.apache.hadoop - hadoop-yarn-common - ${yarn.version} - - - asm - asm - - - org.ow2.asm - asm - - - org.jboss.netty - netty - - - - - org.apache.hadoop - hadoop-yarn-client - ${yarn.version} - - - asm - asm - - - org.ow2.asm - asm - - - org.jboss.netty - netty - - - org.codehaus.jackson @@ -737,6 +701,10 @@ ${project.build.directory}/SparkTestSuite.txt -Xmx3g -XX:MaxPermSize=${MaxPermGen} -XX:ReservedCodeCacheSize=512m + + ${session.executionRootDirectory} + 1 + @@ -850,12 +818,76 @@ yarn - - - org.apache.avro - avro - - + + + + org.apache.hadoop + hadoop-yarn-api + ${yarn.version} + + + asm + asm + + + org.ow2.asm + asm + + + org.jboss.netty + netty + + + + + org.apache.hadoop + hadoop-yarn-common + ${yarn.version} + + + asm + asm + + + org.ow2.asm + asm + + + org.jboss.netty + netty + + + javax.servlet + servlet-api + + + + + + org.apache.hadoop + hadoop-yarn-client + ${yarn.version} + + + asm + asm + + + org.ow2.asm + asm + + + org.jboss.netty + netty + + + javax.servlet + servlet-api + + + + + @@ -901,7 +933,76 @@ yarn + + + + org.apache.hadoop + hadoop-yarn-api + ${yarn.version} + + + asm + asm + + + org.ow2.asm + asm + + + org.jboss.netty + netty + + + + + org.apache.hadoop + hadoop-yarn-common + ${yarn.version} + + + asm + asm + + + org.ow2.asm + asm + + + org.jboss.netty + netty + + + javax.servlet + servlet-api + + + + + org.apache.hadoop + hadoop-yarn-client + ${yarn.version} + + + asm + asm + + + org.ow2.asm + asm + + + org.jboss.netty + netty + + + javax.servlet + servlet-api + + + + + @@ -949,6 +1050,5 @@ - diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index 51f733511116..1b52d596c449 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -55,7 +55,7 @@ object SparkBuild extends Build { val SCALAC_JVM_VERSION = "jvm-1.6" val JAVAC_JVM_VERSION = "1.6" - lazy val root = Project("root", file("."), settings = rootSettings) aggregate(allProjects: _*) + lazy val root = Project("spark", file("."), settings = rootSettings) aggregate(allProjects: _*) lazy val core = Project("core", file("core"), settings = coreSettings) @@ -266,16 +266,17 @@ object SparkBuild extends Build { */ libraryDependencies ++= Seq( - "io.netty" % "netty-all" % "4.0.17.Final", - "org.eclipse.jetty" % "jetty-server" % jettyVersion, - "org.eclipse.jetty" % "jetty-util" % jettyVersion, - "org.eclipse.jetty" % "jetty-plus" % jettyVersion, - "org.eclipse.jetty" % "jetty-security" % jettyVersion, - "org.scalatest" %% "scalatest" % "1.9.1" % "test", - "org.scalacheck" %% "scalacheck" % "1.10.0" % "test", - "com.novocode" % "junit-interface" % "0.10" % "test", - "org.easymock" % "easymock" % "3.1" % "test", - "org.mockito" % "mockito-all" % "1.8.5" % 
"test" + "io.netty" % "netty-all" % "4.0.17.Final", + "org.eclipse.jetty" % "jetty-server" % jettyVersion excludeAll(excludeJettyServlet), + "javax.servlet" % "javax.servlet-api" % "3.0.1", + "org.eclipse.jetty" % "jetty-util" % jettyVersion, + "org.eclipse.jetty" % "jetty-plus" % jettyVersion, + "org.eclipse.jetty" % "jetty-security" % jettyVersion, + "org.scalatest" %% "scalatest" % "1.9.1" % "test", + "org.scalacheck" %% "scalacheck" % "1.10.0" % "test", + "com.novocode" % "junit-interface" % "0.10" % "test", + "org.easymock" % "easymock" % "3.1" % "test", + "org.mockito" % "mockito-all" % "1.8.5" % "test" ), testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-a"), @@ -315,6 +316,7 @@ object SparkBuild extends Build { val excludeFastutil = ExclusionRule(organization = "it.unimi.dsi") val excludeJruby = ExclusionRule(organization = "org.jruby") val excludeThrift = ExclusionRule(organization = "org.apache.thrift") + val excludeJettyServlet= ExclusionRule(organization = "org.eclipse.jetty.orbit") def sparkPreviousArtifact(id: String, organization: String = "org.apache.spark", version: String = "0.9.0-incubating", crossVersion: String = "2.10"): Option[sbt.ModuleID] = { diff --git a/python-api/pom.xml b/python-api/pom.xml new file mode 100644 index 000000000000..69a4739e8254 --- /dev/null +++ b/python-api/pom.xml @@ -0,0 +1,111 @@ + + + + + 4.0.0 + + org.apache.spark + spark-parent + 1.0.0-SNAPSHOT + ../pom.xml + + + org.apache.spark + python-api_2.10 + jar + Spark Project Python API + http://spark.apache.org/ + + + + org.apache.spark + spark-core_${scala.binary.version} + ${project.version} + + + org.apache.spark + spark-mllib_${scala.binary.version} + ${project.version} + provided + + + org.spark-project + pyrolite + 2.0.1 + + + org.scalatest + scalatest_${scala.binary.version} + test + + + org.scalacheck + scalacheck_${scala.binary.version} + test + + + + + target/scala-${scala.binary.version}/classes + target/scala-${scala.binary.version}/test-classes + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + generate-resources + + exec + + + + + unzip + ../python + + -o + lib/py4j*.zip + -d + build + + + + + + + src/main/resources + + + ../python + + pyspark/*.py + + + + ../python/build + + py4j/*.py + + + + + diff --git a/python-api/src/main/scala/org/apache/spark/PythonSparkEnv.scala b/python-api/src/main/scala/org/apache/spark/PythonSparkEnv.scala new file mode 100644 index 000000000000..40bc23c6df7f --- /dev/null +++ b/python-api/src/main/scala/org/apache/spark/PythonSparkEnv.scala @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark + +import scala.collection.JavaConversions._ +import scala.collection.mutable +import org.apache.spark.annotation.DeveloperApi +import org.apache.spark.api.python.PythonWorkerFactory + +/** + * :: DeveloperApi :: + * Holds all the runtime environment objects for a running Spark instance (either master or worker), + * including the serializer, Akka actor system, block manager, map output tracker, etc. Currently + * Spark code finds the SparkEnv through a thread-local variable, so each thread that accesses these + * objects needs to have the right SparkEnv set. You can get the current environment with + * SparkEnv.get (e.g. after creating a SparkContext) and set it with SparkEnv.set. + * + * NOTE: This is not intended for external use. This is exposed for Shark and may be made private + * in a future release. + */ +@DeveloperApi +class PythonSparkEnv(val sparkEnv: SparkEnv) { + private val pythonWorkers = mutable.HashMap[(String, Map[String, String]), PythonWorkerFactory]() + + sparkEnv.closeables += new java.io.Closeable { + override def close() { + pythonWorkers.foreach { + case (key, worker) => worker.stop() + } + } + } + + private[spark] + def createPythonWorker(pythonExec: String, envVars: Map[String, String]): java.net.Socket = { + synchronized { + val key = (pythonExec, envVars) + pythonWorkers.getOrElseUpdate(key, new PythonWorkerFactory(pythonExec, envVars)).create() + } + } + + private[spark] + def destroyPythonWorker(pythonExec: String, envVars: Map[String, String]) { + synchronized { + val key = (pythonExec, envVars) + pythonWorkers(key).stop() + } + } + +} + +object PythonSparkEnv extends Logging { + private val env = new ThreadLocal[PythonSparkEnv] + + def get: PythonSparkEnv = { + if (env.get == null) { + env.set(new PythonSparkEnv(SparkEnv.get)) + } + env.get + } + + def set(e: PythonSparkEnv) { + env.set(e) + } + +} \ No newline at end of file diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonPartitioner.scala b/python-api/src/main/scala/org/apache/spark/api/python/PythonPartitioner.scala similarity index 100% rename from core/src/main/scala/org/apache/spark/api/python/PythonPartitioner.scala rename to python-api/src/main/scala/org/apache/spark/api/python/PythonPartitioner.scala diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala b/python-api/src/main/scala/org/apache/spark/api/python/PythonRDD.scala similarity index 99% rename from core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala rename to python-api/src/main/scala/org/apache/spark/api/python/PythonRDD.scala index 672c344a5659..efe4fbcd00fa 100644 --- a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala +++ b/python-api/src/main/scala/org/apache/spark/api/python/PythonRDD.scala @@ -53,7 +53,7 @@ private[spark] class PythonRDD[T: ClassTag]( override def compute(split: Partition, context: TaskContext): Iterator[Array[Byte]] = { val startTime = System.currentTimeMillis - val env = SparkEnv.get + val env = PythonSparkEnv.get val worker = env.createPythonWorker(pythonExec, envVars.toMap) @volatile var readerException: Exception = null @@ -62,7 +62,7 @@ private[spark] class PythonRDD[T: ClassTag]( new Thread("stdin writer for " + pythonExec) { override def run() { try { - SparkEnv.set(env) + PythonSparkEnv.set(env) val stream = new BufferedOutputStream(worker.getOutputStream, bufferSize) val dataOut = new DataOutputStream(stream) // Partition index diff --git 
a/core/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala b/python-api/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala similarity index 99% rename from core/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala rename to python-api/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala index 02799ce0091b..8f8b103e42c6 100644 --- a/core/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala +++ b/python-api/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala @@ -25,7 +25,7 @@ import scala.collection.JavaConversions._ import org.apache.spark._ private[spark] class PythonWorkerFactory(pythonExec: String, envVars: Map[String, String]) - extends Logging { + extends Logging { // Because forking processes from Java is expensive, we prefer to launch a single Python daemon // (pyspark/daemon.py) and tell it to fork new workers for our tasks. This daemon currently @@ -86,6 +86,7 @@ private[spark] class PythonWorkerFactory(pythonExec: String, envVars: Map[String // Redirect the worker's stderr to ours new Thread("stderr reader for " + pythonExec) { setDaemon(true) + override def run() { scala.util.control.Exception.ignoring(classOf[IOException]) { // FIXME: We copy the stream on the level of bytes to avoid encoding problems. @@ -103,6 +104,7 @@ private[spark] class PythonWorkerFactory(pythonExec: String, envVars: Map[String // Redirect worker's stdout to our stderr new Thread("stdout reader for " + pythonExec) { setDaemon(true) + override def run() { scala.util.control.Exception.ignoring(classOf[IOException]) { // FIXME: We copy the stream on the level of bytes to avoid encoding problems. @@ -159,6 +161,7 @@ private[spark] class PythonWorkerFactory(pythonExec: String, envVars: Map[String // Redirect the stderr to ours new Thread("stderr reader for " + pythonExec) { setDaemon(true) + override def run() { scala.util.control.Exception.ignoring(classOf[IOException]) { // FIXME: We copy the stream on the level of bytes to avoid encoding problems. @@ -179,6 +182,7 @@ private[spark] class PythonWorkerFactory(pythonExec: String, envVars: Map[String // Redirect further stdout output to our stderr new Thread("stdout reader for " + pythonExec) { setDaemon(true) + override def run() { scala.util.control.Exception.ignoring(classOf[IOException]) { // FIXME: We copy the stream on the level of bytes to avoid encoding problems. diff --git a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala b/python-api/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala similarity index 95% rename from mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala rename to python-api/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala index 7c65b0d4750f..f0852f290fdd 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala +++ b/python-api/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala @@ -27,6 +27,8 @@ import org.apache.spark.mllib.linalg.{SparseVector, Vector, Vectors} import org.apache.spark.mllib.recommendation._ import org.apache.spark.mllib.regression._ import org.apache.spark.rdd.RDD +import org.apache.spark.mllib.api.python.recommendation. 
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala b/python-api/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
similarity index 95%
rename from mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
rename to python-api/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
index 7c65b0d4750f..f0852f290fdd 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
+++ b/python-api/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
@@ -27,6 +27,8 @@ import org.apache.spark.mllib.linalg.{SparseVector, Vector, Vectors}
 import org.apache.spark.mllib.recommendation._
 import org.apache.spark.mllib.regression._
 import org.apache.spark.rdd.RDD
+import org.apache.spark.mllib.api.python.recommendation.{MatrixFactorizationModel =>
+  PythonMatrixFactorizationModel}
 
 /**
  * :: DeveloperApi ::
@@ -384,9 +386,9 @@ class PythonMLLibAPI extends Serializable {
       rank: Int,
       iterations: Int,
       lambda: Double,
-      blocks: Int): MatrixFactorizationModel = {
+      blocks: Int): PythonMatrixFactorizationModel = {
     val ratings = ratingsBytesJRDD.rdd.map(unpackRating)
-    ALS.train(ratings, rank, iterations, lambda, blocks)
+    pythonModel(ALS.train(ratings, rank, iterations, lambda, blocks))
   }
 
   /**
@@ -401,8 +403,12 @@ class PythonMLLibAPI extends Serializable {
       iterations: Int,
       lambda: Double,
       blocks: Int,
-      alpha: Double): MatrixFactorizationModel = {
+      alpha: Double): PythonMatrixFactorizationModel = {
     val ratings = ratingsBytesJRDD.rdd.map(unpackRating)
-    ALS.trainImplicit(ratings, rank, iterations, lambda, blocks, alpha)
+    pythonModel(ALS.trainImplicit(ratings, rank, iterations, lambda, blocks, alpha))
+  }
+
+  private def pythonModel(model: MatrixFactorizationModel): PythonMatrixFactorizationModel = {
+    new PythonMatrixFactorizationModel(model.rank, model.userFeatures, model.productFeatures)
   }
 }
diff --git a/python-api/src/main/scala/org/apache/spark/mllib/api/python/recommendation/MatrixFactorizationModel.scala b/python-api/src/main/scala/org/apache/spark/mllib/api/python/recommendation/MatrixFactorizationModel.scala
new file mode 100644
index 000000000000..9dd0a7246797
--- /dev/null
+++ b/python-api/src/main/scala/org/apache/spark/mllib/api/python/recommendation/MatrixFactorizationModel.scala
@@ -0,0 +1,28 @@
+package org.apache.spark.mllib.api.python.recommendation
+
+import org.apache.spark.api.java.JavaRDD
+import org.apache.spark.rdd.RDD
+import org.apache.spark.mllib.api.python.PythonMLLibAPI
+
+class MatrixFactorizationModel(
+    override val rank: Int,
+    override val userFeatures: RDD[(Int, Array[Double])],
+    override val productFeatures: RDD[(Int, Array[Double])])
+  extends org.apache.spark.mllib.recommendation.MatrixFactorizationModel(rank,
+    userFeatures, productFeatures) {
+
+  /**
+   * :: DeveloperApi ::
+   * Predict the rating of many users for many products.
+   * This is a Java stub for the Python predictAll().
+   *
+   * @param usersProductsJRDD A JavaRDD with serialized tuples (user, product)
+   * @return JavaRDD of serialized Rating objects.
+   */
+  def predict(usersProductsJRDD: JavaRDD[Array[Byte]]): JavaRDD[Array[Byte]] = {
+    val pythonAPI = new PythonMLLibAPI()
+    val usersProducts = usersProductsJRDD.rdd.map(xBytes => pythonAPI.unpackTuple(xBytes))
+    predict(usersProducts).map(rate => pythonAPI.serializeRating(rate))
+  }
+
+}
diff --git a/core/src/test/scala/org/apache/spark/api/python/PythonRDDSuite.scala b/python-api/src/test/scala/org/apache/spark/api/python/PythonRDDSuite.scala
similarity index 100%
rename from core/src/test/scala/org/apache/spark/api/python/PythonRDDSuite.scala
rename to python-api/src/test/scala/org/apache/spark/api/python/PythonRDDSuite.scala
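The subclass above exists so the Python gateway can call `predict` with MLlib's serialized byte format instead of typed `(user, product)` pairs, and `PythonMLLibAPI` now hands that subclass back from ALS training. Roughly, from the JVM side of the gateway (a sketch only: `trainALSModel` is a stand-in for the actual training entry point, whose name lies outside the hunk, and the byte-array RDDs come from PySpark's serializers):

```scala
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.mllib.api.python.PythonMLLibAPI

// `ratingsBytes` and `userProductBytes` would arrive over Py4J as JavaRDDs of
// byte arrays in MLlib's private serialization format.
def recommend(ratingsBytes: JavaRDD[Array[Byte]],
              userProductBytes: JavaRDD[Array[Byte]]): JavaRDD[Array[Byte]] = {
  val api = new PythonMLLibAPI()
  // Training now returns the Python-facing model subclass (hypothetical method
  // name and argument spelling; only the return type is shown in the diff).
  val model = api.trainALSModel(ratingsBytes, 10, 5, 0.01, -1)
  // The overload added above takes serialized (user, product) tuples and
  // returns serialized Rating objects, ready to ship back to Python.
  model.predict(userProductBytes)
}
```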
diff --git a/repl/pom.xml b/repl/pom.xml
index b761a176ce25..c686b2f978ef 100644
--- a/repl/pom.xml
+++ b/repl/pom.xml
@@ -58,6 +58,10 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>javax.servlet-api</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.scala-lang</groupId>
       <artifactId>scala-compiler</artifactId>
@@ -92,42 +96,12 @@
     <outputDirectory>target/scala-${scala.binary.version}/classes</outputDirectory>
     <testOutputDirectory>target/scala-${scala.binary.version}/test-classes</testOutputDirectory>
     <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-antrun-plugin</artifactId>
-        <executions>
-          <execution>
-            <phase>test</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <exportAntProperties>true</exportAntProperties>
-              <tasks>
-                <property name="spark.classpath" refid="maven.test.classpath" />
-                <property environment="env" />
-                <fail message="Please set the SCALA_HOME (or SCALA_LIBRARY_PATH if scala is on the path) environment variables and retry.">
-                  <condition>
-                    <not>
-                      <or>
-                        <isset property="env.SCALA_HOME" />
-                        <isset property="env.SCALA_LIBRARY_PATH" />
-                      </or>
-                    </not>
-                  </condition>
-                </fail>
-              </tasks>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
       <plugin>
         <groupId>org.scalatest</groupId>
         <artifactId>scalatest-maven-plugin</artifactId>
         <configuration>
           <environmentVariables>
             <SPARK_HOME>${basedir}/..</SPARK_HOME>
-            <SPARK_TESTING>1</SPARK_TESTING>
           </environmentVariables>
         </configuration>
       </plugin>
diff --git a/streaming/pom.xml b/streaming/pom.xml
index 6435224a1467..ecbafd833418 100644
--- a/streaming/pom.xml
+++ b/streaming/pom.xml
@@ -41,6 +41,10 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>javax.servlet-api</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.scala-lang</groupId>
       <artifactId>scala-library</artifactId>
diff --git a/yarn/pom.xml b/yarn/pom.xml
index 02f36627431b..b8714b54502e 100644
--- a/yarn/pom.xml
+++ b/yarn/pom.xml
@@ -28,7 +28,7 @@
   <artifactId>yarn-parent_2.10</artifactId>
   <packaging>pom</packaging>
   <name>Spark Project YARN Parent POM</name>
-
+
   <dependencies>
     <dependency>
       <groupId>org.apache.spark</groupId>
@@ -50,7 +50,6 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-client</artifactId>
-      <version>${yarn.version}</version>
     </dependency>
     <dependency>
       <groupId>org.scalatest</groupId>
@@ -114,42 +113,12 @@
     <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-antrun-plugin</artifactId>
-        <executions>
-          <execution>
-            <phase>test</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <exportAntProperties>true</exportAntProperties>
-              <tasks>
-                <property name="spark.classpath" refid="maven.test.classpath" />
-                <property environment="env" />
-                <fail message="Please set the SCALA_HOME (or SCALA_LIBRARY_PATH if scala is on the path) environment variables and retry.">
-                  <condition>
-                    <not>
-                      <or>
-                        <isset property="env.SCALA_HOME" />
-                        <isset property="env.SCALA_LIBRARY_PATH" />
-                      </or>
-                    </not>
-                  </condition>
-                </fail>
-              </tasks>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
       <plugin>
         <groupId>org.scalatest</groupId>
         <artifactId>scalatest-maven-plugin</artifactId>
         <configuration>
           <environmentVariables>
             <SPARK_HOME>${basedir}/../..</SPARK_HOME>
-            <SPARK_TESTING>1</SPARK_TESTING>
             <SPARK_CLASSPATH>${spark.classpath}</SPARK_CLASSPATH>
           </environmentVariables>
         </configuration>
       </plugin>
     </plugins>

From 5fb961fdd7b601c937074960641ef17fbd08a7b5 Mon Sep 17 00:00:00 2001
From: witgo
Date: Sat, 3 May 2014 15:54:19 +0800
Subject: [PATCH 21/23] Revert exclusion of org.eclipse.jetty.orbit:javax.servlet

---
 bagel/pom.xml     |  4 ----
 core/pom.xml      |  4 ----
 examples/pom.xml  |  4 ----
 graphx/pom.xml    |  4 ----
 mllib/pom.xml     |  4 ----
 pom.xml           | 15 ++-------------
 repl/pom.xml      |  4 ----
 streaming/pom.xml |  4 ----
 8 files changed, 2 insertions(+), 41 deletions(-)

diff --git a/bagel/pom.xml b/bagel/pom.xml
index adb691648213..355f437c5b16 100644
--- a/bagel/pom.xml
+++ b/bagel/pom.xml
@@ -41,10 +41,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.scalatest</groupId>
       <artifactId>scalatest_${scala.binary.version}</artifactId>
diff --git a/core/pom.xml b/core/pom.xml
index 54b53f9d4260..1f71bb95f857 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -65,10 +65,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
diff --git a/examples/pom.xml b/examples/pom.xml
index f6b3f8150b96..e1fc149d87f1 100644
--- a/examples/pom.xml
+++ b/examples/pom.xml
@@ -120,10 +120,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>com.twitter</groupId>
       <artifactId>algebird-core_${scala.binary.version}</artifactId>
diff --git a/graphx/pom.xml b/graphx/pom.xml
index f8abc7f3c8ac..dc108d2fe7fb 100644
--- a/graphx/pom.xml
+++ b/graphx/pom.xml
@@ -46,10 +46,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.scalatest</groupId>
       <artifactId>scalatest_${scala.binary.version}</artifactId>
diff --git a/mllib/pom.xml b/mllib/pom.xml
index 947d0d919d64..cdd33dbb7970 100644
--- a/mllib/pom.xml
+++ b/mllib/pom.xml
@@ -41,10 +41,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.jblas</groupId>
       <artifactId>jblas</artifactId>
diff --git a/pom.xml b/pom.xml
index 71def98165f0..e79972cbb42c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -214,19 +214,6 @@
         <groupId>org.eclipse.jetty</groupId>
         <artifactId>jetty-server</artifactId>
         <version>${jetty.version}</version>
-        <exclusions>
-          <exclusion>
-            <groupId>org.eclipse.jetty.orbit</groupId>
-            <artifactId>javax.servlet</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-
-      <dependency>
-        <groupId>javax.servlet</groupId>
-        <artifactId>javax.servlet-api</artifactId>
-        <version>3.0.1</version>
       </dependency>
       <dependency>
        <groupId>com.google.guava</groupId>
@@ -854,6 +841,8 @@
             <exclusion>
               <groupId>org.jboss.netty</groupId>
               <artifactId>netty</artifactId>
             </exclusion>
+            <exclusion>
+              <groupId>javax.servlet</groupId><artifactId>servlet-api</artifactId></exclusion>
           </exclusions>
diff --git a/repl/pom.xml b/repl/pom.xml
index c686b2f978ef..bcdb24b040cc 100644
--- a/repl/pom.xml
+++ b/repl/pom.xml
@@ -58,10 +58,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.scala-lang</groupId>
       <artifactId>scala-compiler</artifactId>
diff --git a/streaming/pom.xml b/streaming/pom.xml
index ecbafd833418..6435224a1467 100644
--- a/streaming/pom.xml
+++ b/streaming/pom.xml
@@ -41,10 +41,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.scala-lang</groupId>
       <artifactId>scala-library</artifactId>

From a5ff7d13f523a6494842f61982f706ada4f9a63a Mon Sep 17 00:00:00 2001
From: witgo
Date: Sat, 3 May 2014 18:50:53 +0800
Subject: [PATCH 22/23] Revert exclusion of org.eclipse.jetty.orbit:javax.servlet

---
 .travis.yml              | 2 +-
 project/SparkBuild.scala | 4 +---
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index a37e958c6809..8ebd0d68429f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -29,4 +29,4 @@
     - $HOME/.ivy2
     - $HOME/.sbt
 script:
-  - "sbt ++$TRAVIS_SCALA_VERSION $TEST"
\ No newline at end of file
+  - "sbt ++$TRAVIS_SCALA_VERSION $TEST"
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 1b52d596c449..b537bc7ed579 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -267,8 +267,7 @@
 
     libraryDependencies ++= Seq(
         "io.netty"          % "netty-all"         % "4.0.17.Final",
-        "org.eclipse.jetty" % "jetty-server"      % jettyVersion excludeAll(excludeJettyServlet),
-        "javax.servlet"     % "javax.servlet-api" % "3.0.1",
+        "org.eclipse.jetty" % "jetty-server"      % jettyVersion,
         "org.eclipse.jetty" % "jetty-util"        % jettyVersion,
         "org.eclipse.jetty" % "jetty-plus"        % jettyVersion,
         "org.eclipse.jetty" % "jetty-security"    % jettyVersion,
@@ -316,7 +315,6 @@
   val excludeFastutil = ExclusionRule(organization = "it.unimi.dsi")
   val excludeJruby = ExclusionRule(organization = "org.jruby")
   val excludeThrift = ExclusionRule(organization = "org.apache.thrift")
-  val excludeJettyServlet = ExclusionRule(organization = "org.eclipse.jetty.orbit")
 
   def sparkPreviousArtifact(id: String, organization: String = "org.apache.spark",
       version: String = "0.9.0-incubating", crossVersion: String = "2.10"): Option[sbt.ModuleID] = {
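For reference, the sbt pattern this patch removes mirrors the Maven `<exclusions>` stanzas reverted in the previous patch: an `ExclusionRule` filters one transitive artifact from a single dependency, and a pinned `javax.servlet-api` fills the hole. A minimal sketch, assuming `jettyVersion` is defined elsewhere in `SparkBuild.scala`:

```scala
// Pre-revert shape: drop jetty's "orbit" servlet artifact and pin our own
// servlet API instead of the one jetty-server pulls in transitively.
val excludeJettyServlet = ExclusionRule(organization = "org.eclipse.jetty.orbit")

libraryDependencies ++= Seq(
  "org.eclipse.jetty" % "jetty-server"      % jettyVersion excludeAll excludeJettyServlet,
  "javax.servlet"     % "javax.servlet-api" % "3.0.1"
)
```

After the revert, jetty-server simply keeps its own transitive servlet dependency.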

From 1cd8cfabda8e281e6da5d2a5ab9eafdb7bcc126b Mon Sep 17 00:00:00 2001
From: witgo
Date: Sat, 3 May 2014 22:35:09 +0800
Subject: [PATCH 23/23] Add license headers

---
 bagel/pom.xml                      |  4 ----
 core/pom.xml                       |  4 ----
 examples/pom.xml                   |  4 ----
 graphx/pom.xml                     |  4 ----
 mllib/pom.xml                      |  4 ----
 pom.xml                            | 13 -------------
 .../MatrixFactorizationModel.scala | 17 +++++++++++++++++
 repl/pom.xml                       |  4 ----
 streaming/pom.xml                  |  4 ----
 9 files changed, 17 insertions(+), 41 deletions(-)

diff --git a/bagel/pom.xml b/bagel/pom.xml
index adb691648213..355f437c5b16 100644
--- a/bagel/pom.xml
+++ b/bagel/pom.xml
@@ -41,10 +41,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.scalatest</groupId>
       <artifactId>scalatest_${scala.binary.version}</artifactId>
diff --git a/core/pom.xml b/core/pom.xml
index 1fc6099a0665..b4e608588fd3 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -65,10 +65,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
diff --git a/examples/pom.xml b/examples/pom.xml
index f6b3f8150b96..e1fc149d87f1 100644
--- a/examples/pom.xml
+++ b/examples/pom.xml
@@ -120,10 +120,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>com.twitter</groupId>
       <artifactId>algebird-core_${scala.binary.version}</artifactId>
diff --git a/graphx/pom.xml b/graphx/pom.xml
index f8abc7f3c8ac..dc108d2fe7fb 100644
--- a/graphx/pom.xml
+++ b/graphx/pom.xml
@@ -46,10 +46,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.scalatest</groupId>
       <artifactId>scalatest_${scala.binary.version}</artifactId>
diff --git a/mllib/pom.xml b/mllib/pom.xml
index 947d0d919d64..cdd33dbb7970 100644
--- a/mllib/pom.xml
+++ b/mllib/pom.xml
@@ -41,10 +41,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.jblas</groupId>
       <artifactId>jblas</artifactId>
diff --git a/pom.xml b/pom.xml
index 94f1a9168a1c..c825fac5c04d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -216,19 +216,6 @@
         <groupId>org.eclipse.jetty</groupId>
         <artifactId>jetty-server</artifactId>
         <version>${jetty.version}</version>
-        <exclusions>
-          <exclusion>
-            <groupId>org.eclipse.jetty.orbit</groupId>
-            <artifactId>javax.servlet</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-
-      <dependency>
-        <groupId>javax.servlet</groupId>
-        <artifactId>javax.servlet-api</artifactId>
-        <version>3.0.1</version>
       </dependency>
       <dependency>
        <groupId>com.google.guava</groupId>
diff --git a/python-api/src/main/scala/org/apache/spark/mllib/api/python/recommendation/MatrixFactorizationModel.scala b/python-api/src/main/scala/org/apache/spark/mllib/api/python/recommendation/MatrixFactorizationModel.scala
index 9dd0a7246797..ddb868c368cd 100644
--- a/python-api/src/main/scala/org/apache/spark/mllib/api/python/recommendation/MatrixFactorizationModel.scala
+++ b/python-api/src/main/scala/org/apache/spark/mllib/api/python/recommendation/MatrixFactorizationModel.scala
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.spark.mllib.api.python.recommendation
 
 import org.apache.spark.api.java.JavaRDD
 import org.apache.spark.rdd.RDD
diff --git a/repl/pom.xml b/repl/pom.xml
index c686b2f978ef..bcdb24b040cc 100644
--- a/repl/pom.xml
+++ b/repl/pom.xml
@@ -58,10 +58,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.scala-lang</groupId>
       <artifactId>scala-compiler</artifactId>
diff --git a/streaming/pom.xml b/streaming/pom.xml
index ecbafd833418..6435224a1467 100644
--- a/streaming/pom.xml
+++ b/streaming/pom.xml
@@ -41,10 +41,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.scala-lang</groupId>
       <artifactId>scala-library</artifactId>