From b2c6a099b9b1dd990f024a7657ce0ba53761b2c7 Mon Sep 17 00:00:00 2001
From: Sean Owen
Date: Thu, 27 Mar 2014 10:10:55 +0000
Subject: [PATCH 1/3] Add perm gen and code cache settings to scalatest,
 mirroring the SBT settings elsewhere, which allows tests to complete in at
 least one environment where they are failing. (Also removed a duplicate
 -Xms setting.)

---
 pom.xml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/pom.xml b/pom.xml
index f0644ed284363..72acf2b402703 100644
--- a/pom.xml
+++ b/pom.xml
@@ -646,7 +646,6 @@
               <arg>-deprecation</arg>
             </args>
             <jvmArgs>
-              <jvmArg>-Xms64m</jvmArg>
               <jvmArg>-Xms1024m</jvmArg>
               <jvmArg>-Xmx1024m</jvmArg>
               <jvmArg>-XX:PermSize=${PermGen}</jvmArg>
@@ -689,7 +688,7 @@
           <reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
           <junitxml>.</junitxml>
           <filereports>${project.build.directory}/SparkTestSuite.txt</filereports>
-          <argLine>-Xms64m -Xmx3g</argLine>
+          <argLine>-Xmx3g -XX:MaxPermSize=${MaxPermGen} -XX:ReservedCodeCacheSize=512m</argLine>
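For context on the flags above: -XX:PermSize/-XX:MaxPermSize size the
permanent generation, where a pre-Java-8 HotSpot JVM stores class metadata,
and -XX:ReservedCodeCacheSize caps the JIT code cache; test runs that load
many classes can exhaust PermGen well before the -Xmx heap limit. A minimal
sketch, not part of the patch, for confirming what a forked test JVM was
actually started with (the object name and the pool-name matching are
assumptions, not Spark code):

    import java.lang.management.ManagementFactory
    import scala.collection.JavaConverters._

    object JvmMemorySettings {
      def main(args: Array[String]) {
        // Flags this JVM was launched with, e.g. -Xmx3g -XX:MaxPermSize=...
        ManagementFactory.getRuntimeMXBean.getInputArguments.asScala
          .filter(_.startsWith("-X"))
          .foreach(println)
        // Current limits of the pools those flags size; on a Java 7 HotSpot
        // JVM the relevant pool names include "Perm Gen" and "Code Cache".
        ManagementFactory.getMemoryPoolMXBeans.asScala
          .filter(p => p.getName.contains("Perm") || p.getName.contains("Code Cache"))
          .foreach(p => println(s"${p.getName}: max=${p.getUsage.getMax} bytes"))
      }
    }

Running it under the scalatest argLine above should echo -Xmx3g and the two
-XX: settings, which is a quick way to verify a failing environment actually
picked them up.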
From a02679cf15f36d652f48ceb4deb1a18e0bb10f23 Mon Sep 17 00:00:00 2001
From: Sean Owen
Date: Thu, 27 Mar 2014 10:11:41 +0000
Subject: [PATCH 2/3] Fix scaladoc errors from some recent doc changes; the
 missing links are generating build warnings. We apparently can't generate
 links to classes outside the current module.

---
 .../src/main/scala/org/apache/spark/mllib/linalg/SVD.scala  | 2 +-
 .../scala/org/apache/spark/sql/execution/Exchange.scala     | 6 +++---
 .../scala/org/apache/spark/sql/execution/Generate.scala     | 2 +-
 .../src/main/scala/org/apache/spark/SparkHadoopWriter.scala | 2 +-
 .../org/apache/spark/sql/hive/HiveMetastoreCatalog.scala    | 2 +-
 .../src/main/scala/org/apache/spark/sql/hive/HiveQl.scala   | 2 +-
 .../src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala | 2 +-
 7 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/SVD.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/SVD.scala
index 3e7cc648d1d37..4c4b6a8e70688 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/SVD.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/SVD.scala
@@ -73,7 +73,7 @@ class SVD {
    * U is a row-by-row dense matrix
    * S is a simple double array of singular values
    * V is a 2d array matrix
-   * See [[denseSVD]] for more documentation
+   * See `denseSVD` for more documentation
    */
   def compute(matrix: RDD[Array[Double]]):
       (RDD[Array[Double]], Array[Double], Array[Array[Double]]) = {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
index 869673b1fe978..d59da95c0fc42 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
@@ -70,8 +70,8 @@ case class Exchange(newPartitioning: Partitioning, child: SparkPlan) extends Una
 }
 
 /**
- * Ensures that the [[catalyst.plans.physical.Partitioning Partitioning]] of input data meets the
- * [[catalyst.plans.physical.Distribution Distribution]] requirements for each operator by inserting
+ * Ensures that the Partitioning of input data meets the
+ * Distribution requirements for each operator by inserting
  * [[Exchange]] Operators where required.
  */
 object AddExchange extends Rule[SparkPlan] {
@@ -133,4 +133,4 @@ object AddExchange extends Rule[SparkPlan] {
       operator.withNewChildren(repartitionedChildren)
     }
   }
-}
+}
\ No newline at end of file
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Generate.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Generate.scala
index e902e6ced521d..e53845f447f78 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Generate.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Generate.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.execution
 import org.apache.spark.sql.catalyst.expressions.{Generator, JoinedRow, Literal, Projection}
 
 /**
- * Applies a [[catalyst.expressions.Generator Generator]] to a stream of input rows, combining the
+ * Applies a Generator to a stream of input rows, combining the
  * output of each into a new stream of rows. This operation is similar to a `flatMap` in functional
  * programming with one important additional feature, which allows the input rows to be joined with
  * their output.
diff --git a/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala b/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
index 7219c030cb0f0..f4a7a6c41dc41 100644
--- a/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
@@ -30,7 +30,7 @@ import org.apache.hadoop.io.Writable
 
 /**
  * Internal helper class that saves an RDD using a Hive OutputFormat.
- * It is based on [[SparkHadoopWriter]].
+ * It is based on SparkHadoopWriter.
  */
 protected[spark]
 class SparkHiveHadoopWriter(
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index 4f8353666a12b..d1e67bb4d34f9 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -137,7 +137,7 @@ class HiveMetastoreCatalog(hive: HiveContext) extends Catalog with Logging {
 
   /**
    * UNIMPLEMENTED: It needs to be decided how we will persist in-memory tables to the metastore.
-   * For now, if this functionality is desired mix in the in-memory [[OverrideCatalog]].
+   * For now, if this functionality is desired mix in the in-memory OverrideCatalog.
    */
   override def registerTable(
       databaseName: Option[String], tableName: String, plan: LogicalPlan): Unit = ???
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
index f4b61381f9a27..df4b51746c84c 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
@@ -122,7 +122,7 @@ object HiveQl {
 
   /**
    * A set of implicit transformations that allow Hive ASTNodes to be rewritten by transformations
-   * similar to [[catalyst.trees.TreeNode]].
+   * similar to TreeNode.
    *
    * Note that this should be considered very experimental and is not indented as a replacement
   * for TreeNode. Primarily it should be noted ASTNodes are not immutable and do not appear to
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
index 44901db3f963b..0a5be6879cf89 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
@@ -359,7 +359,7 @@ case class HiveGenericUdaf(
 
 /**
  * Converts a Hive Generic User Defined Table Generating Function (UDTF) to a
- * [[catalyst.expressions.Generator Generator]]. Note that the semantics of Generators do not allow
+ * Generator. Note that the semantics of Generators do not allow
  * Generators to maintain state in between input rows. Thus UDTFs that rely on partitioning
  * dependent operations like calls to `close()` before producing output will not operate the same as
  * in Hive. However, in practice this should not affect compatibility for most sane UDTFs
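The rewrite in the hunks above is mechanical: every [[...]] wiki link whose
target lives outside the module being documented is downgraded to plain
monospace. A hypothetical before/after illustration, not taken from the
patch (the traits are placeholders):

    // Before: scaladoc tries to resolve the link target within this module
    // and, failing that, emits a warning along the lines of
    // "Could not find any member to link for catalyst.expressions.Generator".
    /** Applies a [[catalyst.expressions.Generator Generator]] to input rows. */
    trait GenerateBefore

    // After: backticks render the same name in monospace without attempting
    // a link, so no warning is produced and readers see the same text.
    /** Applies a `Generator` to input rows. */
    trait GenerateAfter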
From c0f2d31b066f83173ef9ca35d0ba147a8ddfcbd8 Mon Sep 17 00:00:00 2001
From: Sean Owen
Date: Thu, 27 Mar 2014 10:31:29 +0000
Subject: [PATCH 3/3] Appease scalastyle with a newline at the end of the file

---
 .../main/scala/org/apache/spark/sql/execution/Exchange.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
index d59da95c0fc42..d60a978e46fd8 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
@@ -133,4 +133,4 @@ object AddExchange extends Rule[SparkPlan] {
       operator.withNewChildren(repartitionedChildren)
     }
   }
-}
\ No newline at end of file
+}
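For reference, the scalastyle rule being appeased here simply requires that
every source file end with a newline, which PATCH 2 accidentally dropped from
Exchange.scala. A minimal sketch of the same check, assuming nothing about
scalastyle's internals (the object name and message are illustrative only):

    import java.nio.file.{Files, Paths}

    object NewlineAtEof {
      // A file passes when it is non-empty and its last byte is a line feed.
      def endsWithNewline(path: String): Boolean = {
        val bytes = Files.readAllBytes(Paths.get(path))
        bytes.nonEmpty && bytes.last == '\n'.toByte
      }

      def main(args: Array[String]) {
        args.filterNot(endsWithNewline)
          .foreach(p => println(s"$p: missing newline at end of file"))
      }
    }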