diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/SVD.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/SVD.scala
index 3e7cc648d1d37..4c4b6a8e70688 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/SVD.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/SVD.scala
@@ -73,7 +73,7 @@ class SVD {
* U is a row-by-row dense matrix
* S is a simple double array of singular values
* V is a matrix stored as a 2-D array
- * See [[denseSVD]] for more documentation
+ * See `denseSVD` for more documentation
*/
def compute(matrix: RDD[Array[Double]]):
(RDD[Array[Double]], Array[Double], Array[Array[Double]]) = {
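For context, here is how the triple returned by `compute` can be consumed. This is a hypothetical sketch, not part of the patch: it assumes `SVD` has a usable no-arg constructor with sensible defaults and that `sc` is an existing `SparkContext`; only the `compute` signature is taken from the code above.

```scala
import org.apache.spark.mllib.linalg.SVD
import org.apache.spark.rdd.RDD

// Hypothetical usage; `sc` is an existing SparkContext.
val rows: RDD[Array[Double]] = sc.parallelize(Seq(
  Array(1.0, 2.0),
  Array(3.0, 4.0)))

// U comes back as an RDD of rows, S as the singular values,
// V as a 2-D array, matching the scaladoc above.
val (u, s, v) = new SVD().compute(rows)
```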
diff --git a/pom.xml b/pom.xml
index f0644ed284363..72acf2b402703 100644
--- a/pom.xml
+++ b/pom.xml
@@ -646,7 +646,6 @@
        <arg>-deprecation</arg>
      </args>
      <jvmArgs>
-       <jvmArg>-Xms64m</jvmArg>
        <jvmArg>-Xms1024m</jvmArg>
        <jvmArg>-Xmx1024m</jvmArg>
        <jvmArg>-XX:PermSize=${PermGen}</jvmArg>
@@ -689,7 +688,7 @@
          <reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
          <junitxml>.</junitxml>
          <filereports>${project.build.directory}/SparkTestSuite.txt</filereports>
-         <argLine>-Xms64m -Xmx3g</argLine>
+         <argLine>-Xmx3g -XX:MaxPermSize=${MaxPermGen} -XX:ReservedCodeCacheSize=512m</argLine>
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
index 869673b1fe978..d60a978e46fd8 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
@@ -70,8 +70,8 @@ case class Exchange(newPartitioning: Partitioning, child: SparkPlan) extends Una
}
/**
- * Ensures that the [[catalyst.plans.physical.Partitioning Partitioning]] of input data meets the
- * [[catalyst.plans.physical.Distribution Distribution]] requirements for each operator by inserting
+ * Ensures that the Partitioning of input data meets the
+ * Distribution requirements for each operator by inserting
* [[Exchange]] operators where required.
*/
object AddExchange extends Rule[SparkPlan] {
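The rule described in that scaladoc follows a common shape: recursively compare each operator's required distribution with its child's actual partitioning and splice in an `Exchange` where they disagree. Below is a self-contained toy sketch of that shape, not the catalyst API; every name in it is invented for illustration.

```scala
sealed trait Plan
case class Scan(partitioning: String) extends Plan
case class Aggregate(requiredDistribution: String, child: Plan) extends Plan
case class Exchange(targetPartitioning: String, child: Plan) extends Plan

// The partitioning an operator's output actually has. In this toy,
// Aggregate simply preserves its child's partitioning.
def partitioningOf(p: Plan): String = p match {
  case Scan(part)          => part
  case Exchange(target, _) => target
  case Aggregate(_, child) => partitioningOf(child)
}

// Walk bottom-up; insert an Exchange wherever a child's partitioning
// does not satisfy the parent's required distribution.
def addExchange(p: Plan): Plan = p match {
  case Aggregate(required, child) =>
    val fixed = addExchange(child)
    if (partitioningOf(fixed) == required) Aggregate(required, fixed)
    else Aggregate(required, Exchange(required, fixed))
  case other => other
}

// addExchange(Aggregate("hash(a)", Scan("roundRobin")))
//   => Aggregate("hash(a)", Exchange("hash(a)", Scan("roundRobin")))
```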
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Generate.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Generate.scala
index e902e6ced521d..e53845f447f78 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Generate.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Generate.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.execution
import org.apache.spark.sql.catalyst.expressions.{Generator, JoinedRow, Literal, Projection}
/**
- * Applies a [[catalyst.expressions.Generator Generator]] to a stream of input rows, combining the
+ * Applies a Generator to a stream of input rows, combining the
* output of each into a new stream of rows. This operation is similar to a `flatMap` in functional
* programming with one important additional feature, which allows the input rows to be joined with
* their output.
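The "join" feature mentioned above is the part that goes beyond a plain `flatMap`: each generated row can be emitted alongside the input row that produced it. A toy collection-level sketch of those semantics (plain Scala collections, not the catalyst operator):

```scala
// Each input "row" carries the values a generator would produce for it.
val input = Seq(("a", Seq(1, 2)), ("b", Seq(3)))

// Plain flatMap: the originating row is lost.
val exploded = input.flatMap { case (_, xs) => xs }
// Seq(1, 2, 3)

// Generate's join feature: emit each generated value next to the row
// that produced it.
val joined = input.flatMap { case row @ (_, xs) => xs.map(x => (row, x)) }
// Seq((("a", Seq(1, 2)), 1), (("a", Seq(1, 2)), 2), (("b", Seq(3)), 3))
```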
diff --git a/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala b/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
index 7219c030cb0f0..f4a7a6c41dc41 100644
--- a/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
@@ -30,7 +30,7 @@ import org.apache.hadoop.io.Writable
/**
* Internal helper class that saves an RDD using a Hive OutputFormat.
- * It is based on [[SparkHadoopWriter]].
+ * It is based on SparkHadoopWriter.
*/
protected[spark]
class SparkHiveHadoopWriter(
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index 4f8353666a12b..d1e67bb4d34f9 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -137,7 +137,7 @@ class HiveMetastoreCatalog(hive: HiveContext) extends Catalog with Logging {
/**
* UNIMPLEMENTED: It needs to be decided how we will persist in-memory tables to the metastore.
- * For now, if this functionality is desired mix in the in-memory [[OverrideCatalog]].
+ * For now, if this functionality is desired, mix in the in-memory OverrideCatalog.
*/
override def registerTable(
databaseName: Option[String], tableName: String, plan: LogicalPlan): Unit = ???
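For reference, the mix-in the comment suggests would look roughly like the sketch below. This assumes `OverrideCatalog` is a stackable trait over `Catalog` that implements `registerTable` in memory, which is how the comment reads; the class name is invented and nothing here is shown by the patch itself.

```scala
// Hypothetical: layer in-memory table registration over the
// metastore-backed catalog.
class InMemoryHiveCatalog(hive: HiveContext)
  extends HiveMetastoreCatalog(hive)
  with OverrideCatalog
```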
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
index f4b61381f9a27..df4b51746c84c 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
@@ -122,7 +122,7 @@ object HiveQl {
/**
* A set of implicit transformations that allow Hive ASTNodes to be rewritten by transformations
- * similar to [[catalyst.trees.TreeNode]].
+ * similar to TreeNode.
*
* Note that this should be considered very experimental and is not intended as a replacement
* for TreeNode. Primarily it should be noted that ASTNodes are not immutable and do not appear to
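The "implicit transformations" this comment describes enrich a node type with TreeNode-style rewriting via an implicit class. A self-contained toy of that enrichment pattern, using an invented `Node` class rather than Hive's ASTNode:

```scala
// A minimal node standing in for Hive's ASTNode.
class Node(val token: String, val children: List[Node] = Nil)

// TreeNode-style enrichment: bolt a rewrite method onto Node.
implicit class TransformableNode(n: Node) {
  // Apply `rule` top-down wherever it is defined, rebuilding the tree
  // functionally. (The real ASTNode is mutable with unclear copy
  // semantics, which is exactly the caveat the comment warns about.)
  def transformDown(rule: PartialFunction[Node, Node]): Node = {
    val applied = if (rule.isDefinedAt(n)) rule(n) else n
    new Node(applied.token, applied.children.map(_.transformDown(rule)))
  }
}

// Usage: uppercase every token in a small tree.
val tree = new Node("select", List(new Node("col")))
val upper = tree.transformDown { case m => new Node(m.token.toUpperCase, m.children) }
```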
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
index 44901db3f963b..0a5be6879cf89 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
@@ -359,7 +359,7 @@ case class HiveGenericUdaf(
/**
* Converts a Hive Generic User Defined Table Generating Function (UDTF) to a
- * [[catalyst.expressions.Generator Generator]]. Note that the semantics of Generators do not allow
+ * Generator. Note that the semantics of Generators do not allow
* Generators to maintain state in between input rows. Thus UDTFs that rely on
* partitioning-dependent operations like calls to `close()` before producing output will not
* operate the same as in Hive. However, in practice this should not affect compatibility for
* most sane UDTFs
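The statefulness caveat can be made concrete with a toy: a per-row generator must emit all output for a row immediately, so anything a Hive UDTF would buffer until `close()` has nowhere to go. A sketch in plain collections, not Hive or catalyst:

```scala
// Per-row generation: all output for a row is derived immediately.
def explode(row: Seq[Int]): Iterator[Int] = row.iterator

val rows = Seq(Seq(1, 2), Seq(3))
val out = rows.iterator.flatMap(explode).toList // List(1, 2, 3)

// By contrast, a UDTF that buffers values across rows and only emits
// them from close() has no place to flush here: flatMap is invoked once
// per row, and nothing ever signals end-of-input to the generator.
```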