diff --git a/docs/running-on-yarn.md b/docs/running-on-yarn.md
index 0362f5a22331..9e054deace29 100644
--- a/docs/running-on-yarn.md
+++ b/docs/running-on-yarn.md
@@ -83,14 +83,14 @@ Most of the configs are the same for Spark on YARN as for other deployment modes
 <tr>
   <td><code>spark.yarn.executor.memoryOverhead</code></td>
-  <td>384</td>
+  <td>executorMemory * 0.06, with minimum of 384</td>
   <td>
     The amount of off heap memory (in megabytes) to be allocated per executor. This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc.
   </td>
 </tr>
 <tr>
   <td><code>spark.yarn.driver.memoryOverhead</code></td>
-  <td>384</td>
+  <td>driverMemory * 0.06, with minimum of 384</td>
   <td>
     The amount of off heap memory (in megabytes) to be allocated per driver. This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc.
   </td>
 </tr>
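
The new default is max(memory * 0.06, 384) in megabytes, so small containers keep the old 384 MB floor while large ones scale with the 6% factor (the crossover sits at 384 / 0.06 = 6400 MB). A minimal sketch of the arithmetic, using hypothetical executor sizes:

    // Sketch of the new default, mirroring MEMORY_OVERHEAD_FACTOR / MEMORY_OVERHEAD_MIN below.
    def defaultOverhead(memoryMb: Int): Int =
      math.max((0.06 * memoryMb).toInt, 384)

    defaultOverhead(2048)   // 384  -- 0.06 * 2048 = 122, the 384 MB minimum wins
    defaultOverhead(8192)   // 491  -- 0.06 * 8192 = 491.52, truncated toward zero
    defaultOverhead(20480)  // 1228 -- large executors scale linearly with the factor
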
diff --git a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/ExecutorLauncher.scala b/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/ExecutorLauncher.scala
index bfdb6232f511..d3d8e25621eb 100644
--- a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/ExecutorLauncher.scala
+++ b/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/ExecutorLauncher.scala
@@ -100,7 +100,8 @@ class ExecutorLauncher(args: ApplicationMasterArguments, conf: Configuration, sp
     if (minimumMemory > 0) {
       val mem = args.executorMemory + sparkConf.getInt("spark.yarn.executor.memoryOverhead",
-        YarnAllocationHandler.MEMORY_OVERHEAD)
+        math.max((YarnAllocationHandler.MEMORY_OVERHEAD_FACTOR * args.executorMemory).toInt,
+          YarnAllocationHandler.MEMORY_OVERHEAD_MIN))
       val numCore = (mem / minimumMemory) + (if (0 != (mem % minimumMemory)) 1 else 0)

       if (numCore > 0) {
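
The numCore expression above is a ceiling division: it rounds the total memory request up to a whole number of minimumMemory-sized units. A standalone sketch of the same arithmetic (the 1024 MB minimum allocation is a hypothetical, typical YARN setting):

    // Round mem up to the next multiple of minimumMemory, as in ExecutorLauncher.
    def ceilUnits(mem: Int, minimumMemory: Int): Int =
      (mem / minimumMemory) + (if (0 != (mem % minimumMemory)) 1 else 0)

    ceilUnits(2048 + 384, 1024)  // 3 -- the overhead spills the request into a third unit
    ceilUnits(2048, 1024)        // 2 -- an exact multiple needs no rounding
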
diff --git a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala b/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
index 80e0162e9f27..47d67b40875c 100644
--- a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
+++ b/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
@@ -90,7 +90,8 @@ private[yarn] class YarnAllocationHandler(
   // Additional memory overhead - in mb.
   private def memoryOverhead: Int = sparkConf.getInt("spark.yarn.executor.memoryOverhead",
-    YarnAllocationHandler.MEMORY_OVERHEAD)
+    math.max((YarnAllocationHandler.MEMORY_OVERHEAD_FACTOR * executorMemory).toInt,
+      YarnAllocationHandler.MEMORY_OVERHEAD_MIN))

   private val numExecutorsRunning = new AtomicInteger()
   // Used to generate a unique id per executor
@@ -548,8 +549,9 @@ object YarnAllocationHandler {
   // request types (like map/reduce in hadoop for example)
   val PRIORITY = 1

-  // Additional memory overhead - in mb
-  val MEMORY_OVERHEAD = 384
+  // Additional memory overhead - the minimum is in mb, the factor is a fraction of executorMemory
+  val MEMORY_OVERHEAD_FACTOR = 0.06
+  val MEMORY_OVERHEAD_MIN = 384

   // Host to rack map - saved from allocation requests
   // We are expecting this not to change.
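
Because the computed value is only the fallback argument to sparkConf.getInt, an explicit spark.yarn.executor.memoryOverhead setting still takes precedence. A hedged sketch of the lookup order (the 4096 MB executor is hypothetical):

    import org.apache.spark.SparkConf

    val conf = new SparkConf()
    val computed = math.max((0.06 * 4096).toInt, 384)            // max(245, 384) = 384
    conf.getInt("spark.yarn.executor.memoryOverhead", computed)  // 384: key unset, fallback used
    conf.set("spark.yarn.executor.memoryOverhead", "1024")
    conf.getInt("spark.yarn.executor.memoryOverhead", computed)  // 1024: explicit value wins
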
diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala b/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
index 556f49342977..689db6299be6 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
+++ b/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
@@ -67,7 +67,8 @@ trait ClientBase extends Logging {
   // Additional memory overhead - in mb.
   protected def memoryOverhead: Int = sparkConf.getInt("spark.yarn.driver.memoryOverhead",
-    YarnAllocationHandler.MEMORY_OVERHEAD)
+    math.max((YarnAllocationHandler.MEMORY_OVERHEAD_FACTOR * args.amMemory).toInt,
+      YarnAllocationHandler.MEMORY_OVERHEAD_MIN))

   // TODO(harvey): This could just go in ClientArguments.
   def validateArgs() = {
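
On the driver side the same formula is applied to args.amMemory, and the container request YARN ultimately sees is the driver memory plus this overhead. A rough sketch of the resulting request size (amounts hypothetical):

    val amMemory = 4096                                    // hypothetical --driver-memory, in MB
    val overhead = math.max((0.06 * amMemory).toInt, 384)  // 384 here: 0.06 * 4096 = 245 < 384
    val amContainer = amMemory + overhead                  // 4480 MB requested from YARN
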
diff --git a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala b/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
index 29ccec2adcac..c35ed62d77ee 100644
--- a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
+++ b/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
@@ -92,7 +92,8 @@ private[yarn] class YarnAllocationHandler(
   // Additional memory overhead - in mb.
   private def memoryOverhead: Int = sparkConf.getInt("spark.yarn.executor.memoryOverhead",
-    YarnAllocationHandler.MEMORY_OVERHEAD)
+    math.max((YarnAllocationHandler.MEMORY_OVERHEAD_FACTOR * executorMemory).toInt,
+      YarnAllocationHandler.MEMORY_OVERHEAD_MIN))

   // Number of container requests that have been sent to, but not yet allocated by the
   // ApplicationMaster.
@@ -562,8 +563,9 @@ object YarnAllocationHandler {
   // request types (like map/reduce in hadoop for example)
   val PRIORITY = 1

-  // Additional memory overhead - in mb.
-  val MEMORY_OVERHEAD = 384
+  // Additional memory overhead - the minimum is in mb, the factor is a fraction of executorMemory
+  val MEMORY_OVERHEAD_FACTOR = 0.06
+  val MEMORY_OVERHEAD_MIN = 384

   // Host to rack map - saved from allocation requests. We are expecting this not to change.
   // Note that it is possible for this to change : and ResurceManager will indicate that to us via